code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
# -*- coding:utf-8 -*-
#!/usr/bin/env python
"""
Date: 2021/12/9 19:09
Desc: HTTP test for the HK financial-indicator endpoint (annual report data).
"""
import requests
import pandas as pd

# NOTE(review): "1172.16.17.32" is not a valid IPv4 address (first octet > 255);
# it looks like an anonymized placeholder — substitute the real host before use.
url = "http://1172.16.17.32:8080/api/stock_financial_hk_analysis_indicator_em"
params = {
    "stock": "00700",   # HKEX stock code (Tencent Holdings)
    "indicator": "年度"  # API expects this literal value ("annual")
}
# A timeout keeps the script from hanging forever on an unreachable host,
# and raise_for_status surfaces HTTP errors instead of parsing an error page.
r = requests.get(url, params=params, timeout=30)
r.raise_for_status()
temp_df = pd.DataFrame.from_dict(r.json())
print(temp_df)
|
[
"requests.get"
] |
[((267, 299), 'requests.get', 'requests.get', (['url'], {'params': 'params'}), '(url, params=params)\n', (279, 299), False, 'import requests\n')]
|
from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
from django.template.defaultfilters import slugify
# Create your models here.
class Tournament(models.Model):
    """One edition of the tournament, identified by year (and optional title)."""
    year = models.CharField(max_length=99)
    title = models.CharField(max_length=99, blank=True)
    slug = models.SlugField(blank=True)
    active = models.BooleanField(default=False)

    def save(self, *args, **kwargs):
        """Refresh the slug from year + title on every save."""
        self.slug = slugify(f"{self.year} {self.title}")
        super().save(*args, **kwargs)

    def __str__(self):
        return self.year
class Team(models.Model):
    """A team enrolled in a tournament."""
    # (stored value, Italian display label) pairs for the jersey color.
    COLORS = (
        ('Black', 'Nero'),
        ('Silver', 'Argento'),
        ('Gray', 'Grigio'),
        ('White', 'Bianco'),
        ('Maroon', 'Amaranto'),
        ('Red', 'Rosso'),
        ('Orange', 'Arancione'),
        ('Purple', 'Viola'),
        ('Fuchsia', 'Fucsia'),
        ('Green', 'Verde Scuro'),
        ('Lime', 'Verde Lime'),  # fixed: label had a stray leading space
        ('Yellow', 'Giallo'),
        ('Navy', 'Blue Navy'),
        ('Blue', 'Blu'),
        ('Teal', 'Verde Acqua'),
        ('Azure', 'Azzurro'),
        ('Pink', 'Rosa'))
    tournament = models.ForeignKey(Tournament, on_delete=models.CASCADE, null=True, related_name='teams')
    name = models.CharField(max_length=999)
    short_name = models.CharField(max_length=12, blank=True)
    city = models.CharField(max_length=36, blank=True)
    slug = models.SlugField(blank=True)
    color = models.CharField(max_length=16, choices=COLORS, default='White', blank=True)

    def save(self, *args, **kwargs):
        """Title-case the display fields and refresh the slug before saving."""
        # slugify lowercases its input, so computing it before .title() is equivalent
        self.slug = slugify(self.name)
        self.name = self.name.title()
        self.short_name = self.short_name.title()
        self.city = self.city.title()
        super().save(*args, **kwargs)

    def __str__(self):
        return self.name
class AllStarGame(models.Model):
    """A special all-star event with free-form rules text."""
    name = models.CharField(max_length=16)
    rules = models.CharField(max_length=999, blank=True)
    slug = models.SlugField(blank=True)

    def save(self, *args, **kwargs):
        """Keep the slug in sync with the name."""
        self.slug = slugify(self.name)
        super().save(*args, **kwargs)

    def __str__(self):
        return self.name
class Human(models.Model):
    """Base model for people (extended by Player and Coach).

    NOTE: this is a concrete (non-abstract) model, so subclasses use Django
    multi-table inheritance.
    """
    SHIRT_SIZES = (('XXS', 'XXS'), ('XS', 'XS'), ('S', 'S'), ('M', 'M'), ('L', 'L'), ('XL', 'XL'), ('XXL', 'XXL'))
    first_name = models.CharField(max_length=16)
    last_name = models.CharField(max_length=16)
    jersey_size = models.CharField(max_length=4, choices=SHIRT_SIZES, blank=True)
    slug = models.SlugField(blank=True)

    def save(self, *args, **kwargs):
        """Normalize names to Title Case and refresh the slug."""
        self.slug = slugify(f"{self.first_name} {self.last_name}")
        self.first_name = self.first_name.title()
        self.last_name = self.last_name.title()
        super().save(*args, **kwargs)

    def __str__(self):
        return f"{self.first_name} {self.last_name}"
class Player(Human):
    # A player on a team's roster (multi-table inheritance from Human).
    team = models.ForeignKey(Team, on_delete=models.CASCADE, related_name='players')
    # Loosely bounded to a plausible birth-year range.
    year_of_birth = models.IntegerField(validators=[MinValueValidator(1900), MaxValueValidator(2100)], blank=True, null=True)
    jersey_number = models.IntegerField(validators=[MinValueValidator(0), MaxValueValidator(999)], blank=True, null=True)
    # Optional: the all-star game this player takes part in; kept on game deletion via SET_NULL.
    all_star_game = models.ForeignKey(AllStarGame, on_delete=models.SET_NULL, blank=True, null=True, related_name='players')
class Coach(Human):
    # A team's coach (multi-table inheritance from Human).
    team = models.ForeignKey(Team, on_delete=models.CASCADE, related_name='coaches')
    # NOTE(review): an IntegerField drops leading zeros and cannot hold '+' or
    # separators; phone numbers are usually stored in a CharField — confirm
    # before changing, since switching the type requires a migration.
    cell_number = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    email = models.EmailField(max_length=64, blank=True, null=True)
class Stage(models.Model):  # Tournament stage (It.: "Fase")
    name = models.CharField(max_length=16, blank=True)
    tournament = models.ForeignKey(Tournament, on_delete=models.CASCADE, related_name='stages')
    # Stages form a linked list via this self-reference; reverse accessor is 'next_stage'.
    precedent_stage = models.OneToOneField('self', on_delete=models.SET_NULL, blank=True, null=True, related_name='next_stage')
    # NOTE(review): the meaning of 'protected' is not visible in this file —
    # presumably it guards the stage against modification/deletion; confirm.
    protected = models.BooleanField(default=False)
    def __str__(self):
        return self.name
class Group(models.Model):  # Group within a stage (It.: "Girone")
    name = models.CharField(max_length=16, blank=True)
    # (stored value, Italian display label): round-robin vs knockout elimination.
    FORMAT_TYPES = (('Round-Robin', "All'italiana"), ('Elimination', 'Ad eliminazione'))
    stage = models.ForeignKey(Stage, on_delete=models.CASCADE, related_name='groups')
    format = models.CharField(max_length=32, choices=FORMAT_TYPES, default='Round-Robin')
    number_of_teams = models.IntegerField(validators=[MinValueValidator(0)], default=0)
    # NOTE(review): 'importance' is assumed to be an ordering/seeding weight — confirm with callers.
    importance = models.IntegerField(validators=[MinValueValidator(0)], default=0)
    def __str__(self):
        return self.name + ' - ' + self.stage.name
class Round(models.Model):  # Matchday within a group (It.: "Giornata")
    # Matchday number. The field name shadows the builtin round(), but renaming
    # it would change the DB column and the public model interface.
    round = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    group = models.ForeignKey(Group, on_delete=models.CASCADE, related_name='rounds')
    def __str__(self):
        return str(self.round)
class Match(models.Model):  # A single game (It.: "Partita")
    """A game between two teams, including live-scoreboard state (sb_* fields)."""
    # Playing periods ("sixths") plus overtime, used by the live scoreboard.
    SIXTHS = (('1', 'Primo Tempo'), ('2', 'Secondo Tempo'), ('3', 'Terzo Tempo'), ('4', 'Quarto Tempo'), ('5', 'Quinto Tempo'), ('6', 'Sesto Tempo'), ('7', 'Supplementare'))
    # (stored value, Italian display label) pairs for scoreboard colors.
    COLORS = (
        ('Black', 'Nero'),
        ('Silver', 'Argento'),
        ('Gray', 'Grigio'),
        ('White', 'Bianco'),
        ('Maroon', 'Amaranto'),
        ('Red', 'Rosso'),
        ('Orange', 'Arancione'),
        ('Purple', 'Viola'),
        ('Fuchsia', 'Fucsia'),
        ('Green', 'Verde Scuro'),
        ('Lime', 'Verde Lime'),  # fixed: label had a stray leading space
        ('Yellow', 'Giallo'),
        ('Navy', 'Blue Navy'),
        ('Blue', 'Blu'),
        ('Teal', 'Verde Acqua'),
        ('Azure', 'Azzurro'),
        ('Pink', 'Rosa'))
    round = models.ForeignKey(Round, on_delete=models.CASCADE, blank=True, related_name='matches')
    team_A = models.ForeignKey(Team, on_delete=models.CASCADE, blank=True, null=True, related_name='matches_A')
    points_A = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    team_B = models.ForeignKey(Team, on_delete=models.CASCADE, blank=True, null=True, related_name='matches_B')
    points_B = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    # Scheduling slot and venue; both survive deletion of the slot/court via SET_NULL.
    time = models.ForeignKey('Time', on_delete=models.SET_NULL, blank=True, null=True, related_name='matches')
    court = models.ForeignKey('Court', on_delete=models.SET_NULL, blank=True, null=True, related_name='matches')
    number = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    # Live-scoreboard state: current period, clock, running partials and the
    # per-period scores for both sides (A/B).
    sb_current_sixth = models.CharField(max_length=16, choices=SIXTHS, blank=True)
    sb_timer = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_partial_A = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_partial_B = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_1_sixth_A = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_1_sixth_B = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_2_sixth_A = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_2_sixth_B = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_3_sixth_A = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_3_sixth_B = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_4_sixth_A = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_4_sixth_B = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_5_sixth_A = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_5_sixth_B = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_6_sixth_A = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_6_sixth_B = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_7_sixth_A = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_7_sixth_B = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_color_A = models.CharField(max_length=16, choices=COLORS, blank=True)
    sb_color_B = models.CharField(max_length=16, choices=COLORS, blank=True)

    def save(self, *args, **kwargs):
        """Default the scoreboard colors from the teams' colors when unset."""
        # Teams are nullable, so only copy the color when a team is assigned
        # (previously this raised AttributeError for a match with no team yet).
        if not self.sb_color_A and self.team_A is not None:
            self.sb_color_A = self.team_A.color
        if not self.sb_color_B and self.team_B is not None:
            self.sb_color_B = self.team_B.color
        super().save(*args, **kwargs)

    def __str__(self):
        # Compare against None explicitly so a legitimate score of 0 is shown
        # (a bare truthiness test rendered 0 as an empty string).
        ptA = str(self.points_A) if self.points_A is not None else ''
        ptB = str(self.points_B) if self.points_B is not None else ''
        return str(self.team_A) + ' ' + ptA + ' - ' + ptB + ' ' + str(self.team_B)
class Score(models.Model):  # Standings entry (It.: "Punteggio")
    # One row of the standings table for a team within a group.
    team = models.ForeignKey(Team, on_delete=models.CASCADE, related_name='scores')
    group = models.ForeignKey(Group, on_delete=models.CASCADE, null=True, related_name='scores')
    # Ranking points accumulated in the group.
    score = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True, default=0)
    games_played = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True, default=0)
    wins = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True, default=0)
    losses = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True, default=0)
    # Aggregate points/goals for and against, presumably used for tie-breaking
    # — TODO confirm against the standings logic (not visible in this file).
    points_made = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True, default=0)
    points_conceded = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True, default=0)
    goals_made = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True, default=0)
    goals_conceded = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True, default=0)
    def __str__(self):
        return str(self.group) + ': ' + str(self.team) + ' -> ' + str(self.score)
class Court(models.Model):  # Playing field (It.: "Campo")
    tournament = models.ForeignKey(Tournament, on_delete=models.CASCADE, null=True, related_name='courts')
    name = models.CharField(max_length=16, blank=True)
    # NOTE(review): 'importance' is assumed to be an ordering/priority weight — confirm with callers.
    importance = models.IntegerField(validators=[MinValueValidator(0)], blank=True, default=0)
    def __str__(self):
        return self.name
class Day(models.Model):
    # One day of the tournament schedule.
    tournament = models.ForeignKey(Tournament, on_delete=models.CASCADE, null=True, related_name='days')
    name = models.CharField(max_length=16)
    def __str__(self):
        return self.name
class Time(models.Model):
    # A scheduling slot within a Day.
    day = models.ForeignKey(Day, on_delete=models.CASCADE, null=True, related_name='times')
    # Free-text slot time (e.g. "10:30").
    time = models.CharField(max_length=16, blank=True)
    event = models.CharField(max_length=32, blank=True)  # shown if there are no matches in the slot
    # Slots form a linked list via this self-reference; reverse accessor is 'next_time'.
    precedent_time = models.OneToOneField('self', on_delete=models.SET_NULL, blank=True, null=True, related_name='next_time')
    # Marks the first slot of the day (head of the linked list).
    initial = models.BooleanField(default=False)
    def __str__(self):
        return self.time
|
[
"django.db.models.OneToOneField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.core.validators.MinValueValidator",
"django.db.models.BooleanField",
"django.db.models.SlugField",
"django.db.models.EmailField",
"django.template.defaultfilters.slugify",
"django.core.validators.MaxValueValidator"
] |
[((224, 255), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(99)'}), '(max_length=99)\n', (240, 255), False, 'from django.db import models\n'), ((268, 311), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(99)', 'blank': '(True)'}), '(max_length=99, blank=True)\n', (284, 311), False, 'from django.db import models\n'), ((323, 351), 'django.db.models.SlugField', 'models.SlugField', ([], {'blank': '(True)'}), '(blank=True)\n', (339, 351), False, 'from django.db import models\n'), ((365, 399), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (384, 399), False, 'from django.db import models\n'), ((1167, 1259), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Tournament'], {'on_delete': 'models.CASCADE', 'null': '(True)', 'related_name': '"""teams"""'}), "(Tournament, on_delete=models.CASCADE, null=True,\n related_name='teams')\n", (1184, 1259), False, 'from django.db import models\n'), ((1267, 1299), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(999)'}), '(max_length=999)\n', (1283, 1299), False, 'from django.db import models\n'), ((1317, 1360), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(12)', 'blank': '(True)'}), '(max_length=12, blank=True)\n', (1333, 1360), False, 'from django.db import models\n'), ((1372, 1415), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(36)', 'blank': '(True)'}), '(max_length=36, blank=True)\n', (1388, 1415), False, 'from django.db import models\n'), ((1427, 1455), 'django.db.models.SlugField', 'models.SlugField', ([], {'blank': '(True)'}), '(blank=True)\n', (1443, 1455), False, 'from django.db import models\n'), ((1468, 1544), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)', 'choices': 'COLORS', 'default': '"""White"""', 'blank': '(True)'}), "(max_length=16, choices=COLORS, default='White', blank=True)\n", (1484, 1544), False, 
'from django.db import models\n'), ((1891, 1922), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)'}), '(max_length=16)\n', (1907, 1922), False, 'from django.db import models\n'), ((1935, 1979), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(999)', 'blank': '(True)'}), '(max_length=999, blank=True)\n', (1951, 1979), False, 'from django.db import models\n'), ((1991, 2019), 'django.db.models.SlugField', 'models.SlugField', ([], {'blank': '(True)'}), '(blank=True)\n', (2007, 2019), False, 'from django.db import models\n'), ((2362, 2393), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)'}), '(max_length=16)\n', (2378, 2393), False, 'from django.db import models\n'), ((2410, 2441), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)'}), '(max_length=16)\n', (2426, 2441), False, 'from django.db import models\n'), ((2460, 2523), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(4)', 'choices': 'SHIRT_SIZES', 'blank': '(True)'}), '(max_length=4, choices=SHIRT_SIZES, blank=True)\n', (2476, 2523), False, 'from django.db import models\n'), ((2535, 2563), 'django.db.models.SlugField', 'models.SlugField', ([], {'blank': '(True)'}), '(blank=True)\n', (2551, 2563), False, 'from django.db import models\n'), ((2929, 3002), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Team'], {'on_delete': 'models.CASCADE', 'related_name': '"""players"""'}), "(Team, on_delete=models.CASCADE, related_name='players')\n", (2946, 3002), False, 'from django.db import models\n'), ((3271, 3380), 'django.db.models.ForeignKey', 'models.ForeignKey', (['AllStarGame'], {'on_delete': 'models.SET_NULL', 'blank': '(True)', 'null': '(True)', 'related_name': '"""players"""'}), "(AllStarGame, on_delete=models.SET_NULL, blank=True, null=\n True, related_name='players')\n", (3288, 3380), False, 'from django.db import models\n'), ((3409, 3482), 'django.db.models.ForeignKey', 
'models.ForeignKey', (['Team'], {'on_delete': 'models.CASCADE', 'related_name': '"""coaches"""'}), "(Team, on_delete=models.CASCADE, related_name='coaches')\n", (3426, 3482), False, 'from django.db import models\n'), ((3591, 3646), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(64)', 'blank': '(True)', 'null': '(True)'}), '(max_length=64, blank=True, null=True)\n', (3608, 3646), False, 'from django.db import models\n'), ((3695, 3738), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)', 'blank': '(True)'}), '(max_length=16, blank=True)\n', (3711, 3738), False, 'from django.db import models\n'), ((3756, 3834), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Tournament'], {'on_delete': 'models.CASCADE', 'related_name': '"""stages"""'}), "(Tournament, on_delete=models.CASCADE, related_name='stages')\n", (3773, 3834), False, 'from django.db import models\n'), ((3857, 3967), 'django.db.models.OneToOneField', 'models.OneToOneField', (['"""self"""'], {'on_delete': 'models.SET_NULL', 'blank': '(True)', 'null': '(True)', 'related_name': '"""next_stage"""'}), "('self', on_delete=models.SET_NULL, blank=True, null=\n True, related_name='next_stage')\n", (3877, 3967), False, 'from django.db import models\n'), ((3979, 4013), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (3998, 4013), False, 'from django.db import models\n'), ((4113, 4156), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)', 'blank': '(True)'}), '(max_length=16, blank=True)\n', (4129, 4156), False, 'from django.db import models\n'), ((4258, 4331), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Stage'], {'on_delete': 'models.CASCADE', 'related_name': '"""groups"""'}), "(Stage, on_delete=models.CASCADE, related_name='groups')\n", (4275, 4331), False, 'from django.db import models\n'), ((4345, 4421), 'django.db.models.CharField', 'models.CharField', ([], 
{'max_length': '(32)', 'choices': 'FORMAT_TYPES', 'default': '"""Round-Robin"""'}), "(max_length=32, choices=FORMAT_TYPES, default='Round-Robin')\n", (4361, 4421), False, 'from django.db import models\n'), ((4811, 4884), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Group'], {'on_delete': 'models.CASCADE', 'related_name': '"""rounds"""'}), "(Group, on_delete=models.CASCADE, related_name='rounds')\n", (4828, 4884), False, 'from django.db import models\n'), ((5689, 5780), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Round'], {'on_delete': 'models.CASCADE', 'blank': '(True)', 'related_name': '"""matches"""'}), "(Round, on_delete=models.CASCADE, blank=True, related_name\n ='matches')\n", (5706, 5780), False, 'from django.db import models\n'), ((5789, 5891), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Team'], {'on_delete': 'models.CASCADE', 'blank': '(True)', 'null': '(True)', 'related_name': '"""matches_A"""'}), "(Team, on_delete=models.CASCADE, blank=True, null=True,\n related_name='matches_A')\n", (5806, 5891), False, 'from django.db import models\n'), ((5994, 6096), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Team'], {'on_delete': 'models.CASCADE', 'blank': '(True)', 'null': '(True)', 'related_name': '"""matches_B"""'}), "(Team, on_delete=models.CASCADE, blank=True, null=True,\n related_name='matches_B')\n", (6011, 6096), False, 'from django.db import models\n'), ((6197, 6300), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Time"""'], {'on_delete': 'models.SET_NULL', 'blank': '(True)', 'null': '(True)', 'related_name': '"""matches"""'}), "('Time', on_delete=models.SET_NULL, blank=True, null=True,\n related_name='matches')\n", (6214, 6300), False, 'from django.db import models\n'), ((6309, 6413), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Court"""'], {'on_delete': 'models.SET_NULL', 'blank': '(True)', 'null': '(True)', 'related_name': '"""matches"""'}), "('Court', on_delete=models.SET_NULL, 
blank=True, null=True,\n related_name='matches')\n", (6326, 6413), False, 'from django.db import models\n'), ((6524, 6583), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)', 'choices': 'SIXTHS', 'blank': '(True)'}), '(max_length=16, choices=SIXTHS, blank=True)\n', (6540, 6583), False, 'from django.db import models\n'), ((8246, 8305), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)', 'choices': 'COLORS', 'blank': '(True)'}), '(max_length=16, choices=COLORS, blank=True)\n', (8262, 8305), False, 'from django.db import models\n'), ((8323, 8382), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)', 'choices': 'COLORS', 'blank': '(True)'}), '(max_length=16, choices=COLORS, blank=True)\n', (8339, 8382), False, 'from django.db import models\n'), ((8906, 8978), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Team'], {'on_delete': 'models.CASCADE', 'related_name': '"""scores"""'}), "(Team, on_delete=models.CASCADE, related_name='scores')\n", (8923, 8978), False, 'from django.db import models\n'), ((8991, 9080), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Group'], {'on_delete': 'models.CASCADE', 'null': '(True)', 'related_name': '"""scores"""'}), "(Group, on_delete=models.CASCADE, null=True, related_name=\n 'scores')\n", (9008, 9080), False, 'from django.db import models\n'), ((10083, 10176), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Tournament'], {'on_delete': 'models.CASCADE', 'null': '(True)', 'related_name': '"""courts"""'}), "(Tournament, on_delete=models.CASCADE, null=True,\n related_name='courts')\n", (10100, 10176), False, 'from django.db import models\n'), ((10184, 10227), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)', 'blank': '(True)'}), '(max_length=16, blank=True)\n', (10200, 10227), False, 'from django.db import models\n'), ((10416, 10507), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Tournament'], {'on_delete': 
'models.CASCADE', 'null': '(True)', 'related_name': '"""days"""'}), "(Tournament, on_delete=models.CASCADE, null=True,\n related_name='days')\n", (10433, 10507), False, 'from django.db import models\n'), ((10515, 10546), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)'}), '(max_length=16)\n', (10531, 10546), False, 'from django.db import models\n'), ((10634, 10720), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Day'], {'on_delete': 'models.CASCADE', 'null': '(True)', 'related_name': '"""times"""'}), "(Day, on_delete=models.CASCADE, null=True, related_name=\n 'times')\n", (10651, 10720), False, 'from django.db import models\n'), ((10727, 10770), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)', 'blank': '(True)'}), '(max_length=16, blank=True)\n', (10743, 10770), False, 'from django.db import models\n'), ((10783, 10826), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'blank': '(True)'}), '(max_length=32, blank=True)\n', (10799, 10826), False, 'from django.db import models\n'), ((10874, 10983), 'django.db.models.OneToOneField', 'models.OneToOneField', (['"""self"""'], {'on_delete': 'models.SET_NULL', 'blank': '(True)', 'null': '(True)', 'related_name': '"""next_time"""'}), "('self', on_delete=models.SET_NULL, blank=True, null=\n True, related_name='next_time')\n", (10894, 10983), False, 'from django.db import models\n'), ((10993, 11027), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (11012, 11027), False, 'from django.db import models\n'), ((458, 495), 'django.template.defaultfilters.slugify', 'slugify', (["(self.year + ' ' + self.title)"], {}), "(self.year + ' ' + self.title)\n", (465, 495), False, 'from django.template.defaultfilters import slugify\n'), ((1603, 1621), 'django.template.defaultfilters.slugify', 'slugify', (['self.name'], {}), '(self.name)\n', (1610, 1621), False, 'from 
django.template.defaultfilters import slugify\n'), ((2078, 2096), 'django.template.defaultfilters.slugify', 'slugify', (['self.name'], {}), '(self.name)\n', (2085, 2096), False, 'from django.template.defaultfilters import slugify\n'), ((2622, 2669), 'django.template.defaultfilters.slugify', 'slugify', (["(self.first_name + ' ' + self.last_name)"], {}), "(self.first_name + ' ' + self.last_name)\n", (2629, 2669), False, 'from django.template.defaultfilters import slugify\n'), ((3055, 3078), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(1900)'], {}), '(1900)\n', (3072, 3078), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((3080, 3103), 'django.core.validators.MaxValueValidator', 'MaxValueValidator', (['(2100)'], {}), '(2100)\n', (3097, 3103), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((3181, 3201), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (3198, 3201), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((3203, 3225), 'django.core.validators.MaxValueValidator', 'MaxValueValidator', (['(999)'], {}), '(999)\n', (3220, 3225), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((3533, 3553), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (3550, 3553), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((4476, 4496), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (4493, 4496), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((4559, 4579), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (4576, 4579), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((4753, 4773), 'django.core.validators.MinValueValidator', 'MinValueValidator', 
(['(0)'], {}), '(0)\n', (4770, 4773), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((5935, 5955), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (5952, 5955), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((6140, 6160), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (6157, 6160), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((6455, 6475), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (6472, 6475), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((6631, 6651), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (6648, 6651), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((6728, 6748), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (6745, 6748), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((6825, 6845), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (6842, 6845), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((6922, 6942), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (6939, 6942), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((7019, 7039), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (7036, 7039), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((7116, 7136), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (7133, 7136), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((7213, 7233), 
'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (7230, 7233), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((7310, 7330), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (7327, 7330), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((7407, 7427), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (7424, 7427), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((7504, 7524), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (7521, 7524), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((7601, 7621), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (7618, 7621), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((7698, 7718), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (7715, 7718), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((7795, 7815), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (7812, 7815), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((7892, 7912), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (7909, 7912), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((7989, 8009), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (8006, 8009), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((8086, 8106), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (8103, 8106), False, 'from django.core.validators import MinValueValidator, 
MaxValueValidator\n'), ((8183, 8203), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (8200, 8203), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((9120, 9140), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (9137, 9140), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((9228, 9248), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (9245, 9248), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((9328, 9348), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (9345, 9348), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((9430, 9450), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (9447, 9450), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((9537, 9557), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (9554, 9557), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((9648, 9668), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (9665, 9668), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((9754, 9774), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (9771, 9774), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((9864, 9884), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (9881, 9884), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((10277, 10297), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (10294, 10297), False, 'from 
django.core.validators import MinValueValidator, MaxValueValidator\n')]
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#
# Copyright 2012 <EMAIL>
#
"""Tornado SOAP web-service launcher.

Registers the per-platform control services (AIX, EMC, VMware, HP) — each of
which lives in its own subdirectory next to this file — and serves them on
the port configured in CommonDefinition.
"""
import tornado.httpserver
import tornado.ioloop
from tornadows import soaphandler
from tornadows import webservices
from tornadows import xmltypes
from tornadows.soaphandler import webservice
from time import ctime, sleep
import CommonDefinition
# process web service class import
import os
import sys

cur_dir = os.path.dirname(os.path.abspath(__file__))


def _register_subdir(name):
    """Append <cur_dir>/<name> to sys.path and return that path."""
    path = cur_dir + CommonDefinition.path_dir_sep + name
    sys.path.append(path)
    return path


# The individual *_dir names are kept for backward compatibility with any
# module that imports them from here.
AIX_dir = _register_subdir('AIX')
EMC_dir = _register_subdir('EMC')
VMware_dir = _register_subdir('VMware')
HP_dir = _register_subdir('HP')

# These imports only resolve after the sys.path additions above.
from ControlAIX import ControlAIX
from ControlEMC import ControlEMC
from ControlVMware import ControlVMware
from ControlHP import ControlHP

if __name__ == '__main__':
    # (service name, handler class) pairs exposed by the SOAP server.
    service = [('ControlAIX', ControlAIX),
               ('ControlEMC', ControlEMC),
               ('ControlVMware', ControlVMware),
               ('ControlHP', ControlHP)]
    app = webservices.WebService(service)
    ws = tornado.httpserver.HTTPServer(app)
    port = CommonDefinition.Tornado_port
    ws.listen(port)
    tornado.ioloop.IOLoop.instance().start()
|
[
"sys.path.append",
"os.path.abspath",
"tornadows.webservices.WebService"
] |
[((497, 521), 'sys.path.append', 'sys.path.append', (['AIX_dir'], {}), '(AIX_dir)\n', (512, 521), False, 'import sys\n'), ((578, 602), 'sys.path.append', 'sys.path.append', (['EMC_dir'], {}), '(EMC_dir)\n', (593, 602), False, 'import sys\n'), ((665, 692), 'sys.path.append', 'sys.path.append', (['VMware_dir'], {}), '(VMware_dir)\n', (680, 692), False, 'import sys\n'), ((747, 770), 'sys.path.append', 'sys.path.append', (['HP_dir'], {}), '(HP_dir)\n', (762, 770), False, 'import sys\n'), ((416, 441), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (431, 441), False, 'import os\n'), ((1125, 1156), 'tornadows.webservices.WebService', 'webservices.WebService', (['service'], {}), '(service)\n', (1147, 1156), False, 'from tornadows import webservices\n')]
|
import json
from os.path import abspath, dirname, isfile, join
CURRENT_DIR = dirname(abspath(__file__))
TEST_DATA_DIR = join(dirname(dirname(dirname(CURRENT_DIR))), 'test_data')
from opendp_apps.analysis.testing.base_stat_spec_test import StatSpecTestCase
from opendp_apps.analysis.tools.dp_count_spec import DPCountSpec
from opendp_apps.model_helpers.msg_util import msgt
from opendp_apps.analysis import static_vals as astatic
from opendp_apps.profiler import static_vals as pstatic
from opendp_apps.utils.extra_validators import *
class DPCountStatSpecTest(StatSpecTestCase):
fixtures = ['test_dataset_data_001.json', ]
def test_05_valid_noise_mechanism(self):
"""Check for the correct noise_mechanism"""
dp_count = DPCountSpec({})
self.assertEqual(dp_count.noise_mechanism, astatic.NOISE_GEOMETRIC_MECHANISM)
def test_10_count_valid_spec(self):
"""(10) Run DP Count valid spec, float column"""
msgt(self.test_10_count_valid_spec.__doc__)
spec_props = {'variable': 'EyeHeight',
'col_index': 19,
'statistic': astatic.DP_COUNT,
'dataset_size': 183,
'epsilon': 1.0,
'delta': 0.0,
'cl': astatic.CL_99,
'missing_values_handling': astatic.MISSING_VAL_INSERT_FIXED,
'fixed_value': '182',
'variable_info': {'min': -8,
'max': 5,
'type': 'Float', },
}
dp_count = DPCountSpec(spec_props)
dp_count.is_chain_valid()
#if dp_count.has_error():
# print(dp_count.get_err_msgs())
# ------------------------------------------------------
# Run the actual count
# ------------------------------------------------------
# Column indexes - We know this data has 20 columns
col_indexes = [idx for idx in range(0, 20)]
# File object
#
eye_fatigue_filepath = join(TEST_DATA_DIR, 'Fatigue_data.tab')
# print('eye_fatigue_filepath', eye_fatigue_filepath)
self.assertTrue(isfile(eye_fatigue_filepath))
file_obj = open(eye_fatigue_filepath, 'r')
# Call run_chain
#
dp_count.run_chain(col_indexes, file_obj, sep_char="\t")
file_obj.close()
self.assertFalse(dp_count.has_error())
# val from local machine: 4.6051702036798
#self.assertTrue(dp_count.accuracy_val > 4.5)
#self.assertTrue(dp_count.accuracy_val < 4.7)
# Actual count 184
self.assertTrue(dp_count.value > 170) # should be well within range
def test_20_count_valid_spec(self):
"""(20) Run DP Count valid spec, integer column"""
msgt(self.test_20_count_valid_spec.__doc__)
spec_props = {'variable': 'age',
'col_index': 1,
'statistic': astatic.DP_COUNT,
'dataset_size': 10_000,
'epsilon': 1.0,
'delta': 0.0,
'cl': astatic.CL_95,
'missing_values_handling': astatic.MISSING_VAL_INSERT_FIXED,
'fixed_value': '44',
'variable_info': {'min': 18,
'max': 95,
'type': pstatic.VAR_TYPE_INTEGER},
}
dp_count = DPCountSpec(spec_props)
self.assertTrue(dp_count.is_chain_valid())
# if dp_count.has_error():
# print(dp_count.get_err_msgs())
self.assertFalse(dp_count.has_error())
# ------------------------------------------------------
# Run the actual count
# ------------------------------------------------------
# Column indexes - We know this data has 11 columns
col_indexes = [idx for idx in range(0, 11)]
# File object
#
pums_extract_10_000 = join(TEST_DATA_DIR, 'PUMS5extract10000.csv')
# print('eye_fatigue_filepath', eye_fatigue_filepath)
self.assertTrue(isfile(pums_extract_10_000))
file_obj = open(pums_extract_10_000, 'r')
# Call run_chain
#
dp_count.run_chain(col_indexes, file_obj, sep_char=",")
file_obj.close()
self.assertFalse(dp_count.has_error())
self.show_release_result(dp_count.get_release_dict())
# val from local machine: 2.9957322850627124
self.assertTrue(dp_count.accuracy_val > 2.995)
self.assertTrue(dp_count.accuracy_val < 2.996)
# Actual count 10_000
self.assertTrue(dp_count.value > 9_980) # should be well within range
final_dict = dp_count.get_release_dict()
self.assertIn('description', final_dict)
self.assertIn('text', final_dict['description'])
self.assertIn('html', final_dict['description'])
def test_30_count_valid_another_spec(self):
"""(30) Run DP Count on another valid spec"""
msgt(self.test_30_count_valid_another_spec.__doc__)
spec_props = {'variable': 'TypingSpeed',
'col_index': 5,
'statistic': astatic.DP_COUNT,
'dataset_size': 183,
'epsilon': 1.0,
'delta': 0.0,
'cl': astatic.CL_99,
'missing_values_handling': astatic.MISSING_VAL_INSERT_FIXED,
'fixed_value': '62',
'variable_info': {'min': 1,
'max': 61,
'type': pstatic.VAR_TYPE_FLOAT},
}
dp_count = DPCountSpec(spec_props)
dp_count.is_chain_valid()
# if dp_count.has_error():
# print(dp_count.get_err_msgs())
# ------------------------------------------------------
# Run the actual count
# ------------------------------------------------------
# Column indexes - We know this data has 20 columns
col_indexes = [idx for idx in range(0, 20)]
# File object
#
eye_fatigue_filepath = join(TEST_DATA_DIR, 'Fatigue_data.tab')
# print('eye_fatigue_filepath', eye_fatigue_filepath)
self.assertTrue(isfile(eye_fatigue_filepath))
file_obj = open(eye_fatigue_filepath, 'r')
# Call run_chain
#
dp_count.run_chain(col_indexes, file_obj, sep_char="\t")
file_obj.close()
self.assertFalse(dp_count.has_error())
self.show_release_result(dp_count.get_release_dict())
# (test has wide accuracy latitude)
self.assertTrue(dp_count.accuracy_val > 4.4)
self.assertTrue(dp_count.accuracy_val < 4.8)
# Actual count 184
self.assertTrue(dp_count.value > 170) # should be well within range
final_dict = dp_count.get_release_dict()
self.assertIn('description', final_dict)
self.assertIn('text', final_dict['description'])
self.assertIn('html', final_dict['description'])
def test_40_count_valid_str_spec(self):
"""(40) Run DP Count string"""
msgt(self.test_40_count_valid_str_spec.__doc__)
spec_props = {'variable': 'Subject',
'col_index': 0,
'statistic': astatic.DP_COUNT,
'dataset_size': 183,
'epsilon': 1.0,
'delta': 0.0,
'cl': astatic.CL_95,
'missing_values_handling': astatic.MISSING_VAL_INSERT_FIXED,
'fixed_value': 'ac',
'variable_info': {'type': pstatic.VAR_TYPE_CATEGORICAL},
}
dp_count = DPCountSpec(spec_props)
dp_count.is_chain_valid()
self.assertTrue(dp_count.is_chain_valid())
# if dp_count.has_error():
# print(dp_count.get_err_msgs())
self.assertFalse(dp_count.has_error())
# ------------------------------------------------------
# Run the actual count
# ------------------------------------------------------
# Column indexes - We know this data has 20 columns
col_indexes = [idx for idx in range(0, 20)]
# File object
#
eye_fatigue_filepath = join(TEST_DATA_DIR, 'Fatigue_data.tab')
# print('eye_fatigue_filepath', eye_fatigue_filepath)
self.assertTrue(isfile(eye_fatigue_filepath))
file_obj = open(eye_fatigue_filepath, 'r')
# Call run_chain
#
dp_count.run_chain(col_indexes, file_obj, sep_char="\t")
file_obj.close()
self.assertFalse(dp_count.has_error())
# val from local machine: 4.6051702036798
# self.assertTrue(dp_count.accuracy_val > 4.5)
# self.assertTrue(dp_count.accuracy_val < 4.7)
# Actual count 184
self.assertTrue(dp_count.value > 170) # should be well within range
self.show_release_result(dp_count.get_release_dict())
# (test has wide accuracy latitude)
self.assertTrue(dp_count.accuracy_val > 2)
self.assertTrue(dp_count.accuracy_val < 4)
final_dict = dp_count.get_release_dict()
self.assertIn('description', final_dict)
self.assertIn('text', final_dict['description'])
self.assertIn('html', final_dict['description'])
def test_50_count_missing_vals_str(self):
"""(50) Run DP Count string"""
msgt(self.test_50_count_missing_vals_str.__doc__)
xspec_props = {'variable': 'gender',
'col_index': 4,
'statistic': astatic.DP_COUNT,
'dataset_size': 1_000,
'epsilon': 1.0,
'delta': 0.0,
'cl': astatic.CL_95,
'missing_values_handling': astatic.MISSING_VAL_INSERT_FIXED,
'fixed_value': 'Genderfluid',
'variable_info': {'type': pstatic.VAR_TYPE_CATEGORICAL},
}
# right from UI
spec_props = {'error': '', 'label': 'gender', 'locked': False,
'epsilon': 1.0, 'delta': 0.0, 'cl': 0.95,
'variable': 'gender', 'statistic': 'count',
'fixed_value': 'male', 'handle_as_fixed': True,
'missing_values_handling': 'insert_fixed', 'dataset_size': 1000,
'variable_info': {'name': 'gender', 'type': 'Categorical',
'label': 'gender', 'selected': True,
'categories': ['Genderfluid'], 'sort_order': 4}, 'col_index': 4}
dp_count = DPCountSpec(spec_props)
dp_count.is_chain_valid()
self.assertTrue(dp_count.is_chain_valid())
# if dp_count.has_error():
# print(dp_count.get_err_msgs())
# ------------------------------------------------------
# Run the actual count
# ------------------------------------------------------
# Column indexes - We know this data has 20 columns
col_indexes = [idx for idx in range(0, 28)]
# File object
#
bonabo_filepath = join(TEST_DATA_DIR, 'bonabo MOCK_DATA.csv')
# print('eye_fatigue_filepath', eye_fatigue_filepath)
self.assertTrue(isfile(bonabo_filepath))
file_obj = open(bonabo_filepath, 'r')
# Call run_chain
#
dp_count.run_chain(col_indexes, file_obj, sep_char=",")
file_obj.close()
self.assertFalse(dp_count.has_error())
# val from local machine: 4.6051702036798
# self.assertTrue(dp_count.accuracy_val > 4.5)
# self.assertTrue(dp_count.accuracy_val < 4.7)
# Actual count 184
self.assertTrue(dp_count.value > 970) # should be well within range
def test_60_count_missing_vals_bool(self):
"""(60) Run DP Count bool"""
msgt(self.test_60_count_missing_vals_bool.__doc__)
spec_props = {'variable': 'Boolean2',
'col_index': 8,
'statistic': astatic.DP_COUNT,
'dataset_size': 1_000,
'epsilon': 1.0,
'delta': 0.0,
'cl': astatic.CL_95,
'missing_values_handling': astatic.MISSING_VAL_INSERT_FIXED,
'fixed_value': 'true',
'variable_info': {'type': pstatic.VAR_TYPE_BOOLEAN},
}
dp_count = DPCountSpec(spec_props)
dp_count.is_chain_valid()
self.assertTrue(dp_count.is_chain_valid())
# if dp_count.has_error():
# print(dp_count.get_err_msgs())
# ------------------------------------------------------
# Run the actual count
# ------------------------------------------------------
# Column indexes - We know this data has 20 columns
col_indexes = [idx for idx in range(0, 28)]
# File object
#
bonabo_filepath = join(TEST_DATA_DIR, 'bonabo MOCK_DATA.csv')
# print('eye_fatigue_filepath', eye_fatigue_filepath)
self.assertTrue(isfile(bonabo_filepath))
file_obj = open(bonabo_filepath, 'r')
# Call run_chain
#
dp_count.run_chain(col_indexes, file_obj, sep_char=",")
file_obj.close()
self.assertFalse(dp_count.has_error())
# val from local machine: 4.6051702036798
# self.assertTrue(dp_count.accuracy_val > 4.5)
# self.assertTrue(dp_count.accuracy_val < 4.7)
# Actual count 184
self.assertTrue(dp_count.value > 970) # should be well within range
def show_release_result(self, release_dict:{}):
"""print the result to the screen"""
print(json.dumps(release_dict, indent=4))
|
[
"os.path.abspath",
"opendp_apps.model_helpers.msg_util.msgt",
"os.path.dirname",
"json.dumps",
"opendp_apps.analysis.tools.dp_count_spec.DPCountSpec",
"os.path.isfile",
"os.path.join"
] |
[((86, 103), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (93, 103), False, 'from os.path import abspath, dirname, isfile, join\n'), ((749, 764), 'opendp_apps.analysis.tools.dp_count_spec.DPCountSpec', 'DPCountSpec', (['{}'], {}), '({})\n', (760, 764), False, 'from opendp_apps.analysis.tools.dp_count_spec import DPCountSpec\n'), ((957, 1000), 'opendp_apps.model_helpers.msg_util.msgt', 'msgt', (['self.test_10_count_valid_spec.__doc__'], {}), '(self.test_10_count_valid_spec.__doc__)\n', (961, 1000), False, 'from opendp_apps.model_helpers.msg_util import msgt\n'), ((1633, 1656), 'opendp_apps.analysis.tools.dp_count_spec.DPCountSpec', 'DPCountSpec', (['spec_props'], {}), '(spec_props)\n', (1644, 1656), False, 'from opendp_apps.analysis.tools.dp_count_spec import DPCountSpec\n'), ((2107, 2146), 'os.path.join', 'join', (['TEST_DATA_DIR', '"""Fatigue_data.tab"""'], {}), "(TEST_DATA_DIR, 'Fatigue_data.tab')\n", (2111, 2146), False, 'from os.path import abspath, dirname, isfile, join\n'), ((2860, 2903), 'opendp_apps.model_helpers.msg_util.msgt', 'msgt', (['self.test_20_count_valid_spec.__doc__'], {}), '(self.test_20_count_valid_spec.__doc__)\n', (2864, 2903), False, 'from opendp_apps.model_helpers.msg_util import msgt\n'), ((3547, 3570), 'opendp_apps.analysis.tools.dp_count_spec.DPCountSpec', 'DPCountSpec', (['spec_props'], {}), '(spec_props)\n', (3558, 3570), False, 'from opendp_apps.analysis.tools.dp_count_spec import DPCountSpec\n'), ((4085, 4129), 'os.path.join', 'join', (['TEST_DATA_DIR', '"""PUMS5extract10000.csv"""'], {}), "(TEST_DATA_DIR, 'PUMS5extract10000.csv')\n", (4089, 4129), False, 'from os.path import abspath, dirname, isfile, join\n'), ((5130, 5181), 'opendp_apps.model_helpers.msg_util.msgt', 'msgt', (['self.test_30_count_valid_another_spec.__doc__'], {}), '(self.test_30_count_valid_another_spec.__doc__)\n', (5134, 5181), False, 'from opendp_apps.model_helpers.msg_util import msgt\n'), ((5827, 5850), 
'opendp_apps.analysis.tools.dp_count_spec.DPCountSpec', 'DPCountSpec', (['spec_props'], {}), '(spec_props)\n', (5838, 5850), False, 'from opendp_apps.analysis.tools.dp_count_spec import DPCountSpec\n'), ((6302, 6341), 'os.path.join', 'join', (['TEST_DATA_DIR', '"""Fatigue_data.tab"""'], {}), "(TEST_DATA_DIR, 'Fatigue_data.tab')\n", (6306, 6341), False, 'from os.path import abspath, dirname, isfile, join\n'), ((7308, 7355), 'opendp_apps.model_helpers.msg_util.msgt', 'msgt', (['self.test_40_count_valid_str_spec.__doc__'], {}), '(self.test_40_count_valid_str_spec.__doc__)\n', (7312, 7355), False, 'from opendp_apps.model_helpers.msg_util import msgt\n'), ((7902, 7925), 'opendp_apps.analysis.tools.dp_count_spec.DPCountSpec', 'DPCountSpec', (['spec_props'], {}), '(spec_props)\n', (7913, 7925), False, 'from opendp_apps.analysis.tools.dp_count_spec import DPCountSpec\n'), ((8475, 8514), 'os.path.join', 'join', (['TEST_DATA_DIR', '"""Fatigue_data.tab"""'], {}), "(TEST_DATA_DIR, 'Fatigue_data.tab')\n", (8479, 8514), False, 'from os.path import abspath, dirname, isfile, join\n'), ((9639, 9688), 'opendp_apps.model_helpers.msg_util.msgt', 'msgt', (['self.test_50_count_missing_vals_str.__doc__'], {}), '(self.test_50_count_missing_vals_str.__doc__)\n', (9643, 9688), False, 'from opendp_apps.model_helpers.msg_util import msgt\n'), ((10865, 10888), 'opendp_apps.analysis.tools.dp_count_spec.DPCountSpec', 'DPCountSpec', (['spec_props'], {}), '(spec_props)\n', (10876, 10888), False, 'from opendp_apps.analysis.tools.dp_count_spec import DPCountSpec\n'), ((11386, 11429), 'os.path.join', 'join', (['TEST_DATA_DIR', '"""bonabo MOCK_DATA.csv"""'], {}), "(TEST_DATA_DIR, 'bonabo MOCK_DATA.csv')\n", (11390, 11429), False, 'from os.path import abspath, dirname, isfile, join\n'), ((12120, 12170), 'opendp_apps.model_helpers.msg_util.msgt', 'msgt', (['self.test_60_count_missing_vals_bool.__doc__'], {}), '(self.test_60_count_missing_vals_bool.__doc__)\n', (12124, 12170), False, 'from 
opendp_apps.model_helpers.msg_util import msgt\n'), ((12718, 12741), 'opendp_apps.analysis.tools.dp_count_spec.DPCountSpec', 'DPCountSpec', (['spec_props'], {}), '(spec_props)\n', (12729, 12741), False, 'from opendp_apps.analysis.tools.dp_count_spec import DPCountSpec\n'), ((13239, 13282), 'os.path.join', 'join', (['TEST_DATA_DIR', '"""bonabo MOCK_DATA.csv"""'], {}), "(TEST_DATA_DIR, 'bonabo MOCK_DATA.csv')\n", (13243, 13282), False, 'from os.path import abspath, dirname, isfile, join\n'), ((142, 162), 'os.path.dirname', 'dirname', (['CURRENT_DIR'], {}), '(CURRENT_DIR)\n', (149, 162), False, 'from os.path import abspath, dirname, isfile, join\n'), ((2233, 2261), 'os.path.isfile', 'isfile', (['eye_fatigue_filepath'], {}), '(eye_fatigue_filepath)\n', (2239, 2261), False, 'from os.path import abspath, dirname, isfile, join\n'), ((4216, 4243), 'os.path.isfile', 'isfile', (['pums_extract_10_000'], {}), '(pums_extract_10_000)\n', (4222, 4243), False, 'from os.path import abspath, dirname, isfile, join\n'), ((6428, 6456), 'os.path.isfile', 'isfile', (['eye_fatigue_filepath'], {}), '(eye_fatigue_filepath)\n', (6434, 6456), False, 'from os.path import abspath, dirname, isfile, join\n'), ((8601, 8629), 'os.path.isfile', 'isfile', (['eye_fatigue_filepath'], {}), '(eye_fatigue_filepath)\n', (8607, 8629), False, 'from os.path import abspath, dirname, isfile, join\n'), ((11516, 11539), 'os.path.isfile', 'isfile', (['bonabo_filepath'], {}), '(bonabo_filepath)\n', (11522, 11539), False, 'from os.path import abspath, dirname, isfile, join\n'), ((13369, 13392), 'os.path.isfile', 'isfile', (['bonabo_filepath'], {}), '(bonabo_filepath)\n', (13375, 13392), False, 'from os.path import abspath, dirname, isfile, join\n'), ((13992, 14026), 'json.dumps', 'json.dumps', (['release_dict'], {'indent': '(4)'}), '(release_dict, indent=4)\n', (14002, 14026), False, 'import json\n')]
|
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from func import linear
from rnns import cell as cell
class gru(cell.Cell):
"""The Gated Recurrent Unit."""
def __init__(self, d, ln=False, scope='gru'):
super(gru, self).__init__(d, ln=ln, scope=scope)
def get_init_state(self, shape=None, x=None, scope=None):
return self._get_init_state(
self.d, shape=shape, x=x, scope=scope)
def fetch_states(self, x):
with tf.variable_scope(
"fetch_state_{}".format(self.scope or "gru")):
g = linear(x, self.d * 2,
bias=False, ln=self.ln, scope="gate_x")
h = linear(x, self.d,
bias=False, ln=self.ln, scope="hide_x")
return g, h
def __call__(self, h_, x):
# h_: the previous hidden state
# x_g/x: the current input state for gate
# x_h/x: the current input state for hidden
"""
z = sigmoid(h_, x)
r = sigmoid(h_, x)
h' = tanh(x, r * h_)
h = z * h_ + (1. - z) * h'
"""
with tf.variable_scope(
"cell_{}".format(self.scope or "gru")):
x_g, x_h = x
h_g = linear(h_, self.d * 2,
ln=self.ln, scope="gate_h")
z, r = tf.split(
tf.sigmoid(x_g + h_g), 2, -1)
h_h = linear(h_ * r, self.d,
ln=self.ln, scope="hide_h")
h = tf.tanh(x_h + h_h)
h = z * h_ + (1. - z) * h
return h
|
[
"tensorflow.tanh",
"func.linear",
"tensorflow.sigmoid"
] |
[((668, 729), 'func.linear', 'linear', (['x', '(self.d * 2)'], {'bias': '(False)', 'ln': 'self.ln', 'scope': '"""gate_x"""'}), "(x, self.d * 2, bias=False, ln=self.ln, scope='gate_x')\n", (674, 729), False, 'from func import linear\n'), ((769, 826), 'func.linear', 'linear', (['x', 'self.d'], {'bias': '(False)', 'ln': 'self.ln', 'scope': '"""hide_x"""'}), "(x, self.d, bias=False, ln=self.ln, scope='hide_x')\n", (775, 826), False, 'from func import linear\n'), ((1334, 1384), 'func.linear', 'linear', (['h_', '(self.d * 2)'], {'ln': 'self.ln', 'scope': '"""gate_h"""'}), "(h_, self.d * 2, ln=self.ln, scope='gate_h')\n", (1340, 1384), False, 'from func import linear\n'), ((1504, 1554), 'func.linear', 'linear', (['(h_ * r)', 'self.d'], {'ln': 'self.ln', 'scope': '"""hide_h"""'}), "(h_ * r, self.d, ln=self.ln, scope='hide_h')\n", (1510, 1554), False, 'from func import linear\n'), ((1596, 1614), 'tensorflow.tanh', 'tf.tanh', (['(x_h + h_h)'], {}), '(x_h + h_h)\n', (1603, 1614), True, 'import tensorflow as tf\n'), ((1455, 1476), 'tensorflow.sigmoid', 'tf.sigmoid', (['(x_g + h_g)'], {}), '(x_g + h_g)\n', (1465, 1476), True, 'import tensorflow as tf\n')]
|
import json
import os
import tempfile
from unittest import mock
from remeha import read_config, FileLogger
from remeha_core import Frame
from tests.test_base import TestBase
class TestRemeha(TestBase):
raw_test_data = bytearray([0x02, 0x01, 0xfe, 0x06, 0x48, 0x02, 0x01, 0xa2,
0x12, 0x00, 0x0a, 0x80, 0xf3, 0xc2, 0x01, 0xfc,
0x12, 0x00, 0x80, 0x9c, 0x0e, 0xd1, 0x06, 0x8e,
0x12, 0x88, 0x13, 0x98, 0x08, 0x68, 0x09, 0x6a,
0x09, 0x3a, 0x8e, 0x12, 0x47, 0x45, 0x00, 0x64,
0x47, 0x00, 0x00, 0x13, 0xc6, 0x40, 0x05, 0x03,
0xff, 0xff, 0x1e, 0x30, 0x0f, 0x04, 0xff, 0xff,
0x00, 0xc0, 0x4e, 0x12, 0x00, 0x00, 0x00, 0x00,
0x80, 0x47, 0x03, 0x40, 0x35, 0x00, 0x00, 0x17,
0xef, 0x03])
def setUp(self):
self.test_config_directory = tempfile.TemporaryDirectory()
def tearDown(self):
self.test_config_directory.cleanup()
def test_read_config(self):
test_config_path = os.path.join(self.test_config_directory.name, 'test_config.json')
test_config = open(test_config_path, mode='w+')
with mock.patch.dict('os.environ', {'REMEHA_CONF': test_config.name}):
test_config.write('{ "database_logger": { "host": "testserver.local", "user_name": "database_user", "password": "<PASSWORD>" } }')
test_config.close()
config = read_config()
assert 'database_logger' in config
def test_read_config_does_not_crash_on_unreadable_config(self):
test_config_path = os.path.join(self.test_config_directory.name, 'test_config.json')
test_config = open(test_config_path, mode='w+')
with mock.patch.dict('os.environ', {'REMEHA_CONF': test_config.name}):
test_config.write('{ "database_logger": { "host": "testserver.local", "user_name": "database_user", "password": "<PASSWORD>" } ')
test_config.close()
config = read_config()
assert config is None
def test_read_default_config_if_REMEHA_CONF_not_set(self):
with mock.patch.dict('os.environ', clear=True):
config = read_config()
assert config is not None
def test_filelogger_does_nothing_if_configured_off(self):
with mock.patch('remeha.csv') as csv_mock:
file_logger = FileLogger(None, None)
file_logger.log_data(self.raw_test_data)
csv_mock.writer.assert_not_called()
def test_filelogger_uses_filename_if_provided(self):
with mock.patch('remeha.csv') as csv_mock:
with tempfile.TemporaryDirectory() as temp_dir:
file_name = temp_dir + '/test.csv'
file_logger = FileLogger(None, file_name)
file_logger.log_data(Frame(frame_data=TestRemeha.raw_test_data))
file_logger.close()
assert os.path.exists(file_name)
csv_mock.writer.assert_called()
def test_filelogger_uses_config_path_if_provided(self):
with tempfile.TemporaryDirectory() as temp_dir:
file_name = temp_dir + '/test.csv'
file_logger = FileLogger(json.loads('{"enabled": true, "path": "%s"}' % file_name), None)
file_logger.log_data(Frame(frame_data=TestRemeha.raw_test_data))
file_logger.close()
assert os.path.exists(file_name)
def test_filelogger_no_file_logging_if_disabled_in_config(self):
with tempfile.TemporaryDirectory() as temp_dir:
file_name = temp_dir + '/test.csv'
file_logger = FileLogger(json.loads('{"enabled": false, "path": "%s"}' % file_name), None)
file_logger.log_data(Frame(frame_data=TestRemeha.raw_test_data))
file_logger.close()
assert not os.path.exists(file_name)
def test_filelogger_commandline_parameter_overwrites_config_path(self):
with tempfile.TemporaryDirectory() as temp_dir:
expected_file_name = temp_dir + '/test.csv'
not_expected_file_name = temp_dir + '/test2.csv'
file_logger = FileLogger(json.loads('{"enabled": true, "path": "%s"}' % not_expected_file_name), expected_file_name)
file_logger.log_data(Frame(frame_data=TestRemeha.raw_test_data))
file_logger.close()
assert not os.path.exists(not_expected_file_name)
assert os.path.exists(expected_file_name)
|
[
"tempfile.TemporaryDirectory",
"json.loads",
"os.path.exists",
"unittest.mock.patch.dict",
"unittest.mock.patch",
"remeha.FileLogger",
"remeha.read_config",
"remeha_core.Frame",
"os.path.join"
] |
[((1019, 1048), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1046, 1048), False, 'import tempfile\n'), ((1179, 1244), 'os.path.join', 'os.path.join', (['self.test_config_directory.name', '"""test_config.json"""'], {}), "(self.test_config_directory.name, 'test_config.json')\n", (1191, 1244), False, 'import os\n'), ((1729, 1794), 'os.path.join', 'os.path.join', (['self.test_config_directory.name', '"""test_config.json"""'], {}), "(self.test_config_directory.name, 'test_config.json')\n", (1741, 1794), False, 'import os\n'), ((1314, 1378), 'unittest.mock.patch.dict', 'mock.patch.dict', (['"""os.environ"""', "{'REMEHA_CONF': test_config.name}"], {}), "('os.environ', {'REMEHA_CONF': test_config.name})\n", (1329, 1378), False, 'from unittest import mock\n'), ((1576, 1589), 'remeha.read_config', 'read_config', ([], {}), '()\n', (1587, 1589), False, 'from remeha import read_config, FileLogger\n'), ((1864, 1928), 'unittest.mock.patch.dict', 'mock.patch.dict', (['"""os.environ"""', "{'REMEHA_CONF': test_config.name}"], {}), "('os.environ', {'REMEHA_CONF': test_config.name})\n", (1879, 1928), False, 'from unittest import mock\n'), ((2125, 2138), 'remeha.read_config', 'read_config', ([], {}), '()\n', (2136, 2138), False, 'from remeha import read_config, FileLogger\n'), ((2246, 2287), 'unittest.mock.patch.dict', 'mock.patch.dict', (['"""os.environ"""'], {'clear': '(True)'}), "('os.environ', clear=True)\n", (2261, 2287), False, 'from unittest import mock\n'), ((2310, 2323), 'remeha.read_config', 'read_config', ([], {}), '()\n', (2321, 2323), False, 'from remeha import read_config, FileLogger\n'), ((2434, 2458), 'unittest.mock.patch', 'mock.patch', (['"""remeha.csv"""'], {}), "('remeha.csv')\n", (2444, 2458), False, 'from unittest import mock\n'), ((2498, 2520), 'remeha.FileLogger', 'FileLogger', (['None', 'None'], {}), '(None, None)\n', (2508, 2520), False, 'from remeha import read_config, FileLogger\n'), ((2693, 2717), 'unittest.mock.patch', 
'mock.patch', (['"""remeha.csv"""'], {}), "('remeha.csv')\n", (2703, 2717), False, 'from unittest import mock\n'), ((3188, 3217), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (3215, 3217), False, 'import tempfile\n'), ((3508, 3533), 'os.path.exists', 'os.path.exists', (['file_name'], {}), '(file_name)\n', (3522, 3533), False, 'import os\n'), ((3617, 3646), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (3644, 3646), False, 'import tempfile\n'), ((4058, 4087), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (4085, 4087), False, 'import tempfile\n'), ((4537, 4571), 'os.path.exists', 'os.path.exists', (['expected_file_name'], {}), '(expected_file_name)\n', (4551, 4571), False, 'import os\n'), ((2748, 2777), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2775, 2777), False, 'import tempfile\n'), ((2872, 2899), 'remeha.FileLogger', 'FileLogger', (['None', 'file_name'], {}), '(None, file_name)\n', (2882, 2899), False, 'from remeha import read_config, FileLogger\n'), ((3040, 3065), 'os.path.exists', 'os.path.exists', (['file_name'], {}), '(file_name)\n', (3054, 3065), False, 'import os\n'), ((3315, 3372), 'json.loads', 'json.loads', (['(\'{"enabled": true, "path": "%s"}\' % file_name)'], {}), '(\'{"enabled": true, "path": "%s"}\' % file_name)\n', (3325, 3372), False, 'import json\n'), ((3413, 3455), 'remeha_core.Frame', 'Frame', ([], {'frame_data': 'TestRemeha.raw_test_data'}), '(frame_data=TestRemeha.raw_test_data)\n', (3418, 3455), False, 'from remeha_core import Frame\n'), ((3744, 3802), 'json.loads', 'json.loads', (['(\'{"enabled": false, "path": "%s"}\' % file_name)'], {}), '(\'{"enabled": false, "path": "%s"}\' % file_name)\n', (3754, 3802), False, 'import json\n'), ((3843, 3885), 'remeha_core.Frame', 'Frame', ([], {'frame_data': 'TestRemeha.raw_test_data'}), '(frame_data=TestRemeha.raw_test_data)\n', (3848, 3885), False, 'from 
remeha_core import Frame\n'), ((3942, 3967), 'os.path.exists', 'os.path.exists', (['file_name'], {}), '(file_name)\n', (3956, 3967), False, 'import os\n'), ((4255, 4325), 'json.loads', 'json.loads', (['(\'{"enabled": true, "path": "%s"}\' % not_expected_file_name)'], {}), '(\'{"enabled": true, "path": "%s"}\' % not_expected_file_name)\n', (4265, 4325), False, 'import json\n'), ((4380, 4422), 'remeha_core.Frame', 'Frame', ([], {'frame_data': 'TestRemeha.raw_test_data'}), '(frame_data=TestRemeha.raw_test_data)\n', (4385, 4422), False, 'from remeha_core import Frame\n'), ((4479, 4517), 'os.path.exists', 'os.path.exists', (['not_expected_file_name'], {}), '(not_expected_file_name)\n', (4493, 4517), False, 'import os\n'), ((2937, 2979), 'remeha_core.Frame', 'Frame', ([], {'frame_data': 'TestRemeha.raw_test_data'}), '(frame_data=TestRemeha.raw_test_data)\n', (2942, 2979), False, 'from remeha_core import Frame\n')]
|
from talon import Context
ctx = Context()
ctx.matches = r"""
tag: user.vim_ultisnips
mode: user.python
mode: command
and code.language: python
"""
# spoken name -> snippet name
ultisnips_snippets = {
"header": "#!",
"if main": "ifmain",
"for loop": "for",
"class": "class",
"function": "def",
"method": "deff",
"class method": "defc",
"static method": "defs",
"from": "from",
"if": "if",
"if else": "ife",
"if if else": "ifee",
"try": "try",
"try except": "trye",
"finally": "tryf",
"trip string": '"',
"trip tick": "'",
}
private_snippets = {
"print success": "psuccess",
"print fail": "pfail",
"dick string": "dstr",
"dick format string": "dfstr",
"new arg parser": "argparse",
"add argument": "narg",
"dock param": "dockparam",
}
ctx.lists["user.snippets"] = {**ultisnips_snippets, **private_snippets}
|
[
"talon.Context"
] |
[((33, 42), 'talon.Context', 'Context', ([], {}), '()\n', (40, 42), False, 'from talon import Context\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# DSA nonce recovery from repeated nonce
#
# Cryptanalytic MVP award.
#
# This attack (in an elliptic curve group) broke the PS3. It is a great,
# great attack.
#
# In this file:
#
# https://cryptopals.com/static/challenge-data/44.txt
#
# find a collection of DSA-signed messages. (NB: each msg has a trailing
# space.)
#
# These were signed under the following pubkey:
#
# y = 2d026f4bf30195ede3a088da85e398ef869611d0f68f07
# 13d51c9c1a3a26c95105d915e2d8cdf26d056b86b8a7b8
# 5519b1c23cc3ecdc6062650462e3063bd179c2a6581519
# f674a61f1d89a1fff27171ebc1b93d4dc57bceb7ae2430
# f98a6a4d83d8279ee65d71c1203d2c96d65ebbf7cce9d3
# 2971c3de5084cce04a2e147821
#
# (using the same domain parameters as the previous exercise)
#
# It should not be hard to find the messages for which we have accidentally
# used a repeated "k". Given a pair of such messages, you can discover the "k"
# we used with the following formula:
#
# (m1 - m2)
# k = --------- mod q
# (s1 - s2)
#
# 9th Grade Math: Study It!
#
# If you want to demystify this, work out that equation from the original
# DSA equations.
#
# Basic cyclic group math operations want to screw you.
#
# Remember all this math is mod q; s2 may be larger than s1, for instance,
# which isn't a problem if you're doing the subtraction mod q. If you're
# like me, you'll definitely lose an hour to forgetting a paren or a mod q.
# (And don't forget that modular inverse function!)
#
# What's my private key? Its SHA-1 (from hex) is:
#
# ca8f6f7c66fa362d40760d135b763eb8527d3d52
#
import inspect
import os
import sys
from itertools import combinations
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(lambda: 0)))))
from util.dsa import G, P, Q
from util.loader import loader
from util.misc import invmod
from util.sha1 import SHA1
from util.text import from_bytes, to_bytes, to_hexstring
def recover_dsa_privkey(k, r, s, z, p=P, q=Q, g=G):
    """Recover the DSA private key x from one signature (r, s) over digest z
    given the nonce k, via x = (s*k - z) * r^-1 mod q.

    Returns None when k is not invertible mod q (i.e. not a valid nonce).
    """
    if invmod(k, q) is None:
        # A nonce with no inverse mod q can never produce a valid signature.
        return None
    numerator = ((s * k) % q - z) % q
    return (numerator * invmod(r, q)) % q
def main():
    """Find two signatures in 44.txt that reuse a nonce and recover the key."""
    # NOTE BEFORE STARTING: Do not be fooled! Signatures are created using
    # private keys, notwithstanding the (perhaps intentionally misleading?)
    # wording in this and the previous challenge that might imply otherwise.
    lines = loader("44.txt", lambda l: l.rstrip("\n").split(": "))
    msgs = []
    for i in range(0, len(lines), 4):
        block = lines[i : i + 4]
        # After the rstrip() and split() above, a block looks like:
        #
        # [["msg", "Listen for me, you better listen for me now. "],
        # ["s", "1267396447369736888040262262183731677867615804316"],
        # ["r", "1105520928110492191417703162650245113664610474875"],
        # ["m", "a4db3de27e2db3e5ef085ced2bced91b82e0df19"]]
        #
        # We have a message, the components of a DSA signature, and the SHA1
        # hash of the message. Everything here's a `str` at the moment, so
        # we'll transform the values.
        #
        # There's an error in "m" for one of the blocks in the challenge data,
        # but we can just calculate the hash ourselves.
        block[0][1] = block[0][1].encode()
        block[1][1] = int(block[1][1])
        block[2][1] = int(block[2][1])
        block[3][1] = from_bytes(SHA1(block[0][1]).digest())
        msgs.append({data[0]: data[1] for data in block})
    print(f"Loaded {len(msgs)} DSA-signed messages.")
    print("Recovering private key from the first repeated nonce we detect.")
    for msg1, msg2 in combinations(msgs, 2):
        # A repeated nonce k produces an identical r (r = (g^k mod p) mod q),
        # so equal r values flag the reuse.
        if msg1["r"] != msg2["r"]:
            continue
        m1, m2 = msg1["m"], msg2["m"]
        s1, s2 = msg1["s"], msg2["s"]
        # k = (m1 - m2) / (s1 - s2) mod q; then recover x from one signature.
        k = (((m1 - m2) % Q) * invmod(s1 - s2, Q)) % Q
        privkey = recover_dsa_privkey(k, msg1["r"], s1, m1)
        # Check against the known SHA-1 fingerprint of the target key.
        digest = SHA1(to_hexstring(to_bytes(privkey))).hexdigest()
        assert digest == "ca8f6f7c66fa362d40760d135b763eb8527d3d52"
        print()
        print("Recovered key:", privkey)
        break
    else:
        # for/else: only reached when no repeated nonce was found.
        print("Failed to recover key!")
if __name__ == "__main__":
    # Allow a clean Ctrl-C exit without a traceback.
    try:
        main()
    except KeyboardInterrupt:
        pass
# Output:
#
# Loaded 11 DSA-signed messages.
# Recovering private key from the first repeated nonce we detect.
#
# Recovered key: 1379952329417023174824742221952501647027600451162
#
|
[
"util.text.to_bytes",
"util.sha1.SHA1",
"inspect.getfile",
"itertools.combinations",
"util.misc.invmod"
] |
[((2037, 2049), 'util.misc.invmod', 'invmod', (['k', 'q'], {}), '(k, q)\n', (2043, 2049), False, 'from util.misc import invmod\n'), ((3850, 3871), 'itertools.combinations', 'combinations', (['msgs', '(2)'], {}), '(msgs, 2)\n', (3862, 3871), False, 'from itertools import combinations\n'), ((2130, 2142), 'util.misc.invmod', 'invmod', (['r', 'q'], {}), '(r, q)\n', (2136, 2142), False, 'from util.misc import invmod\n'), ((1766, 1793), 'inspect.getfile', 'inspect.getfile', (['(lambda : 0)'], {}), '(lambda : 0)\n', (1781, 1793), False, 'import inspect\n'), ((4038, 4056), 'util.misc.invmod', 'invmod', (['(s1 - s2)', 'Q'], {}), '(s1 - s2, Q)\n', (4044, 4056), False, 'from util.misc import invmod\n'), ((3608, 3625), 'util.sha1.SHA1', 'SHA1', (['block[0][1]'], {}), '(block[0][1])\n', (3612, 3625), False, 'from util.sha1 import SHA1\n'), ((4158, 4175), 'util.text.to_bytes', 'to_bytes', (['privkey'], {}), '(privkey)\n', (4166, 4175), False, 'from util.text import from_bytes, to_bytes, to_hexstring\n')]
|
import socket
from typing import Union
from urllib.parse import urlparse
from . import types
__all__ = [
'Host',
'Address',
'InvalidHost',
'InvalidIP',
]
class Host:
    """Parsed endpoint: hostname plus optional port and userinfo.

    Accepts another ``Host`` (copy), a ``(hostname, port)`` or
    ``(hostname, port, username, password)`` tuple, or a netloc string
    such as ``user:pass@example.com:53`` or ``[::1]:53``.
    """
    hostname: str
    port: Union[int, None]
    username: Union[str, None]
    password: Union[str, None]
    def __init__(self, netloc):
        if isinstance(netloc, Host):
            self._load_host(netloc)
        elif isinstance(netloc, str):
            self._load_str(netloc)
        else:
            self._load_tuple(netloc)
    def _load_tuple(self, netloc):
        # Either (hostname, port) or (hostname, port, username, password).
        if len(netloc) == 2:
            self.hostname, self.port = netloc
            self.username = self.password = None
        else:
            self.hostname, self.port, self.username, self.password = netloc
    def _load_host(self, host):
        # Copy constructor.
        self.hostname = host.hostname
        self.port = host.port
        self.username = host.username
        self.password = host.password
    def _load_str(self, netloc: str):
        """Parse a netloc string like ``user:pass@host:port``.

        Handles bracketed IPv6 literals with or without a port
        (``[::1]:53`` and ``[::1]``).
        """
        userinfo, _, host = netloc.rpartition('@')
        if '[' in host:
            # Bracketed IPv6 literal; a port may follow the closing bracket.
            # BUGFIX: the previous code unconditionally split on the last ':'
            # here, so a portless literal like '[::1]' raised ValueError.
            bracket, _, tail = host.rpartition(']')
            if tail.startswith(':'):
                hostname = bracket + ']'
                port = int(tail[1:])
            else:
                hostname, port = host, None
        elif host.count(':') == 1:
            # Exactly one colon: unbracketed 'host:port'.
            hostname, _, port = host.rpartition(':')
            port = int(port)
        else:
            hostname, port = host, None
        if hostname.startswith('[') and hostname.endswith(']'):
            hostname = hostname[1:-1]
        if userinfo:
            username, _, password = userinfo.partition(':')
        else:
            username = password = None
        self.hostname = hostname
        self.port = port
        self.username = username
        self.password = password
    @property
    def host(self):
        """``hostname[:port]`` with IPv6 literals re-bracketed."""
        host = f'[{self.hostname}]' if ':' in self.hostname else self.hostname
        if self.port:
            host = f'{host}:{self.port}'
        return host
    def __str__(self):
        userinfo = ''
        if self.username:
            userinfo += self.username
            if self.password:
                userinfo += ':' + self.password
            userinfo += '@'
        return userinfo + self.host
class InvalidHost(Exception):
    """Raised when a host string is not a valid IP address where one is required."""
    pass
class InvalidIP(Exception):
    """Raised when a PTR name is requested for a non-IPv4 hostname."""
    pass
def get_ip_type(hostname):
    """Classify a hostname: types.AAAA for IPv6, types.A for IPv4,
    None for anything that parses as neither (treated as a domain name).

    Raises InvalidHost when the name contains ':' but is not valid IPv6.
    """
    if ':' not in hostname:
        # Either an IPv4 dotted quad or a domain name.
        try:
            socket.inet_pton(socket.AF_INET, hostname)
        except OSError:
            return None  # not an IP literal -> caller decides about domains
        return types.A
    # A ':' means it must be an IPv6 literal.
    try:
        socket.inet_pton(socket.AF_INET6, hostname)
    except OSError:
        raise InvalidHost(hostname)
    return types.AAAA
class Address:
    """A DNS endpoint: host info, transport protocol and optional path."""
    def __init__(self, hostinfo: Host, protocol: str, path: str=None):
        self.hostinfo = hostinfo
        self.protocol = protocol
        self.path = path
        # types.A / types.AAAA for IP literals, None for domain names.
        self.ip_type = get_ip_type(self.hostinfo.hostname)
    def __str__(self):
        # Canonical form 'protocol://host[:port][path]'; '-' marks no protocol.
        protocol = self.protocol or '-'
        host = self.hostinfo.host
        path = self.path or ''
        return f'{protocol}://{host}{path}'
    def __eq__(self, other):
        # Equality and hashing are both based on the canonical string form.
        return str(self) == str(other)
    def __repr__(self):
        return str(self)
    def __hash__(self):
        return hash(str(self))
    def copy(self):
        """Return an independent copy (the Host is duplicated too)."""
        return Address(Host(self.hostinfo), self.protocol, self.path)
    def to_addr(self):
        """Return a (hostname, port) tuple suitable for socket APIs."""
        return self.hostinfo.hostname, self.hostinfo.port
    def to_ptr(self):
        """Return the reverse-DNS (PTR) name for an IPv4 address.

        Raises:
            InvalidIP: if the hostname is not an IPv4 address.
        """
        if self.ip_type is types.A:
            return '.'.join(reversed(
                self.hostinfo.hostname.split('.'))) + '.in-addr.arpa'
        raise InvalidIP(self.hostinfo.hostname)
    # Default port per protocol, applied when the parsed value omits one.
    default_ports = {
        'tcp': 53,
        'udp': 53,
        'tcps': 853,
        'https': 443,
    }
    @classmethod
    def parse(cls, value, default_protocol=None, allow_domain=False):
        """Build an Address from a string (or copy an existing Address).

        Raises:
            InvalidHost: if the host is a domain name and allow_domain
                is False.
        """
        if isinstance(value, Address):
            return value.copy()
        if '://' not in value:
            # Prepend '//' so urlparse treats the value as a netloc.
            value = '//' + value
        data = urlparse(value, scheme=default_protocol or 'udp')
        hostinfo = Host(data.netloc)
        if hostinfo.port is None:
            hostinfo.port = cls.default_ports.get(data.scheme, 53)
        addr = Address(hostinfo, data.scheme, data.path)
        if not allow_domain and addr.ip_type is None:
            raise InvalidHost(
                hostinfo.hostname,
                'You may pass `allow_domain=True` to allow domain names.')
        return addr
|
[
"socket.inet_pton",
"urllib.parse.urlparse"
] |
[((2383, 2425), 'socket.inet_pton', 'socket.inet_pton', (['socket.AF_INET', 'hostname'], {}), '(socket.AF_INET, hostname)\n', (2399, 2425), False, 'import socket\n'), ((3836, 3885), 'urllib.parse.urlparse', 'urlparse', (['value'], {'scheme': "(default_protocol or 'udp')"}), "(value, scheme=default_protocol or 'udp')\n", (3844, 3885), False, 'from urllib.parse import urlparse\n'), ((2206, 2249), 'socket.inet_pton', 'socket.inet_pton', (['socket.AF_INET6', 'hostname'], {}), '(socket.AF_INET6, hostname)\n', (2222, 2249), False, 'import socket\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2021 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the resolution of conflicting proofs via avalanche."""
import time
from test_framework.avatools import (
create_coinbase_stakes,
gen_proof,
get_ava_p2p_interface,
get_proof_ids,
)
from test_framework.key import ECPubKey
from test_framework.messages import (
AvalancheVote,
AvalancheVoteError,
FromHex,
LegacyAvalancheProof,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_raises_rpc_error, try_rpc
from test_framework.wallet_util import bytes_to_wif
QUORUM_NODE_COUNT = 16
class AvalancheProofVotingTest(BitcoinTestFramework):
    """Functional test: resolution of conflicting avalanche proofs via voting."""

    def set_test_params(self):
        # Single node; cooldowns are shortened so the test runs quickly.
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.conflicting_proof_cooldown = 100
        self.peer_replacement_cooldown = 2000
        self.extra_args = [
            ['-enableavalanche=1', '-enableavalancheproofreplacement=1',
             f'-avalancheconflictingproofcooldown={self.conflicting_proof_cooldown}', f'-avalanchepeerreplacementcooldown={self.peer_replacement_cooldown}', '-avacooldown=0'],
        ]
        self.supports_cli = False

    # Build a fake quorum of nodes.
    def get_quorum(self, node):
        """Connect QUORUM_NODE_COUNT P2P stubs, all backed by the quorum proof."""
        quorum = [get_ava_p2p_interface(node)
                  for _ in range(0, QUORUM_NODE_COUNT)]
        for n in quorum:
            success = node.addavalanchenode(
                n.nodeid,
                self.privkey.get_pubkey().get_bytes().hex(),
                self.quorum_proof.serialize().hex(),
            )
            assert success is True
        return quorum

    def can_find_proof_in_poll(self, hash, response):
        """Answer one poll round on every quorum node, voting `response` for
        `hash` and ACCEPTED for everything else.

        Returns True if any poll contained `hash`.
        """
        found_hash = False
        for n in self.quorum:
            poll = n.get_avapoll_if_available()

            # That node has not received a poll
            if poll is None:
                continue

            # We got a poll, check for the hash and respond
            votes = []
            for inv in poll.invs:
                # Vote yes to everything
                r = AvalancheVoteError.ACCEPTED

                # Look for what we expect
                if inv.hash == hash:
                    r = response
                    found_hash = True

                votes.append(AvalancheVote(r, inv.hash))

            n.send_avaresponse(poll.round, votes, self.privkey)

        return found_hash

    @staticmethod
    def send_proof(from_peer, proof_hex):
        """Deserialize `proof_hex`, send it from `from_peer`, return its proofid."""
        proof = FromHex(LegacyAvalancheProof(), proof_hex)
        from_peer.send_avaproof(proof)
        return proof.proofid

    def send_and_check_for_polling(self, peer,
                                   proof_hex, response=AvalancheVoteError.ACCEPTED):
        """Send a proof and wait until the quorum gets polled about it."""
        proofid = self.send_proof(peer, proof_hex)
        self.wait_until(lambda: self.can_find_proof_in_poll(proofid, response))

    def build_conflicting_proof(self, node, sequence):
        # All proofs built here reuse the same stakes, so they conflict with
        # each other; the sequence number decides which one is preferred.
        return node.buildavalancheproof(
            sequence, 0, self.privkey_wif, self.conflicting_stakes)

    def run_test(self):
        node = self.nodes[0]

        self.privkey, self.quorum_proof = gen_proof(node)
        self.privkey_wif = bytes_to_wif(self.privkey.get_bytes())

        self.quorum = self.get_quorum(node)

        addrkey0 = node.get_deterministic_priv_key()
        blockhash = node.generatetoaddress(10, addrkey0.address)
        # Mature coinbases used as stakes for the conflicting proofs below.
        self.conflicting_stakes = create_coinbase_stakes(
            node, blockhash[5:], addrkey0.key)

        self.poll_tests(node)
        self.update_tests(node)

    def poll_tests(self, node):
        """Check which proofs the node does and does not poll the quorum for."""
        proof_seq10 = self.build_conflicting_proof(node, 10)
        proof_seq20 = self.build_conflicting_proof(node, 20)
        proof_seq30 = self.build_conflicting_proof(node, 30)
        proof_seq40 = self.build_conflicting_proof(node, 40)

        # Proof staking a non-existent UTXO -> orphan.
        orphan = node.buildavalancheproof(
            100, 2000000000, self.privkey_wif, [{
                'txid': '0' * 64,
                'vout': 0,
                'amount': 10e6,
                'height': 42,
                'iscoinbase': False,
                'privatekey': self.privkey_wif,
            }]
        )

        # Proof with an empty stake list -> invalid.
        no_stake = node.buildavalancheproof(
            200, 2000000000, self.privkey_wif, []
        )

        # Get the key so we can verify signatures.
        avakey = ECPubKey()
        avakey.set(bytes.fromhex(node.getavalanchekey()))

        self.log.info("Trigger polling from the node...")

        peer = get_ava_p2p_interface(node)
        mock_time = int(time.time())
        node.setmocktime(mock_time)

        self.log.info("Check we poll for valid proof")
        self.send_and_check_for_polling(peer, proof_seq30)

        self.log.info(
            "Check we don't poll for subsequent proofs if the cooldown is not elapsed, proof not the favorite")
        with node.assert_debug_log(["Not polling the avalanche proof (cooldown-not-elapsed)"]):
            peer.send_avaproof(FromHex(LegacyAvalancheProof(), proof_seq20))

        self.log.info(
            "Check we don't poll for subsequent proofs if the cooldown is not elapsed, proof is the favorite")
        with node.assert_debug_log(["Not polling the avalanche proof (cooldown-not-elapsed)"]):
            peer.send_avaproof(FromHex(LegacyAvalancheProof(), proof_seq40))

        self.log.info(
            "Check we poll for conflicting proof if the proof is not the favorite")
        mock_time += self.conflicting_proof_cooldown
        node.setmocktime(mock_time)
        self.send_and_check_for_polling(
            peer, proof_seq20, response=AvalancheVoteError.INVALID)

        self.log.info(
            "Check we poll for conflicting proof if the proof is the favorite")
        mock_time += self.conflicting_proof_cooldown
        node.setmocktime(mock_time)
        self.send_and_check_for_polling(peer, proof_seq40)

        mock_time += self.conflicting_proof_cooldown
        node.setmocktime(mock_time)

        self.log.info("Check we don't poll for orphans")
        with node.assert_debug_log(["Not polling the avalanche proof (orphan-proof)"]):
            peer.send_avaproof(FromHex(LegacyAvalancheProof(), orphan))

        self.log.info("Check we don't poll for proofs that get rejected")
        with node.assert_debug_log(["Not polling the avalanche proof (rejected-proof)"]):
            peer.send_avaproof(FromHex(LegacyAvalancheProof(), proof_seq10))

        self.log.info("Check we don't poll for invalid proofs and get banned")
        with node.assert_debug_log(["Misbehaving", "invalid-proof"]):
            peer.send_avaproof(FromHex(LegacyAvalancheProof(), no_stake))
        peer.wait_for_disconnect()

    def update_tests(self, node):
        """Check proof acceptance, replacement rate limiting and invalidation."""
        # Restart the node to get rid of in-flight requests
        self.restart_node(0)

        mock_time = int(time.time())
        node.setmocktime(mock_time)

        self.quorum = self.get_quorum(node)
        peer = get_ava_p2p_interface(node)

        proof_seq30 = self.build_conflicting_proof(node, 30)
        proof_seq40 = self.build_conflicting_proof(node, 40)
        proof_seq50 = self.build_conflicting_proof(node, 50)
        proofid_seq30 = FromHex(LegacyAvalancheProof(), proof_seq30).proofid
        proofid_seq40 = FromHex(LegacyAvalancheProof(), proof_seq40).proofid
        proofid_seq50 = FromHex(LegacyAvalancheProof(), proof_seq50).proofid

        node.sendavalancheproof(proof_seq40)
        self.wait_until(lambda: proofid_seq40 in get_proof_ids(node))

        assert proofid_seq40 in get_proof_ids(node)
        assert proofid_seq30 not in get_proof_ids(node)

        self.log.info("Test proof acceptance")

        def accept_proof(proofid):
            # Vote the proof in, then check it replaced the previous peer.
            self.wait_until(lambda: self.can_find_proof_in_poll(
                proofid, response=AvalancheVoteError.ACCEPTED), timeout=5)
            return proofid in get_proof_ids(node)

        mock_time += self.conflicting_proof_cooldown
        node.setmocktime(mock_time)

        self.send_and_check_for_polling(peer, proof_seq30)

        # Let the quorum vote for it
        self.wait_until(lambda: accept_proof(proofid_seq30))
        assert proofid_seq40 not in get_proof_ids(node)

        self.log.info("Test the peer replacement rate limit")

        # Wait until proof_seq30 is finalized
        with node.assert_debug_log([f"Avalanche accepted proof {proofid_seq30:0{64}x}, status 3"]):
            self.wait_until(lambda: not self.can_find_proof_in_poll(
                proofid_seq30, response=AvalancheVoteError.ACCEPTED))

        # Not enough
        assert self.conflicting_proof_cooldown < self.peer_replacement_cooldown
        mock_time += self.conflicting_proof_cooldown
        node.setmocktime(mock_time)

        peer = get_ava_p2p_interface(node)

        with node.assert_debug_log(["Not polling the avalanche proof (cooldown-not-elapsed)"]):
            self.send_proof(peer, proof_seq50)

        mock_time += self.peer_replacement_cooldown
        node.setmocktime(mock_time)

        self.log.info("Test proof rejection")

        self.send_proof(peer, proof_seq50)
        self.wait_until(lambda: proofid_seq50 in get_proof_ids(node))
        assert proofid_seq40 not in get_proof_ids(node)

        def reject_proof(proofid):
            # The INVALID votes should evict the proof from the proof pool.
            self.wait_until(
                lambda: self.can_find_proof_in_poll(
                    proofid, response=AvalancheVoteError.INVALID))
            return proofid not in get_proof_ids(node)

        self.wait_until(lambda: reject_proof(proofid_seq50))

        assert proofid_seq50 not in get_proof_ids(node)
        assert proofid_seq40 in get_proof_ids(node)

        self.log.info("Test proof invalidation")

        def invalidate_proof(proofid):
            # After enough INVALID votes the RPC no longer knows the proof.
            self.wait_until(
                lambda: self.can_find_proof_in_poll(
                    proofid, response=AvalancheVoteError.INVALID))
            return try_rpc(-8, "Proof not found",
                           node.getrawavalancheproof, f"{proofid:0{64}x}")

        self.wait_until(lambda: invalidate_proof(proofid_seq50))

        self.log.info("The node will now ignore the invalid proof")

        for i in range(5):
            with node.assert_debug_log(["received: avaproof"]):
                self.send_proof(peer, proof_seq50)

        assert_raises_rpc_error(-8,
                                "Proof not found",
                                node.getrawavalancheproof,
                                f"{proofid_seq50:0{64}x}")
if __name__ == '__main__':
    # Entry point: run the functional test through the framework's main().
    AvalancheProofVotingTest().main()
|
[
"test_framework.messages.LegacyAvalancheProof",
"test_framework.util.try_rpc",
"test_framework.avatools.gen_proof",
"test_framework.avatools.get_ava_p2p_interface",
"time.time",
"test_framework.messages.AvalancheVote",
"test_framework.avatools.create_coinbase_stakes",
"test_framework.util.assert_raises_rpc_error",
"test_framework.avatools.get_proof_ids",
"test_framework.key.ECPubKey"
] |
[((3281, 3296), 'test_framework.avatools.gen_proof', 'gen_proof', (['node'], {}), '(node)\n', (3290, 3296), False, 'from test_framework.avatools import create_coinbase_stakes, gen_proof, get_ava_p2p_interface, get_proof_ids\n'), ((3561, 3618), 'test_framework.avatools.create_coinbase_stakes', 'create_coinbase_stakes', (['node', 'blockhash[5:]', 'addrkey0.key'], {}), '(node, blockhash[5:], addrkey0.key)\n', (3583, 3618), False, 'from test_framework.avatools import create_coinbase_stakes, gen_proof, get_ava_p2p_interface, get_proof_ids\n'), ((4474, 4484), 'test_framework.key.ECPubKey', 'ECPubKey', ([], {}), '()\n', (4482, 4484), False, 'from test_framework.key import ECPubKey\n'), ((4618, 4645), 'test_framework.avatools.get_ava_p2p_interface', 'get_ava_p2p_interface', (['node'], {}), '(node)\n', (4639, 4645), False, 'from test_framework.avatools import create_coinbase_stakes, gen_proof, get_ava_p2p_interface, get_proof_ids\n'), ((7077, 7104), 'test_framework.avatools.get_ava_p2p_interface', 'get_ava_p2p_interface', (['node'], {}), '(node)\n', (7098, 7104), False, 'from test_framework.avatools import create_coinbase_stakes, gen_proof, get_ava_p2p_interface, get_proof_ids\n'), ((8880, 8907), 'test_framework.avatools.get_ava_p2p_interface', 'get_ava_p2p_interface', (['node'], {}), '(node)\n', (8901, 8907), False, 'from test_framework.avatools import create_coinbase_stakes, gen_proof, get_ava_p2p_interface, get_proof_ids\n'), ((1430, 1457), 'test_framework.avatools.get_ava_p2p_interface', 'get_ava_p2p_interface', (['node'], {}), '(node)\n', (1451, 1457), False, 'from test_framework.avatools import create_coinbase_stakes, gen_proof, get_ava_p2p_interface, get_proof_ids\n'), ((2652, 2674), 'test_framework.messages.LegacyAvalancheProof', 'LegacyAvalancheProof', ([], {}), '()\n', (2672, 2674), False, 'from test_framework.messages import AvalancheVote, AvalancheVoteError, FromHex, LegacyAvalancheProof\n'), ((4671, 4682), 'time.time', 'time.time', ([], {}), '()\n', (4680, 
4682), False, 'import time\n'), ((6968, 6979), 'time.time', 'time.time', ([], {}), '()\n', (6977, 6979), False, 'import time\n'), ((7669, 7688), 'test_framework.avatools.get_proof_ids', 'get_proof_ids', (['node'], {}), '(node)\n', (7682, 7688), False, 'from test_framework.avatools import create_coinbase_stakes, gen_proof, get_ava_p2p_interface, get_proof_ids\n'), ((7725, 7744), 'test_framework.avatools.get_proof_ids', 'get_proof_ids', (['node'], {}), '(node)\n', (7738, 7744), False, 'from test_framework.avatools import create_coinbase_stakes, gen_proof, get_ava_p2p_interface, get_proof_ids\n'), ((8304, 8323), 'test_framework.avatools.get_proof_ids', 'get_proof_ids', (['node'], {}), '(node)\n', (8317, 8323), False, 'from test_framework.avatools import create_coinbase_stakes, gen_proof, get_ava_p2p_interface, get_proof_ids\n'), ((9338, 9357), 'test_framework.avatools.get_proof_ids', 'get_proof_ids', (['node'], {}), '(node)\n', (9351, 9357), False, 'from test_framework.avatools import create_coinbase_stakes, gen_proof, get_ava_p2p_interface, get_proof_ids\n'), ((9696, 9715), 'test_framework.avatools.get_proof_ids', 'get_proof_ids', (['node'], {}), '(node)\n', (9709, 9715), False, 'from test_framework.avatools import create_coinbase_stakes, gen_proof, get_ava_p2p_interface, get_proof_ids\n'), ((9748, 9767), 'test_framework.avatools.get_proof_ids', 'get_proof_ids', (['node'], {}), '(node)\n', (9761, 9767), False, 'from test_framework.avatools import create_coinbase_stakes, gen_proof, get_ava_p2p_interface, get_proof_ids\n'), ((10026, 10104), 'test_framework.util.try_rpc', 'try_rpc', (['(-8)', '"""Proof not found"""', 'node.getrawavalancheproof', 'f"""{proofid:0{64}x}"""'], {}), "(-8, 'Proof not found', node.getrawavalancheproof, f'{proofid:0{64}x}')\n", (10033, 10104), False, 'from test_framework.util import assert_raises_rpc_error, try_rpc\n'), ((10422, 10526), 'test_framework.util.assert_raises_rpc_error', 'assert_raises_rpc_error', (['(-8)', '"""Proof not found"""', 
'node.getrawavalancheproof', 'f"""{proofid_seq50:0{64}x}"""'], {}), "(-8, 'Proof not found', node.getrawavalancheproof,\n f'{proofid_seq50:0{64}x}')\n", (10445, 10526), False, 'from test_framework.util import assert_raises_rpc_error, try_rpc\n'), ((7321, 7343), 'test_framework.messages.LegacyAvalancheProof', 'LegacyAvalancheProof', ([], {}), '()\n', (7341, 7343), False, 'from test_framework.messages import AvalancheVote, AvalancheVoteError, FromHex, LegacyAvalancheProof\n'), ((7398, 7420), 'test_framework.messages.LegacyAvalancheProof', 'LegacyAvalancheProof', ([], {}), '()\n', (7418, 7420), False, 'from test_framework.messages import AvalancheVote, AvalancheVoteError, FromHex, LegacyAvalancheProof\n'), ((7475, 7497), 'test_framework.messages.LegacyAvalancheProof', 'LegacyAvalancheProof', ([], {}), '()\n', (7495, 7497), False, 'from test_framework.messages import AvalancheVote, AvalancheVoteError, FromHex, LegacyAvalancheProof\n'), ((7999, 8018), 'test_framework.avatools.get_proof_ids', 'get_proof_ids', (['node'], {}), '(node)\n', (8012, 8018), False, 'from test_framework.avatools import create_coinbase_stakes, gen_proof, get_ava_p2p_interface, get_proof_ids\n'), ((9577, 9596), 'test_framework.avatools.get_proof_ids', 'get_proof_ids', (['node'], {}), '(node)\n', (9590, 9596), False, 'from test_framework.avatools import create_coinbase_stakes, gen_proof, get_ava_p2p_interface, get_proof_ids\n'), ((2447, 2473), 'test_framework.messages.AvalancheVote', 'AvalancheVote', (['r', 'inv.hash'], {}), '(r, inv.hash)\n', (2460, 2473), False, 'from test_framework.messages import AvalancheVote, AvalancheVoteError, FromHex, LegacyAvalancheProof\n'), ((5106, 5128), 'test_framework.messages.LegacyAvalancheProof', 'LegacyAvalancheProof', ([], {}), '()\n', (5126, 5128), False, 'from test_framework.messages import AvalancheVote, AvalancheVoteError, FromHex, LegacyAvalancheProof\n'), ((5414, 5436), 'test_framework.messages.LegacyAvalancheProof', 'LegacyAvalancheProof', ([], {}), 
'()\n', (5434, 5436), False, 'from test_framework.messages import AvalancheVote, AvalancheVoteError, FromHex, LegacyAvalancheProof\n'), ((6285, 6307), 'test_framework.messages.LegacyAvalancheProof', 'LegacyAvalancheProof', ([], {}), '()\n', (6305, 6307), False, 'from test_framework.messages import AvalancheVote, AvalancheVoteError, FromHex, LegacyAvalancheProof\n'), ((6522, 6544), 'test_framework.messages.LegacyAvalancheProof', 'LegacyAvalancheProof', ([], {}), '()\n', (6542, 6544), False, 'from test_framework.messages import AvalancheVote, AvalancheVoteError, FromHex, LegacyAvalancheProof\n'), ((6749, 6771), 'test_framework.messages.LegacyAvalancheProof', 'LegacyAvalancheProof', ([], {}), '()\n', (6769, 6771), False, 'from test_framework.messages import AvalancheVote, AvalancheVoteError, FromHex, LegacyAvalancheProof\n'), ((7615, 7634), 'test_framework.avatools.get_proof_ids', 'get_proof_ids', (['node'], {}), '(node)\n', (7628, 7634), False, 'from test_framework.avatools import create_coinbase_stakes, gen_proof, get_ava_p2p_interface, get_proof_ids\n'), ((9281, 9300), 'test_framework.avatools.get_proof_ids', 'get_proof_ids', (['node'], {}), '(node)\n', (9294, 9300), False, 'from test_framework.avatools import create_coinbase_stakes, gen_proof, get_ava_p2p_interface, get_proof_ids\n')]
|
# -*- coding: utf-8 -*-
'''
Created on 20 июл. 2017 г.
@author: krtkr
'''
import sys
import getopt
from KicadSymGen.draw import Library
from KicadSymGen.generate import Generator
from KicadSymGen.generate import Layout
from KicadSymGen.parse.altera import Max10Reader
from KicadSymGen.parse.altera import Max10Parser
def print_help():
    """Print a one-line usage summary for this script."""
    usage = 'max10_generate.py -p <pinouts_path> -d <dcm_file> -l <lib_file>'
    print(usage)
if __name__ == '__main__':
    # Defaults; each may be overridden from the command line below.
    verbose = False
    pinouts_path = "../docs/max10"
    dcm_file_path = './max10.dcm'
    lib_file_path = './max10.lib'
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hvp:d:l:",["pinouts=","dcm=","lib="])
    except getopt.GetoptError:
        # Unknown option or missing value: show usage and fail.
        print_help()
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print_help()
            sys.exit()
        elif opt == '-v':
            verbose = True
        elif opt in ("-p", "--pinouts"):
            pinouts_path = arg
        elif opt in ("-d", "--dcm_file"):
            dcm_file_path = arg
        elif opt in ("-l", "--lib_file"):
            lib_file_path = arg
    # Wire reader -> parser -> layout into the generator pipeline.
    parse = Max10Parser(list())
    layout = Layout()
    max10Reader = Max10Reader(pinouts_path)
    generator = Generator(max10Reader, parse, layout)
    if generator.generate():
        print("Done generating, write Library")
        library = Library()
        library.save(lib_file_path, dcm_file_path, generator.symbols)
        print("Done writing Library, finish")
    else:
        print("Error: failed to generate")
    pass
|
[
"getopt.getopt",
"KicadSymGen.generate.Layout",
"sys.exit",
"KicadSymGen.draw.Library",
"KicadSymGen.generate.Generator",
"KicadSymGen.parse.altera.Max10Reader"
] |
[((1154, 1162), 'KicadSymGen.generate.Layout', 'Layout', ([], {}), '()\n', (1160, 1162), False, 'from KicadSymGen.generate import Layout\n'), ((1182, 1207), 'KicadSymGen.parse.altera.Max10Reader', 'Max10Reader', (['pinouts_path'], {}), '(pinouts_path)\n', (1193, 1207), False, 'from KicadSymGen.parse.altera import Max10Reader\n'), ((1224, 1261), 'KicadSymGen.generate.Generator', 'Generator', (['max10Reader', 'parse', 'layout'], {}), '(max10Reader, parse, layout)\n', (1233, 1261), False, 'from KicadSymGen.generate import Generator\n'), ((598, 667), 'getopt.getopt', 'getopt.getopt', (['sys.argv[1:]', '"""hvp:d:l:"""', "['pinouts=', 'dcm=', 'lib=']"], {}), "(sys.argv[1:], 'hvp:d:l:', ['pinouts=', 'dcm=', 'lib='])\n", (611, 667), False, 'import getopt\n'), ((1357, 1366), 'KicadSymGen.draw.Library', 'Library', ([], {}), '()\n', (1364, 1366), False, 'from KicadSymGen.draw import Library\n'), ((725, 736), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (733, 736), False, 'import sys\n'), ((824, 834), 'sys.exit', 'sys.exit', ([], {}), '()\n', (832, 834), False, 'import sys\n')]
|
"""
Module made to merge and process all configs for PromAC into a single nested
dictionary. Ready to be injected into the settings models that PromAC uses.
Copyright © 2020 <NAME> - Licensed under the Apache License 2.0
"""
import os
from box import Box
from loguru import logger
import prometheus_adaptive_cards.config.settings_utils as settings_utils
def _parse_args(args: list[str]) -> Box:
    """Turn a flat CLI argument list into a nested ``Box``.

    Args:
        args (list[str]):
            Alternating option names and values, typically ``sys.argv[1:]``.
            Names must start with one or two dashes and only contain lower
            case chars, periods and underscores.

    Returns:
        Box:
            Lowercased, nested settings with ``box_dots=True``. Behaves like
            a dictionary. Type casting is NOT done here.
    """
    logger.bind(args_to_parse=args).debug("Parse list of arguments.")
    if len(args) % 2 != 0:
        raise ValueError("Number of args must be not odd.")
    cli_args_dict = {}
    # Pair every option name with the value that follows it.
    for name, value in zip(args[::2], args[1::2]):
        if name.startswith("--"):
            cli_args_dict[name[2:]] = value
        elif name.startswith("-"):
            cli_args_dict[name[1:]] = value
    return Box(settings_utils.unflatten(cli_args_dict), box_dots=True)
def _parse_files(
    force_file: str or None = None, lookup_override: list[str] or None = None
) -> dict[str]:
    """Parse config from YAML files and merge them into one dict.

    Args:
        force_file (str or None, optional):
            If set, only this location is checked (plus its `.local.`
            variant). Defaults to `None`.
        lookup_override (list[str] or None, optional):
            Replaces the built-in lookup locations; mainly for unit tests.
            Defaults to `None`.

    Returns:
        dict[str]: Merged content of all found YAML files; empty when no
            file was parsed.
    """
    logger.bind(force_file=force_file, lookup_override=lookup_override).debug(
        "Parse and merge files."
    )
    if force_file:
        logger.debug(f"Only file '{force_file}' is considered.")
        candidate_paths = [force_file]
    else:
        candidate_paths = lookup_override or [
            f"{os.path.dirname(__file__)}/promac.yml",
            "/etc/promac/promac.yml",
        ]
    locations = settings_utils.generate_locations(candidate_paths)
    configs = settings_utils.parse_yamls(locations)
    if not configs:
        return {}
    # First file is the base; later files are merged on top of it.
    settings = configs[0]
    if len(configs) > 1:
        settings_utils.merge(settings, configs[1:])
    return settings
def _parse_env_vars(all_env_vars: dict[str, str]) -> Box:
    """Select ``PROMAC__``-prefixed env vars and nest them into a ``Box``.

    ``PROMAC__A__B=x`` becomes ``{"a": {"b": "x"}}``.

    Args:
        all_env_vars (dict[str, str]): Environment variables.

    Returns:
        Box:
            Lowercased, nested settings with ``box_dots=True``. Behaves like
            a dictionary. Type casting is NOT done here.
    """
    logger.bind(all_env_vars=all_env_vars).debug("Parse env vars.")
    relevant = {
        name[8:].lower().replace("__", "."): value
        for name, value in all_env_vars.items()
        if name.startswith("PROMAC__") and len(name) > 8
    }
    return Box(settings_utils.unflatten(relevant), box_dots=True)
def _cast_vars(box: Box) -> None:
    """Cast known settings fields to their target types in-place.

    Args:
        box (Box): Nested Box with `box_dots=True`. No content validation.
    """
    logger.debug("Cast vars.")
    for path, target_type in (
        ("logging.structured.custom_serializer", bool),
        ("logging.unstructured.colorize", bool),
        ("server.port", int),
    ):
        settings_utils.cast(box, path, target_type)
def setup_raw_settings(cli_args: list[str], env: dict[str, str]) -> dict:
    """Creates one single dict that contains all settings for PromAC.

    Precedence (lowest to highest): config files < env vars < CLI args.

    Args:
        args (list[str]):
            List of all arguments passed to program. Use it like this:
            `parse_args(sys.argv[1:])`. Args must start with one or two dashes
            and only contain lower case chars, period and underscores.
        env (dict[str, str]): Dict with all enviornment variables.

    Returns:
        dict: Nested dictionary with all settings unvalidated.
    """
    logger.debug("Parse CLI args with argparse.")
    cli_args_box = _parse_args(cli_args)
    logger.debug("Find, parse and merge YAML config files.")
    # The config file location itself may come from CLI or env var.
    config_file = cli_args_box.get("config_file", os.environ.get("CONFIG_FILE", None))
    collected_settings_dict = _parse_files(force_file=config_file)
    logger.debug("Extract and parse relevant env vars and merge into settings.")
    env_vars_box = _parse_env_vars(env)
    _cast_vars(env_vars_box)
    settings_utils.merge(collected_settings_dict, env_vars_box.to_dict())
    logger.debug("Extract and parse relevant CLI args")
    # config_file is consumed above and must not leak into the settings.
    if cli_args_box.get("config_file"):
        del cli_args_box["config_file"]
    _cast_vars(cli_args_box)
    settings_utils.merge(collected_settings_dict, cli_args_box.to_dict())
    return collected_settings_dict
|
[
"prometheus_adaptive_cards.config.settings_utils.cast",
"prometheus_adaptive_cards.config.settings_utils.merge",
"os.path.dirname",
"prometheus_adaptive_cards.config.settings_utils.parse_yamls",
"os.environ.get",
"prometheus_adaptive_cards.config.settings_utils.unflatten",
"loguru.logger.bind",
"prometheus_adaptive_cards.config.settings_utils.generate_locations",
"loguru.logger.debug"
] |
[((2855, 2892), 'prometheus_adaptive_cards.config.settings_utils.parse_yamls', 'settings_utils.parse_yamls', (['locations'], {}), '(locations)\n', (2881, 2892), True, 'import prometheus_adaptive_cards.config.settings_utils as settings_utils\n'), ((4057, 4083), 'loguru.logger.debug', 'logger.debug', (['"""Cast vars."""'], {}), "('Cast vars.')\n", (4069, 4083), False, 'from loguru import logger\n'), ((4089, 4159), 'prometheus_adaptive_cards.config.settings_utils.cast', 'settings_utils.cast', (['box', '"""logging.structured.custom_serializer"""', 'bool'], {}), "(box, 'logging.structured.custom_serializer', bool)\n", (4108, 4159), True, 'import prometheus_adaptive_cards.config.settings_utils as settings_utils\n'), ((4164, 4227), 'prometheus_adaptive_cards.config.settings_utils.cast', 'settings_utils.cast', (['box', '"""logging.unstructured.colorize"""', 'bool'], {}), "(box, 'logging.unstructured.colorize', bool)\n", (4183, 4227), True, 'import prometheus_adaptive_cards.config.settings_utils as settings_utils\n'), ((4232, 4276), 'prometheus_adaptive_cards.config.settings_utils.cast', 'settings_utils.cast', (['box', '"""server.port"""', 'int'], {}), "(box, 'server.port', int)\n", (4251, 4276), True, 'import prometheus_adaptive_cards.config.settings_utils as settings_utils\n'), ((4838, 4883), 'loguru.logger.debug', 'logger.debug', (['"""Parse CLI args with argparse."""'], {}), "('Parse CLI args with argparse.')\n", (4850, 4883), False, 'from loguru import logger\n'), ((4930, 4986), 'loguru.logger.debug', 'logger.debug', (['"""Find, parse and merge YAML config files."""'], {}), "('Find, parse and merge YAML config files.')\n", (4942, 4986), False, 'from loguru import logger\n'), ((5146, 5222), 'loguru.logger.debug', 'logger.debug', (['"""Extract and parse relevant env vars and merge into settings."""'], {}), "('Extract and parse relevant env vars and merge into settings.')\n", (5158, 5222), False, 'from loguru import logger\n'), ((5371, 5422), 'loguru.logger.debug', 
'logger.debug', (['"""Extract and parse relevant CLI args"""'], {}), "('Extract and parse relevant CLI args')\n", (5383, 5422), False, 'from loguru import logger\n'), ((1451, 1490), 'prometheus_adaptive_cards.config.settings_utils.unflatten', 'settings_utils.unflatten', (['cli_args_dict'], {}), '(cli_args_dict)\n', (1475, 1490), True, 'import prometheus_adaptive_cards.config.settings_utils as settings_utils\n'), ((2480, 2536), 'loguru.logger.debug', 'logger.debug', (['f"""Only file \'{force_file}\' is considered."""'], {}), '(f"Only file \'{force_file}\' is considered.")\n', (2492, 2536), False, 'from loguru import logger\n'), ((2557, 2604), 'prometheus_adaptive_cards.config.settings_utils.generate_locations', 'settings_utils.generate_locations', (['[force_file]'], {}), '([force_file])\n', (2590, 2604), True, 'import prometheus_adaptive_cards.config.settings_utils as settings_utils\n'), ((2976, 3019), 'prometheus_adaptive_cards.config.settings_utils.merge', 'settings_utils.merge', (['settings', 'configs[1:]'], {}), '(settings, configs[1:])\n', (2996, 3019), True, 'import prometheus_adaptive_cards.config.settings_utils as settings_utils\n'), ((3821, 3855), 'prometheus_adaptive_cards.config.settings_utils.unflatten', 'settings_utils.unflatten', (['env_vars'], {}), '(env_vars)\n', (3845, 3855), True, 'import prometheus_adaptive_cards.config.settings_utils as settings_utils\n'), ((5037, 5072), 'os.environ.get', 'os.environ.get', (['"""CONFIG_FILE"""', 'None'], {}), "('CONFIG_FILE', None)\n", (5051, 5072), False, 'import os\n'), ((968, 999), 'loguru.logger.bind', 'logger.bind', ([], {'args_to_parse': 'args'}), '(args_to_parse=args)\n', (979, 999), False, 'from loguru import logger\n'), ((2338, 2405), 'loguru.logger.bind', 'logger.bind', ([], {'force_file': 'force_file', 'lookup_override': 'lookup_override'}), '(force_file=force_file, lookup_override=lookup_override)\n', (2349, 2405), False, 'from loguru import logger\n'), ((3553, 3591), 'loguru.logger.bind', 
'logger.bind', ([], {'all_env_vars': 'all_env_vars'}), '(all_env_vars=all_env_vars)\n', (3564, 3591), False, 'from loguru import logger\n'), ((2734, 2759), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2749, 2759), False, 'import os\n')]
|
#!/usr/bin/env python
# SPDX-FileCopyrightText: 2021 <NAME> <<EMAIL>>
# SPDX-License-Identifier: MIT
import random
import sys
class Operation(object):
def valid (self, a, b):
return True
def result(self, a, b):
raise TypeError
def symbol(self):
raise TypeError
class Addition(Operation):
def __init__(self):
pass
def result(self, a, b):
return a+b
def symbol(self):
return "+"
class Multiplication(Operation):
def __init__(self):
pass
def result(self, a, b):
return a*b
def symbol(self):
return "*"
class Substraction(Operation):
def __init__(self):
pass
def valid(self, a, b):
return a >= b
def result(self, a, b):
return a-b
def symbol(self):
return "-"
class TableGenerator(object):
def __init__(self, used_operation):
self.usedOperation = used_operation
def table(self, a):
result = []
for i in range(1,10+1):
if self.usedOperation.valid(a, i):
result.append((a, i))
return result
class Game(object):
def __init__(self, table_generator, used_operation, combiner_min, combiner_max):
self.tableGenerator = table_generator
self.usedOperation = used_operation
self.combinations = []
for i in range(combiner_min, combiner_max+1):
self.combinations.extend(self.tableGenerator.table(i))
self.quitAnswer = "q"
def run(self):
print("Welcome! To quit the game, answer '%s' to any question" % (self.quitAnswer,))
questions = []
while True:
if len(questions) == 0:
questions = self.combinations[:]
random.shuffle(questions)
next_idx = random.choice(range(0, len(questions)))
question = questions[next_idx]
del questions[next_idx]
continue_playing = self.ask(question)
if not continue_playing:
break
def ask(self, question):
correct_sign = "\u2713 Yes!"
incorrect_sign = "\u2717 No"
a, b = question
correct_answer = self.usedOperation.result(a, b)
while True:
answer = input("%s %s %s = " % (a, self.usedOperation.symbol(), b))
if answer.lower() == self.quitAnswer:
return False
try:
numerical_answer = int(answer)
if numerical_answer == correct_answer:
print(correct_sign)
break
else:
print(incorrect_sign)
except ValueError:
print(incorrect_sign)
return True
if __name__ == "__main__":
def usage():
sys.exit("Usage: %s add|sub|mul NUMBER_FROM_1_TO_10" % (sys.argv[0], ))
if len(sys.argv) != 3:
usage()
operation_arg = sys.argv[1]
max_table_arg = sys.argv[2]
operation_classes = dict()
operation_classes['add'] = Addition
operation_classes['sub'] = Substraction
operation_classes['mul'] = Multiplication
if operation_arg not in operation_classes:
usage()
used_operation = operation_classes[operation_arg]()
table_generator = TableGenerator(used_operation)
try:
max_value = int(max_table_arg)
if max_value < 1 or max_value > 10:
raise ValueError
except ValueError:
usage()
game = Game(table_generator, used_operation, 1, max_value)
try:
game.run()
except KeyboardInterrupt:
print("\n")
|
[
"random.shuffle",
"sys.exit"
] |
[((2795, 2865), 'sys.exit', 'sys.exit', (["('Usage: %s add|sub|mul NUMBER_FROM_1_TO_10' % (sys.argv[0],))"], {}), "('Usage: %s add|sub|mul NUMBER_FROM_1_TO_10' % (sys.argv[0],))\n", (2803, 2865), False, 'import sys\n'), ((1761, 1786), 'random.shuffle', 'random.shuffle', (['questions'], {}), '(questions)\n', (1775, 1786), False, 'import random\n')]
|
from datetime import datetime
from django.test import TestCase, Client
from django.urls import reverse
from students.models.students import Student
from students.models.groups import Group
class TestStudentList(TestCase):
def setUp(self):
# create 2 groups
group1, created = Group.objects.get_or_create(
title="MtM-1")
group2, created = Group.objects.get_or_create(
title="MtM-2")
# create 4 students: 1 for group1 and 3 for group2
Student.students.get_or_create(
first_name="Vitaliy",
last_name="Podoba",
birthday=datetime.today(),
ticket='12345',
student_group=group1)
Student.students.get_or_create(
first_name="John",
last_name="Dobson",
birthday=datetime.today(),
ticket='23456',
student_group=group2)
Student.students.get_or_create(
first_name="Sam",
last_name="Stefenson",
birthday=datetime.today(),
ticket='34567',
student_group=group2)
Student.students.get_or_create(
first_name="Arnold",
last_name="Kidney",
birthday=datetime.today(),
ticket='45678',
student_group=group2)
# remember test browser
self.client = Client()
# remember url to our homepage
self.url = reverse('home')
def test_students_list(self):
# make request to the server to get homepage page
response = self.client.get(self.url)
# print(response.context)
# have we received OK status from the server?
self.assertEqual(response.status_code, 200)
# do we have student name on a page?
self.assertIn('Vitaliy', str(response.content))
# do we have link to student edit form?
self.assertIn(reverse('students_edit',
kwargs={'pk': Student.students.all()[0].id}),
str(response.content))
# ensure we got 3 students, pagination limit is 3
self.assertEqual(len(response.context['students']), 3)
def test_current_group(self):
# # set group1 as currently selected group
group = Group.objects.filter(title="MtM-1")[0]
self.client.cookies['current_group'] = group.id
# # make request to the server to get homepage page
response = self.client.get(self.url)
# # in group1 we have only 1 student
self.assertEqual(len(response.context['students']), 1)
def test_order_by(self):
# # set order by Last Name
response = self.client.get(self.url, {'order_by': 'last_name'})
# # now check if we got proper order
students = response.context['students']
self.assertEqual(students[0].last_name, 'Dobson')
self.assertEqual(students[1].last_name, 'Kidney')
self.assertEqual(students[2].last_name, 'Podoba')
def test_reverse_order_by(self):
# # order students by ticket number in reverse order
response = self.client.get(self.url, {'order_by': 'ticket',
'reverse': '1'})
# # now check if we got proper order
students = response.context['students']
self.assertEqual(students[0].last_name, 'Kidney')
self.assertEqual(students[1].last_name, 'Stefenson')
self.assertEqual(students[2].last_name, 'Dobson')
def test_pagination(self):
# # navigate to second page with students
response = self.client.get(self.url, {'page': '2'})
# self.assertEqual(response.context['is_paginated'], True)
self.assertEqual(len(response.context['students']), 1)
self.assertEqual(response.context['students'][0].last_name, 'Stefenson')
|
[
"students.models.groups.Group.objects.get_or_create",
"datetime.datetime.today",
"students.models.students.Student.students.all",
"django.test.Client",
"django.urls.reverse",
"students.models.groups.Group.objects.filter"
] |
[((300, 342), 'students.models.groups.Group.objects.get_or_create', 'Group.objects.get_or_create', ([], {'title': '"""MtM-1"""'}), "(title='MtM-1')\n", (327, 342), False, 'from students.models.groups import Group\n'), ((382, 424), 'students.models.groups.Group.objects.get_or_create', 'Group.objects.get_or_create', ([], {'title': '"""MtM-2"""'}), "(title='MtM-2')\n", (409, 424), False, 'from students.models.groups import Group\n'), ((1376, 1384), 'django.test.Client', 'Client', ([], {}), '()\n', (1382, 1384), False, 'from django.test import TestCase, Client\n'), ((1444, 1459), 'django.urls.reverse', 'reverse', (['"""home"""'], {}), "('home')\n", (1451, 1459), False, 'from django.urls import reverse\n'), ((2254, 2289), 'students.models.groups.Group.objects.filter', 'Group.objects.filter', ([], {'title': '"""MtM-1"""'}), "(title='MtM-1')\n", (2274, 2289), False, 'from students.models.groups import Group\n'), ((625, 641), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (639, 641), False, 'from datetime import datetime\n'), ((829, 845), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (843, 845), False, 'from datetime import datetime\n'), ((1035, 1051), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (1049, 1051), False, 'from datetime import datetime\n'), ((1241, 1257), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (1255, 1257), False, 'from datetime import datetime\n'), ((1963, 1985), 'students.models.students.Student.students.all', 'Student.students.all', ([], {}), '()\n', (1983, 1985), False, 'from students.models.students import Student\n')]
|
# NVIDIA
import unittest
from test_bert_batch_1 import *
#from test_bert_batch_7 import *
from test_embeddings_batch_1 import *
from test_encoders_batch_1 import *
if __name__ == '__main__':
unittest.main(verbosity=2)
|
[
"unittest.main"
] |
[((196, 222), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (209, 222), False, 'import unittest\n')]
|
import click
from apigee import console
from apigee.auth import common_auth_options, gen_auth
from apigee.backups.backups import Backups
# from apigee.cls import OptionEatAll
from apigee.prefix import common_prefix_options
from apigee.silent import common_silent_options
from apigee.verbose import common_verbose_options
APIS_CHOICES = {
'apis',
'keyvaluemaps',
'targetservers',
'caches',
'developers',
'apiproducts',
'apps',
'userroles',
}
@click.group(help='Download configuration files from Apigee that can later be restored.')
def backups():
pass
def _take_snapshot(
username,
password,
mfa_secret,
token,
zonename,
org,
profile,
target_directory,
prefix,
environments,
apis,
**kwargs
):
if not isinstance(apis, set):
apis = set(apis)
Backups(
gen_auth(username, password, mfa_secret, token, zonename),
org,
target_directory,
prefix=prefix,
fs_write=True,
apis=apis,
environments=list(environments),
).take_snapshot()
@backups.command(
help='Downloads and generates local snapshots of specified Apigee resources e.g. API proxies, KVMs, target servers, etc.'
)
@common_auth_options
@common_prefix_options
@common_silent_options
@common_verbose_options
@click.option(
'--target-directory',
type=click.Path(exists=False, dir_okay=True, file_okay=False, resolve_path=False),
required=True,
)
@click.option(
'--apis',
type=click.Choice(APIS_CHOICES, case_sensitive=False),
multiple=True,
default=APIS_CHOICES,
show_default=True,
)
# @click.option('--apis', metavar='LIST', cls=OptionEatAll, default=APIS_CHOICES, show_default=True, help='')
# @click.option(
# '-e', '--environments', metavar='LIST', cls=OptionEatAll, default=['test', 'prod'], help=''
# )
@click.option(
'-e', '--environments', multiple=True, show_default=True, default=['test', 'prod'], help=''
)
def take_snapshot(*args, **kwargs):
_take_snapshot(*args, **kwargs)
|
[
"click.option",
"click.Choice",
"apigee.auth.gen_auth",
"click.Path",
"click.group"
] |
[((500, 593), 'click.group', 'click.group', ([], {'help': '"""Download configuration files from Apigee that can later be restored."""'}), "(help=\n 'Download configuration files from Apigee that can later be restored.')\n", (511, 593), False, 'import click\n'), ((1941, 2050), 'click.option', 'click.option', (['"""-e"""', '"""--environments"""'], {'multiple': '(True)', 'show_default': '(True)', 'default': "['test', 'prod']", 'help': '""""""'}), "('-e', '--environments', multiple=True, show_default=True,\n default=['test', 'prod'], help='')\n", (1953, 2050), False, 'import click\n'), ((1440, 1516), 'click.Path', 'click.Path', ([], {'exists': '(False)', 'dir_okay': '(True)', 'file_okay': '(False)', 'resolve_path': '(False)'}), '(exists=False, dir_okay=True, file_okay=False, resolve_path=False)\n', (1450, 1516), False, 'import click\n'), ((1582, 1630), 'click.Choice', 'click.Choice', (['APIS_CHOICES'], {'case_sensitive': '(False)'}), '(APIS_CHOICES, case_sensitive=False)\n', (1594, 1630), False, 'import click\n'), ((906, 963), 'apigee.auth.gen_auth', 'gen_auth', (['username', 'password', 'mfa_secret', 'token', 'zonename'], {}), '(username, password, mfa_secret, token, zonename)\n', (914, 963), False, 'from apigee.auth import common_auth_options, gen_auth\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2017-01-25 18:54
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('dynadb', '0074_auto_20170125_1926'),
]
operations = [
migrations.AlterField(
model_name='dyndbsubmission',
name='user_id',
field=models.ForeignKey(blank=True, db_column='user_id', null=True, on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL),
),
]
|
[
"django.db.models.ForeignKey"
] |
[((475, 618), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'db_column': '"""user_id"""', 'null': '(True)', 'on_delete': 'django.db.models.deletion.DO_NOTHING', 'to': 'settings.AUTH_USER_MODEL'}), "(blank=True, db_column='user_id', null=True, on_delete=\n django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)\n", (492, 618), False, 'from django.db import migrations, models\n')]
|
import os
import numpy as np
import json
import torch
from .utils import skeleton
class SkeletonDataset(torch.utils.data.Dataset):
""" Feeder for skeleton-based action recognition
Arguments:
data_path: the path to data folder
random_choose: If true, randomly choose a portion of the input sequence
random_move: If true, randomly perfrom affine transformation
window_size: The length of the output sequence
repeat: times of repeating the dataset
data_subscripts: subscript expression of einsum operation.
In the default case, the shape of output data is `(channel, vertex, frames, person)`.
To permute the shape to `(channel, frames, vertex, person)`,
set `data_subscripts` to 'cvfm->cfvm'.
"""
def __init__(self,
data_dir,
random_choose=False,
random_move=False,
window_size=-1,
num_track=1,
data_subscripts=None,
repeat=1):
self.data_dir = data_dir
self.random_choose = random_choose
self.random_move = random_move
self.window_size = window_size
self.num_track = num_track
self.data_subscripts = data_subscripts
self.files = [
os.path.join(self.data_dir, f) for f in os.listdir(self.data_dir)
] * repeat
def __len__(self):
return len(self.files)
def __getitem__(self, index):
with open(self.files[index]) as f:
data = json.load(f)
resolution = data['info']['resolution']
category_id = data['category_id']
annotations = data['annotations']
num_frame = data['info']['num_frame']
num_keypoints = data['info']['num_keypoints']
channel = data['info']['keypoint_channels']
num_channel = len(channel)
# get data
data = np.zeros(
(num_channel, num_keypoints, num_frame, self.num_track),
dtype=np.float32)
for a in annotations:
person_id = a['id'] if a['person_id'] is None else a['person_id']
frame_index = a['frame_index']
if person_id < self.num_track and frame_index < num_frame:
data[:, :, frame_index, person_id] = np.array(
a['keypoints']).transpose()
# normalization
if self.normalization:
for i, c in enumerate(channel):
if c == 'x':
data[i] = data[i] / resolution[0] - 0.5
if c == 'y':
data[i] = data[i] / resolution[1] - 0.5
if c == 'score' or c == 'visibility':
mask = (data[i] == 0)
for j in range(num_channel):
if c != j:
data[j][mask] = 0
# permute
if self.data_subscripts is not None:
data = np.einsum(self.data_subscripts, data)
# augmentation
if self.random_choose:
data = skeleton.random_choose(data, self.window_size)
elif self.window_size > 0:
data = skeleton.auto_pading(data, self.window_size)
if self.random_move:
data = skeleton.random_move(data)
return data, category_id
|
[
"json.load",
"numpy.zeros",
"numpy.einsum",
"numpy.array",
"os.path.join",
"os.listdir"
] |
[((1924, 2012), 'numpy.zeros', 'np.zeros', (['(num_channel, num_keypoints, num_frame, self.num_track)'], {'dtype': 'np.float32'}), '((num_channel, num_keypoints, num_frame, self.num_track), dtype=np.\n float32)\n', (1932, 2012), True, 'import numpy as np\n'), ((1556, 1568), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1565, 1568), False, 'import json\n'), ((2954, 2991), 'numpy.einsum', 'np.einsum', (['self.data_subscripts', 'data'], {}), '(self.data_subscripts, data)\n', (2963, 2991), True, 'import numpy as np\n'), ((1318, 1348), 'os.path.join', 'os.path.join', (['self.data_dir', 'f'], {}), '(self.data_dir, f)\n', (1330, 1348), False, 'import os\n'), ((1358, 1383), 'os.listdir', 'os.listdir', (['self.data_dir'], {}), '(self.data_dir)\n', (1368, 1383), False, 'import os\n'), ((2309, 2333), 'numpy.array', 'np.array', (["a['keypoints']"], {}), "(a['keypoints'])\n", (2317, 2333), True, 'import numpy as np\n')]
|
# SPDX-FileCopyrightText: 2020 <NAME>
#
# SPDX-License-Identifier: MIT
import logging
logger = logging.getLogger(__name__)
def add_parser(subparsers):
parser = subparsers.add_parser('log-server', help='Log server')
subparsers = parser.add_subparsers(dest='cmd')
subparsers.required = True
sync_parser = subparsers.add_parser(
'sync', help='Sync logs to local machine'
)
sync_parser.set_defaults(func=sync)
def sync():
pass
|
[
"logging.getLogger"
] |
[((97, 124), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (114, 124), False, 'import logging\n')]
|
'''7. Faça uma função que sorteia 10 números aleatórios entre 0 e 100 e retorna o maior entre eles.'''
import random
def sorteio():
maior = 0
menor = 0
for i in range(10):
x = random.randint(0,100)
if i==1:
menor = x
if x<menor:
menor = x
if x>maior:
maior = x
print('Menor: ', menor)
print('Maior: ', maior)
sorteio()
|
[
"random.randint"
] |
[((197, 219), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (211, 219), False, 'import random\n')]
|
import sys
import uncertainty_rfr
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
import pandas.api.types as ptypes
sys.path.append("../")
df_test = pd.read_csv('./xiaofeng_lasso/unittest_dummy.csv', nrows=5)
X_test, y_test = uncertainty_rfr.descriptors_outputs(df_test, d_start=5,
o=0)
def test_uncertainty_rfr_qfr():
'''
Test function for uncertainty_rfr_qfr. Checks values in actual are 0 when
true_y = False, and that the output df has the correct number of rows.
'''
df_test = pd.read_csv('./xiaofeng_lasso/unittest_dummy.csv')
X = df_test.iloc[range(3)]
err_df_test = \
uncertainty_rfr.uncertainty_rfr_qfr(df_test, X[X.columns[5:]],
Y='none', true_y=False, o=0,
d_start=5)
assert err_df_test['actual'][0] == err_df_test['actual'][1], \
'with true_y = False, all values in "actual" should be equal (0.0)'
assert len(err_df_test) == len(X), \
'length of predicting df should equal length of output df'
def test_descriptors_outputs():
'''
Test function for descriptors_outputs. Checks the shape of X, and checks
that the correct type of value (numeric) is in the columns.
'''
X_test, y_test = uncertainty_rfr.descriptors_outputs(df_test, d_start=5,
o=0)
assert X_test.shape[1] == 5, \
'array shape is incorrect. should be ({}, 7), got ({}, {})'\
.format(X_test.shape[0], X_test.shape[0], X_test.shape[1])
assert all(ptypes.is_numeric_dtype(X_test[col]) for col in
list(X_test[X_test.columns[:]])), \
'data type in columns is of incorrect type, must be numeric'
assert ptypes.is_numeric_dtype(y_test), \
'data type in columns is of incorrect type, must be numeric'
def test_traintest():
'''
Test function for traintest. Checks that the length of X_train and
y_train are the same.
'''
train_idx_test = np.array([0, 1, 2])
test_idx_test = np.array([3, 4])
X_train_test, y_train_test = \
uncertainty_rfr.traintest(X_test, y_test, train_idx_test,
test_idx_test)
assert X_train_test.shape[0] == y_train_test.shape[0], \
'X_train and y_train datapoints do not have the same num of values'
def test_predict_append():
'''
Test function for predict_append. Checks that the func appends one value
at a time, and that the output is a list.
'''
df_test2 = df_test[df_test.columns[:7]]
X_test, y_test = uncertainty_rfr.descriptors_outputs(df_test2, d_start=5,
o=0)
clf_test = RandomForestRegressor(random_state=130)
clf_test.fit(X_test, y_test)
N_arr_test = np.array([[3.98069889, 0.38048415],
[-0.78001682, 0.20058657]])
n_test = 0
preds_test = []
preds_test = uncertainty_rfr.predict_append(clf_test, N_arr_test, n_test,
preds_test)
assert len(preds_test) == 1, \
'preds_test needs to be length 1. Got {}'.format(len(preds_test))
assert isinstance(preds_test, list), \
'preds_test needs to be a list, got {}'.format(type(preds_test))
def test_dft_points():
'''
Test functino for dft_points. Checks that when true_y = True, the output
array is equal to Y_test, adn when true_y = False the output arry is the
same length as N_arr_test.
'''
Y_test = [3, 5]
N_arr_test = np.array([[3.98069889, 0.38048415],
[-0.78001682, 0.20058657]])
Y_arr_test = uncertainty_rfr.dft_points(True, Y_test, N_arr_test)
Y_arr_test2 = uncertainty_rfr.dft_points(False, Y_test, N_arr_test)
assert Y_arr_test[0] == Y_test[0], \
'Y_arr_test got unexpected result. Expected np.array([3,5]), got{}'.\
format(Y_arr_test)
assert len(Y_arr_test2) == N_arr_test.shape[0], \
'length of Y_arr_test2 should be equal to the number of rows of \
N_arr_test. Got Y_arr: {}, N_arr {}'.\
format(len(Y_arr_test2), N_arr_test.shape[0])
def test_uncert_table():
'''
Test function for uncert_table. Checks that the columns in the df are in
the correct place, the length of the output dataframe the correct
length, and that the last three columns in the output df are numeric.
'''
N_test = df_test[df_test.columns[5:]].iloc[[0, 1]]
X = df_test.iloc[[0, 1]]
Y_arr_test = np.array([3, 5])
pred_desc_test = pd.DataFrame(data={'mean': [1, 2], 'std': [3, 4]}).T
err_df = uncertainty_rfr.uncert_table(N_test, X, 1, 2, 3, 4,
Y_arr_test, pred_desc_test)
assert err_df.columns[0] == 'Type', \
'first column got unexpected value {}, should be Type'.\
format(err_df.columns[0])
assert len(err_df) == len(X), \
'arrays must all be the same length'
assert all(ptypes.is_numeric_dtype(err_df[col]) for col in
list(err_df[err_df.columns[4:]])), \
'columns "true val", "mean", and "std" are of wrong type, should be\
numeric values.'
def test_uncertainty_rfr_cv():
'''
Test function for undertainty_rfr_cv. Checks that the prediction df has
as many rows as folds in cv. In the output df it checks that "true val"
values are 0 when true_y = False, and checks that values in "AB" are of
type string.
'''
X = df_test.iloc[[0, 1]]
Y = 'none'
d_start, x_start = 5, 5
o = 0
folds_test = 2
pred_df_test, err_df_test = \
uncertainty_rfr.uncertainty_rfr_cv(df_test, X, Y, o, d_start, x_start,
folds=folds_test)
assert pred_df_test.shape[0] == folds_test, \
'Number of row in pred_df_test array should equal number of folds, \
expected {}, got {}'.format(folds_test, pred_df_test.shape[0])
assert err_df_test[err_df_test.columns[4]][0] == 0.0, \
'Expected 0.0 in "true val" with true_y set to false, instead got a \
different val'
assert isinstance(err_df_test['AB'][1], str), \
'Expected string in column "AB", got {}'.format(type(
err_df_test['AB'][1]))
def test_largest_uncertainty():
'''
test function for largest_uncertainty. checks that that length of the
df is equal to the num of values it was asked to return, and that the
output idx are a list.
'''
df = pd.DataFrame(data={'err_int': [1, 2, 3], 'std_dev': [4, 5, 6]})
num_vals = 2
larg, idx = uncertainty_rfr.largest_uncertainty(df, num_vals, 'std_dev')
assert len(larg) == num_vals, \
'number of rows in the output df should equal the number of values\
the func called to return'
assert isinstance(idx, list), \
'expected idx to be list, got {}'.format(type(idx))
|
[
"sys.path.append",
"pandas.DataFrame",
"uncertainty_rfr.traintest",
"pandas.read_csv",
"uncertainty_rfr.predict_append",
"uncertainty_rfr.descriptors_outputs",
"sklearn.ensemble.RandomForestRegressor",
"uncertainty_rfr.uncertainty_rfr_cv",
"uncertainty_rfr.largest_uncertainty",
"numpy.array",
"uncertainty_rfr.dft_points",
"pandas.api.types.is_numeric_dtype",
"uncertainty_rfr.uncert_table",
"uncertainty_rfr.uncertainty_rfr_qfr"
] |
[((159, 181), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (174, 181), False, 'import sys\n'), ((194, 253), 'pandas.read_csv', 'pd.read_csv', (['"""./xiaofeng_lasso/unittest_dummy.csv"""'], {'nrows': '(5)'}), "('./xiaofeng_lasso/unittest_dummy.csv', nrows=5)\n", (205, 253), True, 'import pandas as pd\n'), ((271, 331), 'uncertainty_rfr.descriptors_outputs', 'uncertainty_rfr.descriptors_outputs', (['df_test'], {'d_start': '(5)', 'o': '(0)'}), '(df_test, d_start=5, o=0)\n', (306, 331), False, 'import uncertainty_rfr\n'), ((602, 652), 'pandas.read_csv', 'pd.read_csv', (['"""./xiaofeng_lasso/unittest_dummy.csv"""'], {}), "('./xiaofeng_lasso/unittest_dummy.csv')\n", (613, 652), True, 'import pandas as pd\n'), ((713, 819), 'uncertainty_rfr.uncertainty_rfr_qfr', 'uncertainty_rfr.uncertainty_rfr_qfr', (['df_test', 'X[X.columns[5:]]'], {'Y': '"""none"""', 'true_y': '(False)', 'o': '(0)', 'd_start': '(5)'}), "(df_test, X[X.columns[5:]], Y='none',\n true_y=False, o=0, d_start=5)\n", (748, 819), False, 'import uncertainty_rfr\n'), ((1368, 1428), 'uncertainty_rfr.descriptors_outputs', 'uncertainty_rfr.descriptors_outputs', (['df_test'], {'d_start': '(5)', 'o': '(0)'}), '(df_test, d_start=5, o=0)\n', (1403, 1428), False, 'import uncertainty_rfr\n'), ((1854, 1885), 'pandas.api.types.is_numeric_dtype', 'ptypes.is_numeric_dtype', (['y_test'], {}), '(y_test)\n', (1877, 1885), True, 'import pandas.api.types as ptypes\n'), ((2116, 2135), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (2124, 2135), True, 'import numpy as np\n'), ((2156, 2172), 'numpy.array', 'np.array', (['[3, 4]'], {}), '([3, 4])\n', (2164, 2172), True, 'import numpy as np\n'), ((2217, 2289), 'uncertainty_rfr.traintest', 'uncertainty_rfr.traintest', (['X_test', 'y_test', 'train_idx_test', 'test_idx_test'], {}), '(X_test, y_test, train_idx_test, test_idx_test)\n', (2242, 2289), False, 'import uncertainty_rfr\n'), ((2695, 2756), 'uncertainty_rfr.descriptors_outputs', 
'uncertainty_rfr.descriptors_outputs', (['df_test2'], {'d_start': '(5)', 'o': '(0)'}), '(df_test2, d_start=5, o=0)\n', (2730, 2756), False, 'import uncertainty_rfr\n'), ((2829, 2868), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'random_state': '(130)'}), '(random_state=130)\n', (2850, 2868), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((2919, 2982), 'numpy.array', 'np.array', (['[[3.98069889, 0.38048415], [-0.78001682, 0.20058657]]'], {}), '([[3.98069889, 0.38048415], [-0.78001682, 0.20058657]])\n', (2927, 2982), True, 'import numpy as np\n'), ((3062, 3134), 'uncertainty_rfr.predict_append', 'uncertainty_rfr.predict_append', (['clf_test', 'N_arr_test', 'n_test', 'preds_test'], {}), '(clf_test, N_arr_test, n_test, preds_test)\n', (3092, 3134), False, 'import uncertainty_rfr\n'), ((3673, 3736), 'numpy.array', 'np.array', (['[[3.98069889, 0.38048415], [-0.78001682, 0.20058657]]'], {}), '([[3.98069889, 0.38048415], [-0.78001682, 0.20058657]])\n', (3681, 3736), True, 'import numpy as np\n'), ((3781, 3833), 'uncertainty_rfr.dft_points', 'uncertainty_rfr.dft_points', (['(True)', 'Y_test', 'N_arr_test'], {}), '(True, Y_test, N_arr_test)\n', (3807, 3833), False, 'import uncertainty_rfr\n'), ((3852, 3905), 'uncertainty_rfr.dft_points', 'uncertainty_rfr.dft_points', (['(False)', 'Y_test', 'N_arr_test'], {}), '(False, Y_test, N_arr_test)\n', (3878, 3905), False, 'import uncertainty_rfr\n'), ((4648, 4664), 'numpy.array', 'np.array', (['[3, 5]'], {}), '([3, 5])\n', (4656, 4664), True, 'import numpy as np\n'), ((4753, 4832), 'uncertainty_rfr.uncert_table', 'uncertainty_rfr.uncert_table', (['N_test', 'X', '(1)', '(2)', '(3)', '(4)', 'Y_arr_test', 'pred_desc_test'], {}), '(N_test, X, 1, 2, 3, 4, Y_arr_test, pred_desc_test)\n', (4781, 4832), False, 'import uncertainty_rfr\n'), ((5754, 5846), 'uncertainty_rfr.uncertainty_rfr_cv', 'uncertainty_rfr.uncertainty_rfr_cv', (['df_test', 'X', 'Y', 'o', 'd_start', 'x_start'], {'folds': 
'folds_test'}), '(df_test, X, Y, o, d_start, x_start,\n folds=folds_test)\n', (5788, 5846), False, 'import uncertainty_rfr\n'), ((6626, 6689), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'err_int': [1, 2, 3], 'std_dev': [4, 5, 6]}"}), "(data={'err_int': [1, 2, 3], 'std_dev': [4, 5, 6]})\n", (6638, 6689), True, 'import pandas as pd\n'), ((6724, 6784), 'uncertainty_rfr.largest_uncertainty', 'uncertainty_rfr.largest_uncertainty', (['df', 'num_vals', '"""std_dev"""'], {}), "(df, num_vals, 'std_dev')\n", (6759, 6784), False, 'import uncertainty_rfr\n'), ((4686, 4736), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'mean': [1, 2], 'std': [3, 4]}"}), "(data={'mean': [1, 2], 'std': [3, 4]})\n", (4698, 4736), True, 'import pandas as pd\n'), ((1674, 1710), 'pandas.api.types.is_numeric_dtype', 'ptypes.is_numeric_dtype', (['X_test[col]'], {}), '(X_test[col])\n', (1697, 1710), True, 'import pandas.api.types as ptypes\n'), ((5113, 5149), 'pandas.api.types.is_numeric_dtype', 'ptypes.is_numeric_dtype', (['err_df[col]'], {}), '(err_df[col])\n', (5136, 5149), True, 'import pandas.api.types as ptypes\n')]
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt
from frappe import _
from frappe.model.document import Document
class CForm(Document):
def validate(self):
"""Validate invoice that c-form is applicable
and no other c-form is received for that"""
for d in self.get('invoices'):
if d.invoice_no:
inv = frappe.db.sql("""select c_form_applicable, c_form_no from
`tabSales Invoice` where name = %s and docstatus = 1""", d.invoice_no)
if inv and inv[0][0] != 'Yes':
frappe.throw(_("C-form is not applicable for Invoice: {0}".format(d.invoice_no)))
elif inv and inv[0][1] and inv[0][1] != self.name:
frappe.throw(_("""Invoice {0} is tagged in another C-form: {1}.
If you want to change C-form no for this invoice,
please remove invoice no from the previous c-form and then try again"""\
.format(d.invoice_no, inv[0][1])))
elif not inv:
frappe.throw(_("Row {0}: Invoice {1} is invalid, it might be cancelled / does not exist. \
Please enter a valid Invoice".format(d.idx, d.invoice_no)))
def on_update(self):
""" Update C-Form No on invoices"""
self.set_total_invoiced_amount()
def on_submit(self):
self.set_cform_in_sales_invoices()
def before_cancel(self):
# remove cform reference
frappe.db.sql("""update `tabSales Invoice` set c_form_no=null where c_form_no=%s""", self.name)
def set_cform_in_sales_invoices(self):
inv = [d.invoice_no for d in self.get('invoices')]
if inv:
frappe.db.sql("""update `tabSales Invoice` set c_form_no=%s, modified=%s where name in (%s)""" %
('%s', '%s', ', '.join(['%s'] * len(inv))), tuple([self.name, self.modified] + inv))
frappe.db.sql("""update `tabSales Invoice` set c_form_no = null, modified = %s
where name not in (%s) and ifnull(c_form_no, '') = %s""" %
('%s', ', '.join(['%s']*len(inv)), '%s'), tuple([self.modified] + inv + [self.name]))
else:
frappe.throw(_("Please enter atleast 1 invoice in the table"))
def set_total_invoiced_amount(self):
total = sum([flt(d.grand_total) for d in self.get('invoices')])
frappe.db.set(self, 'total_invoiced_amount', total)
def get_invoice_details(self, invoice_no):
""" Pull details from invoices for referrence """
if invoice_no:
inv = frappe.db.get_value("Sales Invoice", invoice_no,
["posting_date", "territory", "base_net_total", "base_grand_total"], as_dict=True)
return {
'invoice_date' : inv.posting_date,
'territory' : inv.territory,
'net_total' : inv.base_net_total,
'grand_total' : inv.base_grand_total
}
|
[
"frappe.utils.flt",
"frappe.db.sql",
"frappe.db.get_value",
"frappe.db.set",
"frappe._"
] |
[((1430, 1526), 'frappe.db.sql', 'frappe.db.sql', (['"""update `tabSales Invoice` set c_form_no=null where c_form_no=%s"""', 'self.name'], {}), "('update `tabSales Invoice` set c_form_no=null where c_form_no=%s'\n , self.name)\n", (1443, 1526), False, 'import frappe\n'), ((2236, 2287), 'frappe.db.set', 'frappe.db.set', (['self', '"""total_invoiced_amount"""', 'total'], {}), "(self, 'total_invoiced_amount', total)\n", (2249, 2287), False, 'import frappe\n'), ((2411, 2546), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Sales Invoice"""', 'invoice_no', "['posting_date', 'territory', 'base_net_total', 'base_grand_total']"], {'as_dict': '(True)'}), "('Sales Invoice', invoice_no, ['posting_date',\n 'territory', 'base_net_total', 'base_grand_total'], as_dict=True)\n", (2430, 2546), False, 'import frappe\n'), ((479, 622), 'frappe.db.sql', 'frappe.db.sql', (['"""select c_form_applicable, c_form_no from\n\t\t\t\t\t`tabSales Invoice` where name = %s and docstatus = 1"""', 'd.invoice_no'], {}), '(\n """select c_form_applicable, c_form_no from\n\t\t\t\t\t`tabSales Invoice` where name = %s and docstatus = 1"""\n , d.invoice_no)\n', (492, 622), False, 'import frappe\n'), ((2079, 2127), 'frappe._', '_', (['"""Please enter atleast 1 invoice in the table"""'], {}), "('Please enter atleast 1 invoice in the table')\n", (2080, 2127), False, 'from frappe import _\n'), ((2183, 2201), 'frappe.utils.flt', 'flt', (['d.grand_total'], {}), '(d.grand_total)\n', (2186, 2201), False, 'from frappe.utils import flt\n')]
|
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Provides black-box gradient estimation using NES.
"""
import logging
from typing import List, Optional, Tuple, Union, TYPE_CHECKING
import numpy as np
from scipy.stats import entropy
from art.estimators.estimator import BaseEstimator
from art.estimators.classification.classifier import ClassifierMixin, ClassifierLossGradients
from art.utils import clip_and_round
if TYPE_CHECKING:
from art.utils import CLASSIFIER_CLASS_LOSS_GRADIENTS_TYPE
logger = logging.getLogger(__name__)
import itertools
class QueryEfficientGradientEstimationClassifier(ClassifierLossGradients, ClassifierMixin, BaseEstimator):
"""
Implementation of Query-Efficient Black-box Adversarial Examples. The attack approximates the gradient by
maximizing the loss function over samples drawn from random Gaussian noise around the input.
| Paper link: https://arxiv.org/abs/1712.07113
"""
estimator_params = ["num_basis", "sigma", "round_samples"]
def __init__(
self,
classifier: "CLASSIFIER_CLASS_LOSS_GRADIENTS_TYPE",
num_basis: int,
sigma: float,
round_samples: float = 0.0,
) -> None:
"""
:param classifier: An instance of a classification estimator whose loss_gradient is being approximated.
:param num_basis: The number of samples to draw to approximate the gradient.
:param sigma: Scaling on the Gaussian noise N(0,1).
:param round_samples: The resolution of the input domain to round the data to, e.g., 1.0, or 1/255. Set to 0 to
disable.
"""
super().__init__(model=classifier.model, clip_values=classifier.clip_values)
# pylint: disable=E0203
self._classifier = classifier
self.num_basis = num_basis
self.sigma = sigma
self.round_samples = round_samples
self._nb_classes = self._classifier.nb_classes
@property
def input_shape(self) -> Tuple[int, ...]:
"""
Return the shape of one input sample.
:return: Shape of one input sample.
"""
return self._classifier.input_shape # type: ignore
def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> np.ndarray: # pylint: disable=W0221
"""
Perform prediction of the classifier for input `x`. Rounds results first.
:param x: Features in array of shape (nb_samples, nb_features) or (nb_samples, nb_pixels_1, nb_pixels_2,
nb_channels) or (nb_samples, nb_channels, nb_pixels_1, nb_pixels_2).
:param batch_size: Size of batches.
:return: Array of predictions of shape `(nb_inputs, nb_classes)`.
"""
return self._classifier.predict(clip_and_round(x, self.clip_values, self.round_samples), batch_size=batch_size)
def fit(self, x: np.ndarray, y: np.ndarray, **kwargs) -> None:
"""
Fit the classifier using the training data `(x, y)`.
:param x: Features in array of shape (nb_samples, nb_features) or (nb_samples, nb_pixels_1, nb_pixels_2,
nb_channels) or (nb_samples, nb_channels, nb_pixels_1, nb_pixels_2).
:param y: Target values (class labels in classification) in array of shape (nb_samples, nb_classes) in
one-hot encoding format.
:param kwargs: Dictionary of framework-specific arguments.
"""
raise NotImplementedError
def _generate_samples(self, x: np.ndarray, epsilon_map: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Generate samples around the current image.
:param x: Sample input with shape as expected by the model.
:param epsilon_map: Samples drawn from search space.
:return: Two arrays of new input samples to approximate gradient.
"""
minus = clip_and_round(
np.repeat(x, self.num_basis, axis=0) - epsilon_map,
self.clip_values,
self.round_samples,
)
plus = clip_and_round(
np.repeat(x, self.num_basis, axis=0) + epsilon_map,
self.clip_values,
self.round_samples,
)
return minus, plus
def class_gradient(self, x: np.ndarray, label: Union[int, List[int], None] = None, **kwargs) -> np.ndarray:
"""
Compute per-class derivatives w.r.t. `x`.
:param x: Input with shape as expected by the classifier's model.
:param label: Index of a specific per-class derivative. If an integer is provided, the gradient of that class
output is computed for all samples. If multiple values as provided, the first dimension should
match the batch size of `x`, and each value will be used as target for its corresponding sample in
`x`. If `None`, then gradients for all classes will be computed for each sample.
:return: Array of gradients of input features w.r.t. each class in the form
`(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes
`(batch_size, 1, input_shape)` when `label` parameter is specified.
"""
raise NotImplementedError
def _generate_sample_i(self, x: np.ndarray, epsilon_map: np.ndarray, i: int) -> Tuple[np.ndarray, np.ndarray]:
minus = clip_and_round(
x - epsilon_map[i],
self.clip_values,
self.round_samples,
)
plus = clip_and_round(
x + epsilon_map[i],
self.clip_values,
self.round_samples,
)
return minus, plus
def loss_gradient(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray:
if self.amortized_attack:
return self.loss_gradient_new_efficient(x, y)
#return self.loss_gradient_new(x, y)
else:
return self.loss_gradient_old(x, y)
#return self.loss_gradient_new(x, y)
#return self.loss_gradient_new_efficient(x, y)
def loss_gradient_new_efficient(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray:
"""
Compute the gradient of the loss function w.r.t. `x`.
:param x: Sample input with shape as expected by the model.
:param y: Correct labels, one-vs-rest encoding.
:return: Array of gradients of the same shape as `x`.
"""
epsilon_map = self.sigma * np.random.normal(size=([self.num_basis] + list(self.input_shape)))
#print(epsilon_map.shape)
#print(epsilon_map.reshape(self.num_basis, -1).shape)
grads = [0.0] * len(x)
#print('eps map shape', epsilon_map.shape)
#print('epsmap 11', epsilon_map[11])
#batch over multiple examples
reps_per_batch = 10
reps = epsilon_map.shape[0]
for jb in range(0, reps, reps_per_batch):
minus_preds = []
len_x = len(x)
pm_len = 2*len_x*reps_per_batch
minuses_pluses = [None]*pm_len
for b in range(reps_per_batch):
j = jb + b
#print('j', j, 'b', b)
if j >= reps:
b -= 1
#print('b after dec', b)
break
for i in range(len(x)):
minus, plus = self._generate_sample_i(x[i : i + 1], epsilon_map, j)
#print('j', j)
#print('minus i', i + b*2*len_x, 'plus i', i + len_x + b*2*len_x)
minuses_pluses[i + b*2*len_x] = minus
minuses_pluses[i + len_x + b*2*len_x] = plus
#print('b after loop', b)
if jb + reps_per_batch > reps:
#print(minuses_pluses[:(b+1)*2*len_x])
#print(minuses_pluses[(b+1)*2*len_x:])
minuses_pluses = minuses_pluses[:(b+1)*2*len_x]
#print('len(minuses_pluses)', len(minuses_pluses))
minuses_pluses = np.array(minuses_pluses)
minuses_pluses = np.squeeze(minuses_pluses, 1)
#print(minuses_pluses.shape)
pm_preds = self.predict(minuses_pluses, batch_size=4000)
#minus_preds, plus_preds = np.split(pm_preds, 2)
#print('num pm preds', pm_preds.shape)
#print('b', b+1)
rounds = np.split(pm_preds, b+1)
#print('len(rounds)', len(rounds))
for rn, r in enumerate(rounds):
minus_preds, plus_preds = np.split(r, 2)
#print(minus_preds.shape, plus_preds.shape)
j = jb + rn
for i, (mp, pp) in enumerate(zip(minus_preds, plus_preds)):
new_y_minus = entropy(y[i], mp)
new_y_plus = entropy(y[i], pp)
one_grad = epsilon_map[j] * (new_y_plus - new_y_minus)
grads[i] += one_grad
for i in range(len(grads)):
grads[i] = grads[i] / (self.num_basis * self.sigma)
grads = self._apply_preprocessing_gradient(x, np.array(grads))
return grads
def loss_gradient_new(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray:
"""
Compute the gradient of the loss function w.r.t. `x`.
:param x: Sample input with shape as expected by the model.
:param y: Correct labels, one-vs-rest encoding.
:return: Array of gradients of the same shape as `x`.
"""
epsilon_map = self.sigma * np.random.normal(size=([self.num_basis] + list(self.input_shape)))
#print(epsilon_map.shape)
#print(epsilon_map.reshape(self.num_basis, -1).shape)
grads = [0.0] * len(x)
for j in range(epsilon_map.shape[0]):
minus_preds = []
#plus_preds = []
pluses = []
minus = None
plus = None
for r in range(2):
for i in range(len(x)):
if r == 0:
minus, plus = self._generate_sample_i(x[i : i + 1], epsilon_map, j)
minus_preds.append(self.predict(minus)[0])
pluses.append(plus)
else:
plus_pred = self.predict(pluses[i])[0]
new_y_minus = entropy(y[i], minus_preds[i])
new_y_plus = entropy(y[i], plus_pred)
one_grad = epsilon_map[j] * (new_y_plus - new_y_minus)
grads[i] += one_grad
#for j in range(epsilon_map.shape[0]):
# for i in range(len(x)):
# minus, plus = self._generate_sample_i(x[i : i + 1], epsilon_map, j)
# pred = self.predict(np.concatenate((minus, plus)))
# new_y_minus = entropy(y[i], pred[0])
# new_y_plus = entropy(y[i], pred[1])
# one_grad = epsilon_map[j] * (new_y_plus - new_y_minus)
# grads[i] += one_grad
# #pluses = [self._generate_sample_i(x[i : i + 1], epsilon_map, j)[1][0] for i in range(len(x))]
# #plus_preds = self.predict(pluses)
# #print('plus_preds.shape', plus_preds.shape)
# #print(len(pluses))
# #minuses = [self._generate_sample_i(x[i : i + 1], epsilon_map, j)[0][0] for i in range(len(x))]
# #minus_preds = self.predict(minuses)
# #print('minus_preds.shape', minus_preds.shape)
# #for i in range(len(x)):
# # grads[i] += epsilon_map[j] * (plus_preds[i] - minus_preds[i])
for i in range(len(grads)):
grads[i] = grads[i]* 2/self.num_basis / (2 * self.sigma)
grads = self._apply_preprocessing_gradient(x, np.array(grads))
return grads
def loss_gradient_old(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray:
#new_grads = self.loss_gradient_new(x, y)
"""
Compute the gradient of the loss function w.r.t. `x`.
:param x: Sample input with shape as expected by the model.
:param y: Correct labels, one-vs-rest encoding.
:return: Array of gradients of the same shape as `x`.
"""
epsilon_map = self.sigma * np.random.normal(size=([self.num_basis] + list(self.input_shape)))
#print(epsilon_map.shape)
#print(epsilon_map.reshape(self.num_basis, -1).shape)
grads = []
for i in range(len(x)):
#print('i', i)
minus, plus = self._generate_samples(x[i : i + 1], epsilon_map)
#print('shape', minus.shape, plus.shape)
# Vectorized; small tests weren't faster
# ent_vec = np.vectorize(lambda p: entropy(y[i], p), signature='(n)->()')
# new_y_minus = ent_vec(self.predict(minus))
# new_y_plus = ent_vec(self.predict(plus))
# Vanilla
new_y_minus = np.array([entropy(y[i], p) for p in self.predict(minus, batch_size=4000)])
new_y_plus = np.array([entropy(y[i], p) for p in self.predict(plus, batch_size=4000)])
#print('term1 shape', epsilon_map.reshape(self.num_basis, -1).shape)
#print('term2 shape', ((new_y_plus - new_y_minus).reshape(self.num_basis, -1) / (2 * self.sigma)).shape)
query_efficient_grad = 2 * np.mean(
np.multiply(
epsilon_map.reshape(self.num_basis, -1),
(new_y_plus - new_y_minus).reshape(self.num_basis, -1) / (2 * self.sigma),
).reshape([-1] + list(self.input_shape)),
axis=0,
)
grads.append(query_efficient_grad)
grads = self._apply_preprocessing_gradient(x, np.array(grads))
#print('old grads', grads)
#print('new grads', new_grads)
#print('equal', grads == new_grads)
return grads
def get_activations(self, x: np.ndarray, layer: Union[int, str], batch_size: int) -> np.ndarray:
"""
Return the output of the specified layer for input `x`. `layer` is specified by layer index (between 0 and
`nb_layers - 1`) or by name. The number of layers can be determined by counting the results returned by
calling `layer_names`.
:param x: Input for computing the activations.
:param layer: Layer for computing the activations.
:param batch_size: Size of batches.
:return: The output of `layer`, where the first dimension is the batch size corresponding to `x`.
"""
raise NotImplementedError
def save(self, filename: str, path: Optional[str] = None) -> None:
"""
Save a model to file specific to the backend framework.
:param filename: Name of the file where to save the model.
:param path: Path of the directory where to save the model. If no path is specified, the model will be stored in
the default data location of ART at `ART_DATA_PATH`.
"""
raise NotImplementedError
|
[
"scipy.stats.entropy",
"numpy.split",
"numpy.array",
"numpy.squeeze",
"art.utils.clip_and_round",
"logging.getLogger",
"numpy.repeat"
] |
[((1598, 1625), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1615, 1625), False, 'import logging\n'), ((6455, 6527), 'art.utils.clip_and_round', 'clip_and_round', (['(x - epsilon_map[i])', 'self.clip_values', 'self.round_samples'], {}), '(x - epsilon_map[i], self.clip_values, self.round_samples)\n', (6469, 6527), False, 'from art.utils import clip_and_round\n'), ((6590, 6662), 'art.utils.clip_and_round', 'clip_and_round', (['(x + epsilon_map[i])', 'self.clip_values', 'self.round_samples'], {}), '(x + epsilon_map[i], self.clip_values, self.round_samples)\n', (6604, 6662), False, 'from art.utils import clip_and_round\n'), ((3849, 3904), 'art.utils.clip_and_round', 'clip_and_round', (['x', 'self.clip_values', 'self.round_samples'], {}), '(x, self.clip_values, self.round_samples)\n', (3863, 3904), False, 'from art.utils import clip_and_round\n'), ((9075, 9099), 'numpy.array', 'np.array', (['minuses_pluses'], {}), '(minuses_pluses)\n', (9083, 9099), True, 'import numpy as np\n'), ((9129, 9158), 'numpy.squeeze', 'np.squeeze', (['minuses_pluses', '(1)'], {}), '(minuses_pluses, 1)\n', (9139, 9158), True, 'import numpy as np\n'), ((9434, 9459), 'numpy.split', 'np.split', (['pm_preds', '(b + 1)'], {}), '(pm_preds, b + 1)\n', (9442, 9459), True, 'import numpy as np\n'), ((10124, 10139), 'numpy.array', 'np.array', (['grads'], {}), '(grads)\n', (10132, 10139), True, 'import numpy as np\n'), ((12810, 12825), 'numpy.array', 'np.array', (['grads'], {}), '(grads)\n', (12818, 12825), True, 'import numpy as np\n'), ((14768, 14783), 'numpy.array', 'np.array', (['grads'], {}), '(grads)\n', (14776, 14783), True, 'import numpy as np\n'), ((4968, 5004), 'numpy.repeat', 'np.repeat', (['x', 'self.num_basis'], {'axis': '(0)'}), '(x, self.num_basis, axis=0)\n', (4977, 5004), True, 'import numpy as np\n'), ((5135, 5171), 'numpy.repeat', 'np.repeat', (['x', 'self.num_basis'], {'axis': '(0)'}), '(x, self.num_basis, axis=0)\n', (5144, 5171), True, 'import numpy 
as np\n'), ((9590, 9604), 'numpy.split', 'np.split', (['r', '(2)'], {}), '(r, 2)\n', (9598, 9604), True, 'import numpy as np\n'), ((9794, 9811), 'scipy.stats.entropy', 'entropy', (['y[i]', 'mp'], {}), '(y[i], mp)\n', (9801, 9811), False, 'from scipy.stats import entropy\n'), ((9841, 9858), 'scipy.stats.entropy', 'entropy', (['y[i]', 'pp'], {}), '(y[i], pp)\n', (9848, 9858), False, 'from scipy.stats import entropy\n'), ((13974, 13990), 'scipy.stats.entropy', 'entropy', (['y[i]', 'p'], {}), '(y[i], p)\n', (13981, 13990), False, 'from scipy.stats import entropy\n'), ((14074, 14090), 'scipy.stats.entropy', 'entropy', (['y[i]', 'p'], {}), '(y[i], p)\n', (14081, 14090), False, 'from scipy.stats import entropy\n'), ((11388, 11417), 'scipy.stats.entropy', 'entropy', (['y[i]', 'minus_preds[i]'], {}), '(y[i], minus_preds[i])\n', (11395, 11417), False, 'from scipy.stats import entropy\n'), ((11455, 11479), 'scipy.stats.entropy', 'entropy', (['y[i]', 'plus_pred'], {}), '(y[i], plus_pred)\n', (11462, 11479), False, 'from scipy.stats import entropy\n')]
|
import pytest
from salt.ext.tornado.httpclient import HTTPError
@pytest.fixture
def app(app):
app.wsgi_application.config["global"]["tools.hypermedia_out.on"] = True
return app
async def test_default_accept(http_client, content_type_map):
response = await http_client.fetch("/", method="GET")
assert response.headers["Content-Type"] == content_type_map["json"]
async def test_unsupported_accept(http_client):
with pytest.raises(HTTPError) as exc:
await http_client.fetch(
"/", method="GET", headers={"Accept": "application/ms-word"}
)
assert exc.value.code == 406
async def test_json_out(http_client, content_type_map):
response = await http_client.fetch(
"/", method="GET", headers={"Accept": content_type_map["json"]}
)
assert response.headers["Content-Type"] == content_type_map["json"]
async def test_yaml_out(http_client, content_type_map):
response = await http_client.fetch(
"/", method="GET", headers={"Accept": content_type_map["yaml"]}
)
assert response.headers["Content-Type"] == content_type_map["yaml"]
|
[
"pytest.raises"
] |
[((440, 464), 'pytest.raises', 'pytest.raises', (['HTTPError'], {}), '(HTTPError)\n', (453, 464), False, 'import pytest\n')]
|
import base64
from typing import Dict, Optional
from ciphey.iface import Config, Decoder, ParamSpec, T, U, registry
@registry.register
class Base64_url(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
"""
Performs Base64 URL decoding
"""
ctext_padding = ctext + "=" * (4 - len(ctext) % 4)
try:
return base64.urlsafe_b64decode(ctext_padding).decode("utf-8")
except Exception:
return None
@staticmethod
def priority() -> float:
# Not expected to show up often, but also very fast to check.
return 0.05
def __init__(self, config: Config):
super().__init__(config)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return None
@staticmethod
def getTarget() -> str:
return "base64_url"
|
[
"base64.urlsafe_b64decode"
] |
[((369, 408), 'base64.urlsafe_b64decode', 'base64.urlsafe_b64decode', (['ctext_padding'], {}), '(ctext_padding)\n', (393, 408), False, 'import base64\n')]
|
import numpy as np
import straxen
import tempfile
import os
import unittest
import shutil
import uuid
test_run_id_1T = '180423_1021'
class TestBasics(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
temp_folder = uuid.uuid4().hex
# Keep one temp dir because we don't want to download the data every time.
cls.tempdir = os.path.join(tempfile.gettempdir(), temp_folder)
assert not os.path.exists(cls.tempdir)
print("Downloading test data (if needed)")
st = straxen.contexts.demo()
cls.run_id = test_run_id_1T
cls.st = st
@classmethod
def tearDownClass(cls):
# Make sure to only cleanup this dir after we have done all the tests
if os.path.exists(cls.tempdir):
shutil.rmtree(cls.tempdir)
def test_run_selection(self):
st = self.st
# Ignore strax-internal warnings
st.set_context_config({'free_options': tuple(st.config.keys())})
run_df = st.select_runs(available='raw_records')
print(run_df)
run_id = run_df.iloc[0]['name']
assert run_id == test_run_id_1T
def test_processing(self):
st = self.st
df = st.get_df(self.run_id, 'event_info')
assert len(df) > 0
assert 'cs1' in df.columns
assert df['cs1'].sum() > 0
assert not np.all(np.isnan(df['x'].values))
def test_get_livetime_sec(self):
st = self.st
events = st.get_array(self.run_id, 'peaks')
straxen.get_livetime_sec(st, test_run_id_1T, things=events)
def test_mini_analysis(self):
@straxen.mini_analysis(requires=('raw_records',))
def count_rr(raw_records):
return len(raw_records)
n = self.st.count_rr(self.run_id)
assert n > 100
|
[
"uuid.uuid4",
"straxen.contexts.demo",
"tempfile.gettempdir",
"os.path.exists",
"numpy.isnan",
"straxen.get_livetime_sec",
"straxen.mini_analysis",
"shutil.rmtree"
] |
[((529, 552), 'straxen.contexts.demo', 'straxen.contexts.demo', ([], {}), '()\n', (550, 552), False, 'import straxen\n'), ((744, 771), 'os.path.exists', 'os.path.exists', (['cls.tempdir'], {}), '(cls.tempdir)\n', (758, 771), False, 'import os\n'), ((1514, 1573), 'straxen.get_livetime_sec', 'straxen.get_livetime_sec', (['st', 'test_run_id_1T'], {'things': 'events'}), '(st, test_run_id_1T, things=events)\n', (1538, 1573), False, 'import straxen\n'), ((1618, 1666), 'straxen.mini_analysis', 'straxen.mini_analysis', ([], {'requires': "('raw_records',)"}), "(requires=('raw_records',))\n", (1639, 1666), False, 'import straxen\n'), ((246, 258), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (256, 258), False, 'import uuid\n'), ((381, 402), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (400, 402), False, 'import tempfile\n'), ((436, 463), 'os.path.exists', 'os.path.exists', (['cls.tempdir'], {}), '(cls.tempdir)\n', (450, 463), False, 'import os\n'), ((785, 811), 'shutil.rmtree', 'shutil.rmtree', (['cls.tempdir'], {}), '(cls.tempdir)\n', (798, 811), False, 'import shutil\n'), ((1369, 1393), 'numpy.isnan', 'np.isnan', (["df['x'].values"], {}), "(df['x'].values)\n", (1377, 1393), True, 'import numpy as np\n')]
|
from __future__ import annotations
from asyncio.events import AbstractEventLoop, TimerHandle
from asyncio.futures import Future
from typing import Mapping
from safe_set_result import safe_set_result
import scrypted_sdk
import numpy as np
import re
import tflite_runtime.interpreter as tflite
from pycoral.utils.edgetpu import make_interpreter
from pycoral.utils.edgetpu import list_edge_tpus
from pycoral.utils.edgetpu import run_inference
from pycoral.adapters.common import input_size
from pycoral.adapters import detect
from PIL import Image
import common
import io
import gstreamer
import json
import asyncio
import time
import os
import binascii
from urllib.parse import urlparse
from gi.repository import Gst
import multiprocessing
from third_party.sort import Sort
from scrypted_sdk.types import FFMpegInput, Lock, MediaObject, ObjectDetection, ObjectDetectionModel, ObjectDetectionResult, ObjectDetectionSession, OnOff, ObjectsDetected, ScryptedInterface, ScryptedMimeTypes
def parse_label_contents(contents: str):
lines = contents.splitlines()
ret = {}
for row_number, content in enumerate(lines):
pair = re.split(r'[:\s]+', content.strip(), maxsplit=1)
if len(pair) == 2 and pair[0].strip().isdigit():
ret[int(pair[0])] = pair[1].strip()
else:
ret[row_number] = content.strip()
return ret
class DetectionSession:
id: str
timerHandle: TimerHandle
future: Future
loop: AbstractEventLoop
score_threshold: float
running: bool
def __init__(self) -> None:
self.timerHandle = None
self.future = Future()
self.tracker = Sort()
self.running = False
def cancel(self):
if self.timerHandle:
self.timerHandle.cancel()
self.timerHandle = None
def timedOut(self):
safe_set_result(self.future)
def setTimeout(self, duration: float):
self.cancel()
self.loop.call_later(duration, lambda: self.timedOut())
class CoralPlugin(scrypted_sdk.ScryptedDeviceBase, ObjectDetection):
detection_sessions: Mapping[str, DetectionSession] = {}
session_mutex = multiprocessing.Lock()
def __init__(self, nativeId: str | None = None):
super().__init__(nativeId=nativeId)
labels_contents = scrypted_sdk.zip.open(
'fs/coco_labels.txt').read().decode('utf8')
self.labels = parse_label_contents(labels_contents)
edge_tpus = list_edge_tpus()
if len(edge_tpus):
model = scrypted_sdk.zip.open(
'fs/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite').read()
self.interpreter = make_interpreter(model)
else:
model = scrypted_sdk.zip.open(
'fs/mobilenet_ssd_v2_coco_quant_postprocess.tflite').read()
self.interpreter = tflite.Interpreter(model_content=model)
self.interpreter.allocate_tensors()
self.mutex = multiprocessing.Lock()
async def getInferenceModels(self) -> list[ObjectDetectionModel]:
ret = list[ObjectDetectionModel]()
_, height, width, channels = self.interpreter.get_input_details()[
0]['shape']
d = {
'id': 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu',
'name': '<NAME>',
'classes': list(self.labels.values()),
'inputShape': [int(width), int(height), int(channels)],
}
ret.append(d)
return ret
def create_detection_result(self, objs, size, tracker: Sort = None):
detections = list[ObjectDetectionResult]()
detection_result: ObjectsDetected = {}
detection_result['detections'] = detections
detection_result['inputDimensions'] = size
tracker_detections = []
for obj in objs:
element = [] # np.array([])
element.append(obj.bbox.xmin)
element.append(obj.bbox.ymin)
element.append(obj.bbox.xmax)
element.append(obj.bbox.ymax)
element.append(obj.score) # print('element= ',element)
tracker_detections.append(element)
tracker_detections = np.array(tracker_detections)
trdata = []
trackerFlag = False
if tracker and tracker_detections.any():
trdata = tracker.update(tracker_detections)
trackerFlag = True
if trackerFlag and (np.array(trdata)).size:
for td in trdata:
x0, y0, x1, y1, trackID = td[0].item(), td[1].item(
), td[2].item(), td[3].item(), td[4].item()
overlap = 0
for ob in objs:
dx0, dy0, dx1, dy1 = ob.bbox.xmin, ob.bbox.ymin, ob.bbox.xmax, ob.bbox.ymax
area = (min(dx1, x1)-max(dx0, x0)) * \
(min(dy1, y1)-max(dy0, y0))
if (area > overlap):
overlap = area
obj = ob
detection: ObjectDetectionResult = {}
detection['id'] = str(trackID)
detection['boundingBox'] = (
obj.bbox.xmin, obj.bbox.ymin, obj.bbox.ymax, obj.bbox.ymax)
detection['className'] = self.labels.get(obj.id, obj.id)
detection['score'] = obj.score
detections.append(detection)
else:
for obj in objs:
detection: ObjectDetectionResult = {}
detection['boundingBox'] = (
obj.bbox.xmin, obj.bbox.ymin, obj.bbox.ymax, obj.bbox.ymax)
detection['className'] = self.labels.get(obj.id, obj.id)
detection['score'] = obj.score
detections.append(detection)
return detection_result
def detection_event(self, detection_session: DetectionSession, detection_result: ObjectsDetected, event_buffer: bytes = None):
detection_result['detectionId'] = detection_session.id
detection_result['timestamp'] = int(time.time() * 1000)
asyncio.run_coroutine_threadsafe(self.onDeviceEvent(
ScryptedInterface.ObjectDetection.value, detection_result), loop=detection_session.loop)
def end_session(self, detection_session: DetectionSession):
print('detection ended', detection_session.id)
detection_session.cancel()
with self.session_mutex:
self.detection_sessions.pop(detection_session.id, None)
detection_result: ObjectsDetected = {}
detection_result['running'] = False
self.detection_event(detection_session, detection_result)
async def detectObjects(self, mediaObject: MediaObject, session: ObjectDetectionSession = None) -> ObjectsDetected:
score_threshold = -float('inf')
duration = None
detection_id = None
if session:
detection_id = session.get('detectionId', -float('inf'))
duration = session.get('duration', None)
score_threshold = session.get('minScore', score_threshold)
is_image = mediaObject and mediaObject.mimeType.startswith('image/')
with self.session_mutex:
if not is_image and not detection_id:
detection_id = binascii.b2a_hex(os.urandom(15)).decode('utf8')
if detection_id:
detection_session = self.detection_sessions.get(detection_id, None)
if not duration and not is_image:
if detection_session:
self.end_session(detection_session)
return
elif detection_id and not detection_session:
if not mediaObject:
raise Exception(
'session %s inactive and no mediaObject provided' % detection_id)
detection_session = DetectionSession()
detection_session.id = detection_id
detection_session.score_threshold = score_threshold
loop = asyncio.get_event_loop()
detection_session.loop = loop
self.detection_sessions[detection_id] = detection_session
detection_session.future.add_done_callback(
lambda _: self.end_session(detection_session))
if is_image:
stream = io.BytesIO(bytes(await scrypted_sdk.mediaManager.convertMediaObjectToBuffer(mediaObject, 'image/jpeg')))
image = Image.open(stream)
_, scale = common.set_resized_input(
self.interpreter, image.size, lambda size: image.resize(size, Image.ANTIALIAS))
tracker = None
if detection_session:
tracker = detection_session.tracker
with self.mutex:
self.interpreter.invoke()
objs = detect.get_objects(
self.interpreter, score_threshold=score_threshold, image_scale=scale)
return self.create_detection_result(objs, image.size, tracker = tracker)
new_session = not detection_session.running
if new_session:
detection_session.running = True
detection_session.setTimeout(duration / 1000)
if not new_session:
return
print('detection starting', detection_id)
b = await scrypted_sdk.mediaManager.convertMediaObjectToBuffer(mediaObject, ScryptedMimeTypes.MediaStreamUrl.value)
s = b.decode('utf8')
j: FFMpegInput = json.loads(s)
container = j['container']
videofmt = 'raw'
videosrc = j['url']
if container == 'mpegts' and videosrc.startswith('tcp://'):
parsed_url = urlparse(videosrc)
videofmt = 'gst'
videosrc = 'tcpclientsrc port=%s host=%s ! tsdemux' % (
parsed_url.port, parsed_url.hostname)
size = j['mediaStreamOptions']['video']
inference_size = input_size(self.interpreter)
width, height = inference_size
w, h = (size['width'], size['height'])
scale = min(width / w, height / h)
def user_callback(input_tensor, src_size, inference_box):
with self.mutex:
run_inference(self.interpreter, input_tensor)
objs = detect.get_objects(
self.interpreter, score_threshold=score_threshold, image_scale=(scale, scale))
# (result, mapinfo) = input_tensor.map(Gst.MapFlags.READ)
try:
detection_result = self.create_detection_result(objs,
src_size, detection_session.tracker)
# self.detection_event(detection_session, detection_result, mapinfo.data.tobytes())
self.detection_event(detection_session, detection_result)
if not session or not duration:
safe_set_result(detection_session.future)
finally:
# input_tensor.unmap(mapinfo)
pass
pipeline = gstreamer.run_pipeline(detection_session.future, user_callback,
src_size=(
size['width'], size['height']),
appsink_size=inference_size,
videosrc=videosrc,
videofmt=videofmt)
task = pipeline.run()
asyncio.ensure_future(task)
detection_result: ObjectsDetected = {}
detection_result['detectionId'] = detection_id
detection_result['running'] = True
return detection_result
def create_scrypted_plugin():
    """Plugin factory: build and return the CoralPlugin instance."""
    plugin = CoralPlugin()
    return plugin
#
|
[
"pycoral.utils.edgetpu.make_interpreter",
"multiprocessing.Lock",
"pycoral.utils.edgetpu.run_inference",
"urllib.parse.urlparse",
"safe_set_result.safe_set_result",
"scrypted_sdk.mediaManager.convertMediaObjectToBuffer",
"json.loads",
"pycoral.adapters.common.input_size",
"asyncio.ensure_future",
"gstreamer.run_pipeline",
"tflite_runtime.interpreter.Interpreter",
"os.urandom",
"pycoral.utils.edgetpu.list_edge_tpus",
"asyncio.get_event_loop",
"pycoral.adapters.detect.get_objects",
"third_party.sort.Sort",
"asyncio.futures.Future",
"scrypted_sdk.zip.open",
"PIL.Image.open",
"time.time",
"numpy.array"
] |
[((2148, 2170), 'multiprocessing.Lock', 'multiprocessing.Lock', ([], {}), '()\n', (2168, 2170), False, 'import multiprocessing\n'), ((1611, 1619), 'asyncio.futures.Future', 'Future', ([], {}), '()\n', (1617, 1619), False, 'from asyncio.futures import Future\n'), ((1643, 1649), 'third_party.sort.Sort', 'Sort', ([], {}), '()\n', (1647, 1649), False, 'from third_party.sort import Sort\n'), ((1838, 1866), 'safe_set_result.safe_set_result', 'safe_set_result', (['self.future'], {}), '(self.future)\n', (1853, 1866), False, 'from safe_set_result import safe_set_result\n'), ((2454, 2470), 'pycoral.utils.edgetpu.list_edge_tpus', 'list_edge_tpus', ([], {}), '()\n', (2468, 2470), False, 'from pycoral.utils.edgetpu import list_edge_tpus\n'), ((2945, 2967), 'multiprocessing.Lock', 'multiprocessing.Lock', ([], {}), '()\n', (2965, 2967), False, 'import multiprocessing\n'), ((4153, 4181), 'numpy.array', 'np.array', (['tracker_detections'], {}), '(tracker_detections)\n', (4161, 4181), True, 'import numpy as np\n'), ((9435, 9448), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (9445, 9448), False, 'import json\n'), ((9874, 9902), 'pycoral.adapters.common.input_size', 'input_size', (['self.interpreter'], {}), '(self.interpreter)\n', (9884, 9902), False, 'from pycoral.adapters.common import input_size\n'), ((10985, 11167), 'gstreamer.run_pipeline', 'gstreamer.run_pipeline', (['detection_session.future', 'user_callback'], {'src_size': "(size['width'], size['height'])", 'appsink_size': 'inference_size', 'videosrc': 'videosrc', 'videofmt': 'videofmt'}), "(detection_session.future, user_callback, src_size=(\n size['width'], size['height']), appsink_size=inference_size, videosrc=\n videosrc, videofmt=videofmt)\n", (11007, 11167), False, 'import gstreamer\n'), ((11411, 11438), 'asyncio.ensure_future', 'asyncio.ensure_future', (['task'], {}), '(task)\n', (11432, 11438), False, 'import asyncio\n'), ((2656, 2679), 'pycoral.utils.edgetpu.make_interpreter', 'make_interpreter', (['model'], {}), 
'(model)\n', (2672, 2679), False, 'from pycoral.utils.edgetpu import make_interpreter\n'), ((2840, 2879), 'tflite_runtime.interpreter.Interpreter', 'tflite.Interpreter', ([], {'model_content': 'model'}), '(model_content=model)\n', (2858, 2879), True, 'import tflite_runtime.interpreter as tflite\n'), ((8411, 8429), 'PIL.Image.open', 'Image.open', (['stream'], {}), '(stream)\n', (8421, 8429), False, 'from PIL import Image\n'), ((9275, 9384), 'scrypted_sdk.mediaManager.convertMediaObjectToBuffer', 'scrypted_sdk.mediaManager.convertMediaObjectToBuffer', (['mediaObject', 'ScryptedMimeTypes.MediaStreamUrl.value'], {}), '(mediaObject,\n ScryptedMimeTypes.MediaStreamUrl.value)\n', (9327, 9384), False, 'import scrypted_sdk\n'), ((9630, 9648), 'urllib.parse.urlparse', 'urlparse', (['videosrc'], {}), '(videosrc)\n', (9638, 9648), False, 'from urllib.parse import urlparse\n'), ((4395, 4411), 'numpy.array', 'np.array', (['trdata'], {}), '(trdata)\n', (4403, 4411), True, 'import numpy as np\n'), ((6008, 6019), 'time.time', 'time.time', ([], {}), '()\n', (6017, 6019), False, 'import time\n'), ((8785, 8877), 'pycoral.adapters.detect.get_objects', 'detect.get_objects', (['self.interpreter'], {'score_threshold': 'score_threshold', 'image_scale': 'scale'}), '(self.interpreter, score_threshold=score_threshold,\n image_scale=scale)\n', (8803, 8877), False, 'from pycoral.adapters import detect\n'), ((10144, 10189), 'pycoral.utils.edgetpu.run_inference', 'run_inference', (['self.interpreter', 'input_tensor'], {}), '(self.interpreter, input_tensor)\n', (10157, 10189), False, 'from pycoral.utils.edgetpu import run_inference\n'), ((10213, 10314), 'pycoral.adapters.detect.get_objects', 'detect.get_objects', (['self.interpreter'], {'score_threshold': 'score_threshold', 'image_scale': '(scale, scale)'}), '(self.interpreter, score_threshold=score_threshold,\n image_scale=(scale, scale))\n', (10231, 10314), False, 'from pycoral.adapters import detect\n'), ((2518, 2605), 'scrypted_sdk.zip.open', 
'scrypted_sdk.zip.open', (['"""fs/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite"""'], {}), "(\n 'fs/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite')\n", (2539, 2605), False, 'import scrypted_sdk\n'), ((2714, 2788), 'scrypted_sdk.zip.open', 'scrypted_sdk.zip.open', (['"""fs/mobilenet_ssd_v2_coco_quant_postprocess.tflite"""'], {}), "('fs/mobilenet_ssd_v2_coco_quant_postprocess.tflite')\n", (2735, 2788), False, 'import scrypted_sdk\n'), ((7970, 7994), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (7992, 7994), False, 'import asyncio\n'), ((10835, 10876), 'safe_set_result.safe_set_result', 'safe_set_result', (['detection_session.future'], {}), '(detection_session.future)\n', (10850, 10876), False, 'from safe_set_result import safe_set_result\n'), ((2295, 2338), 'scrypted_sdk.zip.open', 'scrypted_sdk.zip.open', (['"""fs/coco_labels.txt"""'], {}), "('fs/coco_labels.txt')\n", (2316, 2338), False, 'import scrypted_sdk\n'), ((8309, 8388), 'scrypted_sdk.mediaManager.convertMediaObjectToBuffer', 'scrypted_sdk.mediaManager.convertMediaObjectToBuffer', (['mediaObject', '"""image/jpeg"""'], {}), "(mediaObject, 'image/jpeg')\n", (8361, 8388), False, 'import scrypted_sdk\n'), ((7242, 7256), 'os.urandom', 'os.urandom', (['(15)'], {}), '(15)\n', (7252, 7256), False, 'import os\n')]
|
import os
import shutil
import unittest
from pypirun import utility
class TestUtility(unittest.TestCase):
    """Unit tests for the pypirun.utility helpers env_bool and which."""

    # Environment variable name shared by all env_bool tests.
    test_key = 'OUROATH_UTILITY'

    def tearDown(self):
        # Remove the variable if a test set it, keeping tests independent.
        os.environ.pop(self.test_key, None)

    def test__env_bool__default(self):
        # With the variable unset, env_bool returns the supplied default.
        self.assertTrue(utility.env_bool(self.test_key, True))
        self.assertFalse(utility.env_bool(self.test_key, False))

    def test__env_bool__true(self):
        # A 'True' value overrides a False default.
        os.environ[self.test_key] = 'True'
        self.assertTrue(utility.env_bool(self.test_key, False))

    def test__env_bool__false(self):
        # A 'False' value overrides a True default.
        os.environ[self.test_key] = 'False'
        self.assertFalse(utility.env_bool(self.test_key, True))

    def test__which__allow_symlink(self):
        # Allowing symlinks should agree with shutil.which.
        found = utility.which('python3', allow_symlink=True)
        self.assertEqual(found, shutil.which('python3'))

    def test__which__allow_symlink__false(self):
        # Smoke test only: just verify the call succeeds without symlinks.
        found = utility.which('python3', allow_symlink=False)
|
[
"pypirun.utility.env_bool",
"shutil.which",
"pypirun.utility.which"
] |
[((782, 826), 'pypirun.utility.which', 'utility.which', (['"""python3"""'], {'allow_symlink': '(True)'}), "('python3', allow_symlink=True)\n", (795, 826), False, 'from pypirun import utility\n'), ((952, 997), 'pypirun.utility.which', 'utility.which', (['"""python3"""'], {'allow_symlink': '(False)'}), "('python3', allow_symlink=False)\n", (965, 997), False, 'from pypirun import utility\n'), ((328, 365), 'pypirun.utility.env_bool', 'utility.env_bool', (['self.test_key', '(True)'], {}), '(self.test_key, True)\n', (344, 365), False, 'from pypirun import utility\n'), ((392, 430), 'pypirun.utility.env_bool', 'utility.env_bool', (['self.test_key', '(False)'], {}), '(self.test_key, False)\n', (408, 430), False, 'from pypirun import utility\n'), ((536, 574), 'pypirun.utility.env_bool', 'utility.env_bool', (['self.test_key', '(False)'], {}), '(self.test_key, False)\n', (552, 574), False, 'from pypirun import utility\n'), ((683, 720), 'pypirun.utility.env_bool', 'utility.env_bool', (['self.test_key', '(True)'], {}), '(self.test_key, True)\n', (699, 720), False, 'from pypirun import utility\n'), ((860, 883), 'shutil.which', 'shutil.which', (['"""python3"""'], {}), "('python3')\n", (872, 883), False, 'import shutil\n')]
|
from sqlalchemy import Column, Table, MetaData, Index
import logging
from sqlalchemy.dialects.mysql.base import DATETIME
from sqlalchemy.dialects.mysql.base import INTEGER
from sqlalchemy.dialects.mysql.base import VARCHAR
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
    """Create the ``port`` table and its vlan_tag/rack_name indexes.

    :param migrate_engine: SQLAlchemy engine bound to the target database.
    """
    meta = MetaData()
    meta.bind = migrate_engine
    port = Table(
        'port', meta,
        Column('created_at', DATETIME),
        Column('updated_at', DATETIME),
        Column('deleted_at', DATETIME),
        Column('deleted', INTEGER(display_width=11)),
        Column('key', VARCHAR(length=128)),
        Column('id', INTEGER(display_width=11),
               primary_key=True, nullable=False),
        Column('device_id', VARCHAR(length=32), nullable=False),
        Column('rack_name', VARCHAR(length=32), nullable=False),
        Column('vlan_tag', INTEGER(display_width=11), nullable=False),
        Column('ip', VARCHAR(length=15)),
        Column('mac', VARCHAR(length=31)),
    )
    try:
        port.create()
    except Exception:
        # Log the failing table definition before re-raising for debuggability.
        LOG.info(repr(port))
        LOG.exception('Exception while creating table.')
        raise
    indexes = [
        Index('port_vlan_tag_idx', port.c.vlan_tag),
        Index('port_rack_name_idx', port.c.rack_name)
    ]
    for index in indexes:
        index.create(migrate_engine)
def downgrade(migrate_engine):
    """Drop the ``port`` table created by :func:`upgrade`."""
    meta = MetaData()
    meta.bind = migrate_engine
    # Reflect the existing table from the database, then drop it.
    port = Table('port', meta, autoload=True)
    port.drop()
|
[
"sqlalchemy.MetaData",
"sqlalchemy.Index",
"sqlalchemy.Table",
"sqlalchemy.Column",
"sqlalchemy.dialects.mysql.base.INTEGER",
"sqlalchemy.dialects.mysql.base.VARCHAR",
"logging.getLogger"
] |
[((231, 258), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (248, 258), False, 'import logging\n'), ((301, 311), 'sqlalchemy.MetaData', 'MetaData', ([], {}), '()\n', (309, 311), False, 'from sqlalchemy import Column, Table, MetaData, Index\n'), ((1383, 1393), 'sqlalchemy.MetaData', 'MetaData', ([], {}), '()\n', (1391, 1393), False, 'from sqlalchemy import Column, Table, MetaData, Index\n'), ((1437, 1471), 'sqlalchemy.Table', 'Table', (['"""port"""', 'meta'], {'autoload': '(True)'}), "('port', meta, autoload=True)\n", (1442, 1471), False, 'from sqlalchemy import Column, Table, MetaData, Index\n'), ((391, 421), 'sqlalchemy.Column', 'Column', (['"""created_at"""', 'DATETIME'], {}), "('created_at', DATETIME)\n", (397, 421), False, 'from sqlalchemy import Column, Table, MetaData, Index\n'), ((431, 461), 'sqlalchemy.Column', 'Column', (['"""updated_at"""', 'DATETIME'], {}), "('updated_at', DATETIME)\n", (437, 461), False, 'from sqlalchemy import Column, Table, MetaData, Index\n'), ((471, 501), 'sqlalchemy.Column', 'Column', (['"""deleted_at"""', 'DATETIME'], {}), "('deleted_at', DATETIME)\n", (477, 501), False, 'from sqlalchemy import Column, Table, MetaData, Index\n'), ((1170, 1213), 'sqlalchemy.Index', 'Index', (['"""port_vlan_tag_idx"""', 'port.c.vlan_tag'], {}), "('port_vlan_tag_idx', port.c.vlan_tag)\n", (1175, 1213), False, 'from sqlalchemy import Column, Table, MetaData, Index\n'), ((1223, 1268), 'sqlalchemy.Index', 'Index', (['"""port_rack_name_idx"""', 'port.c.rack_name'], {}), "('port_rack_name_idx', port.c.rack_name)\n", (1228, 1268), False, 'from sqlalchemy import Column, Table, MetaData, Index\n'), ((529, 554), 'sqlalchemy.dialects.mysql.base.INTEGER', 'INTEGER', ([], {'display_width': '(11)'}), '(display_width=11)\n', (536, 554), False, 'from sqlalchemy.dialects.mysql.base import INTEGER\n'), ((579, 598), 'sqlalchemy.dialects.mysql.base.VARCHAR', 'VARCHAR', ([], {'length': '(128)'}), '(length=128)\n', (586, 598), False, 'from 
sqlalchemy.dialects.mysql.base import VARCHAR\n'), ((622, 647), 'sqlalchemy.dialects.mysql.base.INTEGER', 'INTEGER', ([], {'display_width': '(11)'}), '(display_width=11)\n', (629, 647), False, 'from sqlalchemy.dialects.mysql.base import INTEGER\n'), ((727, 745), 'sqlalchemy.dialects.mysql.base.VARCHAR', 'VARCHAR', ([], {'length': '(32)'}), '(length=32)\n', (734, 745), False, 'from sqlalchemy.dialects.mysql.base import VARCHAR\n'), ((792, 810), 'sqlalchemy.dialects.mysql.base.VARCHAR', 'VARCHAR', ([], {'length': '(32)'}), '(length=32)\n', (799, 810), False, 'from sqlalchemy.dialects.mysql.base import VARCHAR\n'), ((856, 881), 'sqlalchemy.dialects.mysql.base.INTEGER', 'INTEGER', ([], {'display_width': '(11)'}), '(display_width=11)\n', (863, 881), False, 'from sqlalchemy.dialects.mysql.base import INTEGER\n'), ((921, 939), 'sqlalchemy.dialects.mysql.base.VARCHAR', 'VARCHAR', ([], {'length': '(15)'}), '(length=15)\n', (928, 939), False, 'from sqlalchemy.dialects.mysql.base import VARCHAR\n'), ((964, 982), 'sqlalchemy.dialects.mysql.base.VARCHAR', 'VARCHAR', ([], {'length': '(31)'}), '(length=31)\n', (971, 982), False, 'from sqlalchemy.dialects.mysql.base import VARCHAR\n')]
|
# -*- coding:utf-8 -*-
"""
通联数据
Created on 2015/08/24
@author: <NAME>
@group : waditu
@contact: <EMAIL>
"""
from io import StringIO
import pandas as pd
from tushare.util import vars as vs
from tushare.util.common import Client
from tushare.util import upass as up
class Idx():
    """Client wrapper for the DataYes (通联数据) index endpoints."""

    def __init__(self, client=None):
        # Reuse an injected Client, otherwise build one from the stored token.
        if client is None:
            self.client = Client(up.get_token())
        else:
            self.client = client

    def Idx(self, secID='', ticker='', field=''):
        """Fetch basic attributes of domestic and foreign indexes:
        index name, code, publisher, publication date, base date,
        base point, etc. (original docstring was in Chinese)
        """
        code, result = self.client.getData(vs.IDX%(secID, ticker, field))
        return _ret_data(code, result)

    def IdxCons(self, secID='', ticker='', intoDate='', isNew='', field=''):
        """Fetch index constituents: constituent names, codes,
        inclusion dates and removal dates.
        (original docstring was in Chinese)
        """
        # NOTE(review): intoDate is interpolated twice into vs.IDXCONS --
        # presumably the format string has two date slots; confirm.
        code, result = self.client.getData(vs.IDXCONS%(secID, ticker, intoDate,
                                                       intoDate, isNew, field))
        return _ret_data(code, result)
def _ret_data(code, result):
    """Decode a successful API payload into a DataFrame.

    On HTTP 200 the body is parsed as CSV and returned as a DataFrame;
    otherwise the raw result is printed and None is returned.
    """
    if code != 200:
        print(result)
        return None
    # Python 3 returns bytes from the client; decode before parsing.
    text = result.decode('utf-8') if vs.PY3 else result
    return pd.read_csv(StringIO(text))
|
[
"tushare.util.upass.get_token",
"io.StringIO"
] |
[((1209, 1225), 'io.StringIO', 'StringIO', (['result'], {}), '(result)\n', (1217, 1225), False, 'from io import StringIO\n'), ((382, 396), 'tushare.util.upass.get_token', 'up.get_token', ([], {}), '()\n', (394, 396), True, 'from tushare.util import upass as up\n')]
|
import torch
# After running `make install` in the torchmps folder, this should work
from torchmps import ProbMPS
# Dummy parameters for the model and data
bond_dim = 13
input_dim = 2
batch_size = 55
sequence_len = 21
complex_params = True
# Verify that you can initialize the model
my_mps = ProbMPS(sequence_len, input_dim, bond_dim, complex_params)
# Verify that a Pytorch optimizer initializes properly
optimizer = torch.optim.Adam(my_mps.parameters())
# Create dummy discrete index data (has to be integer/long type!)
# Shape is (sequence_len, batch_size) with values in [0, input_dim).
data = torch.randint(high=input_dim, size=(sequence_len, batch_size))
# Verify that the model forward function works on dummy data
# The model returns one log-probability per batch element.
log_probs = my_mps(data)
assert log_probs.shape == (batch_size,)
# Verify that backprop works fine, and that gradients are populated
loss = my_mps.loss(data)  # <- Negative log likelihood loss
# Gradients must be unset before the first backward pass...
assert all(p.grad is None for p in my_mps.parameters())
# Normally we have to call optimizer.zero_grad before loss.backward, but
# this is just single training run so it doesn't matter
loss.backward()
optimizer.step()
# ...and populated on every parameter afterwards.
assert all(p.grad is not None for p in my_mps.parameters())
# Congrats, you're ready to start writing the actual training script!
print("Yay, things seem to be working :)")
|
[
"torch.randint",
"torchmps.ProbMPS"
] |
[((295, 353), 'torchmps.ProbMPS', 'ProbMPS', (['sequence_len', 'input_dim', 'bond_dim', 'complex_params'], {}), '(sequence_len, input_dim, bond_dim, complex_params)\n', (302, 353), False, 'from torchmps import ProbMPS\n'), ((534, 596), 'torch.randint', 'torch.randint', ([], {'high': 'input_dim', 'size': '(sequence_len, batch_size)'}), '(high=input_dim, size=(sequence_len, batch_size))\n', (547, 596), False, 'import torch\n')]
|
"""
Tests handle's interactivity.
"""
import vcs.vtk_ui
import vtk_ui_test
import decimal
class test_vtk_ui_handle_interaction(vtk_ui_test.vtk_ui_test):
    """Exercises a Handle's clicked/dragged/released callbacks.

    self.passed acts as a step counter enforcing callback order:
    2 after clicked, 3 after dragged, 4 after released, 5 after the
    normalized drag.
    """

    def setUp(self):
        super(test_vtk_ui_handle_interaction, self).setUp()
        self.h = None
        self.h2 = None

    def do(self):
        self.win.SetSize(100, 100)
        # Pixel-coordinate handle wired to all three callbacks below.
        self.h = vcs.vtk_ui.Handle(
            self.inter, (5, 5),
            clicked=self.clicked, dragged=self.dragged, released=self.released)
        self.h.show()
        # Simulate a click-drag-release gesture: press, move 5px, release.
        self.mouse_down(5, 5)
        self.mouse_move(10, 10)
        self.mouse_up(10, 10)
        # Second handle checks that a normalized drag reports fractional dx/dy.
        self.h2 = vcs.vtk_ui.Handle(
            self.inter, (.3, .3), dragged=self.norm_drag, normalize=True)
        self.h2.show()
        self.mouse_down(30, 30)
        self.mouse_move(40, 40)
        self.mouse_up(40, 40)
        assert self.passed == 5, "Did not trigger drag on normalized"
        self.passed = 0

    def norm_drag(self, handle, dx, dy):
        assert handle == self.h2, "Normalized passed wrong handle to drag"
        # 10px of motion in the 100px window should normalize to 0.1 per axis;
        # compare via fixed-precision Decimal to dodge float noise.
        expected = decimal.Decimal("%f" % .1)
        assert decimal.Decimal("%f" % dx) == expected, "DX normalized incorrectly; %f when expecting %f" % (dx, .1)
        assert decimal.Decimal("%f" % dy) == expected, "DY normalized incorrectly; %f when expecting %f" % (dy, .1)
        assert self.passed == 4, "Did not trigger released"
        self.passed = 5

    def clicked(self, handle):
        assert handle == self.h, "Clicked received argument that was not the handle"
        self.passed = 2

    def dragged(self, handle, dx, dy):
        assert handle == self.h, "Dragged received argument that was not the handle"
        assert dx == 5, "DX was different from expected value"
        assert dy == 5, "DY was different from expected value"
        assert self.passed == 2, "Did not trigger clicked before dragging"
        self.passed = 3

    def released(self, handle):
        assert handle == self.h, "Released received argument that was not the handle"
        assert self.passed == 3, "Did not trigger dragged before released"
        self.passed = 4
if __name__ == "__main__":
t = test_vtk_ui_handle_interaction()
t.test()
|
[
"decimal.Decimal"
] |
[((1075, 1101), 'decimal.Decimal', 'decimal.Decimal', (["('%f' % dx)"], {}), "('%f' % dx)\n", (1090, 1101), False, 'import decimal\n'), ((1105, 1132), 'decimal.Decimal', 'decimal.Decimal', (["('%f' % 0.1)"], {}), "('%f' % 0.1)\n", (1120, 1132), False, 'import decimal\n'), ((1209, 1235), 'decimal.Decimal', 'decimal.Decimal', (["('%f' % dy)"], {}), "('%f' % dy)\n", (1224, 1235), False, 'import decimal\n'), ((1239, 1266), 'decimal.Decimal', 'decimal.Decimal', (["('%f' % 0.1)"], {}), "('%f' % 0.1)\n", (1254, 1266), False, 'import decimal\n')]
|
#!/usr/bin/python3
# scrape twitter example going to web page for twitter profile
# author: <NAME>
# date: 2015 06 02
# Note: MUST USE PYTHON 3 from terminal
import json
import urllib.request, urllib.parse
import random
from bs4 import BeautifulSoup
# Pool of (mostly joke) User-Agent strings; one is picked at random per request.
useragents = ['Mozilla/5.0','Bandicout Broadway 2.4','Carls Crawler Critter 1.0','Dirty Dungeon Diksearch 69','Internet Explorer but better']
# function that returns one random value from a list only
def singlerando(listofterms):
    """Return one element drawn uniformly at random from listofterms."""
    return random.choice(listofterms)
def parseT(twitterpage):
    """Parse a raw twitter profile page, print one tweet's text, and exit."""
    soup = BeautifulSoup(twitterpage)  # build a DOM from the raw HTML
    # Strip every javascript and stylesheet node so only content remains.
    for script in soup(["script", "style"]):
        script.extract()
    text = soup.get_text()
    tweets = soup.find_all("p", class_="tweet-text")
    print(tweets[1].text)
    exit()
def searchT(searchfor):
    """Fetch the twitter profile page for `searchfor` and hand it to parseT."""
    # Rotate the User-Agent so requests don't all look identical.
    headers = { 'User-Agent' : singlerando(useragents) }
    url = 'https://twitter.com/%s' % searchfor  # twitter profile URL
    request = urllib.request.Request(url, None, headers)
    response = urllib.request.urlopen(request)
    page = response.read().decode("utf8")
    parseT(page)
# global dictionary list of terms - do not change
# NOTE(review): diction and subset are never used in this script -- kept
# per the note above; confirm whether anything external relies on them.
diction = []
subset = []
# Script entry: prompt for a handle, scrape and print one tweet, then exit.
twitteruser = input('Enter twitter user: ')
searchT(twitteruser)
exit()
|
[
"bs4.BeautifulSoup",
"random.choice"
] |
[((495, 521), 'random.choice', 'random.choice', (['listofterms'], {}), '(listofterms)\n', (508, 521), False, 'import random\n'), ((573, 599), 'bs4.BeautifulSoup', 'BeautifulSoup', (['twitterpage'], {}), '(twitterpage)\n', (586, 599), False, 'from bs4 import BeautifulSoup\n')]
|
'''
This script contains examples of Logistic Regression analysis, using the
SciKit-Learn library.
Logistic regression is useful when trying to classify data between 2 binary
groups / labels. For example, a logistic model would be useful to predict if
someone has a disease (1) or does not have a disease (0).
Logistic regression uses the sigmoid function, which can only output between
0 - 1.
'''
import pathlib2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
# Data is from here: https://www.kaggle.com/c/titanic/data
csv_url = ('https://raw.githubusercontent.com/robert-g-butler/python_reference'
           '_guide/master/dummy_data/logistic_dummy_data.csv')
df = pd.read_csv(csv_url)
# Explore the data with graphs ------------------------------------------------
df.head()
df.info()
sns.set_style(style='whitegrid')
# Missingness heatmap: highlighted cells in the plot are NaN values.
sns.heatmap(data=df.isna(), cmap='viridis')  # yticklabels=False, cbar=False
plt.show()
sns.countplot(x='Survived', data=df, hue='Sex', palette='RdBu_r'); plt.show()
sns.countplot(x='Survived', data=df, hue='Pclass'); plt.show()
sns.distplot(df['Age'].dropna(), bins=30); plt.show()  # kde=False
sns.countplot(x='SibSp', data=df); plt.show()
df['Fare'].hist(bins=40, figsize=(10, 4)); plt.show()
# Clean missing values --------------------------------------------------------
# Clean missing Age values. Impute Age by Pclass.
# The boxplot motivates imputing Age from the per-class medians used below.
sns.boxplot(x='Pclass', y='Age', data=df); plt.show()
sns.heatmap(data=df.isna(), cmap='viridis'); plt.show()
def impute_age(cols):
    """Fill a missing Age with a representative age for the passenger's class.

    Parameters
    ----------
    cols : row-like with 'Age' and 'Pclass' entries (e.g. a Series or dict).

    Returns
    -------
    The original age when present; otherwise 37 for 1st class,
    29 for 2nd class, and 24 for any other class (estimated from the
    Pclass/Age boxplot above).
    """
    age = cols['Age']
    pclass = cols['Pclass']
    if pd.isna(age):
        # Dict dispatch replaces the original if/elif chain; 24 is the default.
        return {1: 37, 2: 29}.get(pclass, 24)
    return age
df['Age'] = df[['Age', 'Pclass']].apply(func=impute_age, axis=1)
sns.heatmap(data=df.isna(), cmap='viridis'); plt.show()
# Drop the Cabin variable because there are too many missing values.
df.drop(columns='Cabin', axis=1, inplace=True)
sns.heatmap(data=df.isna(), cmap='viridis'); plt.show()
# Drop any remaining rows with missing values.
df.dropna(inplace=True)
sns.heatmap(data=df.isna(), cmap='viridis'); plt.show()
# Update text & categorical columns with numerical data -----------------------
# Get numerical values for each text column.
# NOTE(review): this bare call is exploratory only -- its result is unused.
pd.get_dummies(df['Sex'])
# We must use 'drop_first' to avoid having 1 column perfectly predicting the
# others. This problem is called 'multi-colinearity'.
sex = pd.get_dummies(df['Sex'], drop_first=True)
embarked = pd.get_dummies(df['Embarked'], drop_first=True)
df = pd.concat([df, sex, embarked], axis=1)
df.head()
# Drop columns that are text or aren't useful for prediction.
df.drop(['PassengerId', 'Sex', 'Embarked', 'Name', 'Ticket'], axis=1, inplace=True)
df.head()
# Create the model ------------------------------------------------------------
# Hold out 25% of rows for evaluation, then fit a logistic classifier.
X = df.drop('Survived', axis=1)
y = df['Survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.25)
logmodel = LogisticRegression()
logmodel.fit(X=X_train, y=y_train)
predictions = logmodel.predict(X=X_test)
# Check the prediction accuracy with 'classification_report'
print(metrics.classification_report(y_true=y_test, y_pred=predictions))
# Check the prediction accuracy with a 'confusion_matrix'
metrics.confusion_matrix(y_true=y_test, y_pred=predictions)
|
[
"seaborn.set_style",
"matplotlib.pyplot.show",
"pandas.read_csv",
"pandas.get_dummies",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.classification_report",
"sklearn.linear_model.LogisticRegression",
"seaborn.boxplot",
"seaborn.countplot",
"sklearn.metrics.confusion_matrix",
"pandas.isna",
"pandas.concat"
] |
[((853, 873), 'pandas.read_csv', 'pd.read_csv', (['csv_url'], {}), '(csv_url)\n', (864, 873), True, 'import pandas as pd\n'), ((977, 1009), 'seaborn.set_style', 'sns.set_style', ([], {'style': '"""whitegrid"""'}), "(style='whitegrid')\n", (990, 1009), True, 'import seaborn as sns\n'), ((1087, 1097), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1095, 1097), True, 'import matplotlib.pyplot as plt\n'), ((1099, 1164), 'seaborn.countplot', 'sns.countplot', ([], {'x': '"""Survived"""', 'data': 'df', 'hue': '"""Sex"""', 'palette': '"""RdBu_r"""'}), "(x='Survived', data=df, hue='Sex', palette='RdBu_r')\n", (1112, 1164), True, 'import seaborn as sns\n'), ((1166, 1176), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1174, 1176), True, 'import matplotlib.pyplot as plt\n'), ((1177, 1227), 'seaborn.countplot', 'sns.countplot', ([], {'x': '"""Survived"""', 'data': 'df', 'hue': '"""Pclass"""'}), "(x='Survived', data=df, hue='Pclass')\n", (1190, 1227), True, 'import seaborn as sns\n'), ((1229, 1239), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1237, 1239), True, 'import matplotlib.pyplot as plt\n'), ((1283, 1293), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1291, 1293), True, 'import matplotlib.pyplot as plt\n'), ((1307, 1340), 'seaborn.countplot', 'sns.countplot', ([], {'x': '"""SibSp"""', 'data': 'df'}), "(x='SibSp', data=df)\n", (1320, 1340), True, 'import seaborn as sns\n'), ((1342, 1352), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1350, 1352), True, 'import matplotlib.pyplot as plt\n'), ((1396, 1406), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1404, 1406), True, 'import matplotlib.pyplot as plt\n'), ((1539, 1580), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""Pclass"""', 'y': '"""Age"""', 'data': 'df'}), "(x='Pclass', y='Age', data=df)\n", (1550, 1580), True, 'import seaborn as sns\n'), ((1582, 1592), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1590, 1592), True, 'import 
matplotlib.pyplot as plt\n'), ((1639, 1649), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1647, 1649), True, 'import matplotlib.pyplot as plt\n'), ((2015, 2025), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2023, 2025), True, 'import matplotlib.pyplot as plt\n'), ((2188, 2198), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2196, 2198), True, 'import matplotlib.pyplot as plt\n'), ((2316, 2326), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2324, 2326), True, 'import matplotlib.pyplot as plt\n'), ((2454, 2479), 'pandas.get_dummies', 'pd.get_dummies', (["df['Sex']"], {}), "(df['Sex'])\n", (2468, 2479), True, 'import pandas as pd\n'), ((2607, 2649), 'pandas.get_dummies', 'pd.get_dummies', (["df['Sex']"], {'drop_first': '(True)'}), "(df['Sex'], drop_first=True)\n", (2621, 2649), True, 'import pandas as pd\n'), ((2661, 2708), 'pandas.get_dummies', 'pd.get_dummies', (["df['Embarked']"], {'drop_first': '(True)'}), "(df['Embarked'], drop_first=True)\n", (2675, 2708), True, 'import pandas as pd\n'), ((2715, 2753), 'pandas.concat', 'pd.concat', (['[df, sex, embarked]'], {'axis': '(1)'}), '([df, sex, embarked], axis=1)\n', (2724, 2753), True, 'import pandas as pd\n'), ((3090, 3128), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.25)'}), '(X, y, test_size=0.25)\n', (3106, 3128), False, 'from sklearn.model_selection import train_test_split\n'), ((3140, 3160), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (3158, 3160), False, 'from sklearn.linear_model import LogisticRegression\n'), ((3431, 3490), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', ([], {'y_true': 'y_test', 'y_pred': 'predictions'}), '(y_true=y_test, y_pred=predictions)\n', (3455, 3490), False, 'from sklearn import metrics\n'), ((1730, 1742), 'pandas.isna', 'pd.isna', (['age'], {}), '(age)\n', (1737, 1742), True, 'import pandas as pd\n'), ((3306, 3370), 
'sklearn.metrics.classification_report', 'metrics.classification_report', ([], {'y_true': 'y_test', 'y_pred': 'predictions'}), '(y_true=y_test, y_pred=predictions)\n', (3335, 3370), False, 'from sklearn import metrics\n')]
|
import editor
editor.start()
|
[
"editor.start"
] |
[((14, 28), 'editor.start', 'editor.start', ([], {}), '()\n', (26, 28), False, 'import editor\n')]
|
import argparse
import hashlib
import binascii
def run(args):
    """Print the NTLM hash of args.password as lowercase hex.

    NTLM is MD4 over the UTF-16LE encoding of the password.
    NOTE(review): 'md4' needs OpenSSL legacy support on modern systems.
    """
    digest = hashlib.new('md4', args.password.encode('utf-16le')).digest()
    # bytes.hex() emits the same lowercase hex as binascii.hexlify().decode().
    print(digest.hex())
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Make an NTLM hash from a password.')
    parser.add_argument('--password', dest='password', type=str, help='the password to hash', required=True)
    try:
        args = parser.parse_args()
        run(args)
    except argparse.ArgumentError as e:
        # NOTE(review): argparse usually reports bad arguments via SystemExit,
        # not ArgumentError -- confirm this handler is ever reached.
        print("[!] {}".format(e))
        parser.print_usage()
        # Bug fix: the original called sys.exit(1) without importing sys,
        # which raised NameError here. SystemExit needs no import.
        raise SystemExit(1)
|
[
"binascii.hexlify",
"argparse.ArgumentParser"
] |
[((221, 294), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Make an NTLM hash from a password."""'}), "(description='Make an NTLM hash from a password.')\n", (244, 294), False, 'import argparse\n'), ((143, 162), 'binascii.hexlify', 'binascii.hexlify', (['h'], {}), '(h)\n', (159, 162), False, 'import binascii\n')]
|
r"""
Monte Carlo vs Black-Scholes-Merton
===========================================
Time values of options and guarantees for various in-the-moneyness
are calculated using Monte Carlo simulations and the Black-Scholes-Merton
pricing formula for European put options.
The Black-Scholes-Merton pricing formula for European put options
can be expressed as below, where
:math:`X` and :math:`S_{0}` correspond to the sum assured
and the initial account value in this example.
.. math::
p=Xe^{-rT}N\left(-d_{2}\right)-S_{0}N\left(-d_{1}\right)
d_{1}=\frac{\ln\left(\frac{S_{0}}{X}\right)+\left(r+\frac{\sigma^{2}}{2}\right)T}{\sigma\sqrt{T}}
d_{2}=d_{1}-\sigma\sqrt{T}
The graph below shows the results obtained from
the Monte Carlo simulations with 10,000 risk neutral scenarios,
and from the Black-Scholes-Merton formula.
Reference: *Options, Futures, and Other Derivatives* by <NAME>
.. seealso::
* :doc:`/libraries/notebooks/savings/savings_example1` notebook in the :mod:`~savings` library
"""
import modelx as mx
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import norm, lognorm
import numpy as np
model = mx.read_model("CashValue_ME_EX1")
proj = model.Projection
proj.model_point_table = proj.model_point_moneyness
monte_carlo = pd.Series(proj.pv_claims_over_av('MATURITY'), index=proj.model_point().index)
monte_carlo = list(np.average(monte_carlo[i]) for i in range(1, 10))
S0 = proj.model_point_table['premium_pp'] * proj.model_point_table['policy_count']
fig, ax = plt.subplots()
ax.scatter(S0, monte_carlo, s= 10, alpha=1, label='Monte Carlo')
ax.scatter(S0, proj.formula_option_put(120), alpha=0.5, label='Black-Scholes-Merton')
ax.legend()
ax.grid(True)
fig.suptitle('TVOG by ITM')
|
[
"numpy.average",
"matplotlib.pyplot.subplots",
"modelx.read_model"
] |
[((1161, 1194), 'modelx.read_model', 'mx.read_model', (['"""CashValue_ME_EX1"""'], {}), "('CashValue_ME_EX1')\n", (1174, 1194), True, 'import modelx as mx\n'), ((1531, 1545), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1543, 1545), True, 'import matplotlib.pyplot as plt\n'), ((1382, 1408), 'numpy.average', 'np.average', (['monte_carlo[i]'], {}), '(monte_carlo[i])\n', (1392, 1408), True, 'import numpy as np\n')]
|
"""
Summary
In this kata, you have to make a function named uglify_word (uglifyWord in Java and Javascript). It accepts a string parameter.
What does the uglify_word do?
It checks the char in the given string from the front with an iteration, in the iteration it does these steps:
There is a flag and it will be started from 1.
Check the current char in the iteration index.
If it is an alphabet character [a-zA-Z] and the flag value is equal to 1, then change this character to upper case.
If it is an alphabet character [a-zA-Z] and the flag value is equal to 0, then change this character to lower case.
Otherwise, if it is not an alphabet character, then set the flag value to 1.
If the current char is an alphabet character, do a boolean not operation to the flag.
After the iteration has done, return the fixed string that might have been changed in such iteration.
Examples
uglify_word("aaa") === "AaA"
uglify_word("AAA") === "AaA"
uglify_word("BbB") === "BbB"
uglify_word("aaa-bbb-ccc") === "AaA-BbB-CcC"
uglify_word("AaA-BbB-CcC") === "AaA-BbB-CcC"
uglify_word("eeee-ffff-gggg") === "EeEe-FfFf-GgGg"
uglify_word("EeEe-FfFf-GgGg") === "EeEe-FfFf-GgGg"
uglify_word("qwe123asdf456zxc") === "QwE123AsDf456ZxC"
uglify_word("Hello World") === "HeLlO WoRlD"
"""
from re import match
def uglify_word(s):
flag = True
final = ""
for char in s:
if match("[a-zA-Z]", char):
final += char.upper() if flag else char.lower()
flag = False if flag else True
else:
final += char
flag = True
return final
|
[
"re.match"
] |
[((1375, 1398), 're.match', 'match', (['"""[a-zA-Z]"""', 'char'], {}), "('[a-zA-Z]', char)\n", (1380, 1398), False, 'from re import match\n')]
|
# pylint: disable=W0611
#
# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:<EMAIL>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""some functions that may be useful for various checkers
"""
import re
import sys
import string
import astroid
from astroid import scoped_nodes
from logilab.common.compat import builtins
BUILTINS_NAME = builtins.__name__
COMP_NODE_TYPES = astroid.ListComp, astroid.SetComp, astroid.DictComp, astroid.GenExpr
PY3K = sys.version_info[0] == 3
if not PY3K:
EXCEPTIONS_MODULE = "exceptions"
else:
EXCEPTIONS_MODULE = "builtins"
ABC_METHODS = set(('abc.abstractproperty', 'abc.abstractmethod',
'abc.abstractclassmethod', 'abc.abstractstaticmethod'))
class NoSuchArgumentError(Exception):
pass
def is_inside_except(node):
"""Returns true if node is inside the name of an except handler."""
current = node
while current and not isinstance(current.parent, astroid.ExceptHandler):
current = current.parent
return current and current is current.parent.name
def get_all_elements(node):
"""Recursively returns all atoms in nested lists and tuples."""
if isinstance(node, (astroid.Tuple, astroid.List)):
for child in node.elts:
for e in get_all_elements(child):
yield e
else:
yield node
def clobber_in_except(node):
"""Checks if an assignment node in an except handler clobbers an existing
variable.
Returns (True, args for W0623) if assignment clobbers an existing variable,
(False, None) otherwise.
"""
if isinstance(node, astroid.AssAttr):
return (True, (node.attrname, 'object %r' % (node.expr.as_string(),)))
elif isinstance(node, astroid.AssName):
name = node.name
if is_builtin(name):
return (True, (name, 'builtins'))
else:
stmts = node.lookup(name)[1]
if (stmts and not isinstance(stmts[0].ass_type(),
(astroid.Assign, astroid.AugAssign,
astroid.ExceptHandler))):
return (True, (name, 'outer scope (line %s)' % stmts[0].fromlineno))
return (False, None)
def safe_infer(node):
"""return the inferred value for the given node.
Return None if inference failed or if there is some ambiguity (more than
one node has been inferred)
"""
try:
inferit = node.infer()
value = next(inferit)
except astroid.InferenceError:
return
try:
next(inferit)
return # None if there is ambiguity on the inferred node
except astroid.InferenceError:
return # there is some kind of ambiguity
except StopIteration:
return value
def is_super(node):
"""return True if the node is referencing the "super" builtin function
"""
if getattr(node, 'name', None) == 'super' and \
node.root().name == BUILTINS_NAME:
return True
return False
def is_error(node):
"""return true if the function does nothing but raising an exception"""
for child_node in node.get_children():
if isinstance(child_node, astroid.Raise):
return True
return False
def is_raising(body):
"""return true if the given statement node raise an exception"""
for node in body:
if isinstance(node, astroid.Raise):
return True
return False
def is_empty(body):
"""return true if the given node does nothing but 'pass'"""
return len(body) == 1 and isinstance(body[0], astroid.Pass)
builtins = builtins.__dict__.copy()
SPECIAL_BUILTINS = ('__builtins__',) # '__path__', '__file__')
def is_builtin_object(node):
"""Returns True if the given node is an object from the __builtin__ module."""
return node and node.root().name == BUILTINS_NAME
def is_builtin(name): # was is_native_builtin
"""return true if <name> could be considered as a builtin defined by python
"""
if name in builtins:
return True
if name in SPECIAL_BUILTINS:
return True
return False
def is_defined_before(var_node):
"""return True if the variable node is defined by a parent node (list,
set, dict, or generator comprehension, lambda) or in a previous sibling
node on the same line (statement_defining ; statement_using)
"""
varname = var_node.name
_node = var_node.parent
while _node:
if isinstance(_node, COMP_NODE_TYPES):
for ass_node in _node.nodes_of_class(astroid.AssName):
if ass_node.name == varname:
return True
elif isinstance(_node, astroid.For):
for ass_node in _node.target.nodes_of_class(astroid.AssName):
if ass_node.name == varname:
return True
elif isinstance(_node, astroid.With):
for expr, ids in _node.items:
if expr.parent_of(var_node):
break
if (ids and
isinstance(ids, astroid.AssName) and
ids.name == varname):
return True
elif isinstance(_node, (astroid.Lambda, astroid.Function)):
if _node.args.is_argument(varname):
return True
if getattr(_node, 'name', None) == varname:
return True
break
elif isinstance(_node, astroid.ExceptHandler):
if isinstance(_node.name, astroid.AssName):
ass_node = _node.name
if ass_node.name == varname:
return True
_node = _node.parent
# possibly multiple statements on the same line using semi colon separator
stmt = var_node.statement()
_node = stmt.previous_sibling()
lineno = stmt.fromlineno
while _node and _node.fromlineno == lineno:
for ass_node in _node.nodes_of_class(astroid.AssName):
if ass_node.name == varname:
return True
for imp_node in _node.nodes_of_class((astroid.From, astroid.Import)):
if varname in [name[1] or name[0] for name in imp_node.names]:
return True
_node = _node.previous_sibling()
return False
def is_func_default(node):
"""return true if the given Name node is used in function default argument's
value
"""
parent = node.scope()
if isinstance(parent, astroid.Function):
for default_node in parent.args.defaults:
for default_name_node in default_node.nodes_of_class(astroid.Name):
if default_name_node is node:
return True
return False
def is_func_decorator(node):
"""return true if the name is used in function decorator"""
parent = node.parent
while parent is not None:
if isinstance(parent, astroid.Decorators):
return True
if (parent.is_statement or
isinstance(parent, astroid.Lambda) or
isinstance(parent, (scoped_nodes.ComprehensionScope,
scoped_nodes.ListComp))):
break
parent = parent.parent
return False
def is_ancestor_name(frame, node):
"""return True if `frame` is a astroid.Class node with `node` in the
subtree of its bases attribute
"""
try:
bases = frame.bases
except AttributeError:
return False
for base in bases:
if node in base.nodes_of_class(astroid.Name):
return True
return False
def assign_parent(node):
"""return the higher parent which is not an AssName, Tuple or List node
"""
while node and isinstance(node, (astroid.AssName,
astroid.Tuple,
astroid.List)):
node = node.parent
return node
def overrides_an_abstract_method(class_node, name):
"""return True if pnode is a parent of node"""
for ancestor in class_node.ancestors():
if name in ancestor and isinstance(ancestor[name], astroid.Function) and \
ancestor[name].is_abstract(pass_is_abstract=False):
return True
return False
def overrides_a_method(class_node, name):
"""return True if <name> is a method overridden from an ancestor"""
for ancestor in class_node.ancestors():
if name in ancestor and isinstance(ancestor[name], astroid.Function):
return True
return False
PYMETHODS = set(('__new__', '__init__', '__del__', '__hash__',
'__str__', '__repr__',
'__len__', '__iter__',
'__delete__', '__get__', '__set__',
'__getitem__', '__setitem__', '__delitem__', '__contains__',
'__getattribute__', '__getattr__', '__setattr__', '__delattr__',
'__call__',
'__enter__', '__exit__',
'__cmp__', '__ge__', '__gt__', '__le__', '__lt__', '__eq__',
'__nonzero__', '__neg__', '__invert__',
'__mul__', '__imul__', '__rmul__',
'__div__', '__idiv__', '__rdiv__',
'__add__', '__iadd__', '__radd__',
'__sub__', '__isub__', '__rsub__',
'__pow__', '__ipow__', '__rpow__',
'__mod__', '__imod__', '__rmod__',
'__and__', '__iand__', '__rand__',
'__or__', '__ior__', '__ror__',
'__xor__', '__ixor__', '__rxor__',
# XXX To be continued
))
def check_messages(*messages):
"""decorator to store messages that are handled by a checker method"""
def store_messages(func):
func.checks_msgs = messages
return func
return store_messages
class IncompleteFormatString(Exception):
"""A format string ended in the middle of a format specifier."""
pass
class UnsupportedFormatCharacter(Exception):
"""A format character in a format string is not one of the supported
format characters."""
def __init__(self, index):
Exception.__init__(self, index)
self.index = index
def parse_format_string(format_string):
"""Parses a format string, returning a tuple of (keys, num_args), where keys
is the set of mapping keys in the format string, and num_args is the number
of arguments required by the format string. Raises
IncompleteFormatString or UnsupportedFormatCharacter if a
parse error occurs."""
keys = set()
num_args = 0
def next_char(i):
i += 1
if i == len(format_string):
raise IncompleteFormatString
return (i, format_string[i])
i = 0
while i < len(format_string):
char = format_string[i]
if char == '%':
i, char = next_char(i)
# Parse the mapping key (optional).
key = None
if char == '(':
depth = 1
i, char = next_char(i)
key_start = i
while depth != 0:
if char == '(':
depth += 1
elif char == ')':
depth -= 1
i, char = next_char(i)
key_end = i - 1
key = format_string[key_start:key_end]
# Parse the conversion flags (optional).
while char in '#0- +':
i, char = next_char(i)
# Parse the minimum field width (optional).
if char == '*':
num_args += 1
i, char = next_char(i)
else:
while char in string.digits:
i, char = next_char(i)
# Parse the precision (optional).
if char == '.':
i, char = next_char(i)
if char == '*':
num_args += 1
i, char = next_char(i)
else:
while char in string.digits:
i, char = next_char(i)
# Parse the length modifier (optional).
if char in 'hlL':
i, char = next_char(i)
# Parse the conversion type (mandatory).
if PY3K:
flags = 'diouxXeEfFgGcrs%a'
else:
flags = 'diouxXeEfFgGcrs%'
if char not in flags:
raise UnsupportedFormatCharacter(i)
if key:
keys.add(key)
elif char != '%':
num_args += 1
i += 1
return keys, num_args
def is_attr_protected(attrname):
"""return True if attribute name is protected (start with _ and some other
details), False otherwise.
"""
return attrname[0] == '_' and not attrname == '_' and not (
attrname.startswith('__') and attrname.endswith('__'))
def node_frame_class(node):
"""return klass node for a method node (or a staticmethod or a
classmethod), return null otherwise
"""
klass = node.frame()
while klass is not None and not isinstance(klass, astroid.Class):
if klass.parent is None:
klass = None
else:
klass = klass.parent.frame()
return klass
def is_super_call(expr):
"""return True if expression node is a function call and if function name
is super. Check before that you're in a method.
"""
return (isinstance(expr, astroid.CallFunc) and
isinstance(expr.func, astroid.Name) and
expr.func.name == 'super')
def is_attr_private(attrname):
"""Check that attribute name is private (at least two leading underscores,
at most one trailing underscore)
"""
regex = re.compile('^_{2,}.*[^_]+_?$')
return regex.match(attrname)
def get_argument_from_call(callfunc_node, position=None, keyword=None):
"""Returns the specified argument from a function call.
:param callfunc_node: Node representing a function call to check.
:param int position: position of the argument.
:param str keyword: the keyword of the argument.
:returns: The node representing the argument, None if the argument is not found.
:raises ValueError: if both position and keyword are None.
:raises NoSuchArgumentError: if no argument at the provided position or with
the provided keyword.
"""
if position is None and keyword is None:
raise ValueError('Must specify at least one of: position or keyword.')
try:
if position is not None and not isinstance(callfunc_node.args[position], astroid.Keyword):
return callfunc_node.args[position]
except IndexError as error:
raise NoSuchArgumentError(error)
if keyword:
for arg in callfunc_node.args:
if isinstance(arg, astroid.Keyword) and arg.arg == keyword:
return arg.value
raise NoSuchArgumentError
def inherit_from_std_ex(node):
"""
Return true if the given class node is subclass of
exceptions.Exception.
"""
if node.name in ('Exception', 'BaseException') \
and node.root().name == EXCEPTIONS_MODULE:
return True
return any(inherit_from_std_ex(parent)
for parent in node.ancestors(recurs=False))
def is_import_error(handler):
"""
Check if the given exception handler catches
ImportError.
:param handler: A node, representing an ExceptHandler node.
:returns: True if the handler catches ImportError, False otherwise.
"""
names = None
if isinstance(handler.type, astroid.Tuple):
names = [name for name in handler.type.elts
if isinstance(name, astroid.Name)]
elif isinstance(handler.type, astroid.Name):
names = [handler.type]
else:
# Don't try to infer that.
return
for name in names:
try:
for infered in name.infer():
if (isinstance(infered, astroid.Class) and
inherit_from_std_ex(infered) and
infered.name == 'ImportError'):
return True
except astroid.InferenceError:
continue
def has_known_bases(klass):
"""Returns true if all base classes of a class could be inferred."""
try:
return klass._all_bases_known
except AttributeError:
pass
for base in klass.bases:
result = safe_infer(base)
# TODO: check for A->B->A->B pattern in class structure too?
if (not isinstance(result, astroid.Class) or
result is klass or
not has_known_bases(result)):
klass._all_bases_known = False
return False
klass._all_bases_known = True
return True
def decorated_with_property(node):
""" Detect if the given function node is decorated with a property. """
if not node.decorators:
return False
for decorator in node.decorators.nodes:
if not isinstance(decorator, astroid.Name):
continue
try:
for infered in decorator.infer():
if isinstance(infered, astroid.Class):
if (infered.root().name == BUILTINS_NAME and
infered.name == 'property'):
return True
for ancestor in infered.ancestors():
if (ancestor.name == 'property' and
ancestor.root().name == BUILTINS_NAME):
return True
except astroid.InferenceError:
pass
def decorated_with_abc(func):
"""Determine if the `func` node is decorated with `abc` decorators."""
if func.decorators:
for node in func.decorators.nodes:
try:
infered = next(node.infer())
except astroid.InferenceError:
continue
if infered and infered.qname() in ABC_METHODS:
return True
def unimplemented_abstract_methods(node, is_abstract_cb=decorated_with_abc):
"""
Get the unimplemented abstract methods for the given *node*.
A method can be considered abstract if the callback *is_abstract_cb*
returns a ``True`` value. The check defaults to verifying that
a method is decorated with abstract methods.
The function will work only for new-style classes. For old-style
classes, it will simply return an empty dictionary.
For the rest of them, it will return a dictionary of abstract method
names and their inferred objects.
"""
visited = {}
try:
mro = reversed(node.mro())
except NotImplementedError:
# Old style class, it will not have a mro.
return {}
for ancestor in mro:
for obj in ancestor.values():
infered = obj
if isinstance(obj, astroid.AssName):
infered = safe_infer(obj)
if not infered:
continue
if not isinstance(infered, astroid.Function):
if obj.name in visited:
del visited[obj.name]
if isinstance(infered, astroid.Function):
# It's critical to use the original name,
# since after inferring, an object can be something
# else than expected, as in the case of the
# following assignment.
#
# class A:
# def keys(self): pass
# __iter__ = keys
abstract = is_abstract_cb(infered)
if abstract:
visited[obj.name] = infered
elif not abstract and obj.name in visited:
del visited[obj.name]
return visited
|
[
"logilab.common.compat.builtins.__dict__.copy",
"re.compile"
] |
[((4277, 4301), 'logilab.common.compat.builtins.__dict__.copy', 'builtins.__dict__.copy', ([], {}), '()\n', (4299, 4301), False, 'from logilab.common.compat import builtins\n'), ((14366, 14396), 're.compile', 're.compile', (['"""^_{2,}.*[^_]+_?$"""'], {}), "('^_{2,}.*[^_]+_?$')\n", (14376, 14396), False, 'import re\n')]
|
import unittest
class Node(object):
def __init__(self, data, children=None):
self.data = data
if children is None:
self.children = []
else:
self.children = children
def is_connected(a, b):
todo = [a]
seen = set(todo)
while len(todo) > 0:
current = todo.pop() # DFS
if current is b:
return True
for child in current.children:
if child not in seen:
seen.add(child)
todo.append(child)
return False
class IsConnectedTests(unittest.TestCase):
def test_is_connected(self):
a = Node("a")
b = Node("b")
c = Node("c")
a.children.append(b)
b.children.append(c)
c.children.append(a)
self.assertTrue(is_connected(a, c))
def test_is_not_connected(self):
a = Node("a")
b = Node("b")
self.assertFalse(is_connected(a, b))
def test_does_not_get_stuck_in_loops(self):
# Create loop
a = Node("a")
b = Node("b")
c = Node("c")
a.children.append(b)
b.children.append(c)
c.children.append(a)
d = Node("d")
self.assertFalse(is_connected(a, d))
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main"
] |
[((1115, 1130), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1128, 1130), False, 'import unittest\n')]
|
import types
from collections.abc import Iterator
import torch
import torch.nn as nn
from uninas.register import Register
from uninas.utils.shape import Shape, ShapeList, ShapeOrList
from uninas.utils.args import ArgsInterface
from uninas.utils.paths import make_base_dirs
from uninas.utils.torch.misc import randomize_parameters
from typing import Union, List
tensor_type = Union[torch.Tensor, List[torch.Tensor]]
class AbstractModule(nn.Module):
"""
the basis for all .config() saving + restoring
"""
def __init__(self, *_, **__):
nn.Module.__init__(self)
if len(_) > 0:
print('unknown args (%s):' % self.__class__.__name__, __)
if len(__) > 0:
print('unknown kwargs (%s):' % self.__class__.__name__, __)
# dicts that contain the keys of everything that goes into a config and can be restored
self._kwargs = [] # saved, printed
self._np_kwargs = [] # saved, not printed
self._p_kwargs = [] # not saved, printed
self._submodules = []
self._submodule_lists = []
self._submodule_dicts = []
self._add_to_print_kwargs(**__)
self.dropout_rate = None
self.cached = dict(built=False) # some info about shapes in/out
def set(self, **kwargs):
""" set new value to a parameter and kwargs / misc_kwargs """
for k, v in kwargs.items():
self.__dict__[k] = v
def get_cached(self, k: str, default=None):
return self.cached.get(k, default)
def is_built(self) -> bool:
return self.cached.get("built", False)
def get_shape_in(self, may_be_none=False) -> ShapeOrList:
s_in = self.get_cached('shape_in')
if not may_be_none:
assert isinstance(s_in, ShapeOrList.__args__)
return s_in
def get_shape_out(self, may_be_none=False) -> ShapeOrList:
s_out = self.get_cached('shape_out')
if not may_be_none:
assert isinstance(s_out, ShapeOrList.__args__)
return s_out
# listing modules/kwargs to save+restore via configs ---------------------------------------------------------------
def _add(self, lst: list, are_modules=False, **kwargs):
for k, v in kwargs.items():
lst.append(k)
if are_modules:
self.add_module(k, v)
else:
self.__dict__[k] = v
def _add_to_kwargs(self, **kwargs):
""" store named values (not Modules, which need to have config stored and be rebuilt) """
self._add(self._kwargs, are_modules=False, **kwargs)
def _add_to_kwargs_np(self, **kwargs):
""" store named values (not Modules, which need to have config stored and be rebuilt) """
self._add(self._np_kwargs, are_modules=False, **kwargs)
def _add_to_print_kwargs(self, **kwargs):
""" store named values for printing only """
self._add(self._p_kwargs, are_modules=False, **kwargs)
def _add_to_submodules(self, **kwargs):
""" store named modules """
self._add(self._submodules, are_modules=True, **kwargs)
def _add_to_submodule_lists(self, **kwargs):
""" store named lists of modules (nn.ModuleList) """
self._add(self._submodule_lists, are_modules=True, **kwargs)
def _add_to_submodule_dict(self, **kwargs):
""" store named dicts of modules (nn.ModuleDict) """
self._add(self._submodule_dicts, are_modules=True, **kwargs)
def kwargs(self):
return {k: self.__dict__[k] for k in self._kwargs+self._np_kwargs}
def config(self, **_) -> dict:
"""
get a dictionary describing this module, so that a builder can assemble it correctly again
subclasses may receive specific instructions via kwargs, e.g. whether to finalize a search architecture
"""
cfg_keys = ['kwargs', 'submodules', 'submodule_lists', 'submodule_dicts']
cfg = dict(name=self.__class__.__name__)
cfg.update({k: {} for k in cfg_keys})
for k in self._kwargs+self._np_kwargs:
cfg['kwargs'][k] = self.__dict__[k]
for k in self._submodules:
cfg['submodules'][k] = self._modules[k].config(**_)
for k in self._submodule_lists:
lst = self._modules[k]
cfg['submodule_lists'][k] = [v.config(**_) if v is not None else None for v in iter(lst)]
for k in self._submodule_dicts:
dct = self._modules[k]
cfg['submodule_dicts'][k] = {dk: dv.config(**_) if dv is not None else None for dk, dv in dct.items()}
# remove empty dicts
for k in list(cfg_keys):
if len(cfg[k]) == 0:
cfg.pop(k)
return cfg
@classmethod
def from_config(cls, **kwargs):
""" upon receiving a dictionary as created in self.config(), reassemble this module properly """
kwargs_ = kwargs.pop('kwargs', {})
submodules_ = {k: Register.builder.from_config(v) if v is not None else None
for k, v in kwargs.pop('submodules', {}).items()}
submodule_lists_ = {k: nn.ModuleList([Register.builder.from_config(v) if v is not None else None for v in lst])
for k, lst in kwargs.pop('submodule_lists', {}).items()}
submodule_dicts_ = {k: {dk: Register.builder.from_config(dv) if dv is not None else None
for dk, dv in dct.items()}
for k, dct in kwargs.pop('submodule_dicts', {}).items()}
return cls(**kwargs_, **submodules_, **submodule_lists_, **submodule_dicts_, **kwargs)
# presenting as string ---------------------------------------------------------------------------------------------
def _str_kwargs(self) -> str:
lst = []
for k in self._kwargs+self._p_kwargs:
lst.append('%s=%s' % (k, str(self.__dict__[k])))
return ', '.join(lst)
@staticmethod
def _str_tuple_submodule(obj, depth: int, max_depth: int, name='') -> [(int, str)]:
""" describe this module via indentation instructions and strings """
ss = []
if obj is not None and len(obj) > 0:
if depth < max_depth:
if isinstance(obj, (dict, nn.ModuleDict)):
for n, m in obj.items():
if isinstance(m, AbstractModule):
ss += m.str_tuples(depth=depth+1, max_depth=max_depth, name=n)
else:
ss += AbstractModule._str_tuple_submodule(m, depth + 1, max_depth, name=n)
elif isinstance(obj, (list, nn.ModuleList)):
for i, m in enumerate(obj):
n = '(%d)' % i
if isinstance(m, AbstractModule):
ss += m.str_tuples(depth=depth+1, max_depth=max_depth, name=n)
else:
ss += AbstractModule._str_tuple_submodule(m, depth + 1, max_depth, name=n)
else:
ss.append((depth, '<%d entries>' % (len(obj))))
if len(ss) == 0:
return []
s0, s1 = '%s = [' % name, ']'
if len(ss) == 1:
return [(depth, s0 + ss[0][1] + s1)]
return [(depth, s0)] + ss + [(depth, s1)]
def str_tuples(self, depth=0, max_depth=5, name='', add_s=None, add_sl=None, add_sd=None) -> [(int, str)]:
""" describe this module via indentation instructions and strings """
add_s = {} if add_s is None else add_s.copy()
add_sl = {} if add_sl is None else add_sl.copy()
add_sd = {} if add_sd is None else add_sd.copy()
add_s['Modules'] = {k: self._modules[k] for k in self._submodules}
add_sl['Module Lists'] = {k: self._modules[k] for k in self._submodule_lists}
add_sd['Module Dicts'] = {k: self._modules[k] for k in self._submodule_dicts}
s0 = '{n}{cls}({k}) ['.format(**{
'n': ('%s = ' % name) if len(name) > 0 else '',
'cls': self.__class__.__name__,
'k': self._str_kwargs(),
})
s1 = ']'
if depth >= max_depth:
ss = [(depth, '<%d modules, %d module lists, %d module dicts>' % (len(add_s), len(add_sl), len(add_sd)))]
else:
ss = []
for k, v in add_s.copy().items():
ss.extend(self._str_tuple_submodule(v, depth+1, max_depth, name=k))
for k, v in add_sl.copy().items():
ss.extend(self._str_tuple_submodule(v, depth+1, max_depth, name=k))
for k, v in add_sd.copy().items():
ss.extend(self._str_tuple_submodule(v, depth+1, max_depth, name=k))
ss = [s for s in ss if s is not None]
if len(ss) == 0:
return [(depth, s0 + s1)]
if len(ss) == 1:
return [(depth, s0 + ss[0][1] + s1)]
return [(depth, s0)] + ss + [(depth, s1)]
def str(self, depth=0, max_depth=5, name='', add_s=None, add_sl=None, add_sd=None) -> str:
strings = self.str_tuples(depth, max_depth, name, add_s, add_sl, add_sd)
return ''.join('\n%s%s' % ('. '*d, s) for d, s in strings)
# (recursive) utility ----------------------------------------------------------------------------------------------
@classmethod
def _get_base_modules(cls, m) -> list:
base_modules = []
if isinstance(m, AbstractModule):
base_modules.append(m)
elif isinstance(m, nn.ModuleList):
for m2 in iter(m):
base_modules.extend(cls._get_base_modules(m2))
elif isinstance(m, nn.ModuleDict):
for m2 in m.values():
base_modules.extend(cls._get_base_modules(m2))
return base_modules
def base_modules(self, recursive=True) -> Iterator:
""" yield all base modules, therefore all layers/modules of this project """
fun = self.modules if recursive else self.children
for m in fun():
for m2 in self._get_base_modules(m):
yield m2
def base_modules_by_condition(self, condition, recursive=True) -> Iterator:
""" get list of all base modules that pass a condition, condition is a function that returns a boolean """
for m in self.base_modules(recursive=recursive):
if condition(m):
yield m
def hierarchical_base_modules(self) -> (type, ShapeOrList, ShapeOrList, list):
""" get a hierarchical/recursive representation of (class, shapes_in, shapes_out, submodules) """
submodules = list(self.base_modules(recursive=False))
r0 = self.get_shape_in(may_be_none=True)
r1 = self.get_shape_out(may_be_none=True)
r2 = [m.hierarchical_base_modules() for m in submodules]
return self, r0, r1, r2
def set_dropout_rate(self, p=None):
""" set the dropout rate of every dropout layer to p, no change for p=None """
for m in self.base_modules(recursive=False):
m.set_dropout_rate(p)
def get_device(self) -> torch.device:
""" get the device of one of the weights """
for w in self.parameters():
return w.device
def is_layer(self, cls) -> bool:
return isinstance(self, cls)
# building and running ---------------------------------------------------------------------------------------------
def probe_outputs(self, s_in: ShapeOrList, module: nn.Module = None, multiple_outputs=False) -> ShapeOrList:
""" returning the output shape of one forward pass using zero tensors """
with torch.no_grad():
if module is None:
module = self
x = s_in.random_tensor(batch_size=2)
s = module(x)
if multiple_outputs:
return ShapeList([Shape(list(sx.shape)[1:]) for sx in s])
return Shape(list(s.shape)[1:])
def build(self, *args, **kwargs) -> ShapeOrList:
""" build/compile this module, save input/output shape(s), return output shape """
assert not self.is_built(), "The module is already built"
for arg in list(args) + list(kwargs.values()):
if isinstance(arg, (Shape, ShapeList)):
self.cached['shape_in'] = arg.copy(copy_id=True)
break
s_out = self._build(*args, **kwargs)
self.cached['shape_out'] = s_out.copy(copy_id=True)
self.cached['built'] = True
return s_out
def _build(self, *args, **kwargs) -> ShapeOrList:
""" build/compile this module, return output shape """
raise NotImplementedError
def forward(self, x: tensor_type) -> tensor_type:
raise NotImplementedError
def export_onnx(self, save_path: str, **kwargs):
save_path = make_base_dirs(save_path)
x = self.get_shape_in(may_be_none=False).random_tensor(batch_size=2).to(self.get_device())
torch.onnx.export(model=self, args=x, f=save_path, **kwargs)
# can disable state dict
def disable_state_dict(self):
"""
makes the state_dict irreversibly disfunctional for this module and all children
this is used to prevent specific modules to save/load
"""
def state_dict(self_, *args, **kwargs):
return None
def _load_from_state_dict(self_, *args, **kwargs):
pass
def _disable_state_dict(module: nn.Module):
for name, child in module._modules.items():
if child is not None:
_disable_state_dict(child)
module.state_dict = types.MethodType(state_dict, self)
module._load_from_state_dict = types.MethodType(_load_from_state_dict, self)
_disable_state_dict(self)
_disable_state_dict = None
# misc -------------------------------------------------------------------------------------------------------------
def randomize_parameters(self):
""" set all parameters to normally distributed values """
randomize_parameters(self)
class AbstractArgsModule(AbstractModule, ArgsInterface):
"""
an AbstractModule that can easily store+reuse the parsed argparse arguments of previous times
"""
def __init__(self, *_, **kwargs_to_store):
AbstractModule.__init__(self, *_)
ArgsInterface.__init__(self)
self._add_to_kwargs(**kwargs_to_store)
def _build(self, *args) -> ShapeOrList:
raise NotImplementedError
def forward(self, x: tensor_type) -> tensor_type:
raise NotImplementedError
|
[
"uninas.utils.torch.misc.randomize_parameters",
"types.MethodType",
"uninas.utils.paths.make_base_dirs",
"torch.nn.Module.__init__",
"uninas.utils.args.ArgsInterface.__init__",
"uninas.register.Register.builder.from_config",
"torch.no_grad",
"torch.onnx.export"
] |
[((561, 585), 'torch.nn.Module.__init__', 'nn.Module.__init__', (['self'], {}), '(self)\n', (579, 585), True, 'import torch.nn as nn\n'), ((12825, 12850), 'uninas.utils.paths.make_base_dirs', 'make_base_dirs', (['save_path'], {}), '(save_path)\n', (12839, 12850), False, 'from uninas.utils.paths import make_base_dirs\n'), ((12958, 13018), 'torch.onnx.export', 'torch.onnx.export', ([], {'model': 'self', 'args': 'x', 'f': 'save_path'}), '(model=self, args=x, f=save_path, **kwargs)\n', (12975, 13018), False, 'import torch\n'), ((14062, 14088), 'uninas.utils.torch.misc.randomize_parameters', 'randomize_parameters', (['self'], {}), '(self)\n', (14082, 14088), False, 'from uninas.utils.torch.misc import randomize_parameters\n'), ((14360, 14388), 'uninas.utils.args.ArgsInterface.__init__', 'ArgsInterface.__init__', (['self'], {}), '(self)\n', (14382, 14388), False, 'from uninas.utils.args import ArgsInterface\n'), ((11639, 11654), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11652, 11654), False, 'import torch\n'), ((13635, 13669), 'types.MethodType', 'types.MethodType', (['state_dict', 'self'], {}), '(state_dict, self)\n', (13651, 13669), False, 'import types\n'), ((13713, 13758), 'types.MethodType', 'types.MethodType', (['_load_from_state_dict', 'self'], {}), '(_load_from_state_dict, self)\n', (13729, 13758), False, 'import types\n'), ((4975, 5006), 'uninas.register.Register.builder.from_config', 'Register.builder.from_config', (['v'], {}), '(v)\n', (5003, 5006), False, 'from uninas.register import Register\n'), ((5348, 5380), 'uninas.register.Register.builder.from_config', 'Register.builder.from_config', (['dv'], {}), '(dv)\n', (5376, 5380), False, 'from uninas.register import Register\n'), ((5153, 5184), 'uninas.register.Register.builder.from_config', 'Register.builder.from_config', (['v'], {}), '(v)\n', (5181, 5184), False, 'from uninas.register import Register\n')]
|
import os
import numpy as np
from sst import Fisher
from sst import camb_tools as ct
from sst import plot_tools
opj = os.path.join
def get_cls(cls_path, lmax, A_lens=1):
'''
returns
-------
cls : array-like
Lensed Cls (shape (4,lmax-1) with BB lensing power
reduced depending on A_lens.
order: TT, EE, BB, TE
'''
cls_nolens, _ = ct.get_spectra(cls_path, tag='r0',
lensed=False, prim_type='tot')
cls_lensed, _ = ct.get_spectra(cls_path, tag='r0',
lensed=True, prim_type='tot')
# truncate to lmax
cls_nolens = cls_nolens[:,:lmax-1]
cls_lensed = cls_lensed[:,:lmax-1]
BB_nolens = cls_nolens[2]
BB_lensed = cls_lensed[2]
# difference BB (lensed - unlensed = lens_contribution)
BB_lens_contr = BB_lensed - BB_nolens
# depending on A_lens, remove lensing contribution
cls_lensed[2] -= (1. - A_lens) * BB_lens_contr
return cls_lensed
def get_nls(lat_path, lmax, sac_path=None,
deproj_level=0):
'''
Arguments
-----------------
lat_path : str
Path to folder containing LAT noise cuves
lmax : int
Keyword Arguments
-----------------
sac_path : str, None
Path to folder containing SAC noise cuves
deproj_level : int
Foreground cleaning assumption, 0 - 4
0 is most optimistic
Returns
-------
nls : array-like
Shape (6, lmax - 1), order: TT, EE, BB, TE, TB, EB
Notes
-----
Looks like SAC noise curves are only for pol, so use
SAT TT for TT.
'''
# Add option to skip SAT.
# SO V3 (deproj0, S2(goal) 16000 deg2
# init noise curves (fill with 1K^2 noise)
# truncate later
nls = np.ones((6, 20000)) * 1e12
# load up LAT
# lat_tt_file = 'S4_2LAT_T_default_noisecurves_'\
# 'deproj{}_SENS0_mask_16000_ell_TT_yy.txt'.format(deproj_level) # NOTE
lat_tt_file = 'SOV3_T_default1-4-2_noisecurves_'\
'deproj{}_SENS2_mask_16000_ell_TT_yy.txt'.format(deproj_level)
lat_pol_file = lat_tt_file.replace('_T_', '_pol_')
lat_pol_file = lat_pol_file.replace('_TT_yy', '_EE_BB')
lat_tt_file = opj(lat_path, lat_tt_file)
lat_pol_file = opj(lat_path, lat_pol_file)
# load lat
ells_tt, nl_tt, ells_pol, nl_ee, nl_bb = ct.get_so_noise(
tt_file=lat_tt_file, pol_file=lat_pol_file, sat_file=None)
lmin_tt = int(ells_tt[0])
lmax_tt = int(ells_tt[-1])
#lmin_pol = int(ells_pol[0])
lmin_pol = 30 # as suggested on wiki
lmax_pol = int(ells_pol[-1])
if sac_path is not None:
sac_file = 'Db_noise_04.00_ilc_bin3_av.dat'
sac_file = opj(sac_path, sac_file)
# load sac, note these are Dell bandpowers
ell, sac_ee, sac_bb = np.loadtxt(sac_file).transpose()
dell = ell * (ell + 1) / 2. / np.pi
sac_ee /= dell
sac_bb /= dell
# interpolate
lmin_sac = int(ell[0])
lmax_sac = int(ell[-1])
ell_f = np.arange(lmin_sac, lmax_sac+1)
sac_ee = np.interp(ell_f, ell, sac_ee)
sac_bb = np.interp(ell_f, ell, sac_bb)
# combine, first lat then (if needed )sac because lat has lower lmin
nls[0,lmin_tt - 2:lmax_tt - 1] = nl_tt
nls[1,lmin_pol - 2:lmax_pol - 1] = nl_ee[ells_pol >= lmin_pol]
nls[2,lmin_pol - 2:lmax_pol - 1] = nl_bb[ells_pol >= lmin_pol]
nls[3] *= 0.
nls[4] *= 0.
nls[5] *= 0.
if sac_path is not None:
nls[1,lmin_sac - 2:lmax_sac - 1] = sac_ee
nls[2,lmin_sac - 2:lmax_sac - 1] = sac_bb
# trunacte to lmax
nls = nls[:,:lmax - 1]
return nls
def get_fiducial_nls(noise_amp_temp, noise_amp_pol, lmax):
'''
Create N_{\ell} = noise_amp^2 noise arrays.
Arguments
-----------------
noise_amp_temp : float
Noise ampltidue in uK arcmin.
noise_amp_pol : float
lmax : int
Returns
-------
nls : array-like
Shape (6, lmax - 1), order: TT, EE, BB, TE, TB, EB
'''
# init noise curves (fill with 1K^2 noise)
# truncate later
nls = np.ones((6, 20000)) * 1e12
# N_{\ell} = uK^2 radians^2
arcmin2radians = np.pi / 180. / 60.
noise_amp_temp *= arcmin2radians
noise_amp_pol *= arcmin2radians
# combine, first lat then sac because lat has lower lmin
nls[0,:] = noise_amp_temp ** 2
nls[1,:] = noise_amp_pol ** 2
nls[2,:] = noise_amp_pol ** 2
nls[3] *= 0.
nls[4] *= 0.
nls[5] *= 0.
# trunacte to lmax
nls = nls[:,:lmax - 1]
return nls
def get_prim_amp(prim_template='local', scalar_amp=2.1e-9):
common_amp = 16 * np.pi**4 * scalar_amp**2
if prim_template == 'local':
return 2 * common_amp
elif prim_template == 'equilateral':
return 6 * common_amp
elif prim_template == 'orthogonal':
return 6 * common_amp
def get_totcov(cls, nls, no_ee=False, no_tt=False):
totcov = nls.copy()
totcov[:4,:] += cls
if no_ee:
totcov[1,:] = 1e12
if no_tt:
totcov[0,:] = 1e12
return totcov
def run_fisher(template, ana_dir, camb_dir, totcov, ells, lmin=2, lmax=4999,fsky=0.03,
plot_tag='', tag=None):
F = Fisher(ana_dir)
camb_opts = dict(camb_out_dir=camb_dir,
tag='r0',
lensed=False,
high_ell=True)
F.get_camb_output(**camb_opts)
radii = F.get_updated_radii()
radii = radii[::2]
F.get_bins(lmin=lmin, lmax=lmax, load=True, verbose=False,
parity='odd', tag=tag)
# F.get_beta(func='equilateral', load=True, verbose=False, radii=radii, tag=tag)
F.get_beta(func='equilateral', load=True, verbose=True, radii=radii, tag=tag,
interp_factor=10)
# F.get_binned_bispec(template, load=True, tag=tag)
F.get_binned_bispec(template, load=True, tag=tag)
bin_invcov, bin_cov = F.get_binned_invcov(ells, totcov, return_bin_cov=True)
# Plot invcov, cov
plot_opts = dict(lmin=2)
bins = F.bins['bins']
plot_tools.cls_matrix(plot_tag, bins, bin_invcov, log=False, plot_dell=False,
inv=True, **plot_opts)
plot_tools.cls_matrix(plot_tag.replace('invcov', 'cov_dell'),
bins, bin_cov, log=False, plot_dell=True,
**plot_opts)
print(lmin, lmax)
fisher = F.naive_fisher(bin_invcov, lmin=lmin, lmax=lmax, fsky=fsky)
sigma = 1/np.sqrt(fisher)
return fisher, sigma
###### OLD
amp = get_prim_amp(template)
F.bispec['bispec'] *= amp
F.get_binned_invcov(nls=totcov)
bin_invcov = F.bin_invcov
bin_cov = F.bin_cov
bin_size = F.bins['bins'].size
bins = F.bins['bins']
num_pass = F.bins['num_pass_full']
bispec = F.bispec['bispec']
# Plot invcov, cov
plot_opts = dict(lmin=2)
plot_tools.cls_matrix(plot_tag, bins, bin_invcov, log=False, plot_dell=False,
inv=True, **plot_opts)
plot_tools.cls_matrix(plot_tag.replace('invcov', 'cov_dell'),
bins, bin_cov, log=False, plot_dell=True,
**plot_opts)
plot_tools.cls_matrix(plot_tag.replace('invcov', 'cov'),
bins, bin_cov, log=False, plot_dell=False,
**plot_opts)
# allocate bin-sized fisher matrix (same size as outer loop)
fisher_per_bin = np.ones(bin_size) * np.nan
# allocate 12 x 12 cov for use in inner loop
invcov = np.zeros((F.bispec['pol_trpl'].size, F.bispec['pol_trpl'].size))
# create (binned) inverse cov matrix for each ell
# i.e. use the fact that 12x12 pol invcov can be factored
# as (Cl-1)_l1^ip (Cl-1)_l2^jq (Cl-1)_l3^kr
invcov1 = np.ones((bin_size, 12, 12))
invcov2 = np.ones((bin_size, 12, 12))
invcov3 = np.ones((bin_size, 12, 12))
f_check = 0
for tidx_a, ptrp_a in enumerate(F.bispec['pol_trpl']):
# ptrp_a = ijk
for tidx_b, ptrp_b in enumerate(F.bispec['pol_trpl']):
# ptrp_a = pqr
# a is first bispectrum, b second one
# ptrp = pol triplet
ptrp_a1 = ptrp_a[0]
ptrp_a2 = ptrp_a[1]
ptrp_a3 = ptrp_a[2]
ptrp_b1 = ptrp_b[0]
ptrp_b2 = ptrp_b[1]
ptrp_b3 = ptrp_b[2]
invcov1[:,tidx_a,tidx_b] = bin_invcov[:,ptrp_a1,ptrp_b1]
invcov2[:,tidx_a,tidx_b] = bin_invcov[:,ptrp_a2,ptrp_b2]
invcov3[:,tidx_a,tidx_b] = bin_invcov[:,ptrp_a3,ptrp_b3]
# Depending on lmin, start outer loop not at first bin.
start_bidx = np.where(bins >= lmin)[0][0]
end_bidx = np.where(bins >= min(lmax, bins[-1]))[0][0] + 1
# loop same loop as in binned_bispectrum
for idx1, i1 in enumerate(bins[start_bidx:end_bidx]):
idx1 += start_bidx
cl1 = invcov1[idx1,:,:] # 12x12
# init
fisher_per_bin[idx1] = 0.
for idx2, i2 in enumerate(bins[idx1:end_bidx]):
idx2 += idx1
cl2 = invcov2[idx1,:,:] # 12x12
cl12 = cl1 * cl2
for idx3, i3 in enumerate(bins[idx2:end_bidx]):
idx3 += idx2
num = num_pass[idx1,idx2,idx3]
if num == 0:
continue
cl123 = cl12 * invcov3[idx3,:,:] #12x12
B = bispec[idx1,idx2,idx3,:]
f = np.einsum("i,ij,j", B, cl123, B)
# f0 = np.einsum("i,i", B, B)
# b0 = np.einsum("ij,ij", cl123, cl123)
# both B's have num
f /= float(num)
if i1 == i2 == i3:
f /= 6.
elif i1 != i2 != i3:
pass
else:
f /= 2.
fisher_per_bin[idx1] += f
f_check += f
fisher_per_bin *= fsky
f_check *= fsky
min_f = []
# print 'fisher_check:', f_check * (4*np.pi / np.sqrt(8))**2
# print 'sigma:', 1/np.sqrt(f_check) * (np.sqrt(8)/4./np.pi)
fisher_check = f_check * (4*np.pi / np.sqrt(8))**2
sigma = 1/np.sqrt(f_check) * (np.sqrt(8)/4./np.pi)
return fisher_check, sigma
# for lidx, lmin in enumerate(range(2, 40)):
# f = np.sum(fisher_per_bin[lmin-2:])
# min_f.append(np.sqrt(f))
if __name__ == '__main__':
# ana_dir = '/mn/stornext/d8/ITA/spider/adri/analysis/20181112_sst/' # S5
# ana_dir = '/mn/stornext/d8/ITA/spider/adri/analysis/20181123_sst/'
ana_dir = '/mn/stornext/d8/ITA/spider/adri/analysis/20181214_sst_debug/'
out_dir = opj(ana_dir, 'fisher')
camb_base = '/mn/stornext/d8/ITA/spider/adri/analysis/20171217_sst'
camb_dir = opj(camb_base, 'camb_output/high_acy/sparse_5000')
noise_base = '/mn/stornext/u3/adriaand/cmb_sst_ksw/ancillary/noise_curves'
# noise_base = '/mn/stornext/u3/adriaand/cmb_sst_ksw/ancillary/noise_curves/so/v3/so'
# lat_path = opj(noise_base, 's4/S4_2LAT_Tpol_default_noisecurves')
lat_path = opj(noise_base, 'so/v3/so')
# sac_path = noise_base
sac_path = None
# fixed
lmin = 2
# lmax = 4999
lmax = 4000 # has to match beta etc
lmax_f = 3000 # for fisher
lmin_f = 250
# A_lens = 0.13
A_lens = 1.
noise_amp_temp = 6.
noise_amp_pol = 6 * np.sqrt(2)
# NOTE
# noise_amp_temp = .0
# noise_amp_pol = .0 * np.sqrt(2)
opts = {}
# opts['nominal'] = dict(fsky=0.03, no_ee=False, no_tt=False, no_noise=False)
# opts['no_ee'] = dict(fsky=0.03, no_ee=True, no_tt=False, no_noise=False)
# opts['no_tt'] = dict(fsky=0.03, no_ee=False, no_tt=True, no_noise=False)
opts['nominal'] = dict(fsky=1., no_ee=False, no_tt=False, no_noise=False)
opts['no_ee'] = dict(fsky=1., no_ee=True, no_tt=False, no_noise=False)
opts['no_tt'] = dict(fsky=1., no_ee=False, no_tt=True, no_noise=False)
opts['cv_lim'] = dict(fsky=1., no_ee=False, no_tt=False, no_noise=True)
opts['no_ee_cv_lim'] = dict(fsky=1., no_ee=True, no_tt=False, no_noise=True)
opts['no_tt_cv_lim'] = dict(fsky=1., no_ee=False, no_tt=True, no_noise=True)
# for template in ['local', 'equilateral']:
for template in ['local']:
# with open(opj(out_dir, 'fisher_{}.txt'.format(template)), 'w') as text_file:
with open(opj(out_dir, 'fisher_so_{}.txt'.format(template)), 'w') as text_file:
for key in opts:
opt = opts[key]
no_noise = opt.get('no_noise')
fsky = opt.get('fsky')
no_ee = opt.get('no_ee')
no_tt = opt.get('no_tt')
cls = get_cls(camb_dir, lmax, A_lens=A_lens)
nls = get_nls(lat_path, lmax, sac_path=sac_path)
#nls = get_fiducial_nls(noise_amp_temp, noise_amp_pol, lmax)
if no_noise:
nls *= 0.
totcov = get_totcov(cls, nls, no_ee=no_ee, no_tt=no_tt)
ells = np.arange(2, lmax+1)
# plot_name = opj(out_dir, 'b_invcov_{}.png'.format(key))
plot_name = opj(out_dir, 'b_so_invcov_{}.png'.format(key))
# for template in ['local', 'equilateral', 'orthogonal']:
text_file.write('template: {}\n'.format(template))
text_file.write('option: {}\n'.format(key))
text_file.write('no_noise: {}\n'.format(no_noise))
text_file.write('fsky: {}\n'.format(fsky))
text_file.write('A_lens: {}\n'.format(A_lens))
text_file.write('no_ee: {}\n'.format(no_ee))
text_file.write('no_tt: {}\n'.format(no_tt))
fisher_check, sigma = run_fisher(template,
ana_dir, camb_dir, totcov, ells,
lmin=lmin_f, lmax=lmax_f, fsky=fsky,
plot_tag=plot_name, tag='r1_i10_b4')
text_file.write('fisher: {}\n'.format(fisher_check))
text_file.write('sigma: {}\n'.format(sigma))
text_file.write('\n')
|
[
"sst.camb_tools.get_so_noise",
"sst.plot_tools.cls_matrix",
"numpy.zeros",
"sst.Fisher",
"numpy.ones",
"numpy.einsum",
"numpy.where",
"numpy.arange",
"numpy.loadtxt",
"numpy.interp",
"sst.camb_tools.get_spectra",
"numpy.sqrt"
] |
[((387, 452), 'sst.camb_tools.get_spectra', 'ct.get_spectra', (['cls_path'], {'tag': '"""r0"""', 'lensed': '(False)', 'prim_type': '"""tot"""'}), "(cls_path, tag='r0', lensed=False, prim_type='tot')\n", (401, 452), True, 'from sst import camb_tools as ct\n'), ((502, 566), 'sst.camb_tools.get_spectra', 'ct.get_spectra', (['cls_path'], {'tag': '"""r0"""', 'lensed': '(True)', 'prim_type': '"""tot"""'}), "(cls_path, tag='r0', lensed=True, prim_type='tot')\n", (516, 566), True, 'from sst import camb_tools as ct\n'), ((2417, 2491), 'sst.camb_tools.get_so_noise', 'ct.get_so_noise', ([], {'tt_file': 'lat_tt_file', 'pol_file': 'lat_pol_file', 'sat_file': 'None'}), '(tt_file=lat_tt_file, pol_file=lat_pol_file, sat_file=None)\n', (2432, 2491), True, 'from sst import camb_tools as ct\n'), ((5317, 5332), 'sst.Fisher', 'Fisher', (['ana_dir'], {}), '(ana_dir)\n', (5323, 5332), False, 'from sst import Fisher\n'), ((6147, 6252), 'sst.plot_tools.cls_matrix', 'plot_tools.cls_matrix', (['plot_tag', 'bins', 'bin_invcov'], {'log': '(False)', 'plot_dell': '(False)', 'inv': '(True)'}), '(plot_tag, bins, bin_invcov, log=False, plot_dell=\n False, inv=True, **plot_opts)\n', (6168, 6252), False, 'from sst import plot_tools\n'), ((6963, 7068), 'sst.plot_tools.cls_matrix', 'plot_tools.cls_matrix', (['plot_tag', 'bins', 'bin_invcov'], {'log': '(False)', 'plot_dell': '(False)', 'inv': '(True)'}), '(plot_tag, bins, bin_invcov, log=False, plot_dell=\n False, inv=True, **plot_opts)\n', (6984, 7068), False, 'from sst import plot_tools\n'), ((7614, 7678), 'numpy.zeros', 'np.zeros', (["(F.bispec['pol_trpl'].size, F.bispec['pol_trpl'].size)"], {}), "((F.bispec['pol_trpl'].size, F.bispec['pol_trpl'].size))\n", (7622, 7678), True, 'import numpy as np\n'), ((7859, 7886), 'numpy.ones', 'np.ones', (['(bin_size, 12, 12)'], {}), '((bin_size, 12, 12))\n', (7866, 7886), True, 'import numpy as np\n'), ((7901, 7928), 'numpy.ones', 'np.ones', (['(bin_size, 12, 12)'], {}), '((bin_size, 12, 12))\n', (7908, 7928), 
True, 'import numpy as np\n'), ((7943, 7970), 'numpy.ones', 'np.ones', (['(bin_size, 12, 12)'], {}), '((bin_size, 12, 12))\n', (7950, 7970), True, 'import numpy as np\n'), ((1789, 1808), 'numpy.ones', 'np.ones', (['(6, 20000)'], {}), '((6, 20000))\n', (1796, 1808), True, 'import numpy as np\n'), ((3107, 3140), 'numpy.arange', 'np.arange', (['lmin_sac', '(lmax_sac + 1)'], {}), '(lmin_sac, lmax_sac + 1)\n', (3116, 3140), True, 'import numpy as np\n'), ((3156, 3185), 'numpy.interp', 'np.interp', (['ell_f', 'ell', 'sac_ee'], {}), '(ell_f, ell, sac_ee)\n', (3165, 3185), True, 'import numpy as np\n'), ((3203, 3232), 'numpy.interp', 'np.interp', (['ell_f', 'ell', 'sac_bb'], {}), '(ell_f, ell, sac_bb)\n', (3212, 3232), True, 'import numpy as np\n'), ((4194, 4213), 'numpy.ones', 'np.ones', (['(6, 20000)'], {}), '((6, 20000))\n', (4201, 4213), True, 'import numpy as np\n'), ((6556, 6571), 'numpy.sqrt', 'np.sqrt', (['fisher'], {}), '(fisher)\n', (6563, 6571), True, 'import numpy as np\n'), ((7524, 7541), 'numpy.ones', 'np.ones', (['bin_size'], {}), '(bin_size)\n', (7531, 7541), True, 'import numpy as np\n'), ((11430, 11440), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (11437, 11440), True, 'import numpy as np\n'), ((8724, 8746), 'numpy.where', 'np.where', (['(bins >= lmin)'], {}), '(bins >= lmin)\n', (8732, 8746), True, 'import numpy as np\n'), ((10245, 10261), 'numpy.sqrt', 'np.sqrt', (['f_check'], {}), '(f_check)\n', (10252, 10261), True, 'import numpy as np\n'), ((2882, 2902), 'numpy.loadtxt', 'np.loadtxt', (['sac_file'], {}), '(sac_file)\n', (2892, 2902), True, 'import numpy as np\n'), ((9514, 9546), 'numpy.einsum', 'np.einsum', (['"""i,ij,j"""', 'B', 'cl123', 'B'], {}), "('i,ij,j', B, cl123, B)\n", (9523, 9546), True, 'import numpy as np\n'), ((10216, 10226), 'numpy.sqrt', 'np.sqrt', (['(8)'], {}), '(8)\n', (10223, 10226), True, 'import numpy as np\n'), ((10265, 10275), 'numpy.sqrt', 'np.sqrt', (['(8)'], {}), '(8)\n', (10272, 10275), True, 'import numpy as np\n'), 
((13117, 13139), 'numpy.arange', 'np.arange', (['(2)', '(lmax + 1)'], {}), '(2, lmax + 1)\n', (13126, 13139), True, 'import numpy as np\n')]
|
from Vector import Vector
from Neuron import Neuron
class HiddenLayer:
def __init__(self, size, input_size, weights, activation, activation_d, loss, loss_d, bias=1.0):
self.size = size
#self.input_layer = input_layer
self.input_size = input_size
#self.output_layer = output_layer
self.loss = loss
self.loss_d = loss_d
self._neurons = []
for w in weights:
if len(w) != self.input_size:
raise ValueError("Mismatched weight and input size!")
self._neurons.append(Neuron(w, activation, activation_d, bias))
@property
def neurons(self):
return self._neurons
@neurons.setter
def neurons(self, n):
self._neurons = n
def __getitem__(self, key):
return self._neurons[key]
def __len__(self):
return self.size
def generate_output(self, input):
if len(input) != self.input_size:
raise ValueError("Input is not of the defined length!")
op = []
lin = []
for n in self._neurons:
output = n.output(input)
op.append(output['op'])
lin.append(output['lin'])
return {'op':Vector(op), 'lin':Vector(lin)}
|
[
"Vector.Vector",
"Neuron.Neuron"
] |
[((1241, 1251), 'Vector.Vector', 'Vector', (['op'], {}), '(op)\n', (1247, 1251), False, 'from Vector import Vector\n'), ((1259, 1270), 'Vector.Vector', 'Vector', (['lin'], {}), '(lin)\n', (1265, 1270), False, 'from Vector import Vector\n'), ((571, 612), 'Neuron.Neuron', 'Neuron', (['w', 'activation', 'activation_d', 'bias'], {}), '(w, activation, activation_d, bias)\n', (577, 612), False, 'from Neuron import Neuron\n')]
|
# -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: <NAME> 0xA3ADB67A2CDB8B35 <<EMAIL>>
# please also see AUTHORS file
# :copyright: (c) 2013 Isis Lovecruft
# (c) 2007-2013, The Tor Project, Inc.
# (c) 2007-2013, all entities within the AUTHORS file
# :license: 3-clause BSD, see included LICENSE for information
"""Parsers for ``@type bridge-network-status 1.0`` descriptors.
.. _descriptors: https://metrics.torproject.org/formats.html#descriptortypes
**Module Overview:**
..
parse
\_networkstatus
|_ isValidRouterNickname - Determine if a nickname is according to spec
|_ parseRLine - Parse an 'r'-line from a networkstatus document
|_ parseALine - Parse an 'a'-line from a networkstatus document
\_ parseSLine - Parse an 's'-line from a networkstatus document
"""
import binascii
import logging
import string
import time
import warnings
from twisted.python.log import showwarning
from bridgedb.parse import addr
from bridgedb.parse import parseUnpaddedBase64
from bridgedb.parse import InvalidBase64
class NetworkstatusParsingError(Exception):
"""Unable to parse networkstatus document line."""
class InvalidNetworkstatusRouterIdentity(ValueError):
"""The ID field of a networkstatus document 'r'-line is invalid."""
class InvalidRouterNickname(ValueError):
"""Router nickname doesn't follow tor-spec."""
def isValidRouterNickname(nickname):
"""Determine if a router's given nickname meets the specification.
:param string nickname: An OR's nickname.
"""
ALPHANUMERIC = string.letters + string.digits
if not (1 <= len(nickname) <= 19):
raise InvalidRouterNickname(
"Nicknames must be between 1 and 19 characters: %r" % nickname)
for letter in nickname:
if not letter in ALPHANUMERIC:
raise InvalidRouterNickname(
"Nicknames must only use [A-Za-z0-9]: %r" % nickname)
return True
def parseRLine(line):
"""Parse an 'r'-line from a networkstatus document.
From torspec.git/dir-spec.txt, commit 36761c7d553d L1499-1512:
|
|"r" SP nickname SP identity SP digest SP publication SP IP SP ORPort
| SP DirPort NL
|
| [At start, exactly once.]
|
| "Nickname" is the OR's nickname. "Identity" is a hash of its
| identity key, encoded in base64, with trailing equals sign(s)
| removed. "Digest" is a hash of its most recent descriptor as
| signed (that is, not including the signature), encoded in base64.
| "Publication" is the
| publication time of its most recent descriptor, in the form
| YYYY-MM-DD HH:MM:SS, in UTC. "IP" is its current IP address;
| ORPort is its current OR port, "DirPort" is its current directory
| port, or "0" for "none".
|
:param string line: An 'r'-line from an bridge-network-status descriptor.
"""
(nickname, ID, descDigest, timestamp,
ORaddr, ORport, dirport) = (None for x in xrange(7))
try:
if not line.startswith('r '):
raise NetworkstatusParsingError(
"Networkstatus parser received non 'r'-line: %r" % line)
line = line[2:] # Chop off the 'r '
fields = line.split()
if len(fields) != 8:
raise NetworkstatusParsingError(
"Wrong number of fields in networkstatus 'r'-line: %r" % line)
nickname, ID = fields[:2]
try:
ID = parseUnpaddedBase64(ID)
except InvalidBase64 as error:
raise InvalidNetworkstatusRouterIdentity(error)
# Check the nickname validity after parsing the ID, otherwise, if the
# nickname is invalid, we end up with the nickname being ``None`` and
# the ID being unparsed, unpadded (meaning it is technically invalid)
# base64.
isValidRouterNickname(nickname)
except NetworkstatusParsingError as error:
logging.error(error)
nickname, ID = None, None
except InvalidRouterNickname as error:
logging.error(error)
# Assume that we mostly care about obtaining the OR's ID, then it
# should be okay to set the nickname to ``None``, if it was invalid.
nickname = None
except InvalidNetworkstatusRouterIdentity as error:
logging.error(error)
ID = None
else:
try:
descDigest = parseUnpaddedBase64(fields[2])
timestamp = time.mktime(time.strptime(" ".join(fields[3:5]),
"%Y-%m-%d %H:%M:%S"))
ORaddr = fields[5]
ORport = int(fields[6])
dirport = fields[7]
except InvalidBase64 as error:
logging.error(error)
descDigest = None
except (AttributeError, ValueError, IndexError) as error:
logging.error(error)
timestamp = None
finally:
return (nickname, ID, descDigest, timestamp, ORaddr, ORport, dirport)
def parseALine(line, fingerprint=None):
"""Parse an 'a'-line of a bridge networkstatus document.
From torspec.git/dir-spec.txt, commit 36761c7d553d L1499-1512:
|
| "a" SP address ":" port NL
|
| [Any number.]
|
| Present only if the OR has at least one IPv6 address.
|
| Address and portlist are as for "or-address" as specified in
| 2.1.
|
| (Only included when the vote or consensus is generated with
| consensus-method 14 or later.)
:param string line: An 'a'-line from an bridge-network-status descriptor.
:type fingerprint: string or None
:param fingerprint: A string which identifies which OR the descriptor
we're parsing came from (since the 'a'-line doesn't tell us, this can
help make the log messages clearer).
:raises: :exc:`NetworkstatusParsingError`
:rtype: tuple
:returns: A 2-tuple of a string respresenting the IP address and a
:class:`bridgedb.parse.addr.PortList`.
"""
ip = None
portlist = None
if line.startswith('a '):
line = line[2:] # Chop off the 'a '
else:
logging.warn("Networkstatus parser received non 'a'-line for %r:"\
" %r" % (fingerprint or 'Unknown', line))
try:
ip, portlist = line.rsplit(':', 1)
except ValueError as error:
logging.error("Bad separator in networkstatus 'a'-line: %r" % line)
return (None, None)
if ip.startswith('[') and ip.endswith(']'):
ip = ip.strip('[]')
try:
if not addr.isIPAddress(ip):
raise NetworkstatusParsingError(
"Got invalid IP Address in networkstatus 'a'-line for %r: %r"
% (fingerprint or 'Unknown', line))
if addr.isIPv4(ip):
warnings.warn(FutureWarning(
"Got IPv4 address in networkstatus 'a'-line! "\
"Networkstatus document format may have changed!"))
except NetworkstatusParsingError as error:
logging.error(error)
ip, portlist = None, None
try:
portlist = addr.PortList(portlist)
if not portlist:
raise NetworkstatusParsingError(
"Got invalid portlist in 'a'-line for %r!\n Line: %r"
% (fingerprint or 'Unknown', line))
except (addr.InvalidPort, NetworkstatusParsingError) as error:
logging.error(error)
portlist = None
else:
logging.debug("Parsed networkstatus ORAddress line for %r:"\
"\n Address: %s \tPorts: %s"
% (fingerprint or 'Unknown', ip, portlist))
finally:
return (ip, portlist)
def parseSLine(line):
"""Parse an 's'-line from a bridge networkstatus document.
The 's'-line contains all flags assigned to a bridge. The flags which may
be assigned to a bridge are as follows:
From torspec.git/dir-spec.txt, commit 36761c7d553d L1526-1554:
|
| "s" SP Flags NL
|
| [Exactly once.]
|
| A series of space-separated status flags, in lexical order (as ASCII
| byte strings). Currently documented flags are:
|
| "BadDirectory" if the router is believed to be useless as a
| directory cache (because its directory port isn't working,
| its bandwidth is always throttled, or for some similar
| reason).
| "Fast" if the router is suitable for high-bandwidth circuits.
| "Guard" if the router is suitable for use as an entry guard.
| "HSDir" if the router is considered a v2 hidden service directory.
| "Named" if the router's identity-nickname mapping is canonical,
| and this authority binds names.
| "Stable" if the router is suitable for long-lived circuits.
| "Running" if the router is currently usable.
| "Valid" if the router has been 'validated'.
| "V2Dir" if the router implements the v2 directory protocol.
:param string line: An 's'-line from an bridge-network-status descriptor.
:rtype: tuple
:returns: A 2-tuple of booleans, the first is True if the bridge has the
"Running" flag, and the second is True if it has the "Stable" flag.
"""
line = line[2:]
flags = [x.capitalize() for x in line.split()]
fast = 'Fast' in flags
running = 'Running' in flags
stable = 'Stable' in flags
guard = 'Guard' in flags
valid = 'Valid' in flags
if (fast or running or stable or guard or valid):
logging.debug("Parsed Flags: %s%s%s%s%s"
% ('Fast ' if fast else '',
'Running ' if running else '',
'Stable ' if stable else '',
'Guard ' if guard else '',
'Valid ' if valid else ''))
# Right now, we only care about 'Running' and 'Stable'
return running, stable
|
[
"logging.error",
"bridgedb.parse.addr.isIPAddress",
"bridgedb.parse.addr.isIPv4",
"logging.debug",
"bridgedb.parse.parseUnpaddedBase64",
"logging.warn",
"bridgedb.parse.addr.PortList"
] |
[((6251, 6361), 'logging.warn', 'logging.warn', (['("Networkstatus parser received non \'a\'-line for %r: %r" % (fingerprint or\n \'Unknown\', line))'], {}), '("Networkstatus parser received non \'a\'-line for %r: %r" % (\n fingerprint or \'Unknown\', line))\n', (6263, 6361), False, 'import logging\n'), ((6882, 6897), 'bridgedb.parse.addr.isIPv4', 'addr.isIPv4', (['ip'], {}), '(ip)\n', (6893, 6897), False, 'from bridgedb.parse import addr\n'), ((7211, 7234), 'bridgedb.parse.addr.PortList', 'addr.PortList', (['portlist'], {}), '(portlist)\n', (7224, 7234), False, 'from bridgedb.parse import addr\n'), ((7566, 7709), 'logging.debug', 'logging.debug', (['("""Parsed networkstatus ORAddress line for %r:\n Address: %s \tPorts: %s""" %\n (fingerprint or \'Unknown\', ip, portlist))'], {}), '(\n """Parsed networkstatus ORAddress line for %r:\n Address: %s \tPorts: %s"""\n % (fingerprint or \'Unknown\', ip, portlist))\n', (7579, 7709), False, 'import logging\n'), ((9697, 9889), 'logging.debug', 'logging.debug', (["('Parsed Flags: %s%s%s%s%s' % ('Fast ' if fast else '', 'Running ' if\n running else '', 'Stable ' if stable else '', 'Guard ' if guard else '',\n 'Valid ' if valid else ''))"], {}), "('Parsed Flags: %s%s%s%s%s' % ('Fast ' if fast else '', \n 'Running ' if running else '', 'Stable ' if stable else '', 'Guard ' if\n guard else '', 'Valid ' if valid else ''))\n", (9710, 9889), False, 'import logging\n'), ((3563, 3586), 'bridgedb.parse.parseUnpaddedBase64', 'parseUnpaddedBase64', (['ID'], {}), '(ID)\n', (3582, 3586), False, 'from bridgedb.parse import parseUnpaddedBase64\n'), ((4035, 4055), 'logging.error', 'logging.error', (['error'], {}), '(error)\n', (4048, 4055), False, 'import logging\n'), ((4141, 4161), 'logging.error', 'logging.error', (['error'], {}), '(error)\n', (4154, 4161), False, 'import logging\n'), ((4401, 4421), 'logging.error', 'logging.error', (['error'], {}), '(error)\n', (4414, 4421), False, 'import logging\n'), ((4488, 4518), 
'bridgedb.parse.parseUnpaddedBase64', 'parseUnpaddedBase64', (['fields[2]'], {}), '(fields[2])\n', (4507, 4518), False, 'from bridgedb.parse import parseUnpaddedBase64\n'), ((6475, 6542), 'logging.error', 'logging.error', (['("Bad separator in networkstatus \'a\'-line: %r" % line)'], {}), '("Bad separator in networkstatus \'a\'-line: %r" % line)\n', (6488, 6542), False, 'import logging\n'), ((6673, 6693), 'bridgedb.parse.addr.isIPAddress', 'addr.isIPAddress', (['ip'], {}), '(ip)\n', (6689, 6693), False, 'from bridgedb.parse import addr\n'), ((7127, 7147), 'logging.error', 'logging.error', (['error'], {}), '(error)\n', (7140, 7147), False, 'import logging\n'), ((7503, 7523), 'logging.error', 'logging.error', (['error'], {}), '(error)\n', (7516, 7523), False, 'import logging\n'), ((4814, 4834), 'logging.error', 'logging.error', (['error'], {}), '(error)\n', (4827, 4834), False, 'import logging\n'), ((4943, 4963), 'logging.error', 'logging.error', (['error'], {}), '(error)\n', (4956, 4963), False, 'import logging\n')]
|
import weakref
import numpy as np
class Tree:
'''
Implementation of Nary-tree.
The source code is modified based on https://github.com/lianemeth/forest/blob/master/forest/NaryTree.py
Parameters
----------
key: object
key of the node
num_branch: int
how many branches in each node
children: Iterable[Tree]
reference of the children
parent: Tree
reference of the parent node
Returns
-------
an N-ary tree.
'''
def __init__(self, key, num_branch, children=None, parent=None):
self.key = key
self.children = children or [None for _ in range(num_branch)]
self._parent = weakref.ref(parent) if parent else None
@property
def parent(self):
if self._parent:
return self._parent()
def __getstate__(self):
self._parent = None
def __setstate__(self, state):
self.__dict__ = state
for child in self.children:
child._parent = weakref.ref(self)
def traversal(self, visit=None, *args, **kwargs):
if visit is not None:
visit(self, *args, **kwargs)
l = [self]
for child in self.children:
if child is not None:
l += child.traversal(visit, *args, **kwargs)
return l
def tree_based_non_dominated_sort(F):
"""
Tree-based efficient non-dominated sorting (T-ENS).
This algorithm is very efficient in many-objective optimization problems (MaOPs).
Parameters
----------
F: np.array
objective values for each individual.
Returns
-------
indices of the individuals in each front.
References
----------
<NAME>, <NAME>, <NAME>, and <NAME>,
A decision variable clustering based evolutionary algorithm for large-scale many-objective optimization,
IEEE Transactions on Evolutionary Computation, 2018, 22(1): 97-112.
"""
N, M = F.shape
# sort the rows in F
indices = np.lexsort(F.T[::-1])
F = F[indices]
obj_seq = np.argsort(F[:, :0:-1], axis=1) + 1
k = 0
forest = []
left = np.full(N, True)
while np.any(left):
forest.append(None)
for p, flag in enumerate(left):
if flag:
update_tree(F, p, forest, k, left, obj_seq)
k += 1
# convert forest to fronts
fronts = [[] for _ in range(k)]
for k, tree in enumerate(forest):
fronts[k].extend([indices[node.key] for node in tree.traversal()])
return fronts
def update_tree(F, p, forest, k, left, obj_seq):
_, M = F.shape
if forest[k] is None:
forest[k] = Tree(key=p, num_branch=M - 1)
left[p] = False
elif check_tree(F, p, forest[k], obj_seq, True):
left[p] = False
def check_tree(F, p, tree, obj_seq, add_pos):
if tree is None:
return True
N, M = F.shape
# find the minimal index m satisfying that p[obj_seq[tree.root][m]] < tree.root[obj_seq[tree.root][m]]
m = 0
while m < M - 1 and F[p, obj_seq[tree.key, m]] >= F[tree.key, obj_seq[tree.key, m]]:
m += 1
# if m not found
if m == M - 1:
# p is dominated by the solution at the root
return False
else:
for i in range(m + 1):
# p is dominated by a solution in the branch of the tree
if not check_tree(F, p, tree.children[i], obj_seq, i == m and add_pos):
return False
if tree.children[m] is None and add_pos:
# add p to the branch of the tree
tree.children[m] = Tree(key=p, num_branch=M - 1)
return True
|
[
"numpy.full",
"numpy.lexsort",
"numpy.any",
"numpy.argsort",
"weakref.ref"
] |
[((1995, 2016), 'numpy.lexsort', 'np.lexsort', (['F.T[::-1]'], {}), '(F.T[::-1])\n', (2005, 2016), True, 'import numpy as np\n'), ((2127, 2143), 'numpy.full', 'np.full', (['N', '(True)'], {}), '(N, True)\n', (2134, 2143), True, 'import numpy as np\n'), ((2154, 2166), 'numpy.any', 'np.any', (['left'], {}), '(left)\n', (2160, 2166), True, 'import numpy as np\n'), ((2051, 2082), 'numpy.argsort', 'np.argsort', (['F[:, :0:-1]'], {'axis': '(1)'}), '(F[:, :0:-1], axis=1)\n', (2061, 2082), True, 'import numpy as np\n'), ((688, 707), 'weakref.ref', 'weakref.ref', (['parent'], {}), '(parent)\n', (699, 707), False, 'import weakref\n'), ((1011, 1028), 'weakref.ref', 'weakref.ref', (['self'], {}), '(self)\n', (1022, 1028), False, 'import weakref\n')]
|
import gunicorn
import os
workers = os.getenv("GUNICORN_WORKERS")
worker_class = "gevent"
keepalive = os.getenv("GUNICORN_KEEP_ALIVE")
bind = "0.0.0.0:5000"
gunicorn.SERVER_SOFTWARE = "None"
|
[
"os.getenv"
] |
[((38, 67), 'os.getenv', 'os.getenv', (['"""GUNICORN_WORKERS"""'], {}), "('GUNICORN_WORKERS')\n", (47, 67), False, 'import os\n'), ((104, 136), 'os.getenv', 'os.getenv', (['"""GUNICORN_KEEP_ALIVE"""'], {}), "('GUNICORN_KEEP_ALIVE')\n", (113, 136), False, 'import os\n')]
|
from django.conf.urls import include
from django.urls import path
urlpatterns = [
path("task", include("aws_pubsub.urls")),
]
|
[
"django.conf.urls.include"
] |
[((101, 127), 'django.conf.urls.include', 'include', (['"""aws_pubsub.urls"""'], {}), "('aws_pubsub.urls')\n", (108, 127), False, 'from django.conf.urls import include\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 07 11:41:07 2016
@author: Ferriss
"""
from pynams import experiments
reload(experiments)
experiments.convertH(110.,)
|
[
"pynams.experiments.convertH"
] |
[((139, 166), 'pynams.experiments.convertH', 'experiments.convertH', (['(110.0)'], {}), '(110.0)\n', (159, 166), False, 'from pynams import experiments\n')]
|
#!
'''
..%%%%...%%%%%%..%%%%%%..........%%%%%...%%..%%..%%%%%%..%%......%%%%%%.
.%%......%%........%%............%%..%%..%%..%%....%%....%%........%%...
.%%.%%%..%%%%......%%............%%%%%...%%..%%....%%....%%........%%...
.%%..%%..%%........%%............%%..%%..%%..%%....%%....%%........%%...
..%%%%...%%%%%%....%%............%%%%%....%%%%...%%%%%%..%%%%%%....%%...
........................................................................
'''
from flask import render_template, flash, redirect, url_for, jsonify, request, abort
from app import app, db
from app.models import Customer, BudgetItem, CustomerSchema, BudgetItemSchema
from marshmallow import validate, ValidationError
import json
from boto import kinesis
from flask_cors import CORS
CORS(app)
customer_schema = CustomerSchema()
budget_schema = BudgetItemSchema(many=True)
# kinesis = kinesis.connect_to_region(app.config['AWS_REGION'])
# kinesis.describe_stream(app.config['KINESIS_DATA_STREAM'])
@app.route('/')
@app.route('/index')
def index():
customers = Customer.query.all()
return render_template('index.html', title='Get Built', customers=customers)
@app.route('/customers')
def get_customers():
# STARTING TO BUILD OUT VUE FRONT END CONNECTION
customers = Customer.query.all()
many_customers = CustomerSchema(many=True)
res = many_customers.dumps(customers)
return res, 202
# ADD A NEW CUSTOMER
@app.route("/customer/details", methods=['POST',])
def create_customer():
data = request.get_json()
# VALIDATE POST DATA
try:
validate = customer_schema.load(data)
existing_customer = Customer.query.filter_by(name=data['name']).first()
if existing_customer is not None:
response = { 'message': 'customer already exists' }
return jsonify(response), 403
else:
# VALIDATION COMPLETE ADD NEW CUSTOMER
new_customer = Customer(**data)
db.session.add(new_customer)
db.session.commit()
try:
nc = customer_schema.dump(new_customer)
# kinesis.put_record(app.config['KINESIS_DATA_STREAM'], json.dumps(nc), "partitionkey")
except Exception as e:
print(e)
# CALL TO CLOUD WATCH OR OTHER ALERTING SYSTEM
response = { 'message': 'new customer registered', 'data': customer_schema.dump(new_customer) }
return jsonify(response), 202
except ValidationError as err:
errors = err.messages
validate = False
return jsonify(errors), 403
# GET CUSTOMER DETAILS
@app.route("/customer/details/<customer_id>", methods=['GET', 'DELETE'])
def get_customer(customer_id):
customer = Customer.query.filter_by(id=customer_id).first()
if customer is None:
response = { 'message': 'customer does not exist' }
return jsonify(response), 404
else:
if request.method == 'GET':
result = customer_schema.dump(customer)
response= { 'data': result }
return jsonify(response), 202
elif request.method == 'DELETE':
db.session.delete(customer)
db.session.commit()
response = { 'message': 'customer' + customer_id + ' deleted' }
return jsonify(response), 202
else:
return jsonify(response), 404
# GET ALL BUDGET ITEMS FOR A CUSTOMER
@app.route("/budget/details/<customer_id>", methods=['GET'])
def get_budget_items(customer_id):
items = BudgetItem.query.filter_by(customer_id=customer_id).all()
if items is None:
response = { 'message': 'customer does not exist' }
return jsonify(response), 404
else:
result = budget_schema.dump(items)
response = {'data': result, 'status_code' : 202 }
return jsonify(response)
# ADD OR UPDATE A NEW BUDGET ITEM
@app.route("/budget/details", methods=['POST', 'PUT'])
def create_budget_item():
data = request.get_json()
print(data)
print("Update a budget item")
try:
# VALIDATE JSON DATA
# AS WE HAVE ONE TO MANY RELATIONSHIP FROM CUSTOMER TO BUDGET ITEM DATA MUST BE ARRAY NOT OBJECT
validate = budget_schema.loads(json.dumps(data))
except ValidationError as err:
errors = err.messages
return jsonify(errors), 403
if request.method == 'POST':
# THERE IS A MORE ELEGANT WAY TO DO THIS, BUT WANT THE ABILITY TO ADD MANY BUDGET ITEMS AT ONCE
items = []
for x in data:
new_item = BudgetItem(**x)
db.session.add(new_item)
db.session.commit()
items.append(new_item)
items = budget_schema.dump(items)
response = { 'message': 'new budget item created','data': items }
return response, 202
elif request.method == 'PUT':
print("PUT")
# THERE IS A MORE ELEGANT WAY TO DO THIS, BUT WANT THE ABILITY TO ADD MANY BUDGET ITEMS AT ONCE
items = []
for x in data:
item = BudgetItem.query.filter_by(id=x['id']).update(dict(**x))
db.session.commit()
items.append(x)
items = budget_schema.dump(items)
response = { 'message': 'new budget item created', 'data' : items }
return jsonify(response), 202
else:
print("ELSE")
return abort(400)
# DELETE A BUDGET ITEM
@app.route("/budget/details/<item_id>", methods=['DELETE'])
def delete_budget_item(item_id):
item = BudgetItem.query.filter_by(id=item_id).first()
if item is None:
response = { 'message': 'budget item does not exist' }
return jsonify(response), 404
else:
db.session.delete(item)
db.session.commit()
response = { 'message': 'budget item ' + item_id + ' deleted' }
return jsonify(response), 202
'''
..%%%%...%%%%%%..%%%%%%..........%%%%%...%%..%%..%%%%%%..%%......%%%%%%.
.%%......%%........%%............%%..%%..%%..%%....%%....%%........%%...
.%%.%%%..%%%%......%%............%%%%%...%%..%%....%%....%%........%%...
.%%..%%..%%........%%............%%..%%..%%..%%....%%....%%........%%...
..%%%%...%%%%%%....%%............%%%%%....%%%%...%%%%%%..%%%%%%....%%...
........................................................................
'''
|
[
"app.app.route",
"flask_cors.CORS",
"app.models.Customer.query.filter_by",
"flask.abort",
"app.models.BudgetItemSchema",
"json.dumps",
"app.models.Customer",
"flask.request.get_json",
"flask.jsonify",
"app.db.session.delete",
"app.db.session.commit",
"flask.render_template",
"app.db.session.add",
"app.models.CustomerSchema",
"app.models.BudgetItem",
"app.models.Customer.query.all",
"app.models.BudgetItem.query.filter_by"
] |
[((752, 761), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (756, 761), False, 'from flask_cors import CORS\n'), ((781, 797), 'app.models.CustomerSchema', 'CustomerSchema', ([], {}), '()\n', (795, 797), False, 'from app.models import Customer, BudgetItem, CustomerSchema, BudgetItemSchema\n'), ((814, 841), 'app.models.BudgetItemSchema', 'BudgetItemSchema', ([], {'many': '(True)'}), '(many=True)\n', (830, 841), False, 'from app.models import Customer, BudgetItem, CustomerSchema, BudgetItemSchema\n'), ((969, 983), 'app.app.route', 'app.route', (['"""/"""'], {}), "('/')\n", (978, 983), False, 'from app import app, db\n'), ((985, 1004), 'app.app.route', 'app.route', (['"""/index"""'], {}), "('/index')\n", (994, 1004), False, 'from app import app, db\n'), ((1140, 1163), 'app.app.route', 'app.route', (['"""/customers"""'], {}), "('/customers')\n", (1149, 1163), False, 'from app import app, db\n'), ((1409, 1457), 'app.app.route', 'app.route', (['"""/customer/details"""'], {'methods': "['POST']"}), "('/customer/details', methods=['POST'])\n", (1418, 1457), False, 'from app import app, db\n'), ((2616, 2687), 'app.app.route', 'app.route', (['"""/customer/details/<customer_id>"""'], {'methods': "['GET', 'DELETE']"}), "('/customer/details/<customer_id>', methods=['GET', 'DELETE'])\n", (2625, 2687), False, 'from app import app, db\n'), ((3414, 3473), 'app.app.route', 'app.route', (['"""/budget/details/<customer_id>"""'], {'methods': "['GET']"}), "('/budget/details/<customer_id>', methods=['GET'])\n", (3423, 3473), False, 'from app import app, db\n'), ((3879, 3932), 'app.app.route', 'app.route', (['"""/budget/details"""'], {'methods': "['POST', 'PUT']"}), "('/budget/details', methods=['POST', 'PUT'])\n", (3888, 3932), False, 'from app import app, db\n'), ((5389, 5447), 'app.app.route', 'app.route', (['"""/budget/details/<item_id>"""'], {'methods': "['DELETE']"}), "('/budget/details/<item_id>', methods=['DELETE'])\n", (5398, 5447), False, 'from app import app, db\n'), 
((1036, 1056), 'app.models.Customer.query.all', 'Customer.query.all', ([], {}), '()\n', (1054, 1056), False, 'from app.models import Customer, BudgetItem, CustomerSchema, BudgetItemSchema\n'), ((1068, 1137), 'flask.render_template', 'render_template', (['"""index.html"""'], {'title': '"""Get Built"""', 'customers': 'customers'}), "('index.html', title='Get Built', customers=customers)\n", (1083, 1137), False, 'from flask import render_template, flash, redirect, url_for, jsonify, request, abort\n'), ((1256, 1276), 'app.models.Customer.query.all', 'Customer.query.all', ([], {}), '()\n', (1274, 1276), False, 'from app.models import Customer, BudgetItem, CustomerSchema, BudgetItemSchema\n'), ((1298, 1323), 'app.models.CustomerSchema', 'CustomerSchema', ([], {'many': '(True)'}), '(many=True)\n', (1312, 1323), False, 'from app.models import Customer, BudgetItem, CustomerSchema, BudgetItemSchema\n'), ((1493, 1511), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1509, 1511), False, 'from flask import render_template, flash, redirect, url_for, jsonify, request, abort\n'), ((3970, 3988), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (3986, 3988), False, 'from flask import render_template, flash, redirect, url_for, jsonify, request, abort\n'), ((3825, 3842), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (3832, 3842), False, 'from flask import render_template, flash, redirect, url_for, jsonify, request, abort\n'), ((5679, 5702), 'app.db.session.delete', 'db.session.delete', (['item'], {}), '(item)\n', (5696, 5702), False, 'from app import app, db\n'), ((5711, 5730), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (5728, 5730), False, 'from app import app, db\n'), ((1912, 1928), 'app.models.Customer', 'Customer', ([], {}), '(**data)\n', (1920, 1928), False, 'from app.models import Customer, BudgetItem, CustomerSchema, BudgetItemSchema\n'), ((1941, 1969), 'app.db.session.add', 'db.session.add', 
(['new_customer'], {}), '(new_customer)\n', (1955, 1969), False, 'from app import app, db\n'), ((1982, 2001), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1999, 2001), False, 'from app import app, db\n'), ((2734, 2774), 'app.models.Customer.query.filter_by', 'Customer.query.filter_by', ([], {'id': 'customer_id'}), '(id=customer_id)\n', (2758, 2774), False, 'from app.models import Customer, BudgetItem, CustomerSchema, BudgetItemSchema\n'), ((2883, 2900), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (2890, 2900), False, 'from flask import render_template, flash, redirect, url_for, jsonify, request, abort\n'), ((3521, 3572), 'app.models.BudgetItem.query.filter_by', 'BudgetItem.query.filter_by', ([], {'customer_id': 'customer_id'}), '(customer_id=customer_id)\n', (3547, 3572), False, 'from app.models import Customer, BudgetItem, CustomerSchema, BudgetItemSchema\n'), ((3676, 3693), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (3683, 3693), False, 'from flask import render_template, flash, redirect, url_for, jsonify, request, abort\n'), ((4223, 4239), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (4233, 4239), False, 'import json\n'), ((4545, 4560), 'app.models.BudgetItem', 'BudgetItem', ([], {}), '(**x)\n', (4555, 4560), False, 'from app.models import Customer, BudgetItem, CustomerSchema, BudgetItemSchema\n'), ((4573, 4597), 'app.db.session.add', 'db.session.add', (['new_item'], {}), '(new_item)\n', (4587, 4597), False, 'from app import app, db\n'), ((4610, 4629), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (4627, 4629), False, 'from app import app, db\n'), ((5353, 5363), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (5358, 5363), False, 'from flask import render_template, flash, redirect, url_for, jsonify, request, abort\n'), ((5492, 5530), 'app.models.BudgetItem.query.filter_by', 'BudgetItem.query.filter_by', ([], {'id': 'item_id'}), '(id=item_id)\n', (5518, 5530), False, 
'from app.models import Customer, BudgetItem, CustomerSchema, BudgetItemSchema\n'), ((5638, 5655), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (5645, 5655), False, 'from flask import render_template, flash, redirect, url_for, jsonify, request, abort\n'), ((5818, 5835), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (5825, 5835), False, 'from flask import render_template, flash, redirect, url_for, jsonify, request, abort\n'), ((1620, 1663), 'app.models.Customer.query.filter_by', 'Customer.query.filter_by', ([], {'name': "data['name']"}), "(name=data['name'])\n", (1644, 1663), False, 'from app.models import Customer, BudgetItem, CustomerSchema, BudgetItemSchema\n'), ((1797, 1814), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (1804, 1814), False, 'from flask import render_template, flash, redirect, url_for, jsonify, request, abort\n'), ((2430, 2447), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (2437, 2447), False, 'from flask import render_template, flash, redirect, url_for, jsonify, request, abort\n'), ((2570, 2585), 'flask.jsonify', 'jsonify', (['errors'], {}), '(errors)\n', (2577, 2585), False, 'from flask import render_template, flash, redirect, url_for, jsonify, request, abort\n'), ((3064, 3081), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (3071, 3081), False, 'from flask import render_template, flash, redirect, url_for, jsonify, request, abort\n'), ((3140, 3167), 'app.db.session.delete', 'db.session.delete', (['customer'], {}), '(customer)\n', (3157, 3167), False, 'from app import app, db\n'), ((3180, 3199), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (3197, 3199), False, 'from app import app, db\n'), ((4321, 4336), 'flask.jsonify', 'jsonify', (['errors'], {}), '(errors)\n', (4328, 4336), False, 'from flask import render_template, flash, redirect, url_for, jsonify, request, abort\n'), ((5101, 5120), 'app.db.session.commit', 'db.session.commit', 
([], {}), '()\n', (5118, 5120), False, 'from app import app, db\n'), ((5283, 5300), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (5290, 5300), False, 'from flask import render_template, flash, redirect, url_for, jsonify, request, abort\n'), ((3295, 3312), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (3302, 3312), False, 'from flask import render_template, flash, redirect, url_for, jsonify, request, abort\n'), ((3351, 3368), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (3358, 3368), False, 'from flask import render_template, flash, redirect, url_for, jsonify, request, abort\n'), ((5032, 5070), 'app.models.BudgetItem.query.filter_by', 'BudgetItem.query.filter_by', ([], {'id': "x['id']"}), "(id=x['id'])\n", (5058, 5070), False, 'from app.models import Customer, BudgetItem, CustomerSchema, BudgetItemSchema\n')]
|
#
# Copyright 2022 Intel (Autonomous Agents Lab)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from models.common_tf import SpecialSparseConv, window_poly6
import open3d.ml.tf as ml3d
import tensorflow as tf
from collections import namedtuple
NNSResult = namedtuple(
"NNSResult",
["neighbors_index", "neighbors_distance", "neighbors_row_splits"])
class CConvAggregationBlock(tf.keras.Model):
"""The aggregation block is a single Continuous Convolution.
In addition to the features the block can return the importance information.
"""
def __init__(self, name, output_channels, return_importance=False):
super().__init__(name=name)
self._convs = []
self._return_importance = return_importance
conv_params = {
'kernel_size': [4, 4, 4],
'coordinate_mapping': 'ball_to_cube_radial',
'normalize': True,
}
def Conv(name, filters, activation, **kwargs):
conv = ml3d.layers.ContinuousConv(name=name,
filters=output_channels,
activation=activation,
**kwargs)
self._convs.append((name, conv))
setattr(self, name, conv)
activation = tf.keras.activations.relu
Conv(name='conv1',
filters=output_channels,
activation=activation,
**conv_params)
def call(self, feats, inp_points, out_points, out_extents, scale_compat,
nns):
"""Computes the features and optionally the importance.
Args:
feats: The point featurs.
inp_points: The input point positions.
out_points: These are the positions of the voxel centers of the
finest grid.
out_extents: This is the voxel size.
scale_compat: The scale compatibility between the input point radii
and the voxel sizes.
nns: tuple with
- neighbors_index: The indices to neighbor points for each voxel.
- neighbor_row_splits: Defines the start and end of each voxels
neighbors.
- neighbors_distance: The distance to each neighbor normalized with
respect to the voxel size.
"""
neighbors_importance = scale_compat * window_poly6(
nns.neighbors_distance)
feats = self.conv1(
feats,
inp_points,
out_points,
extents=out_extents,
user_neighbors_index=nns.neighbors_index,
user_neighbors_row_splits=nns.neighbors_row_splits,
user_neighbors_importance=neighbors_importance,
)
if self._return_importance:
return feats, neighbors_importance
else:
return feats
class SparseConvBlock(tf.keras.Model):
"""The convolution block for the adaptive grid.
Args:
input_channels: Number of input channels.
output_channels: The number of output channels.
normalized_channels: The number of channels that will be normalized
with respect to the importance values.
"""
def __init__(self, name, output_channels, normalized_channels=0):
super().__init__(name=name)
self._convs = []
self.output_channels = output_channels
self.normalized_channels = normalized_channels
conv_params = {
'kernel_size': 55,
}
def Conv(name, filters, activation, **kwargs):
conv = SpecialSparseConv(name=name,
filters=filters,
activation=activation,
**kwargs)
self._convs.append((name, conv))
setattr(self, name, conv)
activation = tf.keras.activations.relu
if normalized_channels == 'all':
Conv(name='conv1',
filters=output_channels,
activation=activation,
normalize=True,
**conv_params)
Conv(name='conv2',
filters=output_channels,
activation=activation,
normalize=True,
**conv_params)
Conv(name='conv3',
filters=output_channels,
activation=activation,
normalize=True,
**conv_params)
Conv(name='conv4',
filters=output_channels,
activation=activation,
normalize=True,
**conv_params)
elif normalized_channels and normalized_channels >= output_channels:
Conv(name='conv1',
filters=output_channels,
activation=activation,
normalize=True,
**conv_params)
Conv(name='conv2',
filters=output_channels,
activation=activation,
normalize=False,
**conv_params)
Conv(name='conv3',
filters=output_channels,
activation=activation,
normalize=False,
**conv_params)
Conv(name='conv4',
filters=output_channels,
activation=activation,
normalize=False,
**conv_params)
elif normalized_channels and normalized_channels < output_channels:
Conv(name='conv1a',
filters=output_channels - normalized_channels,
activation=activation,
**conv_params)
Conv(name='conv1b',
filters=normalized_channels,
activation=activation,
normalize=True,
**conv_params)
Conv(name='conv2',
filters=output_channels,
activation=activation,
**conv_params)
Conv(name='conv3',
filters=output_channels,
activation=activation,
**conv_params)
Conv(name='conv4',
filters=output_channels,
activation=activation,
**conv_params)
else:
Conv(name='conv1',
filters=output_channels,
activation=activation,
**conv_params)
Conv(name='conv2',
filters=output_channels,
activation=activation,
**conv_params)
Conv(name='conv3',
filters=output_channels,
activation=activation,
**conv_params)
Conv(name='conv4',
filters=output_channels,
activation=activation,
**conv_params)
def call(self, feats, points, neighbors, importance=None):
"""Computes the features and optionally the importance if there are
normalized channels.
Args:
feats: Input features.
neighbors: dict with the neighbor information.
importance: The per voxel importance value
"""
if self.normalized_channels == 'all':
feats1, out_importance = self.conv1(feats,
inp_importance=importance,
**neighbors)
feats2, _ = self.conv2(feats1,
inp_importance=importance,
**neighbors)
feats3, _ = self.conv3(feats2,
inp_importance=importance,
**neighbors)
feats4, _ = self.conv4(feats3,
inp_importance=importance,
**neighbors)
return feats4, out_importance
elif self.normalized_channels and self.normalized_channels < self.output_channels:
feats1a = self.conv1a(feats, **neighbors)
feats1b, out_importance = self.conv1b(feats,
inp_importance=importance,
**neighbors)
feats1 = tf.concat([feats1a, feats1b], axis=-1)
feats2 = self.conv2(feats1, **neighbors)
feats3 = self.conv3(feats2, **neighbors)
feats4 = self.conv4(feats3, **neighbors)
return feats4, out_importance
elif self.normalized_channels:
feats1, out_importance = self.conv1(feats,
inp_importance=importance,
**neighbors)
feats2 = self.conv2(feats1, **neighbors)
feats3 = self.conv3(feats2, **neighbors)
feats4 = self.conv4(feats3, **neighbors)
return feats4, out_importance
else:
feats1 = self.conv1(feats, **neighbors)
feats2 = self.conv2(feats1, **neighbors)
feats3 = self.conv3(feats2, **neighbors)
feats4 = self.conv4(feats3, **neighbors)
return feats4
class SparseConvTransitionBlock(tf.keras.Model):
"""The convolution block for transitions between grids (up- and downconvolutions).
Args:
input_channels: Number of input channels.
output_channels: The number of output channels.
normalized_channels: The number of channels that will be normalized
with respect to the importance values.
"""
def __init__(self, name, output_channels, normalized_channels=0):
super().__init__(name=name)
self._convs = []
self.output_channels = output_channels
self.normalized_channels = normalized_channels
conv_params = {
'kernel_size': 9,
'activation': tf.keras.activations.relu
}
def Conv(name, filters, activation, **kwargs):
conv = SpecialSparseConv(name=name,
filters=filters,
activation=activation,
**kwargs)
self._convs.append((name, conv))
setattr(self, name, conv)
if normalized_channels == 'all' or normalized_channels >= output_channels:
Conv(name='conv1',
filters=output_channels,
normalize=True,
**conv_params)
elif normalized_channels and normalized_channels < output_channels:
Conv(name='conv1a',
filters=output_channels - normalized_channels,
**conv_params)
Conv(name='conv1b',
filters=normalized_channels,
normalize=True,
**conv_params)
else:
Conv(name='conv1', filters=output_channels, **conv_params)
def call(self, feats, inp_points, out_points, neighbors, importance=None):
"""Computes the features and optionally the importance if there are
normalized channels.
Args:
feats: Input features.
neighbors: dict with the neighbor information.
importance: The per voxel importance value
"""
if self.normalized_channels == 'all':
feats1, out_importance = self.conv1(feats,
inp_importance=importance,
**neighbors)
return feats1, out_importance
elif self.normalized_channels and self.normalized_channels < self.output_channels:
feats1a = self.conv1a(feats, **neighbors)
feats1b, out_importance = self.conv1b(feats,
inp_importance=importance,
**neighbors)
feats1 = tf.concat([feats1a, feats1b], axis=-1)
return feats1, out_importance
elif self.normalized_channels:
feats1, out_importance = self.conv1(feats,
inp_importance=importance,
**neighbors)
return feats1, out_importance
else:
feats1 = self.conv1(feats, **neighbors)
return feats1
class UNet5(tf.keras.Model):
"""Unet for adaptive grids predicting the signed and unsigned distance field.
Args:
channel_div: Reduces the number of channels for each layer.
with_importance: Adds channels normalized with the importance values.
normalized_channels: How many channels should be normalized with the importance.
residual_skip_connection: If True uses a residual connection for the last skip
connection. If 'all' uses residual connction for every skip connection.
"""
octree_levels = 5
def __init__(self,
name=None,
channel_div=1,
with_importance=False,
normalized_channels=0,
deeper=0,
residual_skip_connection=False):
super().__init__(name=name, autocast=False)
if not with_importance in (False, True, 'all'):
raise Exception('invalid value for "with_importance" {}'.format(
with_importance))
self.with_importance = with_importance
self.residual_skip_connection = residual_skip_connection
def SparseConvTransition(name,
filters,
normalized_channels=0,
**kwargs):
return SparseConvTransitionBlock(name, filters, normalized_channels)
d = channel_div
self.cconv_block_in = CConvAggregationBlock(
name="cconv_block_in",
output_channels=32 // d,
return_importance=with_importance)
params = {}
if with_importance:
params.update({
'normalized_channels': normalized_channels,
})
self.sparseconv_encblock0 = SparseConvBlock(name="sparseconv_encblock0",
output_channels=64 // d,
**params)
self.sparseconv_down1 = SparseConvTransition(name="sparseconv_down1",
filters=128 // d,
**params)
self.sparseconv_encblock1 = SparseConvBlock(name="sparseconv_encblock1",
output_channels=128 // d,
**params)
self.sparseconv_down2 = SparseConvTransition(name="sparseconv_down2",
filters=256 // d,
**params)
self.sparseconv_encblock2 = SparseConvBlock(name="sparseconv_encblock2",
output_channels=256 // d,
**params)
self.sparseconv_down3 = SparseConvTransition(name="sparseconv_down3",
filters=256 // d,
**params)
self.sparseconv_encblock3 = SparseConvBlock(name="sparseconv_encblock3",
output_channels=256 // d,
**params)
self.sparseconv_down4 = SparseConvTransition(name="sparseconv_down4",
filters=256 // d,
**params)
self.sparseconv_encblock4 = SparseConvBlock(name="sparseconv_encblock4",
output_channels=256 // d,
**params)
params = {}
self.sparseconv_up3 = SparseConvTransition(name="sparseconv_up3",
filters=256 // d)
self.sparseconv_decblock3 = SparseConvBlock(name="sparseconv_decblock3",
output_channels=256 // d,
**params)
self.sparseconv_up2 = SparseConvTransition(name="sparseconv_up2",
filters=256 // d)
self.sparseconv_decblock2 = SparseConvBlock(name="sparseconv_decblock2",
output_channels=256 // d,
**params)
if self.residual_skip_connection == 'all':
self.sparseconv_up1 = SparseConvTransition(name="sparseconv_up1",
filters=128 // d)
else:
self.sparseconv_up1 = SparseConvTransition(name="sparseconv_up1",
filters=256 // d)
self.sparseconv_decblock1 = SparseConvBlock(name="sparseconv_decblock1",
output_channels=128 // d,
**params)
self.sparseconv_up0 = SparseConvTransition(name="sparseconv_up0",
filters=64 // d)
self.sparseconv_decblock0 = SparseConvBlock(name="sparseconv_decblock0",
output_channels=32 // d,
**params)
activation = tf.keras.activations.relu
self.dense_decoder1 = tf.keras.layers.Dense(32 // d,
name='dense_decoder1',
activation=activation,
use_bias=True)
self.dense_decoder2 = tf.keras.layers.Dense(32 // d,
name='dense_decoder2',
activation=activation,
use_bias=True)
self.dense_decoder3 = tf.keras.layers.Dense(2,
name='dense_decoder3',
activation=None,
use_bias=False)
self._all_layers = []
self._collect_layers(self.layers)
def _collect_layers(self, layers):
for x in layers:
if hasattr(x, 'layers'):
self._collect_layers(x.layers)
else:
self._all_layers.append(x)
@tf.function
def call(self, input_dict):
"""Does a forward pass with aggregation and decode suited for training.
"""
feats = input_dict['feats']
feats1 = self.aggregate(input_dict)
code = self.unet(feats1, input_dict)
value, shifts_grad = self.decode(input_dict['voxel_shifts0'], code)
result = {
'value': value,
'shifts_grad': shifts_grad,
# 'code': code,
}
debug_info = {}
return result, debug_info
@tf.function
def unet(self, feats1, input_dict, keep_threshold=1):
"""Forward pass through the unet. Excludes aggregation and decode."""
neighbors = []
for i in range(5):
neighbors.append({
'neighbors_index':
input_dict['neighbors_index{}'.format(i)],
'neighbors_kernel_index':
input_dict['neighbors_kernel_index{}'.format(i)],
'neighbors_row_splits':
input_dict['neighbors_row_splits{}'.format(i)],
})
neighbors_down = []
for i in range(4):
num_points = tf.shape(input_dict['voxel_centers{}'.format(i + 1)],
out_type=tf.int64)[0]
ans = ml3d.ops.invert_neighbors_list(
num_points, input_dict['up_neighbors_index{}'.format(i)],
input_dict['up_neighbors_row_splits{}'.format(i)],
tf.cast(input_dict['up_neighbors_kernel_index{}'.format(i)],
dtype=tf.int32))
neighbors_down.append({
'neighbors_index':
ans.neighbors_index,
'neighbors_kernel_index':
tf.cast(ans.neighbors_attributes, dtype=tf.int16),
'neighbors_row_splits':
ans.neighbors_row_splits,
})
neighbors_up = []
for i in range(4):
neighbors_up.append({
'neighbors_index':
input_dict['up_neighbors_index{}'.format(i)],
'neighbors_kernel_index':
input_dict['up_neighbors_kernel_index{}'.format(i)],
'neighbors_row_splits':
input_dict['up_neighbors_row_splits{}'.format(i)],
})
if self.with_importance and keep_threshold < 1:
feats1, importance = feats1
nonzero_count = tf.math.count_nonzero(importance > 1e-3)
threshold_idx = tf.cast(tf.cast(nonzero_count, dtype=tf.float32) *
keep_threshold,
dtype=tf.int32)
threshold_value = tf.sort(importance,
direction='DESCENDING')[threshold_idx]
feats1 = tf.where((importance < threshold_value)[:, None],
tf.zeros_like(feats1), feats1)
importance = tf.where(importance < threshold_value,
tf.zeros_like(importance), importance)
feats1 = (feats1, importance)
if self.with_importance:
feats1, importance = feats1
else:
importance = None
feats2 = self.sparseconv_encblock0(feats1,
input_dict['voxel_centers0'],
neighbors[0],
importance=importance)
if self.with_importance == 'all':
feats2, importance = feats2
feats3, importance = self.sparseconv_down1(
feats2,
input_dict['voxel_centers0'],
input_dict['voxel_centers1'],
neighbors_down[0],
importance=importance)
feats4, importance = self.sparseconv_encblock1(
feats3,
input_dict['voxel_centers1'],
neighbors[1],
importance=importance)
feats5, importance = self.sparseconv_down2(
feats4,
input_dict['voxel_centers1'],
input_dict['voxel_centers2'],
neighbors_down[1],
importance=importance)
feats6, importance = self.sparseconv_encblock2(
feats5,
input_dict['voxel_centers2'],
neighbors[2],
importance=importance)
feats7, importance = self.sparseconv_down3(
feats6,
input_dict['voxel_centers2'],
input_dict['voxel_centers3'],
neighbors_down[2],
importance=importance)
feats8, importance = self.sparseconv_encblock3(
feats7,
input_dict['voxel_centers3'],
neighbors[3],
importance=importance)
feats9, importance = self.sparseconv_down3(
feats8,
input_dict['voxel_centers3'],
input_dict['voxel_centers4'],
neighbors_down[3],
importance=importance)
feats10, importance = self.sparseconv_encblock4(
feats9,
input_dict['voxel_centers4'],
neighbors[4],
importance=importance)
else:
if self.with_importance:
feats2, _ = feats2
feats3 = self.sparseconv_down1(feats2, input_dict['voxel_centers0'],
input_dict['voxel_centers1'],
neighbors_down[0])
feats4 = self.sparseconv_encblock1(feats3,
input_dict['voxel_centers1'],
neighbors[1])
feats5 = self.sparseconv_down2(feats4, input_dict['voxel_centers1'],
input_dict['voxel_centers2'],
neighbors_down[1])
feats6 = self.sparseconv_encblock2(feats5,
input_dict['voxel_centers2'],
neighbors[2])
feats7 = self.sparseconv_down3(feats6, input_dict['voxel_centers2'],
input_dict['voxel_centers3'],
neighbors_down[2])
feats8 = self.sparseconv_encblock3(feats7,
input_dict['voxel_centers3'],
neighbors[3])
feats9 = self.sparseconv_down3(feats8, input_dict['voxel_centers3'],
input_dict['voxel_centers4'],
neighbors_down[3])
feats10 = self.sparseconv_encblock4(feats9,
input_dict['voxel_centers4'],
neighbors[4])
feats11 = self.sparseconv_up3(feats10, input_dict['voxel_centers4'],
input_dict['voxel_centers3'],
neighbors_up[3])
if self.residual_skip_connection == 'all':
feats12 = feats11 + feats8
else:
feats12 = tf.concat([feats11, feats8], axis=-1)
feats13 = self.sparseconv_decblock3(feats12,
input_dict['voxel_centers3'],
neighbors[3])
feats14 = self.sparseconv_up2(feats13, input_dict['voxel_centers3'],
input_dict['voxel_centers2'],
neighbors_up[2])
if self.residual_skip_connection == 'all':
feats15 = feats14 + feats6
else:
feats15 = tf.concat([feats14, feats6], axis=-1)
feats16 = self.sparseconv_decblock2(feats15,
input_dict['voxel_centers2'],
neighbors[2])
feats17 = self.sparseconv_up1(feats16, input_dict['voxel_centers2'],
input_dict['voxel_centers1'],
neighbors_up[1])
if self.residual_skip_connection == 'all':
feats18 = feats17 + feats4
else:
feats18 = tf.concat([feats17, feats4], axis=-1)
feats19 = self.sparseconv_decblock1(feats18,
input_dict['voxel_centers1'],
neighbors[1])
feats20 = self.sparseconv_up0(feats19, input_dict['voxel_centers1'],
input_dict['voxel_centers0'],
neighbors_up[0])
if self.residual_skip_connection:
feats21 = feats20 + feats2
else:
feats21 = tf.concat([feats20, feats2], axis=-1)
code = self.sparseconv_decblock0(feats21, input_dict['voxel_centers0'],
neighbors[0])
return code
@tf.function
def aggregate(self, input_dict):
"""Aggregation step."""
feats = input_dict['feats']
nns = NNSResult(input_dict['aggregation_neighbors_index'],
input_dict['aggregation_neighbors_dist'],
input_dict['aggregation_row_splits'])
feats1 = self.cconv_block_in(
feats,
input_dict['points'],
input_dict['voxel_centers0'],
input_dict['voxel_sizes0'],
scale_compat=input_dict['aggregation_scale_compat'],
nns=nns)
return feats1
@tf.function
def decode(self, shifts, code):
"""Decode step and returns the gradient with respect to the shift.
Args:
shifts: Positions inside the voxels.
code: Output features of the unet for each voxel.
"""
new_code_shape = tf.concat(
[tf.shape(shifts)[:-1], tf.shape(code)[-1:]], axis=0)
code = tf.broadcast_to(code, new_code_shape)
decoder_input = tf.concat([shifts, code], axis=-1)
feats1 = self.dense_decoder1(decoder_input)
feats2 = self.dense_decoder2(feats1)
value = self.dense_decoder3(feats2)
shifts_grad = tf.gradients(value[..., 0], shifts)[0]
return tf.dtypes.cast(value, dtype=tf.float32), tf.dtypes.cast(
shifts_grad, dtype=tf.float32)
|
[
"tensorflow.keras.layers.Dense",
"open3d.ml.tf.layers.ContinuousConv",
"tensorflow.dtypes.cast",
"tensorflow.concat",
"tensorflow.sort",
"tensorflow.math.count_nonzero",
"tensorflow.zeros_like",
"tensorflow.cast",
"tensorflow.shape",
"collections.namedtuple",
"tensorflow.broadcast_to",
"tensorflow.gradients",
"models.common_tf.SpecialSparseConv",
"models.common_tf.window_poly6"
] |
[((758, 852), 'collections.namedtuple', 'namedtuple', (['"""NNSResult"""', "['neighbors_index', 'neighbors_distance', 'neighbors_row_splits']"], {}), "('NNSResult', ['neighbors_index', 'neighbors_distance',\n 'neighbors_row_splits'])\n", (768, 852), False, 'from collections import namedtuple\n'), ((18555, 18650), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(32 // d)'], {'name': '"""dense_decoder1"""', 'activation': 'activation', 'use_bias': '(True)'}), "(32 // d, name='dense_decoder1', activation=activation,\n use_bias=True)\n", (18576, 18650), True, 'import tensorflow as tf\n'), ((18833, 18928), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(32 // d)'], {'name': '"""dense_decoder2"""', 'activation': 'activation', 'use_bias': '(True)'}), "(32 // d, name='dense_decoder2', activation=activation,\n use_bias=True)\n", (18854, 18928), True, 'import tensorflow as tf\n'), ((19111, 19196), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2)'], {'name': '"""dense_decoder3"""', 'activation': 'None', 'use_bias': '(False)'}), "(2, name='dense_decoder3', activation=None, use_bias=False\n )\n", (19132, 19196), True, 'import tensorflow as tf\n'), ((29870, 29907), 'tensorflow.broadcast_to', 'tf.broadcast_to', (['code', 'new_code_shape'], {}), '(code, new_code_shape)\n', (29885, 29907), True, 'import tensorflow as tf\n'), ((29933, 29967), 'tensorflow.concat', 'tf.concat', (['[shifts, code]'], {'axis': '(-1)'}), '([shifts, code], axis=-1)\n', (29942, 29967), True, 'import tensorflow as tf\n'), ((1486, 1586), 'open3d.ml.tf.layers.ContinuousConv', 'ml3d.layers.ContinuousConv', ([], {'name': 'name', 'filters': 'output_channels', 'activation': 'activation'}), '(name=name, filters=output_channels, activation=\n activation, **kwargs)\n', (1512, 1586), True, 'import open3d.ml.tf as ml3d\n'), ((2981, 3017), 'models.common_tf.window_poly6', 'window_poly6', (['nns.neighbors_distance'], {}), '(nns.neighbors_distance)\n', (2993, 3017), False, 'from 
models.common_tf import SpecialSparseConv, window_poly6\n'), ((4191, 4269), 'models.common_tf.SpecialSparseConv', 'SpecialSparseConv', ([], {'name': 'name', 'filters': 'filters', 'activation': 'activation'}), '(name=name, filters=filters, activation=activation, **kwargs)\n', (4208, 4269), False, 'from models.common_tf import SpecialSparseConv, window_poly6\n'), ((10702, 10780), 'models.common_tf.SpecialSparseConv', 'SpecialSparseConv', ([], {'name': 'name', 'filters': 'filters', 'activation': 'activation'}), '(name=name, filters=filters, activation=activation, **kwargs)\n', (10719, 10780), False, 'from models.common_tf import SpecialSparseConv, window_poly6\n'), ((22113, 22154), 'tensorflow.math.count_nonzero', 'tf.math.count_nonzero', (['(importance > 0.001)'], {}), '(importance > 0.001)\n', (22134, 22154), True, 'import tensorflow as tf\n'), ((27056, 27093), 'tensorflow.concat', 'tf.concat', (['[feats11, feats8]'], {'axis': '(-1)'}), '([feats11, feats8], axis=-1)\n', (27065, 27093), True, 'import tensorflow as tf\n'), ((27605, 27642), 'tensorflow.concat', 'tf.concat', (['[feats14, feats6]'], {'axis': '(-1)'}), '([feats14, feats6], axis=-1)\n', (27614, 27642), True, 'import tensorflow as tf\n'), ((28154, 28191), 'tensorflow.concat', 'tf.concat', (['[feats17, feats4]'], {'axis': '(-1)'}), '([feats17, feats4], axis=-1)\n', (28163, 28191), True, 'import tensorflow as tf\n'), ((28694, 28731), 'tensorflow.concat', 'tf.concat', (['[feats20, feats2]'], {'axis': '(-1)'}), '([feats20, feats2], axis=-1)\n', (28703, 28731), True, 'import tensorflow as tf\n'), ((30131, 30166), 'tensorflow.gradients', 'tf.gradients', (['value[..., 0]', 'shifts'], {}), '(value[..., 0], shifts)\n', (30143, 30166), True, 'import tensorflow as tf\n'), ((30185, 30224), 'tensorflow.dtypes.cast', 'tf.dtypes.cast', (['value'], {'dtype': 'tf.float32'}), '(value, dtype=tf.float32)\n', (30199, 30224), True, 'import tensorflow as tf\n'), ((30226, 30271), 'tensorflow.dtypes.cast', 'tf.dtypes.cast', 
(['shifts_grad'], {'dtype': 'tf.float32'}), '(shifts_grad, dtype=tf.float32)\n', (30240, 30271), True, 'import tensorflow as tf\n'), ((8961, 8999), 'tensorflow.concat', 'tf.concat', (['[feats1a, feats1b]'], {'axis': '(-1)'}), '([feats1a, feats1b], axis=-1)\n', (8970, 8999), True, 'import tensorflow as tf\n'), ((12634, 12672), 'tensorflow.concat', 'tf.concat', (['[feats1a, feats1b]'], {'axis': '(-1)'}), '([feats1a, feats1b], axis=-1)\n', (12643, 12672), True, 'import tensorflow as tf\n'), ((22367, 22410), 'tensorflow.sort', 'tf.sort', (['importance'], {'direction': '"""DESCENDING"""'}), "(importance, direction='DESCENDING')\n", (22374, 22410), True, 'import tensorflow as tf\n'), ((22565, 22586), 'tensorflow.zeros_like', 'tf.zeros_like', (['feats1'], {}), '(feats1)\n', (22578, 22586), True, 'import tensorflow as tf\n'), ((22694, 22719), 'tensorflow.zeros_like', 'tf.zeros_like', (['importance'], {}), '(importance)\n', (22707, 22719), True, 'import tensorflow as tf\n'), ((21406, 21455), 'tensorflow.cast', 'tf.cast', (['ans.neighbors_attributes'], {'dtype': 'tf.int16'}), '(ans.neighbors_attributes, dtype=tf.int16)\n', (21413, 21455), True, 'import tensorflow as tf\n'), ((22190, 22230), 'tensorflow.cast', 'tf.cast', (['nonzero_count'], {'dtype': 'tf.float32'}), '(nonzero_count, dtype=tf.float32)\n', (22197, 22230), True, 'import tensorflow as tf\n'), ((29802, 29818), 'tensorflow.shape', 'tf.shape', (['shifts'], {}), '(shifts)\n', (29810, 29818), True, 'import tensorflow as tf\n'), ((29825, 29839), 'tensorflow.shape', 'tf.shape', (['code'], {}), '(code)\n', (29833, 29839), True, 'import tensorflow as tf\n')]
|
import pytest
from pathlib import Path
@pytest.fixture(scope="module")
def resources_path():
    """Directory of test resources located next to this conftest file."""
    here = Path(__file__).parent
    return here / "resources"
@pytest.fixture(scope="module")
def tasks_base_path(resources_path):
    """The ``tasks`` directory inside the test resources."""
    return resources_path.joinpath("tasks")
@pytest.fixture(scope="module")
def results_base_path(resources_path):
    """The ``results`` directory inside the test resources."""
    return resources_path.joinpath("results")
def pytest_addoption(parser):
    """Register the custom command line flags used by this test suite."""
    option_specs = (
        ("--runslow", "run slow tests"),
        ("--runintegration", "run integration tests"),
    )
    for flag, description in option_specs:
        parser.addoption(flag, action="store_true", default=False,
                         help=description)
def pytest_collection_modifyitems(config, items):
    """Skip slow/integration tests unless the matching flag was given."""
    run_slow = config.getoption("--runslow")
    run_integration = config.getoption("--runintegration")
    if run_slow and run_integration:
        # Both opt-ins supplied: nothing to skip.
        return
    if not run_slow:
        slow_skip = pytest.mark.skip(reason="need --runslow option to run")
        for test_item in items:
            if "slow" in test_item.keywords:
                test_item.add_marker(slow_skip)
    if not run_integration:
        integration_skip = pytest.mark.skip(
            reason="need --runintegration option to run"
        )
        for test_item in items:
            if "integration" in test_item.keywords:
                test_item.add_marker(integration_skip)
|
[
"pathlib.Path",
"pytest.mark.skip",
"pytest.fixture"
] |
[((42, 72), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (56, 72), False, 'import pytest\n'), ((145, 175), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (159, 175), False, 'import pytest\n'), ((252, 282), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (266, 282), False, 'import pytest\n'), ((854, 909), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""need --runslow option to run"""'}), "(reason='need --runslow option to run')\n", (870, 909), False, 'import pytest\n'), ((1097, 1159), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""need --runintegration option to run"""'}), "(reason='need --runintegration option to run')\n", (1113, 1159), False, 'import pytest\n'), ((106, 120), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (110, 120), False, 'from pathlib import Path\n')]
|
#from __future__ import print_function
import winreg as reg
from scapy.all import *
import logging
# Silence scapy's noisy runtime warnings; only ERROR and above are shown.
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
# Registry class key under which Windows lists network adapter driver entries
# (enumerated below to collect each adapter's 'DriverDesc').
ADAPTER_KEY = r'SYSTEM\CurrentControlSet\Control\Class\{4D36E972-E325-11CE-BFC1-08002BE10318}'
# Location of the OpenVPN executable and the current user's OpenVPN config.
OpenVpnPath = "C:\\Program Files\\OpenVPN\\bin\\openvpn.exe"
ConfigPath = os.environ['USERPROFILE']+"\\OpenVPN\\config"
# Hard-coded NordVPN profiles (TCP / UDP variants) of one developer machine.
ConfTcp= "C:\\Users\\quent\\Downloads\\ovpn\\ovpn_tcp\\uk298.nordvpn.com.tcp.ovpn"
ConfUdp= "C:\\Users\\quent\\Downloads\\ovpn\\ovpn_udp\\uk298.nordvpn.com.udp.ovpn"
# Registry key holding the network connection entries (declared but not
# used in the visible part of this script).
ConnectionKey = "SYSTEM\\CurrentControlSet\\Control\\Network\\{4D36E972-E325-11CE-BFC1-08002BE10318}"
# Filled below with each adapter's 'DriverDesc' string.
interfaces = []
# Enumerate the network adapter class key and collect every adapter's human
# readable 'DriverDesc' value into `interfaces`.
# BUG FIX: the original wrapped the whole loop in bare `except:` clauses,
# which silently swallowed every error (including KeyboardInterrupt).  We now
# catch only OSError: winreg.EnumKey raises it once the subkey index is
# exhausted, and QueryValueEx raises it when a value is missing.
with reg.OpenKey(reg.HKEY_LOCAL_MACHINE, ADAPTER_KEY) as adapters:
    for i in range(10000):
        try:
            key_name = reg.EnumKey(adapters, i)
        except OSError:
            # No more subkeys at this index: enumeration is complete.
            break
        with reg.OpenKey(adapters, key_name) as adapter:
            try:
                interfaces.append(reg.QueryValueEx(adapter, 'DriverDesc')[0])
            except OSError:
                # Entry without a 'DriverDesc' value; skip it.
                pass
print(interfaces[6])
# conf.color_theme = RastaTheme
# Name/description of the wifi adapter to sniff on.
conf.iface=interfaces[6]
"""
def packet_callback(packet):
if packet[TCP].payload:
pkt = str(packet[TCP].payload)
if packet[IP].dport == 80:
print("\n{} ----HTTP----> {}:{}:\n{}".format(packet[IP].src, packet[IP].dst, packet[IP].dport, str(bytes(packet[TCP].payload))))
sniff(filter="tcp", prn=packet_callback, store=0) """
pkt = []
# pkt = sniff(prn=lambda x: x.summary())
# print(pkt.summary())
packet = {}   # captured packet metadata, keyed by capture index
cpt_pkt = 0   # running packet counter


def packet_callback(pkt):
    """Record MAC/IP/TCP metadata of a sniffed packet into ``packet``.

    BUG FIX: the original re-created ``packet[cpt_pkt] = {}`` in every
    branch, so the TCP and IP fields were clobbered by the later
    assignments and only the MAC addresses survived.  A single dict is
    now created once and filled in.
    """
    global cpt_pkt
    info = packet[cpt_pkt] = {}
    if pkt.haslayer(TCP):
        info["source_Port"] = pkt[TCP].sport
        info["destination_Port"] = pkt[TCP].dport
        print("Port Src:", info["source_Port"], "Port Dst:", info["destination_Port"])
    if pkt.haslayer(IP):
        info["source_IP"] = pkt[IP].src
        info["destination_IP"] = pkt[IP].dst
        info["ttl"] = pkt.ttl
        print("IP Src:", info["source_IP"], "Ip Dst:", info["destination_IP"])
    info["source_MAC"] = pkt.src
    info["destination_MAC"] = pkt.dst
    print("Mac Src:", info["source_MAC"], "Mac Dst:", info["destination_MAC"])
    cpt_pkt += 1
# pkt.show()
# Capture 10 TCP packets, recording metadata via packet_callback.
pkt = sniff(count=10, prn=packet_callback, filter="tcp")
wrpcap('packets.pcap', pkt)
print(cpt_pkt)
print("boucle debut\n")
# BUG FIX: the original walked the dict with a manual index and `i =+1`,
# which rebinds i to +1 every pass, so the same entry was printed forever.
# Iterate the captured entries directly instead.
for cle, valeur in packet.items():
    for key, value in valeur.items():
        print(key, value)
print("boucle fin\n")
print("Packet 2\n")
print(packet[6])
print("Info packet 2 (IP:Port)\n")
# BUG FIX: ports are ints, so the original str concatenation raised
# TypeError; format the values instead.
print("{}:{}------>{}:{}".format(
    packet[4]["source_IP"], packet[4]["source_Port"],
    packet[4]["destination_IP"], packet[4]["destination_Port"]))
|
[
"winreg.QueryValueEx",
"logging.getLogger",
"winreg.EnumKey",
"winreg.OpenKey"
] |
[((670, 718), 'winreg.OpenKey', 'reg.OpenKey', (['reg.HKEY_LOCAL_MACHINE', 'ADAPTER_KEY'], {}), '(reg.HKEY_LOCAL_MACHINE, ADAPTER_KEY)\n', (681, 718), True, 'import winreg as reg\n'), ((102, 136), 'logging.getLogger', 'logging.getLogger', (['"""scapy.runtime"""'], {}), "('scapy.runtime')\n", (119, 136), False, 'import logging\n'), ((795, 819), 'winreg.EnumKey', 'reg.EnumKey', (['adapters', 'i'], {}), '(adapters, i)\n', (806, 819), True, 'import winreg as reg\n'), ((837, 868), 'winreg.OpenKey', 'reg.OpenKey', (['adapters', 'key_name'], {}), '(adapters, key_name)\n', (848, 868), True, 'import winreg as reg\n'), ((940, 979), 'winreg.QueryValueEx', 'reg.QueryValueEx', (['adapter', '"""DriverDesc"""'], {}), "(adapter, 'DriverDesc')\n", (956, 979), True, 'import winreg as reg\n')]
|
from PyFlow.Core.Common import *
from PyFlow.Core import FunctionLibraryBase
from PyFlow.Core import IMPLEMENT_NODE
# Pin options enabling Any-typed pins that also accept arrays and dicts.
PIN_ALLOWS_ANYTHING = {PinSpecifires.ENABLED_OPTIONS: PinOptions.AllowAny | PinOptions.ArraySupported | PinOptions.DictSupported}
class ActionLibrary(FunctionLibraryBase):
    '''Robot action nodes for PyFlow: greeting, gripper and motion
    primitives (level 0), basic skills (level 1) and composed skills
    (level 2).

    BUG FIX: every node carried a copy-pasted docstring describing
    getattr-based attribute access; the docstrings now describe what each
    node actually does, and the misplaced docstring in ``Place`` was moved
    above its first statement so Python treats it as a docstring.
    '''

    def __init__(self, packageName):
        super(ActionLibrary, self).__init__(packageName)

    @staticmethod
    @IMPLEMENT_NODE(returns=None, nodeType=NodeTypes.Callable, meta={NodeMeta.CATEGORY: 'ActionLibrary-L0', NodeMeta.KEYWORDS: []})
    def Hi(robot=('AnyPin', "Robot", PIN_ALLOWS_ANYTHING.copy())):
        '''Print a greeting on behalf of *robot*.'''
        print("Robot say: Hi!")

    @staticmethod
    @IMPLEMENT_NODE(returns=('AnyPin', None, PIN_ALLOWS_ANYTHING.copy()), meta={NodeMeta.CATEGORY: 'ActionLibrary-L0', NodeMeta.KEYWORDS: []})
    def set_grippers(robot=('AnyPin', "Robot", PIN_ALLOWS_ANYTHING.copy()),
                     value=('FloatPin', 'Value/0~1')
                     ):
        '''Set the gripper opening of *robot* to *value* (0..1 range).'''
        print("{} Gripper set to->{}".format(robot.name, value))
        return True

    @staticmethod
    @IMPLEMENT_NODE(returns=('AnyPin', None, PIN_ALLOWS_ANYTHING.copy()), meta={NodeMeta.CATEGORY: 'ActionLibrary-L0', NodeMeta.KEYWORDS: []})
    def arm_cart_move(robot=("AnyPin", "Robot", PIN_ALLOWS_ANYTHING.copy()),
                      arm=('StringPin', "Arm (l/r)"),
                      pos=('AnyPin', "pos=[x,y,z]"),
                      orn=('AnyPin', "orn=[a,b,c,g]"),
                      maxforce=('AnyPin', "maxforce=[Fx,Fy,Fz,Fr,Fp,Fy]"),
                      wait=('BoolPin', "wait=True/False")
                      ):
        '''Cartesian move of arm *arm* of *robot* to pose (*pos*, *orn*)
        under the given force limits.'''
        # BUG FIX: the format string had four placeholders for five
        # arguments, so every value after robot.name was shifted by one
        # and maxforce was silently dropped.
        print("Robot {} arm {} execute arm_cart_move to pos={} orn={} with maxforce={}".format(
            robot.name, arm, pos, orn, maxforce
        ))
        return True

    @staticmethod
    @IMPLEMENT_NODE(returns=('AnyPin', None, PIN_ALLOWS_ANYTHING.copy()), meta={NodeMeta.CATEGORY: 'ActionLibrary-L0', NodeMeta.KEYWORDS: []})
    def base_move(robot=("AnyPin", "Robot", PIN_ALLOWS_ANYTHING.copy())
                  ):
        '''Move the robot base (placeholder; no-op in this library).'''
        return True

    @staticmethod
    @IMPLEMENT_NODE(returns=('AnyPin', None, PIN_ALLOWS_ANYTHING.copy()), meta={NodeMeta.CATEGORY: 'ActionLibrary-L1', NodeMeta.KEYWORDS: []})
    def Pick(robot=("AnyPin", "Robot", PIN_ALLOWS_ANYTHING.copy()),
             execute=('ExecPin', "Execute")
             ):
        '''Level-1 pick skill (placeholder implementation).'''
        print("Do Pick")
        return True

    @staticmethod
    @IMPLEMENT_NODE(returns=('AnyPin', None, PIN_ALLOWS_ANYTHING.copy()), meta={NodeMeta.CATEGORY: 'ActionLibrary-L1', NodeMeta.KEYWORDS: []})
    def Place(robot=("AnyPin", "Robot", PIN_ALLOWS_ANYTHING.copy()),
              execute=('ExecPin', "Execute")
              ):
        '''Level-1 place skill (placeholder implementation).'''
        print("Do Place")
        return True

    @staticmethod
    @IMPLEMENT_NODE(returns=('AnyPin', None, PIN_ALLOWS_ANYTHING.copy()), meta={NodeMeta.CATEGORY: 'ActionLibrary-L1', NodeMeta.KEYWORDS: []})
    def Insert(robot=("AnyPin", "Robot", PIN_ALLOWS_ANYTHING.copy())
               ):
        '''Level-1 insert skill (placeholder; no-op in this library).'''
        return True

    @staticmethod
    @IMPLEMENT_NODE(returns=('AnyPin', None, PIN_ALLOWS_ANYTHING.copy()), meta={NodeMeta.CATEGORY: 'ActionLibrary-L1', NodeMeta.KEYWORDS: []})
    def Screw(robot=("AnyPin", "Robot", PIN_ALLOWS_ANYTHING.copy())
              ):
        '''Level-1 screw skill (placeholder; no-op in this library).'''
        return True

    @staticmethod
    @IMPLEMENT_NODE(returns=('AnyPin', None, PIN_ALLOWS_ANYTHING.copy()), meta={NodeMeta.CATEGORY: 'ActionLibrary-L2', NodeMeta.KEYWORDS: []})
    def PickInsertScrew(robot=("AnyPin", "Robot", PIN_ALLOWS_ANYTHING.copy())
                        ):
        '''Level-2 composed skill pick+insert+screw (placeholder).'''
        return True

    @staticmethod
    @IMPLEMENT_NODE(returns=('AnyPin', None, PIN_ALLOWS_ANYTHING.copy()), meta={NodeMeta.CATEGORY: 'ActionLibrary-L2', NodeMeta.KEYWORDS: []})
    def PickInsert(robot=("AnyPin", "Robot", PIN_ALLOWS_ANYTHING.copy())
                   ):
        '''Level-2 composed skill pick+insert (placeholder).'''
        return True
|
[
"PyFlow.Core.IMPLEMENT_NODE"
] |
[((444, 575), 'PyFlow.Core.IMPLEMENT_NODE', 'IMPLEMENT_NODE', ([], {'returns': 'None', 'nodeType': 'NodeTypes.Callable', 'meta': "{NodeMeta.CATEGORY: 'ActionLibrary-L0', NodeMeta.KEYWORDS: []}"}), "(returns=None, nodeType=NodeTypes.Callable, meta={NodeMeta.\n CATEGORY: 'ActionLibrary-L0', NodeMeta.KEYWORDS: []})\n", (458, 575), False, 'from PyFlow.Core import IMPLEMENT_NODE\n')]
|
from cpt.packager import ConanMultiPackager, tools
if __name__ == "__main__":
builder = ConanMultiPackager(
reference="turtle/{}".format( tools.load("version.txt") )
)
builder.add_common_builds()
builder.run()
|
[
"cpt.packager.tools.load"
] |
[((152, 177), 'cpt.packager.tools.load', 'tools.load', (['"""version.txt"""'], {}), "('version.txt')\n", (162, 177), False, 'from cpt.packager import ConanMultiPackager, tools\n')]
|
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QMainWindow, QAction, QFileDialog, QCheckBox
class Window(QMainWindow):
    """Main application window.

    Builds the menu actions, the navigation buttons and hosts the widgets
    supplied by the controller's edit grid and comparison list.
    """

    def __init__(self, controller):
        """Build the whole GUI and show the window.

        Args:
            controller: Application controller; must expose ``edit_grid``,
                ``comparison_list`` and the load/next/run methods wired to
                the actions and buttons below.
        """
        super().__init__()
        self.controller = controller
        # setup menu actions (shortcuts, status tips and handlers)
        open_comparison_dir_action = QAction('Open Comparison Images Directory', self)
        open_comparison_dir_action.setShortcut('Ctrl+O')
        open_comparison_dir_action.setStatusTip('Open a folder of comparison images.')
        open_comparison_dir_action.triggered.connect(self.open_comparison_dir)
        open_dataset_action = QAction('Open Dataset', self)
        open_dataset_action.setShortcut('Ctrl+D')
        open_dataset_action.setStatusTip('Open a folder of images to work on.')
        open_dataset_action.triggered.connect(self.open_dataset)
        exit_action = QAction('Exit', self)
        exit_action.setShortcut('Ctrl+Q')
        exit_action.setStatusTip('Exit application')
        exit_action.triggered.connect(self.close)
        # setup gui elements: a vertical layout holding the work area
        # (secondary_h_layout) above the navigation button row
        self.core_widget = QtWidgets.QWidget()
        self.core_v_layout = QtWidgets.QVBoxLayout()
        self.core_widget.setLayout(self.core_v_layout)
        self.secondary_h_layout = QtWidgets.QHBoxLayout()
        self.secondary_h_layout.addStretch(0)
        self.core_v_layout.addLayout(self.secondary_h_layout)
        self.navigation = QtWidgets.QHBoxLayout()
        # NOTE: the GPU checkbox is created but currently not added to any
        # layout (see the commented-out addWidget call below).
        self.use_gpu_checkbox = QCheckBox('Use GPU')
        self.use_gpu_checkbox.setChecked(True)
        # self.navigation.addWidget(self.use_gpu_checkbox)
        self.run_lpips = QtWidgets.QPushButton('Run lpips')
        self.run_lpips.clicked.connect(self.run_lpips_func)
        self.navigation.addWidget(self.run_lpips)
        self.next_image = QtWidgets.QPushButton('Next Image')
        self.next_image.clicked.connect(self.next_image_func)
        self.navigation.addWidget(self.next_image)
        self.core_v_layout.addLayout(self.navigation)
        self.statusBar()
        menu_bar = self.menuBar()
        file_menu = menu_bar.addMenu('&File')
        file_menu.addAction(open_comparison_dir_action)
        file_menu.addAction(open_dataset_action)
        file_menu.addAction(exit_action)
        # setup edit grid and comparison list (widgets owned by the controller)
        self.controller.edit_grid.create_q_widgets()
        self.controller.edit_grid.set_grid_parent(self.secondary_h_layout)
        self.controller.comparison_list.create_q_widgets()
        self.controller.comparison_list.set_parent(self.secondary_h_layout)
        # finish setting up gui
        self.setCentralWidget(self.core_widget)
        self.setGeometry(0, 0, 1024, 768)
        self.setWindowTitle('Main window')
        self.show()

    def open_comparison_dir(self):
        """Ask for a directory and load the comparison images from it."""
        dir_name = QFileDialog.getExistingDirectory(self, 'Open Comparison Images Directory')
        self.controller.load_comparison_images(dir_name)

    def open_dataset(self):
        """Ask for a directory and load the working dataset from it."""
        dir_name = QFileDialog.getExistingDirectory(self, 'Open Dataset')
        self.controller.load_dataset(dir_name)

    def next_image_func(self):
        """Advance the controller to the next image."""
        self.controller.next_image()

    def run_lpips_func(self):
        """Trigger the controller's LPIPS computation."""
        self.controller.run_lpips()
|
[
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QFileDialog.getExistingDirectory",
"PyQt5.QtWidgets.QHBoxLayout",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtWidgets.QCheckBox",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtWidgets.QAction"
] |
[((294, 343), 'PyQt5.QtWidgets.QAction', 'QAction', (['"""Open Comparison Images Directory"""', 'self'], {}), "('Open Comparison Images Directory', self)\n", (301, 343), False, 'from PyQt5.QtWidgets import QMainWindow, QAction, QFileDialog, QCheckBox\n'), ((598, 627), 'PyQt5.QtWidgets.QAction', 'QAction', (['"""Open Dataset"""', 'self'], {}), "('Open Dataset', self)\n", (605, 627), False, 'from PyQt5.QtWidgets import QMainWindow, QAction, QFileDialog, QCheckBox\n'), ((846, 867), 'PyQt5.QtWidgets.QAction', 'QAction', (['"""Exit"""', 'self'], {}), "('Exit', self)\n", (853, 867), False, 'from PyQt5.QtWidgets import QMainWindow, QAction, QFileDialog, QCheckBox\n'), ((1070, 1089), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (1087, 1089), False, 'from PyQt5 import QtWidgets\n'), ((1119, 1142), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (1140, 1142), False, 'from PyQt5 import QtWidgets\n'), ((1233, 1256), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (1254, 1256), False, 'from PyQt5 import QtWidgets\n'), ((1392, 1415), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (1413, 1415), False, 'from PyQt5 import QtWidgets\n'), ((1449, 1469), 'PyQt5.QtWidgets.QCheckBox', 'QCheckBox', (['"""Use GPU"""'], {}), "('Use GPU')\n", (1458, 1469), False, 'from PyQt5.QtWidgets import QMainWindow, QAction, QFileDialog, QCheckBox\n'), ((1602, 1636), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Run lpips"""'], {}), "('Run lpips')\n", (1623, 1636), False, 'from PyQt5 import QtWidgets\n'), ((1774, 1809), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Next Image"""'], {}), "('Next Image')\n", (1795, 1809), False, 'from PyQt5 import QtWidgets\n'), ((2783, 2857), 'PyQt5.QtWidgets.QFileDialog.getExistingDirectory', 'QFileDialog.getExistingDirectory', (['self', '"""Open Comparison Images Directory"""'], {}), "(self, 'Open Comparison Images Directory')\n", (2815, 
2857), False, 'from PyQt5.QtWidgets import QMainWindow, QAction, QFileDialog, QCheckBox\n'), ((2963, 3017), 'PyQt5.QtWidgets.QFileDialog.getExistingDirectory', 'QFileDialog.getExistingDirectory', (['self', '"""Open Dataset"""'], {}), "(self, 'Open Dataset')\n", (2995, 3017), False, 'from PyQt5.QtWidgets import QMainWindow, QAction, QFileDialog, QCheckBox\n')]
|
from __future__ import print_function, absolute_import
import collections
import os
from ctypes import (POINTER, c_char_p, c_longlong, c_int, c_size_t,
c_void_p, string_at, byref)
from . import ffi
from .module import parse_assembly
from .common import _decode_string, _encode_string
def get_default_triple():
    """
    Return the default target triple LLVM is configured to produce code for.
    """
    with ffi.OutputString() as out:
        ffi.lib.LLVMPY_GetDefaultTargetTriple(out)
        # Materialize the string before the output buffer is released.
        triple = str(out)
        return triple
def get_host_cpu_name():
    """
    Get the name of the host's CPU, suitable for using with
    :meth:`Target.create_target_machine()`.
    """
    with ffi.OutputString() as out:
        ffi.lib.LLVMPY_GetHostCPUName(out)
        # Materialize the string before the output buffer is released.
        cpu_name = str(out)
        return cpu_name
def create_target_data(strrep):
    """Create a :class:`TargetData` from the data-layout string *strrep*."""
    encoded = _encode_string(strrep)
    return TargetData(ffi.lib.LLVMPY_CreateTargetData(encoded))
class TargetData(ffi.ObjectRef):
    """
    Structured access to an LLVM data layout.

    Instances should be obtained through :func:`create_target_data`.
    """

    def __str__(self):
        if self._closed:
            return "<dead TargetData>"
        with ffi.OutputString() as out:
            ffi.lib.LLVMPY_CopyStringRepOfTargetData(self, out)
            # Materialize before the output buffer goes away.
            rep = str(out)
            return rep

    def _dispose(self):
        self._capi.LLVMPY_DisposeTargetData(self)

    def get_abi_size(self, ty):
        """
        Return the ABI size of LLVM type *ty*.
        """
        return ffi.lib.LLVMPY_ABISizeOfType(self, ty)

    def get_pointee_abi_size(self, ty):
        """
        Return the ABI size of the type pointed to by LLVM pointer type *ty*.
        """
        result = ffi.lib.LLVMPY_ABISizeOfElementType(self, ty)
        if result == -1:
            raise RuntimeError("Not a pointer type: %s" % (ty,))
        return result

    def get_pointee_abi_alignment(self, ty):
        """
        Return the minimum ABI alignment of the pointee of LLVM pointer
        type *ty*.
        """
        result = ffi.lib.LLVMPY_ABIAlignmentOfElementType(self, ty)
        if result == -1:
            raise RuntimeError("Not a pointer type: %s" % (ty,))
        return result

    def add_pass(self, pm):
        """
        Add a DataLayout pass based on this layout to PassManager *pm*.
        """
        ffi.lib.LLVMPY_AddTargetData(self, pm)
        # The PassManager takes ownership; we must never dispose it here.
        self._owned = True
# Relocation model names accepted by Target.create_target_machine().
RELOC = frozenset(['default', 'static', 'pic', 'dynamicnopic'])
# Code model names accepted by Target.create_target_machine().
CODEMODEL = frozenset(['default', 'jitdefault', 'small', 'kernel',
                       'medium', 'large'])
class Target(ffi.ObjectRef):
    """An LLVM compilation target, identified by its triple."""
    _triple = ''

    # LLVMGetTargetFromTriple() returns a persistent object, hence this
    # class defines no _dispose() method.

    @classmethod
    def from_default_triple(cls):
        """Create a Target for this process' default triple."""
        triple = get_default_triple()
        # For MCJIT under Windows, see http://lists.cs.uiuc.edu/pipermail/llvmdev/2013-December/068381.html
        if os.name == 'nt':
            triple += '-elf'
        return cls.from_triple(triple)

    @classmethod
    def from_triple(cls, triple):
        """Create a Target for the given *triple* string."""
        with ffi.OutputString() as outerr:
            ptr = ffi.lib.LLVMPY_GetTargetFromTriple(triple.encode('utf8'),
                                                     outerr)
            if not ptr:
                raise RuntimeError(str(outerr))
            obj = cls(ptr)
            obj._triple = triple
            return obj

    @property
    def name(self):
        return _decode_string(ffi.lib.LLVMPY_GetTargetName(self))

    @property
    def description(self):
        return _decode_string(ffi.lib.LLVMPY_GetTargetDescription(self))

    @property
    def triple(self):
        return self._triple

    def __str__(self):
        return "<Target {0} ({1})>".format(self.name, self.description)

    def create_target_machine(self, cpu='', features='',
                              opt=2, reloc='default', codemodel='jitdefault',
                              jitdebug=False, printmc=False):
        """Create a TargetMachine for this target with the given codegen
        options (CPU, feature string, optimization level, relocation and
        code models, JIT debug info and MC printing flags)."""
        assert 0 <= opt <= 3
        assert reloc in RELOC
        assert codemodel in CODEMODEL
        tm = ffi.lib.LLVMPY_CreateTargetMachine(self,
                                                _encode_string(self._triple),
                                                _encode_string(cpu),
                                                _encode_string(features),
                                                opt,
                                                _encode_string(reloc),
                                                _encode_string(codemodel),
                                                int(jitdebug),
                                                int(printmc),
                                                )
        if not tm:
            raise RuntimeError("Cannot create target machine")
        return TargetMachine(tm)
class TargetMachine(ffi.ObjectRef):
    """A configured code generator for one :class:`Target`."""

    def _dispose(self):
        self._capi.LLVMPY_DisposeTargetMachine(self)

    def add_analysis_passes(self, pm):
        """
        Register this target machine's analysis passes with PassManager *pm*.
        """
        ffi.lib.LLVMPY_AddAnalysisPasses(self, pm)

    def emit_object(self, module):
        """
        Compile *module* to a native object image and return it as a byte
        string suitable for use with the platform's linker.
        """
        return self._emit_to_memory(module, use_object=True)

    def emit_assembly(self, module):
        """
        Compile *module* to textual assembly and return it as a string.
        llvm.initialize_native_asmprinter() must have been called first.
        """
        return _decode_string(self._emit_to_memory(module, use_object=False))

    def _emit_to_memory(self, module, use_object=False):
        """Return the code generated for *module* as bytes.

        Args
        ----
        use_object : bool
            Emit object code if True, assembly code otherwise.
        """
        with ffi.OutputString() as outerr:
            buf = ffi.lib.LLVMPY_TargetMachineEmitToMemory(self, module,
                                                           int(use_object),
                                                           outerr)
            if not buf:
                raise RuntimeError(str(outerr))
        try:
            start = ffi.lib.LLVMPY_GetBufferStart(buf)
            size = ffi.lib.LLVMPY_GetBufferSize(buf)
            return string_at(start, size)
        finally:
            # Always release the LLVM memory buffer.
            ffi.lib.LLVMPY_DisposeMemoryBuffer(buf)

    @property
    def target_data(self):
        td = TargetData(ffi.lib.LLVMPY_GetTargetMachineData(self))
        td._owned = True
        return td
def create_target_library_info(triple):
    """Create a :class:`TargetLibraryInfo` for the given *triple* string."""
    ptr = ffi.lib.LLVMPY_CreateTargetLibraryInfo(_encode_string(triple))
    return TargetLibraryInfo(ptr)
class TargetLibraryInfo(ffi.ObjectRef):
    """
    Wraps an LLVM TargetLibraryInfo.  Create instances through
    :func:`create_target_library_info`.
    """

    def _dispose(self):
        self._capi.LLVMPY_DisposeTargetLibraryInfo(self)

    def add_pass(self, pm):
        """
        Add this library info as a pass to PassManager *pm*.
        """
        ffi.lib.LLVMPY_AddTargetLibraryInfo(self, pm)
        # Ownership transfers to the PassManager; never dispose it here.
        self._owned = True

    def disable_all(self):
        """
        Disable all "builtin" functions.
        """
        ffi.lib.LLVMPY_DisableAllBuiltins(self)

    def get_libfunc(self, name):
        """
        Get the library function *name*. NameError is raised if not found.
        """
        lf = c_int()
        found = ffi.lib.LLVMPY_GetLibFunc(self, _encode_string(name),
                                          byref(lf))
        if not found:
            raise NameError("LibFunc '{name}' not found".format(name=name))
        return LibFunc(name=name, identity=lf.value)

    def set_unavailable(self, libfunc):
        """
        Mark the given library function (*libfunc*) as unavailable.
        """
        ffi.lib.LLVMPY_SetUnavailableLibFunc(self, libfunc.identity)
# Value type returned by TargetLibraryInfo.get_libfunc(): the function's
# name plus LLVM's integer identity for it.
LibFunc = collections.namedtuple("LibFunc", ["identity", "name"])
# ============================================================================
# FFI
# ctypes prototypes (argtypes/restype) for the llvmlite C wrapper entry
# points used by the classes and functions above.

# Triple / host CPU queries
ffi.lib.LLVMPY_GetDefaultTargetTriple.argtypes = [POINTER(c_char_p)]
ffi.lib.LLVMPY_GetHostCPUName.argtypes = [POINTER(c_char_p)]
# TargetData lifecycle and layout queries
ffi.lib.LLVMPY_CreateTargetData.argtypes = [c_char_p]
ffi.lib.LLVMPY_CreateTargetData.restype = ffi.LLVMTargetDataRef
ffi.lib.LLVMPY_CopyStringRepOfTargetData.argtypes = [
    ffi.LLVMTargetDataRef,
    POINTER(c_char_p),
]
ffi.lib.LLVMPY_DisposeTargetData.argtypes = [
    ffi.LLVMTargetDataRef,
]
ffi.lib.LLVMPY_AddTargetData.argtypes = [ffi.LLVMTargetDataRef,
                                         ffi.LLVMPassManagerRef]
ffi.lib.LLVMPY_ABISizeOfType.argtypes = [ffi.LLVMTargetDataRef,
                                         ffi.LLVMTypeRef]
ffi.lib.LLVMPY_ABISizeOfType.restype = c_longlong
ffi.lib.LLVMPY_ABISizeOfElementType.argtypes = [ffi.LLVMTargetDataRef,
                                                ffi.LLVMTypeRef]
ffi.lib.LLVMPY_ABISizeOfElementType.restype = c_longlong
ffi.lib.LLVMPY_ABIAlignmentOfElementType.argtypes = [ffi.LLVMTargetDataRef,
                                                     ffi.LLVMTypeRef]
ffi.lib.LLVMPY_ABIAlignmentOfElementType.restype = c_longlong
# Target lookup and description
ffi.lib.LLVMPY_GetTargetFromTriple.argtypes = [c_char_p, POINTER(c_char_p)]
ffi.lib.LLVMPY_GetTargetFromTriple.restype = ffi.LLVMTargetRef
ffi.lib.LLVMPY_GetTargetName.argtypes = [ffi.LLVMTargetRef]
ffi.lib.LLVMPY_GetTargetName.restype = c_char_p
ffi.lib.LLVMPY_GetTargetDescription.argtypes = [ffi.LLVMTargetRef]
ffi.lib.LLVMPY_GetTargetDescription.restype = c_char_p
# NOTE(review): Target.create_target_machine() above passes two extra int
# flags (jitdebug, printmc) beyond the seven argtypes declared here; ctypes
# forwards surplus positional arguments with default conversion -- confirm
# this matches the C prototype.
ffi.lib.LLVMPY_CreateTargetMachine.argtypes = [
    ffi.LLVMTargetRef,
    # Triple
    c_char_p,
    # CPU
    c_char_p,
    # Features
    c_char_p,
    # OptLevel
    c_int,
    # Reloc
    c_char_p,
    # CodeModel
    c_char_p,
]
ffi.lib.LLVMPY_CreateTargetMachine.restype = ffi.LLVMTargetMachineRef
ffi.lib.LLVMPY_DisposeTargetMachine.argtypes = [ffi.LLVMTargetMachineRef]
ffi.lib.LLVMPY_AddAnalysisPasses.argtypes = [
    ffi.LLVMTargetMachineRef,
    ffi.LLVMPassManagerRef,
]
# Code emission and LLVM memory buffer access
ffi.lib.LLVMPY_TargetMachineEmitToMemory.argtypes = [
    ffi.LLVMTargetMachineRef,
    ffi.LLVMModuleRef,
    c_int,
    POINTER(c_char_p),
]
ffi.lib.LLVMPY_TargetMachineEmitToMemory.restype = ffi.LLVMMemoryBufferRef
ffi.lib.LLVMPY_GetBufferStart.argtypes = [ffi.LLVMMemoryBufferRef]
ffi.lib.LLVMPY_GetBufferStart.restype = c_void_p
ffi.lib.LLVMPY_GetBufferSize.argtypes = [ffi.LLVMMemoryBufferRef]
ffi.lib.LLVMPY_GetBufferSize.restype = c_size_t
ffi.lib.LLVMPY_DisposeMemoryBuffer.argtypes = [ffi.LLVMMemoryBufferRef]
# TargetLibraryInfo lifecycle and queries
ffi.lib.LLVMPY_CreateTargetLibraryInfo.argtypes = [c_char_p]
ffi.lib.LLVMPY_CreateTargetLibraryInfo.restype = ffi.LLVMTargetLibraryInfoRef
ffi.lib.LLVMPY_DisposeTargetLibraryInfo.argtypes = [
    ffi.LLVMTargetLibraryInfoRef,
]
ffi.lib.LLVMPY_AddTargetLibraryInfo.argtypes = [
    ffi.LLVMTargetLibraryInfoRef,
    ffi.LLVMPassManagerRef,
]
ffi.lib.LLVMPY_DisableAllBuiltins.argtypes = [
    ffi.LLVMTargetLibraryInfoRef,
]
ffi.lib.LLVMPY_GetLibFunc.argtypes = [
    ffi.LLVMTargetLibraryInfoRef,
    c_char_p,
    POINTER(c_int),
]
ffi.lib.LLVMPY_GetLibFunc.restype = c_int
ffi.lib.LLVMPY_SetUnavailableLibFunc.argtypes = [
    ffi.LLVMTargetLibraryInfoRef,
    c_int,
]
ffi.lib.LLVMPY_GetTargetMachineData.argtypes = [
    ffi.LLVMTargetMachineRef,
]
ffi.lib.LLVMPY_GetTargetMachineData.restype = ffi.LLVMTargetDataRef
|
[
"ctypes.c_int",
"ctypes.string_at",
"ctypes.byref",
"collections.namedtuple",
"ctypes.POINTER"
] |
[((8017, 8072), 'collections.namedtuple', 'collections.namedtuple', (['"""LibFunc"""', "['identity', 'name']"], {}), "('LibFunc', ['identity', 'name'])\n", (8039, 8072), False, 'import collections\n'), ((8210, 8227), 'ctypes.POINTER', 'POINTER', (['c_char_p'], {}), '(c_char_p)\n', (8217, 8227), False, 'from ctypes import POINTER, c_char_p, c_longlong, c_int, c_size_t, c_void_p, string_at, byref\n'), ((8272, 8289), 'ctypes.POINTER', 'POINTER', (['c_char_p'], {}), '(c_char_p)\n', (8279, 8289), False, 'from ctypes import POINTER, c_char_p, c_longlong, c_int, c_size_t, c_void_p, string_at, byref\n'), ((8496, 8513), 'ctypes.POINTER', 'POINTER', (['c_char_p'], {}), '(c_char_p)\n', (8503, 8513), False, 'from ctypes import POINTER, c_char_p, c_longlong, c_int, c_size_t, c_void_p, string_at, byref\n'), ((9357, 9374), 'ctypes.POINTER', 'POINTER', (['c_char_p'], {}), '(c_char_p)\n', (9364, 9374), False, 'from ctypes import POINTER, c_char_p, c_longlong, c_int, c_size_t, c_void_p, string_at, byref\n'), ((10282, 10299), 'ctypes.POINTER', 'POINTER', (['c_char_p'], {}), '(c_char_p)\n', (10289, 10299), False, 'from ctypes import POINTER, c_char_p, c_longlong, c_int, c_size_t, c_void_p, string_at, byref\n'), ((11203, 11217), 'ctypes.POINTER', 'POINTER', (['c_int'], {}), '(c_int)\n', (11210, 11217), False, 'from ctypes import POINTER, c_char_p, c_longlong, c_int, c_size_t, c_void_p, string_at, byref\n'), ((7544, 7551), 'ctypes.c_int', 'c_int', ([], {}), '()\n', (7549, 7551), False, 'from ctypes import POINTER, c_char_p, c_longlong, c_int, c_size_t, c_void_p, string_at, byref\n'), ((6365, 6389), 'ctypes.string_at', 'string_at', (['bufptr', 'bufsz'], {}), '(bufptr, bufsz)\n', (6374, 6389), False, 'from ctypes import POINTER, c_char_p, c_longlong, c_int, c_size_t, c_void_p, string_at, byref\n'), ((7662, 7671), 'ctypes.byref', 'byref', (['lf'], {}), '(lf)\n', (7667, 7671), False, 'from ctypes import POINTER, c_char_p, c_longlong, c_int, c_size_t, c_void_p, string_at, byref\n')]
|
#!/usr/bin/env python
import socket
class Socket:
    '''Thin wrapper around a TCP socket (from python.org docs).
    demonstration class only -- coded for clarity, not efficiency

    NOTE(review): ``MSGLEN`` used by mysend()/myreceive() is not defined in
    this file -- presumably a module-level protocol constant; confirm.
    NOTE(review): the ``''`` comparison and ``''.join`` are Python 2 style;
    under Python 3 recv() returns bytes, so these would need ``b''`` --
    verify the target Python version before relying on myreceive().
    '''
    def __init__(self, sock=None):
        # Create a fresh TCP socket unless an existing one is supplied
        # (e.g. a connected socket returned by accept()).
        if sock is None:
            self.sock = socket.socket(
                socket.AF_INET, socket.SOCK_STREAM)
        else:
            self.sock = sock
    def connect(self, host, port):
        # Connect to a remote listener at (host, port).
        self.sock.connect((host, port))
    def mysend(self, msg):
        # Loop because send() may transmit fewer bytes than requested;
        # a return of 0 means the peer closed the connection.
        totalsent = 0
        while totalsent < MSGLEN:
            sent = self.sock.send(msg[totalsent:])
            if sent == 0:
                raise RuntimeError("socket connection broken")
            totalsent = totalsent + sent
    def myreceive(self):
        # Accumulate chunks until a full MSGLEN-byte message has arrived;
        # an empty recv() result means the peer closed the connection.
        chunks = []
        bytes_recd = 0
        while bytes_recd < MSGLEN:
            chunk = self.sock.recv(min(MSGLEN - bytes_recd, 2048))
            if chunk == '':
                raise RuntimeError("socket connection broken")
            chunks.append(chunk)
            bytes_recd = bytes_recd + len(chunk)
        return ''.join(chunks)
class Subject:
    """Minimal observer-pattern subject.

    Observers registered via attach() have their ``update(subject)`` method
    called on every notify(); the optional *modifier* argument lets the
    observer that caused the change skip its own notification.
    """
    def __init__(self):
        # Registered observers, in attachment order.
        self._observers = []
    def attach(self, observer):
        """Register *observer* unless it is already registered
        (`Subject.addObserver()` in Sam's java)."""
        if observer not in self._observers:
            self._observers.append(observer)
    def detach(self, observer):
        """Unregister *observer*; unknown observers are silently ignored
        (Sam's java has no `Subject.removeObserver()`)."""
        if observer in self._observers:
            self._observers.remove(observer)
    def notify(self, modifier=None):
        """Call ``update(self)`` on every observer except *modifier*
        (`Subject.notifyObserver()` in Sam's java)."""
        for registered in self._observers:
            if modifier == registered:
                continue
            registered.update(self)
class Gamer(Subject):
    """
    The Gamer class defines methods for both meta-gaming and move selection in a
    pre-specified amount of time. The Gamer class is based on the <i>algorithm</i>
    design pattern.

    NOTE(review): this class was machine-translated from Java. ``Match``,
    ``GdlConstant`` and ``ArrayList`` are neither defined nor imported in this
    file, so evaluating the class attributes below would raise NameError at
    class-definition time -- confirm the intended imports.
    """
    match = Match()
    roleName = GdlConstant()
    def __init__(self):
        """ generated source for method __init__ """
        super(Gamer, self).__init__()
        observers = ArrayList()  # NOTE(review): unused local, leftover from the Java source
        # When not playing a match, the variables 'match'
        # and 'roleName' should be NULL. This indicates that
        # the player is available for starting a new match.
        self.match = None
        self.roleName = None
    # The following values are recommendations to the implementations
    # * for the minimum length of time to leave between the stated timeout
    # * and when you actually return from metaGame and selectMove. They are
    # * stored here so they can be shared amongst all Gamers.
    PREFERRED_METAGAME_BUFFER = 3900
    PREFERRED_PLAY_BUFFER = 1900
    # ==== The Gaming Algorithms ====
    # The stub methods below have empty bodies (they implicitly return None);
    # concrete gamers are expected to override them.
    def metaGame(self, timeout):
        """ generated source for method metaGame """
    def selectMove(self, timeout):
        """ generated source for method selectMove """
    # Note that the match's goal values will not necessarily be known when
    # * stop() is called, as we only know the final set of moves and haven't
    # * interpreted them yet. To get the final goal values, process the final
    # * moves of the game.
    #
    def stop(self):
        """ generated source for method stop """
        # Cleanly stop playing the match
    def abort(self):
        """ generated source for method abort """
        # Abruptly stop playing the match
    def preview(self, g, timeout):
        """ generated source for method preview """
        # Preview a game
    # ==== Gamer Profile and Configuration ====
    def getName(self):
        """ generated source for method getName """
    def getSpecies(self):
        """ generated source for method getSpecies """
        return None
    def isComputerPlayer(self):
        """ generated source for method isComputerPlayer """
        return True
    def getConfigPanel(self):
        """ generated source for method getConfigPanel """
        return EmptyConfigPanel()
    def getDetailPanel(self):
        """ generated source for method getDetailPanel """
        return EmptyDetailPanel()
    # ==== Accessors ====
    def getMatch(self):
        """ generated source for method getMatch """
        return self.match
    def setMatch(self, match):
        """ generated source for method setMatch """
        self.match = match
    def getRoleName(self):
        """ generated source for method getRoleName """
        return self.roleName
    def setRoleName(self, roleName):
        """ generated source for method setRoleName """
        self.roleName = roleName
class GamePlayer(Thread, Subject):
    '''A game-playing `threading.Thread` that listens to a `player.Subject`
    The `player.Subject` is associated with a match/game this GamePlayer is playing.

    NOTE(review): machine-translated from Java. ``Thread``, ``ServerSocket``,
    ``List``, ``System``, ``IOException``, ``HttpWriter``, ``GamerLogger`` and
    the ``in_``/``out``/``connection``/``player`` names used below are not
    defined in this file -- this class cannot run as-is; confirm the missing
    imports/ports before use.
    '''
    port = int()
    gamer = Gamer()
    listener = ServerSocket()
    observers = List()
    def __init__(self, port, gamer):
        """ generated source for method __init__ """
        super(GamePlayer, self).__init__()
        self.observers = ArrayList()
        self.listener = None
        # Try successive ports until a server socket can be bound.
        while self.listener == None:
            try:
                self.listener = ServerSocket(port)
            except IOException as ex:
                self.listener = None
                port += 1
                System.err.println("Failed to start gamer on port: " + (port - 1) + " trying port " + port)
        self.port = port
        self.gamer = gamer
    def getGamerPort(self):
        """ generated source for method getGamerPort """
        return self.port
    def getGamer(self):
        """ generated source for method getGamer """
        return self.gamer
    def run(self):
        """ generated source for method run """
        # NOTE(review): the accept/read half of this loop was lost in
        # translation -- ``in_`` is never assigned before use.
        while not isInterrupted():
            try:
                if 0 == len(in_):
                    raise IOException("Empty message received.")
                self.notifyObservers(PlayerReceivedMessageEvent(in_))
                GamerLogger.log("GamePlayer", "[Received at " + System.currentTimeMillis() + "] " + in_, GamerLogger.LOG_LEVEL_DATA_DUMP)
                HttpWriter.writeAsServer(connection, out)
                connection.close()
                self.notifyObservers(PlayerSentMessageEvent(out))
                GamerLogger.log("GamePlayer", "[Sent at " + System.currentTimeMillis() + "] " + out, GamerLogger.LOG_LEVEL_DATA_DUMP)
            except Exception as e:
                self.notifyObservers(PlayerDroppedPacketEvent())
    # Simple main function that starts a RandomGamer on a specified port.
    # It might make sense to factor this out into a separate app sometime,
    # so that the GamePlayer class doesn't have to import RandomGamer.
    @classmethod
    def main(cls, args):
        """ generated source for method main """
        # NOTE(review): ``player`` is never constructed here (lost in
        # translation), and the usage check fires when args ARE given.
        if len(args):
            System.err.println("Usage: GamePlayer <port>")
            System.exit(1)
        try:
            player.run()
        except NumberFormatException as e:
            System.err.println("Illegal port number: " + args[0])
            e.printStackTrace()
            System.exit(2)
        except IOException as e:
            System.err.println("IO Exception: " + e)
            e.printStackTrace()
            System.exit(3)
# Script entry point: forward the raw command-line arguments to GamePlayer.main.
if __name__ == '__main__':
    import sys
    GamePlayer.main(sys.argv)
|
[
"socket.socket"
] |
[((244, 293), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (257, 293), False, 'import socket\n')]
|
'''
Created on 2.10.2011
@author: xaralis
'''
from django import template
from django.conf import settings
from boris.services.models.core import service_list
register = template.Library()
@register.inclusion_tag('services/interface.html')
def render_service_interface(encounter):
    """Template tag: build the context for the per-encounter service interface.

    Exposes the encounter itself, the services already recorded on it, and
    the list of services applicable to the encounter's person.
    """
    performed = encounter.services.all()
    applicable = service_list(encounter.person)
    return {
        'encounter': encounter,
        'services_done': performed,
        'service_list': applicable,
    }
@register.inclusion_tag('services/inc/option.html')
def render_service_option(service, encounter):
    """Template tag: build the context for rendering one service option.

    ``is_default`` reflects whether the encounter's (downcast) person treats
    this service as one of their default services.
    """
    person = encounter.person.cast()
    return {
        'service': service,
        'is_default': person.is_default_service(service),
    }
|
[
"boris.services.models.core.service_list",
"django.template.Library"
] |
[((174, 192), 'django.template.Library', 'template.Library', ([], {}), '()\n', (190, 192), False, 'from django import template\n'), ((407, 437), 'boris.services.models.core.service_list', 'service_list', (['encounter.person'], {}), '(encounter.person)\n', (419, 437), False, 'from boris.services.models.core import service_list\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2020 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# These modules must be imported in order to load the commands into the ALL_COMMANDS table.
import pyocd.commands.commands
import pyocd.commands.values
from pyocd.commands.base import (
ALL_COMMANDS,
ValueBase,
)
# Maps a value's INFO['access'] flag to the human-readable phrase shown in
# the "Access" column of the generated values table.
ACCESS_DESC = {
    'r': "read-only",
    "w": "write-only",
    "rw": "read-write",
}
# Extra introductory text printed under selected group headings in the
# per-command documentation; groups without an entry get no extra text.
GROUP_DOCS = {
    'bringup': "These commands are meant to be used when starting up Commander in no-init mode. They are primarily useful for low-level debugging of debug infrastructure on a new chip.",
    'symbols': "These commands require an ELF to be set.",
}
def gen_command(info):
    """Print one HTML table row describing a command.

    info -- a command INFO dict with 'names' (list of aliases, primary name
    first), 'usage' (argument summary; skipped when empty) and 'help'
    (one-line description).
    """
    names = info['names']
    usage = info['usage']
    help_text = info['help']  # renamed: 'help' shadowed the builtin
    # (removed the unused 'extra_help' local -- it was never referenced)
    print("""<tr><td>""")
    # Every alias links to the anchor of the primary (first) name.
    name_docs = []
    for name in names:
        name_docs.append(f"""<a href="#{names[0]}"><tt>{name}</tt></a>""")
    print(",\n".join(name_docs))
    print("""</td><td>""")
    if usage:
        print(usage)
    print("""</td><td>""")
    print(help_text)
    print("""</td></tr>""")
    print()
def gen_value(info):
    """Print one HTML table row describing a value (a show/set setting).

    info -- a value INFO dict with 'names' (aliases, primary name first),
    'access' (one of 'r'/'w'/'rw', rendered via ACCESS_DESC) and 'help'.
    """
    names = info['names']
    access = info['access']
    help_text = info['help']  # renamed: 'help' shadowed the builtin
    # (removed the unused 'extra_help' local -- it was never referenced)
    print("""<tr><td>""")
    # Every alias links to the anchor of the primary (first) name.
    name_docs = []
    for name in names:
        name_docs.append(f"""<a href="#{names[0]}"><tt>{name}</tt></a>""")
    print(",\n".join(name_docs))
    print("""</td><td>""")
    print(ACCESS_DESC[access])
    print("""</td><td>""")
    print(help_text)
    print("""</td></tr>""")
    print()
def build_categories(commands):
    """Regroup command classes by their INFO['category'] field.

    commands -- mapping of group name to iterable of command classes.
    Returns a dict mapping category name to the list of classes in it,
    in original iteration order.
    """
    by_category = {}
    for command_classes in commands.values():
        for command in command_classes:
            category = command.INFO['category']
            if category not in by_category:
                by_category[category] = []
            by_category[category].append(command)
    return by_category
def gen_cmd_groups(commands):
    """Print all commands as HTML table rows, grouped under category headers.

    commands -- mapping of group name to iterable of command classes.
    Categories are emitted in sorted order; within a category, commands are
    sorted by their primary name.
    """
    categories = build_categories(commands)
    for group in sorted(categories.keys()):
        # Category header row spanning all three columns.
        # Fix: the original emitted "</b></tr>" leaving the <td> unclosed.
        print(f"""<tr><td colspan="3"><b>{group.capitalize()}</b></td></tr>
""")
        group_cmds = sorted(categories[group], key=lambda c: c.INFO['names'][0])
        for cmd in group_cmds:
            gen_command(cmd.INFO)
def gen_value_groups(commands):
    """Print HTML table rows for every value, sorted by group then by
    the value's primary name.

    commands -- mapping of group name to iterable of value classes.
    """
    for group_name in sorted(commands):
        sorted_values = sorted(commands[group_name], key=lambda v: v.INFO['names'][0])
        for value_class in sorted_values:
            gen_value(value_class.INFO)
def gen_command_docs(commands):
    """Print detailed Markdown reference sections for every command,
    grouped by category.

    commands -- mapping of group name to iterable of command classes.
    """
    # A single backslash: Markdown's hard line-break marker, appended inside
    # the f-strings below (written "\\" so the source stays a valid literal).
    nl = "\\"
    categories = build_categories(commands)
    for group in sorted(categories.keys()):
        # Optional introductory paragraph for this group.
        group_docs = GROUP_DOCS.get(group, '')
        print(f"""
### {group.capitalize()}""")
        if group_docs:
            print(group_docs)
        group_cmds = sorted(categories[group], key=lambda c: c.INFO['names'][0])
        for cmd in group_cmds:
            info = cmd.INFO
            print(f"""
##### `{info['names'][0]}`
""")
            if len(info['names']) > 1:
                print(f"""**Aliases**: {', '.join("`%s`" % n for n in info['names'][1:])} """ + nl)
            print(f"""**Usage**: {info['usage']} {nl}
{info['help']} {info.get('extra_help', '')}
""")
def get_all_command_classes():
    """Return the set of every command class registered in ALL_COMMANDS."""
    return {klass for group in ALL_COMMANDS.values() for klass in group}
def split_into_commands_and_values():
    """Partition every registered command class into plain commands and
    values (ValueBase subclasses), each grouped by INFO['group'].

    Returns a tuple (command_groups, value_groups), both dicts mapping a
    group name to a set of classes.
    """
    all_classes = get_all_command_classes()
    values = {klass for klass in all_classes if issubclass(klass, ValueBase)}
    commands = all_classes - values
    grouped_commands = {}
    grouped_values = {}
    for klass in commands:
        grouped_commands.setdefault(klass.INFO['group'], set()).add(klass)
    for klass in values:
        grouped_values.setdefault(klass.INFO['group'], set()).add(klass)
    return grouped_commands, grouped_values
def main():
    """Print the complete Markdown/HTML command and value reference to stdout."""
    all_cmds_by_group, all_values_by_group = split_into_commands_and_values()
    # Summary table of all commands.
    print("""
All commands
------------
<table>
<tr><th>Command</th><th>Arguments</th><th>Description</th></tr>
""")
    gen_cmd_groups(all_cmds_by_group)
    print("""
</table>
""")
    # Summary table of all values.
    print("""
All values
----------
Values represent a setting or piece of information that can be read and/or changed. They are accessed with
the [`show`](#show) and [`set`](#set) commands. The "Access" column of the table below shows whether the
command can be read, written, or both.
<table>
<tr><th>Value</th><th>Access</th><th>Description</th></tr>
""")
    gen_value_groups(all_values_by_group)
    print("""
</table>
""")
    # Detailed per-command sections.
    print("""
Commands
--------""")
    gen_command_docs(all_cmds_by_group)
# Script entry point: generate the full command/value reference on stdout.
if __name__ == '__main__':
    main()
|
[
"pyocd.commands.base.ALL_COMMANDS.values"
] |
[((3830, 3851), 'pyocd.commands.base.ALL_COMMANDS.values', 'ALL_COMMANDS.values', ([], {}), '()\n', (3849, 3851), False, 'from pyocd.commands.base import ALL_COMMANDS, ValueBase\n')]
|
# Copyright (c) 2011, <NAME>, <NAME>, TU Darmstadt
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the TU Darmstadt nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import traceback
from python_qt_binding.QtCore import qCritical, qDebug, QObject, Qt, qWarning, Signal, Slot
from python_qt_binding.QtWidgets import QDockWidget, QToolBar
from qt_gui.dock_widget import DockWidget
from qt_gui.dock_widget_title_bar import DockWidgetTitleBar
from qt_gui.icon_loader import get_icon
from qt_gui.window_changed_signaler import WindowChangedSignaler
class PluginHandler(QObject):
    """
    Base class for the bidirectional exchange between the framework and one `Plugin` instance.
    It utilizes a `PluginProvider` to load/unload the plugin and provides callbacks for the
    `PluginContext`.
    """
    # Emitted with (instance_id, new_title) when a dock widget title changes.
    label_updated = Signal(str, str)
    # Emitted with the instance_id when the last widget/toolbar has been removed.
    close_signal = Signal(str)
    # Emitted with the instance_id when a title bar "reload" button is clicked.
    reload_signal = Signal(str)
    # Emitted with the instance_id when a title bar "help" button is clicked.
    help_signal = Signal(str)
    # Internal: defers the "was the last widget closed?" check to the event loop.
    _defered_check_close = Signal()
    def __init__(self, parent, main_window, instance_id, application_context, container_manager,
                 argv=None):
        """Set up bookkeeping for one plugin instance; no plugin is loaded yet."""
        super(PluginHandler, self).__init__(parent)
        self.setObjectName('PluginHandler')
        self._main_window = main_window
        self._instance_id = instance_id
        self._application_context = application_context
        self._container_manager = container_manager
        self._argv = argv if argv else []
        self._minimized_dock_widgets_toolbar = None
        self._plugin_descriptor = None
        # Queued so the check runs after any pending widget add/remove events.
        self._defered_check_close.connect(self._check_close, Qt.QueuedConnection)
        self._plugin_provider = None
        self.__callback = None
        self.__instance_settings = None
        self._plugin_has_configuration = False
        # mapping of added widgets to their parent dock widget and WindowChangedSignaler
        self._widgets = {}
        self._toolbars = []
    def instance_id(self):
        """Return the PluginInstanceId of this handler."""
        return self._instance_id
    def argv(self):
        """Return the command line arguments passed to the plugin instance."""
        return self._argv
    def set_minimized_dock_widgets_toolbar(self, toolbar):
        """Set the toolbar that collects minimized (hidden) dock widgets."""
        self._minimized_dock_widgets_toolbar = toolbar
    def set_plugin_descriptor(self, plugin_descriptor):
        """Set the descriptor used for e.g. the window icon of added widgets."""
        self._plugin_descriptor = plugin_descriptor
    def load(self, plugin_provider, callback=None):
        """
        Load plugin.
        Completion is signaled asynchronously if a callback is passed.
        """
        self._plugin_provider = plugin_provider
        self.__callback = callback
        try:
            self._load()
        except Exception as e:
            self._emit_load_completed(e)
    def _load(self):
        # Must be implemented by subclasses.
        raise NotImplementedError
    def _emit_load_completed(self, exception=None):
        """Invoke the stored load callback (if any), cleaning up on failure."""
        if exception is not None:
            # Loading failed: discard any widgets/toolbars added so far.
            self._garbage_widgets_and_toolbars()
        if self.__callback is not None:
            callback = self.__callback
            self.__callback = None
            callback(self, exception)
        elif exception is not None:
            qCritical('PluginHandler.load() failed%s' %
                      (':\n%s' % str(exception) if not exception else ''))
    def _garbage_widgets_and_toolbars(self):
        """Remove and schedule deletion of all widgets and toolbars."""
        for widget in list(self._widgets.keys()):
            self.remove_widget(widget)
            self._delete_widget(widget)
        for toolbar in list(self._toolbars):
            self.remove_toolbar(toolbar)
            self._delete_toolbar(toolbar)
    def shutdown_plugin(self, callback):
        """
        Shut down the plugin and remove all added widgets.
        Completion is signaled asynchronously if a callback is passed.
        """
        self.__callback = callback
        try:
            self._shutdown_plugin()
        except Exception:
            qCritical('PluginHandler.shutdown_plugin() plugin "%s" raised an exception:\n%s' %
                      (str(self._instance_id), traceback.format_exc()))
            self.emit_shutdown_plugin_completed()
    def _shutdown_plugin(self):
        # Must be implemented by subclasses.
        raise NotImplementedError
    def emit_shutdown_plugin_completed(self):
        """Clean up widgets/toolbars and invoke the stored shutdown callback."""
        self._garbage_widgets_and_toolbars()
        if self.__callback is not None:
            callback = self.__callback
            self.__callback = None
            callback(self._instance_id)
    def _delete_widget(self, widget):
        """Schedule Qt deletion of a widget."""
        widget.deleteLater()
    def _delete_toolbar(self, toolbar):
        """Schedule Qt deletion of a toolbar."""
        toolbar.deleteLater()
    def unload(self, callback=None):
        """
        Unload plugin.
        Completion is signaled asynchronously if a callback is passed.
        """
        self.__callback = callback
        try:
            self._unload()
        except Exception:
            qCritical('PluginHandler.unload() plugin "%s" raised an exception:\n%s' %
                      (str(self._instance_id), traceback.format_exc()))
            self._emit_unload_completed()
    def _unload(self):
        # Must be implemented by subclasses.
        raise NotImplementedError
    def _emit_unload_completed(self):
        """Invoke the stored unload callback (if any)."""
        if self.__callback is not None:
            callback = self.__callback
            self.__callback = None
            callback(self._instance_id)
    def save_settings(self, plugin_settings, instance_settings, callback=None):
        """
        Save settings of the plugin and all dock widget title bars.
        Completion is signaled asynchronously if a callback is passed.
        """
        qDebug('PluginHandler.save_settings()')
        self.__instance_settings = instance_settings
        self.__callback = callback
        try:
            self._save_settings(plugin_settings, instance_settings)
        except Exception:
            qCritical('PluginHandler.save_settings() plugin "%s" raised an exception:\n%s' %
                      (str(self._instance_id), traceback.format_exc()))
            self.emit_save_settings_completed()
    def _save_settings(self, plugin_settings, instance_settings):
        # Must be implemented by subclasses.
        raise NotImplementedError
    def emit_save_settings_completed(self):
        """Save dock widget settings and invoke the stored callback (if any)."""
        qDebug('PluginHandler.emit_save_settings_completed()')
        self._call_method_on_all_dock_widgets('save_settings', self.__instance_settings)
        self.__instance_settings = None
        if self.__callback is not None:
            callback = self.__callback
            self.__callback = None
            callback(self._instance_id)
    def _call_method_on_all_dock_widgets(self, method_name, instance_settings):
        """Call method_name(settings) on every dock widget, with a settings
        sub-group derived from the dock widget's object name."""
        for dock_widget, _, _ in self._widgets.values():
            name = 'dock_widget' + \
                dock_widget.objectName().replace(self._instance_id.tidy_str(), '', 1)
            settings = instance_settings.get_settings(name)
            method = getattr(dock_widget, method_name)
            try:
                method(settings)
            except Exception:
                qCritical('PluginHandler._call_method_on_all_dock_widgets(%s) failed:\n%s' %
                          (method_name, traceback.format_exc()))
    def restore_settings(self, plugin_settings, instance_settings, callback=None):
        """
        Restore settings of the plugin and all dock widget title bars.
        Completion is signaled asynchronously if a callback is passed.
        """
        qDebug('PluginHandler.restore_settings()')
        self.__instance_settings = instance_settings
        self.__callback = callback
        try:
            self._restore_settings(plugin_settings, instance_settings)
        except Exception:
            qCritical('PluginHandler.restore_settings() plugin "%s" raised an exception:\n%s' %
                      (str(self._instance_id), traceback.format_exc()))
            self.emit_restore_settings_completed()
    def _restore_settings(self, plugin_settings, instance_settings):
        # Must be implemented by subclasses.
        raise NotImplementedError
    def emit_restore_settings_completed(self):
        """Restore dock widget settings and invoke the stored callback (if any)."""
        qDebug('PluginHandler.emit_restore_settings_completed()')
        # call after plugin has restored settings as it may spawn additional dock widgets
        self._call_method_on_all_dock_widgets('restore_settings', self.__instance_settings)
        self.__instance_settings = None
        if self.__callback is not None:
            callback = self.__callback
            self.__callback = None
            callback(self._instance_id)
    def _create_dock_widget(self):
        """Create a DockWidget with features, title bar and icon applied."""
        dock_widget = DockWidget(self._container_manager)
        self._update_dock_widget_features(dock_widget)
        self._update_title_bar(dock_widget)
        self._set_window_icon(dock_widget)
        return dock_widget
    def _update_dock_widget_features(self, dock_widget):
        """Toggle closable/movable/floatable features per application options."""
        if self._application_context.options.lock_perspective or \
                self._application_context.options.standalone_plugin:
            # dock widgets are not closable when perspective is locked or plugin is
            # running standalone
            features = dock_widget.features()
            dock_widget.setFeatures(features ^ QDockWidget.DockWidgetClosable)
        if self._application_context.options.freeze_layout:
            # dock widgets are not closable when perspective is locked or plugin is
            # running standalone
            features = dock_widget.features()
            dock_widget.setFeatures(
                features ^ (QDockWidget.DockWidgetMovable | QDockWidget.DockWidgetFloatable))
    def _update_title_bar(self, dock_widget, hide_help=False, hide_reload=False):
        """Ensure the dock widget has a DockWidgetTitleBar and wire its buttons."""
        title_bar = dock_widget.titleBarWidget()
        if title_bar is None:
            title_bar = DockWidgetTitleBar(
                dock_widget, self._application_context.qtgui_path,
                hide_title=self._application_context.options.hide_title)
            dock_widget.setTitleBarWidget(title_bar)
            # connect extra buttons
            title_bar.connect_close_button(self._remove_widget_by_dock_widget)
            title_bar.connect_button('help', self._emit_help_signal)
            if hide_help:
                title_bar.show_button('help', not hide_help)
            title_bar.connect_button('reload', self._emit_reload_signal)
            if hide_reload:
                title_bar.show_button('reload', not hide_reload)
            title_bar.connect_button('configuration', self._trigger_configuration)
            title_bar.show_button('configuration', self._plugin_has_configuration)
    def _set_window_icon(self, widget):
        """Apply the icon from the plugin descriptor's action attributes, if any."""
        if self._plugin_descriptor:
            action_attributes = self._plugin_descriptor.action_attributes()
            if 'icon' in action_attributes and action_attributes['icon'] is not None:
                base_path = self._plugin_descriptor.attributes().get('plugin_path')
                icon = get_icon(
                    action_attributes['icon'], action_attributes.get('icontype', None), base_path)
                widget.setWindowIcon(icon)
    def _update_title_bars(self):
        """Show the 'configuration' button on all title bars when applicable."""
        if self._plugin_has_configuration:
            for dock_widget, _, _ in self._widgets.values():
                title_bar = dock_widget.titleBarWidget()
                title_bar.show_button('configuration')
    def _remove_widget_by_dock_widget(self, dock_widget):
        """Remove the widget whose containing dock widget is dock_widget."""
        widget = [key for key, value in self._widgets.items() if value[0] == dock_widget][0]
        self.remove_widget(widget)
    def _emit_help_signal(self):
        """Forward the title bar 'help' click as help_signal(instance_id)."""
        self.help_signal.emit(str(self._instance_id))
    def _emit_reload_signal(self):
        """Forward the title bar 'reload' click as reload_signal(instance_id)."""
        self.reload_signal.emit(str(self._instance_id))
    def _trigger_configuration(self):
        # NOTE(review): `_plugin` is not set in this class -- presumably
        # assigned by a subclass during _load(); confirm.
        self._plugin.trigger_configuration()
    def _add_dock_widget(self, dock_widget, widget):
        """Put widget into dock_widget, add it to the main window and hook up
        signalers that track icon/title/visibility changes."""
        dock_widget.setWidget(widget)
        # every dock widget needs a unique name for save/restore geometry/state to work
        dock_widget.setObjectName(self._instance_id.tidy_str() + '__' + widget.objectName())
        self._add_dock_widget_to_main_window(dock_widget)
        signaler = WindowChangedSignaler(widget, widget)
        signaler.window_icon_changed_signal.connect(self._on_widget_icon_changed)
        signaler.window_title_changed_signal.connect(self._on_widget_title_changed)
        signaler2 = WindowChangedSignaler(dock_widget, dock_widget)
        signaler2.hide_signal.connect(self._on_dock_widget_hide)
        signaler2.show_signal.connect(self._on_dock_widget_show)
        signaler2.window_title_changed_signal.connect(self._on_dock_widget_title_changed)
        self._widgets[widget] = [dock_widget, signaler, signaler2]
        # trigger to update initial window icon and title
        signaler.emit_all()
        # trigger to update initial window state
        signaler2.emit_all()
    def _add_dock_widget_to_main_window(self, dock_widget):
        """Add dock_widget to the bottom dock area, warning on duplicate names."""
        if self._main_window is not None:
            # warn about dock_widget with same object name
            old_dock_widget = self._main_window.findChild(DockWidget, dock_widget.objectName())
            if old_dock_widget is not None:
                qWarning('PluginHandler._add_dock_widget_to_main_window() duplicate object name ' +
                         '"%s", assign unique object names before adding widgets!' %
                         dock_widget.objectName())
            self._main_window.addDockWidget(Qt.BottomDockWidgetArea, dock_widget)
    def _on_widget_icon_changed(self, widget):
        """Mirror the widget's window icon onto its dock widget."""
        dock_widget, _, _ = self._widgets[widget]
        dock_widget.setWindowIcon(widget.windowIcon())
    def _on_widget_title_changed(self, widget):
        """Mirror the widget's window title onto its dock widget."""
        dock_widget, _, _ = self._widgets[widget]
        dock_widget.setWindowTitle(widget.windowTitle())
    def _on_dock_widget_hide(self, dock_widget):
        """Park a hidden dock widget on the minimized-widgets toolbar."""
        if self._minimized_dock_widgets_toolbar:
            self._minimized_dock_widgets_toolbar.addDockWidget(dock_widget)
    def _on_dock_widget_show(self, dock_widget):
        """Remove a shown dock widget from the minimized-widgets toolbar."""
        if self._minimized_dock_widgets_toolbar:
            self._minimized_dock_widgets_toolbar.removeDockWidget(dock_widget)
    def _on_dock_widget_title_changed(self, dock_widget):
        """Forward a dock widget title change as label_updated(instance_id, title)."""
        self.label_updated.emit(str(self._instance_id), dock_widget.windowTitle())
    # pointer to QWidget must be used for PySide to work (at least with 1.0.1)
    @Slot('QWidget*')
    def remove_widget(self, widget):
        """Detach widget from its dock widget, disconnect the signalers and
        schedule the dock widget (not the widget itself) for deletion."""
        dock_widget, signaler, signaler2 = self._widgets[widget]
        self._widgets.pop(widget)
        if signaler is not None:
            signaler.window_icon_changed_signal.disconnect(self._on_widget_icon_changed)
            signaler.window_title_changed_signal.disconnect(self._on_widget_title_changed)
        if signaler2 is not None:
            # emit show signal to remove dock widget from minimized toolbar before removal
            signaler2.show_signal.emit(dock_widget)
            signaler2.hide_signal.disconnect(self._on_dock_widget_hide)
            signaler2.show_signal.disconnect(self._on_dock_widget_show)
        # remove dock widget from parent and delete later
        if self._main_window is not None:
            dock_widget.parent().removeDockWidget(dock_widget)
        # do not delete the widget, only the dock widget
        dock_widget.setParent(None)
        widget.setParent(None)
        dock_widget.deleteLater()
        # defer check for last widget closed to give plugin a chance to add
        # another widget right away
        self._defered_check_close.emit()
    def _add_toolbar(self, toolbar):
        """Add toolbar to the main window's top toolbar area, ensuring a unique,
        instance-prefixed object name."""
        # every toolbar needs a unique name for save/restore geometry/state to work
        toolbar_object_name = toolbar.objectName()
        prefix = self._instance_id.tidy_str() + '__'
        # when added, removed and readded the prefix should not be prepended multiple times
        if not toolbar_object_name.startswith(prefix):
            toolbar_object_name = prefix + toolbar_object_name
            toolbar.setObjectName(toolbar_object_name)
        if self._application_context.options.freeze_layout:
            toolbar.setMovable(False)
        self._toolbars.append(toolbar)
        if self._main_window is not None:
            # warn about toolbar with same object name
            old_toolbar = self._main_window.findChild(QToolBar, toolbar.objectName())
            if old_toolbar is not None:
                qWarning('PluginHandler._add_toolbar() duplicate object name "%s", '
                         'assign unique object names before adding toolbars!' %
                         toolbar.objectName())
            self._main_window.addToolBar(Qt.TopToolBarArea, toolbar)
    # pointer to QToolBar must be used for PySide to work (at least with 1.0.1)
    @Slot('QToolBar*')
    def remove_toolbar(self, toolbar):
        """Detach toolbar from its parent and schedule the close check."""
        self._toolbars.remove(toolbar)
        # detach toolbar from parent
        if toolbar.parent():
            toolbar.parent().removeToolBar(toolbar)
        # defer check for last widget closed to give plugin a chance to add
        # another widget right away
        self._defered_check_close.emit()
    def _check_close(self):
        # close plugin when no widgets or toolbars are left
        if len(self._widgets) + len(self._toolbars) == 0:
            self._emit_close_plugin()
    def _emit_close_plugin(self):
        """Announce via close_signal that this plugin instance should close."""
        self.close_signal.emit(str(self._instance_id))
|
[
"python_qt_binding.QtCore.Slot",
"python_qt_binding.QtCore.Signal",
"python_qt_binding.QtCore.qDebug",
"qt_gui.window_changed_signaler.WindowChangedSignaler",
"qt_gui.dock_widget_title_bar.DockWidgetTitleBar",
"traceback.format_exc",
"qt_gui.dock_widget.DockWidget"
] |
[((2234, 2250), 'python_qt_binding.QtCore.Signal', 'Signal', (['str', 'str'], {}), '(str, str)\n', (2240, 2250), False, 'from python_qt_binding.QtCore import qCritical, qDebug, QObject, Qt, qWarning, Signal, Slot\n'), ((2270, 2281), 'python_qt_binding.QtCore.Signal', 'Signal', (['str'], {}), '(str)\n', (2276, 2281), False, 'from python_qt_binding.QtCore import qCritical, qDebug, QObject, Qt, qWarning, Signal, Slot\n'), ((2302, 2313), 'python_qt_binding.QtCore.Signal', 'Signal', (['str'], {}), '(str)\n', (2308, 2313), False, 'from python_qt_binding.QtCore import qCritical, qDebug, QObject, Qt, qWarning, Signal, Slot\n'), ((2332, 2343), 'python_qt_binding.QtCore.Signal', 'Signal', (['str'], {}), '(str)\n', (2338, 2343), False, 'from python_qt_binding.QtCore import qCritical, qDebug, QObject, Qt, qWarning, Signal, Slot\n'), ((2371, 2379), 'python_qt_binding.QtCore.Signal', 'Signal', ([], {}), '()\n', (2377, 2379), False, 'from python_qt_binding.QtCore import qCritical, qDebug, QObject, Qt, qWarning, Signal, Slot\n'), ((15463, 15479), 'python_qt_binding.QtCore.Slot', 'Slot', (['"""QWidget*"""'], {}), "('QWidget*')\n", (15467, 15479), False, 'from python_qt_binding.QtCore import qCritical, qDebug, QObject, Qt, qWarning, Signal, Slot\n'), ((17840, 17857), 'python_qt_binding.QtCore.Slot', 'Slot', (['"""QToolBar*"""'], {}), "('QToolBar*')\n", (17844, 17857), False, 'from python_qt_binding.QtCore import qCritical, qDebug, QObject, Qt, qWarning, Signal, Slot\n'), ((6757, 6796), 'python_qt_binding.QtCore.qDebug', 'qDebug', (['"""PluginHandler.save_settings()"""'], {}), "('PluginHandler.save_settings()')\n", (6763, 6796), False, 'from python_qt_binding.QtCore import qCritical, qDebug, QObject, Qt, qWarning, Signal, Slot\n'), ((7359, 7413), 'python_qt_binding.QtCore.qDebug', 'qDebug', (['"""PluginHandler.emit_save_settings_completed()"""'], {}), "('PluginHandler.emit_save_settings_completed()')\n", (7365, 7413), False, 'from python_qt_binding.QtCore import qCritical, qDebug, 
QObject, Qt, qWarning, Signal, Slot\n'), ((8570, 8612), 'python_qt_binding.QtCore.qDebug', 'qDebug', (['"""PluginHandler.restore_settings()"""'], {}), "('PluginHandler.restore_settings()')\n", (8576, 8612), False, 'from python_qt_binding.QtCore import qCritical, qDebug, QObject, Qt, qWarning, Signal, Slot\n'), ((9190, 9247), 'python_qt_binding.QtCore.qDebug', 'qDebug', (['"""PluginHandler.emit_restore_settings_completed()"""'], {}), "('PluginHandler.emit_restore_settings_completed()')\n", (9196, 9247), False, 'from python_qt_binding.QtCore import qCritical, qDebug, QObject, Qt, qWarning, Signal, Slot\n'), ((9682, 9717), 'qt_gui.dock_widget.DockWidget', 'DockWidget', (['self._container_manager'], {}), '(self._container_manager)\n', (9692, 9717), False, 'from qt_gui.dock_widget import DockWidget\n'), ((13230, 13267), 'qt_gui.window_changed_signaler.WindowChangedSignaler', 'WindowChangedSignaler', (['widget', 'widget'], {}), '(widget, widget)\n', (13251, 13267), False, 'from qt_gui.window_changed_signaler import WindowChangedSignaler\n'), ((13454, 13501), 'qt_gui.window_changed_signaler.WindowChangedSignaler', 'WindowChangedSignaler', (['dock_widget', 'dock_widget'], {}), '(dock_widget, dock_widget)\n', (13475, 13501), False, 'from qt_gui.window_changed_signaler import WindowChangedSignaler\n'), ((10863, 10993), 'qt_gui.dock_widget_title_bar.DockWidgetTitleBar', 'DockWidgetTitleBar', (['dock_widget', 'self._application_context.qtgui_path'], {'hide_title': 'self._application_context.options.hide_title'}), '(dock_widget, self._application_context.qtgui_path,\n hide_title=self._application_context.options.hide_title)\n', (10881, 10993), False, 'from qt_gui.dock_widget_title_bar import DockWidgetTitleBar\n'), ((5268, 5290), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (5288, 5290), False, 'import traceback\n'), ((6186, 6208), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (6206, 6208), False, 'import traceback\n'), ((7132, 7154), 
'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (7152, 7154), False, 'import traceback\n'), ((8954, 8976), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (8974, 8976), False, 'import traceback\n'), ((8286, 8308), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (8306, 8308), False, 'import traceback\n')]
|
import sys
import pickle
import numpy as np
sys.path.append('./../')
sys.path.append('./../../')
from src.LocalGlobalAttentionModel.model import Model as parent_model
from .vel_param import VelParam as vel_param
from src.HMC.hmc import HMC
class Model(parent_model):
    """
    Scanpath model where each fixation is drawn from the image's static
    saliency map multiplied pointwise by a Gaussian centered on the previous
    fixation:

        p(z_t|z_{t-1}) = s(z_t) * n(z_t|z_{t-1}, xi)

    ``xi`` is a parameter object whose ``.value`` holds the pair of Gaussian
    variances (rows, cols).
    """
    def __init__(self, saliencies, xi):
        # saliencies: per-image saliency maps (stored by the parent class).
        # xi: Gaussian-variance parameter object (xi.value -> [var_rows, var_cols]).
        super().__init__(saliencies)
        self.xi = xi
        self.gammas = None  # lazily built by generate_gammas()
    def get_next_fix(self, im_ind, sub_ind, prev_fix, cur_fix, s_t):
        """
        Sample the next fixation given the current one from
        p(z_t|z_{t-1}) = s(z_t) * n(z_t|z_{t-1}, xi).

        :param im_ind: index of the current image
        :param sub_ind: subject index (unused by this model)
        :param prev_fix: previous fixation (unused by this model)
        :param cur_fix: coordinates of the current fixation
        :param s_t: saliency time series (unused by this model)
        :return: ([z_x, z_y] coordinates of the next fixation, 0); the second
                 element marks a "local" step and is always 0 for this model.
        """
        xi_val = self.xi.value
        mean = cur_fix
        # squared distance of every pixel from the current fixation, per axis
        rad_rows = (self.rows_grid - mean[0]) ** 2
        rad_cols = (self.cols_grid - mean[1]) ** 2
        # normal distribution over the entire image
        gauss = np.exp(- rad_rows / (2 * xi_val[0]) - rad_cols / (2 * xi_val[1])) / \
                (2 * np.pi * np.sqrt(xi_val[0] * xi_val[1]))
        prob = gauss * self.saliencies[im_ind]
        prob /= prob.sum()
        # choose a pixel in the image from the distribution defined above
        inds = np.random.choice(range(self.pixels_num), 1,
                                p=prob.flatten())  # choice uses the inverse transform method in 1d
        next_fix = np.unravel_index(inds, self.saliencies[im_ind].shape)
        next_fix = np.array([next_fix[0][0], next_fix[1][0]])
        return next_fix, 0
    def generate_gammas(self):
        """
        In this model gamma = 1 for each data point.

        An all-zeros array is stored per scanpath; sample() later shifts these
        values by -1 before handing them to the HMC sampler.
        """
        self.gammas = []
        for i in range(len(self.fix_dists_2)):
            self.gammas.append([])
            for s in range(len(self.fix_dists_2[i])):
                self.gammas[-1].append(np.zeros(self.fix_dists_2[i][s].shape[1]))
    def sample(self, num_samples, save_steps, file_path):
        """
        Generate samples from the posterior distribution of xi.

        Since there is no explicit form for the posterior distribution of xi
        an HMC sampler is used.  See paper for further information.

        :param num_samples: number of samples to be generated.
        :param save_steps: whether to save the chain
        :param file_path: path where to save the chain
        :return: list of length num_samples with samples of xi
        """
        if not self.gammas:
            self.generate_gammas()
        # HMC tuning values -- presumably delta is the step size and n the number
        # of leapfrog steps; confirm against src.HMC.hmc.
        vel = vel_param([0.1, 0.1])
        delta = 1.5
        n = 10
        m = num_samples
        # initiate an HMC instance
        hmc = HMC(self.xi, vel, delta, n, m)
        # flatten the per-scanpath gammas (shifted by -1) into one list
        gammas_xi = [[self.gammas[i][s].copy() - 1] for i in range(len(self.gammas)) for s in
                     range(len(self.gammas[i]))]
        # perform the sampling
        hmc.HMC(gammas_xi, self.saliencies, self.fix_dists_2, self.dist_mat_per_fix)
        samples_xi = hmc.get_samples()
        if save_steps:
            with open(file_path, 'wb') as f:
                pickle.dump([samples_xi], f)
        return samples_xi
    def calc_prob_local(self, *args):
        """
        This method calculates the probability of a local step which is always 0 in the case of this model.
        :return: 0
        """
        return 0
    def calc_prob_global(self, im_ind, fixs_dists_2, sal_ts, fixs, for_nss=False):
        """
        This method calculates the probability of a global step according to the local saliency model,
        for an entire scanpath.

        p(z_t|z_{t-1}) = s(z_t) * n(z_t|z_{t-1}, xi)

        :param im_ind: index of the image
        :param fixs_dists_2: an array of shape 3 x (T -1). see set_fix_dist_2 for description.
        :param sal_ts: time series of the saliency value for each fixation. Array of length T.
        :param fixs: fixation locations. Array of shape 2 x T
        :param for_nss: whether to standardize the density for NSS or not.
        :return: array of length T with the probability of each fixation
        """
        xi = self.xi.value
        # squared distances of every pixel from each fixation but the last
        radx = (self.rows_grid[:, :, np.newaxis] - fixs[im_ind][0][0, :-1]) ** 2
        rady = (self.cols_grid[:, :, np.newaxis] - fixs[im_ind][0][1, :-1]) ** 2
        gauss = np.exp(- radx / (2 * xi[0]) - rady / (2 * xi[1])) / (2 * np.pi * np.sqrt(xi[0] * xi[1]))
        prob_all_pixels = gauss * self.saliencies[im_ind][:, :, np.newaxis]
        if for_nss:
            # full normalized density over all pixels
            prob_global = prob_all_pixels / prob_all_pixels.sum(axis=(0, 1))
        else:
            # we assume here just one subject
            sub = 0
            X = fixs_dists_2[im_ind][sub]
            nominator_gauss = np.exp(- 0.5 * X[0] / xi[0] - 0.5 * X[1] / xi[1]) / \
                              (2 * np.pi * np.sqrt(xi[0] * xi[1]))
            nominator = nominator_gauss * sal_ts[im_ind][0][1:]
            prob_global = nominator / prob_all_pixels.sum(axis=(0, 1))
        return prob_global
    def calc_ros(self, *args):
        """
        This method calculates the probability of a local step. In this model it is always 0.
        :return: 0
        """
        return 0
|
[
"sys.path.append",
"pickle.dump",
"numpy.zeros",
"numpy.unravel_index",
"numpy.array",
"numpy.exp",
"numpy.sqrt",
"src.HMC.hmc.HMC"
] |
[((46, 70), 'sys.path.append', 'sys.path.append', (['"""./../"""'], {}), "('./../')\n", (61, 70), False, 'import sys\n'), ((71, 98), 'sys.path.append', 'sys.path.append', (['"""./../../"""'], {}), "('./../../')\n", (86, 98), False, 'import sys\n'), ((1755, 1808), 'numpy.unravel_index', 'np.unravel_index', (['inds', 'self.saliencies[im_ind].shape'], {}), '(inds, self.saliencies[im_ind].shape)\n', (1771, 1808), True, 'import numpy as np\n'), ((1828, 1870), 'numpy.array', 'np.array', (['[next_fix[0][0], next_fix[1][0]]'], {}), '([next_fix[0][0], next_fix[1][0]])\n', (1836, 1870), True, 'import numpy as np\n'), ((2998, 3028), 'src.HMC.hmc.HMC', 'HMC', (['self.xi', 'vel', 'delta', 'n', 'm'], {}), '(self.xi, vel, delta, n, m)\n', (3001, 3028), False, 'from src.HMC.hmc import HMC\n'), ((1297, 1361), 'numpy.exp', 'np.exp', (['(-rad_rows / (2 * xi_val[0]) - rad_cols / (2 * xi_val[1]))'], {}), '(-rad_rows / (2 * xi_val[0]) - rad_cols / (2 * xi_val[1]))\n', (1303, 1361), True, 'import numpy as np\n'), ((4621, 4669), 'numpy.exp', 'np.exp', (['(-radx / (2 * xi[0]) - rady / (2 * xi[1]))'], {}), '(-radx / (2 * xi[0]) - rady / (2 * xi[1]))\n', (4627, 4669), True, 'import numpy as np\n'), ((1396, 1426), 'numpy.sqrt', 'np.sqrt', (['(xi_val[0] * xi_val[1])'], {}), '(xi_val[0] * xi_val[1])\n', (1403, 1426), True, 'import numpy as np\n'), ((3414, 3442), 'pickle.dump', 'pickle.dump', (['[samples_xi]', 'f'], {}), '([samples_xi], f)\n', (3425, 3442), False, 'import pickle\n'), ((4686, 4708), 'numpy.sqrt', 'np.sqrt', (['(xi[0] * xi[1])'], {}), '(xi[0] * xi[1])\n', (4693, 4708), True, 'import numpy as np\n'), ((5037, 5085), 'numpy.exp', 'np.exp', (['(-0.5 * X[0] / xi[0] - 0.5 * X[1] / xi[1])'], {}), '(-0.5 * X[0] / xi[0] - 0.5 * X[1] / xi[1])\n', (5043, 5085), True, 'import numpy as np\n'), ((2208, 2249), 'numpy.zeros', 'np.zeros', (['self.fix_dists_2[i][s].shape[1]'], {}), '(self.fix_dists_2[i][s].shape[1])\n', (2216, 2249), True, 'import numpy as np\n'), ((5134, 5156), 'numpy.sqrt', 
'np.sqrt', (['(xi[0] * xi[1])'], {}), '(xi[0] * xi[1])\n', (5141, 5156), True, 'import numpy as np\n')]
|
import copy
import os
from datetime import datetime
import dialogic
import attr
from dialogic.cascade import Cascade, Pr, DialogTurn
from dialogic.dialog import Context, Response
from dialogic.dialog_manager import TurnDialogManager
# Module-wide cascade; handlers below register themselves on it via @csc.add_handler.
csc = Cascade()
@attr.s
class PTurn(DialogTurn):
    """Dialog turn extended with Pleyade-specific storage handles and flags."""
    # Storage collections injected per-turn by PleyadeDM.preprocess_turn
    # (presumably database collections -- confirm against make_dm callers).
    forms_collection = attr.ib(default=None)
    polylogs_collection = attr.ib(default=None)
    # When True, the outgoing response is suppressed (copied onto the response
    # in PleyadeDM.postprocess_response).
    no_response: bool = attr.ib(default=False)
    # Name of a file to attach to the response; not yet consumed anywhere visible.
    upload_filename: str = attr.ib(default=None)
class PleyadeDM(TurnDialogManager):
    """Turn-based dialog manager that threads storage collections through each turn."""
    def __init__(self, *args, forms_collection=None, polylogs_collection=None, **kwargs):
        super(PleyadeDM, self).__init__(*args, **kwargs)
        self.polylogs_collection = polylogs_collection
        self.forms_collection = forms_collection
    def preprocess_turn(self, turn: PTurn):
        # Ensure a user-state dict exists and expose the collections to handlers.
        if not turn.user_object:
            turn.user_object = {}
        # turn.stage = None  # the old stage will be left intact
        turn.polylogs_collection = self.polylogs_collection
        turn.forms_collection = self.forms_collection
    def postprocess_response(self, response: Response, turn: PTurn):
        # Copy the per-turn suppression flag and stamp the interaction time
        # (naive local time) into the persisted user state.
        response.no_response = turn.no_response
        response.updated_user_object['last_time'] = str(datetime.now())
        # todo: add filename to response
class FFDM(dialogic.dialog_manager.FormFillingDialogManager):
    """Form-filling dialog manager that archives each completed form to a collection."""
    def __init__(self, *args, forms_collection=None, **kwargs):
        super(FFDM, self).__init__(*args, **kwargs)
        self.forms_collection = forms_collection
    def handle_completed_form(self, form, user_object, ctx: Context):
        """Persist the finished form (tagged with user id and time) and reply
        with the configured finish message and suggests."""
        # Copy so later mutation of the live form can't affect the stored document.
        document = copy.deepcopy(form)
        document['user_id'] = ctx.user_id
        # NOTE(review): naive local timestamp -- consider an aware/UTC time if
        # the storage is shared across timezones.
        document['timestamp'] = datetime.now()
        if self.forms_collection:
            self.forms_collection.insert_one(document)
        return Response(
            text=self.config.finish_message,
            user_object=user_object,
            suggests=self.config.finish_suggests or [],
        )
# One form-filling dialog manager per form config found under data/forms.
forms_root = 'data/forms'
form_dms = [
    FFDM(os.path.join(forms_root, fn))
    # NOTE(review): os.listdir also returns subdirectories and hidden entries --
    # assumes the directory contains only form config files; confirm.
    for fn in os.listdir(forms_root)
]
@csc.add_handler(priority=Pr.STAGE)
def try_forms(turn: DialogTurn):
    """Hand the turn to the first form-filling DM that produces a response."""
    for form_dm in form_dms:
        candidate = form_dm.try_to_respond(turn.ctx)
        if not candidate:
            continue
        turn.response = candidate
        return
def make_dm(forms_collection=None, polylogs_collection=None) -> PleyadeDM:
    """Build the top-level PleyadeDM and wire the shared forms collection
    into every form-filling sub-manager."""
    manager = PleyadeDM(
        csc,
        turn_cls=PTurn,
        polylogs_collection=polylogs_collection,
        forms_collection=forms_collection,
    )
    for form_dm in form_dms:
        form_dm.forms_collection = forms_collection
    return manager
|
[
"copy.deepcopy",
"os.path.join",
"dialogic.dialog.Response",
"attr.ib",
"datetime.datetime.now",
"dialogic.cascade.Cascade",
"os.listdir"
] |
[((242, 251), 'dialogic.cascade.Cascade', 'Cascade', ([], {}), '()\n', (249, 251), False, 'from dialogic.cascade import Cascade, Pr, DialogTurn\n'), ((310, 331), 'attr.ib', 'attr.ib', ([], {'default': 'None'}), '(default=None)\n', (317, 331), False, 'import attr\n'), ((358, 379), 'attr.ib', 'attr.ib', ([], {'default': 'None'}), '(default=None)\n', (365, 379), False, 'import attr\n'), ((404, 426), 'attr.ib', 'attr.ib', ([], {'default': '(False)'}), '(default=False)\n', (411, 426), False, 'import attr\n'), ((454, 475), 'attr.ib', 'attr.ib', ([], {'default': 'None'}), '(default=None)\n', (461, 475), False, 'import attr\n'), ((1606, 1625), 'copy.deepcopy', 'copy.deepcopy', (['form'], {}), '(form)\n', (1619, 1625), False, 'import copy\n'), ((1700, 1714), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1712, 1714), False, 'from datetime import datetime\n'), ((1820, 1935), 'dialogic.dialog.Response', 'Response', ([], {'text': 'self.config.finish_message', 'user_object': 'user_object', 'suggests': '(self.config.finish_suggests or [])'}), '(text=self.config.finish_message, user_object=user_object, suggests\n =self.config.finish_suggests or [])\n', (1828, 1935), False, 'from dialogic.dialog import Context, Response\n'), ((2029, 2057), 'os.path.join', 'os.path.join', (['forms_root', 'fn'], {}), '(forms_root, fn)\n', (2041, 2057), False, 'import os\n'), ((2073, 2095), 'os.listdir', 'os.listdir', (['forms_root'], {}), '(forms_root)\n', (2083, 2095), False, 'import os\n'), ((1230, 1244), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1242, 1244), False, 'from datetime import datetime\n')]
|
import torch
import torch.nn as nn
import numpy as np
from itertools import combinations
import torch.nn.functional as F
def sigmoid(x):
    """Logistic function: map x (scalar or ndarray) into (0, 1)."""
    denominator = 1 + np.exp(-x)
    return 1 / denominator
def cal_l2(x, y):
    """Total squared L2 distance between x and y: squared differences summed
    over the last axis and then over all remaining axes."""
    diff = x - y
    return diff.pow(2).sum(-1).sum()
class ContrastiveLoss(nn.Module):
    """
    Contrastive loss over groups of embeddings.

    Samples sharing a key of ``f_dic`` are pulled together (sum of pairwise
    squared distances within the group); samples from different keys are
    pushed apart via a squared hinge on ``margin``.
    """

    def __init__(self, margin):
        super(ContrastiveLoss, self).__init__()
        self.margin = margin
        self.eps = 1e-9
        # Accumulator lives on the GPU when one is available.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    def forward(self, f_dic, B, N, size_average=True):
        """Compute the loss for ``f_dic`` (key -> (L, N) embedding tensor).
        ``B`` and ``size_average`` are accepted but unused."""
        total = torch.zeros(1).to(self.device)
        # Attractive term: all pairwise squared distances within each group.
        for group in f_dic.values():
            group_size = group.size(0)
            if group_size == 1:
                continue
            row_view = group.unsqueeze(0).expand(group_size, group_size, N)
            col_view = group.unsqueeze(1).expand(group_size, group_size, N)
            within = (row_view - col_view).pow(2).sum(dim=-1).sum()
            total = total + 0.5 * within / 6
        # Repulsive term: hinge on the margin between samples of different groups.
        if len(f_dic) > 1:
            for key_a, key_b in combinations(f_dic, 2):
                size_a = len(f_dic[key_a])
                size_b = len(f_dic[key_b])
                for idx in range(size_b):
                    anchor = f_dic[key_b][idx].unsqueeze(0).expand(size_a, N)
                    between = cal_l2(f_dic[key_a], anchor) / (size_a + size_b)
                    total = total + 0.5 * F.relu(self.margin - (between + self.eps)).pow(2)
        return total
|
[
"itertools.combinations",
"torch.cuda.is_available",
"numpy.exp",
"torch.pow",
"torch.nn.functional.relu",
"torch.zeros"
] |
[((159, 169), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (165, 169), True, 'import numpy as np\n'), ((607, 632), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (630, 632), False, 'import torch\n'), ((724, 738), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (735, 738), False, 'import torch\n'), ((1333, 1355), 'itertools.combinations', 'combinations', (['f_dic', '(2)'], {}), '(f_dic, 2)\n', (1345, 1355), False, 'from itertools import combinations\n'), ((201, 220), 'torch.pow', 'torch.pow', (['(x - y)', '(2)'], {}), '(x - y, 2)\n', (210, 220), False, 'import torch\n'), ((1639, 1683), 'torch.nn.functional.relu', 'F.relu', (['(self.margin - (distances + self.eps))'], {}), '(self.margin - (distances + self.eps))\n', (1645, 1683), True, 'import torch.nn.functional as F\n')]
|
from ..testutils import BaseTestCase, compare_files, temp_files, regenerate_references
import unittest
import numpy as np
import pickle
import time
import warnings
import pygsti
from pygsti.extras import idletomography as idt
#Helper functions
#Global dicts describing how to prep and measure in various bases.
# Each entry maps a Pauli eigenstate label to the gate sequence that prepares
# it (prepDict) or rotates it into the measurement basis (measDict).
prepDict = { 'X': ('Gy',), 'Y': ('Gx',)*3, 'Z': (),
             '-X': ('Gy',)*3, '-Y': ('Gx',), '-Z': ('Gx','Gx')}
measDict = { 'X': ('Gy',)*3, 'Y': ('Gx',), 'Z': (),
             '-X': ('Gy',), '-Y': ('Gx',)*3, '-Z': ('Gx','Gx')}
#Global switches for debugging: which error generators the tests below exercise.
hamiltonian=True
stochastic=True
affine=True
# Mimics a function that used to be in pyGSTi, replaced with build_cloudnoise_model_from_hops_and_weights
def build_XYCNOT_cloudnoise_model(nQubits, geometry="line", cnot_edges=None,
                                  maxIdleWeight=1, maxSpamWeight=1, maxhops=0,
                                  extraWeight1Hops=0, extraGateWeight=0, sparse=False,
                                  roughNoise=None, sim_type="matrix", parameterization="H+S",
                                  spamtype="lindblad", addIdleNoiseToAllGates=True,
                                  errcomp_type="gates", return_clouds=False, verbosity=0):
    """Thin wrapper delegating to
    pygsti.construction.build_cloudnoise_model_from_hops_and_weights for an
    X/Y/CNOT gate set; ``cnot_edges`` (if given) restricts Gcnot availability."""
    nonstd_gate_unitaries = {}
    availability = {} if cnot_edges is None else {'Gcnot': cnot_edges}
    return pygsti.construction.build_cloudnoise_model_from_hops_and_weights(
        nQubits, ['Gx','Gy','Gcnot'], nonstd_gate_unitaries, None, availability,
        None, geometry, maxIdleWeight, maxSpamWeight, maxhops,
        extraWeight1Hops, extraGateWeight, sparse,
        roughNoise, sim_type, parameterization,
        spamtype, addIdleNoiseToAllGates,
        errcomp_type, True, return_clouds, verbosity)
def get_fileroot(nQubits, maxMaxLen, errMag, spamMag, nSamples, simtype, idleErrorInFiducials):
    """Canonical file prefix (under temp_files) encoding every data-generation parameter."""
    sample_tag = "nosampleerr" if (nSamples == "inf") else ("%dsamples" % nSamples)
    fid_tag = 'idleErrInFids' if idleErrorInFiducials else 'noIdleErrInFids'
    return temp_files + "/idletomog_%dQ_maxLen%d_errMag%.5f_spamMag%.5f_%s_%s_%s" % \
        (nQubits, maxMaxLen, errMag, spamMag, sample_tag, simtype, fid_tag)
def make_idle_tomography_data(nQubits, maxLengths=(0,1,2,4), errMags=(0.01,0.001), spamMag=0,
                              nSamplesList=(100,'inf'), simtype="map"):
    """
    Generate and pickle fake idle-tomography data for a grid of settings.

    For every error magnitude in ``errMags`` and sample count in
    ``nSamplesList`` two model/dataset pairs are written
    ("<fileroot>_gs.pkl" and "<fileroot>_ds.pkl"), once with idle noise on
    the fiducials and once without.  File names come from get_fileroot.
    """
    # Build the parameterization string ("H", "H+S", "H+S+A", ...) from the
    # module-level debug switches.
    base_param = []
    if hamiltonian: base_param.append('H')
    if stochastic: base_param.append('S')
    if affine: base_param.append('A')
    base_param = '+'.join(base_param)
    parameterization = base_param+" terms" if simtype.startswith('termorder') else base_param # "H+S+A"
    gateset_idleInFids = build_XYCNOT_cloudnoise_model(nQubits, "line", [], min(2,nQubits), 1,
                                            sim_type=simtype, parameterization=parameterization,
                                            roughNoise=None, addIdleNoiseToAllGates=True)
    gateset_noIdleInFids = build_XYCNOT_cloudnoise_model(nQubits, "line", [], min(2,nQubits), 1,
                                              sim_type=simtype, parameterization=parameterization,
                                              roughNoise=None, addIdleNoiseToAllGates=False)
    listOfExperiments = idt.make_idle_tomography_list(nQubits, maxLengths, (prepDict,measDict), maxweight=min(2,nQubits),
                                include_hamiltonian=hamiltonian, include_stochastic=stochastic, include_affine=affine)
    base_vec = None
    for errMag in errMags:
        #ky = 'A(Z%s)' % ('I'*(nQubits-1)); debug_errdict = {ky: 0.01 }
        #ky = 'A(ZZ%s)' % ('I'*(nQubits-2)); debug_errdict = {ky: 0.01 }
        debug_errdict = {}
        if base_vec is None:
            # Draw random rates once; later iterations rescale the same vector.
            rand_vec = idt.set_idle_errors(nQubits, gateset_idleInFids, debug_errdict, rand_default=errMag,
                                 hamiltonian=hamiltonian, stochastic=stochastic, affine=affine)
            base_vec = rand_vec / errMag
        err_vec = base_vec * errMag # for different errMags just scale the *same* random rates
        idt.set_idle_errors(nQubits, gateset_idleInFids, debug_errdict, rand_default=err_vec,
                            hamiltonian=hamiltonian, stochastic=stochastic, affine=affine)
        idt.set_idle_errors(nQubits, gateset_noIdleInFids, debug_errdict, rand_default=err_vec,
                            hamiltonian=hamiltonian, stochastic=stochastic, affine=affine) # same errors for w/ and w/out idle fiducial error
        for nSamples in nSamplesList:
            if nSamples == 'inf':
                sampleError = 'none'; Nsamp = 100
            else:
                sampleError = 'multinomial'; Nsamp = nSamples
            ds_idleInFids = pygsti.construction.generate_fake_data(
                gateset_idleInFids, listOfExperiments, nSamples=Nsamp,
                sampleError=sampleError, seed=8675309)
            fileroot = get_fileroot(nQubits, maxLengths[-1], errMag, spamMag, nSamples, simtype, True)
            # Context managers ensure the pickle files are flushed and closed
            # (the old pickle.dump(obj, open(...)) pattern leaked the handles).
            with open("%s_gs.pkl" % fileroot, "wb") as f:
                pickle.dump(gateset_idleInFids, f)
            with open("%s_ds.pkl" % fileroot, "wb") as f:
                pickle.dump(ds_idleInFids, f)
            print("Wrote fileroot ",fileroot)
            ds_noIdleInFids = pygsti.construction.generate_fake_data(
                gateset_noIdleInFids, listOfExperiments, nSamples=Nsamp,
                sampleError=sampleError, seed=8675309)
            fileroot = get_fileroot(nQubits, maxLengths[-1], errMag, spamMag, nSamples, simtype, False)
            with open("%s_gs.pkl" % fileroot, "wb") as f:
                pickle.dump(gateset_noIdleInFids, f)
            with open("%s_ds.pkl" % fileroot, "wb") as f:
                pickle.dump(ds_noIdleInFids, f)
            #FROM DEBUGGING Python2 vs Python3 issue (ended up being an ordered-dict)
            ##pygsti.io.write_dataset("%s_ds_chk.txt" % fileroot, ds_noIdleInFids)
            #chk = pygsti.io.load_dataset("%s_ds_chk.txt" % fileroot)
            #for opstr,dsrow in ds_noIdleInFids.items():
            #    for outcome in dsrow.counts:
            #        cnt1, cnt2 = dsrow.counts.get(outcome,0.0),chk[opstr].counts.get(outcome,0.0)
            #        if not np.isclose(cnt1,cnt2):
            #            raise ValueError("NOT EQUAL: %s != %s" % (str(dsrow.counts), str(chk[opstr].counts)))
            #print("EQUAL!")
            print("Wrote fileroot ",fileroot)
def helper_idle_tomography(nQubits, maxLengths=(1,2,4), file_maxLen=4, errMag=0.01, spamMag=0, nSamples=100,
                           simtype="map", idleErrorInFiducials=True, fitOrder=1, fileroot=None):
    """
    Run idle tomography on a previously pickled model/dataset pair and compare
    the estimated intrinsic error rates against those predicted from the
    data-generating model.

    :return: (max Hamiltonian diff, max stochastic diff, max affine diff)
    """
    if fileroot is None:
        fileroot = get_fileroot(nQubits, file_maxLen, errMag, spamMag, nSamples, simtype, idleErrorInFiducials)
    # Use context managers so the pickle file handles are closed
    # (pickle.load(open(...)) leaked them).
    with open("%s_gs.pkl" % fileroot, "rb") as f:
        mdl_datagen = pickle.load(f)
    with open("%s_ds.pkl" % fileroot, "rb") as f:
        ds = pickle.load(f)
    #print("DB: ",ds[ ('Gi',) ])
    #print("DB: ",ds[ ('Gi','Gi') ])
    #print("DB: ",ds[ ((('Gx',0),('Gx',1)),(('Gx',0),('Gx',1)),'Gi',(('Gx',0),('Gx',1)),(('Gx',0),('Gx',1))) ])
    advanced = {'fit order': fitOrder}
    results = idt.do_idle_tomography(nQubits, ds, maxLengths, (prepDict,measDict), maxweight=min(2,nQubits),
                                     advancedOptions=advanced, include_hamiltonian=hamiltonian,
                                     include_stochastic=stochastic, include_affine=affine)
    if hamiltonian: ham_intrinsic_rates = results.intrinsic_rates['hamiltonian']
    if stochastic: sto_intrinsic_rates = results.intrinsic_rates['stochastic']
    if affine: aff_intrinsic_rates = results.intrinsic_rates['affine']
    maxErrWeight=2 # hardcoded for now
    datagen_ham_rates, datagen_sto_rates, datagen_aff_rates = \
        idt.predicted_intrinsic_rates(nQubits, maxErrWeight, mdl_datagen, hamiltonian, stochastic, affine)
    print("Predicted HAM = ",datagen_ham_rates)
    print("Predicted STO = ",datagen_sto_rates)
    print("Predicted AFF = ",datagen_aff_rates)
    print("Intrinsic HAM = ",ham_intrinsic_rates)
    print("Intrinsic STO = ",sto_intrinsic_rates)
    print("Intrinsic AFF = ",aff_intrinsic_rates)
    ham_diff = sto_diff = aff_diff = [0] # so max()=0 below for types we exclude
    if hamiltonian: ham_diff = np.abs(ham_intrinsic_rates - datagen_ham_rates)
    if stochastic:  sto_diff = np.abs(sto_intrinsic_rates - datagen_sto_rates)
    if affine:      aff_diff = np.abs(aff_intrinsic_rates - datagen_aff_rates)
    print("Err labels:", [ x.rep for x in results.error_list])
    if hamiltonian: print("Ham diffs:", ham_diff)
    if stochastic: print("Sto diffs:", sto_diff)
    #if stochastic:
    #    for x,y in zip(sto_intrinsic_rates,datagen_sto_rates):
    #        print("  %g <--> %g" % (x,y))
    if affine: print("Aff diffs:", aff_diff)
    print("%s\n MAX DIFFS: " % fileroot, max(ham_diff),max(sto_diff),max(aff_diff))
    return max(ham_diff),max(sto_diff),max(aff_diff)
#OLD - leftover from when we put data into a pandas data frame
# #add hamiltonian data to df
# N = len(labels) # number of hamiltonian/stochastic rates
# data = pd.DataFrame({'nQubits': [nQubits]*N, 'maxL':[maxLengths[-1]]*N,
# 'errMag': [errMag]*N, 'spamMag': [spamMag]*N,
# 'nSamples': [nSamples]*N,
# 'simtype': [simtype]*N, 'type': ['hamiltonian']*N,
# 'true_val': datagen_ham_rates, 'estimate': ham_intrinsic_rates,
# 'diff': ham_intrinsic_rates - datagen_ham_rates, 'abs_diff': ham_diff,
# 'fitOrder': [fitOrder]*N, 'idleErrorInFiducials': [idleErrorInFiducials]*N })
# df = df.append(data, ignore_index=True)
# #add stochastic data to df
# data = pd.DataFrame({'nQubits': [nQubits]*N, 'maxL':[maxLengths[-1]]*N,
# 'errMag': [errMag]*N, 'spamMag': [spamMag]*N,
# 'nSamples': [nSamples]*N,
# 'simtype': [simtype]*N, 'type': ['stochastic']*N,
# 'true_val': datagen_sto_rates, 'estimate': sto_intrinsic_rates,
# 'diff': sto_intrinsic_rates - datagen_sto_rates,'abs_diff': sto_diff,
# 'fitOrder': [fitOrder]*N, 'idleErrorInFiducials': [idleErrorInFiducials]*N })
# df = df.append(data, ignore_index=True)
# return df
class IDTTestCase(BaseTestCase):
    """Integration tests for the idle tomography extras package."""
    def test_idletomography_1Q(self):
        """Exact recovery of 1-qubit intrinsic rates from noiseless (term-order) data."""
        nQ = 1
        #make perfect data - using termorder:1 here means the data is not CPTP and
        # therefore won't be in [0,1], and creating a data set with sampleError="none"
        # means that probabilities *won't* be clipped to [0,1] - so we get really
        # funky and unphysical data here, but data that idle tomography should be
        # able to fit *exactly* (with any errMags, so we pick a big one).
        make_idle_tomography_data(nQ, maxLengths=(0,1,2,4), errMags=(0.01,), spamMag=0,
                                  nSamplesList=('inf',), simtype="termorder") # how specify order
        # Note: no spam error, as accounting for this isn't built into idle tomography yet.
        maxH, maxS, maxA = helper_idle_tomography(nQ, maxLengths=(1,2,4), file_maxLen=4,
                                                  errMag=0.01, spamMag=0, nSamples='inf',
                                                  idleErrorInFiducials=False, fitOrder=1, simtype="termorder") # how specify order
        #Make sure exact identification of errors was possible
        self.assertLess(maxH, 1e-6)
        self.assertLess(maxS, 1e-6)
        self.assertLess(maxA, 1e-6)
    def test_idletomography_2Q(self):
        """Same as the 1Q test but for 2 qubits."""
        nQ = 2
        make_idle_tomography_data(nQ, maxLengths=(0,1,2,4), errMags=(0.01,), spamMag=0,
                                  nSamplesList=('inf',), simtype="termorder") #How specify order?
        maxH, maxS, maxA = helper_idle_tomography(nQ, maxLengths=(1,2,4), file_maxLen=4,
                                                  errMag=0.01, spamMag=0, nSamples='inf',
                                                  idleErrorInFiducials=False, fitOrder=1, simtype="termorder") # how specify order?
        self.assertLess(maxH, 1e-6)
        self.assertLess(maxS, 1e-6)
        self.assertLess(maxA, 1e-6)
    def test_idletomog_gstdata_std1Q(self):
        """Standard GST report (which runs idle tomography) on std 1Q XYI data."""
        from pygsti.modelpacks.legacy import std1Q_XYI as std
        std = pygsti.construction.stdmodule_to_smqmodule(std)
        maxLens = [1,2,4]
        expList = pygsti.construction.make_lsgst_experiment_list(std.target_model(), std.prepStrs,
                                                        std.effectStrs, std.germs_lite, maxLens)
        ds = pygsti.construction.generate_fake_data(std.target_model().depolarize(0.01, 0.01),
                                                    expList, 1000, 'multinomial', seed=1234)
        result = pygsti.do_long_sequence_gst(ds, std.target_model(), std.prepStrs, std.effectStrs, std.germs_lite, maxLens, verbosity=3)
        #standard report will run idle tomography
        pygsti.report.create_standard_report(result, temp_files + "/gstWithIdleTomogTestReportStd1Q",
                                             "Test GST Report w/Idle Tomography Tab: StdXYI",
                                             verbosity=3, auto_open=False)
    def test_idletomog_gstdata_1Qofstd2Q(self):
        # perform idle tomography on first qubit of 2Q
        from pygsti.modelpacks.legacy import std2Q_XYICNOT as std2Q
        from pygsti.modelpacks.legacy import std1Q_XYI as std
        std2Q = pygsti.construction.stdmodule_to_smqmodule(std2Q)
        std = pygsti.construction.stdmodule_to_smqmodule(std)
        maxLens = [1,2,4]
        expList = pygsti.construction.make_lsgst_experiment_list(std2Q.target_model(), std2Q.prepStrs,
                                                        std2Q.effectStrs, std2Q.germs_lite, maxLens)
        mdl_datagen = std2Q.target_model().depolarize(0.01, 0.01)
        ds2Q = pygsti.construction.generate_fake_data(mdl_datagen, expList, 1000, 'multinomial', seed=1234)
        #Just analyze first qubit (qubit 0)
        ds = pygsti.construction.filter_dataset(ds2Q, (0,))
        start = std.target_model()
        start.set_all_parameterizations("TP")
        result = pygsti.do_long_sequence_gst(ds, start, std.prepStrs[0:4], std.effectStrs[0:4],
                                             std.germs_lite, maxLens, verbosity=3, advancedOptions={'objective': 'chi2'})
        #result = pygsti.do_model_test(start.depolarize(0.009,0.009), ds, std.target_model(), std.prepStrs[0:4],
        #                              std.effectStrs[0:4], std.germs_lite, maxLens)
        pygsti.report.create_standard_report(result, temp_files + "/gstWithIdleTomogTestReportStd1Qfrom2Q",
                                             "Test GST Report w/Idle Tomog.: StdXYI from StdXYICNOT",
                                             verbosity=3, auto_open=False)
    def test_idletomog_gstdata_nQ(self):
        """Idle tomography with GST-derived pauli fiducial pairs plus an nQ GST report."""
        try: from pygsti.objects import fastreplib
        except ImportError:
            warnings.warn("Skipping test_idletomog_gstdata_nQ b/c no fastreps!")
            return
        #Global dicts describing how to prep and measure in various bases
        prepDict = { 'X': ('Gy',), 'Y': ('Gx',)*3, 'Z': (),
                     '-X': ('Gy',)*3, '-Y': ('Gx',), '-Z': ('Gx','Gx')}
        measDict = { 'X': ('Gy',)*3, 'Y': ('Gx',), 'Z': (),
                     '-X': ('Gy',), '-Y': ('Gx',)*3, '-Z': ('Gx','Gx')}
        nQubits = 2
        maxLengths = [1,2,4]
        ## ----- Generate n-qubit operation sequences -----
        if regenerate_references():
            c = {} # re-generate the sequence cache
        else:
            # Close the cache file deterministically (the old
            # pickle.load(open(...)) pattern leaked the handle).
            with open(compare_files+"/idt_nQsequenceCache.pkl", 'rb') as f:
                c = pickle.load(f)
        t = time.time()
        gss = pygsti.construction.create_XYCNOT_cloudnoise_sequences(
            nQubits, maxLengths, 'line', [(0,1)], maxIdleWeight=2,
            idleOnly=False, paramroot="H+S", cache=c, verbosity=3)
        #print("GSS STRINGS: ")
        #print('\n'.join(["%s: %s" % (s.str,str(s.tup)) for s in gss.allstrs]))
        print("%.1fs" % (time.time()-t))
        if regenerate_references():
            # Save the re-generated sequence cache as the new reference.
            with open(compare_files+"/idt_nQsequenceCache.pkl", 'wb') as f:
                pickle.dump(c, f)
        # To run idle tomography, we need "pauli fiducial pairs", so
        # get fiducial pairs for Gi germ from gss and convert
        # to "Pauli fiducial pairs" (which pauli state/basis is prepared or measured)
        GiStr = pygsti.obj.Circuit(((),), num_lines=nQubits)
        self.assertTrue(GiStr in gss.germs)
        self.assertTrue(gss.Ls == maxLengths)
        L0 = maxLengths[0] # all lengths should have same fidpairs, just take first one
        plaq = gss.get_plaquette(L0, GiStr)
        pauli_fidpairs = idt.fidpairs_to_pauli_fidpairs(plaq.fidpairs, (prepDict,measDict), nQubits)
        print(plaq.fidpairs)
        print()
        print('\n'.join([ "%s, %s" % (p[0],p[1]) for p in pauli_fidpairs]))
        self.assertEqual(len(plaq.fidpairs), len(pauli_fidpairs))
        self.assertEqual(len(plaq.fidpairs), 16) # (will need to change this if use H+S+A above)
        # ---- Create some fake data ----
        target_model = build_XYCNOT_cloudnoise_model(nQubits, "line", [(0,1)], 2, 1,
                                              sim_type="map", parameterization="H+S")
        #Note: generate data with affine errors too (H+S+A used below)
        mdl_datagen = build_XYCNOT_cloudnoise_model(nQubits, "line", [(0,1)], 2, 1,
                                             sim_type="map", parameterization="H+S+A",
                                             roughNoise=(1234,0.001))
        #This *only* (re)sets Gi errors...
        idt.set_idle_errors(nQubits, mdl_datagen, {}, rand_default=0.001,
                            hamiltonian=True, stochastic=True, affine=True) # no seed? FUTURE?
        problemStr = pygsti.obj.Circuit([()], num_lines=nQubits)
        print("Problem: ",problemStr.str)
        assert(problemStr in gss.allstrs)
        ds = pygsti.construction.generate_fake_data(mdl_datagen, gss.allstrs, 1000, 'multinomial', seed=1234)
        # ----- Run idle tomography with our custom (GST) set of pauli fiducial pairs ----
        advanced = {'pauli_fidpairs': pauli_fidpairs, 'jacobian mode': "together"}
        idtresults = idt.do_idle_tomography(nQubits, ds, maxLengths, (prepDict,measDict), maxweight=2,
                                       advancedOptions=advanced, include_hamiltonian='auto',
                                       include_stochastic='auto', include_affine='auto')
        #Note: include_affine="auto" should have detected that we don't have the sequences to
        # determine the affine intrinsic rates:
        self.assertEqual(set(idtresults.intrinsic_rates.keys()), set(['hamiltonian','stochastic']))
        idt.create_idletomography_report(idtresults, temp_files + "/idleTomographyGSTSeqTestReport",
                                         "Test idle tomography report w/GST seqs", auto_open=False)
        #Run GST on the data (set tolerance high so this 2Q-GST run doesn't take long)
        gstresults = pygsti.do_long_sequence_gst_base(ds, target_model, gss,
                                                    advancedOptions={'tolerance': 1e-1}, verbosity=3)
        #In FUTURE, we shouldn't need to set the basis of our nQ GST results in order to make a report
        for estkey in gstresults.estimates: # 'default'
            gstresults.estimates[estkey].models['go0'].basis = pygsti.obj.Basis.cast("pp",16)
            gstresults.estimates[estkey].models['target'].basis = pygsti.obj.Basis.cast("pp",16)
        #pygsti.report.create_standard_report(gstresults, temp_files + "/gstWithIdleTomogTestReport",
        #                                    "Test GST Report w/Idle Tomography Tab",
        #                                    verbosity=3, auto_open=False)
        pygsti.report.create_nqnoise_report(gstresults, temp_files + "/gstWithIdleTomogTestReport",
                                            "Test nQNoise Report w/Idle Tomography Tab",
                                            verbosity=3, auto_open=False)
    def test_automatic_paulidicts(self):
        """determine_paulidicts recovers the canonical prep/meas gate dictionaries."""
        expected_prepDict = { 'X': ('Gy',), 'Y': ('Gx',)*3, 'Z': (),
                              '-X': ('Gy',)*3, '-Y': ('Gx',), '-Z': ('Gx','Gx')}
        expected_measDict = { 'X': ('Gy',)*3, 'Y': ('Gx',), 'Z': (),
                              '-X': ('Gy',), '-Y': ('Gx',)*3, '-Z': ('Gx','Gx')}
        target_model = build_XYCNOT_cloudnoise_model(3, "line", [(0,1)], 2, 1,
                                              sim_type="map", parameterization="H+S+A")
        prepDict, measDict = idt.determine_paulidicts(target_model)
        self.assertEqual(prepDict, expected_prepDict)
        self.assertEqual(measDict, expected_measDict)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main(verbosity=2)
|
[
"pygsti.obj.Circuit",
"numpy.abs",
"pygsti.do_long_sequence_gst",
"pygsti.construction.build_cloudnoise_model_from_hops_and_weights",
"pygsti.construction.filter_dataset",
"unittest.main",
"pygsti.extras.idletomography.predicted_intrinsic_rates",
"pygsti.do_long_sequence_gst_base",
"pygsti.modelpacks.legacy.std2Q_XYICNOT.target_model",
"pygsti.construction.stdmodule_to_smqmodule",
"pygsti.extras.idletomography.determine_paulidicts",
"pygsti.construction.create_XYCNOT_cloudnoise_sequences",
"pygsti.construction.generate_fake_data",
"pygsti.report.create_standard_report",
"pygsti.extras.idletomography.create_idletomography_report",
"pygsti.report.create_nqnoise_report",
"pygsti.obj.Basis.cast",
"pygsti.extras.idletomography.fidpairs_to_pauli_fidpairs",
"pygsti.extras.idletomography.do_idle_tomography",
"pygsti.extras.idletomography.set_idle_errors",
"pygsti.modelpacks.legacy.std1Q_XYI.target_model",
"time.time",
"warnings.warn"
] |
[((1386, 1764), 'pygsti.construction.build_cloudnoise_model_from_hops_and_weights', 'pygsti.construction.build_cloudnoise_model_from_hops_and_weights', (['nQubits', "['Gx', 'Gy', 'Gcnot']", 'nonstd_gate_unitaries', 'None', 'availability', 'None', 'geometry', 'maxIdleWeight', 'maxSpamWeight', 'maxhops', 'extraWeight1Hops', 'extraGateWeight', 'sparse', 'roughNoise', 'sim_type', 'parameterization', 'spamtype', 'addIdleNoiseToAllGates', 'errcomp_type', '(True)', 'return_clouds', 'verbosity'], {}), "(nQubits, [\n 'Gx', 'Gy', 'Gcnot'], nonstd_gate_unitaries, None, availability, None,\n geometry, maxIdleWeight, maxSpamWeight, maxhops, extraWeight1Hops,\n extraGateWeight, sparse, roughNoise, sim_type, parameterization,\n spamtype, addIdleNoiseToAllGates, errcomp_type, True, return_clouds,\n verbosity)\n", (1450, 1764), False, 'import pygsti\n'), ((7676, 7778), 'pygsti.extras.idletomography.predicted_intrinsic_rates', 'idt.predicted_intrinsic_rates', (['nQubits', 'maxErrWeight', 'mdl_datagen', 'hamiltonian', 'stochastic', 'affine'], {}), '(nQubits, maxErrWeight, mdl_datagen,\n hamiltonian, stochastic, affine)\n', (7705, 7778), True, 'from pygsti.extras import idletomography as idt\n'), ((20978, 21004), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (20991, 21004), False, 'import unittest\n'), ((4021, 4177), 'pygsti.extras.idletomography.set_idle_errors', 'idt.set_idle_errors', (['nQubits', 'gateset_idleInFids', 'debug_errdict'], {'rand_default': 'err_vec', 'hamiltonian': 'hamiltonian', 'stochastic': 'stochastic', 'affine': 'affine'}), '(nQubits, gateset_idleInFids, debug_errdict,\n rand_default=err_vec, hamiltonian=hamiltonian, stochastic=stochastic,\n affine=affine)\n', (4040, 4177), True, 'from pygsti.extras import idletomography as idt\n'), ((4204, 4362), 'pygsti.extras.idletomography.set_idle_errors', 'idt.set_idle_errors', (['nQubits', 'gateset_noIdleInFids', 'debug_errdict'], {'rand_default': 'err_vec', 'hamiltonian': 'hamiltonian', 
'stochastic': 'stochastic', 'affine': 'affine'}), '(nQubits, gateset_noIdleInFids, debug_errdict,\n rand_default=err_vec, hamiltonian=hamiltonian, stochastic=stochastic,\n affine=affine)\n', (4223, 4362), True, 'from pygsti.extras import idletomography as idt\n'), ((8182, 8229), 'numpy.abs', 'np.abs', (['(ham_intrinsic_rates - datagen_ham_rates)'], {}), '(ham_intrinsic_rates - datagen_ham_rates)\n', (8188, 8229), True, 'import numpy as np\n'), ((8261, 8308), 'numpy.abs', 'np.abs', (['(sto_intrinsic_rates - datagen_sto_rates)'], {}), '(sto_intrinsic_rates - datagen_sto_rates)\n', (8267, 8308), True, 'import numpy as np\n'), ((8340, 8387), 'numpy.abs', 'np.abs', (['(aff_intrinsic_rates - datagen_aff_rates)'], {}), '(aff_intrinsic_rates - datagen_aff_rates)\n', (8346, 8387), True, 'import numpy as np\n'), ((12234, 12281), 'pygsti.construction.stdmodule_to_smqmodule', 'pygsti.construction.stdmodule_to_smqmodule', (['std'], {}), '(std)\n', (12276, 12281), False, 'import pygsti\n'), ((12899, 13084), 'pygsti.report.create_standard_report', 'pygsti.report.create_standard_report', (['result', "(temp_files + '/gstWithIdleTomogTestReportStd1Q')", '"""Test GST Report w/Idle Tomography Tab: StdXYI"""'], {'verbosity': '(3)', 'auto_open': '(False)'}), "(result, temp_files +\n '/gstWithIdleTomogTestReportStd1Q',\n 'Test GST Report w/Idle Tomography Tab: StdXYI', verbosity=3, auto_open\n =False)\n", (12935, 13084), False, 'import pygsti\n'), ((13412, 13461), 'pygsti.construction.stdmodule_to_smqmodule', 'pygsti.construction.stdmodule_to_smqmodule', (['std2Q'], {}), '(std2Q)\n', (13454, 13461), False, 'import pygsti\n'), ((13476, 13523), 'pygsti.construction.stdmodule_to_smqmodule', 'pygsti.construction.stdmodule_to_smqmodule', (['std'], {}), '(std)\n', (13518, 13523), False, 'import pygsti\n'), ((13845, 13941), 'pygsti.construction.generate_fake_data', 'pygsti.construction.generate_fake_data', (['mdl_datagen', 'expList', '(1000)', '"""multinomial"""'], {'seed': '(1234)'}), 
"(mdl_datagen, expList, 1000,\n 'multinomial', seed=1234)\n", (13883, 13941), False, 'import pygsti\n'), ((13996, 14042), 'pygsti.construction.filter_dataset', 'pygsti.construction.filter_dataset', (['ds2Q', '(0,)'], {}), '(ds2Q, (0,))\n', (14030, 14042), False, 'import pygsti\n'), ((14060, 14078), 'pygsti.modelpacks.legacy.std1Q_XYI.target_model', 'std.target_model', ([], {}), '()\n', (14076, 14078), True, 'from pygsti.modelpacks.legacy import std1Q_XYI as std\n'), ((14142, 14306), 'pygsti.do_long_sequence_gst', 'pygsti.do_long_sequence_gst', (['ds', 'start', 'std.prepStrs[0:4]', 'std.effectStrs[0:4]', 'std.germs_lite', 'maxLens'], {'verbosity': '(3)', 'advancedOptions': "{'objective': 'chi2'}"}), "(ds, start, std.prepStrs[0:4], std.effectStrs[0:\n 4], std.germs_lite, maxLens, verbosity=3, advancedOptions={'objective':\n 'chi2'})\n", (14169, 14306), False, 'import pygsti\n'), ((14549, 14747), 'pygsti.report.create_standard_report', 'pygsti.report.create_standard_report', (['result', "(temp_files + '/gstWithIdleTomogTestReportStd1Qfrom2Q')", '"""Test GST Report w/Idle Tomog.: StdXYI from StdXYICNOT"""'], {'verbosity': '(3)', 'auto_open': '(False)'}), "(result, temp_files +\n '/gstWithIdleTomogTestReportStd1Qfrom2Q',\n 'Test GST Report w/Idle Tomog.: StdXYI from StdXYICNOT', verbosity=3,\n auto_open=False)\n", (14585, 14747), False, 'import pygsti\n'), ((15700, 15711), 'time.time', 'time.time', ([], {}), '()\n', (15709, 15711), False, 'import time\n'), ((15726, 15899), 'pygsti.construction.create_XYCNOT_cloudnoise_sequences', 'pygsti.construction.create_XYCNOT_cloudnoise_sequences', (['nQubits', 'maxLengths', '"""line"""', '[(0, 1)]'], {'maxIdleWeight': '(2)', 'idleOnly': '(False)', 'paramroot': '"""H+S"""', 'cache': 'c', 'verbosity': '(3)'}), "(nQubits, maxLengths,\n 'line', [(0, 1)], maxIdleWeight=2, idleOnly=False, paramroot='H+S',\n cache=c, verbosity=3)\n", (15780, 15899), False, 'import pygsti\n'), ((16500, 16544), 'pygsti.obj.Circuit', 'pygsti.obj.Circuit', 
(['((),)'], {'num_lines': 'nQubits'}), '(((),), num_lines=nQubits)\n', (16518, 16544), False, 'import pygsti\n'), ((16792, 16868), 'pygsti.extras.idletomography.fidpairs_to_pauli_fidpairs', 'idt.fidpairs_to_pauli_fidpairs', (['plaq.fidpairs', '(prepDict, measDict)', 'nQubits'], {}), '(plaq.fidpairs, (prepDict, measDict), nQubits)\n', (16822, 16868), True, 'from pygsti.extras import idletomography as idt\n'), ((17783, 17900), 'pygsti.extras.idletomography.set_idle_errors', 'idt.set_idle_errors', (['nQubits', 'mdl_datagen', '{}'], {'rand_default': '(0.001)', 'hamiltonian': '(True)', 'stochastic': '(True)', 'affine': '(True)'}), '(nQubits, mdl_datagen, {}, rand_default=0.001,\n hamiltonian=True, stochastic=True, affine=True)\n', (17802, 17900), True, 'from pygsti.extras import idletomography as idt\n'), ((17955, 17998), 'pygsti.obj.Circuit', 'pygsti.obj.Circuit', (['[()]'], {'num_lines': 'nQubits'}), '([()], num_lines=nQubits)\n', (17973, 17998), False, 'import pygsti\n'), ((18096, 18196), 'pygsti.construction.generate_fake_data', 'pygsti.construction.generate_fake_data', (['mdl_datagen', 'gss.allstrs', '(1000)', '"""multinomial"""'], {'seed': '(1234)'}), "(mdl_datagen, gss.allstrs, 1000,\n 'multinomial', seed=1234)\n", (18134, 18196), False, 'import pygsti\n'), ((18389, 18583), 'pygsti.extras.idletomography.do_idle_tomography', 'idt.do_idle_tomography', (['nQubits', 'ds', 'maxLengths', '(prepDict, measDict)'], {'maxweight': '(2)', 'advancedOptions': 'advanced', 'include_hamiltonian': '"""auto"""', 'include_stochastic': '"""auto"""', 'include_affine': '"""auto"""'}), "(nQubits, ds, maxLengths, (prepDict, measDict),\n maxweight=2, advancedOptions=advanced, include_hamiltonian='auto',\n include_stochastic='auto', include_affine='auto')\n", (18411, 18583), True, 'from pygsti.extras import idletomography as idt\n'), ((18899, 19058), 'pygsti.extras.idletomography.create_idletomography_report', 'idt.create_idletomography_report', (['idtresults', "(temp_files + 
'/idleTomographyGSTSeqTestReport')", '"""Test idle tomography report w/GST seqs"""'], {'auto_open': '(False)'}), "(idtresults, temp_files +\n '/idleTomographyGSTSeqTestReport',\n 'Test idle tomography report w/GST seqs', auto_open=False)\n", (18931, 19058), True, 'from pygsti.extras import idletomography as idt\n'), ((19194, 19303), 'pygsti.do_long_sequence_gst_base', 'pygsti.do_long_sequence_gst_base', (['ds', 'target_model', 'gss'], {'advancedOptions': "{'tolerance': 0.1}", 'verbosity': '(3)'}), "(ds, target_model, gss, advancedOptions={\n 'tolerance': 0.1}, verbosity=3)\n", (19226, 19303), False, 'import pygsti\n'), ((19996, 20170), 'pygsti.report.create_nqnoise_report', 'pygsti.report.create_nqnoise_report', (['gstresults', "(temp_files + '/gstWithIdleTomogTestReport')", '"""Test nQNoise Report w/Idle Tomography Tab"""'], {'verbosity': '(3)', 'auto_open': '(False)'}), "(gstresults, temp_files +\n '/gstWithIdleTomogTestReport',\n 'Test nQNoise Report w/Idle Tomography Tab', verbosity=3, auto_open=False)\n", (20031, 20170), False, 'import pygsti\n'), ((20798, 20836), 'pygsti.extras.idletomography.determine_paulidicts', 'idt.determine_paulidicts', (['target_model'], {}), '(target_model)\n', (20822, 20836), True, 'from pygsti.extras import idletomography as idt\n'), ((3688, 3843), 'pygsti.extras.idletomography.set_idle_errors', 'idt.set_idle_errors', (['nQubits', 'gateset_idleInFids', 'debug_errdict'], {'rand_default': 'errMag', 'hamiltonian': 'hamiltonian', 'stochastic': 'stochastic', 'affine': 'affine'}), '(nQubits, gateset_idleInFids, debug_errdict,\n rand_default=errMag, hamiltonian=hamiltonian, stochastic=stochastic,\n affine=affine)\n', (3707, 3843), True, 'from pygsti.extras import idletomography as idt\n'), ((4664, 4800), 'pygsti.construction.generate_fake_data', 'pygsti.construction.generate_fake_data', (['gateset_idleInFids', 'listOfExperiments'], {'nSamples': 'Nsamp', 'sampleError': 'sampleError', 'seed': '(8675309)'}), '(gateset_idleInFids,\n 
listOfExperiments, nSamples=Nsamp, sampleError=sampleError, seed=8675309)\n', (4702, 4800), False, 'import pygsti\n'), ((5197, 5335), 'pygsti.construction.generate_fake_data', 'pygsti.construction.generate_fake_data', (['gateset_noIdleInFids', 'listOfExperiments'], {'nSamples': 'Nsamp', 'sampleError': 'sampleError', 'seed': '(8675309)'}), '(gateset_noIdleInFids,\n listOfExperiments, nSamples=Nsamp, sampleError=sampleError, seed=8675309)\n', (5235, 5335), False, 'import pygsti\n'), ((12374, 12392), 'pygsti.modelpacks.legacy.std1Q_XYI.target_model', 'std.target_model', ([], {}), '()\n', (12390, 12392), True, 'from pygsti.modelpacks.legacy import std1Q_XYI as std\n'), ((12752, 12770), 'pygsti.modelpacks.legacy.std1Q_XYI.target_model', 'std.target_model', ([], {}), '()\n', (12768, 12770), True, 'from pygsti.modelpacks.legacy import std1Q_XYI as std\n'), ((13616, 13636), 'pygsti.modelpacks.legacy.std2Q_XYICNOT.target_model', 'std2Q.target_model', ([], {}), '()\n', (13634, 13636), True, 'from pygsti.modelpacks.legacy import std2Q_XYICNOT as std2Q\n'), ((19597, 19628), 'pygsti.obj.Basis.cast', 'pygsti.obj.Basis.cast', (['"""pp"""', '(16)'], {}), "('pp', 16)\n", (19618, 19628), False, 'import pygsti\n'), ((19694, 19725), 'pygsti.obj.Basis.cast', 'pygsti.obj.Basis.cast', (['"""pp"""', '(16)'], {}), "('pp', 16)\n", (19715, 19725), False, 'import pygsti\n'), ((13786, 13806), 'pygsti.modelpacks.legacy.std2Q_XYICNOT.target_model', 'std2Q.target_model', ([], {}), '()\n', (13804, 13806), True, 'from pygsti.modelpacks.legacy import std2Q_XYICNOT as std2Q\n'), ((14960, 15028), 'warnings.warn', 'warnings.warn', (['"""Skipping test_idletomog_gstdata_nQ b/c no fastreps!"""'], {}), "('Skipping test_idletomog_gstdata_nQ b/c no fastreps!')\n", (14973, 15028), False, 'import warnings\n'), ((12566, 12584), 'pygsti.modelpacks.legacy.std1Q_XYI.target_model', 'std.target_model', ([], {}), '()\n', (12582, 12584), True, 'from pygsti.modelpacks.legacy import std1Q_XYI as std\n'), ((16085, 
16096), 'time.time', 'time.time', ([], {}), '()\n', (16094, 16096), False, 'import time\n')]
|
import pytest
import mock
from marshmallow import ValidationError
from puzzle_engine.hitori.schemas import (
CellSchema,
BoardSchema,
HitoriSolutionSchema
)
class TestCellSchema:
    """Tests for CellSchema: deserialization into Cell and coordinate validation."""

    @pytest.fixture
    def data(self):
        # Minimal valid cell payload.
        return {'id': 1, 'row_number': 1, 'column_number': 2, 'value': 5}

    @pytest.fixture
    def patched_cell(self):
        # Replace the Cell model with a mock for the duration of one test.
        with mock.patch('puzzle_engine.hitori.schemas.Cell') as cell_mock:
            yield cell_mock

    def test_cell_schema_loads(self, data, patched_cell):
        """Loading a valid payload constructs a Cell from the raw fields."""
        schema = CellSchema(strict=True)
        result = schema.load(data).data
        assert result is patched_cell.return_value
        patched_cell.assert_called_once_with(**data)

    # Payloads with a negative row and/or column coordinate must be rejected.
    bad_data = [
        {'id': 1, 'row_number': 3, 'column_number': -1, 'value': 5},
        {'id': 1, 'row_number': -3, 'column_number': 5, 'value': 5},
        {'id': 1, 'row_number': -3, 'column_number': -5, 'value': 2},
    ]

    @pytest.mark.parametrize('data', bad_data)
    def test_cell_schema_validates(self, data):
        """Each invalid payload raises a marshmallow ValidationError."""
        with pytest.raises(ValidationError):
            CellSchema(strict=True).load(data)
class TestBoardSchema:
    """Tests for BoardSchema: loading a valid board and rejecting invalid ones."""
    @pytest.fixture
    def data(self):
        # A minimal valid 5x5 board payload with a single in-range cell.
        return {
            'id': 1,
            'number_of_rows': 5,
            'number_of_columns': 5,
            'cells': [
                {
                    'id': 1,
                    'row_number': 1,
                    'column_number': 2,
                    'value': 3
                }
            ]
        }
    @pytest.fixture
    def patched_board(self):
        # Patch the Board model so load() yields a mock whose call args we
        # can inspect; the patch is undone after the test resumes the fixture.
        patcher = mock.patch('puzzle_engine.hitori.schemas.Board')
        yield patcher.start()
        patcher.stop()
    def test_board_schema_loads(self, data, patched_board):
        """A valid payload is deserialized into a Board built from Cell objects."""
        loaded_data = BoardSchema(strict=True).load(data).data
        assert patched_board.return_value is loaded_data
        # Board must have been called with keyword arguments only.
        call = patched_board.call_args[1]
        assert call['number_of_rows'] == data['number_of_rows']
        assert call['number_of_columns'] == data['number_of_columns']
        # The raw cell dicts are converted to objects with attribute access.
        assert call['cells']
        assert len(call['cells']) == 1
        cell = call['cells'][0]
        assert cell.row_number == 1
        assert cell.column_number == 2
        assert cell.value == 3
    # Invalid payloads. Covers negative board dimensions and cells whose
    # coordinates fall outside the board; the last two entries show that a
    # coordinate equal to the dimension is also rejected (coordinates appear
    # to be 0-based, so the valid range is [0, dimension)).
    bad_data = [
        # number_of_rows is negative
        {
            'id': 1,
            'number_of_rows': -5,
            'number_of_columns': 5,
            'cells': [
                {
                    'id': 1,
                    'row_number': 1,
                    'column_number': 2,
                    'value': 3
                }
            ]
        },
        # number_of_columns is negative
        {
            'id': 1,
            'number_of_rows': 5,
            'number_of_columns': -5,
            'cells': [
                {
                    'id': 1,
                    'row_number': 1,
                    'column_number': 2,
                    'value': 3
                }
            ]
        },
        # both dimensions are negative
        {
            'id': 1,
            'number_of_rows': -5,
            'number_of_columns': -5,
            'cells': [
                {
                    'id': 1,
                    'row_number': 1,
                    'column_number': 2,
                    'value': 3
                }
            ]
        },
        # cell row_number (10) is beyond the 5 rows
        {
            'id': 1,
            'number_of_rows': 5,
            'number_of_columns': 5,
            'cells': [
                {
                    'id': 1,
                    'row_number': 10,
                    'column_number': 2,
                    'value': 3
                }
            ]
        },
        # cell column_number (12) is beyond the 5 columns
        {
            'id': 1,
            'number_of_rows': 5,
            'number_of_columns': 5,
            'cells': [
                {
                    'id': 2,
                    'row_number': 3,
                    'column_number': 12,
                    'value': 3
                }
            ]
        },
        # both cell coordinates are out of range
        {
            'id': 1,
            'number_of_rows': 5,
            'number_of_columns': 5,
            'cells': [
                {
                    'id': 2,
                    'row_number': 10,
                    'column_number': 12,
                    'value': 3
                }
            ]
        },
        # one out-of-range cell among otherwise valid cells still fails
        {
            'id': 1,
            'number_of_rows': 5,
            'number_of_columns': 5,
            'cells': [
                {
                    'id': 3,
                    'row_number': 1,
                    'column_number': 6,
                    'value': 3
                },
                {
                    'id': 5,
                    'row_number': 3,
                    'column_number': 2,
                    'value': 3
                }
            ]
        },
        # boundary: row_number == number_of_rows is rejected
        {
            'id': 1,
            'number_of_rows': 5,
            'number_of_columns': 5,
            'cells': [
                {
                    'id': 1,
                    'row_number': 5,
                    'column_number': 3,
                    'value': 3
                }
            ]
        },
        # boundary: column_number == number_of_columns is rejected
        {
            'id': 1,
            'number_of_rows': 5,
            'number_of_columns': 5,
            'cells': [
                {
                    'id': 2,
                    'row_number': 3,
                    'column_number': 5,
                    'value': 3
                }
            ]
        },
    ]
    @pytest.mark.parametrize('data', bad_data)
    def test_board_schema_validates(self, data):
        # Each invalid payload raises a marshmallow ValidationError.
        with pytest.raises(ValidationError):
            BoardSchema(strict=True).load(data)
class TestHitoriSolutionSchema:
    """Tests that HitoriSolutionSchema dumps nested objects down to their ids."""

    @pytest.fixture
    def cells_on(self):
        return [
            {'id': 3, 'row_number': 1, 'column_number': 2, 'value': 3},
            {'id': 5, 'row_number': 3, 'column_number': 2, 'value': 3},
        ]

    @pytest.fixture
    def cells_off(self):
        return [{'id': 3, 'row_number': 1, 'column_number': 6, 'value': 3}]

    @pytest.fixture
    def board(self):
        return {'id': 2, 'cells': []}

    @pytest.fixture
    def hitori_solution(self, cells_on, cells_off, board):
        return {'cells_on': cells_on, 'cells_off': cells_off, 'board': board}

    @pytest.fixture
    def expected_dumped_hitori_solution(self, cells_on, cells_off, board):
        # Dumping should reduce every nested object to its primary key.
        on_ids = [cell['id'] for cell in cells_on]
        off_ids = [cell['id'] for cell in cells_off]
        return {'cells_on': on_ids, 'cells_off': off_ids, 'board': board['id']}

    def test_hitori_solution_schema_dump(self, hitori_solution, expected_dumped_hitori_solution):
        """Serializing a solution yields id lists and the board id."""
        schema = HitoriSolutionSchema(strict=True)
        dumped = schema.dump(hitori_solution).data
        assert dumped == expected_dumped_hitori_solution
|
[
"puzzle_engine.hitori.schemas.CellSchema",
"mock.patch",
"puzzle_engine.hitori.schemas.BoardSchema",
"pytest.raises",
"pytest.mark.parametrize",
"puzzle_engine.hitori.schemas.HitoriSolutionSchema"
] |
[((1179, 1220), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data"""', 'bad_data'], {}), "('data', bad_data)\n", (1202, 1220), False, 'import pytest\n'), ((5592, 5633), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data"""', 'bad_data'], {}), "('data', bad_data)\n", (5615, 5633), False, 'import pytest\n'), ((436, 483), 'mock.patch', 'mock.patch', (['"""puzzle_engine.hitori.schemas.Cell"""'], {}), "('puzzle_engine.hitori.schemas.Cell')\n", (446, 483), False, 'import mock\n'), ((1821, 1869), 'mock.patch', 'mock.patch', (['"""puzzle_engine.hitori.schemas.Board"""'], {}), "('puzzle_engine.hitori.schemas.Board')\n", (1831, 1869), False, 'import mock\n'), ((1282, 1312), 'pytest.raises', 'pytest.raises', (['ValidationError'], {}), '(ValidationError)\n', (1295, 1312), False, 'import pytest\n'), ((5696, 5726), 'pytest.raises', 'pytest.raises', (['ValidationError'], {}), '(ValidationError)\n', (5709, 5726), False, 'import pytest\n'), ((618, 641), 'puzzle_engine.hitori.schemas.CellSchema', 'CellSchema', ([], {'strict': '(True)'}), '(strict=True)\n', (628, 641), False, 'from puzzle_engine.hitori.schemas import CellSchema, BoardSchema, HitoriSolutionSchema\n'), ((1326, 1349), 'puzzle_engine.hitori.schemas.CellSchema', 'CellSchema', ([], {'strict': '(True)'}), '(strict=True)\n', (1336, 1349), False, 'from puzzle_engine.hitori.schemas import CellSchema, BoardSchema, HitoriSolutionSchema\n'), ((2006, 2030), 'puzzle_engine.hitori.schemas.BoardSchema', 'BoardSchema', ([], {'strict': '(True)'}), '(strict=True)\n', (2017, 2030), False, 'from puzzle_engine.hitori.schemas import CellSchema, BoardSchema, HitoriSolutionSchema\n'), ((5740, 5764), 'puzzle_engine.hitori.schemas.BoardSchema', 'BoardSchema', ([], {'strict': '(True)'}), '(strict=True)\n', (5751, 5764), False, 'from puzzle_engine.hitori.schemas import CellSchema, BoardSchema, HitoriSolutionSchema\n'), ((7076, 7109), 'puzzle_engine.hitori.schemas.HitoriSolutionSchema', 'HitoriSolutionSchema', ([], 
{'strict': '(True)'}), '(strict=True)\n', (7096, 7109), False, 'from puzzle_engine.hitori.schemas import CellSchema, BoardSchema, HitoriSolutionSchema\n')]
|
#!/usr/bin/python3
import pandas as pd
import argparse
if __name__ == "__main__":
    # CLI: read an installation time-tracking xlsx sheet and export the
    # tower/nacelle/blade ("TNH configuration") timing selection as a pickle.
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", help="xlsx sheet containing the time tracks of the installation")
    parser.add_argument("-o", "--output", help="output file path")
    parser.add_argument("-v", "--verbose", help="verbose flag", action='store_true')
    args = parser.parse_args()

    # Both paths are mandatory; exit with a non-zero status on misuse.
    if not args.input:
        print("*! please provide an input file")
        exit(1)
    if not args.output:
        print("*! please provide an output file")
        exit(1)

    if args.verbose:
        print('* reading in time tracking sheet: {}'.format(args.input))
    try:
        # Parse every start/end column directly into datetimes while reading.
        data = pd.read_excel(args.input,
                converters = {
                    'Tower Installation Start' : pd.to_datetime,
                    'Tower Installation End' : pd.to_datetime,
                    'Nacelle Installation Start' : pd.to_datetime,
                    'Nacelle Installation End' : pd.to_datetime,
                    'Blade Installation Start' : pd.to_datetime,
                    'Blade Installation End' : pd.to_datetime,
                },
                )
    except Exception as e:
        print("*! failed to read in excel file: {}".format(e))
        exit(1)
    if args.verbose:
        print('* input file: {}'.format(args.input))

    # Keep only turbines whose tower installation has actually finished.
    installationTimes = data[~data['Tower Installation End'].isnull()]
    installationTimes.reset_index(inplace=True)
    # Waiting time between nacelle completion and blade start.
    installationTimes.insert(loc=len(installationTimes.columns),
            column='deltaT TNH Configuration',
            value=installationTimes['Blade Installation Start'] - installationTimes['Nacelle Installation End'],
            )
    selectionTNH = pd.DataFrame({'OWEC' : installationTimes['OWEC'],
            'Nacelle Installation End' : installationTimes['Nacelle Installation End'],
            'Blade Installation Start' : installationTimes['Blade Installation Start'],
            'deltaT TNH Configuration' : installationTimes['deltaT TNH Configuration'],
            })
    try:
        selectionTNH.to_pickle(args.output)
    except Exception as e:
        print('*! failed to export time selection pickle: {}'.format(e))
        exit(1)
|
[
"pandas.DataFrame",
"argparse.ArgumentParser",
"pandas.read_excel"
] |
[((97, 122), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (120, 122), False, 'import argparse\n'), ((1997, 2288), 'pandas.DataFrame', 'pd.DataFrame', (["{'OWEC': installationTimes['OWEC'], 'Nacelle Installation End':\n installationTimes['Nacelle Installation End'],\n 'Blade Installation Start': installationTimes[\n 'Blade Installation Start'], 'deltaT TNH Configuration':\n installationTimes['deltaT TNH Configuration']}"], {}), "({'OWEC': installationTimes['OWEC'], 'Nacelle Installation End':\n installationTimes['Nacelle Installation End'],\n 'Blade Installation Start': installationTimes[\n 'Blade Installation Start'], 'deltaT TNH Configuration':\n installationTimes['deltaT TNH Configuration']})\n", (2009, 2288), True, 'import pandas as pd\n'), ((704, 1021), 'pandas.read_excel', 'pd.read_excel', (['args.input'], {'converters': "{'Tower Installation Start': pd.to_datetime, 'Tower Installation End': pd.\n to_datetime, 'Nacelle Installation Start': pd.to_datetime,\n 'Nacelle Installation End': pd.to_datetime, 'Blade Installation Start':\n pd.to_datetime, 'Blade Installation End': pd.to_datetime}"}), "(args.input, converters={'Tower Installation Start': pd.\n to_datetime, 'Tower Installation End': pd.to_datetime,\n 'Nacelle Installation Start': pd.to_datetime,\n 'Nacelle Installation End': pd.to_datetime, 'Blade Installation Start':\n pd.to_datetime, 'Blade Installation End': pd.to_datetime})\n", (717, 1021), True, 'import pandas as pd\n')]
|
import logging
import shutil
import subprocess
import uuid
import datetime
import json
from murakami.errors import RunnerError
from murakami.runner import MurakamiRunner
logger = logging.getLogger(__name__)
class Ndt5Client(MurakamiRunner):
    """Run an NDT5 measurement via the external ``ndt5-client`` binary.

    ``_start_test`` shells out to ``ndt5-client``, parses its JSON summary
    and returns a JSON string in Murakami's output format.
    """

    def __init__(self, config=None, data_cb=None,
                 location=None, network_type=None, connection_type=None,
                 device_id=None):
        super().__init__(
            title="ndt5",
            description="The Network Diagnostic Tool v5 test.",
            config=config,
            data_cb=data_cb,
            location=location,
            network_type=network_type,
            connection_type=connection_type,
            device_id=device_id
        )

    # Output fields that only carry values after a successful run; they are
    # explicitly nulled out when ndt5-client exits non-zero (order matters:
    # it determines key order in the emitted JSON).
    _RESULT_FIELDS = (
        'ServerName', 'ServerIP', 'ClientIP', 'DownloadUUID',
        'DownloadValue', 'DownloadUnit', 'UploadValue', 'UploadUnit',
        'DownloadRetransValue', 'DownloadRetransUnit',
        'MinRTTValue', 'MinRTTUnit',
    )

    def _build_cmdargs(self):
        """Assemble the ndt5-client command line from the runner config."""
        cmdargs = [
            "ndt5-client",
            "-format=json",
            "-quiet"
        ]
        if "host" in self._config:
            cmdargs.append(self._config['host'])
        # Default to skipping TLS verification unless the config disables it.
        if self._config.get('insecure', True):
            cmdargs.append('--insecure')
        return cmdargs

    def _parse_success(self, murakami_output, stdout):
        """Fill murakami_output from a successful run's JSON summary.

        Raises:
            RunnerError: if stdout is not a valid JSON summary.
        """
        try:
            summary = json.loads(stdout)
        except json.JSONDecodeError:
            raise RunnerError(
                'ndt5-client',
                'ndt5-client did not return a valid JSON summary.')
        logger.info("ndt5 test completed successfully.")
        download = summary.get('Download')
        upload = summary.get('Upload')
        retrans = summary.get('DownloadRetrans')
        min_rtt = summary.get('MinRTT')
        murakami_output['ServerName'] = summary.get('ServerFQDN')
        murakami_output['ServerIP'] = summary.get('ServerIP')
        murakami_output['ClientIP'] = summary.get('ClientIP')
        murakami_output['DownloadUUID'] = summary.get('DownloadUUID')
        # Sub-dicts may be absent; only add their fields when present.
        if download is not None:
            murakami_output['DownloadValue'] = download.get('Value')
            murakami_output['DownloadUnit'] = download.get('Unit')
        if upload is not None:
            murakami_output['UploadValue'] = upload.get('Value')
            murakami_output['UploadUnit'] = upload.get('Unit')
        if retrans is not None:
            murakami_output['DownloadRetransValue'] = retrans.get('Value')
            murakami_output['DownloadRetransUnit'] = retrans.get('Unit')
        if min_rtt is not None:
            murakami_output['MinRTTValue'] = min_rtt.get('Value')
            murakami_output['MinRTTUnit'] = min_rtt.get('Unit')

    def _parse_failure(self, murakami_output, stdout):
        """Record a failed run: stdout becomes TestError, result fields None."""
        # Logger.warn is deprecated; warning() is the supported spelling.
        logger.warning("ndt5 test completed with errors.")
        # Consider any output as 'TestError'.
        murakami_output['TestError'] = stdout
        # All the other fields become None (null in the emitted JSON).
        for field in self._RESULT_FIELDS:
            murakami_output[field] = None

    def _start_test(self):
        """Run ndt5-client and return Murakami-formatted results as JSON.

        Raises:
            RunnerError: if the ndt5-client executable is missing or its
                output cannot be parsed.
        """
        logger.info("Starting NDT5 test...")
        if shutil.which("ndt5-client") is None:
            raise RunnerError(
                "ndt5-client",
                "Executable ndt5-client does not exist, please install ndt5-client-go.",
            )
        cmdargs = self._build_cmdargs()
        starttime = datetime.datetime.utcnow()
        output = subprocess.run(
            cmdargs,
            text=True,
            capture_output=True,
        )
        endtime = datetime.datetime.utcnow()
        murakami_output = {
            'TestName': "ndt5",
            'TestStartTime': starttime.strftime('%Y-%m-%dT%H:%M:%S.%f'),
            'TestEndTime': endtime.strftime('%Y-%m-%dT%H:%M:%S.%f'),
            'MurakamiLocation': self._location,
            'MurakamiConnectionType': self._connection_type,
            'MurakamiNetworkType': self._network_type,
            'MurakamiDeviceID': self._device_id,
        }
        if output.returncode == 0:
            self._parse_success(murakami_output, output.stdout)
        else:
            self._parse_failure(murakami_output, output.stdout)
        return json.dumps(murakami_output)
|
[
"subprocess.run",
"json.loads",
"shutil.which",
"json.dumps",
"datetime.datetime.utcnow",
"logging.getLogger",
"murakami.errors.RunnerError"
] |
[((181, 208), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (198, 208), False, 'import logging\n'), ((822, 849), 'shutil.which', 'shutil.which', (['"""ndt5-client"""'], {}), "('ndt5-client')\n", (834, 849), False, 'import shutil\n'), ((1247, 1273), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (1271, 1273), False, 'import datetime\n'), ((1295, 1350), 'subprocess.run', 'subprocess.run', (['cmdargs'], {'text': '(True)', 'capture_output': '(True)'}), '(cmdargs, text=True, capture_output=True)\n', (1309, 1350), False, 'import subprocess\n'), ((1436, 1462), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (1460, 1462), False, 'import datetime\n'), ((4737, 4764), 'json.dumps', 'json.dumps', (['murakami_output'], {}), '(murakami_output)\n', (4747, 4764), False, 'import json\n'), ((4797, 4900), 'murakami.errors.RunnerError', 'RunnerError', (['"""ndt5-client"""', '"""Executable ndt5-client does not exist, please install ndt5-client-go."""'], {}), "('ndt5-client',\n 'Executable ndt5-client does not exist, please install ndt5-client-go.')\n", (4808, 4900), False, 'from murakami.errors import RunnerError\n'), ((2083, 2108), 'json.loads', 'json.loads', (['output.stdout'], {}), '(output.stdout)\n', (2093, 2108), False, 'import json\n'), ((2180, 2258), 'murakami.errors.RunnerError', 'RunnerError', (['"""ndt5-client"""', '"""ndt5-client did not return a valid JSON summary."""'], {}), "('ndt5-client', 'ndt5-client did not return a valid JSON summary.')\n", (2191, 2258), False, 'from murakami.errors import RunnerError\n')]
|
# -*- encoding:utf-8 -*-
from kscore.session import get_session
if __name__ == "__main__":
    # Smoke-test the KEC API: list, create and terminate instances.
    session = get_session()
    kec_client = session.create_client("kec", "cn-beijing-6", use_ssl=False)
    # https://docs.ksyun.com/read/latest/52/_book/oaDescribeInstances.html
    kec_client.describe_instances()
    # https://docs.ksyun.com/read/latest/52/_book/oaRunInstances.html
    kec_client.run_instances(
        MaxCount=50, MinCount=20, ImageId="3f3bddcf-4982-4ab4-a63d-795e8d74e9d5",
        SubnetId="f1bd236b-7fd3-44d3-aef9-2d673a65466e", InstancePassword="<PASSWORD>",
        SecurityGroupId="2f43a9e4-1a3c-448e-b661-efa6d04b82fc", DataDiskGb=50, ChargeType="Monthly",
        InstanceType="C1.1A", PurchaseTime=1, InstanceName="test", InstanceNameSuffix="1")
    # https://docs.ksyun.com/read/latest/52/_book/oaTerminateInstances.html
    # The API expects numbered keys: InstanceId.1, InstanceId.2, ...
    instance_ids = ["2f43a9e4-1a3c-448e-b661-efa6d04b82fc", "2f43a9e4-1a3c-448e-b661-efa6d04b82fc"]
    terminate_kwargs = {
        "InstanceId.{}".format(position): instance_id
        for position, instance_id in enumerate(instance_ids, 1)
    }
    kec_client.terminate_instances(**terminate_kwargs)
|
[
"kscore.session.get_session"
] |
[((101, 114), 'kscore.session.get_session', 'get_session', ([], {}), '()\n', (112, 114), False, 'from kscore.session import get_session\n')]
|
from unittest import TestCase
from numpy import sort
from pm4py.objects.petri import importer
from pm4py.objects.log.importer.xes import factory as xes_importer
from da4py.main.analytics.amstc import Amstc, samplingVariantsForAmstc
class TestAmstc(TestCase):
'''
This class aims at testing amstc.py file.
'''
net, m0, mf = importer.factory.apply("../../examples/medium/model2.pnml")
log = xes_importer.apply("../../examples/medium/model2.xes")
def testSamplingVariantsForAmstcDistanceZero(self):
'''
Test classical clustering of Generalized Alignment-based Trace Clustering
'''
sampleSize=9
sizeOfRun=8
maxD=0
maxNbC=5
m=2
clustering=samplingVariantsForAmstc(self.net,self.m0,self.mf,self.log,\
sampleSize,sizeOfRun,maxD,maxNbC,m,maxCounter=1,silent_label="tau")
assert len(clustering)==4
size_of_clusters=sort([len(list) for (centroid,list) in clustering ])
assert ([2,2,2,3]==size_of_clusters).all()
def testSamplingVariantsForAmstcDistanceTwo(self):
'''
Test other parameters
:return:
'''
sampleSize=9
sizeOfRun=8
maxD=2
maxNbC=5
m=2
clustering=samplingVariantsForAmstc(self.net,self.m0,self.mf,self.log, \
sampleSize,sizeOfRun,maxD,maxNbC,m,maxCounter=1,silent_label="tau")
assert len(clustering)==3
size_of_clusters=sort([len(list) for (centroid,list) in clustering ])
assert ([1,3,5]==size_of_clusters).all()
|
[
"da4py.main.analytics.amstc.samplingVariantsForAmstc",
"pm4py.objects.petri.importer.factory.apply",
"pm4py.objects.log.importer.xes.factory.apply"
] |
[((343, 402), 'pm4py.objects.petri.importer.factory.apply', 'importer.factory.apply', (['"""../../examples/medium/model2.pnml"""'], {}), "('../../examples/medium/model2.pnml')\n", (365, 402), False, 'from pm4py.objects.petri import importer\n'), ((413, 467), 'pm4py.objects.log.importer.xes.factory.apply', 'xes_importer.apply', (['"""../../examples/medium/model2.xes"""'], {}), "('../../examples/medium/model2.xes')\n", (431, 467), True, 'from pm4py.objects.log.importer.xes import factory as xes_importer\n'), ((735, 875), 'da4py.main.analytics.amstc.samplingVariantsForAmstc', 'samplingVariantsForAmstc', (['self.net', 'self.m0', 'self.mf', 'self.log', 'sampleSize', 'sizeOfRun', 'maxD', 'maxNbC', 'm'], {'maxCounter': '(1)', 'silent_label': '"""tau"""'}), "(self.net, self.m0, self.mf, self.log, sampleSize,\n sizeOfRun, maxD, maxNbC, m, maxCounter=1, silent_label='tau')\n", (759, 875), False, 'from da4py.main.analytics.amstc import Amstc, samplingVariantsForAmstc\n'), ((1303, 1443), 'da4py.main.analytics.amstc.samplingVariantsForAmstc', 'samplingVariantsForAmstc', (['self.net', 'self.m0', 'self.mf', 'self.log', 'sampleSize', 'sizeOfRun', 'maxD', 'maxNbC', 'm'], {'maxCounter': '(1)', 'silent_label': '"""tau"""'}), "(self.net, self.m0, self.mf, self.log, sampleSize,\n sizeOfRun, maxD, maxNbC, m, maxCounter=1, silent_label='tau')\n", (1327, 1443), False, 'from da4py.main.analytics.amstc import Amstc, samplingVariantsForAmstc\n')]
|
import sys
import os
import arginfer
import argparse
import logging
import msprime
from arginfer.mcmc import *
from arginfer.plots import *
# import comparison.plot
'''
command line interface for arginfer
'''
# Module-level logger plus the record format that setup_logging() applies.
logger = logging.getLogger(__name__)
log_format = "%(asctime)s %(levelname)s %(message)s"
def error_exit(message):
    """Abort the program, printing ``message`` prefixed with the program name
    and exiting with a non-zero status."""
    sys.exit(f"{sys.argv[0]}: {message}")
def setup_logging(args):
    """Configure root logging: DEBUG when --verbose is set, WARN otherwise."""
    level = "DEBUG" if args.verbose else "WARN"
    logging.basicConfig(level=level, format=log_format)
def arginfer_cli_parser():
    """Build the top-level ``arginfer`` argument parser.

    Returns an argparse.ArgumentParser with one required subcommand,
    ``infer``, whose ``runner`` default dispatches to run_mcmc().
    """
    high_parser = argparse.ArgumentParser(prog="arginfer",
                        description="This is the command line interface for arginfer, "
                                    "a probabilistic method to infer the Ancestral Recombination Graph.")
    high_parser.add_argument(
        "-V", "--version", action="version", version=f"%(prog)s {arginfer.__version__}")
    subparsers = high_parser.add_subparsers(dest="subcommand")
    subparsers.required = True
    parser = subparsers.add_parser(
        "infer",
        help=(
            "Takes the data or the ARG in tree sequence full_ARG format and "
            "returns MCMC sampled ARGs."
        ),
    )
    parser.add_argument('--tsfull', type=argparse.FileType('r', encoding='UTF-8'), default=None,
                        help='an msprime .args file.'
                        ' If None, build an ARG from haplotype data')
    parser.add_argument('--input_path',type=str,
                        default=os.getcwd()+"/data", help='The path to input data, '
                                   'this is the path to haplotype, ancestral allele, and snp_pos ')
    parser.add_argument('--haplotype_name' , type = str,
                        default= None, help='the haplotype file name',#"haplotype_ready.txt"
                        required=False)
    parser.add_argument('--ancAllele_name' , type = str,
                        default= "ancestral_allele_ready.txt",
                        help='a txt file of ancestral allele for each snp',
                        required=False)
    # NOTE(review): the default below looks copy-pasted from --ancAllele_name;
    # "ancestral_allele_ready.txt" is presumably wrong for a SNP-position
    # file — confirm the intended default before changing it.
    parser.add_argument('--snpPos_name' , type = str,
                        default= "ancestral_allele_ready.txt",
                        help='a txt file of SNP chrom position',
                        required=False)
    parser.add_argument('--iteration','-I', type=int, default=20,
                        help= 'the number of mcmc iterations')
    parser.add_argument('--thin', type=int, default= 10, help=' thining steps')
    parser.add_argument('--burn', '-b', type=int, default= 0, help=' The burn-in')
    parser.add_argument('--sample_size', '-n', type=int, default= 5, help=' sample size')
    parser.add_argument('--seq_length','-L', type=float, default=1e4,help='sequence length')
    parser.add_argument('--Ne', type=int, default= 5000, help=' effective population size')
    parser.add_argument('--recombination_rate', '-r', type=float, default=1e-8,
                        help=' the recombination rate per site per generation ')
    parser.add_argument('--mutation_rate', '-mu', type=float, default=1e-8,
                        help='the mutation rate per site per generation')
    parser.add_argument('--outpath', '-O',type=str,
                        default=os.getcwd()+"/output", help='The output path')
    parser.add_argument( '-p','--plot', help="plot the output", action="store_true")
    parser.add_argument("--random-seed", "-s", type = int, default=1)
    parser.add_argument("-v", "--verbose", help="increase output verbosity", action="store_true")
    parser.add_argument( "--verify", help="verify the output ARG", action="store_true")
    # The generic dispatch in arginfer_main() calls args.runner(args).
    parser.set_defaults(runner=run_mcmc)
    #if you need any other subparsers, they are added here
    return high_parser
def run_mcmc(args):
input_data_path = args.input_path
haplotype_data_name = args.haplotype_name
ancAllele_data_name = args.ancAllele_name
snpPos_data_name= args.snpPos_name
iteration = args.iteration
thin = args.thin
burn = args.burn
n = args.sample_size
seq_length = args.seq_length
mu = args.mutation_rate
r= args.recombination_rate
Ne= args.Ne
outpath = args.outpath
tsfull = None
if args.tsfull !=None:#else real data
try:
tsfull = msprime.load(args.tsfull.name) #trees is a fh
except AttributeError:
tsfull = msprime.load(args.tsfull)
# random.seed(args.random_seed)
# np.random.seed(args.random_seed+1)
mcmc = MCMC(tsfull, n, Ne, seq_length, mu, r,
input_data_path,
haplotype_data_name,
ancAllele_data_name,
snpPos_data_name, outpath, args.verbose)
mcmc.run(iteration, thin, burn, args.verify)
if args.plot:
# p= comparison.plot.Trace(outpath, name= "summary")
p= Trace(outpath)
p.arginfer_trace()
# if args.plot:
# p = plot_summary(outpath)
# p.plot()
if args.verbose:
mcmc.print_state()
def arginfer_main(arg_list=None):
parser = arginfer_cli_parser()
args = parser.parse_args(arg_list)
setup_logging(args)
args.runner(args)
# run_mcmc(args)
# if __name__=='__main__':
# arginfer_main()
|
[
"argparse.ArgumentParser",
"logging.basicConfig",
"os.getcwd",
"msprime.load",
"argparse.FileType",
"logging.getLogger"
] |
[((218, 245), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (235, 245), False, 'import logging\n'), ((566, 621), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'log_level', 'format': 'log_format'}), '(level=log_level, format=log_format)\n', (585, 621), False, 'import logging\n'), ((668, 849), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""arginfer"""', 'description': '"""This is the command line interface for arginfer, a probabilistic method to infer the Ancestral Recombination Graph."""'}), "(prog='arginfer', description=\n 'This is the command line interface for arginfer, a probabilistic method to infer the Ancestral Recombination Graph.'\n )\n", (691, 849), False, 'import argparse\n'), ((1329, 1369), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {'encoding': '"""UTF-8"""'}), "('r', encoding='UTF-8')\n", (1346, 1369), False, 'import argparse\n'), ((4427, 4457), 'msprime.load', 'msprime.load', (['args.tsfull.name'], {}), '(args.tsfull.name)\n', (4439, 4457), False, 'import msprime\n'), ((1635, 1646), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1644, 1646), False, 'import os\n'), ((3399, 3410), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3408, 3410), False, 'import os\n'), ((4525, 4550), 'msprime.load', 'msprime.load', (['args.tsfull'], {}), '(args.tsfull)\n', (4537, 4550), False, 'import msprime\n')]
|
# Generated by Django 3.2 on 2021-04-24 03:25
from django.db import migrations, models # pragma: no cover
import django.db.models.deletion # pragma: no cover
class Migration(migrations.Migration): # pragma: no cover
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Userprofile',
fields=[
('user_id', models.CharField(max_length=40, primary_key=True, serialize=False)),
('user_name', models.CharField(default='DATAHACK', max_length=30)),
('email_address', models.CharField(default='', max_length=200)),
('short_tax_rate', models.FloatField(default=0.0)),
('long_tax_rate', models.FloatField(default=0.0)),
('invest_horizon', models.FloatField(default=0.0)),
],
),
migrations.CreateModel(
name='Stock',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=30)),
('name', models.CharField(max_length=50)),
('purchase_price', models.FloatField(default=-1.0)),
('target_price', models.FloatField(default=0.0)),
('expect_return_rate', models.FloatField(default=0.0)),
('purchase_date', models.CharField(max_length=100)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='stocks', to='api.userprofile')),
],
),
]
|
[
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.FloatField",
"django.db.models.AutoField"
] |
[((413, 479), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)', 'primary_key': '(True)', 'serialize': '(False)'}), '(max_length=40, primary_key=True, serialize=False)\n', (429, 479), False, 'from django.db import migrations, models\n'), ((512, 563), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""DATAHACK"""', 'max_length': '(30)'}), "(default='DATAHACK', max_length=30)\n", (528, 563), False, 'from django.db import migrations, models\n'), ((600, 644), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(200)'}), "(default='', max_length=200)\n", (616, 644), False, 'from django.db import migrations, models\n'), ((682, 712), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0.0)'}), '(default=0.0)\n', (699, 712), False, 'from django.db import migrations, models\n'), ((749, 779), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0.0)'}), '(default=0.0)\n', (766, 779), False, 'from django.db import migrations, models\n'), ((817, 847), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0.0)'}), '(default=0.0)\n', (834, 847), False, 'from django.db import migrations, models\n'), ((978, 1071), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (994, 1071), False, 'from django.db import migrations, models\n'), ((1095, 1126), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (1111, 1126), False, 'from django.db import migrations, models\n'), ((1154, 1185), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1170, 1185), False, 'from django.db import migrations, models\n'), ((1223, 1254), 'django.db.models.FloatField', 
'models.FloatField', ([], {'default': '(-1.0)'}), '(default=-1.0)\n', (1240, 1254), False, 'from django.db import migrations, models\n'), ((1290, 1320), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0.0)'}), '(default=0.0)\n', (1307, 1320), False, 'from django.db import migrations, models\n'), ((1362, 1392), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0.0)'}), '(default=0.0)\n', (1379, 1392), False, 'from django.db import migrations, models\n'), ((1429, 1461), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1445, 1461), False, 'from django.db import migrations, models\n'), ((1489, 1601), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""stocks"""', 'to': '"""api.userprofile"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='stocks', to='api.userprofile')\n", (1506, 1601), False, 'from django.db import migrations, models\n')]
|
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2019-2020 Arthur
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from datetime import datetime
from utilsx.console import Prettier, Colors
d = Colors.default.value
r = Colors.red.value
lr = Colors.light_red.value
b = Colors.blue.value
lb = Colors.light_blue.value
y = Colors.yellow.value
ly = Colors.light_yellow.value
# Handles most console messages.
class PrintHandler:
def __init__(self, prettier: Prettier, log: bool):
self.log = log
self.prettier = prettier
self.info_prefix = f"\b{b}[{lb}INFO{b}]{d} "
self.warning_prefix = f"\b{y}[{ly}WARN{y}]{d} "
self.fatal_prefix = f"\b{r}[{lr}FATAL{r}]{d} "
def printf(self, message: str) -> None:
"""
Format prints a message to the console.
(date + message)
:param message: The message that must be printed.
"""
self.prettier.print(message + d, datetime.now())
def info(self, message: str) -> None:
"""
Sends a message with the INFO prefix.
:param message: The message that must be printed.
"""
if self.log:
self.printf(self.info_prefix + message)
def warn(self, message: str) -> None:
"""
Sends a message with the WARN prefix.
:param message: The message that must be printed.
"""
if self.log:
self.printf(self.warning_prefix + message)
def fatal(self, message: str) -> None:
"""
Sends a message with the FATAL prefix.
:param message: The message that must be printed.
"""
if self.log:
self.printf(self.fatal_prefix + message)
|
[
"datetime.datetime.now"
] |
[((1931, 1945), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1943, 1945), False, 'from datetime import datetime\n')]
|
import argparse
import time
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms, models
from models import *
from diagnostics import do_diagnostics
# TODOs
# Fix hyperparameters to match previous literature
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help="enable gpu training and inference",
action="store_true", default=True)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--res', help="enable residual connections",
action="store_true", default=False)
parser.add_argument('--n_res', type=int, default=1) ## Number of layers
parser.add_argument('--p_ber', type=float, default=0.1)
##
parser.add_argument('--a1', type=float, default=10.0) #Parameter of first gamma
parser.add_argument('--a2', type=float, default=10.0) #Parameter of second gamma
parser.add_argument('--l2', type=float, default=1e-4)
#parser.add_argument('-s', '--samples', type=int, default=1)
parser.add_argument('--hid_dim', type=int, default=50)
parser.add_argument('--batch_size', type=int, default=512)
parser.add_argument('--test_batch_size', type=int, default=1000)
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--seed', type=int, default=0)
ds = ['mnist', 'cifar']
parser.add_argument('--dataset', choices=ds, default='mnist')
noises = ['none', 'bernoulli', 'cumulative_bern', 'decay_gauss', 'addexp', 'addgamm', 'cumgamm']
parser.add_argument('--noise', choices=noises, default='none')
args = parser.parse_args()
# Set the random seed, so the experiment is reproducible
torch.manual_seed(args.seed)
# For the moment, we will just train on CPU, so no cuda
use_cuda = args.gpu
device = torch.device("cuda" if use_cuda else "cpu")
def train(model, device, train_loader, optimizer, epoch, train_losses, criterion):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
#loss = F.nll_loss(output, target)
loss = criterion(output, target)
loss.backward()
optimizer.step()
if batch_idx % 100 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
train_losses.append(loss.item())
def test(model, device, test_loader, criterion):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
# sum up batch loss
#test_loss += F.nll_loss(output, target, reduction='sum').item()
test_loss += criterion(output, target).item()
# get the index of the max log-probability
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
if args.dataset == 'mnist':
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, num_workers=2)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.test_batch_size, shuffle=True, num_workers=2)
# The size of the input. MNIST are greyscale images, 28x28 pixels each
in_size = 28*28
out_dim = 10
elif args.dataset == 'cifar':
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = datasets.CIFAR10(root='../data', train=True,
download=True, transform=transform_train)
train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size,
shuffle=True, num_workers=2)
testset = datasets.CIFAR10(root='../data', train=False,
download=True, transform=transform_test)
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch_size,
shuffle=False, num_workers=2)
in_size = 32*32*3
out_dim = 10
if args.noise == 'none':
def dropout(x, context): return x
elif args.noise == 'bernoulli':
dropout = Dropout(p=args.p_ber).to(device)
elif args.noise == 'cumulative_bern':
dropout = CumulativeDropout().to(device)
elif args.noise == 'addexp':
dropout = GammaProcesses('exp', args.a1, args.a2, args.n_res)
elif args.noise == 'addgamm':
dropout = GammaProcesses('add', args.a1, args.a2, args.n_res)
elif args.noise == 'cumgamm':
dropout = GammaProcesses('mul', args.a1, args.a2, args.n_res)
elif args.noise == 'decay_gauss':
dropout = ExpDecayGauss().to(device)
model = MLP(in_size, out_dim, args.hid_dim, dropout, args).to(device)
model = models.resnet18(pretrained=False).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[150, 250], gamma=0.1)
print(model)
training_losses = []
for epoch in range(1, args.epochs + 1):
t0 = time.time()
train(model, device, train_loader, optimizer, epoch, training_losses, criterion)
t1 = time.time()
print('Epoch ', epoch, '\tdt = ', t1 - t0)
test(model, device, test_loader, criterion)
scheduler.step()
do_diagnostics(model, args)
|
[
"torchvision.models.resnet18",
"argparse.ArgumentParser",
"diagnostics.do_diagnostics",
"torch.utils.data.DataLoader",
"torchvision.transforms.RandomHorizontalFlip",
"torch.manual_seed",
"torch.nn.CrossEntropyLoss",
"time.time",
"torchvision.datasets.CIFAR10",
"torchvision.transforms.ToTensor",
"torch.device",
"torchvision.transforms.Normalize",
"torch.no_grad",
"torchvision.transforms.RandomCrop",
"torch.optim.lr_scheduler.MultiStepLR"
] |
[((340, 365), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (363, 365), False, 'import argparse\n'), ((1695, 1723), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1712, 1723), False, 'import torch\n'), ((1809, 1852), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (1821, 1852), False, 'import torch\n'), ((6048, 6069), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (6067, 6069), True, 'import torch.nn as nn\n'), ((6159, 6234), 'torch.optim.lr_scheduler.MultiStepLR', 'optim.lr_scheduler.MultiStepLR', (['optimizer'], {'milestones': '[150, 250]', 'gamma': '(0.1)'}), '(optimizer, milestones=[150, 250], gamma=0.1)\n', (6189, 6234), True, 'import torch.optim as optim\n'), ((6558, 6585), 'diagnostics.do_diagnostics', 'do_diagnostics', (['model', 'args'], {}), '(model, args)\n', (6572, 6585), False, 'from diagnostics import do_diagnostics\n'), ((6321, 6332), 'time.time', 'time.time', ([], {}), '()\n', (6330, 6332), False, 'import time\n'), ((6427, 6438), 'time.time', 'time.time', ([], {}), '()\n', (6436, 6438), False, 'import time\n'), ((2674, 2689), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2687, 2689), False, 'import torch\n'), ((4682, 4773), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', ([], {'root': '"""../data"""', 'train': '(True)', 'download': '(True)', 'transform': 'transform_train'}), "(root='../data', train=True, download=True, transform=\n transform_train)\n", (4698, 4773), False, 'from torchvision import datasets, transforms, models\n'), ((4832, 4931), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': '(2)'}), '(trainset, batch_size=args.batch_size, shuffle=\n True, num_workers=2)\n', (4859, 4931), False, 'import torch\n'), ((4986, 5077), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', ([], {'root': 
'"""../data"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform_test'}), "(root='../data', train=False, download=True, transform=\n transform_test)\n", (5002, 5077), False, 'from torchvision import datasets, transforms, models\n'), ((5131, 5234), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': 'args.test_batch_size', 'shuffle': '(False)', 'num_workers': '(2)'}), '(testset, batch_size=args.test_batch_size,\n shuffle=False, num_workers=2)\n', (5158, 5234), False, 'import torch\n'), ((5990, 6023), 'torchvision.models.resnet18', 'models.resnet18', ([], {'pretrained': '(False)'}), '(pretrained=False)\n', (6005, 6023), False, 'from torchvision import datasets, transforms, models\n'), ((4302, 4338), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (4323, 4338), False, 'from torchvision import datasets, transforms, models\n'), ((4348, 4381), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (4379, 4381), False, 'from torchvision import datasets, transforms, models\n'), ((4391, 4412), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4410, 4412), False, 'from torchvision import datasets, transforms, models\n'), ((4422, 4493), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.4914, 0.4822, 0.4465)', '(0.2023, 0.1994, 0.201)'], {}), '((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))\n', (4442, 4493), False, 'from torchvision import datasets, transforms, models\n'), ((4554, 4575), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4573, 4575), False, 'from torchvision import datasets, transforms, models\n'), ((4585, 4656), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.4914, 0.4822, 0.4465)', '(0.2023, 0.1994, 0.201)'], {}), '((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))\n', (4605, 4656), False, 'from 
torchvision import datasets, transforms, models\n'), ((3622, 3643), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3641, 3643), False, 'from torchvision import datasets, transforms, models\n'), ((3672, 3714), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (3692, 3714), False, 'from torchvision import datasets, transforms, models\n'), ((3946, 3967), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3965, 3967), False, 'from torchvision import datasets, transforms, models\n'), ((3981, 4023), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (4001, 4023), False, 'from torchvision import datasets, transforms, models\n')]
|
import os
from pip.req import parse_requirements
from setuptools import find_packages, setup
from typing import List
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def long_description() -> str:
path = os.path.join(BASE_DIR, 'README.rst')
with open(path, 'r') as f:
long_description = f.read()
return long_description
def requirements() -> List[str]:
path = os.path.join(BASE_DIR, 'requirements', 'production.txt')
return [str(r.req) for r in parse_requirements(path, session=False)]
setup(
author='<NAME>',
author_email='<EMAIL>',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.6',
],
description='',
install_requires=requirements(),
keywords='rust',
license='MIT',
long_description=long_description(),
name='rustypy',
packages=find_packages(exclude=['tests', '*.tests', '*.tests.*']),
test_suite='tests',
url='https://www.github.com/bcmyers/rust_extension',
version='0.1.0',
)
|
[
"os.path.abspath",
"os.path.join",
"setuptools.find_packages",
"pip.req.parse_requirements"
] |
[((145, 170), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (160, 170), False, 'import os\n'), ((216, 252), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""README.rst"""'], {}), "(BASE_DIR, 'README.rst')\n", (228, 252), False, 'import os\n'), ((394, 450), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""requirements"""', '"""production.txt"""'], {}), "(BASE_DIR, 'requirements', 'production.txt')\n", (406, 450), False, 'import os\n'), ((1109, 1165), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['tests', '*.tests', '*.tests.*']"}), "(exclude=['tests', '*.tests', '*.tests.*'])\n", (1122, 1165), False, 'from setuptools import find_packages, setup\n'), ((483, 522), 'pip.req.parse_requirements', 'parse_requirements', (['path'], {'session': '(False)'}), '(path, session=False)\n', (501, 522), False, 'from pip.req import parse_requirements\n')]
|
from flask import Flask
app = Flask(__name__)
@app.route('/<name>')
def hello_name(name):
return 'Hello %s!' % name
@app.route('/<int:postID>')
def show_blog(postID):
return 'Blog Number %d' % postID
@app.route('/<float:revNo>')
def revision(revNo):
return 'Revision Number %f' % revNo
if __name__ == '__main__':
app.run(debug=True)
|
[
"flask.Flask"
] |
[((31, 46), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (36, 46), False, 'from flask import Flask\n')]
|
import unittest
import itertools
import ir_measures
class TestPytrecEval(unittest.TestCase):
def test_nDCG(self):
qrels = list(ir_measures.read_trec_qrels('''
0 0 D0 0
0 0 D1 1
0 0 D2 1
0 0 D3 2
0 0 D4 0
1 0 D0 1
1 0 D3 2
1 0 D5 2
'''))
run = list(ir_measures.read_trec_run('''
0 0 D0 1 0.8 run
0 0 D2 2 0.7 run
0 0 D1 3 0.3 run
0 0 D3 4 0.4 run
0 0 D4 5 0.1 run
1 0 D1 1 0.8 run
1 0 D3 2 0.7 run
1 0 D4 3 0.3 run
1 0 D2 4 0.4 run
'''))
provider = ir_measures.gdeval
measure = ir_measures.nDCG@20
result = list(provider.iter_calc([measure], qrels, run))
self.assertEqual(result[0].query_id, "0")
self.assertEqual(result[0].value, 0.6201)
self.assertEqual(result[1].query_id, "1")
self.assertEqual(result[1].value, 0.35099)
self.assertEqual(provider.calc_aggregate([measure], qrels, run)[measure], 0.485545)
self.assertEqual(provider.evaluator([measure], qrels).calc_aggregate(run)[measure], 0.485545)
measure = ir_measures.nDCG@2
result = list(provider.iter_calc([measure], qrels, run))
self.assertEqual(result[0].query_id, "0")
self.assertEqual(result[0].value, 0.17377)
self.assertEqual(result[1].query_id, "1")
self.assertEqual(result[1].value, 0.38685)
self.assertEqual(provider.calc_aggregate([measure], qrels, run)[measure], 0.28031)
ev = provider.evaluator([ir_measures.nDCG@20, ir_measures.nDCG@2], qrels)
res = ev.calc_aggregate(run)
self.assertEqual(res[ir_measures.nDCG@20], 0.485545)
self.assertEqual(res[ir_measures.nDCG@2], 0.28031)
res = ev.calc_aggregate(run)
self.assertEqual(res[ir_measures.nDCG@20], 0.485545)
self.assertEqual(res[ir_measures.nDCG@2], 0.28031)
def test_ERR(self):
qrels = list(ir_measures.read_trec_qrels('''
0 0 D0 0
0 0 D1 1
0 0 D2 1
0 0 D3 2
0 0 D4 0
1 0 D0 1
1 0 D3 2
1 0 D5 2
'''))
run = list(ir_measures.read_trec_run('''
0 0 D0 1 0.8 run
0 0 D2 2 0.7 run
0 0 D1 3 0.3 run
0 0 D3 4 0.4 run
0 0 D4 5 0.1 run
1 0 D1 1 0.8 run
1 0 D3 2 0.7 run
1 0 D4 3 0.3 run
1 0 D2 4 0.4 run
'''))
provider = ir_measures.gdeval
measure = ir_measures.ERR@20
result = list(provider.iter_calc([measure], qrels, run))
self.assertEqual(result[0].query_id, "0")
self.assertEqual(result[0].value, 0.10175)
self.assertEqual(result[1].query_id, "1")
self.assertEqual(result[1].value, 0.09375)
self.assertEqual(provider.calc_aggregate([measure], qrels, run)[measure], 0.09775)
measure = ir_measures.ERR@2
result = list(provider.iter_calc([measure], qrels, run))
self.assertEqual(result[0].query_id, "0")
self.assertEqual(result[0].value, 0.03125)
self.assertEqual(result[1].query_id, "1")
self.assertEqual(result[1].value, 0.09375)
self.assertEqual(provider.calc_aggregate([measure], qrels, run)[measure], 0.0625)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"ir_measures.read_trec_run",
"ir_measures.read_trec_qrels"
] |
[((3014, 3029), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3027, 3029), False, 'import unittest\n'), ((142, 260), 'ir_measures.read_trec_qrels', 'ir_measures.read_trec_qrels', (['"""\n0 0 D0 0\n0 0 D1 1\n0 0 D2 1\n0 0 D3 2\n0 0 D4 0\n1 0 D0 1\n1 0 D3 2\n1 0 D5 2\n"""'], {}), '(\n """\n0 0 D0 0\n0 0 D1 1\n0 0 D2 1\n0 0 D3 2\n0 0 D4 0\n1 0 D0 1\n1 0 D3 2\n1 0 D5 2\n"""\n )\n', (169, 260), False, 'import ir_measures\n'), ((271, 468), 'ir_measures.read_trec_run', 'ir_measures.read_trec_run', (['"""\n0 0 D0 1 0.8 run\n0 0 D2 2 0.7 run\n0 0 D1 3 0.3 run\n0 0 D3 4 0.4 run\n0 0 D4 5 0.1 run\n1 0 D1 1 0.8 run\n1 0 D3 2 0.7 run\n1 0 D4 3 0.3 run\n1 0 D2 4 0.4 run\n"""'], {}), '(\n """\n0 0 D0 1 0.8 run\n0 0 D2 2 0.7 run\n0 0 D1 3 0.3 run\n0 0 D3 4 0.4 run\n0 0 D4 5 0.1 run\n1 0 D1 1 0.8 run\n1 0 D3 2 0.7 run\n1 0 D4 3 0.3 run\n1 0 D2 4 0.4 run\n"""\n )\n', (296, 468), False, 'import ir_measures\n'), ((1835, 1953), 'ir_measures.read_trec_qrels', 'ir_measures.read_trec_qrels', (['"""\n0 0 D0 0\n0 0 D1 1\n0 0 D2 1\n0 0 D3 2\n0 0 D4 0\n1 0 D0 1\n1 0 D3 2\n1 0 D5 2\n"""'], {}), '(\n """\n0 0 D0 0\n0 0 D1 1\n0 0 D2 1\n0 0 D3 2\n0 0 D4 0\n1 0 D0 1\n1 0 D3 2\n1 0 D5 2\n"""\n )\n', (1862, 1953), False, 'import ir_measures\n'), ((1964, 2161), 'ir_measures.read_trec_run', 'ir_measures.read_trec_run', (['"""\n0 0 D0 1 0.8 run\n0 0 D2 2 0.7 run\n0 0 D1 3 0.3 run\n0 0 D3 4 0.4 run\n0 0 D4 5 0.1 run\n1 0 D1 1 0.8 run\n1 0 D3 2 0.7 run\n1 0 D4 3 0.3 run\n1 0 D2 4 0.4 run\n"""'], {}), '(\n """\n0 0 D0 1 0.8 run\n0 0 D2 2 0.7 run\n0 0 D1 3 0.3 run\n0 0 D3 4 0.4 run\n0 0 D4 5 0.1 run\n1 0 D1 1 0.8 run\n1 0 D3 2 0.7 run\n1 0 D4 3 0.3 run\n1 0 D2 4 0.4 run\n"""\n )\n', (1989, 2161), False, 'import ir_measures\n')]
|
# pyOCD debugger
# Copyright (c) 2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .cortex_m import CortexM
from ..core import exceptions
import logging
LOG = logging.getLogger(__name__)
# pylint: disable=invalid_name
# CPUID PARTNO values
ARM_CortexM23 = 0xD20
ARM_CortexM33 = 0xD21
ARM_CortexM35P = 0xD22
# pylint: enable=invalid_name
# User-friendly names for core types.
CORE_TYPE_NAME = {
ARM_CortexM23 : "Cortex-M23",
ARM_CortexM33 : "Cortex-M33",
ARM_CortexM35P : "Cortex-M35P",
}
class CortexM_v8M(CortexM):
"""! @brief Component class for a v8-M architecture Cortex-M core."""
ARMv8M_BASE = 0xC
ARMv8M_MAIN = 0xF
# Processor Feature Register 1
PFR1 = 0xE000ED44
PFR1_SECURITY_MASK = 0x000000f0
PFR1_SECURITY_SHIFT = 4
def __init__(self, rootTarget, ap, memoryMap=None, core_num=0, cmpid=None, address=None):
super(CortexM_v8M, self).__init__(rootTarget, ap, memoryMap, core_num, cmpid, address)
# Only v7-M supports VECTRESET.
self._supports_vectreset = False
def _read_core_type(self):
"""! @brief Read the CPUID register and determine core type and architecture."""
# Read CPUID register
cpuid = self.read32(CortexM.CPUID)
implementer = (cpuid & CortexM.CPUID_IMPLEMENTER_MASK) >> CortexM.CPUID_IMPLEMENTER_POS
if implementer != CortexM.CPUID_IMPLEMENTER_ARM:
LOG.warning("CPU implementer is not ARM!")
self.arch = (cpuid & CortexM.CPUID_ARCHITECTURE_MASK) >> CortexM.CPUID_ARCHITECTURE_POS
self.core_type = (cpuid & CortexM.CPUID_PARTNO_MASK) >> CortexM.CPUID_PARTNO_POS
self.cpu_revision = (cpuid & CortexM.CPUID_VARIANT_MASK) >> CortexM.CPUID_VARIANT_POS
self.cpu_patch = (cpuid & CortexM.CPUID_REVISION_MASK) >> CortexM.CPUID_REVISION_POS
pfr1 = self.read32(self.PFR1)
self.has_security_extension = ((pfr1 & self.PFR1_SECURITY_MASK) >> self.PFR1_SECURITY_SHIFT) == 1
if self.core_type in CORE_TYPE_NAME:
if self.has_security_extension:
LOG.info("CPU core #%d is %s r%dp%d (security ext present)", self.core_number, CORE_TYPE_NAME[self.core_type], self.cpu_revision, self.cpu_patch)
else:
LOG.info("CPU core #%d is %s r%dp%d", self.core_number, CORE_TYPE_NAME[self.core_type], self.cpu_revision, self.cpu_patch)
else:
LOG.warning("CPU core #%d type is unrecognized", self.core_number)
|
[
"logging.getLogger"
] |
[((717, 744), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (734, 744), False, 'import logging\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-07-09 20:10
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('query_processor', '0019_auto_20160709_1732'),
]
operations = [
migrations.RemoveField(
model_name='platformresponse',
name='request',
),
migrations.DeleteModel(
name='PlatformRequest',
),
migrations.DeleteModel(
name='PlatformResponse',
),
]
|
[
"django.db.migrations.RemoveField",
"django.db.migrations.DeleteModel"
] |
[((299, 368), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""platformresponse"""', 'name': '"""request"""'}), "(model_name='platformresponse', name='request')\n", (321, 368), False, 'from django.db import migrations\n'), ((413, 459), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""PlatformRequest"""'}), "(name='PlatformRequest')\n", (435, 459), False, 'from django.db import migrations\n'), ((492, 539), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""PlatformResponse"""'}), "(name='PlatformResponse')\n", (514, 539), False, 'from django.db import migrations\n')]
|
# Source from Codecademy
from tree import build_tree, print_tree, car_data, car_labels, classify
import random
random.seed(4)
# The features are the price of the car, the cost of maintenance, the number of doors, the number of people the car can hold, the size of the trunk, and the safety rating
unlabeled_point = ['high', 'vhigh', '3', 'more', 'med', 'med']
predictions = []
for i in range(20):
indices = [random.randint(0, 999) for i in range(1000)]
data_subset = [car_data[index] for index in indices]
labels_subset = [car_labels[index] for index in indices]
subset_tree = build_tree(data_subset, labels_subset)
result = classify(unlabeled_point, subset_tree)
predictions.append(result)
print(predictions)
final_prediction = max(predictions, key=predictions.count)
print(final_prediction)
|
[
"tree.build_tree",
"random.seed",
"random.randint",
"tree.classify"
] |
[((111, 125), 'random.seed', 'random.seed', (['(4)'], {}), '(4)\n', (122, 125), False, 'import random\n'), ((587, 625), 'tree.build_tree', 'build_tree', (['data_subset', 'labels_subset'], {}), '(data_subset, labels_subset)\n', (597, 625), False, 'from tree import build_tree, print_tree, car_data, car_labels, classify\n'), ((637, 675), 'tree.classify', 'classify', (['unlabeled_point', 'subset_tree'], {}), '(unlabeled_point, subset_tree)\n', (645, 675), False, 'from tree import build_tree, print_tree, car_data, car_labels, classify\n'), ((412, 434), 'random.randint', 'random.randint', (['(0)', '(999)'], {}), '(0, 999)\n', (426, 434), False, 'import random\n')]
|
import requests
from bs4 import BeautifulSoup
import pandas
data = requests.get("https://www.imdb.com/chart/toptv/?ref_=nv_tvv_250",
headers={"Accept-language": "en-US"})
soup = BeautifulSoup(data.text, "html.parser")
tbl = soup.find("table", {"class": "chart full-width"})
tbody = tbl.find("tbody")
ranks = [tr.select(".titleColumn")[0].get_text().split("\n")[1].strip()
for tr in tbody.find_all("tr")]
titles = [tr.select(".titleColumn a")[0].get_text() for tr in tbody.find_all("tr")]
released = [tr.select(".titleColumn .secondaryInfo")[0].get_text().strip("()")
for tr in tbody.find_all("tr")]
ratings = [tr.select(".ratingColumn strong")[0].get_text() for tr in tbody.find_all("tr")]
votes = [tr.find("td", {"class": "ratingColumn"}).strong.get("title").split()[3]
for tr in tbody.find_all("tr")]
series = pandas.DataFrame({
"Rank": ranks,
"Title": titles,
"Year": released,
"Rating": ratings,
"Votes": votes
}, index=ranks)
print("\n", series)
series.to_excel("top_rated_series.xlsx", index=False)
|
[
"bs4.BeautifulSoup",
"pandas.DataFrame",
"requests.get"
] |
[((68, 176), 'requests.get', 'requests.get', (['"""https://www.imdb.com/chart/toptv/?ref_=nv_tvv_250"""'], {'headers': "{'Accept-language': 'en-US'}"}), "('https://www.imdb.com/chart/toptv/?ref_=nv_tvv_250', headers={\n 'Accept-language': 'en-US'})\n", (80, 176), False, 'import requests\n'), ((199, 238), 'bs4.BeautifulSoup', 'BeautifulSoup', (['data.text', '"""html.parser"""'], {}), "(data.text, 'html.parser')\n", (212, 238), False, 'from bs4 import BeautifulSoup\n'), ((866, 986), 'pandas.DataFrame', 'pandas.DataFrame', (["{'Rank': ranks, 'Title': titles, 'Year': released, 'Rating': ratings,\n 'Votes': votes}"], {'index': 'ranks'}), "({'Rank': ranks, 'Title': titles, 'Year': released,\n 'Rating': ratings, 'Votes': votes}, index=ranks)\n", (882, 986), False, 'import pandas\n')]
|
#
# Author : <NAME>
# Copyright (c) 2020 <NAME>. All rights reserved.
# Licensed under the MIT License. See LICENSE file in the project root for full license information.
#
#
# Test function helpers.
#
import numpy as np
def constantode(t,x):
"""Function containing a constant ODE x' = 1.
"""
xprime = np.empty([1], float);
xprime[0] = 1;
return xprime;
def constantodeJ(t, x):
"""Function containing the Jacobian of constantode.
"""
df = np.empty([1,1], float);
df[0,0] = 0;
return df;
def stableode(t,x):
"""Function containing the ODE x' = -x.
"""
xprime = np.empty([1], float);
xprime[0] = -x[0];
return xprime;
def stableodeJ(t, x):
"""Function containing the Jacobian of stableode.
"""
df = np.empty([1,1], float);
df[0,0] = -1;
return df;
def multivariableode(t,x):
"""Function containing the ODE x_1' = -x_1 + x_2
x_2' = -x_2 .
"""
xprime = np.empty([2], float);
xprime[0] = -x[0] + x[1];
xprime[1] = -x[1];
return xprime;
def multivariableodeJ(t, x):
"""Function containing the Jacobian of multivariableode.
"""
df = np.empty([2,2], float);
df[0,0] = -1;
df[0,1] = +1;
df[1,0] = 0;
df[1,1] = -1;
return df;
def stiffode(t, x):
"""Function containing the stiff ODE x_1' = -x_1
x_2' = -100(x_2 - sin(t)) + cos(t).
"""
xprime = np.empty([2], float);
xprime[0] = -x[0];
xprime[1] = -100*(x[1] - np.sin(t)) + np.cos(t);
return xprime;
def stiffodeJ(t, x):
"""Function containing the Jacobian of stiffode.
"""
df = np.empty([2,2], float);
df[0,0] = -1;
df[0,1] = 0;
df[1,0] = 0;
df[1,1] = -100;
return df;
|
[
"numpy.empty",
"numpy.sin",
"numpy.cos"
] |
[((312, 332), 'numpy.empty', 'np.empty', (['[1]', 'float'], {}), '([1], float)\n', (320, 332), True, 'import numpy as np\n'), ((456, 479), 'numpy.empty', 'np.empty', (['[1, 1]', 'float'], {}), '([1, 1], float)\n', (464, 479), True, 'import numpy as np\n'), ((587, 607), 'numpy.empty', 'np.empty', (['[1]', 'float'], {}), '([1], float)\n', (595, 607), True, 'import numpy as np\n'), ((731, 754), 'numpy.empty', 'np.empty', (['[1, 1]', 'float'], {}), '([1, 1], float)\n', (739, 754), True, 'import numpy as np\n'), ((903, 923), 'numpy.empty', 'np.empty', (['[2]', 'float'], {}), '([2], float)\n', (911, 923), True, 'import numpy as np\n'), ((1088, 1111), 'numpy.empty', 'np.empty', (['[2, 2]', 'float'], {}), '([2, 2], float)\n', (1096, 1111), True, 'import numpy as np\n'), ((1321, 1341), 'numpy.empty', 'np.empty', (['[2]', 'float'], {}), '([2], float)\n', (1329, 1341), True, 'import numpy as np\n'), ((1515, 1538), 'numpy.empty', 'np.empty', (['[2, 2]', 'float'], {}), '([2, 2], float)\n', (1523, 1538), True, 'import numpy as np\n'), ((1403, 1412), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (1409, 1412), True, 'import numpy as np\n'), ((1390, 1399), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (1396, 1399), True, 'import numpy as np\n')]
|
import pytest
from flask import Flask
from hades_logs import HadesLogs
from tests.hades_logs import get_hades_logs_config
@pytest.fixture(scope='session')
def hades_logs_config():
return get_hades_logs_config()
@pytest.fixture(scope='session')
def app(hades_logs_config):
app = Flask('test')
app.config.update(hades_logs_config)
return app
@pytest.fixture(scope='session')
def app_longer_timeout(hades_logs_config):
app = Flask('test')
app.config.update(hades_logs_config | {'HADES_TIMEOUT': 15})
return app
@pytest.fixture(scope='session')
def hades_logs(app):
return HadesLogs(app)
@pytest.fixture(scope='session')
def valid_kwargs():
return {'nasipaddress': '192.168.3.11', 'nasportid': 'C6'}
|
[
"flask.Flask",
"pytest.fixture",
"tests.hades_logs.get_hades_logs_config",
"hades_logs.HadesLogs"
] |
[((126, 157), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (140, 157), False, 'import pytest\n'), ((221, 252), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (235, 252), False, 'import pytest\n'), ((364, 395), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (378, 395), False, 'import pytest\n'), ((546, 577), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (560, 577), False, 'import pytest\n'), ((628, 659), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (642, 659), False, 'import pytest\n'), ((194, 217), 'tests.hades_logs.get_hades_logs_config', 'get_hades_logs_config', ([], {}), '()\n', (215, 217), False, 'from tests.hades_logs import get_hades_logs_config\n'), ((291, 304), 'flask.Flask', 'Flask', (['"""test"""'], {}), "('test')\n", (296, 304), False, 'from flask import Flask\n'), ((449, 462), 'flask.Flask', 'Flask', (['"""test"""'], {}), "('test')\n", (454, 462), False, 'from flask import Flask\n'), ((610, 624), 'hades_logs.HadesLogs', 'HadesLogs', (['app'], {}), '(app)\n', (619, 624), False, 'from hades_logs import HadesLogs\n')]
|
"""
Serializers for chain blocks and tree nodes.
.. warning::
You need to take extra care when defining custom serializations. Be
sure that your serialization includes all the fields in the original
structure. E.g., for chain blocks:
- ``self.index``
- ``self.fingers``
- Your payload
Unless this is done, the integrity of the data structures is screwed, since
it's the serialized versions of nodes and blocks that are hashed.
"""
from warnings import warn
from defaultcontext import with_default_context
import attr
import msgpack
from .struct import ChainBlock, TreeNode, TreeLeaf
PROTO_VERSION = 1
CHAIN_BLOCK_MARKER = 0
TREE_NODE_MARKER = 1
TREE_LEAF_MARKER = 2
OTHER_MARKER = 3
def msgpack_encoder(obj):
"""Represent structure as tuple and serialize using msgpack.
Default encoder.
"""
if isinstance(obj, ChainBlock):
marker = CHAIN_BLOCK_MARKER
obj_repr = (obj.index, obj.fingers, obj.payload)
elif isinstance(obj, TreeNode):
marker = TREE_NODE_MARKER
obj_repr = (obj.pivot_prefix, obj.left_hash, obj.right_hash)
elif isinstance(obj, TreeLeaf):
marker = TREE_LEAF_MARKER
obj_repr = (obj.lookup_key, obj.payload_hash)
else:
marker = OTHER_MARKER
obj_repr = (obj,)
return msgpack.packb((PROTO_VERSION, marker, obj_repr),
use_bin_type=True)
def msgpack_decoder(serialized_obj):
"""Deserialize structure from msgpack-encoded tuple.
Default decoder.
"""
try:
proto_version, marker, obj_repr = msgpack.unpackb(
serialized_obj,
raw=False)
except Exception as e:
raise ValueError('Object could not be decoded: %s' % e)
if proto_version != PROTO_VERSION:
warn('Serialization protocol version mismatch. '
'Expected: %s, got: %s' % (PROTO_VERSION, proto_version))
if marker == CHAIN_BLOCK_MARKER:
index, fingers, payload = obj_repr
return ChainBlock(payload=payload, index=index, fingers=fingers)
elif marker == TREE_NODE_MARKER:
pivot_prefix, left_hash, right_hash = obj_repr
return TreeNode(pivot_prefix=pivot_prefix, left_hash=left_hash,
right_hash=right_hash)
elif marker == TREE_LEAF_MARKER:
lookup_key, payload_hash = obj_repr
return TreeLeaf(lookup_key=lookup_key, payload_hash=payload_hash)
else:
return obj_repr[0]
@with_default_context(use_empty_init=True)
@attr.s
class EncodingParams(object):
"""Thread-local container for default encoder and decoder funcs.
:param encoder: Default encoder
:param decoder: Default decoder
This is how you can override the defaults using this class:
>>> my_params = EncodingParams()
>>> my_params.encoder = lambda obj: b'encoded!'
>>> my_params.decoder = lambda encoded: b'decoded!'
>>> EncodingParams.set_global_default(my_params)
>>> encode(b'dummy') == b'encoded!'
True
>>> decode(b'encoded!') == b'decoded!'
True
>>> EncodingParams.reset_defaults()
"""
encoder = attr.ib(default=attr.Factory(lambda: msgpack_encoder))
decoder = attr.ib(default=attr.Factory(lambda: msgpack_decoder))
def encode(obj, encoder=None):
"""Serialize object.
:param obj: Chain block, tree node, or bytes
:param encoder: Custom serializer
"""
if encoder is None:
encoder = EncodingParams.get_default().encoder
return encoder(obj)
def decode(serialized, decoder=None):
"""Deserialize object.
:param serialized: Encoded structure
:param encoder: Custom de-serializer
"""
if decoder is None:
decoder = EncodingParams.get_default().decoder
return decoder(serialized)
|
[
"defaultcontext.with_default_context",
"attr.Factory",
"msgpack.unpackb",
"warnings.warn",
"msgpack.packb"
] |
[((2491, 2532), 'defaultcontext.with_default_context', 'with_default_context', ([], {'use_empty_init': '(True)'}), '(use_empty_init=True)\n', (2511, 2532), False, 'from defaultcontext import with_default_context\n'), ((1322, 1389), 'msgpack.packb', 'msgpack.packb', (['(PROTO_VERSION, marker, obj_repr)'], {'use_bin_type': '(True)'}), '((PROTO_VERSION, marker, obj_repr), use_bin_type=True)\n', (1335, 1389), False, 'import msgpack\n'), ((1593, 1635), 'msgpack.unpackb', 'msgpack.unpackb', (['serialized_obj'], {'raw': '(False)'}), '(serialized_obj, raw=False)\n', (1608, 1635), False, 'import msgpack\n'), ((1808, 1916), 'warnings.warn', 'warn', (["('Serialization protocol version mismatch. Expected: %s, got: %s' % (\n PROTO_VERSION, proto_version))"], {}), "('Serialization protocol version mismatch. Expected: %s, got: %s' % (\n PROTO_VERSION, proto_version))\n", (1812, 1916), False, 'from warnings import warn\n'), ((3156, 3194), 'attr.Factory', 'attr.Factory', (['(lambda : msgpack_encoder)'], {}), '(lambda : msgpack_encoder)\n', (3168, 3194), False, 'import attr\n'), ((3225, 3263), 'attr.Factory', 'attr.Factory', (['(lambda : msgpack_decoder)'], {}), '(lambda : msgpack_decoder)\n', (3237, 3263), False, 'import attr\n')]
|
#!/bin/env python
from nand import Chip, Nand
ab = ["a", "b"]
x = ["x"]
out = ["out"]
sel = ["sel"]
Not = Chip("Not")
Not.inputs = x
Not.outputs = out
Not.add(Nand, a="x", b="x", out="out")
And = Chip("And")
And.inputs = ab
And.outputs = out
And.add(Nand, a="a", b="b", out="aNandB")
And.add(Not, x="aNandB", out="out")
Or = Chip("Or")
Or.inputs = ab
Or.outputs = out
Or.add(Not, x="a", out="notA")
Or.add(Not, x="b", out="notB")
Or.add(And, a="notA", b="notB", out="notAAndNotB")
Or.add(Not, x="notAAndNotB", out="out")
Xor = Chip("Xor")
Xor.inputs = ab
Xor.outputs = out
Xor.add(Not, x="a", out="notA")
Xor.add(Not, x="b", out="notB")
Xor.add(And, a="notA", b="b", out="notAAndB")
Xor.add(And, a="a", b="notB", out="aAndNotB")
Xor.add(Or, a="notAAndB", b="aAndNotB", out="out")
Mux = Chip("Mux")
Mux.inputs = ab + sel
Mux.outputs = out
Mux.add(Not, x="sel", out="notSel")
Mux.add(And, a="a", b="notSel", out="aAndNotSel")
Mux.add(And, a="b", b="sel", out="bAndSel")
Mux.add(Or, a="aAndNotSel", b="bAndSel", out="out")
DMux = Chip("DMux")
DMux.inputs = x + sel
DMux.outputs = ab
DMux.add(Not, x="sel", out="notSel")
DMux.add(And, a="notSel", b="x", out="a")
DMux.add(And, a="sel", b="x", out="b")
if __name__ == "__main__":
print(Not.truth())
print(And.truth())
print(Or.truth())
print(Xor.truth())
print(Mux.truth())
print(DMux.truth())
|
[
"nand.Chip"
] |
[((109, 120), 'nand.Chip', 'Chip', (['"""Not"""'], {}), "('Not')\n", (113, 120), False, 'from nand import Chip, Nand\n'), ((200, 211), 'nand.Chip', 'Chip', (['"""And"""'], {}), "('And')\n", (204, 211), False, 'from nand import Chip, Nand\n'), ((330, 340), 'nand.Chip', 'Chip', (['"""Or"""'], {}), "('Or')\n", (334, 340), False, 'from nand import Chip, Nand\n'), ((533, 544), 'nand.Chip', 'Chip', (['"""Xor"""'], {}), "('Xor')\n", (537, 544), False, 'from nand import Chip, Nand\n'), ((793, 804), 'nand.Chip', 'Chip', (['"""Mux"""'], {}), "('Mux')\n", (797, 804), False, 'from nand import Chip, Nand\n'), ((1035, 1047), 'nand.Chip', 'Chip', (['"""DMux"""'], {}), "('DMux')\n", (1039, 1047), False, 'from nand import Chip, Nand\n')]
|
##############################################################################
# Copyright by The HDF Group. #
# All rights reserved. #
# #
# This file is part of HSDS (HDF5 Scalable Data Service), Libraries and #
# Utilities. The full HSDS copyright notice, including #
# terms governing use, modification, and redistribution, is contained in #
# the file COPYING, which can be found at the root of the source code #
# distribution tree. If you do not have access to this file, you may #
# request a copy from <EMAIL>. #
##############################################################################
import unittest
import requests
import time
import json
import helper
import config
class GroupTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(GroupTest, self).__init__(*args, **kwargs)
self.base_domain = helper.getTestDomainName(self.__class__.__name__)
helper.setupDomain(self.base_domain)
# main
def testGetRootGroup(self):
print("testGetRootGroup", self.base_domain)
headers = helper.getRequestHeaders(domain=self.base_domain)
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
root_uuid = rspJson["root"]
helper.validateId(root_uuid)
req = helper.getEndpoint() + '/groups/' + root_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("id" in rspJson)
group_id = rspJson["id"]
helper.validateId(group_id)
self.assertTrue("root" in rspJson)
root_id = rspJson["root"]
self.assertEqual(group_id, root_id)
self.assertTrue("domain" in rspJson)
#self.assertEqual(rspJson["domain"], self.base_domain) #TBD
self.assertTrue("created" in rspJson)
self.assertTrue("lastModified" in rspJson)
self.assertTrue("linkCount" in rspJson)
self.assertTrue("attributeCount" in rspJson)
# try get with a different user (who has read permission)
headers = helper.getRequestHeaders(domain=self.base_domain, username="test_user2")
rsp = requests.get(req, headers=headers)
if config.get("default_public"):
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertEqual(rspJson["root"], root_uuid)
else:
self.assertEqual(rsp.status_code, 403)
# try to do a GET with a different domain (should fail)
another_domain = helper.getParentDomain(self.base_domain)
headers = helper.getRequestHeaders(domain=another_domain)
req = helper.getEndpoint() + '/groups/' + root_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 400)
def testGet(self):
domain = helper.getTestDomain("tall.h5")
headers = helper.getRequestHeaders(domain=domain)
# verify domain exists
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
if rsp.status_code != 200:
print("WARNING: Failed to get domain: {}. Is test data setup?".format(domain))
return # abort rest of test
rspJson = json.loads(rsp.text)
grp_uuid = root_uuid = rspJson["root"]
self.assertTrue(grp_uuid.startswith("g-"))
# get the group json
req = helper.getEndpoint() + '/groups/' + grp_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
for name in ("id", "hrefs", "attributeCount", "linkCount",
"domain", "root", "created", "lastModified"):
self.assertTrue(name in rspJson)
self.assertEqual(rspJson["id"], grp_uuid)
hrefs = rspJson["hrefs"]
self.assertEqual(len(hrefs), 5)
self.assertEqual(rspJson["id"], grp_uuid)
self.assertEqual(rspJson["attributeCount"], 2)
self.assertEqual(rspJson["linkCount"], 2)
self.assertEqual(rspJson["root"], root_uuid)
self.assertEqual(rspJson["domain"], domain)
# attribute should only be here if include_attrs is used
self.assertFalse("attributes" in rspJson)
# links should onnly be here if include_links is used
self.assertFalse("links" in rspJson)
now = time.time()
# the object shouldn't have been just created or updated
self.assertTrue(rspJson["created"] < now - 10)
self.assertTrue(rspJson["lastModified"] < now - 10)
# request the group path
req = helper.getEndpoint() + '/groups/' + grp_uuid
params = {"getalias": 1}
rsp = requests.get(req, params=params, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("alias" in rspJson)
self.assertEqual(rspJson["alias"], ['/'])
# do a get including the links
params = {"include_links": 1}
rsp = requests.get(req, params=params, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("links" in rspJson)
links = rspJson["links"]
self.assertTrue("g1" in links)
self.assertTrue("g2" in links)
# do a get including attributes
params = {"include_attrs": 1}
rsp = requests.get(req, params=params, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("attributes" in rspJson)
attrs = rspJson["attributes"]
self.assertTrue("attr1" in attrs)
self.assertTrue("attr2" in attrs)
# verify trying to read this group from a different domain fails
headers = helper.getRequestHeaders(domain=self.base_domain)
req = helper.getEndpoint() + '/groups/' + grp_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 400)
def testGetInvalidUUID(self):
print("testGetInvalidUUID", self.base_domain)
headers = helper.getRequestHeaders(domain=self.base_domain)
req = helper.getEndpoint() + '/'
invalid_uuid = "foobar"
req = helper.getEndpoint() + "/groups/" + invalid_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 400)
import uuid
bad_uuid = "g-" + str(uuid.uuid1())
req = helper.getEndpoint() + "/groups/" + bad_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 404)
def testPost(self):
# test POST group
print("testPost", self.base_domain)
headers = helper.getRequestHeaders(domain=self.base_domain)
req = helper.getEndpoint() + '/groups'
# create a new group
rsp = requests.post(req, headers=headers)
self.assertEqual(rsp.status_code, 201)
rspJson = json.loads(rsp.text)
self.assertEqual(rspJson["linkCount"], 0)
self.assertEqual(rspJson["attributeCount"], 0)
group_id = rspJson["id"]
self.assertTrue(helper.validateId(group_id))
# verify we can do a get on the new group
req = helper.getEndpoint() + '/groups/' + group_id
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("id" in rspJson)
self.assertEqual(rspJson["id"], group_id)
self.assertTrue("root" in rspJson)
self.assertTrue(rspJson["root"] != group_id)
self.assertTrue("domain" in rspJson)
#self.assertEqual(rspJson["domain"], domain) # TBD
# try getting the path of the group
params = {"getalias": 1}
rsp = requests.get(req, params=params, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("alias" in rspJson)
self.assertEqual(rspJson["alias"], [])
# try POST with user who doesn't have create permission on this domain
headers = helper.getRequestHeaders(domain=self.base_domain, username="test_user2")
req = helper.getEndpoint() + '/groups'
rsp = requests.post(req, headers=headers)
self.assertEqual(rsp.status_code, 403) # forbidden
def testPostWithLink(self):
# test PUT_root
print("testPostWithLink", self.base_domain)
headers = helper.getRequestHeaders(domain=self.base_domain)
# get root id
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
root_uuid = rspJson["root"]
helper.validateId(root_uuid)
# delete the domain
rsp = requests.delete(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
# try getting the domain
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 410)
# try re-creating a domain
rsp = requests.put(req, headers=headers)
self.assertEqual(rsp.status_code, 201)
rspJson = json.loads(rsp.text)
new_root_id = rspJson["root"]
self.assertTrue(new_root_id != root_uuid)
root_uuid = new_root_id
# get root group and verify link count is 0
req = helper.getEndpoint() + '/groups/' + root_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertEqual(rspJson["linkCount"], 0)
# create new group
payload = { 'link': { 'id': root_uuid, 'name': 'linked_group' } }
req = helper.getEndpoint() + "/groups"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
rspJson = json.loads(rsp.text)
self.assertEqual(rspJson["linkCount"], 0)
self.assertEqual(rspJson["attributeCount"], 0)
new_group_id = rspJson["id"]
self.assertTrue(helper.validateId(rspJson["id"]) )
self.assertTrue(new_group_id != root_uuid)
# get root group and verify link count is 1
req = helper.getEndpoint() + '/groups/' + root_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertEqual(rspJson["linkCount"], 1)
# read the link back and verify
req = helper.getEndpoint() + "/groups/" + root_uuid + "/links/linked_group"
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200) # link doesn't exist yet
rspJson = json.loads(rsp.text)
self.assertTrue("link" in rspJson)
link_json = rspJson["link"]
self.assertEqual(link_json["collection"], "groups")
self.assertEqual(link_json["class"], "H5L_TYPE_HARD")
self.assertEqual(link_json["title"], "linked_group")
self.assertEqual(link_json["id"], new_group_id)
# try getting the path of the group
req = helper.getEndpoint() + "/groups/" + new_group_id
params = {"getalias": 1}
rsp = requests.get(req, params=params, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("alias" in rspJson)
self.assertEqual(rspJson["alias"], ['/linked_group',])
def testDelete(self):
# test Delete
print("testDelete", self.base_domain)
headers = helper.getRequestHeaders(domain=self.base_domain)
# get domain
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
rspJson = json.loads(rsp.text)
self.assertTrue("root" in rspJson)
root_id = rspJson["root"]
req = helper.getEndpoint() + '/groups'
# create a new group
rsp = requests.post(req, headers=headers)
self.assertEqual(rsp.status_code, 201)
rspJson = json.loads(rsp.text)
self.assertTrue("id" in rspJson)
group_id = rspJson["id"]
self.assertTrue(helper.validateId(group_id))
# verify we can do a get on the new group
req = helper.getEndpoint() + '/groups/' + group_id
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("id" in rspJson)
self.assertEqual(rspJson["id"], group_id)
self.assertTrue("root" in rspJson)
self.assertTrue(rspJson["root"] != group_id)
self.assertTrue("domain" in rspJson)
#self.assertEqual(rspJson["domain"], self.base_domain) #TBD
# try DELETE with user who doesn't have create permission on this domain
headers = helper.getRequestHeaders(domain=self.base_domain, username="test_user2")
rsp = requests.delete(req, headers=headers)
self.assertEqual(rsp.status_code, 403) # forbidden
# try to do a DELETE with a different domain (should fail)
another_domain = helper.getParentDomain(self.base_domain)
headers = helper.getRequestHeaders(domain=another_domain)
req = helper.getEndpoint() + '/groups/' + group_id
rsp = requests.delete(req, headers=headers)
self.assertEqual(rsp.status_code, 400)
# delete the new group
headers = helper.getRequestHeaders(domain=self.base_domain)
rsp = requests.delete(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue(rspJson is not None)
# a get for the group should now return 410 (GONE)
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 410)
# try deleting the root group
req = helper.getEndpoint() + '/groups/' + root_id
rsp = requests.delete(req, headers=headers)
self.assertEqual(rsp.status_code, 403) # Forbidden
def testGetByPath(self):
domain = helper.getTestDomain("tall.h5")
print("testGetByPath", domain)
headers = helper.getRequestHeaders(domain=domain)
# verify domain exists
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
if rsp.status_code != 200:
print("WARNING: Failed to get domain: {}. Is test data setup?".format(domain))
return # abort rest of test
rspJson = json.loads(rsp.text)
root_uuid = rspJson["root"]
# get the group at "/g1/g1.1"
h5path = "/g1/g1.1"
req = helper.getEndpoint() + "/groups/"
params = {"h5path": h5path}
rsp = requests.get(req, headers=headers, params=params)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
for name in ("id", "hrefs", "attributeCount", "linkCount",
"domain", "root", "created", "lastModified"):
self.assertTrue(name in rspJson)
# verify we get the same id when following the path via service calls
g11id = helper.getUUIDByPath(domain, "/g1/g1.1")
self.assertEqual(g11id, rspJson["id"])
# Try with a trailing slash
h5path = "/g1/g1.1/"
req = helper.getEndpoint() + "/groups/"
params = {"h5path": h5path}
rsp = requests.get(req, headers=headers, params=params)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertEqual(g11id, rspJson["id"])
# try relative h5path
g1id = helper.getUUIDByPath(domain, "/g1/")
h5path = "./g1.1"
req = helper.getEndpoint() + "/groups/" + g1id
params = {"h5path": h5path}
rsp = requests.get(req, headers=headers, params=params)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertEqual(g11id, rspJson["id"])
# try a invalid link and verify a 404 is returened
h5path = "/g1/foobar"
req = helper.getEndpoint() + "/groups/"
params = {"h5path": h5path}
rsp = requests.get(req, headers=headers, params=params)
self.assertEqual(rsp.status_code, 404)
# try passing a path to a dataset and verify we get 404
h5path = "/g1/g1.1/dset1.1.1"
req = helper.getEndpoint() + "/groups/"
params = {"h5path": h5path}
rsp = requests.get(req, headers=headers, params=params)
self.assertEqual(rsp.status_code, 404)
# try getting the path of the group
req = helper.getEndpoint() + "/groups/" + g11id
params = {"getalias": 1}
rsp = requests.get(req, params=params, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("alias" in rspJson)
self.assertEqual(rspJson["alias"], ['/g1/g1.1',])
if __name__ == '__main__':
#setup test files
unittest.main()
|
[
"unittest.main",
"helper.getEndpoint",
"json.loads",
"config.get",
"json.dumps",
"time.time",
"helper.getUUIDByPath",
"uuid.uuid1",
"requests.delete",
"requests.get",
"helper.getTestDomain",
"helper.getRequestHeaders",
"helper.getParentDomain",
"requests.post",
"requests.put",
"helper.setupDomain",
"helper.getTestDomainName",
"helper.validateId"
] |
[((17655, 17670), 'unittest.main', 'unittest.main', ([], {}), '()\n', (17668, 17670), False, 'import unittest\n'), ((1106, 1155), 'helper.getTestDomainName', 'helper.getTestDomainName', (['self.__class__.__name__'], {}), '(self.__class__.__name__)\n', (1130, 1155), False, 'import helper\n'), ((1164, 1200), 'helper.setupDomain', 'helper.setupDomain', (['self.base_domain'], {}), '(self.base_domain)\n', (1182, 1200), False, 'import helper\n'), ((1333, 1382), 'helper.getRequestHeaders', 'helper.getRequestHeaders', ([], {'domain': 'self.base_domain'}), '(domain=self.base_domain)\n', (1357, 1382), False, 'import helper\n'), ((1439, 1473), 'requests.get', 'requests.get', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (1451, 1473), False, 'import requests\n'), ((1539, 1559), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (1549, 1559), False, 'import json\n'), ((1604, 1632), 'helper.validateId', 'helper.validateId', (['root_uuid'], {}), '(root_uuid)\n', (1621, 1632), False, 'import helper\n'), ((1707, 1741), 'requests.get', 'requests.get', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (1719, 1741), False, 'import requests\n'), ((1807, 1827), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (1817, 1827), False, 'import json\n'), ((1910, 1937), 'helper.validateId', 'helper.validateId', (['group_id'], {}), '(group_id)\n', (1927, 1937), False, 'import helper\n'), ((2455, 2527), 'helper.getRequestHeaders', 'helper.getRequestHeaders', ([], {'domain': 'self.base_domain', 'username': '"""test_user2"""'}), "(domain=self.base_domain, username='test_user2')\n", (2479, 2527), False, 'import helper\n'), ((2542, 2576), 'requests.get', 'requests.get', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (2554, 2576), False, 'import requests\n'), ((2588, 2616), 'config.get', 'config.get', (['"""default_public"""'], {}), "('default_public')\n", (2598, 2616), False, 'import config\n'), ((2924, 2964), 
'helper.getParentDomain', 'helper.getParentDomain', (['self.base_domain'], {}), '(self.base_domain)\n', (2946, 2964), False, 'import helper\n'), ((2983, 3030), 'helper.getRequestHeaders', 'helper.getRequestHeaders', ([], {'domain': 'another_domain'}), '(domain=another_domain)\n', (3007, 3030), False, 'import helper\n'), ((3105, 3139), 'requests.get', 'requests.get', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (3117, 3139), False, 'import requests\n'), ((3228, 3259), 'helper.getTestDomain', 'helper.getTestDomain', (['"""tall.h5"""'], {}), "('tall.h5')\n", (3248, 3259), False, 'import helper\n'), ((3287, 3326), 'helper.getRequestHeaders', 'helper.getRequestHeaders', ([], {'domain': 'domain'}), '(domain=domain)\n', (3311, 3326), False, 'import helper\n'), ((3422, 3456), 'requests.get', 'requests.get', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (3434, 3456), False, 'import requests\n'), ((3652, 3672), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (3662, 3672), False, 'import json\n'), ((3884, 3918), 'requests.get', 'requests.get', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (3896, 3918), False, 'import requests\n'), ((3984, 4004), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (3994, 4004), False, 'import json\n'), ((4808, 4819), 'time.time', 'time.time', ([], {}), '()\n', (4817, 4819), False, 'import time\n'), ((5140, 5189), 'requests.get', 'requests.get', (['req'], {'params': 'params', 'headers': 'headers'}), '(req, params=params, headers=headers)\n', (5152, 5189), False, 'import requests\n'), ((5255, 5275), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (5265, 5275), False, 'import json\n'), ((5462, 5511), 'requests.get', 'requests.get', (['req'], {'params': 'params', 'headers': 'headers'}), '(req, params=params, headers=headers)\n', (5474, 5511), False, 'import requests\n'), ((5577, 5597), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', 
(5587, 5597), False, 'import json\n'), ((5846, 5895), 'requests.get', 'requests.get', (['req'], {'params': 'params', 'headers': 'headers'}), '(req, params=params, headers=headers)\n', (5858, 5895), False, 'import requests\n'), ((5961, 5981), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (5971, 5981), False, 'import json\n'), ((6245, 6294), 'helper.getRequestHeaders', 'helper.getRequestHeaders', ([], {'domain': 'self.base_domain'}), '(domain=self.base_domain)\n', (6269, 6294), False, 'import helper\n'), ((6368, 6402), 'requests.get', 'requests.get', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (6380, 6402), False, 'import requests\n'), ((6558, 6607), 'helper.getRequestHeaders', 'helper.getRequestHeaders', ([], {'domain': 'self.base_domain'}), '(domain=self.base_domain)\n', (6582, 6607), False, 'import helper\n'), ((6762, 6796), 'requests.get', 'requests.get', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (6774, 6796), False, 'import requests\n'), ((6986, 7020), 'requests.get', 'requests.get', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (6998, 7020), False, 'import requests\n'), ((7181, 7230), 'helper.getRequestHeaders', 'helper.getRequestHeaders', ([], {'domain': 'self.base_domain'}), '(domain=self.base_domain)\n', (7205, 7230), False, 'import helper\n'), ((7324, 7359), 'requests.post', 'requests.post', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (7337, 7359), False, 'import requests\n'), ((7426, 7446), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (7436, 7446), False, 'import json\n'), ((7768, 7802), 'requests.get', 'requests.get', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (7780, 7802), False, 'import requests\n'), ((7868, 7888), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (7878, 7888), False, 'import json\n'), ((8273, 8322), 'requests.get', 'requests.get', (['req'], {'params': 'params', 'headers': 'headers'}), '(req, 
params=params, headers=headers)\n', (8285, 8322), False, 'import requests\n'), ((8388, 8408), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (8398, 8408), False, 'import json\n'), ((8599, 8671), 'helper.getRequestHeaders', 'helper.getRequestHeaders', ([], {'domain': 'self.base_domain', 'username': '"""test_user2"""'}), "(domain=self.base_domain, username='test_user2')\n", (8623, 8671), False, 'import helper\n'), ((8733, 8768), 'requests.post', 'requests.post', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (8746, 8768), False, 'import requests\n'), ((8959, 9008), 'helper.getRequestHeaders', 'helper.getRequestHeaders', ([], {'domain': 'self.base_domain'}), '(domain=self.base_domain)\n', (8983, 9008), False, 'import helper\n'), ((9087, 9121), 'requests.get', 'requests.get', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (9099, 9121), False, 'import requests\n'), ((9187, 9207), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (9197, 9207), False, 'import json\n'), ((9252, 9280), 'helper.validateId', 'helper.validateId', (['root_uuid'], {}), '(root_uuid)\n', (9269, 9280), False, 'import helper\n'), ((9332, 9369), 'requests.delete', 'requests.delete', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (9347, 9369), False, 'import requests\n'), ((9465, 9499), 'requests.get', 'requests.get', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (9477, 9499), False, 'import requests\n'), ((9597, 9631), 'requests.put', 'requests.put', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (9609, 9631), False, 'import requests\n'), ((9697, 9717), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (9707, 9717), False, 'import json\n'), ((9974, 10008), 'requests.get', 'requests.get', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (9986, 10008), False, 'import requests\n'), ((10074, 10094), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', 
(10084, 10094), False, 'import json\n'), ((10446, 10466), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (10456, 10466), False, 'import json\n'), ((10846, 10880), 'requests.get', 'requests.get', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (10858, 10880), False, 'import requests\n'), ((10946, 10966), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (10956, 10966), False, 'import json\n'), ((11156, 11190), 'requests.get', 'requests.get', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (11168, 11190), False, 'import requests\n'), ((11282, 11302), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (11292, 11302), False, 'import json\n'), ((11778, 11827), 'requests.get', 'requests.get', (['req'], {'params': 'params', 'headers': 'headers'}), '(req, params=params, headers=headers)\n', (11790, 11827), False, 'import requests\n'), ((11893, 11913), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (11903, 11913), False, 'import json\n'), ((12134, 12183), 'helper.getRequestHeaders', 'helper.getRequestHeaders', ([], {'domain': 'self.base_domain'}), '(domain=self.base_domain)\n', (12158, 12183), False, 'import helper\n'), ((12261, 12295), 'requests.get', 'requests.get', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (12273, 12295), False, 'import requests\n'), ((12314, 12334), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (12324, 12334), False, 'import json\n'), ((12514, 12549), 'requests.post', 'requests.post', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (12527, 12549), False, 'import requests\n'), ((12616, 12636), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (12626, 12636), False, 'import json\n'), ((12888, 12922), 'requests.get', 'requests.get', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (12900, 12922), False, 'import requests\n'), ((12988, 13008), 'json.loads', 'json.loads', (['rsp.text'], 
{}), '(rsp.text)\n', (12998, 13008), False, 'import json\n'), ((13410, 13482), 'helper.getRequestHeaders', 'helper.getRequestHeaders', ([], {'domain': 'self.base_domain', 'username': '"""test_user2"""'}), "(domain=self.base_domain, username='test_user2')\n", (13434, 13482), False, 'import helper\n'), ((13497, 13534), 'requests.delete', 'requests.delete', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (13512, 13534), False, 'import requests\n'), ((13687, 13727), 'helper.getParentDomain', 'helper.getParentDomain', (['self.base_domain'], {}), '(self.base_domain)\n', (13709, 13727), False, 'import helper\n'), ((13746, 13793), 'helper.getRequestHeaders', 'helper.getRequestHeaders', ([], {'domain': 'another_domain'}), '(domain=another_domain)\n', (13770, 13793), False, 'import helper\n'), ((13867, 13904), 'requests.delete', 'requests.delete', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (13882, 13904), False, 'import requests\n'), ((14013, 14062), 'helper.getRequestHeaders', 'helper.getRequestHeaders', ([], {'domain': 'self.base_domain'}), '(domain=self.base_domain)\n', (14037, 14062), False, 'import helper\n'), ((14077, 14114), 'requests.delete', 'requests.delete', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (14092, 14114), False, 'import requests\n'), ((14180, 14200), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (14190, 14200), False, 'import json\n'), ((14320, 14354), 'requests.get', 'requests.get', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (14332, 14354), False, 'import requests\n'), ((14513, 14550), 'requests.delete', 'requests.delete', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (14528, 14550), False, 'import requests\n'), ((14662, 14693), 'helper.getTestDomain', 'helper.getTestDomain', (['"""tall.h5"""'], {}), "('tall.h5')\n", (14682, 14693), False, 'import helper\n'), ((14751, 14790), 'helper.getRequestHeaders', 'helper.getRequestHeaders', ([], 
{'domain': 'domain'}), '(domain=domain)\n', (14775, 14790), False, 'import helper\n'), ((14886, 14920), 'requests.get', 'requests.get', (['req'], {'headers': 'headers'}), '(req, headers=headers)\n', (14898, 14920), False, 'import requests\n'), ((15107, 15127), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (15117, 15127), False, 'import json\n'), ((15329, 15378), 'requests.get', 'requests.get', (['req'], {'headers': 'headers', 'params': 'params'}), '(req, headers=headers, params=params)\n', (15341, 15378), False, 'import requests\n'), ((15454, 15474), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (15464, 15474), False, 'import json\n'), ((15741, 15781), 'helper.getUUIDByPath', 'helper.getUUIDByPath', (['domain', '"""/g1/g1.1"""'], {}), "(domain, '/g1/g1.1')\n", (15761, 15781), False, 'import helper\n'), ((15993, 16042), 'requests.get', 'requests.get', (['req'], {'headers': 'headers', 'params': 'params'}), '(req, headers=headers, params=params)\n', (16005, 16042), False, 'import requests\n'), ((16118, 16138), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (16128, 16138), False, 'import json\n'), ((16232, 16268), 'helper.getUUIDByPath', 'helper.getUUIDByPath', (['domain', '"""/g1/"""'], {}), "(domain, '/g1/')\n", (16252, 16268), False, 'import helper\n'), ((16400, 16449), 'requests.get', 'requests.get', (['req'], {'headers': 'headers', 'params': 'params'}), '(req, headers=headers, params=params)\n', (16412, 16449), False, 'import requests\n'), ((16515, 16535), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (16525, 16535), False, 'import json\n'), ((16771, 16820), 'requests.get', 'requests.get', (['req'], {'headers': 'headers', 'params': 'params'}), '(req, headers=headers, params=params)\n', (16783, 16820), False, 'import requests\n'), ((17069, 17118), 'requests.get', 'requests.get', (['req'], {'headers': 'headers', 'params': 'params'}), '(req, headers=headers, params=params)\n', (17081, 17118), 
False, 'import requests\n'), ((17316, 17365), 'requests.get', 'requests.get', (['req'], {'params': 'params', 'headers': 'headers'}), '(req, params=params, headers=headers)\n', (17328, 17365), False, 'import requests\n'), ((17431, 17451), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (17441, 17451), False, 'import json\n'), ((1397, 1417), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (1415, 1417), False, 'import helper\n'), ((2691, 2711), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (2701, 2711), False, 'import json\n'), ((3381, 3401), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (3399, 3401), False, 'import helper\n'), ((6622, 6642), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (6640, 6642), False, 'import helper\n'), ((7245, 7265), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (7263, 7265), False, 'import helper\n'), ((7615, 7642), 'helper.validateId', 'helper.validateId', (['group_id'], {}), '(group_id)\n', (7632, 7642), False, 'import helper\n'), ((8686, 8706), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (8704, 8706), False, 'import helper\n'), ((9046, 9066), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (9064, 9066), False, 'import helper\n'), ((10271, 10291), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (10289, 10291), False, 'import helper\n'), ((10633, 10665), 'helper.validateId', 'helper.validateId', (["rspJson['id']"], {}), "(rspJson['id'])\n", (10650, 10665), False, 'import helper\n'), ((12220, 12240), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (12238, 12240), False, 'import helper\n'), ((12427, 12447), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (12445, 12447), False, 'import helper\n'), ((12735, 12762), 'helper.validateId', 'helper.validateId', (['group_id'], {}), '(group_id)\n', (12752, 12762), False, 'import helper\n'), ((14845, 14865), 'helper.getEndpoint', 
'helper.getEndpoint', ([], {}), '()\n', (14863, 14865), False, 'import helper\n'), ((15245, 15265), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (15263, 15265), False, 'import helper\n'), ((15909, 15929), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (15927, 15929), False, 'import helper\n'), ((16687, 16707), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (16705, 16707), False, 'import helper\n'), ((16985, 17005), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (17003, 17005), False, 'import helper\n'), ((1647, 1667), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (1665, 1667), False, 'import helper\n'), ((3045, 3065), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (3063, 3065), False, 'import helper\n'), ((3825, 3845), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (3843, 3845), False, 'import helper\n'), ((5048, 5068), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (5066, 5068), False, 'import helper\n'), ((6309, 6329), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (6327, 6329), False, 'import helper\n'), ((6699, 6719), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (6717, 6719), False, 'import helper\n'), ((6895, 6907), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (6905, 6907), False, 'import uuid\n'), ((6927, 6947), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (6945, 6947), False, 'import helper\n'), ((7709, 7729), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (7727, 7729), False, 'import helper\n'), ((9914, 9934), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (9932, 9934), False, 'import helper\n'), ((10342, 10361), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (10352, 10361), False, 'import json\n'), ((10786, 10806), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (10804, 10806), False, 'import helper\n'), ((11681, 11701), 
'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (11699, 11701), False, 'import helper\n'), ((12829, 12849), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (12847, 12849), False, 'import helper\n'), ((13808, 13828), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (13826, 13828), False, 'import helper\n'), ((14455, 14475), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (14473, 14475), False, 'import helper\n'), ((16309, 16329), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (16327, 16329), False, 'import helper\n'), ((17226, 17246), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (17244, 17246), False, 'import helper\n'), ((11072, 11092), 'helper.getEndpoint', 'helper.getEndpoint', ([], {}), '()\n', (11090, 11092), False, 'import helper\n')]
|
from pyinspect import install_traceback
from rich import pretty
install_traceback()
pretty.install()
from loguru import logger
import sys
# comment these two lines out to show logging info
logger.remove()
logger.add(sys.stderr, level="INFO")
logger.level("EXPRESSION", no=15, color="<yellow>", icon="🖇")
logger.level("MATH", no=15, color="<green>", icon="🖇")
from mathcli.math import calc, solve, simplify, derivative
|
[
"pyinspect.install_traceback",
"loguru.logger.level",
"loguru.logger.add",
"loguru.logger.remove",
"rich.pretty.install"
] |
[((65, 84), 'pyinspect.install_traceback', 'install_traceback', ([], {}), '()\n', (82, 84), False, 'from pyinspect import install_traceback\n'), ((85, 101), 'rich.pretty.install', 'pretty.install', ([], {}), '()\n', (99, 101), False, 'from rich import pretty\n'), ((192, 207), 'loguru.logger.remove', 'logger.remove', ([], {}), '()\n', (205, 207), False, 'from loguru import logger\n'), ((208, 244), 'loguru.logger.add', 'logger.add', (['sys.stderr'], {'level': '"""INFO"""'}), "(sys.stderr, level='INFO')\n", (218, 244), False, 'from loguru import logger\n'), ((246, 307), 'loguru.logger.level', 'logger.level', (['"""EXPRESSION"""'], {'no': '(15)', 'color': '"""<yellow>"""', 'icon': '"""🖇"""'}), "('EXPRESSION', no=15, color='<yellow>', icon='🖇')\n", (258, 307), False, 'from loguru import logger\n'), ((308, 362), 'loguru.logger.level', 'logger.level', (['"""MATH"""'], {'no': '(15)', 'color': '"""<green>"""', 'icon': '"""🖇"""'}), "('MATH', no=15, color='<green>', icon='🖇')\n", (320, 362), False, 'from loguru import logger\n')]
|
from __init__ import *
import sys
from threading import Thread
from StartScreen.start_screen import StartScreen
from GameScreen.game_screen import GameScreen
from Options.options import Options
from WaitingRoom.waiting_room import WaitingRoom
from Results.results import Results
from style_sheets import Theme
class PKNGame(QMainWindow):
"""
The main window of the application.
Inherits from QMainWindow and extends its functionality
as an overseer. This object coordinates the views, pop-ups and themes.
After all preperations and initializations launches two parallel threads
to simunainously show the main menu (start screen) and, if there is no registered name,
launch the pop-up to make sure that user has choosen the name.
"""
def __init__(self):
super(PKNGame, self).__init__()
self.screen_size = QDesktopWidget().screenGeometry()
self.config = Options.Module.get_config()
self.theme = self.config["theme"]
if self.theme:
Theme.LightTheme.widget(self)
else:
Theme.DarkTheme.widget(self)
# Placeholder for all views
self.screens = {
"start": StartScreen.Controller(self),
"wait": WaitingRoom.Controller(self),
#"results": Results.Controller(self),
"game": GameScreen.Controller(self),
"options": Options.Controller(self)
}
self.current_screen = None
self.__hide_all()
self.__start_screen()
Thread(target = self.show())
Thread(target = self.login_popup())
@staticmethod
def login_popup_static(config: dict, parent_window: QMainWindow):
"""
Takes care of pop-up in the Option view after reseting the options to default
which also resets the username.
parameters:
config - configuration dictionary objecct taken from the json during initialization of the main window.
parent_window - The main window of the entire application.
returns:
None
"""
if config["username"] == "":
while config["username"] == "":
text = QInputDialog.getText(parent_window, "Login", "Choose your username:")[0]
config["username"] = text
def login_popup(self):
"""
Takes care of pop-up at the start of the application.
This dialog is not letting unnamed user to play.
"""
if self.config["username"] == "":
text = QInputDialog.getText(self, "Login", "Choose your username:")[0]
self.config["username"] = text
while self.config["username"] == "":
text = QInputDialog.getText(self, "Login", "Choose your username:\nYou need to enter something!")[0]
self.config["username"] = text
Options.Module.overwrite_config(self.config)
def __hide_all(self):
for screen in self.screens.values():
screen.view.hide()
def __start_screen(self):
"""
Launches the first screen
"""
self.current_screen = self.screens["start"].view
self.setCentralWidget(self.current_screen)
self.current_screen.show()
def change_to(self, from_screen: str, to_screen: str) -> None:
"""
Manages the change of views
:param from_screen: str
:param to_screen: str
:return: None
"""
self.login_popup_static(self.config, self)
self.current_screen.hide()
# Reinitialization of screen to handle the Buffer Stack Overflow qt error ( -1073740791 (0xC0000409) )
self.screens[from_screen].__init__(self)
self.current_screen = self.screens[to_screen].view
self.setCentralWidget(self.current_screen)
self.current_screen.show()
def exit(self):
self.close()
if __name__ == "__main__":
snake = QApplication(sys.argv)
PKNGame()
sys.exit(snake.exec_())
|
[
"Options.options.Options.Module.overwrite_config",
"GameScreen.game_screen.GameScreen.Controller",
"Options.options.Options.Controller",
"style_sheets.Theme.DarkTheme.widget",
"WaitingRoom.waiting_room.WaitingRoom.Controller",
"style_sheets.Theme.LightTheme.widget",
"StartScreen.start_screen.StartScreen.Controller",
"Options.options.Options.Module.get_config"
] |
[((917, 944), 'Options.options.Options.Module.get_config', 'Options.Module.get_config', ([], {}), '()\n', (942, 944), False, 'from Options.options import Options\n'), ((1023, 1052), 'style_sheets.Theme.LightTheme.widget', 'Theme.LightTheme.widget', (['self'], {}), '(self)\n', (1046, 1052), False, 'from style_sheets import Theme\n'), ((1079, 1107), 'style_sheets.Theme.DarkTheme.widget', 'Theme.DarkTheme.widget', (['self'], {}), '(self)\n', (1101, 1107), False, 'from style_sheets import Theme\n'), ((1192, 1220), 'StartScreen.start_screen.StartScreen.Controller', 'StartScreen.Controller', (['self'], {}), '(self)\n', (1214, 1220), False, 'from StartScreen.start_screen import StartScreen\n'), ((1242, 1270), 'WaitingRoom.waiting_room.WaitingRoom.Controller', 'WaitingRoom.Controller', (['self'], {}), '(self)\n', (1264, 1270), False, 'from WaitingRoom.waiting_room import WaitingRoom\n'), ((1342, 1369), 'GameScreen.game_screen.GameScreen.Controller', 'GameScreen.Controller', (['self'], {}), '(self)\n', (1363, 1369), False, 'from GameScreen.game_screen import GameScreen\n'), ((1394, 1418), 'Options.options.Options.Controller', 'Options.Controller', (['self'], {}), '(self)\n', (1412, 1418), False, 'from Options.options import Options\n'), ((2864, 2908), 'Options.options.Options.Module.overwrite_config', 'Options.Module.overwrite_config', (['self.config'], {}), '(self.config)\n', (2895, 2908), False, 'from Options.options import Options\n')]
|
import datetime
from django.db import models
from django.utils import timezone
# Create your models here.
class AddMail(models.Model):
mail_address = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.mail_address
def was_published_recently(self):
return self.pub_date >= timezone.now() - datetime.timedelta(days=1)
|
[
"django.db.models.CharField",
"django.db.models.DateTimeField",
"datetime.timedelta",
"django.utils.timezone.now"
] |
[((163, 195), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (179, 195), False, 'from django.db import models\n'), ((212, 250), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""date published"""'], {}), "('date published')\n", (232, 250), False, 'from django.db import models\n'), ((381, 395), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (393, 395), False, 'from django.utils import timezone\n'), ((398, 424), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (416, 424), False, 'import datetime\n')]
|
from sklearn import datasets
import numpy as np
def get_info():
return {
'name': 'sklearn_iris',
'description': 'ScikitLearn | Iris',
'class_names': ['Iris Setosa', 'Iris Versicolor', 'Iris Virginica']
}
def get_data(datasets_path):
data = datasets.load_iris()
return {
'X_train': np.array(data.data),
'y_train': np.array(data.target),
'X_test': np.array([]),
'y_test': np.array([]),
'class_names': ['Iris Setosa', 'Iris Versicolor', 'Iris Virginica']
}
|
[
"sklearn.datasets.load_iris",
"numpy.array"
] |
[((280, 300), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (298, 300), False, 'from sklearn import datasets\n'), ((334, 353), 'numpy.array', 'np.array', (['data.data'], {}), '(data.data)\n', (342, 353), True, 'import numpy as np\n'), ((374, 395), 'numpy.array', 'np.array', (['data.target'], {}), '(data.target)\n', (382, 395), True, 'import numpy as np\n'), ((415, 427), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (423, 427), True, 'import numpy as np\n'), ((447, 459), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (455, 459), True, 'import numpy as np\n')]
|
import sys
from pathlib import Path # if you haven't already done so
root = str(Path(__file__).resolve().parents[1])
sys.path.append(root)
import argparse
import tempfile
import os
import shutil
import yaml
import csv
from collections import OrderedDict
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
from lib.fasta_generator import *
from lib.input_file_converter import *
from lib.calculate_manufacturability import *
def define_parser():
parser = argparse.ArgumentParser(
"pvacseq generate_protein_fasta",
description="Generate an annotated fasta file from a VCF with protein sequences of mutations and matching wildtypes",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"input_vcf",
help="A VEP-annotated single- or multi-sample VCF containing genotype, transcript, "
+"Wildtype protein sequence, and Downstream protein sequence information."
+"The VCF may be gzipped (requires tabix index)."
)
parser.add_argument(
"flanking_sequence_length", type=int,
help="Number of amino acids to add on each side of the mutation when creating the FASTA.",
)
parser.add_argument(
"output_file",
help="The output fasta file."
)
parser.add_argument(
"--input-tsv",
help = "A pVACseq all_epitopes or filtered TSV file with epitopes to use for subsetting the input VCF to peptides of interest. Only the peptide sequences for the epitopes in the TSV will be used when creating the FASTA."
)
parser.add_argument(
"-p", "--phased-proximal-variants-vcf",
help="A VCF with phased proximal variant information to incorporate into the predicted fasta sequences. Must be gzipped and tabix indexed."
)
parser.add_argument(
"--mutant-only",
help="Only output mutant peptide sequences",
default=False,
action='store_true',
)
parser.add_argument(
"-d", "--downstream-sequence-length",
default="1000",
help="Cap to limit the downstream sequence length for frameshifts when creating the fasta file. "
+ "Use 'full' to include the full downstream sequence."
)
parser.add_argument(
"-s", "--sample-name",
help="The name of the sample being processed. Required when processing a multi-sample VCF and must be a sample ID in the input VCF #CHROM header line."
)
return parser
def convert_vcf(input_vcf, temp_dir, sample_name, phased_proximal_variants_vcf, flanking_sequence_length):
print("Converting VCF to TSV")
tsv_file = os.path.join(temp_dir, 'tmp.tsv')
convert_params = {
'input_file' : input_vcf,
'output_file': tsv_file,
}
if sample_name is not None:
convert_params['sample_name'] = sample_name
if phased_proximal_variants_vcf is not None:
convert_params['proximal_variants_vcf'] = phased_proximal_variants_vcf
proximal_variants_tsv = os.path.join(temp_dir, 'proximal_variants.tsv')
convert_params['proximal_variants_tsv'] = proximal_variants_tsv
convert_params['flanking_bases'] = flanking_sequence_length * 4
else:
proximal_variants_tsv = None
converter = VcfConverter(**convert_params)
converter.execute()
print("Completed")
return proximal_variants_tsv
def generate_fasta(flanking_sequence_length, downstream_sequence_length, temp_dir, proximal_variants_tsv):
print("Generating Variant Peptide FASTA and Key File")
tsv_file = os.path.join(temp_dir, 'tmp.tsv')
fasta_file = os.path.join(temp_dir, 'tmp.fasta')
fasta_key_file = os.path.join(temp_dir, 'tmp.fasta.key')
generate_fasta_params = {
'input_file' : tsv_file,
'flanking_sequence_length' : flanking_sequence_length,
'epitope_length' : 0,
'output_file' : fasta_file,
'output_key_file' : fasta_key_file,
'downstream_sequence_length': downstream_sequence_length,
'proximal_variants_file' : proximal_variants_tsv,
}
fasta_generator = FastaGenerator(**generate_fasta_params)
fasta_generator.execute()
print("Completed")
def parse_input_tsv(input_tsv):
if input_tsv is None:
return None
indexes = []
with open(input_tsv, 'r') as fh:
reader = csv.DictReader(fh, delimiter = "\t")
for line in reader:
indexes.append(line['Index'])
return indexes
def parse_files(output_file, temp_dir, mutant_only, input_tsv):
print("Parsing the Variant Peptide FASTA and Key File")
fasta_file_path = os.path.join(temp_dir, 'tmp.fasta')
fasta_key_file_path = os.path.join(temp_dir, 'tmp.fasta.key')
with open(fasta_key_file_path, 'r') as fasta_key_file:
keys = yaml.load(fasta_key_file, Loader=yaml.FullLoader)
tsv_indexes = parse_input_tsv(input_tsv)
dataframe = OrderedDict()
output_records = []
for record in SeqIO.parse(fasta_file_path, "fasta"):
ids = keys[int(record.id)]
for record_id in ids:
if mutant_only and record_id.startswith('WT.'):
continue
if tsv_indexes is not None:
sequence_type, index = record_id.split('.', 1)
if index not in tsv_indexes:
continue
new_record = SeqRecord(record.seq, id=record_id, description=record_id)
output_records.append(new_record)
SeqIO.write(output_records, output_file, "fasta")
print("Completed")
def main(args_input = sys.argv[1:]):
parser = define_parser()
args = parser.parse_args(args_input)
if args.downstream_sequence_length == 'full':
downstream_sequence_length = None
elif args.downstream_sequence_length.isdigit():
downstream_sequence_length = int(args.downstream_sequence_length)
else:
sys.exit("The downstream sequence length needs to be a positive integer or 'full'")
temp_dir = tempfile.mkdtemp()
proximal_variants_tsv = convert_vcf(args.input_vcf, temp_dir, args.sample_name, args.phased_proximal_variants_vcf, args.flanking_sequence_length)
generate_fasta(args.flanking_sequence_length, downstream_sequence_length, temp_dir, proximal_variants_tsv)
parse_files(args.output_file, temp_dir, args.mutant_only, args.input_tsv)
shutil.rmtree(temp_dir)
manufacturability_file = "{}.manufacturability.tsv".format(args.output_file)
print("Calculating Manufacturability Metrics")
CalculateManufacturability(args.output_file, manufacturability_file, 'fasta').execute()
print("Completed")
if __name__ == '__main__':
main()
|
[
"sys.path.append",
"yaml.load",
"Bio.SeqIO.parse",
"argparse.ArgumentParser",
"Bio.SeqIO.write",
"csv.DictReader",
"Bio.SeqRecord.SeqRecord",
"pathlib.Path",
"tempfile.mkdtemp",
"collections.OrderedDict",
"shutil.rmtree",
"os.path.join",
"sys.exit"
] |
[((117, 138), 'sys.path.append', 'sys.path.append', (['root'], {}), '(root)\n', (132, 138), False, 'import sys\n'), ((521, 762), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""pvacseq generate_protein_fasta"""'], {'description': '"""Generate an annotated fasta file from a VCF with protein sequences of mutations and matching wildtypes"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "('pvacseq generate_protein_fasta', description=\n 'Generate an annotated fasta file from a VCF with protein sequences of mutations and matching wildtypes'\n , formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (544, 762), False, 'import argparse\n'), ((2690, 2723), 'os.path.join', 'os.path.join', (['temp_dir', '"""tmp.tsv"""'], {}), "(temp_dir, 'tmp.tsv')\n", (2702, 2723), False, 'import os\n'), ((3612, 3645), 'os.path.join', 'os.path.join', (['temp_dir', '"""tmp.tsv"""'], {}), "(temp_dir, 'tmp.tsv')\n", (3624, 3645), False, 'import os\n'), ((3663, 3698), 'os.path.join', 'os.path.join', (['temp_dir', '"""tmp.fasta"""'], {}), "(temp_dir, 'tmp.fasta')\n", (3675, 3698), False, 'import os\n'), ((3720, 3759), 'os.path.join', 'os.path.join', (['temp_dir', '"""tmp.fasta.key"""'], {}), "(temp_dir, 'tmp.fasta.key')\n", (3732, 3759), False, 'import os\n'), ((4718, 4753), 'os.path.join', 'os.path.join', (['temp_dir', '"""tmp.fasta"""'], {}), "(temp_dir, 'tmp.fasta')\n", (4730, 4753), False, 'import os\n'), ((4780, 4819), 'os.path.join', 'os.path.join', (['temp_dir', '"""tmp.fasta.key"""'], {}), "(temp_dir, 'tmp.fasta.key')\n", (4792, 4819), False, 'import os\n'), ((5008, 5021), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5019, 5021), False, 'from collections import OrderedDict\n'), ((5064, 5101), 'Bio.SeqIO.parse', 'SeqIO.parse', (['fasta_file_path', '"""fasta"""'], {}), "(fasta_file_path, 'fasta')\n", (5075, 5101), False, 'from Bio import SeqIO\n'), ((5565, 5614), 'Bio.SeqIO.write', 'SeqIO.write', (['output_records', 'output_file', 
'"""fasta"""'], {}), "(output_records, output_file, 'fasta')\n", (5576, 5614), False, 'from Bio import SeqIO\n'), ((6083, 6101), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (6099, 6101), False, 'import tempfile\n'), ((6445, 6468), 'shutil.rmtree', 'shutil.rmtree', (['temp_dir'], {}), '(temp_dir)\n', (6458, 6468), False, 'import shutil\n'), ((3064, 3111), 'os.path.join', 'os.path.join', (['temp_dir', '"""proximal_variants.tsv"""'], {}), "(temp_dir, 'proximal_variants.tsv')\n", (3076, 3111), False, 'import os\n'), ((4445, 4479), 'csv.DictReader', 'csv.DictReader', (['fh'], {'delimiter': '"""\t"""'}), "(fh, delimiter='\\t')\n", (4459, 4479), False, 'import csv\n'), ((4895, 4944), 'yaml.load', 'yaml.load', (['fasta_key_file'], {'Loader': 'yaml.FullLoader'}), '(fasta_key_file, Loader=yaml.FullLoader)\n', (4904, 4944), False, 'import yaml\n'), ((5455, 5513), 'Bio.SeqRecord.SeqRecord', 'SeqRecord', (['record.seq'], {'id': 'record_id', 'description': 'record_id'}), '(record.seq, id=record_id, description=record_id)\n', (5464, 5513), False, 'from Bio.SeqRecord import SeqRecord\n'), ((5983, 6071), 'sys.exit', 'sys.exit', (['"""The downstream sequence length needs to be a positive integer or \'full\'"""'], {}), '(\n "The downstream sequence length needs to be a positive integer or \'full\'")\n', (5991, 6071), False, 'import sys\n'), ((80, 94), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (84, 94), False, 'from pathlib import Path\n')]
|
'''
Created on Mar 7, 2020
@author: ballance
'''
import os
import sys
import importlib
from tsr.engine_info import EngineInfo
from tsr.messaging import verbose_note, error
from tsr.tool_info import ToolInfo
import subprocess
from _io import StringIO
import cmd
from tsr.plusarg_info import PlusargInfo
from json import tool
import json
from tsr import messaging
class Registry(object):
    """Discovers and holds TSR engine and tool definitions.

    Engines and tools are discovered either as ``engine_*.mk`` /
    ``tool_*.mk`` makefiles in registered makefile directories, or as
    Python plugin packages (a ``.tsr`` marker file next to an
    ``__init__.py``) found on the Python path.
    """

    _inst = None  # lazily-created process-wide singleton, see inst()

    def __init__(self):
        self.engines = []  # registered EngineInfo objects
        self.tools = []    # registered ToolInfo objects

        # Directories searched for engine_*.mk / tool_*.mk files
        self.mkfile_dirs = []

        # PYTHONPATH entries searched for TSR plugin packages
        self.pythonpath = []

        # Add the system mkfiles directory (alongside this module)
        self.mkfile_dirs.append(os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "mkfiles"))

        # Load up the entries in the system path
        self.pythonpath.extend(sys.path)

    @staticmethod
    def inst() -> 'Registry':
        """Return the global Registry singleton, creating it on first use."""
        if Registry._inst is None:
            Registry._inst = Registry()
        return Registry._inst

    def get_engine(self, name):
        """Return the registered engine named `name`, or None if not found."""
        for e in self.engines:
            if e.name == name:
                return e
        return None

    def get_tool(self, name):
        """Return the registered tool named `name`, or None if not found."""
        for e in self.tools:
            if e.name == name:
                return e
        return None

    def register_engine(self, engine_info : EngineInfo):
        """Add `engine_info` to the registry and back-link it to this registry."""
        self.engines.append(engine_info)
        engine_info.rgy = self

    def register_tool(self, tool_info):
        """Add `tool_info` to the registry and back-link it to this registry."""
        self.tools.append(tool_info)
        tool_info.rgy = self

    def _process_pythonpath_dir(self, pp_dir):
        """Recursively scan one directory tree for TSR plugin packages.

        A plugin package directory contains both a ``.tsr`` marker file and
        an ``__init__.py``; importing the package lets it register engines
        and tools as an import side effect.  ``.egg-link`` files (pip
        editable installs) are followed to the directory they point at.
        """
        for entry in os.listdir(pp_dir):
            if entry == ".tsr" and os.path.isfile(os.path.join(pp_dir, "__init__.py")):
                # This is a TSR extension directory: import its package.
                messaging.verbose_note("TSR plugin (" + os.path.join(pp_dir, entry) + ")", 3)
                import importlib.util
                spec = importlib.util.spec_from_file_location(
                    "vlsim.tsr",
                    os.path.join(pp_dir, "__init__.py"),
                    submodule_search_locations=None)
                plugin_mod = importlib.util.module_from_spec(spec)
                spec.loader.exec_module(plugin_mod)
            elif os.path.splitext(entry)[1] == ".egg-link":
                # Editable install: the first line of the file is the real
                # package path.  (Bug fix: the file handle previously
                # shadowed the loop variable `f`.)
                line = None
                with open(os.path.join(pp_dir, entry), "r") as fh:
                    line = fh.readline().strip()
                if line is not None and line != "":
                    messaging.verbose_note("Process editable package: " + line, 3)
                    self._process_pythonpath_dir(line)
            elif not entry.startswith("__") and os.path.isdir(os.path.join(pp_dir, entry)):
                self._process_pythonpath_dir(os.path.join(pp_dir, entry))

    def load(self, load_info=False):
        """Discover engines and tools from PYTHONPATH plugins and mkfile dirs.

        When `load_info` is True, each discovered engine/tool is also asked
        to load its detailed information.
        """
        for pp in self.pythonpath:
            if os.path.isdir(pp):
                self._process_pythonpath_dir(pp)

        for mkfile_dir in self.mkfile_dirs:
            self._load_mkfiles_dir(mkfile_dir)

        if load_info:
            for info in self.engines:
                info.load_info()
            for info in self.tools:
                info.load_info()

    def _load_mkfiles_dir(self, dir):
        """Processes files from a makefiles directory to find engine and tool files"""
        verbose_note("processing mkfiles directory " + dir)
        for f in os.listdir(dir):
            if os.path.isfile(os.path.join(dir, f)):
                basename, ext = os.path.splitext(f)
                if ext == ".mk":
                    if f.startswith("engine_"):
                        name = basename[len("engine_"):]
                        verbose_note("found engine named \"" + name + "\"")
                        info = EngineInfo(name, os.path.join(dir, f))
                        info.rgy = self
                        self.engines.append(info)
                    elif f.startswith("tool_"):
                        name = basename[len("tool_"):]
                        verbose_note("found tool named \"" + name + "\"")
                        info = ToolInfo(name, os.path.join(dir, f))
                        info.rgy = self
                        self.tools.append(info)
                    else:
                        verbose_note("ignore makefile " + f, 2)

    def _load_info(self, info):
        """Load the JSON sidecar (if present), description and plusargs for `info`."""
        json_file = os.path.join(
            os.path.dirname(info.mkfile),
            os.path.splitext(os.path.basename(info.mkfile))[0] + ".json")
        if os.path.isfile(json_file):
            self._load_info_json(info, json_file)
        self._load_mkfile_description(info)
        self._load_mkfile_plusargs(info)

    def _load_info_json(self, info, json_file):
        """Parse the JSON sidecar file for `info`.

        FIXME(review): the parsed data is currently discarded -- the
        assignment rebinds the local name `info` instead of populating the
        passed-in object.  Preserved as-is pending a decision on the
        expected JSON schema.
        """
        with open(json_file, "r") as fp:
            info = json.load(fp)

    def _run_make(self, args):
        """Invoke ``make`` with `args` and return its raw stdout (bytes)."""
        # Local renamed from `cmd` to avoid shadowing the imported `cmd` module.
        make_cmd = ["make", "TSR_PYTHON=" + sys.executable]
        make_cmd.extend(args)
        out = subprocess.check_output(make_cmd)
        return out

    def _load_mkfile_description(self, info):
        """Query the one-line description via the ``<name>-info`` make target."""
        make_args = ["RULES=1", "-f", info.mkfile, info.name + "-info"]
        verbose_note("Querying description for \"" + info.name + "\"")
        try:
            out = self._run_make(make_args)
            info.description = out.decode().strip()
            verbose_note(" Description: \"" + info.description + "\"")
        except Exception as e:
            error("Failed to load description from " + info.mkfile + "(" + str(e) + ")")

    def _load_mkfile_plusargs(self, info):
        """Query supported plusargs via the ``<name>-plusargs`` make target.

        Output lines of the form ``+name=TYPE - description`` (both the
        ``=TYPE`` and ``- description`` parts optional) are parsed into
        PlusargInfo objects attached to `info`.
        """
        make_args = ["RULES=1", "-f", info.mkfile, info.name + "-plusargs"]
        verbose_note("Querying plusargs supported by \"" + info.name + "\"")
        try:
            out = self._run_make(make_args)
            for line in out.decode().splitlines():
                line = line.strip()
                if line.startswith("+"):
                    # Bug fix: str.find() returns -1 (truthy) when the
                    # separator is absent, which previously made the
                    # no-description case take the wrong branch and chop
                    # the last character off the plusarg.
                    if line.find('- ') != -1:
                        desc = line[line.find('- ')+1:].strip()
                        line = line[:line.find('- ')]
                    else:
                        desc = ""
                    if line.find("=") != -1:
                        # Plusarg with a value
                        name = line[1:line.find('=')].strip()
                        vtype = line[line.find('=')+1:].strip()
                    else:
                        # Just a plain plusarg
                        name = line[1:]
                        vtype = None
                    verbose_note("Plusargs: name=" + str(name) + " vtype=" + str(vtype) + " desc=" + str(desc))
                    plusarg = PlusargInfo(name, desc, vtype)
                    info.add_plusarg(plusarg)
        except Exception as e:
            # Bug fix: message previously said "description" (copy-paste).
            error("Failed to load plusargs from " + info.mkfile + "(" + str(e) + ")")
|
[
"cmd.extend",
"tsr.messaging.verbose_note",
"json.load",
"os.path.abspath",
"os.path.basename",
"os.path.isdir",
"tsr.plusarg_info.PlusargInfo",
"subprocess.check_output",
"os.path.dirname",
"os.path.isfile",
"os.path.splitext",
"os.path.join",
"os.listdir",
"importlib.util.module_from_spec"
] |
[((1666, 1684), 'os.listdir', 'os.listdir', (['pp_dir'], {}), '(pp_dir)\n', (1676, 1684), False, 'import os\n'), ((3411, 3462), 'tsr.messaging.verbose_note', 'verbose_note', (["('processing mkfiles directory ' + dir)"], {}), "('processing mkfiles directory ' + dir)\n", (3423, 3462), False, 'from tsr.messaging import verbose_note, error\n'), ((3489, 3504), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (3499, 3504), False, 'import os\n'), ((4673, 4698), 'os.path.isfile', 'os.path.isfile', (['json_file'], {}), '(json_file)\n', (4687, 4698), False, 'import os\n'), ((5116, 5132), 'cmd.extend', 'cmd.extend', (['args'], {}), '(args)\n', (5126, 5132), False, 'import cmd\n'), ((5156, 5184), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {}), '(cmd)\n', (5179, 5184), False, 'import subprocess\n'), ((5343, 5403), 'tsr.messaging.verbose_note', 'verbose_note', (['(\'Querying description for "\' + info.name + \'"\')'], {}), '(\'Querying description for "\' + info.name + \'"\')\n', (5355, 5403), False, 'from tsr.messaging import verbose_note, error\n'), ((5837, 5903), 'tsr.messaging.verbose_note', 'verbose_note', (['(\'Querying plusargs supported by "\' + info.name + \'"\')'], {}), '(\'Querying plusargs supported by "\' + info.name + \'"\')\n', (5849, 5903), False, 'from tsr.messaging import verbose_note, error\n'), ((2929, 2946), 'os.path.isdir', 'os.path.isdir', (['pp'], {}), '(pp)\n', (2942, 2946), False, 'import os\n'), ((4549, 4577), 'os.path.dirname', 'os.path.dirname', (['info.mkfile'], {}), '(info.mkfile)\n', (4564, 4577), False, 'import os\n'), ((4961, 4974), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (4970, 4974), False, 'import json\n'), ((5530, 5587), 'tsr.messaging.verbose_note', 'verbose_note', (['(\' Description: "\' + info.description + \'"\')'], {}), '(\' Description: "\' + info.description + \'"\')\n', (5542, 5587), False, 'from tsr.messaging import verbose_note, error\n'), ((2185, 2222), 'importlib.util.module_from_spec', 
'importlib.util.module_from_spec', (['spec'], {}), '(spec)\n', (2216, 2222), False, 'import importlib\n'), ((3536, 3556), 'os.path.join', 'os.path.join', (['dir', 'f'], {}), '(dir, f)\n', (3548, 3556), False, 'import os\n'), ((3591, 3610), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (3607, 3610), False, 'import os\n'), ((729, 754), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (744, 754), False, 'import os\n'), ((1732, 1767), 'os.path.join', 'os.path.join', (['pp_dir', '"""__init__.py"""'], {}), "(pp_dir, '__init__.py')\n", (1744, 1767), False, 'import os\n'), ((2073, 2108), 'os.path.join', 'os.path.join', (['pp_dir', '"""__init__.py"""'], {}), "(pp_dir, '__init__.py')\n", (2085, 2108), False, 'import os\n'), ((6838, 6868), 'tsr.plusarg_info.PlusargInfo', 'PlusargInfo', (['name', 'desc', 'vtype'], {}), '(name, desc, vtype)\n', (6849, 6868), False, 'from tsr.plusarg_info import PlusargInfo\n'), ((2286, 2305), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (2302, 2305), False, 'import os\n'), ((2536, 2598), 'tsr.messaging.verbose_note', 'messaging.verbose_note', (["('Process editable package: ' + line)", '(3)'], {}), "('Process editable package: ' + line, 3)\n", (2558, 2598), False, 'from tsr import messaging\n'), ((3835, 3884), 'tsr.messaging.verbose_note', 'verbose_note', (['(\'found engine named "\' + name + \'"\')'], {}), '(\'found engine named "\' + name + \'"\')\n', (3847, 3884), False, 'from tsr.messaging import verbose_note, error\n'), ((4608, 4637), 'os.path.basename', 'os.path.basename', (['info.mkfile'], {}), '(info.mkfile)\n', (4624, 4637), False, 'import os\n'), ((1884, 1907), 'os.path.join', 'os.path.join', (['pp_dir', 'f'], {}), '(pp_dir, f)\n', (1896, 1907), False, 'import os\n'), ((2379, 2402), 'os.path.join', 'os.path.join', (['pp_dir', 'f'], {}), '(pp_dir, f)\n', (2391, 2402), False, 'import os\n'), ((2732, 2755), 'os.path.join', 'os.path.join', (['pp_dir', 'f'], {}), '(pp_dir, f)\n', 
(2744, 2755), False, 'import os\n'), ((2803, 2826), 'os.path.join', 'os.path.join', (['pp_dir', 'f'], {}), '(pp_dir, f)\n', (2815, 2826), False, 'import os\n'), ((3935, 3955), 'os.path.join', 'os.path.join', (['dir', 'f'], {}), '(dir, f)\n', (3947, 3955), False, 'import os\n'), ((4174, 4221), 'tsr.messaging.verbose_note', 'verbose_note', (['(\'found tool named "\' + name + \'"\')'], {}), '(\'found tool named "\' + name + \'"\')\n', (4186, 4221), False, 'from tsr.messaging import verbose_note, error\n'), ((4430, 4469), 'tsr.messaging.verbose_note', 'verbose_note', (["('ignore makefile ' + f)", '(2)'], {}), "('ignore makefile ' + f, 2)\n", (4442, 4469), False, 'from tsr.messaging import verbose_note, error\n'), ((4270, 4290), 'os.path.join', 'os.path.join', (['dir', 'f'], {}), '(dir, f)\n', (4282, 4290), False, 'import os\n')]
|
#!/usr/bin/env python
import sys
import django
from django.conf import settings
from django.test.runner import DiscoverRunner
# Configure a minimal, self-contained Django environment so the flowr
# test suite can run without a full project settings module.
settings.configure(
    DEBUG=True,
    DATABASES={
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
        }
    },
    ROOT_URLCONF='flowr.urls',
    INSTALLED_APPS=(
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.sessions',
        'django.contrib.admin',
        'flowr',
        'flowr.tests',
    ),
)

django.setup()

# Run the flowr tests and propagate any failure count as the exit status.
test_runner = DiscoverRunner(verbosity=1)
failure_count = test_runner.run_tests(['flowr.tests'])
if failure_count:
    sys.exit(failure_count)
|
[
"django.conf.settings.configure",
"django.test.runner.DiscoverRunner",
"django.setup",
"sys.exit"
] |
[((129, 409), 'django.conf.settings.configure', 'settings.configure', ([], {'DEBUG': '(True)', 'DATABASES': "{'default': {'ENGINE': 'django.db.backends.sqlite3'}}", 'ROOT_URLCONF': '"""flowr.urls"""', 'INSTALLED_APPS': "('django.contrib.auth', 'django.contrib.contenttypes',\n 'django.contrib.sessions', 'django.contrib.admin', 'flowr', 'flowr.tests')"}), "(DEBUG=True, DATABASES={'default': {'ENGINE':\n 'django.db.backends.sqlite3'}}, ROOT_URLCONF='flowr.urls',\n INSTALLED_APPS=('django.contrib.auth', 'django.contrib.contenttypes',\n 'django.contrib.sessions', 'django.contrib.admin', 'flowr', 'flowr.tests'))\n", (147, 409), False, 'from django.conf import settings\n'), ((503, 517), 'django.setup', 'django.setup', ([], {}), '()\n', (515, 517), False, 'import django\n'), ((527, 554), 'django.test.runner.DiscoverRunner', 'DiscoverRunner', ([], {'verbosity': '(1)'}), '(verbosity=1)\n', (541, 554), False, 'from django.test.runner import DiscoverRunner\n'), ((617, 635), 'sys.exit', 'sys.exit', (['failures'], {}), '(failures)\n', (625, 635), False, 'import sys\n')]
|
# Copyright 2020 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Software flag module to store and report binary status."""
import hw_driver
# pylint: disable=invalid-name
# This follows servod drv naming convention
class sflag(hw_driver.HwDriver):
  """A driver to store and report an on/off flag."""

  # This is a binary flag
  VALID_VALUES = [0, 1]

  # This is not a constant but at the class level so that
  # it can be shared between set and get.
  # We use a list to allow for sharing across as the class will not make it
  # an instance variable if we write into the lists' 0th element.
  vstore = [None]

  def set(self, value):
    """Set the value to |value|.

    Args:
      value: int (or int-convertible); must be 0 or 1.

    Raises:
      hw_driver.HwDriverError: if |value| is not a valid binary value.
    """
    # While these controls _should_ be using a map so that the values
    # are converted to on/off, we still need to make sure.
    value = int(value)
    if value not in self.VALID_VALUES:
      # Bug fix: report the offending |value|, not the previously stored
      # flag (which may still be None and would crash %d formatting).
      raise hw_driver.HwDriverError('Invalid value: %d' % value)
    self.vstore[0] = value

  def get(self):
    """Return the |self.vstore| for this flag.

    Lazily initializes the flag from the 'default_value' param (0 if
    absent) on first read.

    Raises:
      hw_driver.HwDriverError: if the configured default is not binary.
    """
    if self.vstore[0] is None:
      # Initialize with a 0 unless a default is provided
      self.vstore[0] = int(self._params.get('default_value', 0))
      if self.vstore[0] not in self.VALID_VALUES:
        # The default must have been invalid. This is because set() guards
        # against invalid values already - the only other source of values.
        raise hw_driver.HwDriverError('Invalid default: %d' %
                                        self.vstore[0])
    return self.vstore[0]
|
[
"hw_driver.HwDriverError"
] |
[((985, 1046), 'hw_driver.HwDriverError', 'hw_driver.HwDriverError', (["('Invalid value: %d' % self.vstore[0])"], {}), "('Invalid value: %d' % self.vstore[0])\n", (1008, 1046), False, 'import hw_driver\n'), ((1538, 1601), 'hw_driver.HwDriverError', 'hw_driver.HwDriverError', (["('Invalid default: %d' % self.vstore[0])"], {}), "('Invalid default: %d' % self.vstore[0])\n", (1561, 1601), False, 'import hw_driver\n')]
|
import numpy as np
import pandas as pd
import WindFarmGenetic # wind farm layout optimization using genetic algorithms classes
from datetime import datetime
import os
from sklearn.svm import SVR
import pickle
# Wind farm settings and algorithm settings
# parameters for the genetic algorithm
# --- Genetic algorithm hyper-parameters ---
elite_rate = 0.2    # fraction of top layouts copied unchanged each generation
cross_rate = 0.6    # crossover probability
random_rate = 0.5
mutate_rate = 0.1

wt_N = 25  # number of wind turbines 15, 20, or 25

# NA_loc_array : not available location array, the index starting from 1
# L1 : wind farm, cells 121(inclusive) to 144(inclusive)
NA_loc_array = np.arange(121, 145, 1)

# Alternative land-constraint presets (uncomment exactly one to use it):
#
# # L2
# NA_loc_array = np.arange(61, 85, 1)
#
# # L3
# NA_loc_array = np.concatenate((np.arange(11, 144, 12), np.arange(12, 145, 12)))
#
# # L4
# NA_loc_array = np.concatenate((np.arange(6, 144, 12), np.arange(7, 145, 12)))
#
# # L5
# NA_loc_array = np.concatenate((np.arange(41, 105, 12), np.arange(42, 105, 12),
#                                 np.arange(43, 105, 12),
#                                 np.arange(44, 105, 12)))
#
# # L6
# NA_loc_array = np.concatenate((np.arange(1, 28, 12), np.arange(2, 28, 12),
#                                 np.arange(12, 37, 12),
#                                 np.arange(11, 37, 12),
#                                 np.arange(109, 145, 12), np.arange(119, 145, 12),
#                                 np.arange(110, 145, 12),
#                                 np.arange(120, 145, 12),
#                                 ))
#
# # L7
# NA_loc_array = np.arange(133, 145, 1)
#
# # L8
# NA_loc_array = np.arange(61, 73, 1)
#
# # L9
# NA_loc_array = np.arange(12, 145, 12)
#
# # L10
# NA_loc_array = np.arange(6, 145, 12)
#
# # L11
# NA_loc_array = np.concatenate((np.arange(42, 105, 12),
#                                 np.arange(43, 105, 12)))
#
# # L12
# NA_loc_array = np.array((1, 2, 11, 12, 13, 24, 121, 132, 133, 134, 143, 144))

# convert numpy array to a plain Python list (datatype conversion)
NA_loc = NA_loc_array.tolist()

# L0 (no unavailable cells)
# NA_loc = []

population_size = 120  # how many layouts in a population
iteration_times = 200  # how many iterations in a genetic algorithm run

n_inits = 100    # number of initial populations; n_inits >= run_times
run_times = 100  # number of different initial populations

# wind farm size, cells
cols_cells = 12  # number of cells each row
rows_cells = 12  # number of cells each column
cell_width = 77.0 * 3  # unit : m

# all data will be saved in the data folder.
# exist_ok avoids the check-then-create race of a separate
# os.path.exists() / os.makedirs() pair.
data_folder = "data"
os.makedirs(data_folder, exist_ok=True)
# Create a WindFarmGenetic object.
# rows/cols: wind farm land dimensions in cells; N: number of wind turbines.
# NA_loc: not-available locations on the wind farm land (landowners who do
# not participate in the wind farm).
# pop_size: how many individuals in the population
# iteration: iteration times of the genetic algorithm
wfg = WindFarmGenetic.WindFarmGenetic(rows=rows_cells, cols=cols_cells, N=wt_N, NA_loc=NA_loc,
                                      pop_size=population_size, iteration=iteration_times,
                                      cell_width=cell_width, elite_rate=elite_rate,
                                      cross_rate=cross_rate, random_rate=random_rate,
                                      mutate_rate=mutate_rate)

# Specify the wind distribution.
# The wind distribution is discrete: (number of wind speeds) x (number of wind directions).
# Alternatives (each pairs with its own SVR model file further below):
# wfg.init_1_direction_1_N_speed_13()   # -> 'svr_1s1d_N_13.svr'
# wfg.init_4_direction_1_speed_13()     # -> 'svr_1s4d_13.svr'
wfg.init_6_direction_1_speed_13()
# svr_model_filename = 'svr_1s6d_13.svr'

################################################
# generate initial populations
################################################
# initial population saved folder
init_pops_data_folder = "{}/init_data".format(data_folder)
os.makedirs(init_pops_data_folder, exist_ok=True)  # race-free create-if-missing

# Generate initial populations to start with and store them, so every
# method starts from the same initial populations and the final results
# are a fair comparison.
for i in range(n_inits):
    wfg.gen_init_pop_NA()
    wfg.save_init_pop_NA("{}/init_{}.dat".format(init_pops_data_folder, i),
                         "{}/init_{}_NA.dat".format(init_pops_data_folder, i))

# Create the results folders, e.g.:
#   adaptive_best_layouts_N60_9_20190422213718.dat : best layout for AGA of run index 9
#   result_CGA_20190422213715.dat : run time and best eta for CGA method
results_data_folder = "data/results"
# cg: conventional genetic algorithm
# ag: adaptive genetic algorithm
# sg: support vector regression guided genetic algorithm
cg_result_folder = "{}/cg".format(results_data_folder)
ag_result_folder = "{}/ag".format(results_data_folder)
sg_result_folder = "{}/sg".format(results_data_folder)
for _folder in (results_data_folder, cg_result_folder, ag_result_folder, sg_result_folder):
    os.makedirs(_folder, exist_ok=True)

# result_arr: run_times x 2; column 0 is the run time in seconds for each
# run and column 1 is the conversion efficiency (eta) for that run.
result_arr = np.zeros((run_times, 2), dtype=np.float32)
# Run the conventional genetic algorithm (CGA).
# (Header comment corrected: this first loop runs the *conventional* GA,
# not the adaptive one.)
for i in range(0, run_times):  # run times
    print("run times {} ...".format(i))
    # load initial population (shared across methods for a fair comparison)
    wfg.load_init_pop_NA("{}/init_{}.dat".format(init_pops_data_folder, i),
                         "{}/init_{}_NA.dat".format(init_pops_data_folder, i))
    # run the conventional genetic algorithm and return run time and conversion efficiency
    run_time, eta = wfg.conventional_genetic_alg(i, result_folder=cg_result_folder)
    result_arr[i, 0] = run_time
    result_arr[i, 1] = eta
time_stamp = datetime.now().strftime("%Y%m%d%H%M%S")
# save the run times and etas of all CGA runs to a timestamped file
filename = "{}/result_conventional_{}.dat".format(cg_result_folder, time_stamp)
np.savetxt(filename, result_arr, fmt='%f', delimiter=" ")

# Run the adaptive genetic algorithm (AGA) over the same initial populations.
for i in range(0, run_times):  # run times
    print("run times {} ...".format(i))
    # load the same initial population used by the other methods
    wfg.load_init_pop_NA("{}/init_{}.dat".format(init_pops_data_folder, i),
                         "{}/init_{}_NA.dat".format(init_pops_data_folder, i))
    run_time, eta = wfg.adaptive_genetic_alg(i, result_folder=ag_result_folder)
    result_arr[i, 0] = run_time
    result_arr[i, 1] = eta
time_stamp = datetime.now().strftime("%Y%m%d%H%M%S")
# save the run times and etas of all AGA runs to a timestamped file
filename = "{}/result_adaptive_{}.dat".format(ag_result_folder, time_stamp)
np.savetxt(filename, result_arr, fmt='%f', delimiter=" ")
# Run support vector regression guided genetic algorithm (SUGGA)
# Generate wind distribution surface
#############################################
# generate wind distribution surface
#############################################
n_mc_samples = 10000 # svr train data, number of layouts to average
wds_data_folder = "{}/wds".format(data_folder)
if not os.path.exists(wds_data_folder):
os.makedirs(wds_data_folder)
# mc : monte-carlo
# number of layouts to generate as the training data for regression
# to build the power distribution surface
# mc_layout.dat file stores layouts only with 0s and 1s. 0 means no turbine here. 1 means one turbine here.
# mc_layout_NA.dat file stores layouts with 0s, 1s and 2s. 2 means no turbine and not available for turbine.
# These two files are used to generate wind power distribution.
# Each file has 10000 lines. Each line is layout.
# gen_mc_grid_with_NA_loc function generates these two files.
train_mc_layouts, train_mc_layouts_NA = WindFarmGenetic.LayoutGridMCGenerator.gen_mc_grid_with_NA_loc(rows_cells,
cols_cells,
n_mc_samples,
wt_N, NA_loc,
"{}/mc_layout.dat".format(
wds_data_folder),
"{}/mc_layout_NA.dat".format(
wds_data_folder))
# wfg.init_1_direction_1_N_speed_13()
# file name to store the wind power distribution SVR model
# svr_model_filename = 'svr_1s1d_N_13.svr'
# wfg.init_4_direction_1_speed_13()
# svr_model_filename = 'svr_1s4d_13.svr'
# wfg.init_6_direction_1_speed_13()
svr_model_filename = 'svr_1s6d_13.svr'
# load Monte-Carlo layouts from a text file. 10000 random layouts
layouts = np.genfromtxt("{}/mc_layout.dat".format(wds_data_folder), delimiter=" ", dtype=np.int32)
# generate the location index coordinate and average power output at each location index coordinate
# location index coordinate : in the cells, the cell with index 1 has location index (0,0) and the cell 2 has (1,0)
# store the location index coordinate in x.dat and average power in y.dat
wfg.mc_gen_xy_NA(rows=rows_cells, cols=cols_cells, layouts=layouts, n=n_mc_samples, N=wt_N,
xfname="{}/x.dat".format(wds_data_folder),
yfname="{}/y.dat".format(wds_data_folder))
# read index location coordinates
x_original = pd.read_csv("{}/x.dat".format(wds_data_folder), header=None, nrows=rows_cells * cols_cells,
delim_whitespace=True, dtype=np.float32)
x_original = x_original.values
# read the power output of each index location coordinate
y_original = pd.read_csv("{}/y.dat".format(wds_data_folder), header=None, nrows=rows_cells * cols_cells,
delim_whitespace=True, dtype=np.float32)
y_original = y_original.values.flatten()
# create a SVR object and specify the kernal and other parameters
svr_model = SVR(kernel='rbf', C=2000.0, gamma=0.3, epsilon=.1)
# build the SVR power distribution model
svr_model.fit(x_original, y_original)
# save the SVR model to a file
pickle.dump(svr_model, open("{}/{}".format(wds_data_folder, svr_model_filename), 'wb'))
# This is how to load SVR model from a file
# svr_model = pickle.load(open("{}/{}".format(wds_data_folder,svr_model_filename), 'rb'))
# SUGGA: support vector regression guided genetic algorithm
for i in range(0, run_times): # run times
print("run times {} ...".format(i))
wfg.load_init_pop_NA("{}/init_{}.dat".format(init_pops_data_folder, i),
"{}/init_{}_NA.dat".format(init_pops_data_folder, i))
run_time, eta = wfg.sugga_genetic_alg(i, svr_model=svr_model, result_folder=sg_result_folder)
result_arr[i, 0] = run_time
result_arr[i, 1] = eta
time_stamp = datetime.now().strftime("%Y%m%d%H%M%S")
filename = "{}/result_sugga_{}.dat".format(sg_result_folder, time_stamp)
np.savetxt(filename, result_arr, fmt='%f', delimiter=" ")
|
[
"sklearn.svm.SVR",
"os.makedirs",
"numpy.savetxt",
"numpy.zeros",
"os.path.exists",
"numpy.arange",
"WindFarmGenetic.WindFarmGenetic",
"datetime.datetime.now"
] |
[((581, 603), 'numpy.arange', 'np.arange', (['(121)', '(145)', '(1)'], {}), '(121, 145, 1)\n', (590, 603), True, 'import numpy as np\n'), ((2996, 3268), 'WindFarmGenetic.WindFarmGenetic', 'WindFarmGenetic.WindFarmGenetic', ([], {'rows': 'rows_cells', 'cols': 'cols_cells', 'N': 'wt_N', 'NA_loc': 'NA_loc', 'pop_size': 'population_size', 'iteration': 'iteration_times', 'cell_width': 'cell_width', 'elite_rate': 'elite_rate', 'cross_rate': 'cross_rate', 'random_rate': 'random_rate', 'mutate_rate': 'mutate_rate'}), '(rows=rows_cells, cols=cols_cells, N=wt_N,\n NA_loc=NA_loc, pop_size=population_size, iteration=iteration_times,\n cell_width=cell_width, elite_rate=elite_rate, cross_rate=cross_rate,\n random_rate=random_rate, mutate_rate=mutate_rate)\n', (3027, 3268), False, 'import WindFarmGenetic\n'), ((5590, 5632), 'numpy.zeros', 'np.zeros', (['(run_times, 2)'], {'dtype': 'np.float32'}), '((run_times, 2), dtype=np.float32)\n', (5598, 5632), True, 'import numpy as np\n'), ((6401, 6459), 'numpy.savetxt', 'np.savetxt', (['filename', 'result_arr'], {'fmt': '"""%f"""', 'delimiter': '""" """'}), "(filename, result_arr, fmt='%f', delimiter=' ')\n", (6411, 6459), True, 'import numpy as np\n'), ((7053, 7111), 'numpy.savetxt', 'np.savetxt', (['filename', 'result_arr'], {'fmt': '"""%f"""', 'delimiter': '""" """'}), "(filename, result_arr, fmt='%f', delimiter=' ')\n", (7063, 7111), True, 'import numpy as np\n'), ((10641, 10692), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""', 'C': '(2000.0)', 'gamma': '(0.3)', 'epsilon': '(0.1)'}), "(kernel='rbf', C=2000.0, gamma=0.3, epsilon=0.1)\n", (10644, 10692), False, 'from sklearn.svm import SVR\n'), ((11630, 11688), 'numpy.savetxt', 'np.savetxt', (['filename', 'result_arr'], {'fmt': '"""%f"""', 'delimiter': '""" """'}), "(filename, result_arr, fmt='%f', delimiter=' ')\n", (11640, 11688), True, 'import numpy as np\n'), ((2520, 2547), 'os.path.exists', 'os.path.exists', (['data_folder'], {}), '(data_folder)\n', (2534, 2547), False, 
'import os\n'), ((2554, 2578), 'os.makedirs', 'os.makedirs', (['data_folder'], {}), '(data_folder)\n', (2565, 2578), False, 'import os\n'), ((3996, 4033), 'os.path.exists', 'os.path.exists', (['init_pops_data_folder'], {}), '(init_pops_data_folder)\n', (4010, 4033), False, 'import os\n'), ((4040, 4074), 'os.makedirs', 'os.makedirs', (['init_pops_data_folder'], {}), '(init_pops_data_folder)\n', (4051, 4074), False, 'import os\n'), ((4720, 4755), 'os.path.exists', 'os.path.exists', (['results_data_folder'], {}), '(results_data_folder)\n', (4734, 4755), False, 'import os\n'), ((4762, 4794), 'os.makedirs', 'os.makedirs', (['results_data_folder'], {}), '(results_data_folder)\n', (4773, 4794), False, 'import os\n'), ((5086, 5118), 'os.path.exists', 'os.path.exists', (['cg_result_folder'], {}), '(cg_result_folder)\n', (5100, 5118), False, 'import os\n'), ((5125, 5154), 'os.makedirs', 'os.makedirs', (['cg_result_folder'], {}), '(cg_result_folder)\n', (5136, 5154), False, 'import os\n'), ((5221, 5253), 'os.path.exists', 'os.path.exists', (['ag_result_folder'], {}), '(ag_result_folder)\n', (5235, 5253), False, 'import os\n'), ((5260, 5289), 'os.makedirs', 'os.makedirs', (['ag_result_folder'], {}), '(ag_result_folder)\n', (5271, 5289), False, 'import os\n'), ((5356, 5388), 'os.path.exists', 'os.path.exists', (['sg_result_folder'], {}), '(sg_result_folder)\n', (5370, 5388), False, 'import os\n'), ((5395, 5424), 'os.makedirs', 'os.makedirs', (['sg_result_folder'], {}), '(sg_result_folder)\n', (5406, 5424), False, 'import os\n'), ((7480, 7511), 'os.path.exists', 'os.path.exists', (['wds_data_folder'], {}), '(wds_data_folder)\n', (7494, 7511), False, 'import os\n'), ((7518, 7546), 'os.makedirs', 'os.makedirs', (['wds_data_folder'], {}), '(wds_data_folder)\n', (7529, 7546), False, 'import os\n'), ((6239, 6253), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6251, 6253), False, 'from datetime import datetime\n'), ((6935, 6949), 'datetime.datetime.now', 'datetime.now', 
([], {}), '()\n', (6947, 6949), False, 'from datetime import datetime\n'), ((11515, 11529), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (11527, 11529), False, 'from datetime import datetime\n')]
|
# -*- coding: utf-8 -*-
"""
目的
- アノテーション作業の前の一番最初の画像データの前処理
- 画像サイズを小さくする & 画像サイズを揃える
"""
import os
import glob
import numpy as np
from PIL import Image
import argparse
def main(args):
    """Resize every image matching args.img_filter in args.img_dir to a
    fixed height, preserving aspect ratio, and save into args.out_dir.

    Images may differ in size but are assumed to share the same aspect
    ratio; the height is fixed at 302 px and the width scaled accordingly.
    Portrait images are rotated to landscape first.

    Returns 0 on completion.
    """
    img_files = glob.glob(os.path.join(args.img_dir, args.img_filter))
    print('image_dir : ', args.img_dir, ', filter : ', args.img_filter)
    print('image file number : ', len(img_files))

    height_size = 302
    for img_file in img_files:
        # Bug fix: use a context manager so the underlying file handle is
        # closed promptly (a bare Image.open() keeps it open until GC).
        with Image.open(img_file) as org_img:
            img = org_img.copy()
            if img.height > img.width:  # normalize orientation to landscape
                img = img.rotate(90, expand=True)
            scale = float(height_size) / img.height
            res_img = img.resize((int(img.width * scale), height_size))
            # os.path.basename is portable (the original '/'-split only
            # worked on POSIX paths).
            res_img.save(os.path.join(args.out_dir, os.path.basename(img_file)))
            print(img_file, np.array(org_img).shape, '->', np.array(res_img).shape)
    return 0
if __name__ == '__main__':
    # Command-line entry point: parse the source/destination directories
    # and the glob filter, then run the resize pipeline.
    cli = argparse.ArgumentParser(description='argparser')
    cli.add_argument('--img_dir', type=str, default='data/org_images')
    cli.add_argument('--out_dir', type=str, default='data/res_images')
    cli.add_argument('--img_filter', type=str, default='*.JPG')
    main(cli.parse_args())
|
[
"numpy.array",
"os.path.join",
"argparse.ArgumentParser",
"PIL.Image.open"
] |
[((1035, 1083), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""argparser"""'}), "(description='argparser')\n", (1058, 1083), False, 'import argparse\n'), ((216, 259), 'os.path.join', 'os.path.join', (['args.img_dir', 'args.img_filter'], {}), '(args.img_dir, args.img_filter)\n', (228, 259), False, 'import os\n'), ((569, 589), 'PIL.Image.open', 'Image.open', (['img_file'], {}), '(img_file)\n', (579, 589), False, 'from PIL import Image\n'), ((924, 941), 'numpy.array', 'np.array', (['org_img'], {}), '(org_img)\n', (932, 941), True, 'import numpy as np\n'), ((955, 972), 'numpy.array', 'np.array', (['res_img'], {}), '(res_img)\n', (963, 972), True, 'import numpy as np\n')]
|
from flask import render_template, flash, redirect, url_for, request
import pymysql
import json
from app import *
from app.form import ServerInfo
from app.handler import *
@app.route('/', methods=['GET', 'POST'])
def login():
    # Render the server-connection form; on a valid POST, flash a summary
    # of the submitted credentials and redirect to the databases page.
    form = ServerInfo()
    if form.validate_on_submit():
        flash('Login requested for user {}, remember_me={}'.format(
            form.username.data, form.remember_me.data))
        return redirect(url_for('databases'))
    # GET (or invalid POST): show the form again.
    return render_template('index.html', form=form)
@app.route("/connect", methods=['GET', 'POST'])
def connect():
try:
mysql = pymysql.connect(
request.form["host"],
request.form["username"],
request.form["password"],
charset='utf8mb4'
)
data = connect_to_db(mysql, "SHOW DATABASES")
result = ''
for database in data:
result += "<option value=\""+database[0]+"\">"+database[0]+"</option>"
json_data = {"status": "success", "result": result}
return json.dumps(json_data)
except:
json_data = {"status": "error", "message": "Couldn't connect to MySQL, check your credentials!"}
return json_data
@app.route("/generate", methods=['GET', 'POST'])
def generate():
mysql = pymysql.connect(
request.form["host"],
request.form["username"],
request.form["password"],
request.form["database"],
charset='utf8mb4')
data = generate_mage(mysql, request.form["createUsers"])
return data
|
[
"json.dumps",
"flask.url_for",
"app.form.ServerInfo",
"flask.render_template",
"pymysql.connect"
] |
[((237, 249), 'app.form.ServerInfo', 'ServerInfo', ([], {}), '()\n', (247, 249), False, 'from app.form import ServerInfo\n'), ((438, 478), 'flask.render_template', 'render_template', (['"""index.html"""'], {'form': 'form'}), "('index.html', form=form)\n", (453, 478), False, 'from flask import render_template, flash, redirect, url_for, request\n'), ((1133, 1272), 'pymysql.connect', 'pymysql.connect', (["request.form['host']", "request.form['username']", "request.form['password']", "request.form['database']"], {'charset': '"""utf8mb4"""'}), "(request.form['host'], request.form['username'], request.\n form['password'], request.form['database'], charset='utf8mb4')\n", (1148, 1272), False, 'import pymysql\n'), ((560, 673), 'pymysql.connect', 'pymysql.connect', (["request.form['host']", "request.form['username']", "request.form['password']"], {'charset': '"""utf8mb4"""'}), "(request.form['host'], request.form['username'], request.\n form['password'], charset='utf8mb4')\n", (575, 673), False, 'import pymysql\n'), ((908, 929), 'json.dumps', 'json.dumps', (['json_data'], {}), '(json_data)\n', (918, 929), False, 'import json\n'), ((408, 428), 'flask.url_for', 'url_for', (['"""databases"""'], {}), "('databases')\n", (415, 428), False, 'from flask import render_template, flash, redirect, url_for, request\n')]
|
import math
from datetime import datetime, timedelta
from Ops import Op
def splitTc(tc):
hrs, mins, secs, frames = tc.split(":")
return hrs, mins, secs, frames
def TCFtoInt(tc, fps):
hrs, mins, secs, frames = splitTc(tc)
fps = math.ceil(float(fps))
if hrs != "" and mins != "" and secs != "" and frames != "" and fps != "":
mins = (int(hrs) * 60) + int(mins)
secs = (int(mins) * 60) + int(secs)
frames = (int(secs) * (int(fps))) + int(frames)
return frames
return None
def TCSub(tc1, tc2, fps):
""" tc1 minus tc2 == Result """
tc1hr, tc1min, tc1sec, tc1frame = splitTc(tc1)
tc2hr, tc2min, tc2sec, tc2frame = splitTc(tc2)
tc1Delta = timedelta(hours=int(tc1hr), minutes=int(tc1min), seconds=int(tc1sec))
tc2Delta = timedelta(hours=int(tc2hr), minutes=int(tc2min), seconds=int(tc2sec))
tcDate = datetime.fromtimestamp(int(tc1Delta.total_seconds()) - int(tc2Delta.total_seconds()))
totalFrames = int(tc1frame) - int(tc2frame)
if totalFrames < 0:
totalFrames += fps
tcDate = tcDate - timedelta(seconds=1)
return "%s:%02d" % (tcDate.strftime("%H:%M:%S"), int(totalFrames))
class SetFrameRange(Op.Op):
def __init__(self, name='/SetFrameRange', locations=''):
fields = [
('name', 'name', 'name', 'string', name, {}),
('locations', 'locations', 'Location containing a frame range', 'string', locations, {}),
]
super(self.__class__, self).__init__(name, fields)
def cook(self, location, interface, attrs):
if not interface.opParamsDirty(): return
# Get frame range from location and set if we find any
frameRange = interface.attr('frameRange')
if not frameRange: return
if len(frameRange) != 2: return
interface.setFrameRange(frameRange[0], frameRange[1])
interface.updateTimeline()
self.logger.info('Set range to [%d, %d]' % (frameRange[0], frameRange[1]))
# Register Ops
import Registry
Registry.registerOp('Set Frame Range', SetFrameRange)
|
[
"datetime.timedelta",
"Registry.registerOp"
] |
[((1854, 1907), 'Registry.registerOp', 'Registry.registerOp', (['"""Set Frame Range"""', 'SetFrameRange'], {}), "('Set Frame Range', SetFrameRange)\n", (1873, 1907), False, 'import Registry\n'), ((1014, 1034), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(1)'}), '(seconds=1)\n', (1023, 1034), False, 'from datetime import datetime, timedelta\n')]
|
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import matplotlib
matplotlib.use('Agg')
# True indicates it's for 10-class multiway version
test_f1s_filename = '/Users/sofiaserrano/Downloads/paperResults/binaryTEST_withcontext_bootstrappedf1s.csv'
dev_f1s_filename = '/Users/sofiaserrano/Downloads/paperResults/binaryDEV_withcontext_bootstrappedf1s.csv'
tag = 'binary'
sns.set()
def read_in_data(fname):
roberta_f1s = []
baseline_f1s = []
with open(fname, 'r') as f:
field_names = f.readline().strip().split(',')
roberta_is_field_0 = (field_names[0] == 'bootstrapped_f1_roberta')
if not roberta_is_field_0:
assert field_names[0] == 'bootstrapped_f1_baseline'
for line in f:
string_vals = line.strip().split(',')
if roberta_is_field_0:
roberta_f1s.append(float(string_vals[0]))
baseline_f1s.append(float(string_vals[1]))
else:
roberta_f1s.append(float(string_vals[1]))
baseline_f1s.append(float(string_vals[0]))
return roberta_f1s, baseline_f1s
dev_roberta_f1s, dev_baseline_f1s = read_in_data(dev_f1s_filename)
test_roberta_f1s, test_baseline_f1s = read_in_data(test_f1s_filename)
list_of_row_dicts = []
for data_point in dev_roberta_f1s:
row_dict = {'Data split': 'Dev',
"Model": "RoBERTa",
"Bootstrapped F1 score": data_point}
list_of_row_dicts.append(row_dict)
for data_point in test_roberta_f1s:
row_dict = {'Data split': 'Test',
"Model": "RoBERTa",
"Bootstrapped F1 score": data_point}
list_of_row_dicts.append(row_dict)
for data_point in dev_baseline_f1s:
row_dict = {'Data split': 'Dev',
"Model": "Baseline",
"Bootstrapped F1 score": data_point}
list_of_row_dicts.append(row_dict)
for data_point in test_baseline_f1s:
row_dict = {'Data split': 'Test',
"Model": "Baseline",
"Bootstrapped F1 score": data_point}
list_of_row_dicts.append(row_dict)
data_to_plot = pd.DataFrame(list_of_row_dicts)
fig = plt.figure(figsize=(12, 4))
#plt.ylim(0, 1)
ax = sns.boxplot(x="Data split", y="Bootstrapped F1 score", hue="Model", data=data_to_plot, palette='PuOr')
plt.title('Bootstrapped F1 scores for ' + tag + ' held-out data')
plt.savefig('/Users/sofiaserrano/Downloads/paperResults/BootstrappedF1s' + tag + '.png', bbox_inches='tight')
plt.close(fig)
|
[
"pandas.DataFrame",
"matplotlib.pyplot.title",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"seaborn.boxplot",
"matplotlib.use",
"seaborn.set",
"matplotlib.pyplot.savefig"
] |
[((92, 113), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (106, 113), False, 'import matplotlib\n'), ((399, 408), 'seaborn.set', 'sns.set', ([], {}), '()\n', (406, 408), True, 'import seaborn as sns\n'), ((2123, 2154), 'pandas.DataFrame', 'pd.DataFrame', (['list_of_row_dicts'], {}), '(list_of_row_dicts)\n', (2135, 2154), True, 'import pandas as pd\n'), ((2163, 2190), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (2173, 2190), True, 'import matplotlib.pyplot as plt\n'), ((2212, 2319), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""Data split"""', 'y': '"""Bootstrapped F1 score"""', 'hue': '"""Model"""', 'data': 'data_to_plot', 'palette': '"""PuOr"""'}), "(x='Data split', y='Bootstrapped F1 score', hue='Model', data=\n data_to_plot, palette='PuOr')\n", (2223, 2319), True, 'import seaborn as sns\n'), ((2315, 2380), 'matplotlib.pyplot.title', 'plt.title', (["('Bootstrapped F1 scores for ' + tag + ' held-out data')"], {}), "('Bootstrapped F1 scores for ' + tag + ' held-out data')\n", (2324, 2380), True, 'import matplotlib.pyplot as plt\n'), ((2381, 2494), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('/Users/sofiaserrano/Downloads/paperResults/BootstrappedF1s' + tag + '.png')"], {'bbox_inches': '"""tight"""'}), "('/Users/sofiaserrano/Downloads/paperResults/BootstrappedF1s' +\n tag + '.png', bbox_inches='tight')\n", (2392, 2494), True, 'import matplotlib.pyplot as plt\n'), ((2491, 2505), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (2500, 2505), True, 'import matplotlib.pyplot as plt\n')]
|
import time
import microcontroller
from hardware import drivers
if drivers.vbus_detect.value:
from . import low_battery_splash
while drivers._read_bat_percent() < 4:
time.sleep(5)
print(f"[time: {time.monotonic()}] battery too low, waiting to boot")
else:
print("battery charged to 4+ percent, restarting")
microcontroller.reset()
|
[
"microcontroller.reset",
"hardware.drivers._read_bat_percent",
"time.monotonic",
"time.sleep"
] |
[((139, 166), 'hardware.drivers._read_bat_percent', 'drivers._read_bat_percent', ([], {}), '()\n', (164, 166), False, 'from hardware import drivers\n'), ((176, 189), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (186, 189), False, 'import time\n'), ((329, 352), 'microcontroller.reset', 'microcontroller.reset', ([], {}), '()\n', (350, 352), False, 'import microcontroller\n'), ((210, 226), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (224, 226), False, 'import time\n')]
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training Loop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow_graphics.projects.cvxnet.lib import datasets
from tensorflow_graphics.projects.cvxnet.lib import models
from tensorflow_graphics.projects.cvxnet.lib import utils
tf.disable_eager_execution()
flags = tf.app.flags
logging = tf.logging
tf.logging.set_verbosity(tf.logging.INFO)
utils.define_flags()
FLAGS = flags.FLAGS
def main(unused_argv):
tf.set_random_seed(2191997)
np.random.seed(6281996)
logging.info("=> Starting ...")
# Select dataset.
logging.info("=> Preparing datasets ...")
data = datasets.get_dataset(FLAGS.dataset, "train", FLAGS)
batch = tf.data.make_one_shot_iterator(data).get_next()
# Select model.
logging.info("=> Creating {} model".format(FLAGS.model))
model = models.get_model(FLAGS.model, FLAGS)
optimizer = tf.train.AdamOptimizer(FLAGS.lr)
# Set up the graph
train_loss, train_op, global_step = model.compute_loss(
batch, training=True, optimizer=optimizer)
# Training hooks
stop_hook = tf.train.StopAtStepHook(last_step=FLAGS.max_steps)
summary_writer = tf.summary.FileWriter(FLAGS.train_dir)
ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
summary_hook = tf.train.SummarySaverHook(
save_steps=100, summary_writer=summary_writer, summary_op=ops)
step_counter_hook = tf.train.StepCounterHook(summary_writer=summary_writer)
hooks = [stop_hook, step_counter_hook, summary_hook]
logging.info("=> Start training loop ...")
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.train_dir,
hooks=hooks,
scaffold=None,
save_checkpoint_steps=FLAGS.save_every,
save_checkpoint_secs=None,
save_summaries_steps=None,
save_summaries_secs=None,
log_step_count_steps=None,
max_wait_secs=3600) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run([batch, train_loss, global_step, train_op])
if __name__ == "__main__":
tf.app.run(main)
|
[
"numpy.random.seed",
"tensorflow.compat.v1.train.SummarySaverHook",
"tensorflow_graphics.projects.cvxnet.lib.utils.define_flags",
"tensorflow.compat.v1.data.make_one_shot_iterator",
"tensorflow.compat.v1.disable_eager_execution",
"tensorflow.compat.v1.set_random_seed",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.compat.v1.train.StopAtStepHook",
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow_graphics.projects.cvxnet.lib.models.get_model",
"tensorflow.compat.v1.summary.FileWriter",
"tensorflow.compat.v1.get_collection",
"tensorflow.compat.v1.train.StepCounterHook",
"tensorflow.compat.v1.train.MonitoredTrainingSession",
"tensorflow_graphics.projects.cvxnet.lib.datasets.get_dataset",
"tensorflow.compat.v1.app.run"
] |
[((951, 979), 'tensorflow.compat.v1.disable_eager_execution', 'tf.disable_eager_execution', ([], {}), '()\n', (977, 979), True, 'import tensorflow.compat.v1 as tf\n'), ((1023, 1064), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (1047, 1064), True, 'import tensorflow.compat.v1 as tf\n'), ((1066, 1086), 'tensorflow_graphics.projects.cvxnet.lib.utils.define_flags', 'utils.define_flags', ([], {}), '()\n', (1084, 1086), False, 'from tensorflow_graphics.projects.cvxnet.lib import utils\n'), ((1134, 1161), 'tensorflow.compat.v1.set_random_seed', 'tf.set_random_seed', (['(2191997)'], {}), '(2191997)\n', (1152, 1161), True, 'import tensorflow.compat.v1 as tf\n'), ((1164, 1187), 'numpy.random.seed', 'np.random.seed', (['(6281996)'], {}), '(6281996)\n', (1178, 1187), True, 'import numpy as np\n'), ((1297, 1348), 'tensorflow_graphics.projects.cvxnet.lib.datasets.get_dataset', 'datasets.get_dataset', (['FLAGS.dataset', '"""train"""', 'FLAGS'], {}), "(FLAGS.dataset, 'train', FLAGS)\n", (1317, 1348), False, 'from tensorflow_graphics.projects.cvxnet.lib import datasets\n'), ((1495, 1531), 'tensorflow_graphics.projects.cvxnet.lib.models.get_model', 'models.get_model', (['FLAGS.model', 'FLAGS'], {}), '(FLAGS.model, FLAGS)\n', (1511, 1531), False, 'from tensorflow_graphics.projects.cvxnet.lib import models\n'), ((1546, 1578), 'tensorflow.compat.v1.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['FLAGS.lr'], {}), '(FLAGS.lr)\n', (1568, 1578), True, 'import tensorflow.compat.v1 as tf\n'), ((1742, 1792), 'tensorflow.compat.v1.train.StopAtStepHook', 'tf.train.StopAtStepHook', ([], {'last_step': 'FLAGS.max_steps'}), '(last_step=FLAGS.max_steps)\n', (1765, 1792), True, 'import tensorflow.compat.v1 as tf\n'), ((1812, 1850), 'tensorflow.compat.v1.summary.FileWriter', 'tf.summary.FileWriter', (['FLAGS.train_dir'], {}), '(FLAGS.train_dir)\n', (1833, 1850), True, 'import tensorflow.compat.v1 as tf\n'), ((1859, 
1900), 'tensorflow.compat.v1.get_collection', 'tf.get_collection', (['tf.GraphKeys.SUMMARIES'], {}), '(tf.GraphKeys.SUMMARIES)\n', (1876, 1900), True, 'import tensorflow.compat.v1 as tf\n'), ((1918, 2010), 'tensorflow.compat.v1.train.SummarySaverHook', 'tf.train.SummarySaverHook', ([], {'save_steps': '(100)', 'summary_writer': 'summary_writer', 'summary_op': 'ops'}), '(save_steps=100, summary_writer=summary_writer,\n summary_op=ops)\n', (1943, 2010), True, 'import tensorflow.compat.v1 as tf\n'), ((2036, 2091), 'tensorflow.compat.v1.train.StepCounterHook', 'tf.train.StepCounterHook', ([], {'summary_writer': 'summary_writer'}), '(summary_writer=summary_writer)\n', (2060, 2091), True, 'import tensorflow.compat.v1 as tf\n'), ((2661, 2677), 'tensorflow.compat.v1.app.run', 'tf.app.run', (['main'], {}), '(main)\n', (2671, 2677), True, 'import tensorflow.compat.v1 as tf\n'), ((2200, 2473), 'tensorflow.compat.v1.train.MonitoredTrainingSession', 'tf.train.MonitoredTrainingSession', ([], {'checkpoint_dir': 'FLAGS.train_dir', 'hooks': 'hooks', 'scaffold': 'None', 'save_checkpoint_steps': 'FLAGS.save_every', 'save_checkpoint_secs': 'None', 'save_summaries_steps': 'None', 'save_summaries_secs': 'None', 'log_step_count_steps': 'None', 'max_wait_secs': '(3600)'}), '(checkpoint_dir=FLAGS.train_dir, hooks=\n hooks, scaffold=None, save_checkpoint_steps=FLAGS.save_every,\n save_checkpoint_secs=None, save_summaries_steps=None,\n save_summaries_secs=None, log_step_count_steps=None, max_wait_secs=3600)\n', (2233, 2473), True, 'import tensorflow.compat.v1 as tf\n'), ((1359, 1395), 'tensorflow.compat.v1.data.make_one_shot_iterator', 'tf.data.make_one_shot_iterator', (['data'], {}), '(data)\n', (1389, 1395), True, 'import tensorflow.compat.v1 as tf\n')]
|