text stringlengths 38 1.54M |
|---|
import graphene
from graphene_django.types import DjangoObjectType
from graphene.types.generic import GenericScalar
from django.utils import timezone
import datetime
import pytz
from django.db.models import Q
from backend.utils import HelperClass
from functools import reduce
from collections import Counter
import datetime
from graphql import GraphQLError
from backend.utils import HelperClass
from .. import models
from app.schemas.board_schema import *
def where_is_card(card):
    """Locate *card* relative to its board's two boundary columns.

    Returns:
        0 -- the card's column is left of the first boundary column,
        1 -- the card's column is between the two boundary columns,
        2 -- the card's column is right of the second boundary column.
    """
    board = card.project.board
    columns = list(models.Column.objects.filter(board=board))
    list_columns = get_columns_absolute(columns, [])
    # Fetch both boundary columns with a single query (the original ran
    # the identical query twice, once per boundary).
    boundaries = list(models.Column.objects.filter(board=board, boundary=True))
    first_boundary, second_boundary = boundaries[0], boundaries[1]
    first_b_index = list_columns.index(first_boundary)
    second_b_index = list_columns.index(second_boundary)
    column_index = list_columns.index(card.column)
    if column_index < first_b_index:
        return 0
    elif column_index > second_b_index:
        return 2
    else:
        return 1
def get_columns_absolute(columns, list_of_columns):
    """Flatten a column tree into *list_of_columns*, left to right.

    Leaf columns (no children) are appended in order; a parent column is
    replaced by its recursively flattened children.  The accumulator list
    is returned so recursive calls can be chained.
    """
    for col in columns:
        # Evaluate the related set once per column (the original called
        # ``col.children.all()`` twice per node).
        children = list(col.children.all())
        if not children:
            list_of_columns.append(col)
        else:
            list_of_columns = get_columns_absolute(children, list_of_columns)
    return list_of_columns
def get_first_column(card):
    # ``from_column`` of the card's oldest log entry.
    # NOTE(review): assumes at least one CardLog exists for the card —
    # ``first()`` returns None otherwise and this raises AttributeError.
    # Also, the creation log is saved with from_column=None (see AddCard),
    # so this may return None for cards logged since creation — confirm.
    return models.CardLog.objects.filter(card=card).first().from_column
def get_current_column(card):
    # ``to_column`` of the card's newest log entry, i.e. where the card
    # was last moved to.
    # NOTE(review): assumes at least one CardLog exists — ``last()``
    # returns None otherwise and this raises AttributeError.
    return models.CardLog.objects.filter(card=card).last().to_column
def card_per_column_time(card, minimal=True, column_from=None, column_to=None):
    """Hours the card spent in each column -> ``{column name: hours}``.

    With ``minimal`` the backlog columns (left of the priority column) and
    the done column are excluded.  When both ``column_from`` and
    ``column_to`` are given, only the columns between them are considered.
    """
    localtz = pytz.timezone('Europe/Ljubljana')
    board = card.project.board
    cols = models.Column.objects.filter(board=board)
    if minimal:
        priority = models.Column.objects.get(board=board, priority=True)
        backlogs = models.Column.objects.filter(board=board, position__lt=priority.position, parent=priority.parent)
        cols = cols.exclude(id__in=[c.id for c in backlogs])
        cols = cols.exclude(id=get_done_column(board).id)
    if column_from and column_to:
        cols = columns_between(column_from, column_to)
    cols = sort_columns(cols)
    # Loop invariants hoisted: the original re-queried the first/current/done
    # column (and re-read the clock) on every iteration.
    first_col = get_first_column(card)
    current_col = get_current_column(card)
    done_col = get_done_column(board)
    now = timezone.now()
    per_column = {}
    for col in cols:
        per_column[col.name] = 0
        if col == first_col:
            # Time in the first column counts from the project start date
            # (localized to Europe/Ljubljana) to the first arrival log.
            log = card.logs.filter(to_column=col).first()
            project_start = card.project.date_start
            start = localtz.localize(datetime.datetime(project_start.year, project_start.month, project_start.day))
            diff = abs((log.timestamp - start).total_seconds()) / 3600
            per_column[col.name] = float("{0:.2f}".format(diff))
        elif col == current_col:
            # Time in the current column counts up to "now" (0 when done).
            log = card.logs.filter(to_column=col).first()
            diff = abs((now - log.timestamp).total_seconds()) / 3600
            if col == done_col:
                diff = 0
            per_column[col.name] = float("{0:.2f}".format(diff))
        # Completed stays: pair each departure log with an arrival log.
        for a, b in zip(card.logs.filter(from_column=col), card.logs.filter(to_column=col)):
            per_column[col.name] += abs((a.timestamp - b.timestamp).total_seconds()) / 3600
    return per_column
def card_total_time(card, minimal=True, column_from=None, column_to=None):
    """Total hours the card spent across columns.

    Sums the per-column values from :func:`card_per_column_time` with the
    same arguments.
    """
    data = card_per_column_time(card, minimal, column_from, column_to)
    # sum over dict values directly instead of a list of (key, value) pairs
    return sum(data.values())
def get_done_column(board):
    """Return the column immediately right of the board's acceptance column."""
    board_columns = board.column_set.all()
    # Fetch the acceptance column once (the original ran the query twice).
    acceptance = board_columns.get(acceptance=True)
    return board_columns.get(position=acceptance.position + 1, parent=acceptance.parent)
def get_boundary_columns(board):
    """Return the board's two boundary columns as a ``(left, right)`` pair.

    Unpacking requires the board to have exactly two columns flagged
    ``boundary=True``.
    """
    left, right = models.Column.objects.filter(board=board, boundary=True)
    return left, right
def done_cards(project_id):
    """All cards of project *project_id* currently in the board's done column.

    Returns ``[]`` when the project does not exist or any lookup fails —
    a missing result is deliberately treated as "no done cards".
    """
    try:
        project = models.Project.objects.get(id=project_id)
        done_column = get_done_column(project.board)
        project_cards = models.Card.objects.filter(project=project)
        return [c for c in project_cards if get_current_column(c) == done_column]
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; ORM lookup errors still yield [].
        return []
def filter_cards(project_id, creation_start, creation_end, done_start, done_end, dev_start, dev_end, \
                 estimate_from, estimate_to, card_type):
    # Filter the card set by any combination of: project, creation-date range,
    # done-date range, development-start range, estimate range and card types.
    # Falsy arguments (0 / "" / None) mean "no filter". Deleted cards are
    # always excluded.
    #
    # NOTE(review): the done_*/dev_* branches read ``project.board``, but
    # ``project`` is only bound when ``project_id`` is truthy — calling those
    # filters without a project_id raises NameError. Confirm callers always
    # pass a project when filtering by done/dev dates.
    cards = models.Card.objects.all()
    if project_id:
        project = models.Project.objects.get(id=project_id)
        cards = cards.filter(project=project)
    if creation_start:
        start = HelperClass.get_si_date(creation_start)
        cards = cards.filter(date_created__gte=start)
    if creation_end:
        end = HelperClass.get_si_date(creation_end)
        # +1 day so the end date itself is included.
        cards = cards.filter(date_created__lte=end + datetime.timedelta(days=1))
    if done_start:
        # Cards that entered the done column on/after done_start.
        start = HelperClass.get_si_date(done_start)
        done_column = get_done_column(project.board)
        valid_cards = models.CardLog.objects.filter(to_column=done_column, timestamp__gte=start).values_list('card',
                                                                                                             flat=True)
        cards = cards.filter(pk__in=[c for c in valid_cards])
    if done_end:
        end = HelperClass.get_si_date(done_end)
        done_column = get_done_column(project.board)
        valid_cards = models.CardLog.objects.filter(to_column=done_column,
                                                    timestamp__lt=end + datetime.timedelta(days=1)).values_list('card',
                                                                                                                flat=True)
        cards = cards.filter(pk__in=[c for c in valid_cards])
    if dev_start:
        # A card "entered development" when it moved into the left boundary column.
        start = HelperClass.get_si_date(dev_start)
        left, _ = get_boundary_columns(project.board)
        valid_cards = models.CardLog.objects.filter(to_column=left, timestamp__gte=start).values_list('card', flat=True)
        cards = cards.filter(pk__in=[c for c in valid_cards])
    if dev_end:
        end = HelperClass.get_si_date(dev_end)
        left, _ = get_boundary_columns(project.board)
        valid_cards = models.CardLog.objects.filter(to_column=left,
                                                    timestamp__lt=end + datetime.timedelta(days=1)).values_list('card',
                                                                                                                flat=True)
        cards = cards.filter(pk__in=[c for c in valid_cards])
    if estimate_from:
        cards = cards.filter(estimate__gte=estimate_from)
    if estimate_to:
        cards = cards.filter(estimate__lte=estimate_to)
    if card_type:
        # Entries look like "<prefix>_<type id>"; OR all type ids together.
        type_ids = [t.split('_')[1] for t in card_type if t]
        if type_ids: cards = cards.filter(reduce(lambda x, y: x | y, [Q(type__id=id) for id in type_ids]))
    return cards.filter(is_deleted=False)
def cards_per_dev(cards):
    """Count cards per owner.

    Returns a list of ``{'name', 'email', 'value'}`` dicts, one per distinct
    owner (UserTeam) found in *cards*, where ``value`` is the card count.
    """
    counts = Counter(cards.values_list('owner', flat=True))
    result = []
    # Renamed locals: the original shadowed the builtins ``dict`` and ``id``.
    for owner_id, count in counts.items():
        member = models.UserTeam.objects.get(id=owner_id).member
        result.append({
            'name': member.first_name + ' ' + member.last_name,
            'email': member.email,
            'value': count,
        })
    return result
def estimate_per_dev(cards):
    """Sum card estimates per owner.

    Returns a list of ``{'name', 'email', 'value'}`` dicts, one per distinct
    owner (UserTeam), where ``value`` is the summed estimate of that owner's
    cards in *cards*.
    """
    owner_ids = cards.values_list('owner', flat=True).distinct()
    result = []
    # Renamed locals: the original shadowed the builtins ``dict`` and ``id``.
    for owner_id in owner_ids:
        user_team = models.UserTeam.objects.get(id=owner_id)
        member = user_team.member
        result.append({
            'name': member.first_name + ' ' + member.last_name,
            'email': member.email,
            'value': sum(cards.filter(owner=user_team).values_list('estimate', flat=True)),
        })
    return result
def columns_between(col1, col2):
    """Leaf columns from *col1* to *col2* inclusive, in board order."""
    roots = models.Column.objects.filter(board=col1.board, parent=None)
    ordered = get_columns_absolute(roots, [])
    start = ordered.index(col1)
    stop = ordered.index(col2) + 1
    return ordered[start:stop]
def sort_columns(columns):
    """Return *columns* flattened into board (left-to-right) order.

    :func:`get_columns_absolute` already yields the columns in order.  The
    original then ran ``sorted(xs, key=xs.index)`` — sorting a list by each
    element's own index in that list is an identity operation (stable sort,
    already-sorted keys) costing O(n^2); it has been dropped.
    """
    return get_columns_absolute(columns, [])
def column_at_date(card, date):
    """Columns the card occupied on *date*.

    Collects the from/to columns of every log stamped on that date; when the
    card did not move that day, falls back to the destination of the latest
    log at or before *date*.  ``None`` entries (a creation log has no
    from_column) are skipped.
    """
    logs = card.logs
    column_ids = set()
    for from_id, to_id in logs.filter(timestamp__contains=date).values_list('from_column', 'to_column'):
        column_ids.update([from_id, to_id])
    if not column_ids:
        earlier = logs.filter(timestamp__lte=date)
        if earlier:
            column_ids.add(earlier.last().to_column.id)
    return [models.Column.objects.get(id=col_id) for col_id in column_ids if col_id]
def cards_per_day(cards, date_from, date_to, column_from, column_to):
    """Daily histogram of card positions.

    For every day in ``[date_from, date_to]`` returns how many of *cards*
    sat in each column between *column_from* and *column_to* (column ids),
    keyed by the SI-formatted date.
    """
    start = HelperClass.get_si_date(date_from)
    end = HelperClass.get_si_date(date_to)
    col_from = models.Column.objects.get(id=column_from)
    col_to = models.Column.objects.get(id=column_to)
    # Reversed so the rightmost column comes first in each day's dict.
    between = columns_between(col_from, col_to)[::-1]
    dates = {}
    day = start
    while day <= end:
        counts = {col.name: 0 for col in between}
        for card in cards:
            for column in column_at_date(card, day.date()):
                if column in between:
                    counts[column.name] += 1
        dates[HelperClass.to_si_date(day)] = counts
        day += datetime.timedelta(days=1)
    return dates
class WhoCanEditType(graphene.ObjectType):
    """Per-field edit permissions for a card.

    Each boolean flags whether the requesting user may edit that card
    field; ``error`` carries a message when editing is denied entirely.
    """
    card_name = graphene.Boolean()
    card_description = graphene.Boolean()
    project_name = graphene.Boolean()
    owner = graphene.Boolean()
    date = graphene.Boolean()
    estimate = graphene.Boolean()
    tasks = graphene.Boolean()
    priority = graphene.Boolean(default_value=False)
    error = graphene.String()
class TaskType(DjangoObjectType):
    """GraphQL type auto-generated from the Task model."""
    class Meta:
        model = models.Task
class CardTypeType(DjangoObjectType):
    """GraphQL type for the CardType model, exposing its str() as ``name``."""
    class Meta:
        model = models.CardType
    name = graphene.String()

    def resolve_name(instance, info):
        # Use the model's string representation as the display name.
        return str(instance)
class CardType(DjangoObjectType):
    """GraphQL type for the Card model, with computed timing fields."""
    class Meta:
        model = models.Card
    # Hours the card spent per column: {column name: hours}.
    card_per_column_time = GenericScalar(
        minimal=graphene.Boolean(default_value=True),
        column_from=graphene.String(),
        column_to=graphene.String()
    )
    # Total hours spent between two columns (whole board when omitted).
    travel_time = graphene.Float(
        minimal=graphene.Boolean(default_value=True),
        column_from=graphene.String(),
        column_to=graphene.String()
    )
    is_done = graphene.Boolean()

    def resolve_is_done(instance, info):
        # A card is done when it sits in its board's done column.
        done_col = get_done_column(instance.column.board)
        return instance.column == done_col

    def resolve_card_per_column_time(instance, info, minimal, column_from=None, column_to=None):
        # column_from / column_to arrive as ids; resolve to Column objects
        # only when both are provided.
        if column_from and column_to:
            column_from = models.Column.objects.get(id=column_from)
            column_to = models.Column.objects.get(id=column_to)
        return card_per_column_time(instance, minimal, column_from, column_to)

    def resolve_travel_time(instance, info, minimal, column_from=None, column_to=None):
        if column_from and column_to:
            column_from = models.Column.objects.get(id=column_from)
            column_to = models.Column.objects.get(id=column_to)
            columns = models.Column.objects.filter(board=column_from.board, parent=None)
            columns = get_columns_absolute(columns, [])
            # Reject ranges where the second column lies left of the first.
            if columns.index(column_from) > columns.index(column_to):
                raise GraphQLError("Drugi stolpec je levo od prvega.")
        return float("{0:.2f}".format(card_total_time(instance, minimal, column_from, column_to)))
class CardLogType(DjangoObjectType):
    """GraphQL type for CardLog, with human-readable string fields."""
    class Meta:
        model = models.CardLog
    log_string = graphene.String()
    si_timestamp = graphene.String()

    def resolve_log_string(instance, info):
        # The model's string representation describes the log entry.
        return str(instance)

    def resolve_si_timestamp(instance, info):
        # Timestamp rendered in the Slovenian date/time format.
        return HelperClass.to_si_timestamp(instance.timestamp)
class CardQueries(graphene.ObjectType):
    """Card-related GraphQL queries: listings, analytics and permissions.

    FIX: the original class defined ``all_card_logs`` and ``who_can_edit``
    twice; only the later definitions (which match the resolvers' signatures)
    were effective, so the dead earlier ones have been removed.
    """
    all_cards = graphene.Field(graphene.List(CardType), card_id=graphene.Int(default_value=-1),
                               board_id=graphene.Int(default_value=-1))
    all_card_types = graphene.List(CardTypeType)
    filter_cards = graphene.List(
        CardType,
        project_id=graphene.Int(default_value=0),
        creation_start=graphene.String(default_value=0),
        creation_end=graphene.String(default_value=0),
        done_start=graphene.String(default_value=0),
        done_end=graphene.String(default_value=0),
        dev_start=graphene.String(default_value=0),
        dev_end=graphene.String(default_value=0),
        estimate_from=graphene.Float(default_value=0),
        estimate_to=graphene.Float(default_value=0),
        card_type=graphene.List(graphene.String, default_value=0)
    )
    avg_lead_time = graphene.Float(
        project_id=graphene.Int(default_value=0),
        creation_start=graphene.String(default_value=0),
        creation_end=graphene.String(default_value=0),
        done_start=graphene.String(default_value=0),
        done_end=graphene.String(default_value=0),
        dev_start=graphene.String(default_value=0),
        dev_end=graphene.String(default_value=0),
        estimate_from=graphene.Float(default_value=0),
        estimate_to=graphene.Float(default_value=0),
        card_type=graphene.List(graphene.String, default_value=0),
        minimal=graphene.Boolean(default_value=True),
        column_from=graphene.String(),
        column_to=graphene.String()
    )
    done_cards = graphene.List(CardType, project_id=graphene.Int(default_value=0))
    cards_per_dev = GenericScalar(
        project_id=graphene.Int(default_value=0),
        creation_start=graphene.String(default_value=0),
        creation_end=graphene.String(default_value=0),
        done_start=graphene.String(default_value=0),
        done_end=graphene.String(default_value=0),
        dev_start=graphene.String(default_value=0),
        dev_end=graphene.String(default_value=0),
        estimate_from=graphene.Float(default_value=0),
        estimate_to=graphene.Float(default_value=0),
        card_type=graphene.List(graphene.String, default_value=0)
    )
    estimate_per_dev = GenericScalar(
        project_id=graphene.Int(default_value=0),
        creation_start=graphene.String(default_value=0),
        creation_end=graphene.String(default_value=0),
        done_start=graphene.String(default_value=0),
        done_end=graphene.String(default_value=0),
        dev_start=graphene.String(default_value=0),
        dev_end=graphene.String(default_value=0),
        estimate_from=graphene.Float(default_value=0),
        estimate_to=graphene.Float(default_value=0),
        card_type=graphene.List(graphene.String, default_value=0)
    )
    cards_per_day = GenericScalar(
        project_id=graphene.Int(default_value=0),
        creation_start=graphene.String(default_value=0),
        creation_end=graphene.String(default_value=0),
        done_start=graphene.String(default_value=0),
        done_end=graphene.String(default_value=0),
        dev_start=graphene.String(default_value=0),
        dev_end=graphene.String(default_value=0),
        estimate_from=graphene.Float(default_value=0),
        estimate_to=graphene.Float(default_value=0),
        card_type=graphene.List(graphene.String, default_value=0),
        date_from=graphene.String(default_value=0),
        date_to=graphene.String(default_value=0),
        column_from=graphene.String(default_value=0),
        column_to=graphene.String(default_value=0)
    )
    wip_logs = graphene.List(
        CardLogType,
        project_id=graphene.Int(default_value=0),
        creation_start=graphene.String(default_value=0),
        creation_end=graphene.String(default_value=0),
        done_start=graphene.String(default_value=0),
        done_end=graphene.String(default_value=0),
        dev_start=graphene.String(default_value=0),
        dev_end=graphene.String(default_value=0),
        estimate_from=graphene.Float(default_value=0),
        estimate_to=graphene.Float(default_value=0),
        card_type=graphene.List(graphene.String, default_value=0),
        date_from=graphene.String(),
        date_to=graphene.String()
    )
    all_card_logs = graphene.Field(graphene.List(CardLogType), card_id=graphene.Int(default_value=-1))
    who_can_edit = graphene.Field(WhoCanEditType,
                                  card_id=graphene.Int(required=False, default_value=None),
                                  user_id=graphene.Int(required=True))

    def resolve_all_cards(self, info, card_id, board_id):
        # card_id/board_id of -1 mean "no filter".
        if board_id == -1:
            if card_id != -1:
                return [models.Card.objects.get(id=card_id)]
            return models.Card.objects.all()
        else:
            cards = list(models.Card.objects.all())
            cards_filtered = [card for card in cards if card.column.board_id == board_id and not card.is_deleted]
            return cards_filtered

    def resolve_all_card_types(self, info):
        return models.CardType.objects.all()

    def resolve_all_card_logs(self, info, card_id):
        if card_id == -1:
            return models.CardLog.objects.all()
        else:
            return models.CardLog.objects.filter(card=models.Card.objects.get(id=card_id))

    def resolve_who_can_edit(self, info, card_id=None, user_id=None):
        """Per-field edit permissions for *user_id* on card *card_id*.

        Role ids used below: 2 = Product Owner, 3 = Kanban master,
        4 = developer (inferred from the error messages).
        """
        # Without a card id everything is editable (new-card form).
        if card_id is None:
            return WhoCanEditType(card_name=True, card_description=True, project_name=True, owner=True,
                                  date=True, estimate=True, tasks=True, priority=True)
        else:
            card = models.Card.objects.get(id=card_id)
            user_teams = models.UserTeam.objects.filter(member=models.User.objects.get(id=user_id),
                                                        team=card.project.team, is_active=True)
            user_team_roles = [user_team.role.id for user_team in user_teams]
            try:
                user_team = user_teams[0]  # just for team and project and stuff
            except IndexError:
                # FIX: narrowed from a bare ``except:``.
                return WhoCanEditType(error="Uporabnik ne more spreminjati kartice druge ekipe!")
            # FIX: the original re-checked is_active in a loop here, but the
            # queryset is already filtered on is_active=True, so that branch
            # ("Neaktivni uporabnik ...") was unreachable and has been removed.
            if user_team.team.id != card.project.team.id:
                return WhoCanEditType(error="Uporabnik ne more spreminjati kartice druge ekipe!")
            card_pos = where_is_card(card)
            if card_pos == 0:
                # Card is still left of the first boundary column.
                if 2 in user_team_roles:
                    if card.type_id == 1:
                        if 4 in user_team_roles:
                            return WhoCanEditType(card_name=False, card_description=False, project_name=False,
                                                  owner=False,
                                                  date=False, estimate=False, tasks=True)
                        else:
                            return WhoCanEditType(error="Product Owner lahko posodablja le normalne kartice.")
                    else:
                        return WhoCanEditType(card_name=True, card_description=True, project_name=True, owner=True,
                                              date=True, estimate=True, tasks=True, priority=True)
                elif 3 in user_team_roles:
                    if card.type_id == 0:
                        if 4 in user_team_roles:
                            return WhoCanEditType(card_name=False, card_description=False, project_name=False,
                                                  owner=False,
                                                  date=False, estimate=False, tasks=True)
                        else:
                            return WhoCanEditType(error="Kanban master lahko posodablja le silver bullet kartice.")
                    else:
                        return WhoCanEditType(card_name=True, card_description=True, project_name=True, owner=True,
                                              date=True, estimate=True, tasks=True, priority=True)
                else:
                    return WhoCanEditType(card_name=False, card_description=False, project_name=False, owner=False,
                                          date=False, estimate=False, tasks=True)
            elif card_pos == 1:
                # Card is in development (between the boundary columns).
                if 2 in user_team_roles:
                    if 4 in user_team_roles:
                        return WhoCanEditType(card_name=False, card_description=False, project_name=False, owner=False,
                                              date=False, estimate=False, tasks=True)
                    else:
                        return WhoCanEditType(error="Product Owner ne more posodabljati kartice ko je že v razvoju.")
                elif 3 in user_team_roles:
                    if card.type_id == 0:
                        if 4 in user_team_roles:
                            return WhoCanEditType(card_name=False, card_description=False, project_name=False,
                                                  owner=False,
                                                  date=False, estimate=False, tasks=True)
                        else:
                            return WhoCanEditType(error="Kanban master lahko posodablja le silver bullet kartice.")
                    else:
                        return WhoCanEditType(card_name=True, card_description=True, project_name=False, owner=False,
                                              date=False, estimate=False, tasks=True, priority=True)
                else:
                    return WhoCanEditType(card_name=False, card_description=False, project_name=False, owner=False,
                                          date=False, estimate=False, tasks=True)
            else:
                # Past the second boundary (done) — no editing at all.
                return WhoCanEditType(error="Posodabljanje kartice ni dovoljeno.")

    def resolve_filter_cards(self, info, project_id, creation_start, creation_end, done_start, done_end, dev_start, \
                             dev_end, estimate_from, estimate_to, card_type):
        return filter_cards(project_id, creation_start, creation_end, done_start, done_end, dev_start, \
                            dev_end, estimate_from, estimate_to, card_type)

    def resolve_avg_lead_time(self, info, project_id, creation_start, creation_end, done_start, done_end, dev_start, \
                              dev_end, estimate_from, estimate_to, card_type, minimal, column_from=None,
                              column_to=None):
        cards = filter_cards(project_id, creation_start, creation_end, done_start, done_end, dev_start, \
                             dev_end, estimate_from, estimate_to, card_type)
        # FIX: guard against an empty result set (was ZeroDivisionError).
        if not cards:
            return 0.0
        if column_from and column_to:
            column_from = models.Column.objects.get(id=column_from)
            column_to = models.Column.objects.get(id=column_to)
        total_time = sum([card_total_time(c, minimal, column_from, column_to) for c in cards]) / len(cards)
        return float("{0:.2f}".format(total_time))

    def resolve_done_cards(self, info, project_id):
        if project_id:
            return done_cards(project_id)
        return []

    def resolve_cards_per_dev(self, info, project_id, creation_start, creation_end, done_start, done_end, dev_start, \
                              dev_end, estimate_from, estimate_to, card_type):
        cards = filter_cards(project_id, creation_start, creation_end, done_start, done_end, dev_start, \
                             dev_end, estimate_from, estimate_to, card_type)
        return cards_per_dev(cards)

    def resolve_estimate_per_dev(self, info, project_id, creation_start, creation_end, done_start, done_end, dev_start, \
                                 dev_end, estimate_from, estimate_to, card_type):
        cards = filter_cards(project_id, creation_start, creation_end, done_start, done_end, dev_start, \
                             dev_end, estimate_from, estimate_to, card_type)
        return estimate_per_dev(cards)

    def resolve_cards_per_day(self, info, project_id, creation_start, creation_end, done_start, done_end, dev_start, \
                              dev_end, estimate_from, estimate_to, card_type, date_from, date_to, column_to,
                              column_from):
        col1 = models.Column.objects.get(id=column_from)
        col2 = models.Column.objects.get(id=column_to)
        columns = models.Column.objects.filter(board=col1.board, parent=None)
        columns = get_columns_absolute(columns, [])
        # Reject ranges where the second column lies left of the first.
        if columns.index(col1) > columns.index(col2):
            raise GraphQLError("Drugi stolpec je levo od prvega.")
        cards = filter_cards(project_id, creation_start, creation_end, done_start, done_end, dev_start, \
                             dev_end, estimate_from, estimate_to, card_type)
        return cards_per_day(cards, date_from, date_to, column_from, column_to)

    def resolve_wip_logs(self, info, project_id, creation_start, creation_end, done_start, done_end, dev_start, \
                         dev_end, estimate_from, estimate_to, card_type, date_from=None, date_to=None):
        cards = filter_cards(project_id, creation_start, creation_end, done_start, done_end, dev_start, \
                             dev_end, estimate_from, estimate_to, card_type)
        logs = models.CardLog.objects.filter(action__isnull=False, card__in=cards)
        if date_from:
            start = HelperClass.get_si_date(date_from)
            logs = logs.filter(timestamp__gte=start)
        if date_to:
            end = HelperClass.get_si_date(date_to)
            logs = logs.filter(timestamp__lte=end + datetime.timedelta(days=1))
        if not logs:
            return []
        columns = models.Column.objects.filter(board=logs.first().to_column.board, parent=None)
        # FIX: flatten the column tree once instead of once per sort key
        # (the original re-ran get_columns_absolute inside the lambda).
        ordered = get_columns_absolute(columns, [])
        return sorted(logs, key=lambda k: ordered.index(k.to_column))
class TasksInput(graphene.InputObjectType):
    """Input payload for a single task attached to a card."""
    id = graphene.Int(required=False)  # set when editing an existing task
    description = graphene.String(required=False, default_value="")
    done = graphene.Boolean(default_value=False)
    assignee_userteam_id = graphene.Int(required=False)  # UserTeam id, optional
    hours = graphene.Float(required=False, default_value=None)
class CardInput(graphene.InputObjectType):
    """Input payload for creating or editing a card."""
    id = graphene.Int(required=False)  # set when editing an existing card
    column_id = graphene.String(required=False)
    type_id = graphene.Int(required=False, default_value=0)
    project_id = graphene.Int(required=True)
    name = graphene.String(required=True)
    # NOTE(review): this default is evaluated once at import time, so the
    # "now + 5 days" expiration is frozen at server start — confirm intended.
    expiration = graphene.String(required=False,
                                 default_value=str(datetime.datetime.now() + datetime.timedelta(5)).split(' ')[0])
    # FIX: was ``requred=False`` (typo); graphene fields default to
    # required=False, so the observable behavior is unchanged.
    owner_userteam_id = graphene.Int(required=False)
    priority = graphene.String(required=True)
    description = graphene.String(required=False, default_value="")
    estimate = graphene.Float(required=False, default_value=1)
    tasks = graphene.List(TasksInput, default_value=[])
class AddCard(graphene.Mutation):
    """Create a new card (with its tasks) and write the creation logs."""

    class Arguments:
        card_data = CardInput(required=True)
        board_id = graphene.Int(required=True)
        user_id = graphene.Int(required=True)

    ok = graphene.Boolean()
    card = graphene.Field(CardType)

    @staticmethod
    def mutate(root, info, card=None, ok=False, card_data=None, board_id=None, user_id=None):
        if card_data.owner_userteam_id is None:
            owner = None
        else:
            owner = models.UserTeam.objects.get(id=card_data.owner_userteam_id)
        board = models.Board.objects.get(id=board_id)
        if card_data.column_id is None:
            # No explicit column: normal cards (type 0) start in the leftmost
            # column, other types in the priority column.
            if card_data.type_id == 0:
                column_id = models.Column.objects.get(board=board, position=0, parent=None).id
            else:
                column_id = models.Column.objects.get(board=board, priority=True).id
            if card_data.type_id == 1:
                # Silver-bullet cards: only one allowed in the priority column.
                column_id = models.Column.objects.get(board=board, priority=True).id
                silver_bullet_cards = models.Card.objects.filter(
                    column=models.Column.objects.get(id=column_id),
                    type=models.CardType.objects.get(id=1),
                    project=models.Project.objects.get(id=card_data.project_id))
                if len(silver_bullet_cards) != 0:
                    raise GraphQLError("V stolpcu z najvišjo prioriteto je lahko samo ena nujna zahteva.")
        else:
            column_id = card_data.column_id
        project = models.Project.objects.get(id=card_data.project_id)
        cards = models.Card.objects.filter(project=project)
        card = models.Card(column=models.Column.objects.get(id=column_id),
                           type=models.CardType.objects.get(id=card_data.type_id),
                           card_number=len(cards) + 1,
                           description=card_data.description,
                           name=card_data.name,
                           estimate=card_data.estimate,
                           project=models.Project.objects.get(id=card_data.project_id),
                           expiration=HelperClass.get_si_date(card_data.expiration).date(),
                           owner=owner,
                           priority=card_data.priority)
        card.save()
        # Reset the was-mail-sent flag for every card on the board.
        # BUG FIX: the original iterated ``for card in cards`` here, rebinding
        # ``card`` so the tasks and logs created below attached to the wrong
        # card whenever this branch ran; distinct loop names are used now.
        projects_on_board = models.Project.objects.filter(board=project.board)
        if card.does_card_expire_soon(card.project.board.days_to_expire):
            for board_project in projects_on_board:
                for board_card in models.Card.objects.filter(project=board_project):
                    board_card.was_mail_send = False
                    board_card.save()
        for task in card_data.tasks:
            if task.assignee_userteam_id is None:
                assignee = None
            else:
                assignee = models.UserTeam.objects.get(id=task.assignee_userteam_id)
            task_entity = models.Task(card=card, description=task.description, done=task.done,
                                      assignee=assignee, hours=task.hours)
            task_entity.save()
        # Card creation log (action=0).
        models.CardLogCreateDelete(card=card, action=0).save()
        cards = models.Card.objects.filter(column=models.Column.objects.get(id=column_id))
        log_action = None
        # Flag the creation when the column's WIP limit is exceeded (0 = no limit).
        if (len(cards) > models.Column.objects.get(id=column_id).wip) and (
                models.Column.objects.get(id=column_id).wip != 0):
            log_action = "Presežena omejitev wip ob kreaciji."
        user_teams = models.UserTeam.objects.filter(
            member=models.User.objects.get(id=user_id), team=card.project.team)
        user_team = None
        # Prefer a membership whose role is not role 4 when the user has several.
        if len(user_teams) > 1:
            for user_t in user_teams:
                if user_t.role != models.TeamRole.objects.get(id=4):
                    user_team = user_t
                    break
        if user_team is None:
            user_team = user_teams[0]
        models.CardLog(card=card, from_column=None, to_column=models.Column.objects.get(id=column_id),
                       action=log_action, user_team=user_team).save()
        return AddCard(ok=True, card=card)
class EditCard(graphene.Mutation):
    """Update an existing card and replace its task list."""

    class Arguments:
        card_data = CardInput(required=True)

    ok = graphene.Boolean()
    card = graphene.Field(CardType)

    @staticmethod
    def mutate(root, info, card=None, ok=False, card_data=None):
        if card_data.owner_userteam_id is None:
            owner = None
        else:
            owner = models.UserTeam.objects.get(id=card_data.owner_userteam_id)
        card = models.Card.objects.get(id=card_data.id)
        # Delete all existing tasks; they are re-created from the input below.
        models.Task.objects.filter(card=card).delete()
        card.column = models.Column.objects.get(id=card_data.column_id)
        card.type = models.CardType.objects.get(id=card_data.type_id)
        card.description = card_data.description
        card.name = card_data.name
        card.estimate = card_data.estimate
        card.project = models.Project.objects.get(id=card_data.project_id)
        old_expiration = card.expiration
        card.expiration = HelperClass.get_si_date(card_data.expiration).date()
        card.save()
        if card.expiration != old_expiration:
            card.was_mail_send = False
            if card.does_card_expire_soon(card.project.board.days_to_expire):
                # Reset the was-mail-sent flag for every card on the board.
                # BUG FIX: the original iterated ``for card in cards`` here,
                # rebinding ``card`` so the owner/priority assignments below
                # were applied to the wrong card; distinct loop names now.
                projects_on_board = models.Project.objects.filter(board=card.project.board)
                for board_project in projects_on_board:
                    for board_card in models.Card.objects.filter(project=board_project):
                        board_card.was_mail_send = False
                        board_card.save()
        card.owner = owner
        card.priority = card_data.priority
        card.save()
        for task in card_data.tasks:
            if task.assignee_userteam_id is None:
                assignee = None
            else:
                assignee = models.UserTeam.objects.get(id=task.assignee_userteam_id)
            task_entity = models.Task(card=card, description=task.description, done=task.done,
                                      assignee=assignee, hours=task.hours)
            task_entity.save()
        return EditCard(ok=True, card=card)
class SetDoneTask(graphene.Mutation):
    """Mark a task done/undone after checking the user's edit permissions."""

    class Arguments:
        task_id = graphene.Int(required=True)
        user_id = graphene.Int(required=True)
        done = graphene.Boolean(required=True)

    ok = graphene.Boolean()
    task = graphene.Field(TaskType)

    @staticmethod
    def mutate(root, info, task_id=None, done=None, user_id=None):
        # Permission check is delegated to the whoCanEdit query by executing
        # it through an ad-hoc schema; any error message blocks the update.
        schema = graphene.Schema(query=CardQueries)
        task = models.Task.objects.get(id=task_id)
        result = schema.execute('{whoCanEdit(cardId: ' + str(task.card_id) + ', userId: ' + str(user_id) + '){error}}')
        if result.data['whoCanEdit']['error'] is not None:
            raise GraphQLError(result.data['whoCanEdit']['error'])
        task.done = done
        task.save()
        return SetDoneTask(task=task, ok=True)
# logi: omejitev wip, kreacija pa delete
class MoveCard(graphene.Mutation):
    """Move a card to another column, enforcing move-distance, WIP-limit and
    task-completion rules, and log the move."""

    class Arguments:
        card_id = graphene.Int(required=True)
        to_column_id = graphene.String(required=True)
        # Non-empty ``force`` confirms a move that exceeds the WIP limit.
        force = graphene.String(required=False, default_value="")
        user_id = graphene.Int(required=True)

    ok = graphene.Boolean()
    card = graphene.Field(CardType)

    @staticmethod
    def mutate(root, info, ok=False, card=None, card_id=None, to_column_id=None, force="", user_id=None):
        card = models.Card.objects.get(id=card_id)
        to_col = models.Column.objects.get(id=to_column_id)
        cards = models.Card.objects.filter(column=to_col, project=card.project, is_deleted=False)
        from_col = card.column
        user_teams = models.UserTeam.objects.filter(
            member=models.User.objects.get(id=user_id), team=card.project.team)
        user_team = None
        # Prefer the membership with role 2 when the user has several.
        if len(user_teams) > 1:
            for user_t in user_teams:
                if user_t.role == models.TeamRole.objects.get(id=2):
                    user_team = user_t
                    break
        if user_team is None:
            user_team = user_teams[0]
        col_list = get_columns_absolute(list(models.Column.objects.filter(board=card.project.board, parent=None)), [])
        to_col_inx = col_list.index(models.Column.objects.get(id=to_column_id))
        from_col_inx = col_list.index(card.column)
        # Moves of more than one column are only allowed as a role-2 rejection
        # out of the acceptance column, back to (at most) the priority column.
        if abs(to_col_inx - from_col_inx) <= 1:
            pass
        else:
            if from_col.acceptance is True and user_team.role == models.TeamRole.objects.get(id=2):
                priority_col = models.Column.objects.get(board=card.column.board, priority=True)
                priority_col_inx = col_list.index(models.Column.objects.get(id=priority_col.id))
                if to_col_inx > priority_col_inx:
                    raise GraphQLError("Ne moreš premikati za več kot ena v levo/desno.")
                else:
                    # Rejected card is highlighted on the board.
                    card.color_rejected = True
                    card.save()
            else:
                raise GraphQLError("Ne moreš premikati za več kot ena v levo/desno.")
        # iz testiranja v levo ali priorty pa samo PO
        log_action = None
        # WIP limit of 0 means "no limit"; otherwise flag when the target
        # column would exceed it.
        if (len(cards) > to_col.wip - 1) and (to_col.wip != 0):
            log_action = force
        # NOTE(review): the nesting of the following checks was reconstructed
        # from flattened source — confirm against the original file.
        if abs(to_col_inx - from_col_inx) != 0:
            if force == "":
                if log_action is not None:
                    # First (unforced) attempt over the WIP limit: ask to confirm.
                    raise GraphQLError("Presežena omejitev wip. Nadaljujem?")
            # preverjanje da so usi taski izpolnjeni
            tasks_done = [task.done for task in card.tasks.all()]
            if not all(tasks_done) and to_col.acceptance:
                raise GraphQLError("V acceptance ready gredo lahko le kartice z vsemi dokončanimi nalogami.")
        card.column = to_col
        card.save()
        models.CardLog(card=card, from_column=from_col, to_column=to_col, action=log_action,
                       user_team=user_team).save()
        return MoveCard(ok=True, card=card)
class DeleteCard(graphene.Mutation):
    """Soft-delete a card: flag it as deleted and log the deletion."""

    class Arguments:
        card_id = graphene.Int(required=True)
        cause_of_deletion = graphene.String(required=True)

    ok = graphene.Boolean()
    card = graphene.Field(CardType)

    @staticmethod
    def mutate(root, info, ok=False, card=None, card_id=None, cause_of_deletion=None):
        target = models.Card.objects.get(id=card_id)
        target.is_deleted = True
        target.cause_of_deletion = cause_of_deletion
        target.save()
        # Record the deletion (action=1) in the create/delete log.
        models.CardLogCreateDelete(card=target, action=1).save()
        return DeleteCard(ok=True, card=target)
class CardMutations(graphene.ObjectType):
    """Root object exposing all card-related mutations."""
    add_card = AddCard.Field()
    edit_card = EditCard.Field()
    delete_card = DeleteCard.Field()
    move_card = MoveCard.Field()
    set_done_task = SetDoneTask.Field()
|
import os
import pickle
import os.path
import datetime
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from googleapiclient.http import MediaFileUpload
class GDrive():
    """Minimal Google Drive client: OAuth bootstrap, file upsert, folder lookup."""

    def __init__(self):
        # OAuth2 flow with the token cached in token.pickle; refreshes an
        # expired token or re-runs the local-server flow when needed.
        scopes = ['https://www.googleapis.com/auth/drive']
        creds = None
        if os.path.exists('token.pickle'):
            with open('token.pickle', 'rb') as token:
                creds = pickle.load(token)
        if not creds or not creds.valid:
            if creds and creds.expired and creds.refresh_token:
                creds.refresh(Request())
            else:
                flow = InstalledAppFlow.from_client_secrets_file(
                    'credentials.json', scopes)
                creds = flow.run_local_server(port=0)
            with open('token.pickle', 'wb') as token:
                pickle.dump(creds, token)
        self.service = build('drive', 'v3', credentials=creds)

    def update(self, filename, path, id):
        """Create *filename* (read from *path*, which must end with a path
        separator) in Drive folder *id*, or refresh existing copies that are
        older than the local file's modification time.

        BUG FIX: the file-name interpolations below were corrupted to a
        literal ``(unknown)`` placeholder; restored to ``{filename}``.
        """
        media = MediaFileUpload(f"{path}{filename}")
        modification_time = datetime.datetime.utcfromtimestamp(os.path.getmtime(path + filename)).strftime('%Y-%m-%dT%H:%M:%S')
        search = self.service.files().list(
            q=f"name='{filename}' and parents='{id}'",
            spaces='drive',
            fields='nextPageToken, files(id, name)',
            pageToken=None).execute()
        time = self.service.files().list(
            q=f"name='{filename}' and parents='{id}' and modifiedTime<'{modification_time}'",
            spaces='drive',
            fields='nextPageToken, files(id, name, modifiedTime)',
            pageToken=None).execute()
        if len(search['files']) == 0:
            # Not on Drive yet: create it inside the target folder.
            metadata = {'name': filename, 'parents': [id]}
            self.service.files().create(body=metadata, media_body=media, fields='id').execute()
        else:
            # Re-upload only the copies that are older than the local file.
            for file in time.get('files', []):
                self.service.files().update(
                    fileId=file.get('id'),
                    media_body=media,
                ).execute()

    def folder(self, name):
        """Return the id of the first Drive folder named *name* (None if absent)."""
        get_folder = self.service.files().list(q=f"name='{name}' and mimeType='application/vnd.google-apps.folder'",
                                               spaces='drive',
                                               fields='nextPageToken, files(id, name)',
                                               pageToken=None).execute()
        for file in get_folder.get('files', []):
            return file.get('id')
def main():
    """Walk a local folder tree and sync every file into a Drive folder."""
    drive = GDrive()
    path = "Your Folder's Path"
    drive_folder_name = "Your Google Drive Folder Name"
    folder_id = drive.folder(drive_folder_name)
    # Collect the full path of every file below `path`.
    all_files = []
    for dirpath, _subdirs, filenames in os.walk(path):
        all_files.extend(os.path.join(dirpath, fname) for fname in filenames)
    for full_path in all_files:
        base = os.path.basename(full_path)
        # Same derivation as the original: strip the basename from the full
        # path to get the directory part (str.replace, all occurrences).
        drive.update(base, full_path.replace(base, ""), folder_id)

if __name__ == '__main__':
    main()
# Simple interactive calculator: read two integers and an operator, print result.
print("start of program")
x = input("enter first number: ")
x = int(x)
y = input("enter second number: ")
y = int(y)
w = input("do you want to + - * /")
if(w == "+"):
    answer = x+y
    print(x,"+",y,"=", answer )
elif(w == "-"):
    answer = x-y
    print(x,"-",y,"=", answer)
elif(w == "*"):
    answer = x*y
    print(x,"*",y,"=", answer )
elif(w == "/"):
    # Fix: guard against ZeroDivisionError instead of crashing.
    if y == 0:
        print("cannot divide by zero")
    else:
        answer = x/y
        print(x,"/",y,"=", answer )
else:
    # Fix: previously an unknown operator silently printed nothing.
    print("unknown operation:", w)
|
# Author's notes about range() parameters, kept as in the original:
'''
start = 2
stop = 21 - 1 = 20
step = 2
'''
'''
for i in range(2,21,2):
    print(i)
'''
# Count down from 10 to 2 inclusive (range stop is exclusive).
for value in reversed(range(2, 11)):
    print(value)
|
from django.shortcuts import redirect
def check_have_blog(func):
    """View decorator: run *func* only for users that own a blog.

    Users without a blog are redirected to the backend index page instead.
    """
    def inner(request, *args, **kwargs):
        # Guard clause: bail out early when the user has no blog.
        if not request.user.blog:
            return redirect('/backend/index.html')
        return func(request, *args, **kwargs)
    return inner
from django.shortcuts import render, render_to_response, redirect
from .forms import UserCreationForm
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from django.views.generic import ListView, FormView
from django.contrib import messages
from django.core.urlresolvers import reverse_lazy
from .models import FileModel
from .forms import UploadForm, ShareForm
# Create your views here.
def index(request):
    """Landing page: logged-in users are sent straight to the homepage."""
    if request.user.is_authenticated():
        # Already logged in -> skip the index page entirely.
        return HttpResponseRedirect('home')
    # Anonymous visitors see the index page (which offers the login option).
    return render_to_response('index.html')
#(gives option to login)
def register(request):
    """Show the registration form; create the user on a valid POST.

    An invalid POST falls through and re-renders the bound form so the
    validation errors are displayed. (Fix: removed the unused local `c`.)
    """
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect('complete')
    else:
        form = UserCreationForm()
    return render(request,'registration/registration_form.html',
                  {'form':form})
def registration_complete(request):
    """Render the static registration-complete confirmation page."""
    template = 'registration/registration_complete.html'
    return render_to_response(template)
class FileListView(ListView):
    """Paginated home view listing the user's own files plus files shared with them."""
    model = FileModel
    context_object_name = "files"
    template_name = "home.html"
    paginate_by = 5
    def get_context_data(self, **kwargs):
        """Add 'file_list' (own + shared files) and 'favorites_list' to the context."""
        context = super(FileListView, self).get_context_data(**kwargs)
        if self.request.user.is_authenticated():
            #print(self.request.user.username)
            # Own files OR files whose shared_with array contains this username.
            files = FileModel.objects.filter(user=self.request.user) | FileModel.objects.filter(shared_with__contains=[self.request.user.username])
        else:
            # Anonymous users see nothing.
            files = FileModel.objects.none()
        context['file_list'] = files
        #print(FileModel.objects.all().values('f'))
        context['favorites_list'] = files.filter(isfavorite = True)
        return context
@login_required
def upload_file(request):
    """Handle the upload form; save the posted file under the current user.

    Note: when a POST form is invalid, execution falls through to the final
    render with the *bound* form, so validation errors are shown.
    """
    if request.user.is_authenticated() and request.method == 'POST':
        form = UploadForm(request.POST, request.FILES)
        if form.is_valid():
            newUpload = FileModel(f = request.FILES['f'])
            newUpload.user = request.user
            newUpload.save()
            return HttpResponseRedirect('/')
    else:
        form = UploadForm()
    return render(request, 'add-boring.html', {'form': form})
@login_required(login_url='/login/')
def delete_file(request, pk):
    """Delete the FileModel row with primary key *pk*, then return home.

    NOTE(review): there is no ownership check — any logged-in user can
    delete any file by id. Confirm whether that is intended.
    """
    indiv_file = FileModel.objects.get(id = pk)
    indiv_file.delete()
    return redirect(reverse('box:home'))
@login_required(login_url='/login/')
def favorite_file(request, pk):
    """Mark file *pk* as a favorite and return to the home view."""
    indiv_file = FileModel.objects.get(id = pk)
    indiv_file.isfavorite = True
    indiv_file.save()
    return redirect(reverse('box:home'))
@login_required(login_url='/login/')
def unfavorite_file(request, pk):
    """Clear the favorite flag on file *pk* and return to the home view."""
    indiv_file = FileModel.objects.get(id = pk)
    indiv_file.isfavorite = False
    indiv_file.save()
    return redirect(reverse('box:home'))
@login_required(login_url='/login/')
def share_file(request, pk):
    """Share file *pk* with another user by adding their username to shared_with.

    The username is only added when it belongs to an existing User.
    """
    if request.method == 'POST':
        form = ShareForm(request.POST)
        if form.is_valid():
            indiv_file = FileModel.objects.get(id = pk)
            to_username = form.cleaned_data['to_username']
            if User.objects.filter(username=to_username).exists():
                # Bug fix: the old code assigned [old_list, name], nesting the
                # previous list inside a new one. Append keeps shared_with a
                # flat list of usernames.
                indiv_file.shared_with = list(indiv_file.shared_with) + [to_username]
            indiv_file.save()
            return redirect(reverse('box:home'))
    else:
        form = ShareForm()
    return render(request, 'share.html', {'form': form})
|
## A simple script that calculates the complex roots of a given quadratic equation, by taking as inputs the coefficients of the equation ##
from cmath import sqrt

def solve_quadratic(a, b, c):
    """Return the two (possibly complex) roots of a*x^2 + b*x + c = 0.

    Raises ZeroDivisionError when a == 0 (the equation is not quadratic).
    """
    disc = sqrt(b ** 2.0 - 4 * a * c)
    sol1 = complex(-b + disc) / complex(2 * a)
    sol2 = complex(-b - disc) / complex(2 * a)
    return sol1, sol2

# Guarding the prompts lets the solver be imported/tested without blocking on stdin.
if __name__ == '__main__':
    print('\nFor the equation in form:\na*x^2+b*x+c=0\n')
    a=float(input("Enter the value of a: "))
    b=float(input("Enter the value of b: "))
    c=float(input("Enter the value of c: "))
    sol1, sol2 = solve_quadratic(a, b, c)
    print(f"\nThe solutions to the quadratic equation are {sol1} and {sol2}")
|
import sys
import collections
import string
import ROOT
from gdata.youtube import service
from random_video import GenerateVideo
from gdata.service import RequestError
#---------------------------------------------------------------------
def comments_generator(client, video_id):
    """
    Returns the list of video comments given the video_ID.
    Paginates through the YouTube comment feed, capped at stop_comments
    entries; returns [] when the video has comments disabled.
    (Python 2 / gdata-era code.)
    """
    comments_to_return = []
    try:
        comment_feed = client.GetYouTubeVideoCommentFeed(video_id=video_id)
        #print comment_feed
    # if the video has disabled comments:
    except RequestError:
        return []
    stop_comments = 15000
    while comment_feed is not None:
        for comment in comment_feed.entry:
            print len(comments_to_return)
            if len(comments_to_return) == stop_comments: break
            comments_to_return.append(comment)
        next_link = comment_feed.GetNextLink()
        if next_link is None:
            comment_feed = None
        else:
            # Fetch the next page; treat request errors as end-of-feed.
            try:
                comment_feed = client.GetYouTubeVideoCommentFeed(next_link.href)
            except RequestError:
                comment_feed = None
        # Stop paginating once the cap has been reached.
        if len(comments_to_return) == stop_comments: break
    return comments_to_return
#---------------------------------------------------------------------
client = service.YouTubeService()
# Define canvas, histograms, legend and set plotting details
canvas = ROOT.TCanvas('Trigger Count Canvas','Trigger Count Canvas',200,10,700,500)
# Signal histograms are red, background histograms are black throughout.
h_TriggerCount_SIG = ROOT.TH1F('Trigger Word Count', 'Trigger Word Count', 8, 0, 8)
h_TriggerCount_SIG.SetLineColor(ROOT.kRed)
h_TriggerCount_BG = ROOT.TH1F('Trigger Word Count', 'Trigger Word Count', 8, 0, 8)
h_TriggerCount_BG.SetLineColor(ROOT.kBlack)
legend = ROOT.TLegend(0.6,0.7,0.9,0.9)
legend.SetBorderSize(0)
legend.AddEntry(h_TriggerCount_SIG, 'Signal', 'l')
legend.AddEntry(h_TriggerCount_BG, 'Background', 'l')
h_CapsCount_SIG = ROOT.TH1F('Caps Count', 'Caps Count', 30, 0, 30)
h_CapsCount_SIG.SetLineColor(ROOT.kRed)
h_CapsCount_BG = ROOT.TH1F('Caps Count', 'Caps Count', 30, 0, 30)
h_CapsCount_BG.SetLineColor(ROOT.kBlack)
h_CapsCount_BG.SetStats(False)
h_CapsCount_BG.GetXaxis().SetTitle('Caps Count')
h_PunctCount_SIG = ROOT.TH1F('Punctuation Count', 'Punctuation Count', 20, 0, 20)
h_PunctCount_SIG.SetLineColor(ROOT.kRed)
h_PunctCount_BG = ROOT.TH1F('Punctuation Count', 'Punctuation Count', 20, 0, 20)
h_PunctCount_BG.SetLineColor(ROOT.kBlack)
h_PunctCount_BG.SetStats(False)
h_PunctCount_BG.GetXaxis().SetTitle('Punctuation Count')
# Define a list of trigger words for plotting trigger word count
trigger_words = ['fuck', 'fucking', 'fuckin', 'fucker', 'fuckers', 'shit', 'bullshit', 'kill', 'rape', 'bitch', 'bitches', 'cunt', 'cunts', 'kitchen', 'sandwich', 'feminazi', 'feminazis']
# count(l1, l2): number of characters of l1 that also appear in l2.
count = lambda l1, l2: len(list(filter(lambda c: c in l2, l1)))
#----------------------------------BACKGROUND------------------------------------------------
bg_comments = []
ncomments = 0
# Keep sampling random videos until 15000 background comments are collected.
while ncomments < 15000:
    BG_VIDEO_ID = GenerateVideo()
    if not BG_VIDEO_ID: continue
    bg_comments += comments_generator(client, BG_VIDEO_ID)
    ncomments = len(bg_comments)
    print 'nComments is %i ' % ncomments
# Loop through the comments and compute variables
for comment in bg_comments:
    # Split text string into words
    try:
        words = comment.content.text.split()
    except AttributeError: continue
    # Count # of CAPS characters in comment
    n_caps_letters = sum(1 for l in comment.content.text if l.isupper())
    h_CapsCount_BG.Fill(n_caps_letters)
    # Count # of punctuation characters in comment
    n_punct_char = count(comment.content.text, string.punctuation)
    h_PunctCount_BG.Fill(n_punct_char)
    # Count # of trigger words in comment
    trigger_count = 0
    for word in words:
        if word.strip(string.punctuation).lower() in trigger_words:
            trigger_count += 1
    h_TriggerCount_BG.Fill(trigger_count)
#----------------------------------SIGNAL------------------------------------------------
# Signal comments: "What It Feels Like to Be a Gamergate Target", 14675 total comments
SIG_VIDEO_ID = 'gAyncf3DBUQ'
sig_comments = []
sig_comments += comments_generator(client, SIG_VIDEO_ID)
# Loop through the signal comments and compute variables
# (same three observables as the background loop above).
for comment in sig_comments:
    # Count # of CAPS characters in comment
    n_caps_letters = sum(1 for l in comment.content.text if l.isupper())
    h_CapsCount_SIG.Fill(n_caps_letters)
    # Count # of punctuation characters in comment
    n_punct_char = count(comment.content.text, string.punctuation)
    h_PunctCount_SIG.Fill(n_punct_char)
    # Count # of trigger words in comment
    trigger_count = 0
    words = comment.content.text.split()
    for word in words:
        if word.strip(string.punctuation).lower() in trigger_words:
            trigger_count += 1
    h_TriggerCount_SIG.Fill(trigger_count)
# Draw and print the histograms/canvas
h_TriggerCount_BG.SetStats(False)
h_TriggerCount_BG.GetXaxis().SetTitle('Trigger Word Count')
# Normalise each histogram to unit area so signal and background shapes
# can be overlaid regardless of sample size.
h_TriggerCount_BG.Scale(1./h_TriggerCount_BG.GetSumOfWeights())
h_TriggerCount_BG.Draw()
h_TriggerCount_SIG.Scale(1./h_TriggerCount_SIG.GetSumOfWeights())
h_TriggerCount_SIG.Draw('same')
legend.Draw('same')
canvas.Print('TriggerCount.png')
canvas.Clear()
# Fresh legend for the caps-count plot.
legend = ROOT.TLegend(0.6,0.7,0.9,0.9)
legend.SetBorderSize(0)
legend.AddEntry(h_CapsCount_SIG, 'Signal', 'l')
legend.AddEntry(h_CapsCount_BG, 'Background', 'l')
h_CapsCount_BG.Scale(1./h_CapsCount_BG.GetSumOfWeights())
h_CapsCount_BG.Draw()
h_CapsCount_SIG.Scale(1./h_CapsCount_SIG.GetSumOfWeights())
h_CapsCount_SIG.Draw('same')
legend.Draw('same')
canvas.Print('CapsFraction.png')
canvas.Clear()
# Fresh legend for the punctuation-count plot.
legend = ROOT.TLegend(0.6,0.7,0.9,0.9)
legend.SetBorderSize(0)
legend.AddEntry(h_PunctCount_SIG, 'Signal', 'l')
legend.AddEntry(h_PunctCount_BG, 'Background', 'l')
h_PunctCount_BG.Scale(1./h_PunctCount_BG.GetSumOfWeights())
h_PunctCount_BG.Draw()
h_PunctCount_SIG.Scale(1./h_PunctCount_SIG.GetSumOfWeights())
h_PunctCount_SIG.Draw('same')
legend.Draw('same')
canvas.Print('PunctuationCount.png')
|
'''
3 2
2 1 1 3 # 2 1 3
4 3
1 4 2 3 4 2 # 1 4 2 3
5 4
2 4 3 5 2 3 1 2 # 1 2 3 5 4
'''
# Fixed 10-test-case loop (SWEA-style judge input): each case gives a vertex
# count, an edge count, and a flat edge list, and expects an ordering printed.
for t in range(1, 11):
    start, end = map(int, input().split())
    data = list(map(int, input().split()))
    adj = [[] for i in range(start + 1)]
    # visit[v] counts how many more times v must be reached before it may be
    # emitted (an indegree-style credit counter); index 0 is unused.
    visit = [None] + [0] * start
    for i in range(0, len(data), 2):
        adj[data[i]].append(data[i + 1])
        visit[data[i + 1]] += 1
    # adj = [[], [3], [1], []]
    # visit = [None, 1, 0, 1]
    answer = ''
    for i in range(1, start+1):
        stack = [i]
        while stack:
            node = stack.pop()
            # print('node')
            # print(node)
            if visit[node] > 0:
                # Node still has unseen predecessors: consume one credit.
                visit[node] -= 1
            elif visit[node] == 0:
                # All predecessors accounted for: emit and explore successors.
                # (None marks already-emitted nodes.)
                visit[node] = None
                answer += str(node) + ' '
                stack.extend(adj[node])
    print(f'#{t} {answer}')
from django.shortcuts import render
from models import *
from django.http import HttpResponse, JsonResponse
import requests
import json
# Create your views here.
def music(request):
    """Search QQ Music via the WeChat search endpoint and render the results.

    POST params: 'name' (search keyword) and 'page' (1-based page, default
    '1'). Plain GET requests render the page with an empty context.
    """
    context = {}
    if request.method == 'POST':
        key_word = request.POST.get('name')
        page = request.POST.get('page', '1')
        res = requests.get('https://auth-external.music.qq.com/open/fcgi-bin/fcg_weixin_music_search.fcg?remoteplace=txt.weixin.officialaccount&w=' + key_word + '&platform=weixin&jsonCallback=MusicJsonCallback&perpage=10&curpage=' + page)
        # The endpoint returns JSONP; slicing strips 19 leading bytes
        # (presumably the 'MusicJsonCallback(' wrapper — confirm) and the
        # trailing two bytes to leave bare JSON.
        r = res.content[19:-2]
        json_r = json.loads(r)
        """
        {"perpage": 10,
        "retcode": 0,
        "keyword": "gaga",
        "curnum": 10,
        "list": [{
        "f": "3822013|Gaga|107007|Afro Sol|324527|Afro Sol|2397384|278|7|1|0|0|4450645|192000|0|0|0|0|0|0|002PNu0K4UKpEO|002911G93IAtsn|004aH0Pe3qqyOn",
        "singername": "Afro Sol - Afro Sol",
        "albumname": "Afro Sol",
        "downUrl": "http://stream7.qqmusic.qq.com/33822013.mp3",
        "t": 1, "ring": 0,
        "m4a": "http://ws.stream.qqmusic.qq.com/3822013.m4a?fromtag=46",
        "id": 3822013, "songname": "Gaga"}],
        "curpage": 1,
        "totalnum": 391}
        """
        context = json_r
    return render(request, 'music.html', context)
def live(request):
    """Render the live page."""
    context = {}
    return render(request, 'live.html', context)
def xss(request):
    """Persist reported cookie/location pairs into the XSS model.

    NOTE(review): this endpoint stores whatever 'cookie' and 'location'
    query parameters it receives — it acts as a collection sink for XSS
    payload callbacks.
    """
    cookies = request.GET.get('cookie', '')
    referer = request.GET.get('location', '')
    XSS.objects.create(cookies=cookies, referer=referer)
    return HttpResponse('success')
def alert(request):
    """Render the alert demo page."""
    context = {}
    return render(request, 'alert.html', context)
def sohu(request):
    """Render the sohu demo page."""
    template_name = 'sohu.html'
    return render(request, template_name, {})
def cross_domain(request):
    """JSONP endpoint: wrap a fixed payload in the caller-supplied callback.

    The callback name comes straight from untrusted input; reflecting it
    verbatim enables JSONP/XSS injection, so it is restricted to a
    JavaScript-identifier-like token. Also fixes the TypeError the old code
    raised when the 'callback' parameter was missing (cb was None).
    """
    import re  # local import keeps this security fix self-contained
    cb = request.GET.get('callback')
    if not cb or not re.fullmatch(r'[A-Za-z_$][\w$.]*', cb):
        return HttpResponse('invalid callback')
    return HttpResponse(cb + '({"cd": "sss"})')
def json_response(request):
    """Return an empty JSON object."""
    payload = {}
    return JsonResponse(payload)
import pytorch_pretrained_bert
import math
def bert_optimizer(model, config, data_loader):
    """Build the BERT optimizer through the config factory.

    Weight decay (0.01) is applied to every parameter except biases and
    LayerNorm parameters; t_total is the estimated total number of
    optimization steps across all epochs.
    """
    # Batches per epoch corrected for gradient accumulation, times epochs.
    num_train_optimization_steps = int(
        math.ceil(data_loader.n_samples / data_loader.batch_size + 0.5)
        / config.gradient_accumulation_steps
    ) * config['trainer']['epochs']
    print(data_loader.n_samples, data_loader.batch_size, config.gradient_accumulation_steps,
          config['trainer']['epochs'], num_train_optimization_steps)
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    named_params = list(model.named_parameters())

    def _gets_decay(name):
        # Parameters whose name contains any blacklist fragment skip decay.
        return not any(fragment in name for fragment in no_decay)

    optimizer_grouped_parameters = [
        {'params': [p for n, p in named_params if _gets_decay(n)], 'weight_decay': 0.01},
        {'params': [p for n, p in named_params if not _gets_decay(n)], 'weight_decay': 0.0},
    ]
    optimizer = config.initialize('optimizer', pytorch_pretrained_bert.optimization,
                                  params=optimizer_grouped_parameters, t_total=num_train_optimization_steps)
    return optimizer
|
# coding: utf-8
from model.yolo_v3 import YOLO_V3
import config as cfg
from data import Data
import tensorflow as tf
import numpy as np
import os
import argparse
class YoloTrain(object):
    """Build the YOLO-v3 TF1 training graph and run the train/test loop.

    Two train ops exist: one updating only the three detection heads (the
    darknet backbone frozen) and one updating all weights. Training starts
    frozen and may switch to all-weights after enough learn-rate decays.
    (Python 2 / TensorFlow 1.x style code.)
    """
    def __init__(self):
        # Hyper-parameters and paths all come from the global config module.
        self.__anchor_per_scale = cfg.ANCHOR_PER_SCALE
        self.__classes = cfg.CLASSES
        self.__num_classes = len(self.__classes)
        self.__learn_rate_init = cfg.LEARN_RATE_INIT
        self.__max_periods = cfg.MAX_PERIODS
        self.__max_wave_time = cfg.MAX_WAVE_TIME
        self.__max_learn_rate_decay_time = cfg.MAX_LEARN_RATE_DECAY_TIME
        self.__weights_dir = cfg.WEIGHTS_DIR
        self.__weights_file = cfg.WEIGHTS_FILE
        self.__log_dir = os.path.join(cfg.LOG_DIR, 'train')
        self.__moving_ave_decay = cfg.MOVING_AVE_DECAY
        self.__save_iter = cfg.SAVE_ITER
        self.__max_bbox_per_scale = cfg.MAX_BBOX_PER_SCALE
        self.__train_data = Data('train')
        self.__test_data = Data('test')
        with tf.name_scope('input'):
            # Placeholders: images, per-scale label tensors and raw boxes
            # (small / medium / large), plus the is-training flag.
            self.__input_data = tf.placeholder(dtype=tf.float32, name='input_data')
            self.__label_sbbox = tf.placeholder(dtype=tf.float32, name='label_sbbox')
            self.__label_mbbox = tf.placeholder(dtype=tf.float32, name='label_mbbox')
            self.__label_lbbox = tf.placeholder(dtype=tf.float32, name='label_lbbox')
            self.__sbboxes = tf.placeholder(dtype=tf.float32, name='sbboxes')
            self.__mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes')
            self.__lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes')
            self.__training = tf.placeholder(dtype=tf.bool, name='training')
        self.__yolo = YOLO_V3(self.__training)
        self.__conv_sbbox, self.__conv_mbbox, self.__conv_lbbox, \
        self.__pred_sbbox, self.__pred_mbbox, self.__pred_lbbox = self.__yolo.build_nework(self.__input_data)
        self.__net_var = tf.global_variables()
        print 'Load weights:'
        for var in self.__net_var:
            print var.op.name
        self.__loss = self.__yolo.loss(self.__conv_sbbox, self.__conv_mbbox, self.__conv_lbbox,
                                       self.__pred_sbbox, self.__pred_mbbox, self.__pred_lbbox,
                                       self.__label_sbbox, self.__label_mbbox, self.__label_lbbox,
                                       self.__sbboxes, self.__mbboxes, self.__lbboxes)
        with tf.name_scope('learn'):
            self.__learn_rate = tf.Variable(self.__learn_rate_init, trainable=False, name='learn_rate_init')
            # Exponential moving average maintained over all trainable vars.
            moving_ave = tf.train.ExponentialMovingAverage(self.__moving_ave_decay).apply(tf.trainable_variables())
            self.__trainable_var_list = []
            # While frozen, only the three detection-head scopes are trained.
            for var in tf.trainable_variables():
                var_name = var.op.name
                var_name_mess = str(var_name).split('/')
                if var_name_mess[0] in ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']:
                    self.__trainable_var_list.append(var)
            optimize0 = tf.train.AdamOptimizer(self.__learn_rate).\
                minimize(self.__loss, var_list=self.__trainable_var_list)
            with tf.control_dependencies([optimize0]):
                with tf.control_dependencies([moving_ave]):
                    self.__train_op_with_frozen_variables = tf.no_op()
            optimize1 = tf.train.AdamOptimizer(self.__learn_rate).\
                minimize(self.__loss, var_list=tf.trainable_variables())
            with tf.control_dependencies([optimize1]):
                with tf.control_dependencies([moving_ave]):
                    self.__train_op_with_all_variables = tf.no_op()
            # Default: start with the darknet backbone frozen.
            self.__train_op = self.__train_op_with_frozen_variables
            print 'Default trian step0 is freeze the weight of darknet'
            for var in self.__trainable_var_list:
                print '\t' + str(var.op.name).ljust(50) + str(var.shape)
        with tf.name_scope('load_save'):
            self.__load = tf.train.Saver(self.__net_var)
            self.__save = tf.train.Saver(tf.global_variables(), max_to_keep=50)
        with tf.name_scope('summary'):
            tf.summary.scalar('loss', self.__loss)
            self.__summary_op = tf.summary.merge_all()
            self.__summary_writer = tf.summary.FileWriter(self.__log_dir)
            self.__summary_writer.add_graph(tf.get_default_graph())
        self.__sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    def train(self, frozen=True):
        """Run the training loop; frozen=True starts with the backbone frozen."""
        self.__sess.run(tf.global_variables_initializer())
        ckpt_path = os.path.join(self.__weights_dir, self.__weights_file)
        print 'Restoring weights from:\t %s' % ckpt_path
        self.__load.restore(self.__sess, ckpt_path)
        learn_rate_decay_time = 0
        test_loss_err_list = []
        test_loss_last = np.inf
        for period in range(self.__max_periods):
            # Number of evaluation periods where the test loss increased.
            wave_time = (np.array(test_loss_err_list) > 0).astype(np.int32).sum()
            if frozen and wave_time == self.__max_wave_time:
                # Test loss oscillated long enough: decay the learn rate and,
                # after the final decay, switch to training all weights.
                test_loss_err_list = []
                test_loss_last = np.inf
                if learn_rate_decay_time < self.__max_learn_rate_decay_time:
                    learning_rate_value = self.__sess.run(
                        tf.assign(self.__learn_rate, self.__sess.run(self.__learn_rate) / 10.0)
                    )
                    print 'The value of learn rate is:\t%f' % learning_rate_value
                # Fine-tune the heads at learn_rate_init * 0.01 until saturated,
                # then fine-tune all weights at learn_rate_init * 0.01.
                learn_rate_decay_time += 1
                if learn_rate_decay_time == (self.__max_learn_rate_decay_time + 1):
                    self.__train_op = self.__train_op_with_all_variables
                    print 'Train all of weights'
                    # Smaller batches once everything becomes trainable.
                    self.__train_data.batch_size_change(2)
                    self.__test_data.batch_size_change(2)
            if not frozen:
                self.__train_op = self.__train_op_with_all_variables
                print 'Train all of weights'
            print_loss_iter = len(self.__train_data) / 10
            total_train_loss = 0.0
            for step, (batch_image, batch_label_sbbox, batch_label_mbbox, batch_label_lbbox,
                       batch_sbboxes, batch_mbboxes, batch_lbboxes) \
                    in enumerate(self.__train_data):
                _, summary_value, loss_value = self.__sess.run(
                    [self.__train_op, self.__summary_op, self.__loss],
                    feed_dict={
                        self.__input_data: batch_image,
                        self.__label_sbbox: batch_label_sbbox,
                        self.__label_mbbox: batch_label_mbbox,
                        self.__label_lbbox: batch_label_lbbox,
                        self.__sbboxes: batch_sbboxes,
                        self.__mbboxes: batch_mbboxes,
                        self.__lbboxes: batch_lbboxes,
                        self.__training: False
                    }
                )
                if np.isnan(loss_value):
                    raise ArithmeticError('The gradient is exploded')
                total_train_loss += loss_value
                # Only report the averaged loss every print_loss_iter steps.
                if (step + 1) % print_loss_iter:
                    continue
                train_loss = total_train_loss / print_loss_iter
                total_train_loss = 0.0
                self.__summary_writer.add_summary(summary_value, period * len(self.__train_data) + step)
                print 'Period:\t%d\tstep:\t%d\ttrain loss:\t%.4f' % (period, step, train_loss)
            # Evaluate and checkpoint only every save_iter periods.
            if (period + 1) % self.__save_iter:
                continue
            total_test_loss = 0.0
            for batch_image, batch_label_sbbox, batch_label_mbbox, batch_label_lbbox, \
                    batch_sbboxes, batch_mbboxes, batch_lbboxes \
                    in self.__test_data:
                loss_value = self.__sess.run(
                    self.__loss,
                    feed_dict={
                        self.__input_data: batch_image,
                        self.__label_sbbox: batch_label_sbbox,
                        self.__label_mbbox: batch_label_mbbox,
                        self.__label_lbbox: batch_label_lbbox,
                        self.__sbboxes: batch_sbboxes,
                        self.__mbboxes: batch_mbboxes,
                        self.__lbboxes: batch_lbboxes,
                        self.__training: False
                    }
                )
                total_test_loss += loss_value
            test_loss = total_test_loss / len(self.__test_data)
            print 'Period:\t%d\ttest loss:\t%.4f' % (period, test_loss)
            saved_model_name = os.path.join(self.__weights_dir, 'yolo.ckpt-%d-%.4f' % (period, test_loss))
            self.__save.save(self.__sess, saved_model_name)
            print 'Saved model:\t%s' % saved_model_name
            # Track the loss trend for the wave/decay logic above.
            test_loss_err_list.append(test_loss - test_loss_last)
            test_loss_last = test_loss
        self.__summary_writer.close()
if __name__ == '__main__':
    # Command-line overrides for the global config before training starts.
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights_file', default='yolo_coco_initial.ckpt', type=str)
    parser.add_argument('--gpu', default='0', type=str)
    parser.add_argument('--batch_size', default='32', type=str)
    parser.add_argument('--frozen', default='True', type=str)
    parser.add_argument('--learn_rate_init', default='0.001', type=str)
    args = parser.parse_args()
    if args.gpu is not None:
        cfg.GPU = args.gpu
    os.environ['CUDA_VISIBLE_DEVICES'] = cfg.GPU
    if args.weights_file is not None:
        cfg.WEIGHTS_FILE = args.weights_file
    cfg.BATCH_SIZE = int(args.batch_size)
    print 'Batch size is:\t%d' % cfg.BATCH_SIZE
    cfg.LEARN_RATE_INIT = float(args.learn_rate_init)
    print 'Initial learn rate is:\t%f' % cfg.LEARN_RATE_INIT
    T = YoloTrain()
    # --frozen is a string flag: 'True' keeps the darknet backbone frozen
    # at the start of training.
    assert args.frozen in ['True', 'False']
    if args.frozen == 'True':
        T.train(True)
    else:
        T.train(False)
|
#!/usr/bin/env python3
import sys
import math
import time
# Reads comma-separated feature rows from stdin. Rows whose last value is not
# -1.0 are labelled training rows; rows ending in -1.0 are unlabelled test rows.
train = []
test = []
for lines in sys.stdin:
    lines = lines.strip()
    features = [float(x) for x in lines.split(',')]
    if features[-1]!=-1.0:
        train.append(features)
        continue
    else:
        # Drop the -1.0 sentinel label for test rows.
        test.append(features[:-1])
# For each test row, print one record per training row: the test features,
# the training label, the Euclidean distance, and the test-row index
# (k-NN distance fan-out). NOTE(review): the inner loop variables are
# misleadingly named — `features` is the training-row INDEX and `lines` is
# the feature list; `streaming_data` is assigned but never used.
for test_value, values in enumerate(test):
    streaming_data = []
    for features, lines in enumerate(train):
        avg = 0.0
        data = []
        data.extend(values)
        # Sum of squared differences over the feature columns (label excluded).
        for i in range(len(lines)-1):
            avg += (float(lines[i]) - float(values[i]))**2
        dist = math.sqrt(avg)
        data.append(str(lines[-1]))
        data.append(str(dist))
        data.append(str(test_value))
        print(data)
|
import json
import datetime
from models.patient import Patient
from models.database import DataAccess
def initiate_archiving():
    """Archive patients that have no recent test records.

    A patient is archived when they have no test records at all, or when
    none of their records was ordered within the last 8 days; in the
    latter case the records are also dumped to file and purged.
    """
    print("Beginning archiving records")
    # get all patient ids (capped at 9000 by the CouchDB-style find limit)
    patient_ids = DataAccess("patients").db.find({"selector": {"_id": {"$gt": None}}, "fields": ["_id"], "limit": 9000})
    for row in patient_ids:
        test_records = DataAccess().db.find({"selector": {"patient_id": row["_id"]}, "limit": 9000})
        if len(test_records) == 0:
            # patient was archived: no records remain, flag the patient
            archive_patient(row["_id"])
        else:
            # Check patient record for last test
            archive_record = check_recent_test(test_records)
            if archive_record:
                archive_patient(row["_id"])
                archive_records(test_records)
def archive_patient(patient_id):
    """Re-save the patient record with the archive flag set.

    The positional True presumably maps to Patient's archived field —
    confirm against the Patient model signature.
    """
    patient = DataAccess("patients").db.get(patient_id)
    pt_record = Patient(patient["_id"], patient["name"], patient["dob"], patient["gender"], True, patient["_rev"])
    pt_record.save()
def archive_records(records):
    """Append *records* to the JSON-lines archive file, then purge them.

    Fixes: the file is now closed via a context manager even when a write
    fails, and each record is written on its own line — the old code
    concatenated raw JSON objects with no separator, producing an
    unparseable archive file.
    """
    backup_name = "records_archive.json"
    with open(backup_name, "a") as backup_file:
        for record in records:
            backup_file.write(json.dumps(record) + "\n")
    # Remove the archived records from the live database.
    DataAccess().db.purge(records)
def check_recent_test(records):
    """Return True when none of *records* was ordered in the last 8 days.

    Each record stores 'date_ordered' as a Unix timestamp (string or
    number). An empty *records* list counts as archivable (returns True),
    matching the original behavior. Fix: strftime('%s') is a
    platform-dependent, undocumented extension (fails on Windows); use the
    portable datetime.timestamp() instead.
    """
    cutoff = (datetime.datetime.now() - datetime.timedelta(days=8)).timestamp()
    for record in records:
        if float(record["date_ordered"]) >= cutoff:
            return False
    return True
if __name__ == '__main__':
    # Entry point: run one archive pass when executed as a script.
    initiate_archiving()
|
from django.shortcuts import render
from django.shortcuts import redirect
from django.http import HttpResponseRedirect
from django.http import Http404
from django.template.response import TemplateResponse
from django.urls import reverse
from django.db import models
from django.db.models import Sum
from school_system.forms import StudentForm,LessonRecordForm
from school_system.forms import StudentEditForm,LessonRecordEditForm
from school_system.models import Student,LessonRecord
import calendar
import datetime
PRICE_HOUR={'English':3500,'Programing':3500,'Finance':3300}
def top(request):
    """Render the site's top page."""
    context = {}
    return render(request, 'school_system/top.html', context)
def index(request):
    """List all students, ordered by id.

    Implements the pending TODO from the original ("make them line up in
    ID order") with an explicit order_by.
    """
    students = Student.objects.all().order_by('id')
    return render(request,'school_system/index.html',{'students':students,})
def lessonrecord_index(request):
    """List every lesson record."""
    records = LessonRecord.objects.all()
    return render(request, 'school_system/lessonrecord_index.html',
                  {'lessonrecords': records})
def student_create(request):
    """Show the student-creation form; persist the student on a valid POST."""
    if request.method == 'POST':
        form = StudentForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('/index')
        # Invalid POST: fall through and re-render the bound form.
    else:
        form = StudentForm()
    return TemplateResponse(request, 'school_system/student_create.html',
                            {'form': form})
def lessonrecord_create(request):
    """Show the lesson-record form; on a valid POST compute the price and save.

    price = lesson_time * PRICE_HOUR[genre]. NOTE(review): a genre missing
    from PRICE_HOUR would raise KeyError — presumably the form restricts
    the choices; confirm.
    """
    if request.method == 'POST':
        form= LessonRecordForm(request.POST)
        if form.is_valid():
            # Defer the save so the derived price can be filled in first.
            lesson=form.save(commit=False)
            lesson.price=lesson.lesson_time * PRICE_HOUR[lesson.genre]
            lesson.save()
            return HttpResponseRedirect(reverse('lessonrecord_index'))
    else:
        form=LessonRecordForm()
    return TemplateResponse(request, 'school_system/lessonrecord_create.html',{'form':form,})
def student_edit(request,student_id):
    """Edit an existing student; raises Http404 when the id is unknown."""
    try:
        student=Student.objects.get(id=student_id)
    except Student.DoesNotExist:
        raise Http404
    if request.method == 'POST':
        form= StudentEditForm(request.POST,instance=student)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('index'))
    else:
        form=StudentEditForm(instance=student)
    # Invalid POSTs fall through with the bound form so errors are shown.
    return TemplateResponse(request,'school_system/student_edit.html',{'form':form, 'student':student})
def lessonrecord_edit(request,lessonrecord_id):
    """Edit a lesson record, recomputing its price; Http404 on unknown id."""
    try:
        lessonrecord=LessonRecord.objects.get(id=lessonrecord_id)
    except LessonRecord.DoesNotExist:
        raise Http404
    if request.method == 'POST':
        form= LessonRecordEditForm(request.POST,instance=lessonrecord)
        if form.is_valid():
            # Defer the save so the price can be recomputed from the genre.
            lesson=form.save(commit=False)
            lesson.price=lesson.lesson_time * PRICE_HOUR[lesson.genre]
            lesson.save()
            return HttpResponseRedirect(reverse('lessonrecord_index'))
    else:
        form=LessonRecordEditForm(instance=lessonrecord)
    return TemplateResponse(request,'school_system/lessonrecord_edit.html',{'form':form,'lessonrecord':lessonrecord})
def invoice_list(request):
    # Placeholder view: renders a static template; the monthly aggregation
    # logic is still pending (a commented-out draft follows in this file).
    return TemplateResponse(request,'school_system/invoice_list.html',{})
# #下記の方策は一旦pending
# def invoice_list(request):
#
# invoice_data= LessonRecord.objects.all()
# #ジャンル一覧を取得
# genre_list=[]
# genre_data= LessonRecord.objects.all().order_by('-genre')
#
# for genre in genre_data:
# genre_list.append(genre.genre)
# #顧客一覧を取得
# user_list=[]
# user_data= LessonRecord.objects.all().order_by('-name')
# for user in user_data:
# user_list.append(user.name)
# #日付一覧を取得
# date_list=[]
# for i in genre_data:
# date_list.append((i.date.strftime('%Y/%m/%d')[:7]))
#
# unique_date_list=list(set(date_list))
# unique_date_list.sort(reverse=False)
#
#
# monthly_sum_data=[]
# for i in range(len(unique_date_list)):
# year,month=unique_date_list[i].split("/")
# month_range= calendar.monthrange(int(year),int(month))[1]
# first_date=year + "-" + month + "-" + "01"
# last_date=year + "-" + month + "-" + str(month_range)
#
# total_of_month=LessonRecord.objects.filter(date__range=(first_date,last_date))
# genre_total=total_of_month.values('genre').annotate(total_price=Sum('price'))
#
#     for j in range(len(genre_total)):
#         money= genre_total[j]['total_price']
#
#
# return TemplateResponse(request,'school_system/invoice_list.html',{})
|
from __future__ import print_function
import numpy as np
import os.path as osp
from robolearn_envs.pybullet.core.bullet_utils import Link
from robolearn_envs.pybullet.core.bullet_sensors import Camera
from robolearn_envs.pybullet.core.bullet_multibody import BulletMultibody
class BulletObject(BulletMultibody):
    def __init__(self, model_file, base_name,
                 init_pos=None, init_ori=None,
                 self_collision=True, use_file_inertia=True,
                 fixed_base=False, pybullet_client=None):
        """Non controllable rigid multi-body.
        Args:
            model_file: path to a .urdf / .mjcf / .sdf model description.
            base_name: name assigned to the multibody's base.
            init_pos: initial base position [x, y, z]; defaults to the origin.
            init_ori: initial base orientation quaternion [x, y, z, w];
                defaults to identity.
            self_collision: enable URDF self-collision flags at load time.
            use_file_inertia: stored but currently unused (see TODO below).
            fixed_base: spawn with an immovable base (URDF only).
            pybullet_client: pybullet client wrapper to spawn into.
        """
        BulletMultibody.__init__(self, pybullet_client)
        # The model type is inferred from the file extension.
        model_type = osp.splitext(model_file)[1][1:]
        if model_type not in ['urdf', 'mjcf', 'sdf']:
            raise NotImplementedError("Wrong model_type: %s in %s. "
                                      "Only .urdf, .mjfc, .sdf are supported"
                                      % (model_type, model_file))
        self._model_type = model_type
        self.model_xml = model_file
        self.base_name = base_name
        self.self_collision = self_collision
        # NOTE(review): attribute name keeps the original 'intertia' typo;
        # renaming it could break external readers of this attribute.
        self.use_file_intertia = use_file_inertia
        if init_pos is None:
            init_pos = [0, 0, 0]
        self.init_base_pos = np.array(init_pos)
        if init_ori is None:
            init_ori = [0, 0, 0, 1]
        self.init_base_ori = np.array(init_ori)
        self.links = None
        self.object_body = None
        self._is_fixed_base = fixed_base
    def spawn_in_pb(self, model_uid=None):
        """Load the model into pybullet (unless *model_uid* is supplied) and
        build the name -> Link mapping."""
        if model_uid is None:
            if self._model_type == 'urdf':
                load_fcn = self.pbc.loadURDF
            elif self._model_type == 'mjcf':
                load_fcn = self.pbc.loadMJCF
            elif self._model_type == 'sdf':
                load_fcn = self.pbc.loadSDF
            # Spawn the robot again
            if self._model_type == 'urdf':
                # TODO: Use self.use_file_inertia for urdf
                if self.self_collision:
                    flags = self.pbc.URDF_USE_SELF_COLLISION_EXCLUDE_PARENT + \
                        self.pbc.URDF_USE_INERTIA_FROM_FILE
                else:
                    flags = self.pbc.URDF_USE_INERTIA_FROM_FILE
                model_uid = load_fcn(
                    self.model_xml,
                    basePosition=self.init_base_pos,
                    baseOrientation=self.init_base_ori,
                    flags=flags,
                    useFixedBase=self._is_fixed_base,
                )
            else:
                # MJCF/SDF loaders take only the file path here.
                model_uid = load_fcn(
                    self.model_xml,
                )
        self.links = {}
        # SDF/MJCF loaders return a tuple of body uids; normalise to a list.
        if not isinstance(model_uid, tuple):
            model_uid = [model_uid]
        self._bodies_uids = model_uid
        for i, bb in enumerate(model_uid):
            # if self.pbc.getNumJoints(bb) == 0:
            part_name, object_name = self.pbc.getBodyInfo(bb)
            object_name = object_name.decode("utf8")
            part_name = part_name.decode("utf8")
            self.links[part_name] = Link(part_name, bb, -1,
                                         pbc=self.pbc)
            if len(model_uid) == 1:
                self.object_body = self.links[part_name]
                self.id = bb
            else:
                # Multiple bodies: the first one is the main object body.
                if i == 0:
                    self.object_body = self.links[part_name]
                    self.id = bb
    def reset(self):
        """Respawn the object and return its current pose."""
        self.spawn_in_pb()
        return self.get_pose()
    def get_total_bodies(self):
        # Number of named links discovered at spawn time.
        return len(self.links)
    def get_body_pose(self, body_name):
        """Return the pose of one named link."""
        return self.links[body_name].get_pose()
    def reset_base_pos(self):
        # NOTE(review): resetBasePositionAndOrientation is called with no
        # arguments; the pybullet API requires a body id, position and
        # orientation, so this call looks like it would raise — confirm.
        self.pbc.resetBasePositionAndOrientation()
    def add_camera(self, body_name, dist=3, width=320, height=320):
        """Attach a Camera sensor tracking the given link."""
        return Camera(self.links[body_name], dist=dist,
                      width=width, height=height)
|
import sys
import time
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from classifier import FCClassifier
class NLIModel(nn.Module):
    """
    Main model class for the NLI task calling SentenceEmbedding and
    Classifier classes
    """
    def __init__(self, config):
        super(NLIModel, self).__init__()
        self.config = config
        # One shared encoder embeds both premise and hypothesis.
        self.sentence_embedding = SentenceEmbedding(config)
        self.classifier = FCClassifier(config)
    def forward(self, batch):
        """Encode the premise/hypothesis pair and classify their relation."""
        prem = self.sentence_embedding(batch.premise) # Sentence embedding for the premise
        hypo = self.sentence_embedding(batch.hypothesis) # Sentence embedding for the hypothesis
        answer = self.classifier(prem, hypo)
        return answer
class SentenceEmbedding(nn.Module):
    """
    Prepare and encode sentence embeddings: look up word vectors, then run the
    configured sentence encoder over them.
    """
    def __init__(self, config):
        super(SentenceEmbedding, self).__init__()
        self.config = config
        # NOTE(review): nn.Embedding(num_embeddings, embedding_dim) — so
        # config.embed_size is presumably the vocabulary size; confirm.
        self.word_embedding = nn.Embedding(config.embed_size, config.embed_dim)
        #self.word_embedding.weight.requires_grad=False # Uncomment if you don't want to fine tune the word embeddings
        # SECURITY/style note: eval() resolves the encoder class from a config
        # string; a dict or getattr dispatch would be safer if the config can
        # ever be user-supplied.
        self.encoder = eval(config.encoder_type)(config)
    def forward(self, input_sentence):
        sentence = self.word_embedding(input_sentence)
        # Re-wrapping .data detaches the embeddings from the autograd graph, so
        # no gradients flow back into word_embedding (appears deliberate, per
        # the commented-out requires_grad line above — confirm intent).
        sentence = Variable(sentence.data)
        embedding = self.encoder(sentence)  # Encode the sentence with the selected sentence encoder
        return embedding
class SumEncoder(nn.Module):
    """
    Basic Sum Encoder: the sentence embedding is the element-wise sum of the
    word vectors (Exercise 1 — previously returned the undefined name `emb`).
    """

    def __init__(self, config):
        super(SumEncoder, self).__init__()
        self.config = config

    def forward(self, inputs):
        # inputs: (seq_len, batch, embed_dim) — assumed torchtext layout,
        # TODO confirm. Summing over the time axis gives (batch, embed_dim).
        return inputs.sum(dim=0)
class LSTMEncoder(nn.Module):
    """
    Basic one-layer LSTM Encoder: the final hidden state is the sentence
    embedding (Exercise 2 — previously returned the undefined name `emb`).
    """

    def __init__(self, config):
        super(LSTMEncoder, self).__init__()
        self.config = config
        # Hidden size mirrors the embedding size because the config exposes no
        # dedicated hidden-size field here — TODO confirm against config.
        self.rnn = nn.LSTM(config.embed_dim, config.embed_dim)

    def forward(self, inputs):
        # inputs: (seq_len, batch, embed_dim) — assumed torchtext layout.
        _, (h_n, _) = self.rnn(inputs)
        # h_n: (num_layers=1, batch, hidden) -> drop the layer axis.
        return h_n.squeeze(0)
class BiLSTMEncoder(nn.Module):
    """
    Bidirectional LSTM with max pooling over time (Exercise 3 — previously
    returned the undefined name `emb`).
    """

    def __init__(self, config):
        super(BiLSTMEncoder, self).__init__()
        self.config = config
        # Hidden size mirrors the embedding size — TODO confirm against config.
        self.rnn = nn.LSTM(config.embed_dim, config.embed_dim, bidirectional=True)

    def forward(self, inputs):
        # inputs: (seq_len, batch, embed_dim) — assumed torchtext layout.
        outputs, _ = self.rnn(inputs)          # (seq_len, batch, 2 * embed_dim)
        # Max-pool over the time axis -> (batch, 2 * embed_dim).
        return outputs.max(dim=0)[0]
class ConvEncoder(nn.Module):
    """
    Hierarchical Convolutional Encoder: stacked 1-D convolutions whose
    per-level max-pooled feature maps are concatenated (bonus exercise —
    previously returned the undefined name `emb`).
    """

    _NUM_LEVELS = 4  # depth of the convolution hierarchy

    def __init__(self, config):
        super(ConvEncoder, self).__init__()
        self.config = config
        d = config.embed_dim
        # Same-width convs so every level pools to (batch, embed_dim).
        self.convs = nn.ModuleList(
            [nn.Conv1d(d, d, kernel_size=3, padding=1) for _ in range(self._NUM_LEVELS)]
        )

    def forward(self, inputs):
        # inputs: (seq_len, batch, embed_dim) — assumed torchtext layout.
        # Conv1d expects (batch, channels, seq_len).
        x = inputs.permute(1, 2, 0)
        pooled = []
        for conv in self.convs:
            x = torch.relu(conv(x))
            pooled.append(x.max(dim=2)[0])  # (batch, embed_dim) at this level
        # Concatenate all levels -> (batch, _NUM_LEVELS * embed_dim).
        return torch.cat(pooled, dim=1)
|
from random import randint
import random
from CX import crossoverOperator
from CX import crossoverOperator2
# --- GA parameters and problem data ----------------------------------------
population = []  # list that holds paths (each path is a permutation of cities)
population_size = 10  # max 120 combinations
mutate_prob = 0.1  # per-individual probability of a swap mutation
n_generations = 1
routes_length = [0]*population_size  # total length of each individual's route
fitness = [0]*population_size  # 1/length; higher is fitter
best_path = 1000  # shortest route length seen so far (initial sentinel)
cities = [0, 1, 2, 3, 4,5,6,7,8,9]
#,10,11,12,13,14,15,16,17,18,19,20
# distance matrix for our cities
# NOTE(review): `distances1` is an unused 5-city symmetric matrix kept for
# reference; the active 10-city `distances` matrix is asymmetric and contains
# zeros off the diagonal — confirm the data is intentional.
distances1= [[0, 4, 4, 7, 3],
             [4, 0, 2, 3, 5],
             [4, 2, 0, 2, 3],
             [7, 3, 2, 0, 6],
             [3, 5, 3, 6, 0]]
distances = [
    [ 0, 510 ,0, 635 ,355 , 0 , 91, 415, 605, 0],
    [ 385 , 585 ,390 , 350 ,0 , 155 , 475 , 495 , 120 ,240],
    [0 ,110 , 480 , 570 ,78 ,320 , 96 , 0, 130 , 500],
    [540 , 97, 285, 36 , 29 , 0 , 490 , 605 , 295 , 460],
    [120 , 350 , 425 , 390 , 0 ,370 , 320 , 700 , 0 , 590],
    [365 ,350 , 370 , 625 , 0 ,155 , 380 , 640 , 63 , 430],
    [200 ,160 ,175 , 535 ,240 , 0 , 68 , 440 , 575 , 27],
    [320 , 91 , 48 , 67 ,430 ,300 , 90 , 0 , 610 , 360],
    [705 ,520 , 835 , 605 ,590 ,610 ,865 , 250 , 480 , 545],
    [0 ,655 ,235 , 585 ,555 ,750 ,615 , 625 , 645 ,775]
]
# calculates distance between 2 cities
def calc_distance(city1, city2):
    """Return the distance from city1 to city2 from the global matrix.

    (Removed leftover debug prints that ran on every lookup.)
    """
    return distances[city1][city2]
# creates a random route
def create_route():
    """Return a random rotation of the global `cities` list.

    Fixes: no longer shadows the builtin `list`, debug prints removed.
    Note: randint's inclusive upper bound means offset == len(cities) is
    possible; the modulo makes it equivalent to offset 0, so behaviour is kept.
    """
    n = len(cities)
    offset = randint(0, n)
    return [cities[(i + offset) % n] for i in range(n)]
# calculates length of an route
def calc_route_length():
    """Recompute each individual's route length and fitness (1/length).

    Writes into the global `routes_length` and `fitness` lists.
    (Removed per-iteration debug prints that dominated the runtime.)
    """
    for i in range(population_size):
        route_l = 0
        for j in range(1, len(cities)):
            route_l = route_l + calc_distance(population[i][j - 1], population[i][j])
        # NOTE: the return leg (last city back to first) is intentionally not
        # added, matching the original commented-out line.
        routes_length[i] = route_l
        fitness[i] = 1 / routes_length[i]
# creates starting population
def create_population():
    """Fill the global `population` with `population_size` random routes."""
    for _ in range(population_size):
        population.append(create_route())
# swap with a probability 2 cities in a route
def swap_mutation(ind):
    """Swap two randomly chosen cities of route `ind` in place."""
    a, b = random.sample(range(len(cities)), 2)
    population[ind][a], population[ind][b] = population[ind][b], population[ind][a]
# PMX crossover
def partially_matched_crossover(ind1, ind2):
    """PMX crossover: exchange the segment between two random cut points while
    repairing duplicates via position bookkeeping.

    Both parents must be equal-length permutations of 0..len-1; they are
    mutated in place and also returned.
    Generalized: the size now comes from the parents themselves instead of the
    global `cities`, so the operator works for any route length.
    """
    size = len(ind1)
    p1, p2 = [0] * size, [0] * size
    # Initialize the position of each value in the individuals.
    for k in range(size):
        p1[ind1[k]] = k
        p2[ind2[k]] = k
    # Choose crossover points.
    cxpoint1 = random.randint(0, size)
    cxpoint2 = random.randint(0, size - 1)
    if cxpoint2 >= cxpoint1:
        cxpoint2 += 1
    else:  # Swap the two cx points
        cxpoint1, cxpoint2 = cxpoint2, cxpoint1
    # Apply crossover between cx points.
    for k in range(cxpoint1, cxpoint2):
        # Keep track of the selected values.
        temp1 = ind1[k]
        temp2 = ind2[k]
        # Swap the matched values so each list stays a permutation.
        ind1[k], ind1[p1[temp2]] = temp2, temp1
        ind2[k], ind2[p2[temp1]] = temp1, temp2
        # Position bookkeeping.
        p1[temp1], p1[temp2] = p1[temp2], p1[temp1]
        p2[temp1], p2[temp2] = p2[temp2], p2[temp1]
    return ind1, ind2
# function that picks a parent Fitness Proportionate Selection
def roulette_wheel_selection():
    """Fitness-proportionate (roulette wheel) selection; returns an index.

    Bug fix: the old loop accumulated fitness[0] before the first possible
    stop, so individual 0 could only be chosen when rand == 0; the returned
    index was therefore shifted one too high.  The standard cumulative scan
    below selects individual m with probability fitness[m] / total.
    """
    total = sum(fitness[:population_size])
    rand = random.uniform(0, total)
    cumulative = 0
    for m in range(population_size):
        cumulative = cumulative + fitness[m]
        if cumulative >= rand:
            return m
    # Guard against float rounding leaving rand just above the last sum.
    return population_size - 1
# find fittest path called every generation
def find_fittest():
    """Return the index of the shortest route in the current population.

    Fixes: removed leftover debug prints and the hard-coded sentinel 1000,
    which silently returned index 0 whenever every route was >= 1000.
    """
    return min(range(population_size), key=lambda i: routes_length[i])
# sorts parallely the lists
#def sort_alongside(routes_length, population):
# routes_length, population = (list(i) for i in zip(*sorted(zip(routes_length, population))))
# --- GA driver --------------------------------------------------------------
# initialize algorithm
create_population()
print("Population initialization:", "\n", population)
calc_route_length()
print("Population's paths length:", "\n", routes_length)
for j in range(n_generations):
    # Crossover: rebuild the population two children at a time.
    for i in range(0, population_size, 2):
        # pick parents for crossover
        parent1 = roulette_wheel_selection()
        parent2 = roulette_wheel_selection()
        # always pick different parents (not necessary)
        while True:
            if parent1 == parent2:
                parent2 = roulette_wheel_selection()
            else:
                break
        # update population
        # population[i], population[i + 1] = partially_matched_crossover(population[parent1], population[parent2])
        population[i], population[i + 1] = crossoverOperator(population[parent1], population[parent2])
    # calculate lengths for updated generation
    calc_route_length()
    # pick the paths for mutation based on a probability
    for i in range(population_size):
        rand = random.uniform(0, 1)
        if rand < mutate_prob:
            swap_mutation(i)
    # calculate lengths after mutation
    calc_route_length()
    # NOTE(review): `index` is reset to 0 on every generation, so if a later
    # generation finds no new best, the final report below uses index 0 rather
    # than the best individual seen so far — likely a bug; initialise `index`
    # once before the generation loop instead.
    index = 0
    # find best path overall
    if routes_length[find_fittest()] < best_path:
        print('shakeeb')  # NOTE(review): leftover debug print
        print(index)
        index = find_fittest()
        best_path = routes_length[index]
    print("Best route of generation", j+1, ": ", population[find_fittest()], "\n" "Route length: ",
          routes_length[find_fittest()])
    # print("Population of generation", j+1, ": \n", population)
    print("Routes lengths:", routes_length, "\n")
print("Best path is:", population[index], "with length", best_path)
# Python Exercise 29: read a car's speed; above 80 km/h the driver is fined
# R$7.00 for every km over the limit.
speed = int(input('Digite sua velocidade: '))
if speed > 80:
    fine = (speed - 80) * 7.00
    print('Você foi multado em R${:.2f}!'.format(fine))
else:
    print('Você está na velocidade correta.')
|
from app import db
'''Make classes declared here
available in make_shell_context in
Manager.py'''
class Category(db.Model):
    """Inventory category; one-to-many with Item via the `category` backref."""
    __tablename__ = 'inv_categories'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    description = db.Column(db.String(128))
    items = db.relationship('Item', backref='category')

    def __repr__(self):
        # Fixed typo in the repr label (was 'Categorie').
        return '<Category %r>' % self.name
class Item(db.Model):
    """Inventory item with stock counts, price, and category/brand FKs."""
    # Bug fix: was `__tablename` (missing trailing underscores), so SQLAlchemy
    # ignored it and derived the table name from the class instead.
    __tablename__ = 'item'
    itm_id = db.Column(db.Integer, primary_key=True)
    itm_code = db.Column(db.String(64), unique=True)
    itm_customs_code = db.Column(db.String(64), unique=True)
    itm_quantity_on_hand = db.Column(db.Integer)
    itm_quantity_on_order = db.Column(db.Integer)
    itm_price = db.Column(db.Float)
    itm_category_id_fk = db.Column(db.Integer, db.ForeignKey('inv_categories.id'))
    itm_brand_id_fk = db.Column(db.Integer, db.ForeignKey('cmn_brands.bnd_id'))

    def __repr__(self):
        # Bug fix: `self.code` does not exist; the column is `itm_code`.
        return '<Item %r>' % self.itm_code
|
'''
Disjoint Sets support the following operations:
    make_set(x): creates a singleton set {x}
    find(x): returns the ID of the set containing x
    union(x, y): merges the two sets containing x and y, respectively.
'''
class Disjoint_Sets_Element:
    """A single forest node: parent index plus union-by-rank rank."""

    def __init__(self, _parent=-1, _rank=0):
        self.parent = _parent
        self.rank = _rank


class Disjoint_Sets:
    """Disjoint-set (union-find) forest using union by rank."""

    def __init__(self, _size):
        # make_set: every element starts out as its own singleton set.
        self.sets = [Disjoint_Sets_Element(i, 0) for i in range(_size)]
        self.size = _size
        self.num_of_sets = _size

    def find(self, i):
        """Return the representative (root index) of the set containing i."""
        while self.sets[i].parent != i:
            i = self.sets[i].parent
        return i

    def merge(self, i, j):
        """Union the sets containing i and j; no-op if already joined."""
        i_id = self.find(i)
        j_id = self.find(j)
        if i_id == j_id:
            return
        self.num_of_sets -= 1
        if self.sets[i_id].rank < self.sets[j_id].rank:
            # Attach the shallower tree under the deeper one.
            self.sets[i_id].parent = j_id
        else:
            self.sets[j_id].parent = i_id
            if self.sets[i_id].rank == self.sets[j_id].rank:
                self.sets[i_id].rank += 1

    def print_state(self):
        """Dump the parent and rank arrays (debug helper)."""
        print("parent")
        for i in range(self.size):
            print(f'{self.sets[i].parent} ', end='')
        print('\n')
        print("rank")
        for i in range(self.size):
            print(f'{self.sets[i].rank} ', end='')
        print('\n')
if __name__ == '__main__':
    # Demo: merge a fixed sequence of pairs, dumping the forest after each.
    size = 6
    dis_sets = Disjoint_Sets(size)
    for a, b in [(1, 3), (4, 1), (2, 0), (1, 2), (1, 5)]:
        dis_sets.merge(a, b)
        dis_sets.print_state()
|
#!/usr/bin/env python
"""
Runner class of Dotmanager.
Run configuration scripts.
"""
import os
import subprocess
__author__ = "Romain Ducout"
class Runner():
    """Applies the runs of a configuration."""

    def __init__(self, config, config_root):
        # config: parsed configuration mapping; config_root: base directory
        # that relative run paths are resolved against.
        self.config = config
        self.config_root = config_root

    def apply_configuration(self):
        """Parse the configuration file and apply its runs."""
        self.manage_runs()

    @staticmethod
    def manage_run(target_path, run_config):
        """Apply a single run configuration: print its description (if any)
        and execute the target script."""
        target_path = os.path.expanduser(target_path)
        if "description" in run_config:
            print(run_config["description"])
        subprocess.call([target_path])

    def manage_runs(self):
        """Apply all run configurations of the configuration file."""
        if "runs" not in self.config:
            return
        # Idiom fix: iterate key/value pairs directly instead of indexing the
        # dict for each key.
        for link_path, run_config in self.config["runs"].items():
            Runner.manage_run(
                os.path.join(self.config_root, link_path),
                run_config
            )
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
from datetime import date
# Snapshot of today's date, used as the reference point by
# order_by_modified_first below.
today = date.today()

# --- Site identity ----------------------------------------------------------
AUTHOR = "Leonardo Giordani"
SITENAME = "The Digital Cat"
SITESUBTITLE = "Adventures of a curious cat in the land of programming"
SITEURL = ""
DEBUG = True
# WEBASSETS_DEBUG = True # This unfortunately messes up the scrolling in the page
PATH = "content"
TIMEZONE = "Europe/Paris"
DEFAULT_LANG = "en"

# --- Feeds and sidebar ------------------------------------------------------
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = "atom.xml"
TAG_FEED_ATOM = "categories/{slug}/atom.xml"
CATEGORY_FEED_ATOM = "category/{slug}/atom.xml"
TRANSLATION_FEED_ATOM = None
DISPLAY_FEEDS_ON_SIDEBAR = True
DISPLAY_TAGS_ON_SIDEBAR = False
RELATED_POSTS_MAX = 10
JINJA_ENVIRONMENT = {"extensions": ["jinja2.ext.with_", "jinja2.ext.do"]}

# --- URL layout (each *_SAVE_AS mirrors its *_URL) --------------------------
ARTICLE_URL = "blog/{date:%Y}/{date:%m}/{date:%d}/{slug}/"
ARTICLE_SAVE_AS = ARTICLE_URL + "index.html"
CATEGORY_URL = "category/{slug}/"
CATEGORY_SAVE_AS = CATEGORY_URL + "index.html"
TAG_URL = "categories/{slug}/"
TAG_SAVE_AS = TAG_URL + "index.html"
ARCHIVES_URL = "archives/"
ARCHIVES_SAVE_AS = ARCHIVES_URL + "index.html"
AUTHOR_URL = "authors/{slug}/"
AUTHOR_SAVE_AS = AUTHOR_URL + "index.html"
SITEMAP = {
    "format": "xml",
}
DEFAULT_DATE_FORMAT = "%d/%m/%Y"
def order_by_modified_first(article):
    """Sort key: age (today minus last-modified date), falling back to the
    publication date for articles that were never modified."""
    try:
        age = today - article.modified.date()
    except AttributeError:
        age = today - article.date.date()
    return age
ARTICLE_ORDER_BY = order_by_modified_first
SOCIAL = [
{"name": "Twitter", "icon": "twitter", "url": "https://twitter.com/thedigicat"},
{
"name": "GitHub",
"icon": "github",
"url": "https://github.com/TheDigitalCatOnline",
},
]
DEFAULT_PAGINATION = 9
PAGINATION_PATTERNS = (
(1, "{base_name}/", "{base_name}/index.html"),
(2, "{base_name}/page/{number}/", "{base_name}/page/{number}/index.html"),
)
# Uncomment following line if you want document-relative URLs when developing
RELATIVE_URLS = True
STATIC_PATHS = [
"images",
"code",
"notebooks",
"files",
"extra/CNAME",
"extra/robots.txt",
]
EXTRA_PATH_METADATA = {
"extra/CNAME": {"path": "CNAME"},
"extra/robots.txt": {"path": "robots.txt"},
}
TWITTER_USERNAME = "thedigicat"
THEME = "../bengal"
FAVICON = "images/global/favicon.jpg"
MARKDOWN = {
"extension_configs": {
"markdown.extensions.codehilite": {"css_class": "highlight"},
"markdown.extensions.extra": {},
"markdown.extensions.meta": {},
"markdown.extensions.toc": {"permalink": True},
"mdx_video": {},
},
"output_format": "html5",
}
MAU = {
"custom_templates": {
"header.html": (
'<h{{ level }} id="{{ anchor }}">'
"{{ value }}"
'{% if anchor and level <= 2 %}<a class="headerlink" href="#{{ anchor }}" title="Permanent link">¶</a>{% endif %}'
"</h{{ level }}>"
),
"block-admonition.html": (
'<div class="admonition {{ kwargs.class }}">'
'<i class="fa fa-{{ kwargs.icon }}"></i>'
'<div class="content">'
'<div class="title">{{ kwargs.label }}</div>'
"<div>{{ content }}</div>"
"</div>"
"</div>"
),
"block-infobox.html": (
'<div class="infobox">'
'<i class="fa fa-{{ kwargs.icon }}"></i>'
'<div class="title">{{ kwargs.title }}</div>'
"<div>{{ content }}</div>"
"</div>"
),
"block-advertisement.html": (
"""
<div class="advertisement">
<a href="{{ kwargs.target }}">
<img src="{{ kwargs.image }}" />
</a>
<div class="body">
{{ content }}
<div class="actions">
<a class="action" href="{{ kwargs.target }}">{{ kwargs.action }}</a>
</div>
</div>
</div>
"""
),
"block-source.html": (
'<div{% if blocktype %} class="code"{% endif %}>'
'{% if title %}<div class="title">{{ title }}</div>{% endif %}'
'<div class="content">{{ content }}</div>'
'{% if kwargs.callouts %}<div class="callouts">'
"<table><tbody>"
"{% for callout in kwargs.callouts %}<tr>"
"<td>{{ callout[0] }}</td>"
"<td>{{ callout[1] }}</td>"
"</tr>{% endfor %}"
"</tbody></table>"
"</div>{% endif %}"
"</div>"
),
"image.html": (
'<div class="imageblock">'
'<img src="{{ uri }}"{% if alt_text %} alt="{{ alt_text }}"{% endif %}>'
'{% if title %}<div class="title">{{ title }}</div>{% endif %}'
"</div>"
),
},
"pygments": {"html": {}},
}
QUOTES = [
{
"quote": "I've seen things you people wouldn't believe. Attack ships"
" on fire off the shoulder of Orion. I watched C-beams glitter in the"
" dark near the Tannhäuser Gate. All those moments will be lost in"
" time, like tears in rain. Time to die.",
"source": "Blade Runner, 1982",
},
{
"quote": "Look at this. It’s worthless — ten dollars from a vendor in"
" the street. But I take it, I bury it in the sand for a thousand"
" years, it becomes priceless.",
"source": "Raiders of the Lost Ark, 1981",
},
{
"quote": "Roads? Where we're going, we don't need... roads.",
"source": "Back to the Future, 1985",
},
]
|
from client_chuli import *
class Gui:
    """Tkinter GUI layer for the FTP client: login/registration windows, the
    main file-manager window, and progress/message-box helpers.

    NOTE(review): relies on `from client_chuli import *` for the Tk widgets
    and the `chuli` handler — confirm those names against client_chuli.
    """
    def __init__(self, connfd):
        # connfd: connected socket shared by all server interactions.
        self.connfd = connfd
        self.state = 0
    # Create the registration window (built on top of the login window `t`);
    # `v` is the login window's status StringVar.
    def client_signgui(self, t, v):
        # Reset the login window status message
        # v.set('')
        # Build the registration window
        top2 = Toplevel(t)
        top2.title("注册")
        top2.geometry("400x200+200+20")
        Label(top2, text="用户名:").place(x=50, y=40)
        Label(top2, text="密码:").place(x=50, y=80)
        Label(top2, text="确认密码:").place(x=50, y=120)
        # Status label of the registration window
        var = StringVar()
        Label(top2, textvariable=var).place(x=190, y=150)
        # Username entry
        ns = Variable()
        ns.set("请输入用户名")
        entrynames = Entry(top2, textvariable=ns)
        entrynames.place(x=120, y=40)
        # Password entry
        pwds = Variable()
        entrypwds = Entry(top2, textvariable=pwds)
        entrypwds.place(x=120, y=80)
        # Password confirmation entry
        pwds1 = Variable()
        entrypwds1 = Entry(top2, textvariable=pwds1)
        entrypwds1.place(x=120, y=120)
        # Submit-registration button
        b_sign = Button(top2, text='注册', command=lambda: chuli.usrsign
                        (var, ns, pwds, pwds1, top2, self.connfd))
        b_sign.place(x=130, y=150)
        top2.mainloop()
    # Create the login window; `obj` is forwarded to the login handler.
    def client_landgui(self, obj):
        top = Tk()
        top.title("登录")
        top.geometry("400x300+200+20")
        Label(top, text="用户名:").place(x=50, y=150)
        Label(top, text="密码:").place(x=50, y=190)
        # Status label
        var = StringVar()
        Label(top, textvariable=var).place(x=150, y=270)
        # Username entry
        n = Variable()
        n.set("请输入用户名")
        entryname = Entry(top, textvariable=n)
        entryname.place(x=120, y=150)
        # Password entry (masked)
        pwd = Variable()
        entrypwd = Entry(top, textvariable=pwd, show="*")
        entrypwd.place(x=120, y=190)
        # Login button
        b_login = Button(top, text='登录', command=lambda: chuli.land_client(
            n, var, pwd, top, self.connfd, obj))
        b_login.place(x=130, y=230)
        # Register button (opens the registration window above)
        b_sign = Button(
            top, text='注册', command=lambda: Gui.client_signgui(self, top, var))
        b_sign.place(x=230, y=230)
    def client_zjm(self):
        """Build the main file-manager window and its frames/list widgets."""
        # Main user interface window
        self.top1 = Tk()
        self.top1.title("FTP文件管理系统")
        self.top1.geometry("600x400+200+20")
        # Folder-selection frame
        self.fr1 = LabelFrame(self.top1, width=150, height=100, text='文件夹')
        self.fr1.grid(row=0, column=0, padx=5)
        # File-list frame
        self.fr2 = LabelFrame(self.top1, width=250, height=390, text='文件列表')
        self.fr2.grid(row=0, column=1, padx=5, rowspan=24)
        # User-info frame
        self.fr3 = LabelFrame(self.top1, width=150, height=100, text='用户信息')
        self.fr3.grid(row=1, column=0)
        # Upload/download info frame
        self.fr4 = LabelFrame(self.top1, width=150, height=190, text='上传下载信息')
        self.fr4.grid(row=2, column=0)
        # File listbox with attached scrollbar
        self.filelist2 = Listbox(self.fr2)
        self.filelist2.place(x=0, y=0, width=230, height=334)
        self.sb2 = Scrollbar(self.fr2)
        self.sb2.place(x=230, y=0, height=333)
        self.filelist2.config(yscrollcommand=self.sb2.set)
        self.sb2.config(command=self.filelist2.yview)
    def sendgui(self, view, download, upload, delete):
        """Wire the main window's controls to the supplied callbacks."""
        self.var = IntVar()
        self.var.set(0)  # default to showing the shared folder
        L = ['共享文件夹', '个人文件夹']
        for i in range(2):
            rb1 = Radiobutton(self.fr1, text=L[i], font=(
                10), variable=self.var, value=i, command=lambda: view(self.var, self.t, self.t1))
            rb1.place(x=0, y=i * 35)
        # Labels inside the user-info frame
        Label(self.fr3, text='用户名:', font=10).place(x=0, y=5)
        Label(self.fr3, text='剩余空间:', font=10).place(x=0, y=35)
        self.t = StringVar()
        self.t1 = StringVar()
        Label(self.fr3, textvariable=self.t, font=10).place(x=80, y=5)
        Label(self.fr3, textvariable=self.t1, font=10).place(x=80, y=35)
        # Labels inside the transfer-info frame (three transfer slots)
        Label(self.fr4, text='文件名:').place(x=0, y=0)
        Label(self.fr4, text='进度:').place(x=0, y=20)
        self.d1 = StringVar()
        self.dj1 = StringVar()
        Label(self.fr4, textvariable=self.d1).place(x=40, y=0)
        Label(self.fr4, textvariable=self.dj1).place(x=31, y=20)
        Label(self.fr4, text='文件名:').place(x=0, y=55)
        Label(self.fr4, text='进度:').place(x=0, y=75)
        self.d2 = StringVar()
        self.dj2 = StringVar()
        Label(self.fr4, textvariable=self.d2).place(x=40, y=55)
        Label(self.fr4, textvariable=self.dj2).place(x=31, y=75)
        Label(self.fr4, text='文件名:').place(x=0, y=105)
        Label(self.fr4, text='进度:').place(x=0, y=125)
        self.d3 = StringVar()
        self.dj3 = StringVar()
        Label(self.fr4, textvariable=self.d3).place(x=40, y=105)
        Label(self.fr4, textvariable=self.dj3).place(x=31, y=125)
        # Buttons inside the file-list frame
        Button(self.fr2, text="下载", command=lambda: download(
            self.var)).place(x=190, y=335)
        Button(self.fr2, text="刷新", command=lambda: view(
            self.var, self.t, self.t1)).place(x=139, y=335)
        Button(self.fr2, text="上传", command=lambda: upload(
            self.var)).place(x=0, y=335)
        Button(self.fr2, text="删除", command=lambda: delete(
            self.var)).place(x=88, y=335)
    # Progress display, step 1: bind a transfer slot (1-3) to `filename` and
    # reset the per-transfer counters.
    def jindu(self, filename, value):
        self.Lb = [self.d1, self.d2, self.d3]
        self.Lbj = [self.dj1, self.dj2, self.dj3]
        self.Lb[value - 1].set(filename)
        self.ci = 0  # counts chunks; derives one 1% step from the file size
        self.bfb = 1  # percentage counter
        self.bfbh = 1  # progress-bar tick counter
        return
    # Progress display, step 2: called per chunk to advance the bar text.
    def jinduview(self, filesize, value):
        self.ci += 1
        if self.ci == int(filesize / 1024 / 100):
            self.Lbj[value - 1].set('[%s]%d%%' % (('>' * self.bfbh), self.bfb))
            self.ci = 0
            self.bfb += 1
            if self.bfb % 10 == 0:
                self.bfbh += 1
        return
    # Yes/no confirmation pop-up; returns True/False.
    def messageboxask(self, title, msg):
        ask = tkinter.messagebox.askyesno(title=title, message=msg)
        return ask
    # Informational pop-up.
    def messageboxinfo(self, title, msg):
        tkinter.messagebox.showinfo(title=title, message=msg)
    # Prompt for a new file name; result stored in self.newname.
    def rename(self):
        top3 = Toplevel(self.top1)
        top3.title("文件重命名")
        top3.geometry("320x50+400+200")
        def re():
            self.newname = nname.get()
            top3.destroy()
            return
        Label(top3, text="输入新文件名:", font=8).place(x=10, y=10)
        nname = Variable()
        Entry(top3, textvariable=nname).place(x=115, y=10)
        Button(top3, text="确定", command=re).place(x=265, y=10)
        top3.mainloop()
|
from django.db import models
# Create your models here.
class Author(models.Model):
    """A book author."""
    # Display/lookup name.
    name = models.CharField(max_length=50)
    # Nullable: the birth date may be unknown.
    birth_date = models.DateField(null=True)

    def __str__(self):
        """Human-readable label used by the admin and shell."""
        return self.name
class Tag(models.Model):
    """A simple named label."""
    name = models.CharField(max_length=50)

    def __str__(self):
        """Human-readable label used by the admin and shell."""
        return self.name
class Book(models.Model):
    """A book, optionally linked to an Author."""
    title = models.CharField(max_length=50)
    num_pages = models.IntegerField()
    # Nullable: publication date may be unknown.
    date_published = models.DateField(null=True)
    # Deleting an Author cascades to their books; null allows authorless books.
    author = models.ForeignKey(Author, on_delete=models.CASCADE, null=True)

    def __str__(self):
        """Human-readable label used by the admin and shell."""
        return self.title
|
# Collect apartment prices until a non-positive number is entered.
x = 1
apartments = []
sum_ap = 0
while x > 0:
    x = int(input("Apartment price: "))
    if x > 0:
        sum_ap = sum_ap + 1
        apartments.append(x)
        print("Apartment", sum_ap, "price:", x)
    else:
        print("Try again!")
        break

# Bug fix: guard against ZeroDivisionError when the very first entry is
# non-positive and no apartment was ever registered.
if sum_ap == 0:
    print("No apartments have been registered.")
else:
    avg = sum(apartments) / sum_ap
    print(sum_ap, "apartments have been registered. The average price for rent is", avg)
    # Classify rent prices against the average until a non-positive entry.
    y = 1
    while y > 0:
        y = int(input("Rent price: "))
        if y > avg:
            print("Above average price.")
        elif y == avg:
            print("Same as average price.")
        elif 0 < y < avg:
            print("Below average price.")
        else:
            print("Quit!")
|
import Quartz
from Foundation import NSMutableData
from PyObjCTools.TestSupport import TestCase, min_os_level
class TestCGPDFContext(TestCase):
    """Exercises PyObjC's Quartz CGPDFContext bindings: function metadata
    (argument/retain annotations), runtime behaviour against a data consumer
    and a file URL, and the exported constants, gated by macOS version."""

    def testFunctions(self):
        # Build an in-memory data consumer to back a PDF context.
        data = NSMutableData.data()
        self.assertIsInstance(data, Quartz.CFMutableDataRef)
        consumer = Quartz.CGDataConsumerCreateWithCFData(data)
        self.assertIsInstance(consumer, Quartz.CGDataConsumerRef)
        self.assertArgIsIn(Quartz.CGPDFContextCreate, 1)
        self.assertResultIsCFRetained(Quartz.CGPDFContextCreate)
        context = Quartz.CGPDFContextCreate(consumer, None, None)
        self.assertIsInstance(context, Quartz.CGContextRef)
        # CGPDFContextClose is only present on some OS versions.
        if hasattr(Quartz, "CGPDFContextClose"):
            Quartz.CGPDFContextClose(context)
        self.assertResultIsCFRetained(Quartz.CGPDFContextCreateWithURL)
        url = Quartz.CFURLCreateWithFileSystemPath(
            None, "/tmp/pyobjc.test.pdf", Quartz.kCFURLPOSIXPathStyle, False
        )
        self.assertArgIsIn(Quartz.CGPDFContextCreateWithURL, 1)
        context = Quartz.CGPDFContextCreateWithURL(url, None, None)
        self.assertIsInstance(context, Quartz.CGContextRef)
        # Exercise page-level annotation APIs on a real page.
        Quartz.CGPDFContextBeginPage(context, None)
        Quartz.CGPDFContextSetURLForRect(context, url, ((0, 0), (10, 10)))
        Quartz.CGPDFContextAddDestinationAtPoint(context, "target", (50, 50))
        Quartz.CGPDFContextSetDestinationForRect(
            context, "target", ((100, 120), (50, 60))
        )
        Quartz.CGPDFContextEndPage(context)
        if hasattr(Quartz, "CGPDFContextClose"):
            Quartz.CGPDFContextClose(context)

    @min_os_level("10.5")
    def testFunctions10_5(self):
        # Note actual test is in the function below this one.
        Quartz.CGPDFContextClose

    @min_os_level("10.7")
    def testFunctions10_7(self):
        data = NSMutableData.data()
        consumer = Quartz.CGDataConsumerCreateWithCFData(data)
        context = Quartz.CGPDFContextCreate(consumer, None, None)
        # Minimal XMP packet; only checks the call accepts CFData metadata.
        metadata = (
            b"""<?xpacket begin='' id='W5M0MpCehiHzreSzNTczkc9d'?><?xpacket end='w'?>"""
        )
        Quartz.CGPDFContextAddDocumentMetadata(
            context, NSMutableData.dataWithBytes_length_(metadata, len(metadata))
        )

    @min_os_level("10.13")
    def testFunctions10_13(self):
        # Existence check only.
        Quartz.CGPDFContextSetOutline

    @min_os_level("10.15")
    def testFunctions10_15(self):
        # Existence checks only.
        Quartz.CGPDFTagTypeGetName
        Quartz.CGPDFContextBeginTag
        Quartz.CGPDFContextEndTag

    def testConstants(self):
        # Auxiliary-info dictionary keys are CFString constants.
        self.assertIsInstance(Quartz.kCGPDFContextMediaBox, str)
        self.assertIsInstance(Quartz.kCGPDFContextCropBox, str)
        self.assertIsInstance(Quartz.kCGPDFContextBleedBox, str)
        self.assertIsInstance(Quartz.kCGPDFContextTrimBox, str)
        self.assertIsInstance(Quartz.kCGPDFContextArtBox, str)
        self.assertIsInstance(Quartz.kCGPDFContextTitle, str)
        self.assertIsInstance(Quartz.kCGPDFContextAuthor, str)
        self.assertIsInstance(Quartz.kCGPDFContextKeywords, str)
        self.assertIsInstance(Quartz.kCGPDFContextCreator, str)
        self.assertIsInstance(Quartz.kCGPDFContextOwnerPassword, str)
        self.assertIsInstance(Quartz.kCGPDFContextUserPassword, str)
        self.assertIsInstance(Quartz.kCGPDFContextEncryptionKeyLength, str)
        self.assertIsInstance(Quartz.kCGPDFContextAllowsPrinting, str)
        self.assertIsInstance(Quartz.kCGPDFContextAllowsCopying, str)
        self.assertIsInstance(Quartz.kCGPDFContextOutputIntent, str)
        self.assertIsInstance(Quartz.kCGPDFXOutputIntentSubtype, str)
        self.assertIsInstance(Quartz.kCGPDFXOutputConditionIdentifier, str)
        self.assertIsInstance(Quartz.kCGPDFXOutputCondition, str)
        self.assertIsInstance(Quartz.kCGPDFXRegistryName, str)
        self.assertIsInstance(Quartz.kCGPDFXInfo, str)
        self.assertIsInstance(Quartz.kCGPDFXDestinationOutputProfile, str)
        self.assertIsInstance(Quartz.kCGPDFContextOutputIntents, str)
        # CGPDFTagType enum values, grouped by hundreds per PDF tag category.
        self.assertEqual(Quartz.CGPDFTagTypeDocument, 100)
        self.assertEqual(Quartz.CGPDFTagTypePart, 101)
        self.assertEqual(Quartz.CGPDFTagTypeArt, 102)
        self.assertEqual(Quartz.CGPDFTagTypeSection, 103)
        self.assertEqual(Quartz.CGPDFTagTypeDiv, 104)
        self.assertEqual(Quartz.CGPDFTagTypeBlockQuote, 105)
        self.assertEqual(Quartz.CGPDFTagTypeCaption, 106)
        self.assertEqual(Quartz.CGPDFTagTypeTOC, 107)
        self.assertEqual(Quartz.CGPDFTagTypeTOCI, 108)
        self.assertEqual(Quartz.CGPDFTagTypeIndex, 109)
        self.assertEqual(Quartz.CGPDFTagTypeNonStructure, 110)
        self.assertEqual(Quartz.CGPDFTagTypePrivate, 111)
        self.assertEqual(Quartz.CGPDFTagTypeParagraph, 200)
        self.assertEqual(Quartz.CGPDFTagTypeHeader, 201)
        self.assertEqual(Quartz.CGPDFTagTypeHeader1, 202)
        self.assertEqual(Quartz.CGPDFTagTypeHeader2, 203)
        self.assertEqual(Quartz.CGPDFTagTypeHeader3, 204)
        self.assertEqual(Quartz.CGPDFTagTypeHeader4, 205)
        self.assertEqual(Quartz.CGPDFTagTypeHeader5, 206)
        self.assertEqual(Quartz.CGPDFTagTypeHeader6, 207)
        self.assertEqual(Quartz.CGPDFTagTypeList, 300)
        self.assertEqual(Quartz.CGPDFTagTypeListItem, 301)
        self.assertEqual(Quartz.CGPDFTagTypeLabel, 302)
        self.assertEqual(Quartz.CGPDFTagTypeListBody, 303)
        self.assertEqual(Quartz.CGPDFTagTypeTable, 400)
        self.assertEqual(Quartz.CGPDFTagTypeTableRow, 401)
        self.assertEqual(Quartz.CGPDFTagTypeTableHeaderCell, 402)
        self.assertEqual(Quartz.CGPDFTagTypeTableDataCell, 403)
        self.assertEqual(Quartz.CGPDFTagTypeTableHeader, 404)
        self.assertEqual(Quartz.CGPDFTagTypeTableBody, 405)
        self.assertEqual(Quartz.CGPDFTagTypeTableFooter, 406)
        self.assertEqual(Quartz.CGPDFTagTypeSpan, 500)
        self.assertEqual(Quartz.CGPDFTagTypeQuote, 501)
        self.assertEqual(Quartz.CGPDFTagTypeNote, 502)
        self.assertEqual(Quartz.CGPDFTagTypeReference, 503)
        self.assertEqual(Quartz.CGPDFTagTypeBibliography, 504)
        self.assertEqual(Quartz.CGPDFTagTypeCode, 505)
        self.assertEqual(Quartz.CGPDFTagTypeLink, 506)
        self.assertEqual(Quartz.CGPDFTagTypeAnnotation, 507)
        self.assertEqual(Quartz.CGPDFTagTypeRuby, 600)
        self.assertEqual(Quartz.CGPDFTagTypeRubyBaseText, 601)
        self.assertEqual(Quartz.CGPDFTagTypeRubyAnnotationText, 602)
        self.assertEqual(Quartz.CGPDFTagTypeRubyPunctuation, 603)
        self.assertEqual(Quartz.CGPDFTagTypeWarichu, 604)
        self.assertEqual(Quartz.CGPDFTagTypeWarichuText, 605)
        self.assertEqual(Quartz.CGPDFTagTypeWarichuPunctiation, 606)
        self.assertEqual(Quartz.CGPDFTagTypeFigure, 700)
        self.assertEqual(Quartz.CGPDFTagTypeFormula, 701)
        self.assertEqual(Quartz.CGPDFTagTypeForm, 702)

    @min_os_level("10.5")
    def testConstants10_5(self):
        self.assertIsInstance(Quartz.kCGPDFContextSubject, str)

    @min_os_level("10.13")
    def testConstants10_13(self):
        self.assertIsInstance(Quartz.kCGPDFContextAccessPermissions, str)

    @min_os_level("10.15")
    def testConstants10_15(self):
        self.assertIsInstance(Quartz.kCGPDFTagPropertyActualText, str)
        self.assertIsInstance(Quartz.kCGPDFTagPropertyAlternativeText, str)
        self.assertIsInstance(Quartz.kCGPDFTagPropertyTitleText, str)
        self.assertIsInstance(Quartz.kCGPDFTagPropertyLanguageText, str)

    @min_os_level("11.0")
    def testConstants11_0(self):
        self.assertIsInstance(Quartz.kCGPDFContextCreateLinearizedPDF, str)
        self.assertIsInstance(Quartz.kCGPDFContextCreatePDFA, str)
|
import unittest
from tests.unit_test_helper.console_test_helper import *
class TestOutput(unittest.TestCase):
    """Verifies the list-slicing exercise decodes the garbled message."""

    def test(self):
        _globals, _locals, _content, _output = execfile("lab16/ch016_t17_list_slicing.py")
        self.assertEqual("!XeXgXaXsXsXeXmX XtXeXrXcXeXsX XeXhXtX XmXaX XI", _locals['garbled'])
        self.assertEqual("I am the secret message!", _locals['message'])


if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/env python
import platform
from itertools import product
from egcg_core.config import cfg
from pyclarity_lims.entities import Sample, Container, Step
from EPPs.common import StepEPP, get_workflow_stage, InvalidStepError, finish_step
class CopySamples(StepEPP):
    """Creates duplicate submitted samples with the same sample UDF values as
    the input samples, routes the duplicates to the container-dispatch stage,
    and pushes the original samples through 'remove from processing'."""
    _max_nb_project = 1
    # mapping used to link udf value to the container type
    udf_to_container_type = {
        '96 well plate': ['96 well plate'],
        'Tube': ['rack 96 positions', 'SGP rack 96 positions']
    }
    # Running index into plate96_layout for the current container.
    plate96_layout_counter = 0
    # All 96 (column-number, row-letter) positions of a plate, column-major.
    plate96_layout = list(product([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']))
    current_container = None

    def complete_remove_from_processing(self, stage):
        """Create, advance and finish a 'remove from processing' step that
        consumes the original input artifacts."""
        # Create new step with the routed artifacts
        s = Step.create(self.lims, protocol_step=stage.step, inputs=self.artifacts,
                        container_type_name='Tube')
        url = 'https://%s/clarity/work-complete/%s' % (platform.node(), self.process.id.split('-')[1])
        s.details.udf['Reason for removal from processing:'] = 'Repeat samples requested. See step %s' % url
        s.details.put()
        # Move from "Record detail" window to the "Next Step"
        s.advance()
        for next_action in s.actions.next_actions:
            next_action['action'] = 'complete'
        s.actions.put()
        # Complete the step
        finish_step(s)

    def next_sample_name_and_pos(self):
        """Provide the next available position on the current container and generate the associated sample name.
        When the container runs out of positions, create a new container and start again."""
        if not self.current_container:
            try:
                self.current_container = Container.create(
                    self.lims,
                    type=self.lims.get_container_types(name=self.process.udf['Container Type'])[0],
                    name=self.find_available_container(self.projects[0].name,
                                                       container_type=self.process.udf['Container Type'])
                )
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt /
                # SystemExit are no longer swallowed.
                raise InvalidStepError(
                    'Container could not be created. Check that Container Type udf has been populated')
        elif self.plate96_layout_counter >= 96:
            # The current container is full: open a fresh one of the same type.
            self.current_container = Container.create(
                self.lims,
                type=self.current_container.type,
                name=self.find_available_container(self.projects[0].name, self.current_container.type.name)
            )
            self.plate96_layout_counter = 0
        r, c = self.plate96_layout[self.plate96_layout_counter]
        sample_name = self.current_container.name + '%s%02d' % (c, r)
        self.plate96_layout_counter += 1
        return sample_name, '%s:%d' % (c, r)

    def create_samples_list(self):
        """Build the list of dicts describing the duplicate samples to create,
        copying the relevant UDF values from each input sample."""
        samples_to_create = []
        for input_sample in self.samples:
            new_sample_name, new_sample_position = self.next_sample_name_and_pos()
            new_sample_dict = {
                'container': self.current_container,
                'project': self.projects[0],
                'name': new_sample_name,
                'position': new_sample_position,
                'udf': {'Prep Workflow': input_sample.udf['Prep Workflow'],
                        'Coverage (X)': input_sample.udf['Coverage (X)'],
                        'Required Yield (Gb)': input_sample.udf['Required Yield (Gb)'],
                        'Delivery': input_sample.udf['Delivery'],
                        'User Prepared Library': input_sample.udf['User Prepared Library'],
                        'Analysis Type': input_sample.udf['Analysis Type'],
                        'Rapid Analysis': input_sample.udf['Rapid Analysis'],
                        'Species': input_sample.udf['Species'],
                        'Genome Version': input_sample.udf['Genome Version']}
            }
            samples_to_create.append(new_sample_dict)
        return samples_to_create

    def _run(self):
        """Create the duplicates, route them onward, and retire the inputs."""
        # Create new samples
        samples = self.lims.create_batch(Sample, self.create_samples_list())
        self.lims.get_batch(samples, force=True)
        # Assign newly created samples to the create manifest step
        sample_artifacts = [s.artifact for s in samples]
        stage_wf_st = cfg.query('workflow_stage', 'container_dispatch', 'start')
        stage = get_workflow_stage(self.lims, stage_wf_st[0], stage_wf_st[1])
        self.lims.route_artifacts(sample_artifacts, stage_uri=stage.uri)
        # Assign the input samples to remove from processing step then complete the remove from processing step
        stage_wf_st = cfg.query('workflow_stage', 'remove_from_processing', 'start')
        stage = get_workflow_stage(self.lims, stage_wf_st[0], stage_wf_st[1])
        self.lims.route_artifacts(self.artifacts, stage_uri=stage.uri)
        self.complete_remove_from_processing(stage)
# Script entry point: run the step wrapper defined earlier in this file.
if __name__ == "__main__":
    CopySamples().run()
|
def decorator_func(say_hello_func):
    """Decorator that substitutes default greeting words for falsy arguments."""
    def wrapper_func(hello_var, world_var):
        # Fall back to the defaults when the caller passes empty values.
        return say_hello_func(hello_var or "Hello, ", world_var or "World")
    return wrapper_func
@decorator_func
def say_hello(hello_var, world_var):
    """Print the two greeting words; empty arguments are replaced by the decorator."""
    print(hello_var + " " + world_var)
# Demonstrate the decorator's default substitution for each argument.
say_hello("", "")
say_hello("Hi", "")
say_hello("", "There")
###
def my_decorator(some_function):
    """Wrap a zero-argument function with before/after announcement prints."""
    def wrapper():
        target = some_function.__name__
        print("\nSomething is happening before {}() is called.".format(target))
        some_function()
        print("Something is happening after {}() is called.\n".format(target))
    return wrapper
###
def just_some_function():
    """Print a short message; used to demonstrate manual decoration."""
    print("Wheee!")
# Manual decoration: equivalent to using the @my_decorator syntax.
just_some_function = my_decorator(just_some_function)
just_some_function()
###
@my_decorator
def just_another_function():
    """Print a short message; decorated with the @ syntax this time."""
    print("Whoosh!")
just_another_function()
###
def a_decorator(func):
    """Wrap *func* with before/after prints.

    Improvements over the original:
    - the len(args)-based dispatch (0/1/many) was redundant — every branch is
      equivalent to a single func(*args) call;
    - keyword arguments are now forwarded as well (backward-compatible);
    - the wrapped function's return value is passed through instead of being
      discarded (previously the wrapper always returned None).
    """
    def wrapper(*args, **kwargs):
        print("\nI am doing some boring work before executing {}()".format(func.__name__))
        result = func(*args, **kwargs)
        print("I am doing some boring work after executing {}()\n".format(func.__name__))
        return result
    return wrapper
@a_decorator
def hi(name):
    """Greet *name* (one positional argument)."""
    print("Hi, " + name)
@a_decorator
def hello():
    """Print a plain greeting (no arguments)."""
    print("Hello")
@a_decorator
def greetings(fname, lname):
    """Greet by first and last name (two positional arguments)."""
    print("Greetings {} {}".format(fname, lname))
# Exercise the decorator with one, zero and two positional arguments.
hi("truman")
hello()
greetings("Truman", "Smith")
|
# https://leetcode.com/problems/longest-common-subsequence/
# 2021/10
# 638 ms
class Solution:
    def longestCommonSubsequence(self, text1: str, text2: str) -> int:
        """Return the length of the longest common subsequence of text1 and text2.

        Classic O(len1*len2) dynamic programme: table[j][i] is the LCS length
        of text2[:j] and text1[:i].
        """
        len1, len2 = len(text1), len(text2)
        table = [[0] * (len1 + 1) for _ in range(len2 + 1)]
        for j, ch2 in enumerate(text2):
            for i, ch1 in enumerate(text1):
                diagonal = table[j][i] + 1 if ch1 == ch2 else table[j][i]
                table[j + 1][i + 1] = max(diagonal, table[j][i + 1], table[j + 1][i])
        return table[len2][len1]
|
import torch
from torch import nn
import sys
from src import models
from src import ctc
from src.utils import *
import torch.optim as optim
import numpy as np
import time
from torch.optim.lr_scheduler import ReduceLROnPlateau
import os
import pickle
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import accuracy_score, f1_score
from src.eval_metrics import *
####################################################################
#
# Construct the model and the CTC module (which may not be needed)
#
####################################################################
def get_CTC_module(hyp_params):
    """Build the audio-to-text and vision-to-text CTC alignment modules."""
    # Direct attribute access replaces the getattr(ctc, 'CTCModule') indirection.
    a2l_module = ctc.CTCModule(in_dim=hyp_params.orig_d_a, out_seq_len=hyp_params.l_len)
    v2l_module = ctc.CTCModule(in_dim=hyp_params.orig_d_v, out_seq_len=hyp_params.l_len)
    return a2l_module, v2l_module
def initiate(hyp_params, train_loader, valid_loader, test_loader):
    """Build the model, optimizer, criterion and (optionally) the CTC alignment
    modules, then delegate the whole training run to train_model().
    """
    # Model class is resolved by name, e.g. model='MULT' -> models.MULTModel.
    model = getattr(models, hyp_params.model+'Model')(hyp_params)
    if hyp_params.use_cuda:
        model = model.cuda()
    # Optimizer and loss are likewise resolved by name from torch.optim / torch.nn.
    optimizer = getattr(optim, hyp_params.optim)(model.parameters(), lr=hyp_params.lr)
    criterion = getattr(nn, hyp_params.criterion)()
    if hyp_params.aligned or hyp_params.model=='MULT':
        # Aligned data (or the MULT model) needs no CTC-based re-alignment.
        ctc_criterion = None
        ctc_a2l_module, ctc_v2l_module = None, None
        ctc_a2l_optimizer, ctc_v2l_optimizer = None, None
    else:
        # Unaligned data: train audio->text and vision->text aligners with a
        # CTC loss. warpctc_pytorch is imported lazily, only when needed.
        from warpctc_pytorch import CTCLoss
        ctc_criterion = CTCLoss()
        ctc_a2l_module, ctc_v2l_module = get_CTC_module(hyp_params)
        if hyp_params.use_cuda:
            ctc_a2l_module, ctc_v2l_module = ctc_a2l_module.cuda(), ctc_v2l_module.cuda()
        ctc_a2l_optimizer = getattr(optim, hyp_params.optim)(ctc_a2l_module.parameters(), lr=hyp_params.lr)
        ctc_v2l_optimizer = getattr(optim, hyp_params.optim)(ctc_v2l_module.parameters(), lr=hyp_params.lr)
    # Reduce LR by 10x when validation loss plateaus for `when` epochs.
    scheduler = ReduceLROnPlateau(optimizer, mode='min', patience=hyp_params.when, factor=0.1, verbose=True)
    settings = {'model': model,
                'optimizer': optimizer,
                'criterion': criterion,
                'ctc_a2l_module': ctc_a2l_module,
                'ctc_v2l_module': ctc_v2l_module,
                'ctc_a2l_optimizer': ctc_a2l_optimizer,
                'ctc_v2l_optimizer': ctc_v2l_optimizer,
                'ctc_criterion': ctc_criterion,
                'scheduler': scheduler}
    return train_model(settings, hyp_params, train_loader, valid_loader, test_loader)
####################################################################
#
# Training and evaluation scripts
#
####################################################################
def train_model(settings, hyp_params, train_loader, valid_loader, test_loader):
    """Run the full train/validate/test loop described by *settings*.

    Trains for hyp_params.num_epochs epochs, saves the checkpoint with the
    best validation loss, reloads it at the end and reports final test
    metrics with the dataset-specific eval function.
    """
    model = settings['model']
    optimizer = settings['optimizer']
    criterion = settings['criterion']
    ctc_a2l_module = settings['ctc_a2l_module']
    ctc_v2l_module = settings['ctc_v2l_module']
    ctc_a2l_optimizer = settings['ctc_a2l_optimizer']
    ctc_v2l_optimizer = settings['ctc_v2l_optimizer']
    ctc_criterion = settings['ctc_criterion']
    scheduler = settings['scheduler']
    # One training epoch; returns the average combined loss per training sample.
    # NOTE: the progress print below reads `epoch` from the enclosing loop.
    def train(model, optimizer, criterion, ctc_a2l_module, ctc_v2l_module, ctc_a2l_optimizer, ctc_v2l_optimizer, ctc_criterion):
        epoch_loss = 0
        model.train()
        num_batches = hyp_params.n_train // hyp_params.batch_size
        proc_loss, proc_size = 0, 0
        start_time = time.time()
        for i_batch, (batch_X, batch_Y, batch_META) in enumerate(train_loader):
            sample_ind, text, audio, vision = batch_X
            eval_attr = batch_Y.squeeze(-1)   # if num of labels is 1
            model.zero_grad()
            if ctc_criterion is not None:
                ctc_a2l_module.zero_grad()
                ctc_v2l_module.zero_grad()
            if hyp_params.use_cuda:
                with torch.cuda.device(0):
                    text, audio, vision, eval_attr = text.cuda(), audio.cuda(), vision.cuda(), eval_attr.cuda()
                    # iemocap labels are class indices, so they must be long.
                    if hyp_params.dataset == 'iemocap':
                        eval_attr = eval_attr.long()
            batch_size = text.size(0)
            batch_chunk = hyp_params.batch_chunk
            ######## CTC STARTS ######## Do not worry about this if not working on CTC
            if ctc_criterion is not None:
                ctc_a2l_net = nn.DataParallel(ctc_a2l_module) if batch_size > 10 else ctc_a2l_module
                ctc_v2l_net = nn.DataParallel(ctc_v2l_module) if batch_size > 10 else ctc_v2l_module
                audio, a2l_position = ctc_a2l_net(audio) # audio now is the aligned to text
                vision, v2l_position = ctc_v2l_net(vision)
                ## Compute the ctc loss
                l_len, a_len, v_len = hyp_params.l_len, hyp_params.a_len, hyp_params.v_len
                # Output Labels
                l_position = torch.tensor([i+1 for i in range(l_len)]*batch_size).int().cpu()
                # Specifying each output length
                l_length = torch.tensor([l_len]*batch_size).int().cpu()
                # Specifying each input length
                a_length = torch.tensor([a_len]*batch_size).int().cpu()
                v_length = torch.tensor([v_len]*batch_size).int().cpu()
                ctc_a2l_loss = ctc_criterion(a2l_position.transpose(0,1).cpu(), l_position, a_length, l_length)
                ctc_v2l_loss = ctc_criterion(v2l_position.transpose(0,1).cpu(), l_position, v_length, l_length)
                ctc_loss = ctc_a2l_loss + ctc_v2l_loss
                ctc_loss = ctc_loss.cuda() if hyp_params.use_cuda else ctc_loss
            else:
                ctc_loss = 0
            ######## CTC ENDS ########
            combined_loss = 0
            net = nn.DataParallel(model) if batch_size > 10 else model
            # Optional gradient accumulation: split the batch into chunks to
            # reduce peak memory, backpropagating each chunk separately.
            if batch_chunk > 1:
                raw_loss = combined_loss = 0
                text_chunks = text.chunk(batch_chunk, dim=0)
                audio_chunks = audio.chunk(batch_chunk, dim=0)
                vision_chunks = vision.chunk(batch_chunk, dim=0)
                eval_attr_chunks = eval_attr.chunk(batch_chunk, dim=0)
                for i in range(batch_chunk):
                    text_i, audio_i, vision_i = text_chunks[i], audio_chunks[i], vision_chunks[i]
                    eval_attr_i = eval_attr_chunks[i]
                    preds_i, hiddens_i = net(text_i, audio_i, vision_i)
                    if hyp_params.dataset == 'iemocap':
                        preds_i = preds_i.view(-1, 2)
                        eval_attr_i = eval_attr_i.view(-1)
                    raw_loss_i = criterion(preds_i, eval_attr_i) / batch_chunk
                    raw_loss += raw_loss_i
                    raw_loss_i.backward()
                ctc_loss.backward()
                combined_loss = raw_loss + ctc_loss
            else:
                preds, hiddens = net(text, audio, vision)
                if hyp_params.dataset == 'iemocap':
                    preds = preds.view(-1, 2)
                    eval_attr = eval_attr.view(-1)
                raw_loss = criterion(preds, eval_attr)
                combined_loss = raw_loss + ctc_loss
                combined_loss.backward()
            # Clip gradients and step the CTC optimizers (if active) and the
            # main model optimizer.
            if ctc_criterion is not None:
                torch.nn.utils.clip_grad_norm_(ctc_a2l_module.parameters(), hyp_params.clip)
                torch.nn.utils.clip_grad_norm_(ctc_v2l_module.parameters(), hyp_params.clip)
                ctc_a2l_optimizer.step()
                ctc_v2l_optimizer.step()
            torch.nn.utils.clip_grad_norm_(model.parameters(), hyp_params.clip)
            optimizer.step()
            proc_loss += raw_loss.item() * batch_size
            proc_size += batch_size
            epoch_loss += combined_loss.item() * batch_size
            # Periodic progress logging over the last log_interval batches.
            if i_batch % hyp_params.log_interval == 0 and i_batch > 0:
                avg_loss = proc_loss / proc_size
                elapsed_time = time.time() - start_time
                print('Epoch {:2d} | Batch {:3d}/{:3d} | Time/Batch(ms) {:5.2f} | Train Loss {:5.4f}'.
                      format(epoch, i_batch, num_batches, elapsed_time * 1000 / hyp_params.log_interval, avg_loss))
                proc_loss, proc_size = 0, 0
                start_time = time.time()
        return epoch_loss / hyp_params.n_train
    # Evaluate on the validation set (test=False) or the test set (test=True);
    # returns (average loss, concatenated predictions, concatenated truths).
    def evaluate(model, ctc_a2l_module, ctc_v2l_module, criterion, test=False):
        model.eval()
        loader = test_loader if test else valid_loader
        total_loss = 0.0
        results = []
        truths = []
        with torch.no_grad():
            for i_batch, (batch_X, batch_Y, batch_META) in enumerate(loader):
                sample_ind, text, audio, vision = batch_X
                eval_attr = batch_Y.squeeze(dim=-1) # if num of labels is 1
                if hyp_params.use_cuda:
                    with torch.cuda.device(0):
                        text, audio, vision, eval_attr = text.cuda(), audio.cuda(), vision.cuda(), eval_attr.cuda()
                        if hyp_params.dataset == 'iemocap':
                            eval_attr = eval_attr.long()
                batch_size = text.size(0)
                if (ctc_a2l_module is not None) and (ctc_v2l_module is not None):
                    ctc_a2l_net = nn.DataParallel(ctc_a2l_module) if batch_size > 10 else ctc_a2l_module
                    ctc_v2l_net = nn.DataParallel(ctc_v2l_module) if batch_size > 10 else ctc_v2l_module
                    audio, _ = ctc_a2l_net(audio)     # audio aligned to text
                    vision, _ = ctc_v2l_net(vision)   # vision aligned to text
                net = nn.DataParallel(model) if batch_size > 10 else model
                preds, _ = net(text, audio, vision)
                if hyp_params.dataset == 'iemocap':
                    preds = preds.view(-1, 2)
                    eval_attr = eval_attr.view(-1)
                total_loss += criterion(preds, eval_attr).item() * batch_size
                # Collect the results into dictionary
                results.append(preds)
                truths.append(eval_attr)
        avg_loss = total_loss / (hyp_params.n_test if test else hyp_params.n_valid)
        results = torch.cat(results)
        truths = torch.cat(truths)
        return avg_loss, results, truths
    # Main epoch loop: train, evaluate, schedule LR, checkpoint on best valid.
    best_valid = 1e8
    for epoch in range(1, hyp_params.num_epochs+1):
        start = time.time()
        train(model, optimizer, criterion, ctc_a2l_module, ctc_v2l_module, ctc_a2l_optimizer, ctc_v2l_optimizer, ctc_criterion)
        val_loss, _, _ = evaluate(model, ctc_a2l_module, ctc_v2l_module, criterion, test=False)
        test_loss, _, _ = evaluate(model, ctc_a2l_module, ctc_v2l_module, criterion, test=True)
        end = time.time()
        duration = end-start
        scheduler.step(val_loss)    # Decay learning rate by validation loss
        print("-"*50)
        print('Epoch {:2d} | Time {:5.4f} sec | Valid Loss {:5.4f} | Test Loss {:5.4f}'.format(epoch, duration, val_loss, test_loss))
        print("-"*50)
        if val_loss < best_valid:
            print(f"Saved model at pre_trained_models/{hyp_params.name}.pt!")
            save_model(hyp_params, model, name=hyp_params.name)
            best_valid = val_loss
    # Reload the best checkpoint and report final test-set metrics.
    model = load_model(hyp_params, name=hyp_params.name)
    _, results, truths = evaluate(model, ctc_a2l_module, ctc_v2l_module, criterion, test=True)
    if hyp_params.dataset == "mosei_senti":
        eval_mosei_senti(results, truths, True)
    elif hyp_params.dataset == 'mosi':
        eval_mosi(results, truths, True)
    elif hyp_params.dataset == 'iemocap':
        eval_iemocap(results, truths)
    sys.stdout.flush()
    input('[Press Any Key to start another run]')
|
from django.db import models
from django.core.exceptions import ValidationError
from lis.specimen.lab_aliquot.models import BaseAliquot
from edc_base.model.models import BaseUuidModel
from .aliquot_condition import AliquotCondition
from .aliquot_type import AliquotType
from .receive import Receive
class Aliquot(BaseAliquot, BaseUuidModel):
    """Stores aliquot information and is the central model in the RAORR relational model."""
    # Receiving record this aliquot was derived from; fixed after creation.
    receive = models.ForeignKey(Receive, editable=False)
    aliquot_type = models.ForeignKey(AliquotType, verbose_name="Aliquot Type", null=True)
    aliquot_condition = models.ForeignKey(AliquotCondition, verbose_name="Aliquot Condition", null=True, blank=True)
    # Set by the import process, not by users.
    import_datetime = models.DateTimeField(null=True, editable=False)
    objects = models.Manager()
    def save(self, *args, **kwargs):
        """Copy the subject identifier from the receive record and require a
        primary aliquot whenever a source aliquot is set."""
        self.subject_identifier = self.receive.registered_subject.subject_identifier
        if self.source_aliquot and not self.primary_aliquot:
            raise ValidationError('Primary aliquot may not be None')
        super(Aliquot, self).save(*args, **kwargs)
    class Meta:
        app_label = 'lab_clinic_api'
        # One count value per receive record; listed in receive/count order.
        unique_together = (('receive', 'count'), )
        ordering = ('receive', 'count')
|
from pathlib import Path
from collections import namedtuple, Counter
def flatten(l):
    """Return a copy of *l* with one level of list nesting removed.

    Non-list elements are kept as-is (tuples and other iterables are NOT
    expanded). Generalized: isinstance() now also accepts list subclasses,
    which the original `type(el) is list` check rejected.
    """
    flat = []
    for el in l:
        if isinstance(el, list):
            flat.extend(el)
        else:
            flat.append(el)
    return flat
class IncludeFile(Path):
    # pathlib.Path subclassing trick (pre-3.12 pathlib): Path.__new__ dispatches
    # on _flavour, so copy the current platform's flavour to make the subclass
    # instantiable.
    _flavour = Path('.')._flavour
class SourceFile(Path):
    # Same _flavour trick as IncludeFile.
    _flavour = Path('.')._flavour
# The classes below are behaviour-free markers: they only tag a path with a
# role so callers can filter by isinstance() (see SourceFileProvider).
class RAATSource(SourceFile):
    pass
class RAATInclude(IncludeFile):
    pass
class LocalSource(SourceFile):
    pass
class LocalInclude(IncludeFile):
    pass
class LibraryInclude(IncludeFile):
    pass
class ParameterSource(SourceFile):
    pass
class ParameterInclude(IncludeFile):
    pass
class DeviceSource(SourceFile):
    pass
class DeviceInclude(IncludeFile):
    pass
class ModuleSource(SourceFile):
    pass
class ModuleInclude(IncludeFile):
    pass
class SourceFileProvider:
    """Mixin exposing type-filtered views of self.sources / self.includes.

    NOTE(review): assumes the concrete class provides `sources` and
    `includes` iterables — confirm at the mixin's use sites.
    """
    def get_sources(self, target_type):
        """Return the sources that are instances of *target_type*."""
        return list(filter(lambda item: isinstance(item, target_type), self.sources))
    def get_includes(self, target_type):
        """Return the includes that are instances of *target_type*."""
        return list(filter(lambda item: isinstance(item, target_type), self.includes))
class Setting(namedtuple("Setting", ["id", "name", "value"])):
    """Immutable (id, name, value) triple describing one device/parameter setting."""
    __slots__ = ()
    @classmethod
    def from_xml(cls, setting_node, count):
        """Build a Setting from an XML <setting> node.

        count == 0 denotes a single setting (read from the 'value' attribute);
        a positive count denotes a grouped setting, taken either from the
        pipe-separated 'values' attribute or from 'all_values' replicated
        *count* times.
        """
        attribs = setting_node.attrib
        identifier = attribs["id"]
        label = attribs.get("name", "")
        if count == 0:
            content = attribs["value"]
        elif "values" in attribs:
            content = attribs["values"].split("|")
        elif "all_values" in attribs:
            content = [attribs["all_values"]] * count
        else:
            raise Exception(
                "Expected 'values' or 'all_values' attribute for multiple settings (got {}, count {})".format(
                    attribs, count)
            )
        return cls(identifier, label, content)
    @classmethod
    def from_yaml(cls, setting_dict):
        """Build a Setting from a YAML mapping with 'id', optional 'name', 'value'."""
        return cls(setting_dict["id"], setting_dict.get("name", ""), setting_dict["value"])
    def update(self, new_value):
        """Return a copy of this setting carrying *new_value*."""
        return Setting(self.id, self.name, new_value)
    def parse(self, parse_function):
        """Return a copy whose value is parse_function(self.value)."""
        return self.update(parse_function(self.value))
    def check(self, check_function, error_msg):
        """Raise Exception(error_msg) unless check_function(self.value) is truthy."""
        if not check_function(self.value):
            raise Exception(error_msg)
    @staticmethod
    def make_group(settings_dict, expected_count):
        """Split grouped (list-valued) settings into one {id: Setting} dict per index.

        Every setting's value list must have exactly *expected_count* entries.
        """
        lengths = [len(item.value) for item in settings_dict.values()]
        if len(set(lengths)) != 1 or lengths[0] != expected_count:
            raise Exception("Expected all setting lengths to be {}!".format(expected_count))
        return [
            {item.id: Setting(item.id, item.name, item.value[position])
             for item in settings_dict.values()}
            for position in range(lengths[0])
        ]
class Devices(namedtuple("Devices", ["single", "grouped"])):
    """Device XML nodes partitioned into single devices and counted groups."""
    __slots__ = ()
    @classmethod
    def from_xml_list(cls, device_xml_nodes):
        """Split nodes: those carrying a 'count' attribute become DeviceGroups."""
        singles = []
        groups = []
        for device_node in device_xml_nodes:
            count_attr = device_node.attrib.get("count")
            if count_attr is None:
                singles.append(Device.from_xml(device_node))
            else:
                groups.append(DeviceGroup.from_xml(device_node, int(count_attr)))
        return cls(singles, groups)
class Device(namedtuple("Device", ["name", "type", "settings"])):
    """A named, typed device carrying an {id: Setting} mapping."""
    __slots__ = ()
    @classmethod
    def from_xml(cls, device_node):
        """Build a Device from a <device> XML node (count defaults to 0 = single)."""
        attribs = device_node.attrib
        group_size = int(attribs.get("count", 0))
        parsed = (Setting.from_xml(child, group_size) for child in device_node.findall("setting"))
        return cls(attribs["name"], attribs["type"], {item.id: item for item in parsed})
    @classmethod
    def from_yaml(cls, device_dict):
        """Build a Device from a YAML mapping with 'name', 'type' and a 'setting' list."""
        parsed = (Setting.from_yaml(entry) for entry in device_dict["setting"])
        return cls(device_dict["name"], device_dict["type"], {item.id: item for item in parsed})
class DeviceGroup(namedtuple("DeviceGroup", ["name", "type", "settings", "count"])):
    """A Device replicated *count* times.

    Fixes: the underlying namedtuple was mistakenly named "Device" (copy-paste
    from the Device class); it now matches this class. __slots__ added for
    consistency with the sibling namedtuple subclasses and to avoid a
    per-instance __dict__.
    """
    __slots__ = ()
    @classmethod
    def from_xml(cls, device_node, count):
        """Parse the node as a Device and attach the group count."""
        device = Device.from_xml(device_node)
        return cls(device.name, device.type, device.settings, count)
class Parameters(namedtuple("Parameters", ["single", "grouped"])):
    """Parameter XML nodes partitioned into single parameters and counted groups."""
    __slots__ = ()
    @classmethod
    def from_xml_list(cls, param_xml_nodes):
        """Split nodes: those carrying a 'count' attribute become ParameterGroups."""
        singles = []
        groups = []
        for param_node in param_xml_nodes:
            count_attr = param_node.attrib.get("count")
            if count_attr is None:
                singles.append(Parameter.from_xml(param_node))
            else:
                groups.append(ParameterGroup.from_xml(param_node, int(count_attr)))
        return cls(singles, groups)
class Parameter(namedtuple("Parameter", ["name", "type", "settings"])):
    """A named, typed parameter carrying an {id: Setting} mapping.

    Cleanup: removed the stale commented-out draft of from_xml that shadowed
    the live implementation below.
    """
    __slots__ = ()
    @classmethod
    def from_xml(cls, parameter_node):
        """Build a Parameter from a <parameter> XML node (count defaults to 0 = single)."""
        name = parameter_node.attrib["name"]
        parameter_type = parameter_node.attrib["type"]
        parameter_count = int(parameter_node.attrib.get("count", 0))
        settings = [Setting.from_xml(setting_node, parameter_count)
                    for setting_node in parameter_node.findall("setting")]
        settings_dict = {setting.id: setting for setting in settings}
        return cls(name, parameter_type, settings_dict)
class ParameterGroup(namedtuple("ParameterGroup", ["name", "type", "settings", "count"])):
    """A Parameter replicated *count* times.

    Fixes: the underlying namedtuple was mistakenly named "Parameter"
    (copy-paste from the Parameter class); it now matches this class.
    __slots__ added for consistency with the sibling namedtuple subclasses.
    """
    __slots__ = ()
    @classmethod
    def from_xml(cls, parameter_node, count):
        """Parse the node as a Parameter and attach the group count."""
        param = Parameter.from_xml(parameter_node)
        return cls(param.name, param.type, param.settings, count)
class LoggingModule(namedtuple("LoggingModule", ["name", "prefix"])):
    """A logging module name plus its short log prefix."""
    __slots__ = ()
    @classmethod
    def from_xml(cls, log_module_node):
        """Build from an XML node; the prefix defaults to the first 3 chars of the name."""
        name = log_module_node.text
        prefix = log_module_node.attrib.get("prefix", name[0:3])
        return cls(name, prefix)
    def c_name(self):
        """Return the C-style static variable name for this module.

        Fixes: the original used "s_%d".format(...), which mixes printf-style
        placeholders with str.format and returns the literal string "s_%d".
        """
        return "s_{}".format(self.name.lower())
class Module(namedtuple("Module", ["name"])):
    """A plain (non-logging) module name parsed from configuration."""
    __slots__ = ()
    @classmethod
    def from_xml(cls, module_node):
        """Build a Module from the node's text content."""
        return cls(module_node.text)
def get_unique_log_modules(nodes):
    """Build LoggingModules from XML nodes, de-duplicating repeated prefixes.

    The first occurrence of a prefix keeps it bare; later occurrences get a
    numeric suffix (_2, _3, ...).
    """
    raw_prefixes = [node.attrib.get("prefix", node.text[0:3]) for node in nodes]
    if len(set(raw_prefixes)) == len(raw_prefixes):
        final_prefixes = raw_prefixes
    else:
        seen = Counter()
        final_prefixes = []
        for pfx in raw_prefixes:
            final_prefixes.append(pfx if seen[pfx] == 0 else "{}_{}".format(pfx, seen[pfx] + 1))
            seen[pfx] += 1
    return [LoggingModule(node.text, pfx) for (node, pfx) in zip(nodes, final_prefixes)]
class Board(namedtuple("Board", [
    "type", "subtype", "name",
    "devices", "parameters",
    "modules", "settings", "info", "raat", "custom_code",
    "attrs", "log_modules", "defines", "arduino_libs"])
):
    """Full description of one board parsed from XML or YAML configuration."""
    __slots__ = ()
    @classmethod
    def from_xml(cls, node):
        """Build a Board from an XML tree rooted at the board element.

        Fixes vs. the original:
        - Setting.from_xml requires a count argument; board-level settings are
          single-valued, so 0 is passed (the original omitted the argument,
          raising TypeError whenever a board had settings).
        - Optional child elements are tested with `is not None` instead of
          truthiness: an ElementTree Element with no children is falsy, and
          Element truthiness is deprecated in newer ElementTree versions.
        """
        board_node = node.find(".")
        name = board_node.attrib["name"]
        board_type = board_node.attrib["type"]
        board_subtype = board_node.attrib.get("subtype", "")
        devices_node = node.find("devices")
        devices = Devices.from_xml_list(devices_node if devices_node is not None else [])
        parameters_node = node.find("parameters")
        parameters = Parameters.from_xml_list(parameters_node if parameters_node is not None else [])
        modules_node = node.find("modules")
        modules = [Module.from_xml(module_node)
                   for module_node in (modules_node if modules_node is not None else [])]
        # Board-level settings are single-valued, hence count=0.
        settings = [Setting.from_xml(setting_node, 0) for setting_node in node.findall("setting")]
        settings_dict = {setting.id: setting for setting in settings}
        info = board_node.find("info").text
        raat = board_node.find("raat")
        if raat is None:
            raat = {}
        custom_code = board_node.find("custom_code")
        if custom_code is not None:
            custom_code_filenames = [f.text for f in custom_code.findall("file")]
        else:
            custom_code_filenames = []
        logging_node = node.find("logging")
        log_modules = get_unique_log_modules(logging_node if logging_node is not None else [])
        defines_node = board_node.find("defines")
        if defines_node is not None:
            defines = [d.text for d in defines_node.findall("define")]
        else:
            defines = []
        libs_node = board_node.find("libraries")
        if libs_node is not None:
            arduino_libs = [d.text for d in libs_node.findall("library")]
        else:
            arduino_libs = []
        return cls(
            board_type, board_subtype, name, devices, parameters, modules, settings_dict, info, raat,
            custom_code_filenames, board_node.attrib, log_modules, defines, arduino_libs
        )
    @classmethod
    def from_yaml(cls, board_dict):
        """Build a Board from a YAML-derived dict (everything under the 'board' key).

        Fixes vs. the original: the constructor call was missing the
        `parameters` and `modules` fields (12 args for a 14-field tuple), so
        every call raised TypeError. YAML boards carry no parameters/modules
        sections, so empty containers are supplied.

        NOTE(review): YAML devices are a plain list of Device (not a
        Devices(single, grouped) pair as in from_xml), and
        get_unique_log_modules expects XML nodes while YAML supplies plain
        values — confirm against the YAML call sites.
        """
        board = board_dict["board"]
        board_type = board["type"]
        board_subtype = board.get("subtype", "")
        name = board["name"]
        devices = [Device.from_yaml(dev) for dev in board["devices"]]
        if "settings" in board:
            settings = [Setting.from_yaml(setting) for setting in board["settings"]]
            settings_dict = {setting.id: setting for setting in settings}
        else:
            settings_dict = {}
        filenames = board.get("custom_code", [])
        defines = board.get("defines", [])
        arduino_libs = board.get("libraries", [])
        info = board.get("info", "")
        raat = board.get("raat", {})
        log_modules = get_unique_log_modules(board.get("logging", []))
        return cls(
            board_type, board_subtype, name, devices, Parameters([], []), [], settings_dict, info, raat,
            filenames, board, log_modules, defines, arduino_libs
        )
|
class GenericPipelineTask:
    """Base pipeline task: placeholder defining the run() interface."""
    def __init__(self):
        pass
    def run(self):
        """Execute the task (no-op in this stub)."""
        pass
class DataPreparationTask:
    """Pipeline stage stub for data preparation."""
    def __init__(self):
        self.name = 'data_preparation'
    def run(self):
        """Execute the task (no-op in this stub)."""
        pass
class FeatureEngineeringTask:
    """Pipeline stage stub for feature engineering."""
    def __init__(self):
        self.name = 'feature_engineering'
    def run(self):
        """Execute the task (no-op in this stub)."""
        pass
class ModelTrainingTask:
    """Pipeline stage stub for model training."""
    def __init__(self):
        self.name = 'model_training'
    def run(self):
        """Execute the task (no-op in this stub)."""
        pass
class ReportGenerationTask:
    """Pipeline stage stub for report generation."""
    def __init__(self):
        self.name = 'report_generation'
    def run(self):
        """Execute the task (no-op in this stub)."""
        pass
|
import pyttsx3
import datetime
import speech_recognition as sr
import wikipedia
import webbrowser
# Initialise the Windows SAPI5 text-to-speech engine.
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
# Fix: the property that selects the active voice is 'voice' (singular);
# setting 'voices' has no effect in pyttsx3.
engine.setProperty('voice', voices[0].id)
def speak(audio):
    """Speak *audio* aloud through the module-level engine (blocks until done)."""
    engine.say(audio)
    engine.runAndWait()
def wishme():
    """Greet the user according to the current local hour, then introduce the assistant."""
    hour = datetime.datetime.now().hour
    if 0 <= hour < 12:
        speak("good morning boss")
    elif 12 <= hour < 18:
        speak("good afternoon boss")
    else:
        # Fix: the evening greeting was only printed, not spoken like the
        # morning/afternoon branches.
        speak("good evening boss")
    speak(" i Am jarvis ! how may i help you")
def takecommand():
    """Listen on the microphone and return the recognised text, or "none" on failure."""
    recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        print("Listening................")
        recognizer.pause_threshold = 1
        audio = recognizer.listen(source)
    try:
        print("Recognising...")
        query = recognizer.recognize_google(audio, language='en-in')
        print(f"user said: {query}\n")
        return query
    except Exception:
        # Recognition failed (noise, no speech, network); signal with "none".
        print("say this again.....")
        return "none"
if __name__ == "__main__":
    wishme()
    # Simple command loop: most branches answer once and exit; the browser and
    # time commands keep the loop running.
    while True:
        query = takecommand().lower()
        if 'wikipedia' in query:
            speak("Searching wikipedia....")
            query = query.replace("wikipedia", "")
            result = wikipedia.summary(query, sentences=2)
            # Fix: "wikipeida" typo in the spoken confirmation.
            speak("according to wikipedia")
            print(result)
            speak(result)
        elif "you" in query:
            speak("i am good boss! it is depend on you how to take care of me. you are a god for me")
            break
        elif "exam" in query:
            speak("boss today is your design and analysis of algorithm exam.")
            break
        # Fix: "tommorow" could never match — the speech recogniser returns the
        # correctly spelled word "tomorrow".
        elif "tomorrow" in query:
            speak("forget about it boss! just enjoy your day")
            break
        elif "jarvis open youtube" in query:
            speak("opening youtube sir!")
            webbrowser.open("youtube.com")
        elif "hey jarvis what is the time" in query:
            strtime = datetime.datetime.now().strftime("%H:%M:%S")
            speak(f"sir the time is {strtime}")
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 19 08:59:42 2019
@author: SNR
"""
from selenium import webdriver
# Fix: BeautifulSoup 4 lives in the bs4 package; "from BeautifulSoup import ..."
# is the long-dead BS3 import and fails on any modern install.
from bs4 import BeautifulSoup
import pandas as pd

# Scrape the BSE corporate-announcements page into three parallel lists.
# Fix: the webdriver class is Chrome (capitalised); webdriver.chrome is a module.
# Raw string avoids accidental escape sequences in the Windows path.
driver = webdriver.Chrome(r'N:\GitHub\PythonCorseJohn')
DateAndTime = []
News = []
TypeOfNews = []
driver.get('https://www.bseindia.com/corporates/ann.html')
# Fix: attribute typo page_sourcre -> page_source.
content = driver.page_source
soup = BeautifulSoup(content)
for a in soup.findAll('a', href=True, attrs={"class": "ng-binding"}):
    date = a.find('b', attrs={'class': 'ng-binding'})
    news = a.find('a', attrs={'class': 'ng-binding'})
    typeofnews = a.find('td', attrs={'class': 'tdcolumngrey ng-binding'})
    DateAndTime.append(date.text)
    News.append(news.text)
    TypeOfNews.append(typeofnews)
# Fix: list.append returns None, so the old code printed None three times;
# print the collected lists instead.
print(DateAndTime)
print(News)
print(TypeOfNews)
|
# encoding= utf-8
# @Time : 2020/5/11 9:25
# @Author : Yao
# @File : vector.py
# @Software: PyCharm
from math import hypot
class Vector:
    """A 2-D vector supporting repr, abs (magnitude), truthiness, + and scalar *."""
    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y
    def __repr__(self):
        # Same output as 'Vector(%r, %r)' % (x, y).
        return 'Vector({!r}, {!r})'.format(self.x, self.y)
    def __abs__(self):
        # Euclidean magnitude via math.hypot.
        return hypot(self.x, self.y)
    def __bool__(self):
        # Falsy only at the origin (zero magnitude).
        return bool(abs(self))
    def __add__(self, other):
        return Vector(self.x + other.x, self.y + other.y)
    def __mul__(self, scalar):
        return Vector(self.x * scalar, self.y * scalar)
from flask import Flask, flash
from config import config
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_pagedown import PageDown
from flask_moment import Moment
from flask_bootstrap import Bootstrap
from faker import Faker
# Extension singletons, created app-less and bound to the app in create_app().
db = SQLAlchemy()
login_manager = LoginManager()
pagedown = PageDown()
moment = Moment()
bootstrap = Bootstrap()
fake = Faker()
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: load a user by primary key for session management.

    NOTE(review): `User` is not imported in this module as shown — confirm it
    is made available elsewhere, otherwise this raises NameError at first login.
    """
    return User.query.get(int(user_id))
def create_app(config_name):
    """Application factory: build and configure a Flask app for *config_name*.

    Binds every extension singleton to the new app, creates the database
    tables and registers the main blueprint.
    """
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    db.init_app(app)
    login_manager.init_app(app)
    pagedown.init_app(app)
    moment.init_app(app)
    bootstrap.init_app(app)
    # Push an app context so create_all() can run at factory time.
    app.app_context().push()
    db.create_all()
    # Imported here to avoid a circular import at module load time.
    from .main import main as mainBlueprint
    app.register_blueprint(mainBlueprint)
    return app
# Generated by Django 2.0.5 on 2018-09-03 12:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_jalali.db.models
class Migration(migrations.Migration):
    # Auto-generated migration: moves the audit fields (add/confirm date and
    # user) from the Salary model onto SalaryDetail. Do not edit field
    # definitions by hand — they mirror the recorded model state.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('AmadoAccounting', '0034_remove_salary_status'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='salary',
            name='add_date',
        ),
        migrations.RemoveField(
            model_name='salary',
            name='add_user',
        ),
        migrations.RemoveField(
            model_name='salary',
            name='confirm_date',
        ),
        migrations.RemoveField(
            model_name='salary',
            name='confirm_user',
        ),
        migrations.AddField(
            model_name='salarydetail',
            name='add_date',
            field=django_jalali.db.models.jDateTimeField(default='1397-03-28 20:38:00', verbose_name='تاریخ ثبت'),
        ),
        migrations.AddField(
            model_name='salarydetail',
            name='add_user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='salary_register_user', to=settings.AUTH_USER_MODEL, verbose_name='ثبت کننده'),
        ),
        migrations.AddField(
            model_name='salarydetail',
            name='confirm_date',
            field=django_jalali.db.models.jDateTimeField(default='1397-03-28 20:38:00', verbose_name='تاریخ تایید'),
        ),
        migrations.AddField(
            model_name='salarydetail',
            name='confirm_user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='salary_confirm_user', to=settings.AUTH_USER_MODEL, verbose_name='تایید کننده'),
        ),
    ]
|
def PatternToNumber(pattern):
    """Encode a DNA string as its base-4 integer (A=0, C=1, G=2, T=3)."""
    encoding = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    number = 0
    for base in pattern:
        # Shift left one base-4 digit and add the symbol's value
        # (the original recursion with SymbolToNumber inlined).
        number = 4 * number + encoding.get(base)
    return number
def SymbolToNumber(symbol):
    """Map a nucleotide letter to 0-3 (A, C, G, T); any other input yields None."""
    return {'A': 0, 'C': 1, 'G': 2, 'T': 3}.get(symbol)
def ComputingFrequencies(text, k):
    """Return the 4**k frequency array counting every k-mer occurring in *text*."""
    frequencyArray = [0] * (4 ** k)
    for start in range(len(text) - k + 1):
        frequencyArray[PatternToNumber(text[start:start + k])] += 1
    return frequencyArray
|
__author__ = 'mactep'
import argparse
from extra.share import igblastp_tools
def printRegionLabels( dom ):
    # Print aligned "FRn"/"CDRn" column headers over the domain's three
    # framework/CDR region pairs, truncating each label to fit short regions.
    # (Python 2: trailing commas suppress the newline between prints.)
    for i in [1,2,3]:
        FR_S = "FR"
        CDR_S = "CDR"
        if len(dom.getFR(i)) > 0:
            FR_S = FR_S[:(len(dom.getFR(i))-1)]
        if len(dom.getCDR(i)) > 0:
            CDR_S = CDR_S[:(len(dom.getCDR(i))-1)]
        space = len(dom.getFR(i)) - (len(FR_S) + 1)
        print "{0}{1}".format("{0}{1}".format(FR_S, i), " "*space),
        if len(dom.getCDR(i)) > 0:
            space = len(dom.getCDR(i)) - (len(CDR_S) + 1)
            print "{0}{1}".format("{0}{1}".format(CDR_S, i), " "*space),
        else:
            print "",
def printRegionSeqs( dom ):
    # Print the raw FR/CDR sequences for regions 1-3 on a single line.
    for i in [1,2,3]:
        print "{0} {1}".format(dom.getFR(i), dom.getCDR(i)),
def printDomainRegions( dom, useMargin = False ):
    # Print one domain: its name and sequence, then the per-region view with
    # labels above and below. useMargin indents by 4 spaces for nesting under
    # an immunoglobulin header.
    margin = ""
    if useMargin:
        margin = " "*4
    print "{0}{1}:".format(margin, dom.name),
    print "{0}".format(dom.seq)
    print "{0} ".format(margin),
    printRegionLabels(dom)
    print ""
    print "{0} ".format(margin),
    printRegionSeqs(dom)
    print ""
    print "{0} ".format(margin),
    printRegionLabels(dom)
    print ""
def printIgRegions( ig ):
    # Print the light (VL) and heavy (VH) chain domains of one Ig, if present.
    print "{0}:".format(ig.name)
    if ig.getVL() != None:
        printDomainRegions(ig.getVL().getDomain(), True)
    if ig.getVH() != None:
        printDomainRegions(ig.getVH().getDomain(), True)
def printDomainsRegions( domDict ):
    # Print every stand-alone domain in the dict, blank-line separated.
    for dom in domDict:
        printDomainRegions(domDict[dom].getDomain())
        print ""
def printIgsRegions( igDict ):
    # Print every immunoglobulin in the dict, blank-line separated.
    for ig in igDict:
        printIgRegions(igDict[ig])
        print ""
def main():
    # Parse CLI options, run IgBLASTp over the input FASTA and print the
    # FR/CDR regions of every recognised Ig and stand-alone domain.
    parser = argparse.ArgumentParser(description="Region Finding Tool")
    parser.add_argument('source', action="store", help="source FASTA file")
    parser.add_argument('-d', action="store_const", metavar="domain", dest="domain", const=igblastp_tools.DOMAIN_KABAT,
                        default=igblastp_tools.DOMAIN_IMGT, help="use KABAT domain system (default: IMGT)")
    parser.add_argument('-s', metavar="specie", dest="specie", default=igblastp_tools.GERMLINE_HUMAN,
                        action="store", help="specie that is used to align (default: human)")
    args = parser.parse_args()
    res = igblastp_tools.runIgBlastp(args.source, args.specie, args.domain)
    igDict,domDict = igblastp_tools.parseIgBlastpOut(res, args.source)
    printIgsRegions(igDict)
    printDomainsRegions(domDict)
if __name__ == "__main__":
    main()
import sys
import os
import requests
import json
def results(ipaddr):
    """Query ip-api.com for *ipaddr* and return the pretty-printed JSON reply."""
    lookup_url = f'http://ip-api.com/json/{str(ipaddr)}?fields=status,country,countryCode,reverse,query'
    response = requests.get(lookup_url)
    return json.dumps(response.json(), indent=4)
def main(argv):
    """Look up every IP address from ipaddr.txt (if present and non-empty),
    otherwise from the command-line arguments, printing each JSON result.

    Fix: the fallback previously read sys.argv directly, ignoring the *argv*
    parameter the caller passes in.
    """
    ipaddr = "ipaddr.txt"
    if os.path.exists(ipaddr) and os.path.getsize(ipaddr) > 0:
        with open(ipaddr, 'r') as f:
            addresses = f.read().splitlines()
    else:
        addresses = list(argv[1:])
    for address in addresses:
        print(results(address))
# Script entry point: pass the full argv so main() can use argv[1:] as a fallback.
if __name__ == "__main__":
    main(sys.argv)
|
#!/usr/bin/env python3
# CAUTION this script doesn't check for Remote File Inclusion (RFI)
# DISCLAIMER
# ONLY test this in a server you have permission to do it!!!!!!!
from ArgumentHandler import ArgumentHandler
from termcolor import colored
import PayloadManager
import sys
from pyfiglet import Figlet
from proxies_list import clean_proxies
from Crawler import webcrawler
def main():
    """Entry point: parse arguments, optionally crawl, then run the LFI
    payload tests against every target URL.

    Improvements over the previous version:
    - single-URL and multi-URL inputs are normalised into one list, removing
      the duplicated crawl/no-crawl branches (and ``isinstance`` replaces the
      non-idiomatic ``type(...) is not list`` check);
    - ``sys.exit`` replaces the interactive-only ``exit`` builtin.
    """
    try:
        ascii_art = Figlet(font='big')
        print(colored(ascii_art.renderText('LFITester'), 'yellow'))
        arghandler = ArgumentHandler()
        if not arghandler.url:
            arghandler.parser.print_help(sys.stderr)
            sys.exit(1)
        # True when credentials were supplied; forwarded to the crawler.
        check = arghandler.creds is not None
        if arghandler.enable_proxies:
            print(colored("Detected Enabled Proxies. Setting up proxy list...", 'green'))
            clean_proxies()
        print(colored("This script doesn't check for Remote File Inclusion (RFI)", 'blue'))
        print(colored("If it doesn't show any results that means it didn't find anything!!!", 'blue'))
        # Normalise so single and multiple URLs share one code path.
        base_urls = arghandler.url if isinstance(arghandler.url, list) else [arghandler.url]
        for base_url in base_urls:
            if arghandler.crawler:
                targets = webcrawler(base_url, check, arghandler.creds)
            else:
                targets = [base_url]
            for target in targets:
                print(colored(f"Testing: {target}\n\n", 'green'))
                PayloadManager.Payload(target, arghandler.outfile, arghandler.creds, verbosity=arghandler.verbosity)
    except KeyboardInterrupt:
        print('\nGracefully Exiting...\n')
if __name__ == '__main__':
    main()
|
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import namedtuple
import logging
import torch
import os
import sys
try:
import distiller
except ImportError:
module_path = os.path.abspath(os.path.join('..'))
sys.path.append(module_path)
import distiller
import common
import pytest
from models import create_model
from apputils import save_checkpoint, load_checkpoint
# Logging configuration
logging.basicConfig(level=logging.INFO)
fh = logging.FileHandler('test.log')  # mirror test output into test.log
logger = logging.getLogger()  # root logger
logger.addHandler(fh)
# Per-test network description: architecture, dataset, and the names of the
# layers that the pruning tests manipulate.
NetConfig = namedtuple("test_config", "arch dataset conv1_name conv2_name bn_name")
#
# Model configurations
#
def simplenet():
    """Pruning-test configuration for simplenet_cifar (has no batch-norm layer)."""
    cfg = NetConfig(
        arch="simplenet_cifar",
        dataset="cifar10",
        conv1_name="conv1",
        conv2_name="conv2",
        bn_name=None,
    )
    return cfg
def resnet20_cifar():
    """Pruning-test configuration for resnet20_cifar (includes a BN layer)."""
    cfg = NetConfig(
        arch="resnet20_cifar",
        dataset="cifar10",
        conv1_name="layer1.0.conv1",
        conv2_name="layer1.0.conv2",
        bn_name="layer1.0.bn1",
    )
    return cfg
def test_ranked_filter_pruning():
    """Exercise L1-ranked filter pruning at several ratios on both test models."""
    for config, ratio in ((resnet20_cifar(), 0.1),
                          (resnet20_cifar(), 0.5),
                          (simplenet(), 0.5)):
        ranked_filter_pruning(config, ratio_to_prune=ratio)
def test_prune_all_filters():
    """Pruning every filter of a convolution is illegal and must raise ValueError."""
    config = resnet20_cifar()
    with pytest.raises(ValueError):
        ranked_filter_pruning(config, ratio_to_prune=1.0)
def ranked_filter_pruning(config, ratio_to_prune):
    """Test L1 ranking and pruning of filters.

    First we rank and prune the filters of a Convolutional layer using
    a L1RankedStructureParameterPruner. Then we physically remove the
    filters from the model (via "thining" process).

    Args:
        config: NetConfig naming the model, dataset and the two conv layers.
        ratio_to_prune: fraction (0..1) of conv1's filters to prune.
    """
    model, zeros_mask_dict = common.setup_test(config.arch, config.dataset)
    # Test that we can access the weights tensor of the first convolution in layer 1
    conv1_p = distiller.model_find_param(model, config.conv1_name + ".weight")
    assert conv1_p is not None
    num_filters = conv1_p.size(0)
    # Test that there are no zero-filters
    assert distiller.sparsity_3D(conv1_p) == 0.0
    # Create a filter-ranking pruner ("3D" = whole-filter structure)
    reg_regims = {config.conv1_name + ".weight": [ratio_to_prune, "3D"]}
    pruner = distiller.pruning.L1RankedStructureParameterPruner("filter_pruner", reg_regims)
    pruner.set_param_mask(conv1_p, config.conv1_name + ".weight", zeros_mask_dict, meta=None)
    conv1 = common.find_module_by_name(model, config.conv1_name)
    assert conv1 is not None
    # Test that the mask has the correct fraction of filters pruned.
    # We asked for 10%, but there are only 16 filters, so we have to settle for 1/16 filters
    expected_cnt_removed_filters = int(ratio_to_prune * conv1.out_channels)
    expected_pruning = expected_cnt_removed_filters / conv1.out_channels
    masker = zeros_mask_dict[config.conv1_name + ".weight"]
    assert masker is not None
    assert distiller.sparsity_3D(masker.mask) == expected_pruning
    # Use the mask to prune
    assert distiller.sparsity_3D(conv1_p) == 0  # still dense: mask not applied yet
    masker.apply_mask(conv1_p)
    assert distiller.sparsity_3D(conv1_p) == expected_pruning
    # Remove filters
    conv2 = common.find_module_by_name(model, config.conv2_name)
    assert conv2 is not None
    assert conv1.out_channels == num_filters
    assert conv2.in_channels == num_filters
    # Test thinning: removing filters shrinks conv1's output channels and,
    # correspondingly, conv2's input channels.
    distiller.remove_filters(model, zeros_mask_dict, config.arch, config.dataset)
    assert conv1.out_channels == num_filters - expected_cnt_removed_filters
    assert conv2.in_channels == num_filters - expected_cnt_removed_filters
def test_arbitrary_channel_pruning():
    """Remove a couple of specific channels from both test models."""
    for config in (resnet20_cifar(), simplenet()):
        arbitrary_channel_pruning(config, channels_to_remove=[0, 2])
def test_prune_all_channels():
    """Pruning every channel of a convolution is illegal and must raise ValueError."""
    with pytest.raises(ValueError):
        arbitrary_channel_pruning(resnet20_cifar(),
                                  channels_to_remove=list(range(16)))
def test_channel_pruning_conv_bias():
    """Channel pruning must also handle convolutions that carry a bias term."""
    config = simplenet()
    arbitrary_channel_pruning(config, channels_to_remove=[0, 1])
def arbitrary_channel_pruning(config, channels_to_remove):
    """Test removal of arbitrary channels.

    The test receives a specification of channels to remove.
    Based on this specification, the channels are pruned and then physically
    removed from the model (via a "thinning" process).

    Args:
        config: NetConfig naming the model, dataset and the relevant layers.
        channels_to_remove: list of channel indices of conv2 to zero and remove.
    """
    model, zeros_mask_dict = common.setup_test(config.arch, config.dataset)
    conv2 = common.find_module_by_name(model, config.conv2_name)
    assert conv2 is not None
    # Test that we can access the weights tensor of the first convolution in layer 1
    conv2_p = distiller.model_find_param(model, config.conv2_name + ".weight")
    assert conv2_p is not None
    assert conv2_p.dim() == 4
    num_filters = conv2_p.size(0)
    num_channels = conv2_p.size(1)
    kernel_height = conv2_p.size(2)
    kernel_width = conv2_p.size(3)
    cnt_nnz_channels = num_channels - len(channels_to_remove)
    # Let's build our 4D mask.
    # We start with a 1D mask of channels, with all but our specified channels set to one
    channels = torch.ones(num_channels)
    for ch in channels_to_remove:
        channels[ch] = 0
    # Now let's expand back up to a 4D mask
    mask = channels.expand(num_filters, num_channels)
    mask.unsqueeze_(-1)
    mask.unsqueeze_(-1)
    mask = mask.expand(num_filters, num_channels, kernel_height, kernel_width).contiguous()
    assert mask.shape == conv2_p.shape
    assert distiller.density_ch(mask) == (conv2.in_channels - len(channels_to_remove)) / conv2.in_channels
    # Cool, so now we have a mask for pruning our channels.
    # Use the mask to prune
    zeros_mask_dict[config.conv2_name + ".weight"].mask = mask
    zeros_mask_dict[config.conv2_name + ".weight"].apply_mask(conv2_p)
    all_channels = set([ch for ch in range(num_channels)])
    nnz_channels = set(distiller.find_nonzero_channels_list(conv2_p, config.conv2_name + ".weight"))
    channels_removed = all_channels - nnz_channels
    logger.info("Channels removed {}".format(channels_removed))
    # Now, let's do the actual network thinning
    distiller.remove_channels(model, zeros_mask_dict, config.arch, config.dataset)
    conv1 = common.find_module_by_name(model, config.conv1_name)
    logger.info(conv1)
    logger.info(conv2)
    assert conv1.out_channels == cnt_nnz_channels
    assert conv2.in_channels == cnt_nnz_channels
    assert conv1.weight.size(0) == cnt_nnz_channels
    assert conv2.weight.size(1) == cnt_nnz_channels
    if config.bn_name is not None:
        # The batch-norm between conv1 and conv2 must shrink along with them.
        bn1 = common.find_module_by_name(model, config.bn_name)
        assert bn1.running_var.size(0) == cnt_nnz_channels
        assert bn1.running_mean.size(0) == cnt_nnz_channels
        assert bn1.num_features == cnt_nnz_channels
        assert bn1.bias.size(0) == cnt_nnz_channels
        assert bn1.weight.size(0) == cnt_nnz_channels
    # Let's test saving and loading a thinned model.
    # We save 3 times, and load twice, to make sure to cover some corner cases:
    #  - Make sure that after loading, the model still has hold of the thinning recipes
    #  - Make sure that after a 2nd load, there no problem loading (in this case, the
    #  - tensors are already thin, so this is a new flow)
    # (1)
    save_checkpoint(epoch=0, arch=config.arch, model=model, optimizer=None)
    model_2 = create_model(False, config.dataset, config.arch, parallel=False)
    dummy_input = torch.randn(1, 3, 32, 32)
    model(dummy_input)
    model_2(dummy_input)
    conv2 = common.find_module_by_name(model_2, config.conv2_name)
    assert conv2 is not None
    # Loading a thinned checkpoint into an un-thinned model must fail.
    with pytest.raises(KeyError):
        model_2, compression_scheduler, start_epoch = load_checkpoint(model_2, 'checkpoint.pth.tar')
    compression_scheduler = distiller.CompressionScheduler(model)
    hasattr(model, 'thinning_recipes')  # NOTE(review): no-op — probably meant `assert hasattr(...)`; confirm
    # (2)
    save_checkpoint(epoch=0, arch=config.arch, model=model, optimizer=None, scheduler=compression_scheduler)
    model_2, compression_scheduler, start_epoch = load_checkpoint(model_2, 'checkpoint.pth.tar')
    assert hasattr(model_2, 'thinning_recipes')
    logger.info("test_arbitrary_channel_pruning - Done")
    # (3)
    save_checkpoint(epoch=0, arch=config.arch, model=model_2, optimizer=None, scheduler=compression_scheduler)
    model_2, compression_scheduler, start_epoch = load_checkpoint(model_2, 'checkpoint.pth.tar')
    assert hasattr(model_2, 'thinning_recipes')
    logger.info("test_arbitrary_channel_pruning - Done 2")
if __name__ == '__main__':
    # Allow running the pruning tests directly, without pytest.
    test_ranked_filter_pruning()
    test_arbitrary_channel_pruning()
    test_prune_all_channels()
|
import logging
import numpy as np
import scipy.stats
from anndata import AnnData
# Cell filters applied by default in filter_cells(); each names a boolean
# obs column produced by calculate_filter_metrics().
_default_filters = (
    'filter_quality',
    'filter_reads',
    'filter_copy_state_diff',
    'filter_is_s_phase',
)
def calculate_filter_metrics(
    adata: AnnData,
    quality_score_threshold=0.75,
    read_count_threshold=500000,
    copy_state_diff_threshold=1.,
    inplace = False,
) -> AnnData:
    """ Calculate additional filtering metrics to be used by other filtering methods.

    Parameters
    ----------
    adata : AnnData
        copy number data on which to calculate filter metrics
    quality_score_threshold : float, optional
        The minimum quality to set to keep, by default 0.75
    read_count_threshold : int, optional
        The minimum total mapped reads from hmmcopy to set for keeping, by default 500000
    copy_state_diff_threshold : float, optional
        Minimum copy-state difference threshold to set to keep, by default 1.
    inplace : bool, optional
        Whether to modify passed in AnnData, by default False

    Returns
    -------
    AnnData
        AnnData with modified obs if not inplace, otherwise None

    Note
    ----
    The following properties are changed:
    - AnnData.obs.filter_quality
    - AnnData.obs.filter_reads
    - AnnData.obs.filter_copy_state_diff
    If is_s_phase is a property of AnnData
        AnnData.obs.filter_is_s_phase
    AnnData.obs.copy_state_diff
    AnnData.obs.copy_state_diff_mean
    """
    # Not inplace: recurse on a copy with inplace=True and return that copy.
    if not inplace:
        ndad = adata.copy()
        return calculate_filter_metrics(
            ndad,
            quality_score_threshold,
            read_count_threshold,
            copy_state_diff_threshold,
            inplace = True,
        )
    # Filter Quality and Filter Reads
    if 'quality' in adata.obs.columns:
        adata.obs['filter_quality'] = (adata.obs['quality'] > quality_score_threshold)
    else:
        logging.warning("quality is not in AnnData.obs. Skipping filter_quality")
    if 'total_mapped_reads_hmmcopy' in adata.obs.columns:
        adata.obs['filter_reads'] = (adata.obs['total_mapped_reads_hmmcopy'] > read_count_threshold)
    else:
        logging.warning("total_mapped_reads_hmmcopy is not in AnnData.obs. Skipping total_mapped_reads_hmmcopy")
    # Copy State Difference Filter: per-cell mean of |copy - state|
    adata.obsm['copy_state_diff'] = np.absolute(adata.layers['copy'] - adata.layers['state'])
    adata.obsm['copy_state_diff_mean'] = np.nanmean(adata.obsm['copy_state_diff'], axis=1)
    adata.obs['filter_copy_state_diff'] = (adata.obsm['copy_state_diff_mean'] < copy_state_diff_threshold)
    # Filter s phase column: keep cells not flagged as S-phase (NaN counts as not flagged)
    if 'is_s_phase' in adata.obs.columns:
        adata.obs['filter_is_s_phase'] = ~(adata.obs['is_s_phase'].fillna(False))
    else:
        logging.warning("No is_s_phase in AnnData.obs. Skipping filter_is_s_phase")
    return adata
def filter_cells(
    adata: AnnData,
    filters = _default_filters,
    inplace = False,
) -> AnnData:
    """
    Drop poor-quality cells, keeping only rows where every filter column is True.

    Parameters
    -------
    adata : AnnData
        AnnData to preform operation with
    filters : list, optional
        Names of boolean obs columns to apply; columns that are missing are
        skipped with a warning. By default _default_filters.
    inplace
        Whether to modify passed in AnnData. If False, returns new AnnData.

    Returns
    -------
    AnnData
        filtered copy number data
    """
    if not inplace:
        return filter_cells(adata.copy(), filters, inplace=True)
    # Requires that cnfilter.calculate_filter_metrics has been called first.
    for filter_option in filters:
        if filter_option not in adata.obs.columns:
            logging.warning(
                f"WARNING: {filter_option} is not found! "
                "Skipping. Are you sure `scgenome.pp.calculate_filter_metrics` has been called?"
            )
            continue
        adata = adata[adata.obs[filter_option]]
    return adata
|
from rest_framework import pagination
from rest_framework.views import Response
class CustomPageNumberPagination(pagination.PageNumberPagination):
    """Page-number pagination (15 items per page) with a compact response envelope."""
    page_size = 15

    def get_paginated_response(self, data):
        # Envelope: navigation links, total row count, then the page payload.
        payload = {
            'links': {
                'next': self.get_next_link(),
                'previous': self.get_previous_link(),
            },
            'count': self.page.paginator.count,
            'results': data,
        }
        return Response(payload)
class CustomLimitOffsetPagination(pagination.LimitOffsetPagination):
    """Limit/offset pagination: 10 items by default, capped at 15 per request."""
    default_limit = 10
    max_limit = 15
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions for sql instances using v1beta3 API."""
def GetCertRefFromName(
        sql_client, sql_messages, resources, instance_ref, common_name):
    """Build a resource reference to an instance's ssl cert, found by common name.

    Args:
      sql_client: apitools.BaseApiClient, A working client for the sql version
        to be used.
      sql_messages: module, The module that defines the messages for the sql
        version to be used.
      resources: resources.Registry, The registry that can create resource refs
        for the sql version to be used.
      instance_ref: resources.Resource, The instance whose ssl cert is being
        fetched.
      common_name: str, The common name of the ssl cert to be fetched.

    Returns:
      resources.Resource, A ref for the ssl cert being fetched, or None if it
      could not be found.
    """
    cert = GetCertFromName(sql_client, sql_messages, instance_ref, common_name)
    if not cert:
        return None
    # The API addresses certs by sha1 fingerprint, so build the ref from it.
    return resources.Create(
        collection='sql.sslCerts',
        project=instance_ref.project,
        instance=instance_ref.instance,
        sha1Fingerprint=cert.sha1Fingerprint)
def GetCertFromName(
        sql_client, sql_messages, instance_ref, common_name):
    """Find an instance's ssl cert by its common name.

    In versions of the SQL API up to at least v1beta3 certs are addressed by
    sha1 fingerprint, which is not human-friendly, so the CLI asks for the
    common name instead. We therefore list all ssl certs for the instance and
    scan for the one whose common name matches.

    Args:
      sql_client: apitools.BaseApiClient, A working client for the sql version
        to be used.
      sql_messages: module, The module that defines the messages for the sql
        version to be used.
      instance_ref: resources.Resource, The instance whose ssl cert is being
        fetched.
      common_name: str, The common name of the ssl cert to be fetched.

    Returns:
      The matching ssl cert, or None if no cert has that common name.
    """
    request = sql_messages.SqlSslCertsListRequest(
        project=instance_ref.project,
        instance=instance_ref.instance)
    listing = sql_client.sslCerts.List(request)
    return next(
        (cert for cert in listing.items if cert.commonName == common_name),
        None)
|
"""
Given an array of integers a and an integer sum, find all of the unique combinations in a that add up to sum.
The same number from a can be used an unlimited number of times in a combination.
Elements in a combination (a1 a2 … ak) must be sorted in non-descending order, while the combinations themselves must be sorted in ascending order.
If there are no possible combinations that add up to sum, the output should be the string "Empty".
Example
For a = [2, 3, 5, 9] and sum = 9, the output should be
combinationSum(a, sum) = "(2 2 2 3)(2 2 5)(3 3 3)(9)".
Input/Output
[execution time limit] 4 seconds (py3)
[input] array.integer a
An array of positive integers.
Guaranteed constraints:
1 ≤ a.length ≤ 12,
1 ≤ a[i] ≤ 9.
[input] integer sum
Guaranteed constraints:
1 ≤ sum ≤ 30.
[output] string
All possible combinations that add up to a given sum, or "Empty" if there are no possible combinations.
"""
def combinationSum(a, target):
    """Return all unique combinations of values from `a` (reuse allowed) that
    sum to `target`, formatted as "(c1 c2 ...)(...)", or "Empty" if none.

    Each combination is non-descending and combinations appear in ascending
    order because the DFS walks the deduplicated candidates in sorted order.
    """
    candidates = sorted(set(a))
    found = []

    def extend(start, chosen, remaining):
        # Try each candidate from `start` onward so combinations stay sorted.
        for pos in range(start, len(candidates)):
            value = candidates[pos]
            if value < remaining:
                extend(pos, chosen + [value], remaining - value)
            elif value == remaining:
                found.append(chosen + [value])

    extend(0, [], target)
    if not found:
        return "Empty"
    return "(" + ")(".join(" ".join(str(v) for v in combo) for combo in found) + ")"
|
#!/usr/bin/env python
import argparse
import os
import os.path
import shutil
import re
def ParseArgs():
    """Parse the command line: an optional -q/--quiet flag plus the folder regex."""
    parser = argparse.ArgumentParser(description='Delete folders based on a regular expression.')
    parser.add_argument('-q', '--quiet', action="store_true", help="Delete folders without confirmation.")
    parser.add_argument('regex', type=str, help="Regular expression for the folders to delete.")
    return parser.parse_args()
def Run() :
    # List the sub-folders of the CWD whose names match the (anchored,
    # case-insensitive) regex, then delete them — asking for confirmation
    # unless --quiet was given. (Python 2 script: print / raw_input.)
    args = ParseArgs()
    regex = args.regex
    # Anchor the pattern on both ends so e.g. "build" cannot match "build-old".
    if not str.startswith(regex, '^') :
        regex = '^' + regex
    if not str.endswith(regex, '$') :
        regex = regex + '$'
    current_directory = os.getcwd()
    dir_list = [dir for dir in os.listdir(current_directory) if os.path.isdir(dir)]
    dirs_to_remove = [dir for dir in dir_list if re.match(regex, dir, re.IGNORECASE)]
    execute_action = True
    if not args.quiet :
        print "You are going to delete these folders :"
        for dir in dirs_to_remove :
            print dir
        accepted = raw_input('Do you accept (y/N): ')
        if accepted != 'y' :
            execute_action = False
    if execute_action :
        for dir in dirs_to_remove :
            shutil.rmtree(dir)
if __name__ == "__main__" :
Run() |
import numpy as np
import csv
import keras
import random
import utm
import math
import keras.optimizers as op
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Activation, Reshape
from keras.layers import Conv2D, MaxPooling2D, LSTM
from keras.optimizers import SGD, Adam
from keras.utils import plot_model
from keras import losses
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn import preprocessing
# Read the training set
directory = '../../predict/'
train_file = 'train_2g.csv'
test_file = "test_2g.csv"
sta_file = "gongcan.csv"
stations = []
# Parameters
classes_no = 3600  # number of grid-cell classes (wid * hid)
wid = 60           # grid width in cells
hid = 60           # grid height in cells
rate = 64
feature_row = 6    # base stations sampled per record
feature_col = 5    # features per base station
feature_no = 32
steps = 6          # trajectory window length for the LSTM
# Read base-station info (skip the CSV header row)
with open(directory+sta_file) as f:
    reader = csv.reader(f)
    for row in reader:
        if(reader.line_num != 1):
            sta = [0,0,0,0]
            for i in range(4):
                sta[i] = float(row[i])
            stations.append(sta)
sta_num = len(stations)
# find station
def findSta(RID, CID):
    """Return the index into `stations` of the entry with this (RID, CID),
    or -1 when no base station matches."""
    for idx in range(sta_num):
        station = stations[idx]
        if station[0] == RID and station[1] == CID:
            return idx
    return -1
# Read the raw training data (skip the CSV header row)
ismis = []
with open(directory+train_file) as f:
    reader = csv.reader(f)
    for row in reader:
        if(reader.line_num > 1):
            ismis.append(row)
ismis = np.array(ismis,dtype="float")
# Randomly split the samples into a training and a validation set (fixed seed).
def shuffle_train_test(x, y, size):
    """Hold out 10% of the `size` samples (deterministically, seed 1) and
    return (X_train, Y_train, X_test, Y_test) as numpy arrays."""
    random.seed(1)
    held_out = set(random.sample(range(size), k=int(0.1*size)))
    train_x = [x[i] for i in range(size) if i not in held_out]
    train_y = [y[i] for i in range(size) if i not in held_out]
    test_x = [x[i] for i in range(size) if i in held_out]
    test_y = [y[i] for i in range(size) if i in held_out]
    return np.array(train_x), np.array(train_y), np.array(test_x), np.array(test_y)
# Fill in sentinel values (-999 / -1) from the same column of the previous row.
def removeNan(ismi_set):
    """Replace -999/-1 entries in-place with the value one row above; return
    the (mutated) input.

    NOTE(review): for row 0 the "previous" row is index -1, i.e. the LAST
    row — presumably unintended wrap-around; confirm before relying on row 0.
    """
    for r in range(len(ismi_set)):
        row = ismi_set[r]
        for c in range(len(row)):
            if row[c] == -999 or row[c] == -1:
                row[c] = ismi_set[r - 1][c]
    return ismi_set
# Split a trajectory into fixed-length windows; pad a trailing partial window
# with the tail of the previous window.
def sliptSet(array, steps):
    """Return a list of length-`steps` windows covering `array`.

    NOTE(review): when len(array) < steps (and non-empty), chunks[-1] does not
    exist and this raises IndexError — same as the original behaviour.
    """
    array = np.array(array)
    chunks = []
    pending = []
    for feature in array:
        pending.append(feature)
        if len(pending) == steps:
            chunks.append(pending)
            pending = []
    if pending:
        shortfall = steps - len(pending)
        # Borrow the last `shortfall` items of the previous window as padding.
        filler = [chunks[-1][len(pending) + j] for j in range(shortfall)]
        chunks.append(filler + pending)
    return chunks
# Compute the grid-cell id for a UTM coordinate pair.
def calId(array):
    """Return the grid-cell id (col + row * wid) of the UTM point
    [easting, northing] within the hard-coded bounding box."""
    cell_w = (330420 - 328770)/wid
    cell_h = (3463487 - 3462224)/hid
    col = int((array[0] - 328770)/cell_w)
    row = int((array[1] - 3462224)/cell_h)
    # Clamp points on the far edge into the last cell.
    col = min(col, wid - 1)
    row = min(row, hid - 1)
    return int(col + row*wid)
# Compute the UTM coordinates of a grid cell's centre.
def calCenter(no):
    """Return [easting, northing] of the centre of grid cell `no`.

    Bug fix: the row used to be decoded as int(no / hid) although calId()
    encodes cells as col + row * wid; that only worked because wid == hid.
    The row is now decoded with wid, matching the encoding.
    """
    cell_w = (330420 - 328770)/wid
    cell_h = (3463487 - 3462224)/hid
    col = int(no % wid)
    row = int(no / wid)
    east = col*cell_w + cell_w/2
    north = row*cell_h + cell_h/2
    return [328770 + east, 3462224 + north]
def trainModel(ismi_set):
    """Train the stage-2 LSTM: per-point CNN class probabilities are grouped
    into trajectory windows and classified into grid cells; the trained model
    is saved to ./e.LSTM."""
    ismi_set = removeNan(ismi_set)
    # Build the raw feature matrix: per record, `feature_row` base stations,
    # each contributing (station easting, station northing, 3 signal readings).
    all_feature = []
    all_y = []
    for row in ismi_set:
        feature = []
        for i in range(feature_row):
            sample = np.zeros((feature_col),dtype=float)
            RID = float(row[4+i*5])
            CID = float(row[5+i*5])
            no = findSta(RID,CID)
            sta = stations[no]
            sample[0] = sta[2]
            sample[1] = sta[3]
            for j in range(3):
                sample[2+j] = float(row[6+i*5+j])
            feature.append(sample)
        feature = np.array(feature)
        all_feature.append(feature)
        # Columns 34/35 hold the target position (lon/lat) — TODO confirm.
        all_y.append([float(row[34]),float(row[35])])
    all_feature = np.array(all_feature)
    all_y = np.array(all_y)
    # Standardise features, then restore the (rows, cols, 1) shape the CNN expects.
    scalerX = preprocessing.StandardScaler()
    total = len(all_feature)
    all_feature = all_feature.reshape(total*feature_row,feature_col)
    all_feature = scalerX.fit_transform(all_feature)
    all_feature = all_feature.reshape(total,feature_row,feature_col,1)
    # Stage-1 CNN multi-class model: per-point grid-cell probabilities.
    modelCNN = keras.models.load_model("./b_1.CNN")
    all_prob = modelCNN.predict(all_feature)
    # Number of trajectories (column 0 holds the trajectory id).
    track_first = int(ismi_set[0][0])
    track_last = int(ismi_set[-1][0])
    track_no = track_last - track_first + 1
    test_no = 1
    train_no = track_no - test_no
    # Points per trajectory and each trajectory's start offset.
    every_track_no = np.zeros(track_no,dtype=int)
    track_start = np.zeros(track_no,dtype=int)
    start = 0
    for one in ismi_set:
        every_track_no[int(one[0])-track_first] += 1
    for i in range(track_no):
        track_start[i] = start
        start += every_track_no[i]
    # Group the CNN outputs by trajectory, then cut into fixed-length windows.
    feature_set = []
    y_set = []
    for i in range(track_no):
        track_feature = []
        track_y = []
        # All points sharing one trajectory id form one group.
        for j in range(every_track_no[i]):
            index = j + track_start[i]
            row = all_prob[index]
            track_feature.append(row)
            track_y.append(all_y[index])
        # Cut into windows of `steps` points.
        split_track_feature = sliptSet(track_feature,steps)
        split_track_y = sliptSet(track_y,steps)
        for j in range(len(split_track_feature)):
            feature_set.append(split_track_feature[j])
            y_set.append(split_track_y[j])
    feature_set = np.array(feature_set)
    y_set = np.array(y_set)
    # Split into training and validation sets.
    x_train, y_train, x_test, y_test = shuffle_train_test(feature_set, y_set,round(len(all_feature)/steps))
    # Convert lat/lon targets into one-hot grid-cell class vectors.
    y_classes = []
    for track in y_train:
        track_classes = []
        for point in track:
            classes = np.zeros(classes_no,dtype=int)
            u = utm.from_latlon(point[1],point[0])
            p_id = calId([u[0],u[1]])
            classes[p_id] = 1
            track_classes.append(classes)
        y_classes.append(track_classes)
    y_classes = np.array(y_classes)
    print(y_classes.shape)
    # Train the model.
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=1e-8, amsgrad=False)
    model = Sequential()
    model.add(LSTM(64*steps, input_shape = (steps,x_train.shape[2])))
    print(model.get_layer(index=0).output_shape)
    model.add(Reshape((steps,64)))
    print(model.get_layer(index=1).output_shape)
    model.add(Dense(output_dim=classes_no, activation="softmax"))
    model.compile(optimizer=adam,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(x_train,y_classes,epochs=150, batch_size=64)
    model.save('./e.LSTM')
# Train the LSTM trajectory model on the raw records loaded above.
trainModel(ismis)
|
from os.path import join
import redis
from catcher.core.runner import Runner
from catcher.utils.misc import try_get_object
from test.abs_test_class import TestClass
class RedisTest(TestClass):
    """Integration tests for the catcher `redis` step, run against a local Redis."""

    def __init__(self, method_name):
        super().__init__('redis', method_name)

    def test_set(self):
        # A `set` step must write the value so a raw redis client can read it back.
        self.populate_file('main.yaml', '''---
        steps:
            - redis:
                request:
                    set:
                        foo: 11
        ''')
        runner = Runner(self.test_dir, join(self.test_dir, 'main.yaml'), None)
        self.assertTrue(runner.run_tests())
        r = redis.StrictRedis()
        self.assertEqual(b'11', r.get('foo'))

    def test_set_complex(self):
        # Setting a templated dict value must round-trip through redis.
        self.populate_file('main.yaml', '''---
        variables:
            complex:
                a: 1
                b: 'c'
                d: [1,2,4]
        steps:
            - redis:
                request:
                    set:
                        key: '{{ complex }}'
        ''')
        runner = Runner(self.test_dir, join(self.test_dir, 'main.yaml'), None)
        self.assertTrue(runner.run_tests())
        r = redis.StrictRedis()
        self.assertEqual({'a': 1, 'b': 'c', 'd': [1, 2, 4]}, try_get_object(r.get('key').decode()))

    def test_get_number(self):
        # A numeric value written out-of-band must be readable (and comparable) in a check step.
        r = redis.StrictRedis()
        r.set('key', 17)
        self.populate_file('main.yaml', '''---
        steps:
            - redis:
                request:
                    get: key
                register: {var: '{{ OUTPUT }}'}
            - check:
                equals: {the: '{{ var }}', is: 17}
        ''')
        runner = Runner(self.test_dir, join(self.test_dir, 'main.yaml'), None)
        self.assertTrue(runner.run_tests())

    def test_set_get(self):
        # set followed by get must return the same (templated, complex) value.
        self.populate_file('main.yaml', '''---
        variables:
            complex:
                a: 1
                b: 'c'
                d: [1,2,4]
        steps:
            - redis:
                request:
                    set:
                        key: '{{ complex }}'
            - redis:
                request:
                    get: 'key'
                register: {var: '{{ OUTPUT }}'}
            - check:
                equals: {the: '{{ var }}', is: '{{ complex }}'}
        ''')
        runner = Runner(self.test_dir, join(self.test_dir, 'main.yaml'), None)
        self.assertTrue(runner.run_tests())

    def test_incr_decr_delete(self):
        # Exercise decr, incrby and delete; after the delete the key must be gone.
        self.populate_file('main.yaml', '''---
        steps:
            - redis:
                actions:
                    - request:
                        set:
                            foo: 11
                    - request:
                        decr: 'foo'
                    - request:
                        get: 'foo'
                      register: {var: '{{ OUTPUT }}'}
            - check:
                equals: {the: '{{ var }}', is: 10}
            - redis:
                actions:
                    - request:
                        incrby:
                            foo: 5
                    - request:
                        get:
                            - foo
                      register: {var: '{{ OUTPUT }}'}
            - check:
                equals: {the: '{{ var }}', is: 15}
            - redis:
                request:
                    delete: foo
        ''')
        runner = Runner(self.test_dir, join(self.test_dir, 'main.yaml'), None)
        self.assertTrue(runner.run_tests())
        r = redis.StrictRedis()
        self.assertIsNone(r.get('foo'))
|
import numpy as np
# Demo of ndarray attributes, one per section, separated by '*' rules.
# ndim: number of dimensions
b = np.arange(24).reshape(2, 12)
print(b)
print(b.ndim)
print('*' * 30)
# size: total number of elements
b = np.arange(24).reshape(2, 12)
print(b)
print(b.size)
print('*' * 30)
# itemsize: bytes per element
b = np.arange(24).reshape(2, 12)
print(b)
print(b.itemsize)
print('*' * 30)
# nbytes: total bytes (size * itemsize)
b = np.arange(24).reshape(2, 12)
print(b)
print(b.nbytes)
print('*' * 30)
# resize: change the shape in place
b = np.arange(24).reshape(2, 12)
print(b)
print('*' * 30)
b.resize(6, 4)
print(b)
print('*' * 30)
# T: transposed view
print(b.T)
print('*' * 30)
# Transposing a 1-D array is a no-op
b = np.arange(5)
print(b)
print(b.ndim)
print(b.T)
print('*' * 30)
# Complex arrays: real and imaginary parts
b = np.array([1.j + 1, 2.j + 3])
print(b)
print('*' * 30)
b = np.array([1.j + 1, 2.j + 3])
print(b)
print(b.real)
print('*' * 30)
b = np.array([1.j + 1, 2.j + 3])
print(b)
print(b.imag)
print('*' * 30)
# dtype and its string type code
b = np.array([1.j + 1, 2.j + 3])
print(b.dtype)
print(b.dtype.str)
print('*' * 30)
# flat: 1-D iterator over all elements; supports indexing and assignment
b = np.arange(4).reshape(2, 2)
print(b)
f = b.flat
print(f)
for item in f:
    print('item', item)
print(b.flat[2])
print(b.flat[[1, 3]])
b.flat = 7  # broadcast-assign to every element
print(b)
# or selected elements
b.flat[[1, 3]] = 1
print(b)
|
# -*- coding:utf-8 -*-
# author: bcabezas@apsl.net
from django.db import models
from cms.models.fields import PlaceholderField
from cms.models import CMSPlugin
from easy_thumbnails.files import get_thumbnailer
from django.conf import settings
from django.contrib.staticfiles.finders import find as staticfiles_find
import os
import re
class SliderImage(models.Model):
    """Image class that user django-filer.

    A slider image with an optional display name and ordering index.
    """
    name = models.CharField(max_length=150, blank=True)
    description = models.TextField(blank=True)
    image = models.ImageField(upload_to="jcslider")
    order = models.PositiveIntegerField(default=100)  # lower values sort first

    class Meta:
        verbose_name = u'Image'
        verbose_name_plural = u'Images'
        ordering = ('order', 'name',)

    def __unicode__(self):
        # Prefer the explicit name; fall back to the image's file name, then to
        # the raw field value if the file is missing or unreadable.
        if self.name:
            name = self.name
        else:
            try:
                name = self.image.file.name.split("/")[-1]
            except Exception:  # bug fix: was a bare `except:` (also caught SystemExit etc.)
                name = unicode(self.image)
        return "%s" % name

    def thumb(self):
        """Return an <img> tag with a small (92x37) cropped admin thumbnail."""
        thumbnail_options = dict(size=(92, 37), crop=True)
        url = get_thumbnailer(self.image).get_thumbnail(thumbnail_options).url
        return '<img src="%s">' % url
    thumb.allow_tags = True
    thumb.short_description = 'Image'
class SliderAlbum(models.Model):
    """Image gallery for slider"""
    name = models.CharField(max_length=150)
    images = models.ManyToManyField(SliderImage, blank=True)

    class Meta:
        verbose_name = u'Slider Album'
        verbose_name_plural = u'Sliders Album'

    def __unicode__(self):
        # Guard against a null name so unicode() never returns None.
        return self.name or ""
class SliderPlugin(CMSPlugin):
    """CMS plugin holding the slider settings and the album to display."""
    title = models.CharField('Title', max_length=255, null=True, blank=True)
    album = models.ForeignKey(SliderAlbum)
    anim_speed = models.PositiveIntegerField(default=500, help_text="Animation Speed (ms)")
    pause_time = models.PositiveIntegerField(default=3000, help_text="Pause time (ms)")
    image_width = models.PositiveIntegerField(null=True, blank=True,
        help_text="Width for images. Only requided for flexible theme types. Blank for theme spec auto.detection")
    image_height = models.PositiveIntegerField(null=True, blank=True,
        help_text="Height for images. Only requided for flexible theme types. Blank for theme spec auto.detection")
    show_ribbon = models.BooleanField(default=True, help_text="Show ribbon logo")

    def __unicode__(self):
        # Bug fix: previously returned None when title was empty, which makes
        # unicode()/str() callers crash. Fall back to "" like SliderAlbum does.
        return self.title or u""

    # def read_theme_css(self):
    #     cssfile = staticfiles_find("nivo/themes/%(theme)s/%(theme)s.css" % self.__dict__)
    #     return open(cssfile).read()
    # def get_theme_type(self):
    #     """ Get geometry type from the doc header of css theme file"""
    #     css = self.read_theme_css()
    #     rawstr = r""".*Skin Type: (?P<gtype>\w+?)\s"""
    #     match_obj = re.search(rawstr, css, re.MULTILINE| re.DOTALL)
    #     gtype = match_obj.group('gtype')
    #     return gtype
    # def get_theme_geometry(self):
    #     """ Get with and heigth from the doc header of css theme file"""
    #     css = self.read_theme_css()
    #     rawstr = r"""Image Width: (?P<width>\d+).*Image Height: (?P<height>\d+)"""
    #     match_obj = re.search(rawstr, css, re.MULTILINE| re.DOTALL)
    #     width = match_obj.group('width')
    #     height = match_obj.group('height')
    #     return (width, height)

    def save(self, *args, **kwargs):
        # Theme auto-detection is disabled (see the commented helpers above);
        # the base implementation is used unchanged.
        # if self.get_theme_type() != 'flexible':
        #     width, height = self.get_theme_geometry()
        #     self.image_width = width
        #     self.image_height = height
        super(SliderPlugin, self).save(*args, **kwargs)
    search_fields = ('title',)
|
"""Utilities used by the SpikeGLX interfaces."""
import json
from datetime import datetime
from pathlib import Path
from ....utils import FilePathType
def get_session_start_time(recording_metadata: dict) -> datetime:
    """
    Fetch the session start time from the recording_metadata dictionary.

    Parameters
    ----------
    recording_metadata : dict
        The metadata dictionary as obtained from the SpikeGLX recording.

    Returns
    -------
    datetime or None
        The session start time in datetime format, or None when the field is
        missing or has been anonymized.
    """
    session_start_time = recording_metadata.get("fileCreateTime", None)
    # Bug fix: check for a missing/empty value *before* calling startswith --
    # the original raised AttributeError (None.startswith) when the
    # "fileCreateTime" key was absent.
    if not session_start_time:
        return None
    if session_start_time.startswith("0000-00-00"):
        # Date was removed. This sometimes happens with human data to protect
        # the anonymity of medical patients.
        return None
    return datetime.fromisoformat(session_start_time)
def fetch_stream_id_for_spikelgx_file(file_path: FilePathType) -> str:
    """
    Return the stream_id encoded in a SpikeGLX file name.

    File names look like ``Noise4Sam_g0_t0.nidq.bin`` or
    ``Noise4Sam_g0_t0.imec0.lf.bin``: the dot-separated parts carry the user
    chosen name, the segment index, the device ("nidq" or "imecN") and, for
    imec devices, the signal kind ("ap" or "lf"). The stream_id is the device
    concatenated with the signal kind (empty for nidq).

    Parameters
    ----------
    file_path : FilePathType
        Path of the SpikeGLX file.

    Returns
    -------
    str
        The stream_id, e.g. ``"imec0.lf"`` or ``"nidq"``.
    """
    name_parts = Path(file_path).suffixes
    # The device suffix still carries its leading dot at this point.
    device_suffix = next(part for part in name_parts
                         if "imec" in part or "nidq" in part)
    if "imec" in device_suffix:
        kind_suffix = next(part for part in name_parts
                           if "ap" in part or "lf" in part)
    else:
        kind_suffix = ""  # nidq streams have no signal-kind component
    # Drop the leading "." of the device suffix; the kind suffix keeps its dot.
    return device_suffix[1:] + kind_suffix
def get_device_metadata(meta) -> dict:
    """Build the device dictionary, embedding probe metadata in the description.

    The metadata keys follow
    https://billkarsh.github.io/SpikeGLX/Sgl_help/Metadata_30.html

    Returns
    -------
    dict
        A dict with ``name``, ``description`` and ``manufacturer`` entries;
        ``description`` is either a JSON dump of the collected probe metadata
        or the literal string "no description" when nothing was found.
    """
    probe_descriptions = {"0": "NP1.0", "21": "NP2.0(1-shank)", "24": "NP2.0(4-shank)"}

    collected = dict()
    if "imDatPrb_type" in meta:
        probe_type = str(meta["imDatPrb_type"])
        collected["probe_type"] = probe_type
        collected["probe_type_description"] = probe_descriptions[probe_type]
    if "imDatFx_pn" in meta:
        collected["flex_part_number"] = meta["imDatFx_pn"]
    if "imDatBsc_pn" in meta:
        collected["connected_base_station_part_number"] = meta["imDatBsc_pn"]

    description = json.dumps(collected) if collected else "no description"
    return dict(name="Neuropixel-Imec", description=description, manufacturer="Imec")
|
from webapp import create_app
from webapp.hh_2 import hh_parse
from flask import current_app

# Entry-point script: build the Flask application and run the HeadHunter
# parser inside an application context so that `current_app` (and its config)
# is available to the call below.
app = create_app()
with app.app_context():
    # BASE_URL / HEADERS come from the app config -- presumably the API
    # endpoint and the request headers for hh_parse; confirm in webapp config.
    hh_parse(current_app.config['BASE_URL'], current_app.config['HEADERS'])
# Read the number to analyse from stdin (prompt is in Slovak: "Enter a number").
cislo = int(input("Zadajte číslo: "))
def najdiDelitele(x):
    """Return every positive divisor of ``x`` in ascending order."""
    return [kandidat for kandidat in range(1, x + 1) if x % kandidat == 0]
def jePrvocislo(y):
    """Return 1 if ``y`` is prime, else 0.

    Improvement over the original: trial division up to sqrt(y) instead of
    enumerating every divisor via ``najdiDelitele`` -- O(sqrt n) rather than
    O(n), and self-contained. The 1/0 return convention is preserved for
    existing callers.
    """
    if y < 2:
        return 0  # 0, 1 and negatives are not prime (matches original output)
    d = 2
    while d * d <= y:
        if y % d == 0:
            return 0
        d += 1
    return 1
def najdiPrvocisla(z):
    """Return the distinct prime divisors of ``z`` in ascending order.

    Improvement over the original: factorizes directly in O(sqrt n) rather
    than listing all divisors and primality-testing each one. Output matches
    the original exactly: distinct primes ascending, and [] for z < 2
    (including the original's explicit z == 1 case).
    """
    if z < 2:
        return []
    prvocisla = []
    zvysok = z
    faktor = 2
    while faktor * faktor <= zvysok:
        if zvysok % faktor == 0:
            prvocisla.append(faktor)
            # Strip the factor completely so each prime is recorded once.
            while zvysok % faktor == 0:
                zvysok //= faktor
        faktor += 1
    if zvysok > 1:
        prvocisla.append(zvysok)  # remaining cofactor is itself prime
    return prvocisla
# Report the prime divisors of the entered number (message is in Slovak).
print("Prvočíselne delitele čísla " + str(cislo) + " sú: " + str(najdiPrvocisla(cislo)))
# Problem: five people sit in a row. The 5th says he is two years older than
# the 4th, the 4th two years older than the 3rd, the 3rd two years older than
# the 2nd, the 2nd two years older than the 1st, and the 1st says he is 10.
# How old is the 5th person?
a = 10
for i in range(2, 6):  # each later person is two years older than the previous
    a = a + 2
    print("第%d个人%d岁!" % (i, a))
# Recursive alternative kept from the original author:
# def age(n, a):
#     if n == 1:
#         return a
#     else:
#         return age(n - 1, a) + 2
#
#
# a = age(5, 10)
# print(a)
"""Module with functions for making forecast scenarios."""
from __future__ import division
from collections import namedtuple
import logging
from frozendict import frozendict
import numpy as np
import xarray as xr
from fbd_core.etl.computation import weighted_mean, weighted_quantile
from fbd_core.etl.transformation import expand_dimensions
from fbd_core import YearRange
LOGGER = logging.getLogger(__name__)
def weighted_mean_with_extra_dim(data, stat_dims, weights, extra_dim=None):
    """Calculates the weighted-mean. If `extra_dim` is a dimension of `data`
    then loop through the `extra_dim` coordinates and calculate coord-specific
    ARCs using that coord's specific weights. Otherwise one ARC for all coords.

    Args:
        data (xarray.DataArray):
            Data to compute a weighted mean for.
        stat_dims (str, list[str]):
            dimension(s) of the dataarray to reduce over
        weights (xarray.DataArray):
            a 1-D dataarray the same length as the weighted dim, with dimension
            name equal to that of the weighted dim. Must be nonnegative.
        extra_dim (str):
            Extra dimension that exists in `weights` and `data`. It should not
            be in `stat_dims`.

    Returns:
        (xarray.DataArray):
            The mean over the given dimension. So it will contain all
            dimensions of the input that are not in ``stat_dims``.

    Raises:
        (ValueError):
            * If `weights` has more than 1 dimension while `extra_dim` is None.
            * If `extra_dim` is in `stat_dims`.
            * If `extra_dim` is not a dimension of `weights`.
            * If `extra_dim` does not have the same coordinates for `weights`
              and `data`.
    """
    LOGGER.debug("Entering the `weighted_mean_with_extra_dim` function")
    LOGGER.debug("extra_dim:{}".format(extra_dim))
    if len(weights.dims) > 1 and not extra_dim:
        dim_err_msg = ("`weights` cannot have more than 1 dim if `extra_dim` "
                       "is None")
        LOGGER.error(dim_err_msg)
        raise ValueError(dim_err_msg)
    elif extra_dim and extra_dim in stat_dims:
        # Error-message fix: the original read "{} must cannot be in ...".
        dim_err_msg = "{} cannot be in `stat_dims`".format(extra_dim)
        LOGGER.error(dim_err_msg)
        raise ValueError(dim_err_msg)
    elif extra_dim and extra_dim not in weights.dims:
        # Error-message fix: the original read "{} must a dimension of ...".
        dim_err_msg = "{} must be a dimension of `weights`".format(extra_dim)
        LOGGER.error(dim_err_msg)
        raise ValueError(dim_err_msg)
    elif extra_dim and extra_dim in weights.dims:
        if extra_dim and extra_dim not in data.dims:
            # NOTE(review): "draw" is hard-coded although `extra_dim` is
            # generic -- presumably `extra_dim` is always "draw" in practice;
            # confirm before generalizing.
            data = expand_dimensions(data, draw=weights["draw"].values)
        elif extra_dim and not data[extra_dim].equals(weights[extra_dim]):
            dim_err_msg = ("The {} dimension must have the same coordinates "
                           "for `weights` and `data`".format(extra_dim))
            LOGGER.error(dim_err_msg)
            raise ValueError(dim_err_msg)
        else:
            pass  # `data` already has "draw" dim with same coords as `weights`
        # Take one weighted mean per `extra_dim` coordinate, then stitch the
        # per-coordinate results back together along `extra_dim`.
        mean = []
        for coord in weights[extra_dim].values:
            LOGGER.debug("coord: {}".format(coord))
            coord_specific_data = data.loc[{extra_dim: coord}]
            coord_specific_weights = weights.loc[{extra_dim: coord}]
            coord_specific_mean = weighted_mean(coord_specific_data, stat_dims,
                                                coord_specific_weights)
            mean.append(coord_specific_mean)
        mean = xr.concat(mean, dim=extra_dim)
    else:
        mean = weighted_mean(data, stat_dims, weights)
    LOGGER.debug("Leaving the `weighted_mean_with_extra_dim` function")
    return mean
def weighted_quantile_with_extra_dim(data, quantiles, stat_dims, weights,
                                     extra_dim=None):
    """Calculates weighted quantiles. If `extra_dim` is a dimension of `data`
    then loop through the `extra_dim` coordinates and calculate coord-specific
    ARCs using that coord's specific weights. Otherwise one ARC for all coords.

    Args:
        data (xarray.DataArray):
            Data to compute weighted quantiles for.
        quantiles (float or list of float):
            quantile(s) to evaluate. Must be <= 1.
        stat_dims (str, list[str]):
            dimension(s) of the dataarray to reduce over
        weights (xarray.DataArray):
            a 1-D dataarray the same length as the weighted dim, with dimension
            name equal to that of the weighted dim. Must be nonnegative.
        extra_dim (str):
            Extra dimension that exists in `weights` and `data`. It should not
            be in `stat_dims`.

    Returns:
        (xarray.DataArray):
            The quantiles over the given dimension. So it will contain all
            dimensions of the input that are not in ``stat_dims``.

    Raises:
        (ValueError):
            * If `weights` has more than 1 dimension while `extra_dim` is None.
            * If `extra_dim` is in `stat_dims`.
            * If `extra_dim` is not a dimension of `weights`.
            * If `extra_dim` does not have the same coordinates for `weights`
              and `data`.
    """
    LOGGER.debug("Entering the `weighted_quantile_with_extra_dim` function")
    LOGGER.debug("extra_dim:{}".format(extra_dim))
    if len(weights.dims) > 1 and not extra_dim:
        dim_err_msg = ("`weights` cannot have more than 1 dim if `extra_dim` "
                       "is None")
        LOGGER.error(dim_err_msg)
        raise ValueError(dim_err_msg)
    elif extra_dim and extra_dim in stat_dims:
        # Error-message fix: the original read "{} must cannot be in ...".
        dim_err_msg = "{} cannot be in `stat_dims`".format(extra_dim)
        LOGGER.error(dim_err_msg)
        raise ValueError(dim_err_msg)
    elif extra_dim and extra_dim not in weights.dims:
        # Error-message fix: the original read "{} must a dimension of ...".
        dim_err_msg = "{} must be a dimension of `weights`".format(extra_dim)
        LOGGER.error(dim_err_msg)
        raise ValueError(dim_err_msg)
    elif extra_dim and extra_dim in weights.dims:
        if extra_dim and extra_dim not in data.dims:
            # NOTE(review): "draw" is hard-coded although `extra_dim` is
            # generic -- presumably `extra_dim` is always "draw" in practice;
            # confirm before generalizing.
            data = expand_dimensions(data, draw=weights["draw"].values)
        elif extra_dim and not data[extra_dim].equals(weights[extra_dim]):
            dim_err_msg = ("The {} dimension must have the same coordinates "
                           "for `weights` and `data`".format(extra_dim))
            LOGGER.error(dim_err_msg)
            raise ValueError(dim_err_msg)
        else:
            pass  # `data` already has "draw" dim with same coords as `weights`
        # Take weighted quantiles per `extra_dim` coordinate, then stitch the
        # per-coordinate results back together along `extra_dim`.
        quantile_values = []
        for coord in weights[extra_dim].values:
            LOGGER.debug("coord: {}".format(coord))
            coord_specific_data = data.loc[{extra_dim: coord}]
            coord_specific_weights = weights.loc[{extra_dim: coord}]
            coord_specific_quantile_values = (
                weighted_quantile(da=coord_specific_data, q=quantiles,
                                  dim=stat_dims, ws=coord_specific_weights))
            quantile_values.append(coord_specific_quantile_values)
        quantile_values = xr.concat(quantile_values, dim=extra_dim)
    else:
        quantile_values = weighted_quantile(da=data, q=quantiles,
                                            dim=stat_dims, ws=weights)
    LOGGER.debug("Leaving the `weighted_quantile_with_extra_dim` function")
    return quantile_values
def truncate_dataarray(dataarray, quantile_dims, replace_with_mean=False,
                       mean_dims=None, weights=None, quantiles=None,
                       extra_dim=None):
    r"""Truncates the dataarray over the given dimensions, meaning that data
    outside the upper and lower quantiles, which are taken across the
    dimensions ``quantile_dims``, are replaced either with:

    1. the upper and lower quantiles themselves.
    2. or with the mean of the in-lier data, which is taken across the
       dimensions given by ``mean_dims``.

    **Note**: If weights are given, then weighted-quantiles and weighted-means
    are taken, otherwise the quantiles and means are unweighted.

    Args:
        dataarray (xarray.DataArray):
            dataarray that has at least the dimensions given by
            ``quantile_dims``, and if ``replace_with_mean`` is True, then also
            ``mean_dims``.
        quantile_dims (list[str]):
            dimensions to take quantiles over -- the quantiles are
            used to make the bounds.
        replace_with_mean (bool, optional):
            If True, then replace values outside of the upper and lower
            quantiles with the mean across the dimensions given by
            `mean_dims`, if False, then replace with the upper and lower
            bounds themselves.
        mean_dims (list[str], optional):
            dimensions to take mean within the bounds over
        weights (xarray.DataArray, optional):
            Must have one dimension and can have up to two dimensions.
        quantiles (tuple[float, float] | list[float, float], optional):
            The pair of quantiles to take; defaults to (0.05, 0.95).
        extra_dim (str):
            Extra dimension that exists in `weights` and `data`. It should not
            be in `stat_dims`.

    Returns:
        (xarray.DataArray):
            Same shape as the original array, but with truncated values.

    Raises:
        (ValueError):
            If `replace_with_mean` is True, and `mean_dims` is not a list of
            strings.
    """
    LOGGER.debug("Entering the `truncate_dataarray` function")
    LOGGER.debug("quantile_dims:{}".format(quantile_dims))
    LOGGER.debug("replace_with_mean:{}".format(replace_with_mean))
    LOGGER.debug("mean_dims:{}".format(mean_dims))
    LOGGER.debug("weights:{}".format(weights))
    LOGGER.debug("quantiles:{}".format(quantiles))
    LOGGER.debug("extra_dim:{}".format(extra_dim))
    if replace_with_mean and not mean_dims:
        mean_dims_err_msg = (
            "If `replace_with_mean` is True, then `mean_dims` "
            "must be a list of strings")
        LOGGER.error(mean_dims_err_msg)
        raise ValueError(mean_dims_err_msg)
    else:
        pass  # `mean_dims` may legitimately be None in this branch
    # NOTE(review): `Quantiles` is not defined in this module's visible scope;
    # the `.lower`/`.upper` access below suggests it is a
    # namedtuple("Quantiles", ["lower", "upper"]) defined elsewhere -- confirm.
    # Sorting guarantees lower <= upper regardless of argument order.
    quantiles = (
        Quantiles(*sorted(quantiles))
        if quantiles else Quantiles(0.05, 0.95))
    if weights is not None:
        quantile_values = weighted_quantile_with_extra_dim(
            dataarray, quantiles, list(quantile_dims), weights, extra_dim)
    else:
        quantile_values = dataarray.quantile(
            quantiles, dim=list(quantile_dims))
    # Per-point truncation bounds, selected off the "quantile" dimension.
    lower_da = quantile_values.sel(quantile=quantiles.lower)
    upper_da = quantile_values.sel(quantile=quantiles.upper)
    if replace_with_mean:
        # Replace out-of-bounds values with the mean of the in-bound values.
        good_indexes = (dataarray >= lower_da) & (dataarray <= upper_da)
        inside_da = dataarray.where(good_indexes)
        outside_da = dataarray.where(~good_indexes)
        if weights is not None:
            inside_mean_da = weighted_mean_with_extra_dim(
                inside_da, mean_dims, weights, extra_dim)
        else:
            inside_mean_da = inside_da.mean(mean_dims)
        # `combine_first` keeps in-bound values and fills the holes (the
        # out-of-bound positions) with the broadcasted in-lier mean.
        truncated_da = (
            inside_da.combine_first(xr.ones_like(outside_da) * inside_mean_da))
    else:
        # Clip to the bounds; broadcast and transpose so the bound arrays line
        # up dimension-for-dimension with `dataarray`.
        expanded_lower_da, _ = xr.broadcast(lower_da, dataarray)
        expanded_lower_da = expanded_lower_da.transpose(*dataarray.coords.dims)
        expanded_upper_da, _ = xr.broadcast(upper_da, dataarray)
        expanded_upper_da = expanded_upper_da.transpose(*dataarray.coords.dims)
        truncated_da = dataarray.clip(
            min=expanded_lower_da, max=expanded_upper_da)
    LOGGER.debug("Leaving the `truncate_dataarray` function")
    return truncated_da
def arc(past_data_da, years, weight_exp, stat_dims, statistic, quantiles=None,
        diff_over_mean=False, truncate=False, truncate_dims=None,
        truncate_quantiles=None, replace_with_mean=False, extra_dim=None):
    r"""Makes rate forecasts by forecasting the Annualized Rates-of-Change
    (ARC) using either weighted means or weighted quantiles.

    The steps for forecasting logged or logitted rates with ARCs are:

    (1) Annualized rate differentials (or annualized rates-of-change if data is
        in log or logit space) are calculated.

        .. Math::
            \vec{D_{p}} =
            [x_{1991} - x_{1990}, x_{1992} - x_{1991}, ... x_{2016} - x_{2015}]

        where :math:`x` are values from ``past_data_da`` for each year and
        :math:`\vec{D_p}` is the vector of differentials in the past.

    (2) Year weights are used to weight recent years more heavily. Year weights
        are made by taking the interval

        .. math::
            \vec{W} = [1, ..., n]^w

        where :math:`n` is the number of past years, :math:`w` is the
        value given by ``weight_exp``, and :math:`\vec{W}` is the vector of
        year weights.

    (3) Weighted quantiles or the weighted mean of the annualized
        rates-of-change are taken over the dimensions.

        .. math::
            s = \text{weighted-statistic}(\vec{W}, \vec{D})

        where :math:`s` is the weighted quantile or weighted mean.

    (4) Future rates-of-change are simulated by taking the interval

        .. math::
            \vec{D_{f}} = [1, ..., m] * s

        where :math:`\vec{D_f}` is the vector of differentials in the future
        and :math:`m` is the number of future years to forecast.

    (5) Lastly, these future differentials are added to the rate of the last
        observed year.

        .. math::
            \vec{X_{f}} = \vec{D_{f}} + x_{2016} = [x_{2017}, ..., x_{2040}]

        where :math:`\vec{X_{f}}` is the vector of forecasted rates.

    Args:
        past_data_da (xarray.DataArray):
            Past data with a year-id dimension. Must be in log or logit space
            in order for this function to actually calculate ARCs, otherwise
            it's just calculating a weighted statistic of the first
            differences.
        years (YearRange):
            past and future year-ids
        weight_exp (float | int | xarray.DataArray):
            power to raise the increasing year weights -- must be nonnegative.
            It can be a dataarray, but must have only one dimension, "draw",
            and it must have the same coordinates on that dimension as
            ``past_data_da``.
        stat_dims (list[str]):
            list of dimensions to take the statistic over
        statistic (str):
            The statistic to use for calculating the ARC of past years. Can
            either be "mean" or "quantile".
        quantiles (object, optional):
            The quantile or quantiles to take on ``past_data``. Defaults to
            None, but must be a float, or an iterable of floats if
            statistic="quantile".
        diff_over_mean (bool, optional):
            If True, then take annual differences for means-of-draws, instead
            of draws. Defaults to False.
        truncate (bool, optional):
            If True, then truncates the dataarray over the given dimensions.
            Defaults to False.
        truncate_dims (list[str], optional):
            A list of strings representing the dimensions to truncate over.
        truncate_quantiles (object, optional):
            The iterable of two floats representing the quantiles to take.
        replace_with_mean (bool, optional):
            If True and `truncate` is True, then replace values outside of the
            upper and lower quantiles taken across "location_id" and "year_id"
            with the mean across "year_id", if False, then replace with the
            upper and lower bounds themselves.
        extra_dim (str):
            Extra dimension that exists in `weights` and `data`. It should not
            be in `stat_dims`.

    Returns:
        (xarray.DataArray):
            Forecasts made using the ARC method.

    Raises:
        ValueError:
            If ``statistic`` is not equal to one of the strings "mean" or
            "quantile"
        ValueError:
            If ``weight_exp`` is a negative number
        ValueError:
            If `truncate` is True and `truncate_quantiles` is not a list of
            floats.
    """
    LOGGER.debug("Entering the `arc` function")
    LOGGER.debug("years:{}".format(years))
    LOGGER.debug("weight_exp:{}".format(weight_exp))
    LOGGER.debug("statistic:{}".format(statistic))
    LOGGER.debug("stat_dims:{}".format(stat_dims))
    LOGGER.debug("quantiles:{}".format(quantiles))
    LOGGER.debug("diff_over_mean:{}".format(diff_over_mean))
    LOGGER.debug("truncate:{}".format(truncate))
    LOGGER.debug("replace_with_mean:{}".format(replace_with_mean))
    LOGGER.debug("truncate_quantiles:{}".format(truncate_quantiles))
    LOGGER.debug("extra_dim:{}".format(extra_dim))
    # `quantiles` may be a single float or an iterable of floats.
    quantile_is_valid = (
        all([isinstance(quantile, float) for quantile in quantiles])
        if hasattr(quantiles, "__iter__") else isinstance(quantiles, float))
    if truncate and not truncate_dims:
        truncate_dims = ["location_id", "year_id"]
    if statistic not in ("mean", "quantile"):
        stat_arg_err_msg = (
            "`statistic` must be one of ('mean', 'quantile'), {} is not valid"
        ).format(statistic)
        LOGGER.error(stat_arg_err_msg)
        raise ValueError(stat_arg_err_msg)
    elif statistic == "quantile" and not quantile_is_valid:
        # NOTE(review): this template has no "{}" field, so the trailing
        # `.format(statistic)` call is a no-op (left byte-identical here).
        qnt_arg_err_msg = (
            "If `statistic='quantile'`, then `quantiles` must be of type float"
            " or a list of floats."
        ).format(statistic)
        LOGGER.error(qnt_arg_err_msg)
        raise ValueError(qnt_arg_err_msg)
    else:
        pass  # valid input given for `statistic` arg
    stat_dims = list(stat_dims)
    # `truncate_quantiles` must be an iterable of floats (a lone float fails).
    trunc_quantile_is_valid = (
        all([isinstance(trunc_quantile, float)
             for trunc_quantile in truncate_quantiles])
        if hasattr(truncate_quantiles, "__iter__") else False)
    if truncate and not trunc_quantile_is_valid:
        truncate_err_msg = (
            "If `truncate` is True, then "
            "`truncate_quantiles` must be a list of floats."
        )
        LOGGER.error(truncate_err_msg)
        raise ValueError(truncate_err_msg)
    elif truncate and trunc_quantile_is_valid:
        # Sort so that Quantiles.lower <= Quantiles.upper.
        truncate_quantiles = Quantiles(*sorted(truncate_quantiles))
    else:
        pass  # `truncate_quantiles` can be None
    # Calculate the annual differentials.
    if diff_over_mean and "draw" in past_data_da.dims:
        annual_diff = past_data_da.mean("draw").sel(
            year_id=years.past_years).diff("year_id", n=1)
    else:
        annual_diff = past_data_da.sel(
            year_id=years.past_years).diff("year_id", n=1)
    if isinstance(weight_exp, xr.DataArray) and "draw" in weight_exp.dims:
        # Broadcast per-draw exponents across the diffed years.
        weight_exp = expand_dimensions(
            weight_exp, year_id=annual_diff["year_id"].values)
    elif isinstance(weight_exp, float) or isinstance(weight_exp, int):
        pass  # weight_exp can be a float or an integer
    else:
        weight_exp_err_msg = (
            "`weight_exp` must be a float, an int, or an xarray.DataArray "
            "with a 'draw' dimension")
        LOGGER.error(weight_exp_err_msg)
        raise ValueError(weight_exp_err_msg)
    # Increasing weights [1, ..., n-1] ** weight_exp, aligned to the diffed
    # years (one fewer than the past years; first past year has no diff).
    year_weights = xr.DataArray(
        (np.arange(len(years.past_years) - 1) + 1),
        dims="year_id", coords={"year_id": years.past_years[1:]}) ** weight_exp
    # If annual-differences were taken over means (`annual_diff` doesn't have
    # a "draw" dimension), but `year_weights` does have a "draw" dimension,
    # then the draw dimension needs to be expanded for `annual_diff` such that
    # the mean is replicated for each draw.
    if "draw" in year_weights.dims and "draw" not in annual_diff.dims:
        annual_diff = expand_dimensions(annual_diff,
                                        draw=year_weights["draw"].values)
    else:
        pass  # `annual_diff` already has a draw dim, or `year_weights` doesn't
    if truncate:
        annual_diff = truncate_dataarray(
            annual_diff, truncate_dims, replace_with_mean=replace_with_mean,
            mean_dims=["year_id"], weights=year_weights,
            quantiles=truncate_quantiles, extra_dim=extra_dim)
    else:
        pass  # Annual differences are not truncated
    # Wrapping in DataArray lets the same test work for scalar and per-draw
    # weight exponents.
    if (xr.DataArray(weight_exp) > 0).any():
        if statistic == "mean":
            arc_da = weighted_mean_with_extra_dim(
                annual_diff, stat_dims, year_weights, extra_dim)
        else:
            arc_da = weighted_quantile_with_extra_dim(
                annual_diff, quantiles, stat_dims, year_weights, extra_dim)
    elif (xr.DataArray(weight_exp) == 0).all():
        # If ``weight_exp`` is zero, then just take the unweighted mean or
        # quantile.
        if statistic == "mean":
            arc_da = annual_diff.mean(stat_dims)
        else:
            arc_da = annual_diff.quantile(q=quantiles, dim=stat_dims)
    else:
        err_msg = "weight_exp must be nonnegative."
        LOGGER.error(err_msg)
        raise ValueError(err_msg)
    # Find future change by multiplying an array that counts the future
    # years, by the quantiles, which is weighted if `weight_exp` > 0. We want
    # the multipliers to start at 1, for the first year of forecasts, and count
    # to one more than the number of years to forecast.
    forecast_year_multipliers = xr.DataArray(
        np.arange(len(years.forecast_years)) + 1,
        dims=["year_id"],
        coords={"year_id": years.forecast_years})
    future_change = arc_da * forecast_year_multipliers
    # Anchor the walked-out differentials at the last observed year.
    forecast_data_da = past_data_da.sel(year_id=years.past_end) + future_change
    LOGGER.debug("Leaving the `arc` function")
    return forecast_data_da
def arc_method(past_data_da, years=None, weight_exp=1.,
               reference_scenario="median", reverse_scenarios=False,
               quantiles=DEFAULT_SCENARIO_QUANTILES, diff_over_mean=False,
               reference_arc_dims=None, scenario_arc_dims=None, truncate=False,
               truncate_dims=None, truncate_quantiles=False,
               replace_with_mean=False, extra_dim=None):
    r"""Makes rate forecasts using the Annualized Rate-of-Change (ARC) method.

    Forecasts rates by taking a weighted quantile or weighted mean of
    annualized rates-of-change from past data, then walking that weighted
    quantile or weighted mean out into future years.

    A reference scenario is made using the weighted median or mean of past
    annualized rates-of-change across all past years.

    Better and worse scenarios are made using weighted 15th and 85th quantiles
    of past annualized rates-of-change across all locations and all past years.
    The minimum and maximum are taken across the scenarios (values are
    granular, e.g. age/sex/location/year specific) and the minimum is taken as
    the better scenario and the maximum is taken as the worse scenario. If
    scenarios are reversed (``reverse_scenarios = True``) then do the opposite.

    NOTE(review): ``DEFAULT_SCENARIO_QUANTILES``, ``DEFAULT_YEAR_RANGE`` and
    ``SCENARIOS`` are module-level constants defined outside this excerpt --
    presumably (0.15, 0.85), a year-range triple, and a name->int scenario-id
    mapping; confirm against the full module.

    Args:
        past_data_da (xarray.DataArray):
            A dataarray of past data that must at least have the dimensions
            ``year_id`` and ``location_id``. The ``year_id`` dimension must
            have coordinates for all the years in ``years.past_years``.
        years (tuple[int] | list[int] | YearRange, optional):
            years to include in the past when calculating ARC.
        weight_exp (float | int | xarray.DataArray):
            power to raise the increasing year weights -- must be nonnegative.
            It can be a dataarray, but must have only one dimension, "draw",
            and it must have the same coordinates on that dimension as
            ``past_data_da``.
        reference_scenario (str, optional):
            If "median" then the reference scenario is made using the
            weighted median of past annualized rates-of-change across all past
            years, if "mean" then it is made using the weighted mean of past
            annualized rates-of-change across all past years. Defaults to
            "median".
        reverse_scenarios (bool, optional):
            If True, reverse the usual assumption that high=bad and low=good.
            For example, we set to True for vaccine coverage, because higher
            coverage is better. Defaults to False.
        quantiles (iterable[float, float], optional):
            The quantiles to use for better and worse scenarios. Defaults to
            ``0.15`` and ``0.85`` quantiles.
        diff_over_mean (bool, optional):
            If True, then take annual differences for means-of-draws, instead
            of draws. Defaults to False.
        reference_arc_dims (list[str], optional):
            To calculate the reference ARC, take weighted mean or median over
            these dimensions. Defaults to ["year_id"]
        scenario_arc_dims (list[str], optional):
            To calculate the scenario ARCs, take weighted quantiles over these
            dimensions. Defaults to ["location_id", "year_id"]
        truncate (bool, optional):
            If True, then truncates the dataarray over the given dimensions.
            Defaults to False.
        truncate_dims (list[str], optional):
            A list of strings representing the dimensions to truncate over.
        truncate_quantiles (object, optional):
            The tuple of two floats representing the quantiles to take.
            NOTE(review): the default is ``False`` rather than ``None``; both
            are falsy, so behavior is the same, but ``None`` would be the
            conventional sentinel.
        replace_with_mean (bool, optional):
            If True and `truncate` is True, then replace values outside of the
            upper and lower quantiles taken across "location_id" and "year_id"
            with the mean across "year_id", if False, then replace with the
            upper and lower bounds themselves.
        extra_dim (str):
            Extra dimension that exists in `weights` and `data`. It should not
            be in `stat_dims`.

    Returns:
        (xarray.DataArray):
            Past and future data with reference, better, and worse scenarios.
            It will include all the dimensions and coordinates of the input
            dataarray and a ``scenario`` dimension with the coordinates 0 for
            reference, -1 for worse, and 1 for better. The ``year_id``
            dimension will have coordinates for all of the years from
            ``years.years``.

    Raises:
        (ValueError):
            If ``weight_exp`` is a negative number or if ``reference_scenario``
            is not "median" or "mean".
    """
    LOGGER.debug("Entering the `arc_method` function")
    LOGGER.debug("years:{}".format(years))
    LOGGER.debug("weight_exp:{}".format(weight_exp))
    LOGGER.debug("reference_scenario:{}".format(reference_scenario))
    LOGGER.debug("reverse_scenarios:{}".format(reverse_scenarios))
    LOGGER.debug("quantiles:{}".format(quantiles))
    LOGGER.debug("diff_over_mean:{}".format(diff_over_mean))
    LOGGER.debug("truncate:{}".format(truncate))
    LOGGER.debug("replace_with_mean:{}".format(replace_with_mean))
    LOGGER.debug("truncate_quantiles:{}".format(truncate_quantiles))
    LOGGER.debug("extra_dim:{}".format(extra_dim))
    years = YearRange(*years) if years else YearRange(*DEFAULT_YEAR_RANGE)
    past_data_da = past_data_da.sel(year_id=years.past_years)
    # Create baseline forecasts. Take weighted median or mean only across
    # years, so values will be as granular as the inputs (e.g. age/sex/location
    # specific)
    if reference_scenario == "median":
        reference_statistic = "quantile"
        reference_quantile = 0.5  # the median is the 0.5 quantile
    elif reference_scenario == "mean":
        reference_statistic = "mean"
        reference_quantile = None
    else:
        err_msg = "reference_scenario must be either 'median' or 'mean'"
        LOGGER.error(err_msg)
        raise ValueError(err_msg)
    if truncate and not truncate_dims:
        truncate_dims = ["location_id", "year_id"]
    # Sort so that Quantiles.lower <= Quantiles.upper; default (0.025, 0.975).
    truncate_quantiles = (
        Quantiles(*sorted(truncate_quantiles))
        if truncate_quantiles else Quantiles(0.025, 0.975))
    reference_arc_dims = reference_arc_dims or ["year_id"]
    reference_da = arc(
        past_data_da, years, weight_exp, reference_arc_dims,
        reference_statistic, reference_quantile, diff_over_mean=diff_over_mean,
        truncate=truncate, truncate_dims=truncate_dims,
        truncate_quantiles=truncate_quantiles,
        replace_with_mean=replace_with_mean, extra_dim=extra_dim)
    forecast_data_da = past_data_da.combine_first(reference_da)
    try:
        forecast_data_da = forecast_data_da.rename({"quantile": "scenario"})
    except ValueError:
        pass  # There is no "quantile" point coordinate.
    forecast_data_da["scenario"] = SCENARIOS["reference"]
    # Create better and worse scenario forecasts. Take weighted 85th and 15th
    # quantiles across year and location, so values will not be location
    # specific (e.g. just age/sex specific).
    scenario_arc_dims = scenario_arc_dims or ["location_id", "year_id"]
    scenarios_da = arc(
        past_data_da, years, weight_exp, scenario_arc_dims, "quantile",
        list(quantiles), diff_over_mean=diff_over_mean, truncate=False,
        replace_with_mean=replace_with_mean, extra_dim=extra_dim)
    scenarios_da = scenarios_da.rename({"quantile": "scenario"})
    scenarios_da.coords["scenario"] = [SCENARIOS["better"], SCENARIOS["worse"]]
    forecast_data_da = xr.concat([forecast_data_da, scenarios_da],
                                 dim="scenario")
    # Get the minimums and maximums across the scenario dimension, and set
    # worse scenarios to the worst (max if normal or min if reversed), and set
    # better scenarios to the best (min if normal or max if reversed).
    low_values = forecast_data_da.min("scenario")
    high_values = forecast_data_da.max("scenario")
    if reverse_scenarios:
        forecast_data_da.loc[{"scenario": SCENARIOS["worse"]}] = low_values
        forecast_data_da.loc[{"scenario": SCENARIOS["better"]}] = high_values
    else:
        forecast_data_da.loc[{"scenario": SCENARIOS["better"]}] = low_values
        forecast_data_da.loc[{"scenario": SCENARIOS["worse"]}] = high_values
    # Prepend the observed past, then order the scenario coordinates.
    forecast_data_da = past_data_da.combine_first(forecast_data_da)
    forecast_data_da = forecast_data_da.loc[
        {"scenario": sorted(forecast_data_da["scenario"])}]
    LOGGER.debug("Leaving the `arc_method` function")
    return forecast_data_da
def approach_value_by_year(past_data, years, target_year, target_value,
                           method='linear'):
    r"""Forecast toward a known target level reached by a known year.

    Handles use-cases where both the desired level and the year by which it
    must be achieved are given. For example, the Rockefeller project for
    min-risk diet scenarios wanted to see the effect of eradicating
    diet-related risks by 2030 on mortality: the SEVs must reach 0 by 2030
    (``target_year``) and stay at 0 (``target_value``) afterwards.

    Args:
        past_data (xarray.DataArray):
            The past data with all past years.
        years (YearRange):
            past and future year-ids
        target_year (int):
            The year at which the target value will be reached.
        target_value (int):
            The target value that needs to be achieved during the target year.
        method (str):
            Extrapolation method used for the years between ``years.past_end``
            and ``target_year``. Only `linear` is currently supported.

    Returns:
        (xarray.DataArray):
            The forecasted results.

    Raises:
        (ValueError):
            If ``method`` is not a supported extrapolation method.
    """
    # Guard clause: reject unsupported methods up front.
    if method != 'linear':
        err_msg = ("Method {} not recognized. Please see the documentation for"
                   " the list of supported methods.").format(method)
        LOGGER.error(err_msg)
        raise ValueError(err_msg)
    return _linear_then_constant_arc(past_data, years, target_year,
                                     target_value)
def _linear_then_constant_arc(past_data, years, target_year, target_value):
    r"""Linearly extrapolate from the last past year to ``target_value`` at
    ``target_year``, then hold that value constant through
    ``years.forecast_end``.

    The slope of the linear ramp is

    .. math::
        R = \frac{target\_value - x_{past\_end}}{target\_year - past\_end}

    and each pre-target future year :math:`k = 1, ..., m` is forecast as
    :math:`x_{past\_end} + k R`. Years after ``target_year`` are filled with
    ``target_value``.

    Args:
        past_data (xarray.DataArray):
            The past data with all past years, assumed to be in normal space.
        years (YearRange):
            past and future year-ids
        target_year (int):
            The year at which the target value will be reached.
        target_value (int):
            The value that needs to be achieved by the `target_year`.

    Returns:
        (xarray.DataArray):
            The forecasted results.
    """
    LOGGER.info("Entered `linear_then_constant_arc` function.")
    ramp_years = np.arange(years.forecast_start, target_year + 1)
    constant_years = np.arange(target_year + 1, years.forecast_end + 1)
    last_observed = past_data.sel(year_id=years.past_end)
    # Slope of the ramp from the last observed value to the target.
    slope = (
        target_value - last_observed) / (target_year - years.past_end)
    # Multipliers 1..m, one per pre-target future year.
    year_multipliers = xr.DataArray(
        np.arange(len(ramp_years)) + 1,
        dims=["year_id"],
        coords={"year_id": ramp_years})
    LOGGER.info("Calculating future rates of change.")
    ramp_forecast = last_observed + slope * year_multipliers
    # Extend with the constant target value for all post-target years.
    forecast = expand_dimensions(
        ramp_forecast, fill_value=target_value, year_id=constant_years)
    LOGGER.info("Leaving `linear_then_constant_arc`.")
    return forecast
|
import sys
import subprocess as sp  # retained from original; no longer used after mkdir fix
import os


def make_poc_html(url):
    """Return the clickjacking proof-of-concept HTML page for *url*."""
    return (
        "<html>\n<head>\n<title>Clickjacking PoC</title>\n</head>\n<body>\n<h3>Copy and paste this URL into your PoC, if clickjacking exists:</h3>\n"
        "<h1>" + url + "</h1>\n"
        "<iframe src=\"" + url + "\" width=100% height=100% style=\"opacity: 0.5;\"></iframe>"
        "\n</body>\n</html>"
    )


def main():
    """Generate one PoC HTML file per subdomain listed in the input file."""
    if len(sys.argv) != 2:
        print("Incorrect Usage")
        print("Usage: python3 click.py <file containing subdomains>")
        sys.exit()
    # BUG FIX: 'sp.run("mkdir poc", shell=True)' spawns a shell and fails when
    # the directory already exists; os.makedirs is direct and idempotent.
    os.makedirs("poc", exist_ok=True)
    # Context managers guarantee the files are closed even on error.
    with open(sys.argv[1], "r") as input_file:
        for i, line in enumerate(input_file, start=1):
            url = line.rstrip("\n")
            out_file_name = "poc/file" + str(i) + ".html"
            with open(out_file_name, "w") as out_file:
                out_file.write(make_poc_html(url))


if __name__ == "__main__":
    main()
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()

# Legacy URLconf using the patterns() helper with dotted-string view paths.
# NOTE(review): django.conf.urls.defaults and string view references were
# removed in later Django versions; this file targets an old Django release.
urlpatterns = patterns('',
    # Example:
    (r'^$', 'bicalca.monitor.views.show'),
    (r'^monitor/add', 'bicalca.monitor.views.add'),
    (r'^monitor/save', 'bicalca.monitor.views.save'),
    (r'^hasser/$', 'bicalca.needs.views.show'),
    (r'^hasser/add', 'bicalca.needs.views.add'),
    (r'^hasser/save', 'bicalca.needs.views.save'),
    (r'^hasser/bought', 'bicalca.needs.views.bought'),
    (r'^login/$', 'bicalca.monitor.views.login_first'),
    (r'^log/$', 'bicalca.monitor.views.log'),
    (r'^out/$', 'bicalca.monitor.views.out'),
    # Uncomment the admin/doc line below and add 'django.contrib.admindocs'
    # to INSTALLED_APPS to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    (r'^admin/', include(admin.site.urls)),
)
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 17 16:07:56 2016
@author: ibackus

Pipeline: build binary features from raw OkCupid data, filter sparse or
degenerate columns, hot-deck-impute missing answers, and split into
train/test pickles.
"""
import numpy as np
import cPickle as pickle
import okcupidio
import pandas as pd

# RUN FLAGS
do_make_features = True
do_filter_features = True
do_pca = True

# SETTINGS
savename = 'features.p'
minResponseRate = 0.65  # keep only questions answered by >= 65% of users
nTest = 2000            # size of the held-out test split

# CODE
if do_make_features:
    # Loads data (fairly raw)
    data = okcupidio.loadData()
    # Preprocess data
    okcupidio.processData(data)
    # And make binary features out of it
    features, featureNames = okcupidio.buildFeatures(data)
    # First, replace -1 with nans
    features[features == -1] = np.nan
    # Save
    # BUG FIX: pickle protocol 2 is a binary format -- the file must be opened
    # in binary mode ('wb'/'rb'); text mode corrupts the stream on Windows.
    with open(savename, 'wb') as f:
        pickle.dump((features, featureNames), f, 2)
else:
    with open(savename, 'rb') as f:
        features, featureNames = pickle.load(f)

if do_filter_features:
    df = pd.DataFrame(data=features, columns=featureNames)
    columns = df.columns[df.columns != 'income']
    # Ignore data with response rates too low
    for col in columns:
        if df[col].notnull().mean() < minResponseRate:
            df.drop(col, 1, inplace=True)
    # Now replace missing data (EXCEPT IN INCOME) by sampling from the
    # observed answers of the same question (hot-deck imputation).
    columns = df.columns[df.columns != 'income']
    for col in columns:
        series = df[col]
        nChoice = series.isnull().sum()
        if nChoice > 0:
            notnulls = series[series.notnull()]
            vals = np.random.choice(notnulls, nChoice)
            series[series.isnull()] = vals
            df[col] = series
    # Now remove people without reported incomes
    df = df[df.income.notnull()]
    # Ignore entries where stddev == 0
    columns = df.columns[df.columns != 'income']
    for col in columns:
        if df[col].std() == 0:
            df.drop(col, 1, inplace=True)
    # drop last_online
    df.drop('last_online', 1, inplace=True)
    # Give us arrays to work with
    income = df.income
    df = df.drop('income', 1)
    names = df.columns.tolist()
    x = df.values
    y = income.values
    with open('dataset.p', 'wb') as f:
        pickle.dump((x, y, names), f, 2)
else:
    with open('dataset.p', 'rb') as f:
        x, y, names = pickle.load(f)

# NOW SPLIT the dataset
y = y.reshape([len(y), 1])
nTrain = len(x) - nTest
np.random.seed(0)  # reproducible shuffle
ind = np.random.rand(len(x)).argsort()
trainInd = ind[0:nTrain]
testInd = ind[nTrain:]
xtrain, ytrain = (x[trainInd], y[trainInd])
xtest, ytest = (x[testInd], y[testInd])
# save the split set (binary mode -- see BUG FIX above)
with open('data_train.p', 'wb') as f:
    pickle.dump((xtrain, ytrain), f, 2)
with open('data_test.p', 'wb') as f:
    pickle.dump((xtest, ytest), f, 2)
|
import numpy as np
def score_game(game_core):
    """Run the guessing game 1000 times and report the average attempt count.

    *game_core* is a callable taking the hidden number and returning how many
    attempts it needed. The RNG seed is fixed so the benchmark is reproducible.
    """
    np.random.seed(1)  # fixed seed so the experiment is reproducible
    targets = np.random.randint(1, 101, size=(1000))
    attempts = [game_core(target) for target in targets]
    score = int(np.mean(attempts))
    print(f"Ваш алгоритм угадывает число в среднем за {score} попыток")
    return score
def game_core_v2(number):
    """Guess *number* in 1..100 by a parity-aware bisection; return attempts.

    The counter starts at 1 so an immediately correct first guess counts as
    one attempt. After each miss the search window [low, high] is narrowed
    using the sign and parity of the miss, then the next guess is the window
    midpoint.
    """
    attempts = 1
    guess = np.random.randint(1, 101)
    low, high = 1, 100
    while guess != number:
        attempts += 1
        # Signed miss; its parity decides how aggressively we shrink the window.
        diff = guess - number
        if diff % 2 == 0:
            if diff > 0:
                high = guess - 2
            else:
                low = guess + 2
        else:
            if diff > 0:
                high = guess
            else:
                low = guess + 1
        # Next probe is the midpoint of the updated window.
        guess = (low + high) // 2
    return attempts
# Benchmark the v2 guessing strategy over 1000 random targets.
score_game(game_core_v2)
|
def make_incrementor(n):
    """Return a closure that adds *n* to its argument."""
    return lambda x: x + n


def main():
    """Demo of closures, truthiness and branching (normalized to Python 3)."""
    f = make_incrementor(2)
    g = make_incrementor(6)
    # BUG FIX: was Python 2 syntax 'print f(42), g(42)' in an otherwise
    # Python 3 style file (SyntaxError under Python 3).
    print(f(42), g(42))
    if 0:
        print("yes")
    else:
        print("no")
    # "a" not in "there" and 6 % 2 == 0, so this prints 'false'.
    if "a" in "there" or 6 % 2:
        print('true')
    else:
        print('false')
    print(6 % 2)
    my_list = ["cat", 2, "dog", 4]
    x = 5 in my_list
    print(x)
    if x:
        print('yes')
    else:
        print('no')
    user_input = int(input("Please provide number: "))
    # 'not (k % 3)' is true exactly when k is divisible by 3.
    if not user_input % 3:
        print("doto")
    if 6 / 7:
        print('three')
    elif 5:
        print('five')
    else:
        print('zero')
    # BUG FIX: 'x == 0' was a no-op comparison whose result was discarded;
    # an assignment was evidently intended.
    x = 0
    # BUG FIX: 'c' was never defined (NameError); 'x' was evidently intended.
    print(not x)


if __name__ == "__main__":
    main()
# Print a sentence that embeds a variable's value.
special_number = 3
message = f"{special_number}是33最喜欢的数字!"
print(message)
from django.urls import path
from .views import upload_csv, download_csv, pause_task, resume_task, cancel_task

# Task-control and CSV transfer endpoints.
urlpatterns = [
    # Control a background task identified by its integer primary key.
    path('task/<int:task_id>/pause/', pause_task, name="pause_task"),
    path('task/<int:task_id>/resume/', resume_task, name="resume_task"),
    path('task/<int:task_id>/cancel/', cancel_task, name="cancel_task"),
    # CSV import/export.
    path('upload/', upload_csv, name="upload_csv"),
    path('download/', download_csv, name="download_csv"),
]
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
from odoo.exceptions import ValidationError
import logging
_logger = logging.getLogger(__name__)
class routeusers(models.Model):
    """Odoo model linking a res.users record to a bus route reservation.

    A record represents one user's seat on a 'busroutes' route; creation is
    guarded against duplicate reservations and over-capacity routes.
    """
    _name = 'routeusers'
    _rec_name = 'name'
    _description = 'routeusers'
    _parent_store = True
    # The reserving user (displayed as the record name via _rec_name).
    name = fields.Many2one(comodel_name="res.users", domain="[]", index=True, string="Users", required=True)
    parent_path = fields.Char(string="parent_path", index=True)
    # The route being reserved; restrict deletion while reservations exist.
    parent_id = fields.Many2one('busroutes', 'Parent Route', ondelete='restrict',required=True)
    parent_left = fields.Integer('Parent Left', index=True)
    parent_right = fields.Integer('Parent Right', index=True)
    # Related (non-stored) convenience fields pulled from the parent route.
    bus_state = fields.Selection(string='Route Status', store=False, related='parent_id.state')
    bus_name = fields.Char(string='Bus name', store=False, related='parent_id.parent_id.name')
    Route_name = fields.Char(string='Route Name', store=False, related='parent_id.name')
    @api.model
    def create(self,values):
        """Validate a new reservation before creating it.

        Raises ValidationError when the user already has a reservation on an
        in-progress route, the route is not in progress, or it is full.
        """
        pid= values['parent_id']
        n = values['name']
        #raise ValidationError(str(n))
        # Reject a second reservation for the same user on any in-progress route.
        ishaveanotherres = self.env['routeusers'].search_count([('name', '=', n),("bus_state","=","InProgress")])
        if ishaveanotherres >0:
            raise ValidationError("You Have another Reservation")
        routestat = self.env['busroutes'].search([('id', '=', pid)], limit=1).state
        if(routestat != "InProgress"):
            raise ValidationError("This Route is not available for reservation ")
        # Capacity check: existing reservations on this route vs. route capacity.
        current = self.env['routeusers'].search_count([('parent_id', '=', pid)])
        capacity = self.env['busroutes'].search([('id', '=', pid)], limit=1).capacity
        if(current>=capacity):
            raise ValidationError("Max Reached : " + str(current) )
        #raise ValidationError()
        # NOTE(review): in @api.model create, self is an empty recordset, so
        # self.parent_id is empty here and this check looks inverted/dead --
        # verify intent before relying on it.
        if(self.parent_id.parent_id.capacity>len(self.parent_id.child_ids)):
            raise ValidationError("Max Reached")
        return super(routeusers,self).create(values)
    @api.multi
    def write(self, values):
        """Pass-through write; placeholder kept for future validation."""
        #raise ValidationError("hi")
        # Add code here
        return super(routeusers, self).write(values)
    # One reservation per (user, route) pair, enforced at the DB level.
    _sql_constraints = [('OneUserOnly', 'UNIQUE (name,parent_id)', 'User already exists'), ]
|
import numpy as np
import cv2

# put these files on the desktop
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')

img = cv2.imread('face.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Detect faces: 1.3 = image-pyramid scale factor, 5 = min neighbours required.
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
increment = 0
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
    newimg = img[y:y + h, x:x + w]
    # save only the face
    cv2.imwrite('only_face' + str(increment) + '.jpg', newimg)
    roi_gray = gray[y:y + h, x:x + w]
    roi_color = img[y:y + h, x:x + w]
    # Search for eyes only inside the detected face region.
    eyes = eye_cascade.detectMultiScale(roi_gray)
    for (ex, ey, ew, eh) in eyes:
        cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
    increment = increment + 1
cv2.imshow('img', img)
cv2.imwrite('faces.png', img)
# BUG FIX: imshow() never renders a window without pumping the GUI event loop;
# block until a key is pressed, then tear the window down.
cv2.waitKey(0)
cv2.destroyAllWindows()
# detect smile throughout all images (and get timestamp annotated)
# detect sadness throughout all images (and get timestamp annotated)
|
'''
Created on Mar 10, 2015
@author: puneeth

Naive-Bayes-style classifier for the Kaggle "Forest Cover Type" data:
builds Laplace-smoothed per-feature frequency tables from train.csv and
scores every row of test.csv, writing predictions to pandababy4.csv.
'''
import pandas, csv, time
from collections import Counter
from astropy.table.row import Row  # NOTE(review): unused here; kept as-is

start = time.time()
print(str(start))

ifname = 'train.csv'
ofname = 'train_data.csv'

def _consolidate(df):
    """Collapse the one-hot Soil_Type1..40 and Wilderness_Area1..4 columns
    into single categorical 'Soil' / 'Wilderness_Area' columns, dropping the
    one-hot originals (in place)."""
    print('Consolidate 40 Soil Types. Use only one Soil Type')
    df['Soil'] = 0
    for i in range(1, 41):
        df['Soil'] = df['Soil'] + i * df['Soil_Type' + str(i)]
    print('Consolidate 4 Wilderness Areas. Use only one Wilderness Area')
    df['Wilderness_Area'] = 0
    for i in range(1, 5):
        df['Wilderness_Area'] = df['Wilderness_Area'] + i * df['Wilderness_Area' + str(i)]
    print('Remove 40 Soil Types and 4 Wilderness Areas')
    for i in range(1, 41):
        df.pop('Soil_Type' + str(i))
        if i < 5:
            df.pop('Wilderness_Area' + str(i))

print('Read train.csv')
df_train = pandas.read_csv(ifname)
_consolidate(df_train)
# print('Put above data into train_data.csv')
# df_train.to_csv(ofname, index=False)

tifname = 'test.csv'
tofname = 'test_data.csv'
print('Read test.csv')
df_test = pandas.read_csv(tifname)
_consolidate(df_test)
# print('Put above data into test_data.csv')
# df_test.to_csv(tofname, index=False)

cover_grouped = Counter(df_train.Cover_Type)
print('Count of each cover type:', cover_grouped, len(cover_grouped))
c = len(cover_grouped)  # number of classes; Laplace smoothing constant
prob_cover_grouped = Counter(df_train.Cover_Type)
for cover_type in prob_cover_grouped:
    prob_cover_grouped[cover_type] = prob_cover_grouped[cover_type] / len(df_train.index)
print('Probability of each cover type:', prob_cover_grouped)
# exit()

print('Count each Feature')
# Laplace-smoothed conditional frequency tables, one per feature.
# NOTE(review): the denominator always uses cover_grouped[1] (class 1's count)
# rather than the per-class count -- preserved from the original; verify.
elevation_grouped = (df_train[['Elevation', 'Cover_Type']].groupby(['Elevation', 'Cover_Type'],
                     as_index=False, sort=False)['Cover_Type'].count() + 1) / (cover_grouped[1] + c)
aspect_grouped = (df_train[['Aspect', 'Cover_Type']].groupby(['Aspect', 'Cover_Type'],
                  as_index=False, sort=True)['Cover_Type'].count() + 1) / (cover_grouped[1] + c)
slope_grouped = (df_train[['Slope', 'Cover_Type']].groupby(['Slope', 'Cover_Type'],
                 as_index=False, sort=True)['Cover_Type'].count() + 1) / (cover_grouped[1] + c)
h_hydro_grouped = (df_train[['Horizontal_Distance_To_Hydrology', 'Cover_Type']].groupby(['Horizontal_Distance_To_Hydrology', 'Cover_Type'],
                   as_index=False, sort=True)['Cover_Type'].count() + 1) / (cover_grouped[1] + c)
v_hydro_grouped = (df_train[['Vertical_Distance_To_Hydrology', 'Cover_Type']].groupby(['Vertical_Distance_To_Hydrology', 'Cover_Type'],
                   as_index=False, sort=True)['Cover_Type'].count() + 1) / (cover_grouped[1] + c)
h_roadways_grouped = (df_train[['Horizontal_Distance_To_Roadways', 'Cover_Type']].groupby(['Horizontal_Distance_To_Roadways', 'Cover_Type'],
                      as_index=False, sort=True)['Cover_Type'].count() + 1) / (cover_grouped[1] + c)
hillshade9am_grouped = (df_train[['Hillshade_9am', 'Cover_Type']].groupby(['Hillshade_9am', 'Cover_Type'],
                        as_index=False, sort=True)['Cover_Type'].count() + 1) / (cover_grouped[1] + c)
hillshadenoon_grouped = (df_train[['Hillshade_Noon', 'Cover_Type']].groupby(['Hillshade_Noon', 'Cover_Type'],
                         as_index=False, sort=True)['Cover_Type'].count() + 1) / (cover_grouped[1] + c)
hillshade3pm_grouped = (df_train[['Hillshade_3pm', 'Cover_Type']].groupby(['Hillshade_3pm', 'Cover_Type'],
                        as_index=False, sort=True)['Cover_Type'].count() + 1) / (cover_grouped[1] + c)
h_fire_grouped = (df_train[['Horizontal_Distance_To_Fire_Points', 'Cover_Type']].groupby(['Horizontal_Distance_To_Fire_Points', 'Cover_Type'],
                  as_index=False, sort=True)['Cover_Type'].count() + 1) / (cover_grouped[1] + c)
soil_grouped = (df_train[['Soil', 'Cover_Type']].groupby(['Soil', 'Cover_Type'],
                as_index=False, sort=True)['Soil'].count() + 1) / (cover_grouped[1] + c)
wilderness_grouped = (df_train[['Wilderness_Area', 'Cover_Type']].groupby(['Wilderness_Area', 'Cover_Type'],
                      as_index=False, sort=True)['Cover_Type'].count() + 1) / (cover_grouped[1] + c)

def _prob(table, key, cover_type):
    """Return table[key][cover_type]; fall back to the Laplace floor
    1 / (class count + c) when the value/class pair was never seen."""
    try:
        return table[key][cover_type]
    except Exception:
        return (0 + 1) / (cover_grouped[cover_type] + c)

result_dict = {}
count = 0
print('Start Classifying Test')
loopstart = time.time()
for index, row in df_test.iterrows():
    class_count = []
    for cover_type in range(1, 8):
        try:
            # BUG FIX: the original chained the factors with a stray comma
            # after the hydrology term, building a tuple instead of the
            # naive-Bayes product of all feature likelihoods.
            class_cover = (_prob(elevation_grouped, row.Elevation, cover_type) *
                           _prob(aspect_grouped, row.Aspect, cover_type) *
                           _prob(slope_grouped, row.Slope, cover_type) *
                           _prob(h_hydro_grouped, row.Horizontal_Distance_To_Hydrology, cover_type) *
                           _prob(v_hydro_grouped, row.Vertical_Distance_To_Hydrology, cover_type) *
                           _prob(h_roadways_grouped, row.Horizontal_Distance_To_Roadways, cover_type) *
                           _prob(hillshade9am_grouped, row.Hillshade_9am, cover_type) *
                           _prob(hillshadenoon_grouped, row.Hillshade_Noon, cover_type) *
                           _prob(hillshade3pm_grouped, row.Hillshade_3pm, cover_type) *
                           _prob(h_fire_grouped, row.Horizontal_Distance_To_Fire_Points, cover_type) *
                           _prob(soil_grouped, row.Soil, cover_type) *
                           _prob(wilderness_grouped, row.Wilderness_Area, cover_type) *
                           prob_cover_grouped[cover_type])
            class_count.append([class_cover, cover_type])
        except Exception:
            print()
    # Progress report every 20k rows.
    if count % 20000 == 0:
        loopend = time.time()
        print('loopend', count, str((loopend - loopstart) / 60))
    count = count + 1
    class_count.sort(reverse=True)
    if class_count:
        # BUG FIX: the original stored df_test.Id[count] AFTER incrementing
        # count, labelling every prediction with the NEXT row's Id; use the
        # current row's own Id instead.
        result_dict[row.Id] = class_count[0][1]
    # break

with open("pandababy4.csv", "w") as f:
    writer = csv.writer(f)
    writer.writerow(['Id', 'Cover_Type'])
    for key, value in result_dict.items():
        writer.writerow([key, value])

end = time.time()
print(str(end))
runtime = float(end - start) / 60
print('Runtime:', str(runtime))
'''
Created on Dec 26, 2017
@author: Mark

Advent of Code 2017 Day 24: assemble "bridges" from two-port components,
maximising total port strength (part 1) and strength of the longest bridge
(part 2). Python 2 script.
'''
from _collections import defaultdict
comps = []
hiscore = None
# Maps bridge length -> list of scores of every bridge of that length.
longest = defaultdict(list)
# Each input line "a/b" becomes a component tuple (a, b).
with open("data/Day24") as f:
    for l in f:
        comp = l.strip().split("/")
        comps.append((int(comp[0]), int(comp[1])))
def solve(bridge, used):
    """Recursively extend *bridge* with every unused compatible component.

    bridge is a list of [component, free_port] pairs; used is the set of
    components already in the bridge. Every recursion level also records the
    current bridge's score into the hiscore/longest globals, so all prefixes
    are scored, not just maximal bridges.
    """
    global comps
    global hiscore
    global longest
    for c in comps:
        # The open port at the end of the current bridge.
        tip = bridge[len(bridge) - 1]
        if c in used:
            continue
        if tip[1] == c[0]:
            # Component connects via its first port; its second port is free.
            bridgec = bridge[::]
            bridgec.append([c, c[1]])
            usedc = set(used)
            usedc.add(c)
            solve(bridgec, usedc)
        elif tip[1] == c[1]:
            # Component connects via its second port; its first port is free.
            bridgec = bridge[::]
            bridgec.append([c, c[0]])
            usedc = set(used)
            usedc.add(c)
            solve(bridgec, usedc)
    # Score = sum of both port values of every component in the bridge.
    score = 0
    for x in bridge:
        c = x[0]
        score+= c[0]
        score+= c[1]
    if hiscore is None or hiscore < score:
        hiscore = score
    longest[len(bridge)].append(score)
# Start a search from every component that has a 0 port (bridges begin at 0).
for comp in comps:
    bridge = []
    used = set()
    if 0 in [comp[0], comp[1]]:
        if comp[0] == 0:
            bridge.append([comp, comp[1]])
            used.add(comp)
            solve(bridge[::], set(used))
        else:
            bridge.append([comp, comp[0]])
            used.add(comp)
            solve(bridge[::], set(used))
        # Also score the single-component starting bridge itself.
        score = 0
        for x in bridge:
            c = x[0]
            score+= c[0]
            score+= c[1]
        if hiscore is None or hiscore < score:
            hiscore = score
        longest[len(bridge)].append(score)
print "Part 1", hiscore
print "Part 2:", max(longest[max(longest.keys())])
|
#import libraries
from sklearn import datasets
import numpy as np
import matplotlib.pyplot as plt
import math
#Import the Boston Dataset and differentiate Features(data) and Targets
# NOTE(review): Python 2 script; datasets.load_boston() was removed from
# modern scikit-learn -- this targets an old environment.
print 'Loading Boston Dataset.....'
boston = datasets.load_boston()
data=boston.data
target=boston.target
tr=data.shape[0] #total rows in the data set i.e. 506
tc=data.shape[1] #total columns in the data set i.e 13
#Combining the Features and Target
#We are appending to 'fulltable' in such a way that 14th Feature of the data is actually the target
fulltable=np.empty([tr,tc+1])
for i in range (0,tr):
    fulltable[i][tc]=target[i]
    for j in range(0,tc):
        fulltable[i][j]=data[i][j]
#print fulltable #to check the table-- First 13 Columns are Features, 14th Column is the Target
#NORMALIZING THE DATASET
print 'Normalizing the Dataset.....'
#Find min and max values amongst all the columns
minvals={}
maxvals={}
for j in range(0,tc+1):
    minD=fulltable[0][j]
    maxD=fulltable[0][j]
    for i in range(0,tr):
        if fulltable[i][j] < minD:
            minD=fulltable[i][j]
        if fulltable[i][j] > maxD:
            maxD=fulltable[i][j]
    minvals[j]=minD
    maxvals[j]=maxD
#Normalize all the values using Min and Max calculate in the previous step
#make a new table normaldata, which is a copy of the original dataset fulltable but with one more condition
#It is not the exact original, but normalized values of original data
# Min-max scaling: every column mapped to [0, 1].
normaldata = fulltable.copy()
for j in range(0,tc+1):
    for i in range(0, tr):
        normaldata[i][j]= (normaldata[i][j] - minvals[j]) / (maxvals[j]-minvals[j])
print 'Normalizing Successful.....'
#After the data has been normalized, we split the data
#splitting data to traing and test in 90:10
# Every 10th row (i % 10 == 0) goes to the test set; the rest train.
print 'Splitting Dataset to Train and Test in Ratio 90:10 .....'
trainX=[]
trainY=[]
testX=[]
testY=[]
for i in range(0,tr):
    if i%10==0:
        testX.append(normaldata[i][0:tc])
        testY.append(normaldata[i][tc])
    else:
        trainX.append(normaldata[i][0:tc])
        trainY.append(normaldata[i][tc])
print 'Train Table has',len(trainX), 'rows, which is', round(float(len(trainX)*100)/(len(trainX)+len(testX)),0),'% of total values'
print 'Test Table has',len(testX), 'rows, which is', round(float(len(testX)*100)/(len(trainX)+len(testX)),0),'% of total values'
#Now we have split the original Normalized data to train and test
#But it will be easy to have the train values all in one table
#So we combine all Train features and Train targets in one table called train
#We are going to use the TABLE TRAIN to do gradient descent
train=[]
for i in range(0,len(trainX)):
    feat = trainX[i]
    tar=trainY[i]
    row=[feat,tar]
    train.append(row)
#Similarly we do it for Test Data
test=[]
for i in range(0,len(testX)):
    feat = testX[i]
    tar=testY[i]
    row=[feat,tar]
    test.append(row)
#Now that we have created the table TRAIN which contains all the Normalized Features and Target
#To access the certain feature or target, we use the following syntax-
# print train[0][0][0]  #TO GET THE FIRST ROW FIRST FEATURE
# print train[0][0][1]  #TO GET THE FIRST ROW SECOND FEATURE
# print train[0][1]     #TO GET THE TARGET FOR FIRST ROW
#Function to predict value based on input features
def model(b0, bl, t):
    """Linear prediction: b0 + sum of bl[i] * t[i] over the tc features.

    b0 is the intercept, bl the coefficient list, t one feature row; the
    feature count tc is a module-level global.
    """
    return b0 + sum(bl[i] * t[i] for i in range(tc))
#function to calculate RMSE
def RMSE(trainY, predicted):
    """Root-mean-square error between targets *trainY* and *predicted*.

    Iterates over the length of *predicted*; both sequences must be at least
    that long.
    """
    squared_errors = [(trainY[k] - predicted[k]) ** 2 for k in range(len(predicted))]
    return math.sqrt(sum(squared_errors) / float(len(predicted)))
#different learning rates
learning= [1,0.1,0.01,0.001,0.0001,0.00001]
print 'Performing gradient descent based on the learning rates', learning,'.....'
#Loops for calculating predicted values
#for each learning rate
#and 10 epochs for each learning rate
# Stochastic gradient descent: coefficients are updated after every training
# row; after each epoch the RMSE on the test set is recorded.
print 'Now let\'s compute RMS Errors'
for l in range(0,len(learning)):
    learning_rate=learning[l]
    b0 = 0.0
    b = [0 for x in range(tc)]
    predictions = []
    RMSEVals = []
    errorlist = []
    epoch = 0
    while epoch<10:
        predictions = []
        for i in range (0, len(train)):
            # Per-sample error drives the SGD update of b0 and each b[j].
            error= model(b0, b, train[i][0]) - train[i][1]
            print error
            b0=b0- learning_rate*error
            for j in range(0, tc):
                b[j] = b[j] - learning_rate * error * train[i][0][j]
        epoch+=1
        #We can use either Train Set or Test Set to calculate RMSE Errors
        #I am going to use the TEST Set to calculate RMSE Error
        #Calculate RMSE on the TEST SET, and check the performance of the learning curve
        for k in range(0,len(test)):
            prediction=model(b0,b,test[k][0])
            predictions.append(prediction)
        RMSEVals.append(RMSE(testY,predictions))
    if learning_rate==1:
        print 'DAMNNNNN! For LearningRate=1, RMS Error is extremely large, resulting to Inf and eventually is NAN(Not a number)'
    else:
        print'RMS Errors FOR LEARNING RATE', learning_rate, 'are',RMSEVals
    #plotting for each learning rate
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    plt.plot(x, RMSEVals)
    plt.title('Learning Rate' + str(learning_rate))
    plt.xlabel('Epoch')
    plt.ylabel('RMSE')
    plt.show()
'''
#TO CALUCLATE RMSE ON TRAIN DATA SET, WE CAN USE THE FOLLOWING LOOP
#AND REPLACE IT WHERE WE ARE CALUCLATING RMSE IN THE MAIN LOOP
    for i in range(0,len(train)):
        prediction=model(b0,b,train[i][0])
        predictions.append(prediction)
    RMSEVals.append(RMSE(trainY,predictions))
    print'RMS Errors FOR LEARNING RATE', learning_rate, 'are',RMSEVals
    #plotting for each learning rate
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    #plt.plot(x, RMSEVals)
    #plt.title('Learning Rate' + str(learning_rate))
    #plt.xlabel('Epoch')
    #plt.ylabel('RMSE')
    #plt.show()
'''
print 'Hope you liked the graphs. Now we can analyze and choose the best learning_rate!!'
print 'WOAHHH it is 3:30 am, let\'s just call it a night!'
print 'Goodnight World!!!!'
|
from django.urls import path
from . import views

# URL namespace for reversing, e.g. reverse('hello:index').
app_name = 'hello'

urlpatterns = [
    #path('<int:id>/<nickname>/', views.index, name='index'),
    # Captures <nickname> (str) and <age> (int) from a sentence-shaped path.
    path('my_name_is_<nickname>.I_am_<int:age>_years_old.', views.index, name='index'),
]
|
"""
Globals (mainly constants)
"""
# Colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
BLUE = (0, 0, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
ALL_LAYERS = ["bg1", "bg2", "cache", "obstacle", "dessinable","ramassable", "actionnable", "personnage", "joueur" ]
assert all(s[-1]!='s' for s in ALL_LAYERS),"layername should not end with an s"
ALL_LAYERS_SET = set(ALL_LAYERS)
NON_BG_LAYERS = [l for l in ALL_LAYERS if l not in ["bg1", "bg2"]]
|
#
# The Python Imaging Library.
# $Id$
#
# macOS icns file decoder, based on icns.py by Bob Ippolito.
#
# history:
# 2004-10-09 fl Turned into a PIL plugin; removed 2.3 dependencies.
# 2020-04-04 Allow saving on all operating systems.
#
# Copyright (c) 2004 by Bob Ippolito.
# Copyright (c) 2004 by Secret Labs.
# Copyright (c) 2004 by Fredrik Lundh.
# Copyright (c) 2014 by Alastair Houghton.
# Copyright (c) 2020 by Pan Jing.
#
# See the README file for information on usage and redistribution.
#
import io
import os
import struct
import sys
from PIL import Image, ImageFile, PngImagePlugin, features
# JPEG 2000 support is optional: some icns sub-images are JP2-encoded.
enable_jpeg2k = features.check_codec("jpg_2000")
if enable_jpeg2k:
    from PIL import Jpeg2KImagePlugin

# Container magic signature and the size of every block header
# (4-byte type tag + 4-byte big-endian length).
MAGIC = b"icns"
HEADERSIZE = 8
def nextheader(fobj):
    """Read the next 8-byte icns block header -> (type: bytes, length: int)."""
    header = fobj.read(HEADERSIZE)
    return struct.unpack(">4sI", header)
def read_32t(fobj, start_length, size):
    """Read a 32-bit icon that carries an extra 4-byte zero header.

    The 128x128 icon seems to have an extra header for some reason; verify
    it and delegate to read_32 with the header skipped.
    """
    start, length = start_length
    fobj.seek(start)
    if fobj.read(4) != b"\x00\x00\x00\x00":
        msg = "Unknown signature, expecting 0x00000000"
        raise SyntaxError(msg)
    return read_32(fobj, (start + 4, length - 4), size)
def read_32(fobj, start_length, size):
    """
    Read a 32bit RGB icon resource. Seems to be either uncompressed or
    an RLE packbits-like scheme.
    """
    (start, length) = start_length
    fobj.seek(start)
    # size is (width, height, scale); actual pixel dimensions include scale.
    pixel_size = (size[0] * size[2], size[1] * size[2])
    sizesq = pixel_size[0] * pixel_size[1]
    if length == sizesq * 3:
        # uncompressed ("RGBRGBGB")
        indata = fobj.read(length)
        im = Image.frombuffer("RGB", pixel_size, indata, "raw", "RGB", 0, 1)
    else:
        # decode image: each of the 3 bands is RLE-compressed separately.
        im = Image.new("RGB", pixel_size, None)
        for band_ix in range(3):
            data = []
            bytesleft = sizesq
            while bytesleft > 0:
                byte = fobj.read(1)
                if not byte:
                    break
                byte = byte[0]
                if byte & 0x80:
                    # High bit set: a run -- the following byte is repeated
                    # (control - 125) times.
                    blocksize = byte - 125
                    byte = fobj.read(1)
                    for i in range(blocksize):
                        data.append(byte)
                else:
                    # Literal block of (control + 1) raw bytes.
                    blocksize = byte + 1
                    data.append(fobj.read(blocksize))
                bytesleft -= blocksize
                if bytesleft <= 0:
                    break
            if bytesleft != 0:
                msg = f"Error reading channel [{repr(bytesleft)} left]"
                raise SyntaxError(msg)
            band = Image.frombuffer("L", pixel_size, b"".join(data), "raw", "L", 0, 1)
            im.im.putband(band.im, band_ix)
    return {"RGB": im}
def read_mk(fobj, start_length, size):
    """Read an uncompressed 8-bit alpha mask and return it as {"A": image}."""
    # Alpha masks seem to be uncompressed
    fobj.seek(start_length[0])
    width = size[0] * size[2]
    height = size[1] * size[2]
    raw = fobj.read(width * height)
    band = Image.frombuffer("L", (width, height), raw, "raw", "L", 0, 1)
    return {"A": band}
def read_png_or_jpeg2000(fobj, start_length, size):
    """Read a PNG or JPEG-2000 sub-image and return it as {"RGBA": image}.

    Raises ValueError for unknown payloads, or when JP2 data is found but
    Pillow was built without JPEG 2000 support.
    """
    start, length = start_length
    fobj.seek(start)
    sig = fobj.read(12)
    if sig[:8] == b"\x89PNG\x0d\x0a\x1a\x0a":
        # PNG payload.
        fobj.seek(start)
        im = PngImagePlugin.PngImageFile(fobj)
        Image._decompression_bomb_check(im.size)
        return {"RGBA": im}
    # j2k, jpc or j2c signatures.
    is_jp2 = (
        sig[:4] in (b"\xff\x4f\xff\x51", b"\x0d\x0a\x87\x0a")
        or sig == b"\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a"
    )
    if is_jp2:
        if not enable_jpeg2k:
            msg = (
                "Unsupported icon subimage format (rebuild PIL "
                "with JPEG 2000 support to fix this)"
            )
            raise ValueError(msg)
        fobj.seek(start)
        f = io.BytesIO(fobj.read(length))
        im = Jpeg2KImagePlugin.Jpeg2KImageFile(f)
        Image._decompression_bomb_check(im.size)
        if im.mode != "RGBA":
            im = im.convert("RGBA")
        return {"RGBA": im}
    msg = "Unsupported icon subimage format"
    raise ValueError(msg)
class IcnsFile:
    """Parser for a raw icns container: indexes every sub-image block on
    construction and decodes a requested resolution on demand."""

    # (width, height, scale) -> ordered candidate list of (block type, reader).
    SIZES = {
        (512, 512, 2): [(b"ic10", read_png_or_jpeg2000)],
        (512, 512, 1): [(b"ic09", read_png_or_jpeg2000)],
        (256, 256, 2): [(b"ic14", read_png_or_jpeg2000)],
        (256, 256, 1): [(b"ic08", read_png_or_jpeg2000)],
        (128, 128, 2): [(b"ic13", read_png_or_jpeg2000)],
        (128, 128, 1): [
            (b"ic07", read_png_or_jpeg2000),
            (b"it32", read_32t),
            (b"t8mk", read_mk),
        ],
        (64, 64, 1): [(b"icp6", read_png_or_jpeg2000)],
        (32, 32, 2): [(b"ic12", read_png_or_jpeg2000)],
        (48, 48, 1): [(b"ih32", read_32), (b"h8mk", read_mk)],
        (32, 32, 1): [
            (b"icp5", read_png_or_jpeg2000),
            (b"il32", read_32),
            (b"l8mk", read_mk),
        ],
        (16, 16, 2): [(b"ic11", read_png_or_jpeg2000)],
        (16, 16, 1): [
            (b"icp4", read_png_or_jpeg2000),
            (b"is32", read_32),
            (b"s8mk", read_mk),
        ],
    }

    def __init__(self, fobj):
        """
        fobj is a file-like object as an icns resource
        """
        # signature : (start, length)
        self.dct = dct = {}
        self.fobj = fobj
        sig, filesize = nextheader(fobj)
        if not _accept(sig):
            msg = "not an icns file"
            raise SyntaxError(msg)
        i = HEADERSIZE
        # Walk the chain of blocks, recording each payload's offset/length.
        while i < filesize:
            sig, blocksize = nextheader(fobj)
            if blocksize <= 0:
                msg = "invalid block header"
                raise SyntaxError(msg)
            i += HEADERSIZE
            blocksize -= HEADERSIZE
            dct[sig] = (i, blocksize)
            # Skip over the payload to the next header.
            fobj.seek(blocksize, io.SEEK_CUR)
            i += blocksize

    def itersizes(self):
        """Return the list of (w, h, scale) sizes present in this file."""
        sizes = []
        for size, fmts in self.SIZES.items():
            for fmt, reader in fmts:
                if fmt in self.dct:
                    sizes.append(size)
                    break
        return sizes

    def bestsize(self):
        """Return the largest available size; raise if none was found."""
        sizes = self.itersizes()
        if not sizes:
            msg = "No 32bit icon resources found"
            raise SyntaxError(msg)
        return max(sizes)

    def dataforsize(self, size):
        """
        Get an icon resource as {channel: array}.  Note that
        the arrays are bottom-up like windows bitmaps and will likely
        need to be flipped or transposed in some way.
        """
        dct = {}
        for code, reader in self.SIZES[size]:
            desc = self.dct.get(code)
            if desc is not None:
                dct.update(reader(self.fobj, desc, size))
        return dct

    def getimage(self, size=None):
        """Decode and return the image at *size* (best size when omitted)."""
        if size is None:
            size = self.bestsize()
        if len(size) == 2:
            # Normalise (w, h) to (w, h, scale=1).
            size = (size[0], size[1], 1)
        channels = self.dataforsize(size)
        # Prefer a ready-made RGBA image; otherwise merge RGB + alpha mask.
        im = channels.get("RGBA", None)
        if im:
            return im
        im = channels.get("RGB").copy()
        try:
            im.putalpha(channels["A"])
        except KeyError:
            pass
        return im
##
# Image plugin for Mac OS icons.
class IcnsImageFile(ImageFile.ImageFile):
    """
    PIL image support for Mac OS .icns files.
    Chooses the best resolution, but will possibly load
    a different size image if you mutate the size attribute
    before calling 'load'.

    The info dictionary has a key 'sizes' that is a list
    of sizes that the icns file has.
    """

    format = "ICNS"
    format_description = "Mac OS icns resource"

    def _open(self):
        # Parse the container, expose available sizes, and pick the best one.
        self.icns = IcnsFile(self.fp)
        self.mode = "RGBA"
        self.info["sizes"] = self.icns.itersizes()
        self.best_size = self.icns.bestsize()
        self.size = (
            self.best_size[0] * self.best_size[2],
            self.best_size[1] * self.best_size[2],
        )

    @property
    def size(self):
        return self._size

    @size.setter
    def size(self, value):
        # Accept (w, h), (w, h, scale) or a pixel size that matches one of the
        # file's sizes once the retina scale factor is applied.
        info_size = value
        if info_size not in self.info["sizes"] and len(info_size) == 2:
            # Try treating a 2-tuple as scale-1.
            info_size = (info_size[0], info_size[1], 1)
        if (
            info_size not in self.info["sizes"]
            and len(info_size) == 3
            and info_size[2] == 1
        ):
            # Try matching against each size's effective pixel dimensions.
            simple_sizes = [
                (size[0] * size[2], size[1] * size[2]) for size in self.info["sizes"]
            ]
            if value in simple_sizes:
                info_size = self.info["sizes"][simple_sizes.index(value)]
        if info_size not in self.info["sizes"]:
            msg = "This is not one of the allowed sizes of this image"
            raise ValueError(msg)
        self._size = value

    def load(self):
        if len(self.size) == 3:
            # A 3-tuple selects an explicit (w, h, scale) variant to decode.
            self.best_size = self.size
            self.size = (
                self.best_size[0] * self.best_size[2],
                self.best_size[1] * self.best_size[2],
            )
        px = Image.Image.load(self)
        if self.im is not None and self.im.size == self.size:
            # Already loaded
            return px
        self.load_prepare()
        # This is likely NOT the best way to do it, but whatever.
        im = self.icns.getimage(self.best_size)

        # If this is a PNG or JPEG 2000, it won't be loaded yet
        px = im.load()

        self.im = im.im
        self.mode = im.mode
        self.size = im.size

        return px
def _save(im, fp, filename):
    """
    Saves the image as a series of PNG files,
    that are then combined into a .icns file.

    Each icns block type in *sizes* is rendered once: from a matching-width
    image in encoderinfo["append_images"] when provided, otherwise by
    resizing *im*. A TOC block is written before the data blocks.
    """
    if hasattr(fp, "flush"):
        fp.flush()

    # icns block type -> edge length (pixels) of the PNG stored in it.
    sizes = {
        b"ic07": 128,
        b"ic08": 256,
        b"ic09": 512,
        b"ic10": 1024,
        b"ic11": 32,
        b"ic12": 64,
        b"ic13": 256,
        b"ic14": 512,
    }
    # Caller-supplied variants are matched to block sizes by width.
    provided_images = {im.width: im for im in im.encoderinfo.get("append_images", [])}
    size_streams = {}
    for size in set(sizes.values()):
        image = (
            provided_images[size]
            if size in provided_images
            else im.resize((size, size))
        )

        temp = io.BytesIO()
        image.save(temp, "png")
        size_streams[size] = temp.getvalue()

    entries = []
    for type, size in sizes.items():
        stream = size_streams[size]
        entries.append(
            {"type": type, "size": HEADERSIZE + len(stream), "stream": stream}
        )

    # Header
    fp.write(MAGIC)
    file_length = HEADERSIZE  # Header
    file_length += HEADERSIZE + 8 * len(entries)  # TOC
    file_length += sum(entry["size"] for entry in entries)
    fp.write(struct.pack(">i", file_length))

    # TOC: one (type, size) pair per data block.
    fp.write(b"TOC ")
    fp.write(struct.pack(">i", HEADERSIZE + len(entries) * HEADERSIZE))
    for entry in entries:
        fp.write(entry["type"])
        fp.write(struct.pack(">i", entry["size"]))

    # Data
    for entry in entries:
        fp.write(entry["type"])
        fp.write(struct.pack(">i", entry["size"]))
        fp.write(entry["stream"])

    if hasattr(fp, "flush"):
        fp.flush()
def _accept(prefix):
    # A file is recognized as ICNS when it begins with the 4-byte magic.
    return MAGIC == prefix[:4]
# Register the ICNS format with Pillow's generic open/save machinery.
Image.register_open(IcnsImageFile.format, IcnsImageFile, _accept)
Image.register_extension(IcnsImageFile.format, ".icns")
Image.register_save(IcnsImageFile.format, _save)
Image.register_mime(IcnsImageFile.format, "image/icns")
if __name__ == "__main__":
    # CLI helper: dump every stored size of an .icns file as a PNG,
    # plus the default variant as out.png.
    if len(sys.argv) < 2:
        print("Syntax: python3 IcnsImagePlugin.py [file]")
        sys.exit()
    with open(sys.argv[1], "rb") as fp:
        imf = IcnsImageFile(fp)
        for size in imf.info["sizes"]:
            imf.size = size
            imf.save("out-%s-%s-%s.png" % size)
    with Image.open(sys.argv[1]) as im:
        im.save("out.png")
    # BUG FIX: sys.platform is "win32" on Windows (never "windows"),
    # so the preview branch below was unreachable before.
    if sys.platform == "win32":
        os.startfile("out.png")
|
import requests as rq
from bs4 import BeautifulSoup
import vpn_connect as vc
import json
import time
import re
def retry(file):
    """Decorator factory: keep calling the wrapped scraper until it
    succeeds, rotating the VPN exit after every failure, then kill the
    VPN, persist the result to *file* as JSON, and return it.

    Improvements over the original: positional/keyword arguments are now
    forwarded to the wrapped function (backward compatible — existing
    zero-argument uses are unchanged) and functools.wraps preserves the
    wrapped function's name/docstring for logging and debugging.
    """
    import functools

    def inner(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            while True:
                try:
                    data = func(*args, **kwargs)
                    break
                except Exception as e:
                    # Any scraping failure is treated as a blocked IP:
                    # log it and retry from a different VPN country.
                    print(e)
                    change_country()
            vc.kill()
            write_file(file, data)
            return data
        return wrapper
    return inner
def get_url(container):
    """Return the href of the first <a> inside *container*."""
    anchor = container.select_one("a")
    return anchor.get('href')
def get_name(container):
    """Return the first <a>'s text with newlines stripped, or None
    when *container* has no anchor."""
    anchor = container.select_one("a")
    if not anchor:
        return None
    return anchor.text.replace('\n', '')
def get_path(url):
    """Extract the shop path that follows '.com/shop/' in *url*.

    Contract (unchanged from the original): '' for a falsy url,
    None when the pattern is absent, otherwise the captured path.
    """
    if not url:
        return ""
    match = re.search(r'.com\/shop\/(.*)', url)
    return match.group(1) if match else None
def get_name_url(html, actual):
    """Parse the left-hand category menu out of *html*.

    Collects {url, name, path} dicts for each menu entry.  If an entry
    named *actual* (the category we navigated from) is encountered, the
    whole page is a leaf: the entry is appended to scategories.json and
    an empty list is returned so the caller stops descending.
    """
    soup = BeautifulSoup(html,'html.parser')
    cat_containers = soup.select("div.left-categories ul#left-navi li")
    temp = []
    # NOTE(review): `same` is assigned but never used — dead variable.
    same = False
    for cat_container in cat_containers:
        category = {}
        category['url'] = get_url(cat_container)
        category['name'] = get_name(cat_container)
        category['path'] = get_path(category['url'])
        temp.append(category)
        if category['name'] == actual:
            # Reached the page's own entry: record it as a scraped
            # (leaf) category and discard the siblings collected so far.
            scategories = read_file('scategories.json')
            scategories.append(category)
            write_file('scategories.json',scategories)
            temp = []
            break
    return temp
def write_file(file, data):
    """Serialize *data* to *file* as pretty-printed JSON."""
    with open(file, 'w') as handle:
        json.dump(data, handle, indent=4)
def read_file(file):
    """Load JSON from *file*; return [] when the file is missing or
    contains invalid JSON.

    BUG FIX: the original initialized ``data = []`` but never used it as
    a fallback, so a missing state file crashed the scraper on a cold
    start even though every caller (e.g. get_categories) clearly treats
    an empty result as "start fresh".
    """
    try:
        with open(file) as handle:
            return json.load(handle)
    except (OSError, ValueError):
        # ValueError also covers json.JSONDecodeError (its subclass).
        return []
@retry('categories.json')
def get_categories():
    """Breadth-unbounded crawl of the shop's category tree.

    ``lefts`` is a persisted work stack of categories still to visit;
    ``categories`` accumulates every category seen.  Both are written to
    disk on every iteration so an interrupted run resumes where it left
    off.  A non-OK response raises, letting @retry rotate the VPN.
    """
    categories = [] if not read_file('categories.json') else read_file('categories.json')
    lefts = [{'name':'','url':"https://www.riteaid.com/shop/"}] if not read_file('lefts.json') else read_file('lefts.json')
    while lefts:
        left = lefts[-1]
        url = left['url']
        # Checkpoint the work stack before the network call.
        write_file('lefts.json',lefts)
        print(f'Scrapping: {url}')
        res = rq.get(url)
        if not res.ok: raise Exception("Categories Url Failed")
        lefts.pop()
        # Sub-categories of this page (empty when it is a leaf).
        temp = get_name_url(res.text, left['name'])
        lefts.extend(temp)
        categories.extend(temp)
        write_file('categories.json',categories)
    return categories
def change_country():
    """Rotate the VPN exit node: drop the current connection, open a
    new one, and give it a few seconds to settle."""
    vc.kill()
    vc.connect('vpn')
    # Typo fix in the progress message: "Changin" -> "Changing".
    print('Changing Country..')
    time.sleep(4)
def get_products(html):
    """Return the product-image anchor tags from a category listing page."""
    page = BeautifulSoup(html, 'html.parser')
    return page.select('ul.products-grid li a.product-image')
def copy_scategories():
    """Seed the product-scraping work queue with a copy of the
    scraped (leaf) categories."""
    write_file('leftscategories.json', read_file('scategories.json'))
def get_purl(products):
    """Collect the href of every product anchor tag."""
    return [anchor.get('href') for anchor in products]
def paginate(html):
    """True when the listing page has a "next" button, i.e. more pages follow."""
    page = BeautifulSoup(html, 'html.parser')
    return bool(page.select_one('li.next-btn'))
@retry('products_url.json')
def get_product_links():
    """Walk every leaf category's paginated listing and collect product URLs.

    Progress is checkpointed to disk on each page (remaining categories,
    current page number, URLs found so far) so an interrupted run resumes.
    A non-OK response raises, letting @retry rotate the VPN and re-enter.
    """
    categories = [] if not read_file('leftscategories.json') else read_file('leftscategories.json')
    # Cookie forces the site's 36-per-page AJAX listing mode.
    cookies = {'CATEGORY_INFO': '{"is_ajax":"1","limit":"36"}'}
    fstring = "?limit=36&p={}"
    products = [] if not read_file("products_url.json") else read_file("products_url.json")
    while categories:
        nextp = True
        # Resume from the last page number persisted in state.json.
        i = read_file('state.json').get('state')
        while nextp:
            temp = []
            nextp = False
            url = categories[-1]['url']
            print(f"Scrapping: {url}{fstring.format(i)}")
            res = rq.get(f"{url}{fstring.format(i)}", cookies=cookies)
            write_file('state.json', {"state": i})
            if not res.ok: raise Exception("Categories Url Failed")
            products_container = get_products(res.text)
            purls = get_purl(products_container)
            temp = [{'product_url': purl, 'category': categories[-1]['path'], "category_url": f"{url}{fstring.format(i)}"} for purl in purls]
            products.extend(temp)
            write_file('products_url.json', products)
            # Keep paging while the listing shows a "next" button.
            if paginate(res.text): nextp = True
            i = i + 1
        categories.pop()
        write_file('leftscategories.json',categories)
        # Reset the page counter for the next category.
        write_file('state.json', {"state": 1})
    return products
def product_soup(html):
    """Parse a product detail page into a flat dict of product fields.

    Missing page elements yield None values (and the literal "None"
    inside the joined volume strings, matching previous output).

    BUG FIX: every other field was guarded against a missing element,
    but the sku lookup was not and crashed on pages lacking the sku
    meta tag; it now falls back to None like the rest.
    """
    product = {}
    soup = BeautifulSoup(html, 'html.parser')
    # Spec table cells: the <td> that follows each labelled <th>.
    hec = soup.select_one('th:contains("Height")+td') #height value
    wic = soup.select_one('th:contains("Width")+td') #width value
    dec = soup.select_one('th:contains("Depth")+td') #depth value
    wec = soup.select_one('th:contains("Product Weight")+td') #weight value
    pahec = soup.select_one('th:contains("PackageHeight")+td') #PackageHeight value
    pawic = soup.select_one('th:contains("PackageWidth")+td') #PackageWidth value
    padec = soup.select_one('th:contains("PackageDepth")+td') #PackageDepth value
    pric = soup.select_one('th:contains("Unit Price")+td') #price value
    price = pric.text if pric else None
    height = hec.text if hec else None
    width = wic.text if wic else None
    depth = dec.text if dec else None
    weight = wec.text if wec else None
    package_height = pahec.text if pahec else None
    package_width = pawic.text if pawic else None
    package_depth = padec.text if padec else None
    product['weight'] = weight
    product['volume'] = f"{width}x{depth}x{height}"
    product['package_volume'] = f"{package_width}x{package_depth}x{package_height}"
    skuc = soup.select_one('meta[itemprop="sku"]') #sku content
    pricec = soup.select_one('span.price[itemprop="price"]') #price value
    imagesc = soup.select('div.images img') #images src
    namec = soup.select_one('div.product-name span[itemprop="name"]') #name value
    # Guard against a missing sku tag (previously an AttributeError).
    product['sku'] = skuc.get('content') if skuc else None
    # Prefer the displayed price; fall back to the spec-table unit price.
    product['price'] = pricec.text if pricec else price
    product['name'] = namec.text if namec else None
    product['images'] = [imagec.get('src') for imagec in imagesc]
    # Image filenames encode the UPC (e.g. .../0123456789012.jpg).
    product['UPC'] = product['images'][0].split('/')[-1].split('.')[0] if product['images'] else None
    return product
@retry('products.json')
def product_scrapper():
    """Scrape each queued product URL and persist the details.

    BUG FIX: the function previously returned ``products`` — the work
    queue, which is always empty once the loop finishes — so the
    @retry('products.json') decorator overwrote products.json with [].
    It now returns the accumulated ``fproducts`` details instead.
    """
    products = [] if not read_file('leftproducts.json') else read_file('leftproducts.json')
    fproducts = [] if not read_file('products.json') else read_file('products.json')
    while products:
        product = products[-1]
        url = product.get('product_url')
        res = rq.get(url)
        print(f"Scrapping: {url}")
        # 404s are tolerated (product removed); any other failure raises
        # so @retry can rotate the VPN and resume from the checkpoint.
        if not res.ok and res.status_code != 404: raise Exception("Products Url Failed")
        products.pop()
        write_file('leftproducts.json', products)
        if res.status_code != 404:
            # Category path looks like "a/b/c": first segment is the
            # type, last segment the subtype.
            match = re.findall(r'([^\/]*)', product.get('category'))
            match = [mat for mat in match if mat != '']
            _type = match[0] if match else None
            subtype = match[-1] if match else None
            mtype = product.get('category')
            temp = product_soup(res.text)
            temp['type'] = _type
            temp['subtype'] = subtype
            temp['mtype'] = mtype
            temp['url'] = url
            fproducts.append(temp)
            write_file('products.json', fproducts)
            print('Saved!')
        else: print("The product doesn't exist!")
    return fproducts
|
# UI element locators for the application's navigation menus, as
# (strategy, locator value, human-readable description) triples.
# NOTE(review): presumably consumed by a Selenium find-element helper
# that unpacks (by, value, name) — confirm against the caller.
suites_menu = ('link_text', 'Suites', 'Suites menu')
tests_menu = ('link_text', 'Tests', 'Tests menu')
pages_menu = ('link_text', 'Pages', 'Pages menu')
reports_menu = ('link_text', 'Reports', 'Reports menu')
project_settings_menu = ('link_text', 'Settings', 'Settings menu')
environments_menu = ('link_text', 'Environments', 'Environments menu')
global_settings_menu = ('link_text', 'Global Settings', 'Global Settings menu')
users_menu = ('link_text', 'Users', 'Users menu')
user_menu = ('id', 'userMenu', 'User menu')
profile_menu = ('link_text', 'Profile', 'User Profile menu')
logout_menu = ('link_text', 'Log out', 'Logout menu')
|
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import (
detail,
edit,
list as _list,
)
from django.urls import reverse_lazy
from ..models import (
Contact,
ContactEmail
)
# Public API of this views module.
__all__ = (
    'ContactEmailView',
    'ContactEmailListView',
    'ContactEmailCreateView',
    'ContactEmailUpdateView',
)
class OwnMailsMixin:
    """Restrict the view's queryset to e-mails belonging to the contact
    identified by the ``contact_pk`` URL kwarg."""
    def get_queryset(self):
        contact_pk = self.kwargs.get('contact_pk')
        return self.model.objects.filter(contact_id=contact_pk)
class ContactEmailListView(LoginRequiredMixin, OwnMailsMixin, _list.ListView):
    """List every e-mail address attached to a single contact;
    unauthenticated users are redirected to the login page."""
    model = ContactEmail
    login_url = reverse_lazy('auth_login')
    redirect_field_name = 'redirect_to'
    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(*args, **kwargs)
        # Expose the owning contact so templates can render its details.
        context['contact'] = Contact.objects.get(id=self.kwargs.get('contact_pk'))
        return context
class ContactEmailView(LoginRequiredMixin, OwnMailsMixin, detail.DetailView):
    # Detail page for one contact e-mail, scoped to the URL's contact;
    # unauthenticated users are redirected to the login page.
    model = ContactEmail
    login_url = reverse_lazy('auth_login')
    redirect_field_name = 'redirect_to'
class ContactEmailCreateView(LoginRequiredMixin, edit.CreateView):
    """Create a new e-mail address for the contact given by the
    ``contact_pk`` URL kwarg."""
    model = ContactEmail
    fields = ['email']
    success_url = reverse_lazy('contacts:contact-list')
    login_url = reverse_lazy('auth_login')
    redirect_field_name = 'redirect_to'
    def form_valid(self, form):
        # Attach the new e-mail to the contact from the URL before saving.
        form.instance.contact = Contact.objects.get(pk=self.kwargs.get('contact_pk'))
        return super().form_valid(form)
class ContactEmailUpdateView(LoginRequiredMixin, edit.UpdateView):
    # Edit an existing contact e-mail address.
    # NOTE(review): unlike the list/detail views, this does not use
    # OwnMailsMixin, so any e-mail pk is editable regardless of the
    # contact in the URL — confirm this is intended.
    model = ContactEmail
    fields = ['email']
    success_url = reverse_lazy('contacts:contact-list')
    login_url = reverse_lazy('auth_login')
    redirect_field_name = 'redirect_to'
|
import numpy as np
import cv2
from remove import median_filter
# Build a median-filtered video from every 15th frame of green.mov.
cap = cv2.VideoCapture('./green.mov')
allFrames = []
# Indices of the frames to keep (every 15th up to 10700).
# NOTE(review): assumes the clip has at least 10686 frames; a shorter
# video would raise an IndexError below — confirm the source length.
frames = np.arange(0, 10700, 15)
ret = True
# Half-width (in kept frames) of the temporal window fed to the filter.
WINDOW_SIZE = 40
while(cap.isOpened() and ret):
    ret, frame = cap.read()
    if ret:
        allFrames.append(np.array(frame))
allFrames = np.array(allFrames)
framesWeWant = np.array([allFrames[i] for i in frames])
height, width, layers = allFrames[0].shape
fourcc = cv2.VideoWriter_fourcc(*'XVID')
video = cv2.VideoWriter('green_video.avi',fourcc,8,(width,height))
for i in range(len(framesWeWant)):
    # Windows are clamped at both ends of the clip; every 10th frame of
    # the window is sampled before applying the median filter.
    if (i < WINDOW_SIZE):
        video.write(np.uint8(median_filter(framesWeWant[i: i + WINDOW_SIZE+1: 10,:,:,:])))
    elif (i >= len(framesWeWant) - WINDOW_SIZE):
        video.write(np.uint8(median_filter(framesWeWant[i-WINDOW_SIZE: i+1: 10,:,:,:])))
    else:
        video.write(np.uint8(median_filter(framesWeWant[i-WINDOW_SIZE: i+WINDOW_SIZE + 1: 10,:,:,:])))
cap.release()
video.release()
cv2.destroyAllWindows()
|
#coding=UTF-8
'''
Created on Jan 7, 2011
@author: elvin
'''
import settings
import urllib
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.ext.webapp import util
from google.appengine.ext import db
from google.appengine.ext import webapp
from core import TemplateAPI, User, UserAccess, UserPermissionAPI, Block, Blocks, SiteError
class Music(db.Model):
    """Datastore entity for an uploaded music track: the blobstore
    reference plus title/description metadata and ownership."""
    music = blobstore.BlobReferenceProperty()
    title = db.StringProperty(multiline=False, required=True)
    content = db.TextProperty(required=True)
    published = db.BooleanProperty(default=True, required=True)
    owner = db.ReferenceProperty(User, collection_name='music_owners', required=True)
    created = db.DateTimeProperty(auto_now_add=True, required=True)
    updated = db.DateTimeProperty(auto_now=True, required=True)
class MusicAdd(TemplateAPI):
    """Render the upload form for a new music entry (requires the
    'music_add' permission; 403 otherwise)."""
    def get(self):
        self.access_init()
        if self.access.check('music_add'):
            # The form posts to a fresh blobstore upload URL that calls
            # back into MusicSave.
            data = {'title': settings.music_add_title,
                    'content_editor': self.ckeditor.editor('music_content'),
                    'submit_url': blobstore.create_upload_url(self.get_redirect(settings.music_save_url)),
                    'cancel_url': self.get_redirect(settings.music_add_redirect_url)}
            self.render_html('music', 'music_add_form', data)
        else:
            self.render_error(403)
class MusicSave(blobstore_handlers.BlobstoreUploadHandler):
    """Blobstore upload callback: validate the uploaded file's type and
    size, create the Music entity, and redirect to the appropriate
    success or error page (deleting the blob on rejection)."""
    def post(self):
        access = UserAccess(self.request.cookies)
        if access.check('music_add'):
            musics = self.get_uploads('music')
            blob_info = musics[0]
            # Type is acceptable if whitelisted, or if the whitelist
            # contains the wildcard entry 'all'.
            if blob_info.content_type in settings.music_allowed_types:
                incorrect_type = False
            elif 'all' in settings.music_allowed_types:
                incorrect_type = False
            else:
                incorrect_type = True
            if blob_info.size > settings.music_max_upload_size:
                # Oversized: drop the blob and redirect to the error page,
                # preserving the optional ?frame= parameter.
                blobstore.delete(blob_info.key())
                if self.request.get('frame'):
                    self.redirect('%s?frame=%s' % (settings.music_error_url_string % settings.music_error_oversize_type, self.request.get('frame')))
                else:
                    self.redirect(settings.music_error_url_string % settings.music_error_oversize_type)
            elif incorrect_type:
                # Disallowed content type: same cleanup-and-redirect dance.
                blobstore.delete(blob_info.key())
                if self.request.get('frame'):
                    self.redirect('%s?frame=%s' % (settings.music_error_url_string % settings.music_error_incorrect_type_type, self.request.get('frame')))
                else:
                    self.redirect(settings.music_error_url_string % settings.music_error_incorrect_type_type)
            else:
                # Valid upload: persist the entity referencing the blob.
                music_reference = blob_info.key()
                title = unicode(self.request.get('title'))
                content = db.Text(unicode(self.request.get('music_content')))
                published = bool(self.request.get('published'))
                owner = access.current_user
                music = Music(music=music_reference, title=title, content=content, published=published, owner=owner)
                music.put()
                if self.request.get('frame'):
                    self.redirect('%s?frame=%s' % (settings.music_save_redirect_url, self.request.get('frame')))
                else:
                    self.redirect(settings.music_save_redirect_url)
        else:
            # NOTE(review): this forbidden branch keys on query_string
            # while the others key on the 'frame' parameter — confirm
            # the inconsistency is intentional.
            if self.request.query_string:
                self.redirect('%s?frame=%s' % (settings.music_error_url_string % settings.music_error_forbidden_type, self.request.get('frame')))
            else:
                self.redirect(settings.music_error_url_string % settings.music_error_forbidden_type)
class MusicEdit(TemplateAPI):
    """Edit form (GET) and update handler (POST) for an existing Music
    entity; requires the 'music_edit' permission."""
    def get(self, id):
        self.access_init()
        if self.access.check('music_edit'):
            # NOTE(review): the bare except maps every failure — bad id,
            # missing entity, formatting errors — to a 404.
            try:
                music = Music.get_by_id(int(id))
                self.manipulator.formater(music, [{'name': 'title', 'type': 'unicode', 'trim': settings.music_title_trimmed},
                                                  {'name': 'content', 'type': 'html', 'escape': True}])
                data = {'title': settings.music_edit_title,
                        'music': music,
                        'content_editor': self.ckeditor.editor('music_content', music.content_formated),
                        'src': self.get_redirect(settings.music_output_url_string % (str(music.music.key()), str(music.music.filename))),
                        'submit_url': self.get_redirect(settings.music_edit_url_string % str(music.key().id())),
                        'cancel_url': self.get_redirect(settings.music_edit_redirect_url),}
                self.render_html('music', 'music_edit_form', data)
            except:
                self.render_error(404)
        else:
            self.render_error(403)
    def post(self, id):
        self.access_init()
        if self.access.check('music_edit'):
            try:
                # Update metadata only; the audio blob itself is not replaced.
                music = Music.get_by_id(int(id))
                music.title = unicode(self.request.get('title'))
                music.content = db.Text(unicode(self.request.get('music_content')))
                music.published = bool(self.request.get('published'))
                music.put()
                self.redirect_to(settings.music_edit_redirect_url)
            except:
                self.render_error(404)
        else:
            self.render_error(403)
class MusicDelete(TemplateAPI):
    """Confirmation form (GET) and deletion handler (POST) for a Music
    entity; requires the 'music_delete' permission."""
    def get(self, id):
        self.access_init()
        if self.access.check('music_delete'):
            try:
                music = Music.get_by_id(int(id))
                self.manipulator.formater(music, [{'name': 'title', 'type': 'unicode', 'trim': settings.music_title_trimmed},
                                                  {'name': 'content', 'type': 'html', 'escape': False}])
                data = {'title': settings.music_delete_title,
                        'music': music,
                        'delete_question': settings.music_delete_form_label_question_string % music.title_formated,
                        'src': self.get_redirect(settings.music_output_url_string % (str(music.music.key()), str(music.music.filename))),
                        'submit_url': self.get_redirect(settings.music_delete_url_string % str(music.key().id())),
                        'cancel_url': self.get_redirect(settings.music_delete_redirect_url)}
                self.render_html('music', 'music_delete_form', data)
            except:
                self.render_error(404)
        else:
            self.render_error(403)
    def post(self, id):
        self.access_init()
        if self.access.check('music_delete'):
            try:
                # Remove both the blobstore payload and the datastore entity.
                music = Music.get_by_id(int(id))
                blobstore.delete(music.music.key())
                music.delete()
                self.redirect_to(settings.music_delete_redirect_url)
            except:
                self.render_error(404)
        else:
            self.render_error(403)
class MusicManagement(TemplateAPI):
    """Paged admin listing of all Music entities with per-row view/edit/
    delete links; visible to users with edit or delete permission."""
    def get(self):
        self.access_init()
        if self.access.check('music_edit') or self.access.check('music_delete'):
            query = Music.all()
            query.order('title')
            # Configure the pager from the total count, then fetch one page.
            self.pager(query.count(), settings.musics_per_page, settings.music_management_url)
            musics = query.fetch(settings.musics_per_page, self.pager_offset())
            musics_extended = []
            for music in musics:
                # Decorate each entity with formatted fields and action URLs
                # for the template.
                self.manipulator.formater(music, [{'name': 'title', 'type': 'unicode', 'trim': settings.music_title_trimmed},
                                                  {'name': 'content', 'type': 'html', 'escape': False}])
                music.src = self.get_redirect(settings.music_output_url_string % (str(music.music.key()), str(music.music.filename)))
                music.view_url = self.get_redirect(settings.music_view_url_string % str(music.key().id()))
                music.edit_url = self.get_redirect(settings.music_edit_url_string % str(music.key().id()))
                music.delete_url = self.get_redirect(settings.music_delete_url_string % str(music.key().id()))
                musics_extended.append(music)
            data = {'title': settings.music_management_title,
                    'musics': musics_extended,
                    'pager': self.render_pager(),
                    'add_url': self.get_redirect(settings.music_add_url),
                    'permissions_url': self.get_redirect(settings.music_permissions_url),
                    'music_add': self.access.check('music_add'),
                    'music_edit': self.access.check('music_edit'),
                    'music_delete': self.access.check('music_delete'),
                    'music_permissions': self.access.admin_user}
            self.render_html('music', 'music_management_form', data)
        else:
            self.render_error(403)
class MusicPermissions(UserPermissionAPI):
    """Admin form for granting/revoking the four music permissions
    (view/add/edit/delete) per user."""
    def get(self):
        self.get_form(permissions=['music_view', 'music_add', 'music_edit', 'music_delete'], url=settings.music_permissions_url, title=settings.music_permissions_title)
    def post(self):
        values = self.request.get_all('user-permissions')
        self.save_permissions(permissions=['music_view', 'music_add', 'music_edit', 'music_delete'], values=values, url=settings.music_permissions_url)
class MusicView(TemplateAPI):
    """Public detail page for one Music entity. Unpublished entries are
    visible only to users holding edit or delete permission."""
    def get(self, id):
        self.access_init()
        if self.access.check('music_view') or self.access.check('music_edit') or self.access.check('music_delete'):
            try:
                music = Music.get_by_id(int(id))
                # Editors/deleters may preview unpublished tracks.
                if music.published or self.access.check('music_edit') or self.access.check('music_delete'):
                    self.manipulator.formater(music, [{'name': 'title', 'type': 'unicode', 'trim': settings.music_title_trimmed},
                                                      {'name': 'content', 'type': 'html', 'escape': False}])
                    src = self.get_redirect(settings.music_output_url_string % (str(music.music.key()), str(music.music.filename)))
                    data = {'title': music.title_formated,
                            'music': music,
                            'src': src,
                            'like_url': self.get_redirect(settings.music_view_url_string % str(music.key().id()))}
                    self.render_html('music', 'music_view', data)
                else:
                    self.render_error(404)
            except:
                self.render_error(404)
        else:
            self.render_error(403)
class MusicAPI():
    """Helper for other handlers to fetch formatted Music entities by
    datastore key, decorated with src/view/like URLs for templates."""
    def __init__(self, templateapi):
        # The TemplateAPI instance supplies formatting and URL helpers.
        self.templateapi = templateapi
    def get_music(self, key, published_filter = True):
        """Return one decorated Music for *key*, or None when the key is
        falsy, lookup fails, or the entity is filtered as unpublished."""
        if key:
            try:
                music = Music.get(key)
                if (published_filter and music.published) or not published_filter:
                    self.templateapi.manipulator.formater(music, [{'name': 'title', 'type': 'unicode', 'trim': settings.music_title_trimmed},
                                                                  {'name': 'content', 'type': 'html', 'escape': False}])
                    music.src = self.templateapi.get_redirect(settings.music_output_url_string % (str(music.music.key()), str(music.music.filename)))
                    music.view_url = self.templateapi.get_redirect(settings.music_view_url_string % str(music.key().id()))
                    music.like_url = self.templateapi.get_redirect(settings.music_view_url_string % str(music.key().id()))
                    return music
                else:
                    return None
            except:
                return None
        return None
    def get_musics(self, keys, published_filter = True, limit = settings.musics_per_page, offset = 0):
        """Return a list of decorated Musics for a slice of *keys*, or
        None on any failure / falsy *keys*.
        NOTE(review): the default ``limit`` is captured from settings at
        class-definition time, not per call — confirm that is intended.
        """
        if keys:
            try:
                musics = Music.get(keys[offset:limit + offset])
                musics_extended = []
                for music in musics:
                    # Unpublished entries are silently skipped when filtering.
                    if (published_filter and music.published) or not published_filter:
                        self.templateapi.manipulator.formater(music, [{'name': 'title', 'type': 'unicode', 'trim': settings.music_title_trimmed},
                                                                      {'name': 'content', 'type': 'html', 'escape': False}])
                        music.src = self.templateapi.get_redirect(settings.music_output_url_string % (str(music.music.key()), str(music.music.filename)))
                        music.view_url = self.templateapi.get_redirect(settings.music_view_url_string % str(music.key().id()))
                        music.like_url = self.templateapi.get_redirect(settings.music_view_url_string % str(music.key().id()))
                        musics_extended.append(music)
                return musics_extended
            except:
                return None
        return None
class MusicOutput(blobstore_handlers.BlobstoreDownloadHandler):
    """Stream the raw audio blob identified by the URL-escaped *key*;
    any failure redirects to the site's 404 page."""
    def get(self, key, musicname):
        try:
            key = str(urllib.unquote(key))
            blob_info = blobstore.BlobInfo.get(key)
            self.send_blob(blob_info)
        except:
            self.redirect(settings.site_error_url_string % '404')
class MusicError(TemplateAPI):
    """Render the error page matching an upload-failure *type* code
    (oversize / incorrect type / forbidden); anything else is a 404."""
    def get(self, type):
        if type == settings.music_error_oversize_type:
            title = settings.music_error_oversize_title
            error_message = settings.music_error_oversize_message_string % settings.music_max_upload_size
            self.render_error(403, title, error_message)
        elif type == settings.music_error_incorrect_type_type:
            title = settings.music_error_incorrect_type_title
            error_message = settings.music_error_incorrect_type_message_string % ', '.join(settings.music_allowed_types)
            self.render_error(403, title, error_message)
        elif type == settings.music_error_forbidden_type:
            self.render_error(403)
        else:
            self.render_error(404)
# WSGI route table mapping the configured music URLs to their handlers.
app = webapp.WSGIApplication(
    [(settings.music_error_url, MusicError),
     (settings.music_permissions_url, MusicPermissions),
     (settings.music_management_url, MusicManagement),
     (settings.music_add_url, MusicAdd),
     (settings.music_save_url, MusicSave),
     (settings.music_edit_url, MusicEdit),
     (settings.music_delete_url, MusicDelete),
     (settings.music_view_url, MusicView),
     (settings.music_output_url, MusicOutput),],
    debug=False)
# NOTE(review): the string literal below is the retired CGI-style entry
# point kept as dead code — consider deleting it outright.
'''def main():
    application = webapp.WSGIApplication(
        [(settings.music_error_url, MusicError),
         (settings.music_permissions_url, MusicPermissions),
         (settings.music_management_url, MusicManagement),
         (settings.music_add_url, MusicAdd),
         (settings.music_save_url, MusicSave),
         (settings.music_edit_url, MusicEdit),
         (settings.music_delete_url, MusicDelete),
         (settings.music_view_url, MusicView),
         (settings.music_output_url, MusicOutput),],
        debug=False)
    util.run_wsgi_app(application)
if __name__ == '__main__':
    main()'''
from django.conf import settings
from django.contrib.postgres.fields import HStoreField
from django.db import models
from s3direct.fields import S3DirectField
class Gallery(models.Model):
    """A user-owned, titled collection of photos."""
    title = models.CharField(max_length=55)
    description = models.TextField(blank=True)
    # NOTE(review): default=1 assumes a user with pk 1 always exists —
    # confirm; also no on_delete (pre-Django-2.0 style).
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, default=1)
    def __unicode__(self):
        # Python 2 string representation (legacy Django).
        return u'{}'.format(self.title)
class Photo(models.Model):
    """A single image inside a Gallery, uploaded directly to S3."""
    title = models.CharField(max_length=55)
    description = models.TextField(blank=True)
    parent = models.ForeignKey(Gallery, related_name='photos')
    image = S3DirectField(dest='imgs', default='')
    # Arbitrary key/value metadata stored in a PostgreSQL hstore column.
    metadata = HStoreField(null=True, blank=True)
|
# Days per month in a non-leap year (index 0 = January).
months = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
def days(y, m):
    """Number of days in month *m* (0-based, 1 = February) of year *y*,
    applying the Gregorian leap-year rules for February."""
    if m != 1:
        # Every month except February has a fixed length.
        return (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)[m]
    # February: leap years are divisible by 4, except centuries,
    # unless the century is divisible by 400.
    if y % 4 != 0:
        return 28
    if y % 100 != 0:
        return 29
    return 29 if y % 400 == 0 else 28
# Project Euler 19: count the Sundays that fell on the first of a month
# during the twentieth century (1 Jan 1901 to 31 Dec 2000).
sun = 0
day = 0  # weekday of the 1st of the current month; 0 = Monday (1 Jan 1900), so 6 = Sunday
for year in range(1900, 2001):
    for month in range(0, 12):
        # 1900 only seeds the weekday; counting starts in 1901.
        if day == 6 and year >= 1901:
            sun += 1
        day = (day + days(year, month)) % 7
print(sun)
|
import FWCore.ParameterSet.Config as cms
import copy
# CMSSW configuration: Z -> mu tau analysis job (PAT-tuple production,
# event selection, histogram filling). Statement order matters: loads
# must precede the tool calls that modify the loaded modules.
process = cms.Process('runZtoMuTau')
# import of standard configurations for RECOnstruction
# of electrons, muons and tau-jets with non-standard isolation cones
process.load('Configuration/StandardSequences/Services_cff')
process.load('FWCore/MessageService/MessageLogger_cfi')
#process.MessageLogger.cerr.FwkReport.reportEvery = 100
process.MessageLogger.cerr.FwkReport.reportEvery = 1
#process.MessageLogger.cerr.threshold = cms.untracked.string('INFO')
#process.MessageLogger.suppressInfo = cms.untracked.vstring()
process.MessageLogger.suppressWarning = cms.untracked.vstring("PATTriggerProducer",)
process.load('Configuration/StandardSequences/GeometryIdeal_cff')
process.load('Configuration/StandardSequences/MagneticField_cff')
process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
process.GlobalTag.globaltag = cms.string('START42_V13::All')
# import particle data table
# needed for print-out of generator level information
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
#--------------------------------------------------------------------------------
# import sequences for PAT-tuple production
process.load("TauAnalysis.Configuration.producePatTuple_cff")
process.load("TauAnalysis.Configuration.producePatTupleZtoMuTauSpecific_cff")
# import sequence for event selection
process.load("TauAnalysis.Configuration.selectZtoMuTau_cff")
process.load("TauAnalysis.RecoTools.filterDataQuality_cfi")
# import sequence for filling of histograms, cut-flow table
# and of run + event number pairs for events passing event selection
process.load("TauAnalysis.Configuration.analyzeZtoMuTau_cff")
# import configuration parameters for submission of jobs to CERN batch system
# (running over skimmed samples stored on CASTOR)
from TauAnalysis.Configuration.recoSampleDefinitionsZtoMuTau_7TeV_cfi import *
from TauAnalysis.Configuration.recoSampleDefinitionsZtoMuTau_10TeV_cfi import *
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# print memory consumed by cmsRun
# (for debugging memory leaks)
#process.SimpleMemoryCheck = cms.Service("SimpleMemoryCheck",
#    ignoreTotal = cms.untracked.int32(1) # default is one
#)
#
#process.printGenParticleList = cms.EDAnalyzer("ParticleListDrawer",
#    src = cms.InputTag("genParticles"),
#    maxEventsToPrint = cms.untracked.int32(100)
#)
#
# print debug information whenever plugins get loaded dynamically from libraries
# (for debugging problems with plugin related dynamic library loading)
#process.add_(cms.Service("PrintLoadingPlugins"))
#--------------------------------------------------------------------------------
process.DQMStore = cms.Service("DQMStore")
process.saveZtoMuTauPlots = cms.EDAnalyzer("DQMSimpleFileSaver",
    outputFileName = cms.string('plotsZtoMuTau.root')
)
# NOTE(review): limited to 100 events — looks like a test setting;
# confirm before production running.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(100)
)
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring(
        ##'file:/data1/veelken/CMSSW_4_2_x/skims/AHtoMuTau_negSVfitMass_lorenzo.root'
        'file:/data1/veelken/CMSSW_4_2_x/skims/skimGenZtoMuTauWithinAcc_Ztautau_2011Jun30v2_AOD.root'
    )
    #skipBadFiles = cms.untracked.bool(True)
)
##from PhysicsTools.PatAlgos.tools.cmsswVersionTools import pickRelValInputFiles
##process.source = cms.Source("PoolSource",
##    fileNames = cms.untracked.vstring(
##        pickRelValInputFiles( cmsswVersion = 'CMSSW_4_2_0_pre8'
##                            , relVal = 'RelValTTbar'
##                            , globalTag = 'START42_V7'
##                            , numberOfFiles = 1
##                            )
##    )
##)
#--------------------------------------------------------------------------------
# import utility function for configuring PAT trigger matching
from PhysicsTools.PatAlgos.tools.trigTools import switchOnTrigger
switchOnTrigger(process, hltProcess = 'HLT', outputModule = '')
process.patTrigger.addL1Algos = cms.bool(False)
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# import utility function for switching pat::Tau input
# to different reco::Tau collection stored on AOD
from PhysicsTools.PatAlgos.tools.tauTools import *
# comment-out to take reco::CaloTaus instead of reco::PFTaus
# as input for pat::Tau production
#switchToCaloTau(process)
# comment-out to take shrinking dR = 5.0/Et(PFTau) signal cone
# instead of fixed dR = 0.07 signal cone reco::PFTaus
# as input for pat::Tau production
#switchToPFTauShrinkingCone(process)
#switchToPFTauFixedCone(process)
# comment-out to take new HPS + TaNC combined tau id. algorithm
#switchToPFTauHPSpTaNC(process)
# disable preselection on of pat::Taus
# (disabled also in TauAnalysis/RecoTools/python/patPFTauConfig_cfi.py ,
#  but re-enabled after switching tau collection)
process.cleanPatTaus.preselection = cms.string('')
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# import utility function for managing pat::Jets
from PhysicsTools.PatAlgos.tools.jetTools import *
# uncomment to replace caloJets by pfJets
switchJetCollection(process, jetCollection = cms.InputTag("ak5PFJets"), outputModule = '')
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# import utility function for managing pat::METs
from TauAnalysis.Configuration.tools.metTools import *
# uncomment to add pfMET
# set Boolean swich to true in order to apply type-1 corrections
addPFMet(process, correct = False)
# uncomment to replace caloMET by pfMET in all di-tau objects
process.load("TauAnalysis.CandidateTools.diTauPairProductionAllKinds_cff")
replaceMETforDiTaus(process, cms.InputTag('patMETs'), cms.InputTag('patPFMETs'))
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# import utility function for changing cut values
from TauAnalysis.Configuration.tools.changeCut import changeCut
# change upper limit on tranverse impact parameter of muon track to 2mm
changeCut(process, "selectedPatMuonsTrkIP", 0.2, attribute = "IpMax")
# switch between TaNC and HPS tau id. discriminators
#changeCut(process, "selectedPatTausLeadTrkPt", "tauID('leadingTrackPtCut') > 0.5")
#changeCut(process, "selectedPatTausForMuTauLeadTrkPt", "tauID('leadingTrackPtCut') > 0.5")
#changeCut(process, "selectedPatTausTaNCdiscr", "tauID('byTaNCloose') > 0.5")
#changeCut(process, "selectedPatTausForMuTauTaNCdiscr", "tauID('byTaNCloose') > 0.5")
changeCut(process, "selectedPatTausLeadTrkPt", "tauID('decayModeFinding') > 0.5")
changeCut(process, "selectedPatTausForMuTauLeadTrkPt", "tauID('decayModeFinding') > 0.5")
changeCut(process, "selectedPatTausTaNCdiscr", "tauID('byLooseCombinedIsolationDeltaBetaCorr') > 0.5")
changeCut(process, "selectedPatTausForMuTauTaNCdiscr", "tauID('byLooseCombinedIsolationDeltaBetaCorr') > 0.5")
# disable calorimeter muon veto for now...
changeCut(process, "selectedPatTausForMuTauCaloMuonVeto", "tauID('againstMuonTight') > -1.")
# change lower limit on separation required between muon and tau-jet to dR > 0.5
changeCut(process, "selectedMuTauPairsAntiOverlapVeto", "dR12 > 0.5")
changeCut(process, "selectedMuTauPairsAntiOverlapVetoLooseMuonIsolation", "dR12 > 0.5")
# change upper limit on muon + MET transverse mass to 50 GeV
#changeCut(process, "selectedMuTauPairsMt1MET", "mt1MET < 50.")
#changeCut(process, "selectedMuTauPairsMt1METlooseMuonIsolation", "mt1MET < 50.")
changeCut(process, "selectedMuTauPairsMt1MET", "mt1MET < 40.")
changeCut(process, "selectedMuTauPairsMt1METlooseMuonIsolation", "mt1MET < 40.")
# enable cut on Pzeta variable
#changeCut(process, "selectedMuTauPairsPzetaDiff", "(pZeta - 1.5*pZetaVis) > -20.")
#changeCut(process, "selectedMuTauPairsPzetaDiffLooseMuonIsolation", "(pZeta - 1.5*pZetaVis) > -20.")
changeCut(process, "selectedMuTauPairsPzetaDiff", "(pZeta - 1.5*pZetaVis) > -1000.")
changeCut(process, "selectedMuTauPairsPzetaDiffLooseMuonIsolation", "(pZeta - 1.5*pZetaVis) > -1000.")
# change isolation treshold for second muon used in di-muon veto to 0.30 * muon Pt
changeCut(process, "selectedPatMuonsForZmumuHypothesesLoosePFRelIso", 0.30, "sumPtMax")
#--------------------------------------------------------------------------------
# before starting to process 1st event, print event content
process.printEventContent = cms.EDAnalyzer("EventContentAnalyzer")
process.filterFirstEvent = cms.EDFilter("EventCountFilter",
    numEvents = cms.int32(1)
)
process.o = cms.Path(process.filterFirstEvent + process.printEventContent)
process.p = cms.Path(
    process.producePatTupleZtoMuTauSpecific
#   + process.printGenParticleList # uncomment to enable print-out of generator level particles
#   + process.printEventContent    # uncomment to enable dump of event content after PAT-tuple production
   + process.selectZtoMuTauEvents
   + process.analyzeZtoMuTauSequence
   + process.saveZtoMuTauPlots
)
process.q = cms.Path(process.dataQualityFilters)
process.schedule = cms.Schedule(process.o, process.q, process.p)
#--------------------------------------------------------------------------------
# import utility function for switching HLT InputTags when processing
# RECO/AOD files produced by MCEmbeddingTool
from TauAnalysis.MCEmbeddingTools.tools.switchInputTags import switchInputTags
#
# comment-out to switch HLT InputTags
#switchInputTags(process)
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# import utility function for factorization
from TauAnalysis.Configuration.tools.factorizationTools import enableFactorization_runZtoMuTau
#
# define "hook" for enabling/disabling factorization
# in case running jobs on the CERN batch system
# (needs to be done after process.p has been defined)
#__#factorization#
##enableFactorization_runZtoMuTau(process)
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# import utility function for applyting Z-recoil corrections to MET
from TauAnalysis.Configuration.tools.mcToDataCorrectionTools import applyZrecoilCorrection_runZtoMuTau
##applyZrecoilCorrection_runZtoMuTau(process)
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# import utility function for disabling estimation of systematic uncertainties
from TauAnalysis.Configuration.tools.sysUncertaintyTools import enableSysUncertainties_runZtoMuTau
#
# define "hook" for keeping enabled/disabling estimation of systematic uncertainties
# in case running jobs on the CERN batch system
# (needs to be done after process.p has been defined)
#__#systematics#
##enableSysUncertainties_runZtoMuTau(process)
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# disable event-dump output
# in order to reduce size of log-files
if hasattr(process, "disableEventDump"):
    process.analyzeZtoMuTauEvents.eventDumps = cms.VPSet()
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# disable accessing generator level information
# if running on data
from TauAnalysis.Configuration.tools.switchToData import switchToData
switchToData(process)
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#
process.producePatTupleAll = cms.Sequence(process.producePatTuple + process.producePatTupleZtoMuTauSpecific)
#
# define "hook" for enabling/disabling production of PAT-tuple event content,
# depending on whether RECO/AOD or PAT-tuples are used as input for analysis
#
#__#patTupleProduction#
if not hasattr(process, "isBatchMode"):
process.p.replace(process.producePatTupleZtoMuTauSpecific, process.producePatTuple + process.producePatTupleZtoMuTauSpecific)
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# CV: provisional switch to "old" HPS tau reconstruction (01/26/2011)
# as it is included in CMSSW_3_8_x release series
#
# --> remove hpsPFTauProducer from reco::PFTau sequence
# --> switch pat::Tau input to HPS tau collection
#
print("CV: switching to **old** HPS !!")
#while process.p.remove(process.PFTau): pass
switchToPFTauHPS(process)
process.cleanPatTaus.preselection = cms.string('')
changeCut(process, "selectedPatTausLeadTrk", "tauID('decayModeFinding') > 0.5")
changeCut(process, "selectedPatTausForMuTauLeadTrk", "tauID('decayModeFinding') > 0.5")
changeCut(process, "selectedPatTausLeadTrkPt", "tauID('decayModeFinding') > 0.5")
changeCut(process, "selectedPatTausForMuTauLeadTrkPt", "tauID('decayModeFinding') > 0.5")
changeCut(process, "selectedPatTausTaNCdiscr", "tauID('byLooseCombinedIsolationDeltaBetaCorr') > 0.5")
changeCut(process, "selectedPatTausForMuTauTaNCdiscr", "tauID('byLooseCombinedIsolationDeltaBetaCorr') > 0.5")
changeCut(process, "selectedPatTausForMuTauCaloMuonVeto", "tauID('againstMuonTight') > -1.")
process.ewkTauId.PFTauProducer = cms.InputTag("hpsPFTauProducer")
process.ewkTauId.Prediscriminants.leadTrack.Producer = cms.InputTag('hpsPFTauDiscriminationByDecayModeFinding')
process.ewkTauId.Prediscriminants.leadTrackPt.Producer = cms.InputTag('hpsPFTauDiscriminationByDecayModeFinding')
process.ewkTauId.Prediscriminants.TaNCloose.Producer = cms.InputTag('hpsPFTauDiscriminationByLooseIsolation')
process.ewkTauId.Prediscriminants.againstMuon.Producer = cms.InputTag('hpsPFTauDiscriminationByTightMuonRejection')
process.ewkTauId.Prediscriminants.againstElectron.Producer = cms.InputTag('hpsPFTauDiscriminationByLooseElectronRejection')
# disable muon momentum scale corrections
process.patMuonsMuScleFitCorrectedMomentum.doApplyCorrection = cms.bool(False)
# restrict input collections to AOD event content
from TauAnalysis.Configuration.tools.switchToAOD import switchToAOD
switchToAOD(process)
#--------------------------------------------------------------------------------
process.load("TauAnalysis/RecoTools/vertexMultiplicityVsRhoPFNeutralReweight_cfi")
process.producePatTupleAll += process.produceVertexMultiplicityVsRhoPFNeutralReweights
processDumpFile = open('runZtoMuTau.dump', 'w')
print >> processDumpFile, process.dumpPython()
|
"""유저의 회원가입, 로그인, 로그아웃, 내 정보 확인, 내 정보 수정 뷰들이 담겨있습니다."""
import json
import bcrypt
import jwt
from django.shortcuts import render
from django.views import View
from django.http import HttpResponse, JsonResponse
from django.db import IntegrityError
from eatexpress.settings import SECRET_KEY, HASH
from user.models import User
from eatexpress.decorator import member_verification
class CreateAccount(View):
    def post(self, request):
        """Register a new user.

        (Translated from Korean:) Performs sign-up; duplicate emails and
        nicknames are rejected. The password is hashed with bcrypt before
        being stored.
        """
        data = json.loads(request.body)
        try:
            if User.objects.filter(email=data['email']).exists():
                return JsonResponse({"message": "이미 존재하는 이메일입니다"}, status=400)
            if User.objects.filter(nickname=data['nickname']).exists():
                return JsonResponse({"message": "이미 존재하는 닉네임입니다"}, status=400)
            # bcrypt works on bytes; store the hash back as a utf-8 string.
            password_crypt = bcrypt.hashpw(
                data['password'].encode('utf-8'), bcrypt.gensalt())
            password_crypt = password_crypt.decode('utf-8')
            new_user = User(
                # 'username' is the field inherited from AbstractUser
                username=data['username'],
                nickname=data['nickname'],
                email=data['email'],
                password=password_crypt,
                phone_number=data['phone_number'],
                address=data['address'],
                # BUG FIX: was `gender=['gender']` — a literal one-element
                # list instead of the value from the request payload.
                gender=data['gender']
            )
            new_user.save()
            return JsonResponse({'message': '가입을 축하합니다'}, status=200)
        except IntegrityError:
            return JsonResponse({"error": "이미 존재하는 회원정보"}, status=400)
        except KeyError:
            return JsonResponse({'error': '올바르지 않은 키 값'}, status=400)
class LogIn(View):
    def post(self, request):
        """Authenticate a user by email and password.

        Looks the account up by email, verifies the supplied password
        against the stored bcrypt hash, and on success hands the client a
        JWT carrying the user's id.
        """
        payload = json.loads(request.body)
        try:
            email = payload['email']
            # Unknown email: reject before touching the password.
            if not User.objects.filter(email=email).exists():
                return JsonResponse({"message": "존재하지 않는 이메일아이디"}, status=401)
            user = User.objects.get(email=email)
            # bcrypt.checkpw compares the plaintext against the stored hash.
            if not bcrypt.checkpw(payload['password'].encode('utf-8'),
                                  user.password.encode('utf-8')):
                return JsonResponse({"message": "틀린 비밀번호"}, status=401)
            token = jwt.encode(
                {'user_id': user.id}, SECRET_KEY, algorithm=HASH)
            return JsonResponse({"token": token}, status=200)
        except KeyError:
            return JsonResponse({"message": "INVALID_KEYS"}, status=401)
class LogOut(View):
    # NOTE(review): stub — the decorator validates the token, but no
    # server-side logout action (e.g. token invalidation) is implemented,
    # and the view returns None, which Django will reject. Confirm intent.
    @member_verification
    def post(self, request):
        """Placeholder logout endpoint; currently does nothing."""
        pass
class Myinfo(View):
    @member_verification
    def post(self, request):
        """Update the authenticated user's profile.

        (Translated from Korean:) Edits personal information. The email
        (login id) is intentionally not editable; username, nickname and
        phone_number are unique so another user's data cannot be taken
        over.
        """
        # set by the member_verification decorator from the JWT payload
        user_id = request.userid
        try:
            data = json.loads(request.body)
            username = data['username']
            nickname = data['nickname']
            # password=password_crypt,
            phone_number = data['phone_number']
            address = data['address']
            if User.objects.filter(id=user_id).exists():
                User.objects.filter(id=user_id).update(
                    username=username, nickname=nickname, phone_number=phone_number, address=address)
                return JsonResponse({"message": "유저정보가 변경되었습니다"}, status=200)
            # NOTE(review): if no row matches user_id this method returns
            # None, which Django rejects — presumably the decorator
            # guarantees a valid user_id; confirm.
        except KeyError:
            return JsonResponse({'error': '올바르지 않은 키 값'}, status=400)
        except IntegrityError:
            return JsonResponse({'error': '이미 존재하는 값'}, status=400)

    @member_verification
    def get(self, request):
        """Return the authenticated user's profile fields.

        (Translated from Korean:) Shows personal information.
        """
        user_id = request.userid
        info = User.objects.get(id=user_id)
        my_info = []
        j = {
            'username': info.username,
            'nickname': info.nickname,
            'email': info.email,
            'phone_number': info.phone_number,
            'address': info.address,
            'gender': info.gender
        }
        my_info.append(j)
        return JsonResponse({'info': my_info}, status=200)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from PySide6.QtWidgets import QApplication, QMainWindow, QDialog
from PySide6 import QtCore
# импортируем связанный py файл с нашим ui файлом
from design_calculator import Ui_MainWindow
class MainWindow(QMainWindow):
    """Calculator main window: wires the generated Qt UI to handlers.

    (Comments translated from Russian.)
    """

    def __init__(self):
        # initialise the parent QMainWindow
        super(MainWindow, self).__init__()
        # build the form generated by Qt Designer
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # Digit, operator and decimal-point buttons all append their text
        # to the screen via pushed_button.
        for name in ("b0", "b1", "b2", "b3", "b4", "b5", "b6", "b7",
                     "b8", "b9", "plus", "minus", "multiplication",
                     "division", "point"):
            getattr(self.ui, name).clicked.connect(self.pushed_button)
        self.ui.equal.clicked.connect(self.equal)
        self.ui.clear.clicked.connect(self.clear)
        self.ui.backspace.clicked.connect(self.backspace)
        self.ui.sqrt.clicked.connect(self.sqrt)
        self.ui.plus_minus.clicked.connect(self.minus_plus)
        # current expression shown on screen
        self.screen = ''
        # whether a leading '-' has been applied via plus_minus
        self.minus = False

    def pushed_button(self):
        """Append the clicked button's text to the display."""
        button = self.sender()
        self.screen += button.text()
        self.ui.screen.setText(self.screen)

    def equal(self):
        """Evaluate the current expression and display the result,
        truncated to at most three decimal places."""
        # NOTE(review): eval() is acceptable only because the expression
        # can be built solely from the calculator's own buttons.
        # BUG FIX: the original crashed the app on malformed input or
        # division by zero; show 'Error' instead.
        try:
            self.screen = str(eval(self.screen))
        except (SyntaxError, NameError, ZeroDivisionError):
            self.screen = ''
            self.ui.screen.setText('Error')
            return
        dot = self.screen.find('.')
        # truncate (not round) anything past three decimal places
        if dot != -1 and len(self.screen) - dot - 1 > 3:
            self.screen = self.screen[:dot + 4]
        self.ui.screen.setText(self.screen)

    def clear(self):
        """Reset the display and the sign state."""
        self.screen = ''
        self.ui.screen.setText('0')
        self.minus = False

    def backspace(self):
        """Drop the last entered character."""
        self.screen = self.screen[:-1]
        self.ui.screen.setText(self.screen)

    def sqrt(self):
        """Replace the displayed value with its square root."""
        # BUG FIX: was int(self.screen) ** 0.5, which raised ValueError on
        # any value containing a decimal point; parse as float instead
        # (identical result for integer input) and show 'Error' on junk.
        try:
            self.screen = str(float(self.screen) ** 0.5)
        except ValueError:
            self.screen = ''
            self.ui.screen.setText('Error')
            return
        self.ui.screen.setText(self.screen)

    def minus_plus(self):
        """Toggle a leading minus sign on the current value."""
        if not self.minus:
            self.screen = f'-{self.screen}'
            self.minus = True
        else:
            self.screen = self.screen[1:]
            self.minus = False
        self.ui.screen.setText(self.screen)
if __name__ == "__main__":
    # Create the Qt application, show the main window and enter the
    # event loop. (Comments translated from Russian.)
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    # exec_() is the deprecated Qt4/5-era alias; PySide6 uses exec().
    sys.exit(app.exec())
|
# Eu estou usando o exercício 4 da lista de programação 5 para executar este exercício.
def main():
    """Build an acronym from the first letter of every word in
    ``texto.txt`` and write the upper-cased result to ``acronimos.txt``.

    (Original header comment, translated from Portuguese: based on
    exercise 4 of programming list 5.)
    """
    # `with` guarantees both files are closed even on error (the original
    # opened and closed them by hand).
    with open('texto.txt', 'r') as arquivo:
        palavras = arquivo.read().split()
    # first letter of each word, upper-cased at the end as before
    acronimo = ''.join(palavra[0] for palavra in palavras).upper()
    with open('acronimos.txt', 'w') as arq_saida:
        print(acronimo, file=arq_saida)
    print("Arquivo gravado com sucesso no arquivo de saída. \n Tchau!")


if __name__ == '__main__':
    # guard the entry point so importing this module has no side effects
    main()
|
#!/usr/bin/env python
import subprocess
from wrappers_settings import *
import sys
# Usage: argv[1] = username, argv[2] = size in GiB, argv[3] = volume group.
if len(sys.argv) > 3:
    # check_username / run_command come from wrappers_settings (star
    # import); presumably check_username sanitises or maps the username
    # to a volume name — verify there before changing.
    path = check_username(sys.argv[1])
    size = sys.argv[2]
    lvm_name = sys.argv[3]
    # create a logical volume of <size> GiB named <path> in <lvm_name>
    run_command(["lvcreate", "-n", path, "-L%sG" % size, lvm_name])
    sys.exit(0)
# wrong number of arguments: exit with 255
sys.exit(255)
|
import Gato
# NOTE(review): this block is not valid Python as written —
# `def alimentar(Animales.Animal):` uses a dotted name as a parameter,
# `if(tipoalimento=tipoalimento)` uses assignment where a comparison is
# required, and `self` is referenced without being a parameter. The
# intended design (methods of a Gato/Animal class?) cannot be inferred
# safely from this chunk, so only comments are added here.
def alimentar(Animales.Animal):
    if(tipoalimento=tipoalimento):
        self.tipoalimento+="Dogui"
        self.cantidadalimento=cantidadalimento
    else:
        # "The cat is hungry"
        print("El gato tiene hambre")


# NOTE(review): same syntactic problems as alimentar above; the if-body
# expression has no effect as written.
def TomarAgua(Animales.Animal):
    if(self.CantidadAgua.CantidadAgua):
        self.cantidadalimento.cantidadalimento
    else:
        # "The cat is hungry and thirsty"
        print("El gato tiene hambre y sed")
|
import abc
import inspect
import json
import os
import multiprocessing
import time
import numpy as np
import tensorflow as tf2
from ..utils.misc import time_block, colorize
from ..utils.exception import NotSamplingError
# Run the TF2 runtime through the v1 compatibility API: everything below
# is written against graph-mode sessions (tf.Session, placeholders).
tf = tf2.compat.v1
tf.disable_v2_behavior()
class Base(abc.ABC):
    """Base class for all recommendation models.

    Parameters
    ----------
    task : str
        Specific task, either rating or ranking.
    data_info : `DataInfo` object
        Object that contains useful information for training and predicting.
    lower_upper_bound : list or tuple, optional
        Lower and upper score bound for rating task.
    """

    def __init__(self, task, data_info, lower_upper_bound=None):
        self.task = task
        if task == "rating":
            self.global_mean = data_info.global_mean
            if lower_upper_bound is not None:
                assert isinstance(lower_upper_bound, (list, tuple)), (
                    "must contain both lower and upper bound if provided")
                self.lower_bound = lower_upper_bound[0]
                self.upper_bound = lower_upper_bound[1]
            else:
                # no explicit bounds given: use the observed rating range
                self.lower_bound, self.upper_bound = data_info.min_max_rating
            # print(f"lower bound: {self.lower_bound}, "
            #       f"upper bound: {self.upper_bound}")
        elif task != "ranking":
            raise ValueError("task must either be rating or ranking")

        # fallback score for unknown users/items: global mean for rating,
        # 0.0 for ranking
        self.default_prediction = (
            data_info.global_mean
            if task == "rating"
            else 0.0
        )

    @abc.abstractmethod
    def fit(self, train_data, **kwargs):
        """Train model on the training data.

        Parameters
        ----------
        train_data : `TransformedSet` object
            Data object used for training.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def predict(self, user, item, **kwargs):
        """Predict score for given user and item.

        Parameters
        ----------
        user : int or array_like
            User id or batch of user ids.
        item : int or array_like
            Item id or batch of item ids.

        Returns
        -------
        prediction : int or array_like
            Predicted scores for each user-item pair.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def recommend_user(self, user, n_rec, **kwargs):
        """Recommend a list of items for given user.

        Parameters
        ----------
        user : int
            User id to recommend.
        n_rec : int
            number of recommendations to return.

        Returns
        -------
        result : list of tuples
            A recommendation list, each recommendation
            contains an (item_id, score) tuple.
        """
        raise NotImplementedError

    def convert_id(self, user, item, inner_id=False):
        """Normalise user/item ids into 1-d numpy arrays of inner indices.

        When ``inner_id`` is False, raw ids are mapped through
        ``data_info.user2id`` / ``data_info.item2id``; otherwise they are
        assumed to already be inner indices and are only wrapped in lists.
        """
        if not inner_id:
            user = (
                [self.data_info.user2id[user]]
                if isinstance(user, int)
                else [self.data_info.user2id[u] for u in user]
            )
            item = (
                [self.data_info.item2id[item]]
                if isinstance(item, int)
                else [self.data_info.item2id[i] for i in item]
            )
        else:
            user = [user] if isinstance(user, int) else user
            item = [item] if isinstance(item, int) else item
        return np.asarray(user), np.asarray(item)

    def _check_unknown(self, user, item):
        """Find positions whose user/item index is out of range.

        Out-of-range entries are reported and temporarily remapped to
        index 0 (the caller substitutes the default prediction for them
        later). Returns ``(unknown_num, unknown_index, user, item)``.
        """
        unknown_user_indices = list(
            np.where(np.logical_or(user >= self.n_users, user < 0))[0]
        )
        unknown_item_indices = list(
            np.where(np.logical_or(item >= self.n_items, item < 0))[0]
        )
        unknown_user = (list(user[unknown_user_indices])
                        if unknown_user_indices
                        else None)
        unknown_item = (list(item[unknown_item_indices])
                        if unknown_item_indices
                        else None)
        unknown_index = list(
            set(unknown_user_indices) | set(unknown_item_indices)
        )
        unknown_num = len(unknown_index)
        if unknown_num > 0:
            # temp conversion, will convert back in the main model
            user[unknown_index] = 0
            item[unknown_index] = 0
            unknown_str = (f"Detect {unknown_num} unknown interaction(s), "
                           f"including user: {unknown_user}, "
                           f"item: {unknown_item}, "
                           f"will be handled as default prediction")
            print(f"{colorize(unknown_str, 'red')}")
        return unknown_num, unknown_index, user, item

    def _check_unknown_user(self, user):
        """Return ``user`` if it is a valid inner index, else warn and
        return None."""
        if 0 <= user < self.n_users:
            return user
        else:
            unknown_str = (f"detect unknown user {user}, "
                           f"return default recommendation")
            print(f"{colorize(unknown_str, 'red')}")
            return

    @staticmethod
    def _check_has_sampled(data, verbose):
        """Raise ``NotSamplingError`` if ``data`` was never negatively
        sampled — only enforced when ``verbose > 1``, i.e. when per-epoch
        evaluation will actually run."""
        if not data.has_sampled and verbose > 1:
            exception_str = (f"During training, "
                             f"one must do whole data sampling "
                             f"before evaluating on epochs.")
            raise NotSamplingError(f"{colorize(exception_str, 'red')}")

    @staticmethod
    def _check_interaction_mode(recent_num, random_num):
        """Resolve the interaction-sampling strategy: ``('recent', n)``
        when ``recent_num`` is given, ``('random', n)`` when
        ``random_num`` is given, otherwise 10 recent interactions."""
        if recent_num is not None:
            assert isinstance(recent_num, int), "recent_num must be integer"
            mode = "recent"
            num = recent_num
        elif random_num is not None:
            assert isinstance(random_num, int), "random_num must be integer"
            mode = "random"
            num = random_num
        else:
            mode = "recent"
            num = 10  # by default choose 10 recent interactions
        return mode, num

    @staticmethod
    def _decide_sparse_indices(data_info):
        """Whether the dataset defines any sparse feature columns."""
        return False if not data_info.sparse_col.name else True

    @staticmethod
    def _decide_dense_values(data_info):
        """Whether the dataset defines any dense feature columns."""
        return False if not data_info.dense_col.name else True

    @staticmethod
    def _sparse_feat_size(data_info):
        """Total sparse-feature vocabulary size (max index + 1).

        NOTE(review): implicitly returns None when neither user nor item
        sparse uniques exist — callers appear to guard with
        ``_decide_sparse_indices`` first; confirm.
        """
        if (data_info.user_sparse_unique is not None
                and data_info.item_sparse_unique is not None):
            return max(np.max(data_info.user_sparse_unique),
                       np.max(data_info.item_sparse_unique)) + 1
        elif data_info.user_sparse_unique is not None:
            return np.max(data_info.user_sparse_unique) + 1
        elif data_info.item_sparse_unique is not None:
            return np.max(data_info.item_sparse_unique) + 1

    @staticmethod
    def _sparse_field_size(data_info):
        """Number of sparse feature fields."""
        return len(data_info.sparse_col.name)

    @staticmethod
    def _dense_field_size(data_info):
        """Number of dense feature fields."""
        return len(data_info.dense_col.name)

    @staticmethod
    def show_start_time():
        """Print the wall-clock time at which training starts."""
        start_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
        print(f"Training start time: {colorize(start_time, 'magenta')}")

    def save_params(self, path):
        """Serialise the constructor hyper-parameters (all ``__init__``
        args except ``data_info``) to ``<path>/hyper_parameters.json``;
        values come from ``self.all_args`` collected by the subclass."""
        hparams = dict()
        arg_names = list(inspect.signature(self.__init__).parameters.keys())
        arg_names.remove("data_info")
        for p in arg_names:
            hparams[p] = self.all_args[p]
        param_path = os.path.join(path, "hyper_parameters.json")
        with open(param_path, 'w') as f:
            json.dump(hparams, f, separators=(',', ':'))

    @classmethod
    def load_params(cls, path, data_info):
        """Load saved hyper-parameters and re-attach ``data_info``;
        returns a kwargs dict suitable for ``cls(**hparams)``."""
        if not os.path.exists(path):
            raise OSError(f"file folder {path} doesn't exists...")
        param_path = os.path.join(path, "hyper_parameters.json")
        with open(param_path, 'r') as f:
            hparams = json.load(f)
        hparams.update({"data_info": data_info})
        return hparams
class TfMixin(object):
    """Mixin for TF1 graph-mode models: owns the shared ``tf.Session``
    and provides generic training loops and feed-dict builders."""

    def __init__(self, tf_sess_config=None):
        self.cpu_num = multiprocessing.cpu_count()
        self.sess = self._sess_config(tf_sess_config)

    def _sess_config(self, tf_sess_config=None):
        """Create the ``tf.Session``, defaulting to a CPU-oriented
        ``ConfigProto`` (``tf_sess_config`` is a dict of its kwargs)."""
        if not tf_sess_config:
            # Session config based on:
            # https://software.intel.com/content/www/us/en/develop/articles/tips-to-improve-performance-for-popular-deep-learning-frameworks-on-multi-core-cpus.html
            tf_sess_config = {
                "intra_op_parallelism_threads": 0,
                "inter_op_parallelism_threads": 0,
                "allow_soft_placement": True,
                "device_count": {"CPU": self.cpu_num}
            }
            # os.environ["OMP_NUM_THREADS"] = f"{self.cpu_num}"

        config = tf.ConfigProto(**tf_sess_config)
        return tf.Session(config=config)

    def train_pure(self, data_generator, verbose, shuffle, eval_data, metrics,
                   **kwargs):
        """Training loop for models that consume only (user, item, label)
        batches — no side features."""
        for epoch in range(1, self.n_epochs + 1):
            with time_block(f"Epoch {epoch}", verbose):
                train_total_loss = []
                for user, item, label, _, _ in data_generator(
                        shuffle, self.batch_size
                ):
                    feed_dict = {self.user_indices: user,
                                 self.item_indices: item,
                                 self.labels: label}
                    # some models (e.g. with dropout) expose a training flag
                    if hasattr(self, "is_training"):
                        feed_dict.update({self.is_training: True})
                    train_loss, _ = self.sess.run(
                        [self.loss, self.training_op], feed_dict=feed_dict)
                    train_total_loss.append(train_loss)

            if verbose > 1:
                train_loss_str = "train_loss: " + str(
                    round(float(np.mean(train_total_loss)), 4)
                )
                print(f"\t {colorize(train_loss_str, 'green')}")
                class_name = self.__class__.__name__.lower()
                if class_name.startswith("svd"):
                    # set up parameters for prediction evaluate
                    self._set_latent_factors()
                self.print_metrics(eval_data=eval_data, metrics=metrics,
                                   **kwargs)
                print("="*30)

    def train_feat(self, data_generator, verbose, shuffle, eval_data, metrics,
                   **kwargs):
        """Training loop for models that additionally consume sparse
        indices and dense values as side features."""
        for epoch in range(1, self.n_epochs + 1):
            if self.lr_decay:
                print(f"With lr_decay, epoch {epoch} learning rate: "
                      f"{self.sess.run(self.lr)}")
            with time_block(f"Epoch {epoch}", verbose):
                train_total_loss = []
                for u, i, label, si, dv in data_generator(
                        shuffle, self.batch_size
                ):
                    feed_dict = self._get_feed_dict(u, i, si, dv, label, True)
                    train_loss, _ = self.sess.run(
                        [self.loss, self.training_op], feed_dict)
                    train_total_loss.append(train_loss)

            if verbose > 1:
                train_loss_str = "train_loss: " + str(
                    round(float(np.mean(train_total_loss)), 4)
                )
                print(f"\t {colorize(train_loss_str, 'green')}")
                self.print_metrics(eval_data=eval_data, metrics=metrics,
                                   **kwargs)
                print("="*30)

    def train_seq(self):
        pass  # TODO: combine train_feat and train_seq

    def _get_feed_dict(self, user_indices, item_indices, sparse_indices,
                       dense_values, label, is_training):
        """Assemble the feed dict for feature-based models; sparse/dense
        entries are added only when the model declares those inputs."""
        feed_dict = {
            self.user_indices: user_indices,
            self.item_indices: item_indices,
            self.is_training: is_training
        }
        if self.sparse:
            feed_dict.update({self.sparse_indices: sparse_indices})
        if self.dense:
            feed_dict.update({self.dense_values: dense_values})
        if label is not None:
            feed_dict.update({self.labels: label})
        return feed_dict

    def _get_seq_feed_dict(self, u_interacted_seq, u_interacted_len,
                           user_indices, item_indices, label, sparse_indices,
                           dense_values, is_training):
        """Like ``_get_feed_dict`` plus the per-user interaction sequence
        and its true length, for sequence models."""
        feed_dict = {
            self.user_interacted_seq: u_interacted_seq,
            self.user_interacted_len: u_interacted_len,
            self.user_indices: user_indices,
            self.item_indices: item_indices,
            self.is_training: is_training
        }
        if self.sparse:
            feed_dict.update({self.sparse_indices: sparse_indices})
        if self.dense:
            feed_dict.update({self.dense_values: dense_values})
        if label is not None:
            feed_dict.update({self.labels: label})
        return feed_dict

    def assign_oov(self):
        """Zero out the out-of-vocabulary row (index ``n_users`` /
        ``n_items``) of every user/item variable whose name matches the
        model's declared ``user_variables`` / ``item_variables``."""
        assign_ops = []
        for v in tf.trainable_variables():
            if hasattr(self, "user_variables"):
                for vu in self.user_variables:
                    if v.name.startswith(vu):
                        size = v.get_shape().as_list()[1]
                        zero_op = tf.IndexedSlices(
                            tf.zeros([1, size], dtype=tf.float32),
                            [self.n_users]
                        )
                        assign_ops.append(v.scatter_update(zero_op))
            if hasattr(self, "item_variables"):
                for vi in self.item_variables:
                    if v.name.startswith(vi):
                        size = v.get_shape().as_list()[1]
                        zero_op = tf.IndexedSlices(
                            tf.zeros([1, size], dtype=tf.float32),
                            [self.n_items]
                        )
                        assign_ops.append(v.scatter_update(zero_op))
        self.sess.run(assign_ops)

    def save_tf_model(self, path, model_name):
        """Checkpoint the full graph and weights under path/model_name."""
        model_path = os.path.join(path, model_name)
        saver = tf.train.Saver()
        saver.save(self.sess, model_path, write_meta_graph=True)

    @classmethod
    def load_tf_model(cls, path, model_name, data_info):
        """Rebuild the model from saved hyper-parameters, then restore
        its weights from the TF checkpoint."""
        model_path = os.path.join(path, model_name)
        hparams = cls.load_params(path, data_info)
        model = cls(**hparams)
        model._build_model()
        if hasattr(model, "user_last_interacted"):
            model._set_last_interacted()
        # saver = tf.train.import_meta_graph(os.path.join(path, model_name + ".meta"))
        saver = tf.train.Saver()
        saver.restore(model.sess, model_path)
        return model

    def save_variables(self, path, model_name):
        """Dump every TF global variable to a compressed ``.npz``."""
        variable_path = os.path.join(path, f"{model_name}_variables")
        variables = dict()
        for v in tf.global_variables():
            variables[v.name] = self.sess.run(v)
        np.savez_compressed(variable_path, **variables)

    @classmethod
    def load_variables(cls, path, model_name, data_info):
        """Rebuild the model and load its weights from the ``.npz``
        archive written by ``save_variables``."""
        variable_path = os.path.join(path, f"{model_name}_variables.npz")
        variables = np.load(variable_path)
        hparams = cls.load_params(path, data_info)
        model = cls(**hparams)
        model._build_model()
        if hasattr(model, "user_last_interacted"):
            model._set_last_interacted()
        # model.sess.run(tf.trainable_variables()[0].initializer)
        # print(model.sess.run(tf.trainable_variables()[0]))
        assign_ops = []
        for v in tf.global_variables():
            assign_ops.append(v.assign(variables[v.name]))
            # v.load(variables[v.name], session=model.sess)
        model.sess.run(assign_ops)
        # print(model.sess.run(tf.trainable_variables()[0]))
        return model
|
#
# A number of functions which can be used to add various types of noise to
# exact simulations to create fake data
#
# This file is part of PINTS.
# Copyright (c) 2017, University of Oxford.
# For licensing information, see the LICENSE file distributed with the PINTS
# software package.
#
#
import numpy as np
def add_independent_noise(values, sigma):
    """Return *values* with i.i.d. ``N(0, sigma)`` Gaussian noise added
    element-wise."""
    noise = np.random.normal(loc=0, scale=sigma, size=values.shape)
    return values + noise
def AR1(rho, sigma, T):
    """Generate a zero-mean AR(1) series of length ``T``.

    ``vX[t] = rho * vX[t-1] + e(t)`` with
    ``e(t) ~ N(0, sigma * sqrt(1 - rho^2))`` and ``vX[0] = 0``.  This
    parameterisation gives the stationary process mean 0 and standard
    deviation ``sigma``.
    """
    innovation_sd = sigma * np.sqrt(1 - rho ** 2)
    series = np.zeros(T)
    for step in range(1, T):
        series[step] = rho * series[step - 1] + np.random.normal(0, innovation_sd)
    return series
def add_AR1_noise(values, rho, sigma):
    """
    Adds autoregressive order 1 noise to data, i.e. the errors follow
    ``e(t) ~ rho * e(t-1) + v(t)``, where ``v(t) ~ iid N(0,sigma)``.

    BUG FIX: ``AR1`` expects an integer series length ``T`` (it calls
    ``range(1, T)``), but this previously passed ``values.shape`` — a
    tuple — which raised ``TypeError``. Pass the sample count instead.
    """
    return values + AR1(rho, sigma, len(values))
def AR1_unity(rho, sigma, T):
    """Generate a unit-mean AR(1) series of length ``T``.

    ``vX[t] = (1 - rho) + rho * vX[t-1] + e(t)`` with
    ``e(t) ~ N(0, sigma * sqrt(1 - rho^2))`` and ``vX[0] = 1``, so the
    stationary process has mean 1 and standard deviation ``sigma``.
    """
    innovation_sd = sigma * np.sqrt(1 - rho ** 2)
    series = np.zeros(T)
    series[0] = 1
    for step in range(1, T):
        series[step] = (1 - rho) + rho * series[step - 1] \
            + np.random.normal(0, innovation_sd)
    return series
def multiply_AR1_noise(values, rho, sigma):
    """
    Multiplies signal by a noise process that follows an autoregressive
    order 1 process of mean 1.

    BUG FIX: as in ``add_AR1_noise``, ``AR1_unity`` needs an integer
    length, not the ``values.shape`` tuple previously passed.
    """
    return values * AR1_unity(rho, sigma, len(values))
|
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import sys
from time import sleep

# Command-line flags: an argument starting with 'p' enables plotting,
# one starting with 'h' multiplies the iteration cap by 10 ("high
# performance" run). Only argv[1] and argv[2] are inspected.
plot_mode = False
n_epoch_scale = 1
if(len(sys.argv) > 1):
    if sys.argv[1][0] == 'p':
        print("executing: "+sys.argv[1]+" -> "+str(len(sys.argv[1]))+" Plot mode enabled")
        plot_mode = True
    if sys.argv[1][0] == 'h':
        print("executing: h -> "+str(len(sys.argv[1]))+" High performance mode enabled")
        n_epoch_scale = 10
        sleep(5)
if(len(sys.argv) > 2):
    # NOTE(review): this branch re-checks argv[1] as well, so a flag in
    # argv[1] is announced twice when two arguments are given, and the
    # message always reports len(sys.argv[1]); flagged for review.
    if sys.argv[1][0] == 'p' or sys.argv[2][0] == 'p':
        print("executing: "+sys.argv[1]+" -> "+str(len(sys.argv[1]))+" Plot mode enabled")
        plot_mode = True
    if sys.argv[1][0] == 'h' or sys.argv[2][0] == 'h':
        print("executing: h -> "+str(len(sys.argv[1]))+" High performance mode enabled")
        n_epoch_scale = 10

# Import necessary libraries
import numpy as np
import pandas as pd
import os

# matplotlib only when plotting was requested (keeps headless runs light)
if plot_mode:
    import matplotlib.pyplot as plt
    #get_ipython().magic(u'matplotlib inline')

# fixed seed for reproducible random weight initialisation
np.random.seed(0)
def gen_data(file, normalization):
    """Load a training CSV and return ``(x_train, y_train)``.

    Drops the 'dummy' and 'id' columns, expands 'date' (M/D/YYYY) into
    integer day/month/year columns, optionally min-max normalises the
    features, and prepends an all-ones 'Intercept' (bias) column.

    Parameters
    ----------
    file : str
        Path to the CSV; must contain 'dummy', 'id', 'date' and 'price'.
    normalization : bool
        Mean-centre and scale each feature by its range when True.
    """
    train_data = pd.read_csv(file)
    # BUG FIX: drop(label, 1) with a positional axis was removed in
    # pandas 2.0; use the explicit `columns=` keyword.
    train_data = train_data.drop(columns=['dummy', 'id'])
    day = []
    month = []
    year = []
    # dates are in U.S. month/day/year order
    for item in train_data['date']:
        month_s, day_s, year_s = item.split('/')
        day.append(day_s)
        month.append(month_s)
        year.append(year_s)
    day = np.asarray(day).astype(int)
    month = np.asarray(month).astype(int)
    year = np.asarray(year).astype(int)
    train_data.insert(loc=0, column='year', value=year)
    train_data.insert(loc=0, column='month', value=month)
    train_data.insert(loc=0, column='day', value=day)
    train_data = train_data.drop(columns=['date'])
    x_train = train_data.drop(columns=['price'])
    y_train = train_data['price']
    # optional min-max normalisation (mean-centred, range-scaled)
    if normalization:
        x_train = (x_train - x_train.mean()) / (x_train.max() - x_train.min())
    # add the bias column
    ones = np.ones(x_train.shape[0])
    x_train.insert(loc=0, column='Intercept', value=ones)
    return x_train, y_train
def gen_test_data(file, normalization):
    """Load a prediction CSV and return the feature matrix only.

    Same preprocessing as ``gen_data`` ('dummy'/'id' dropped, 'date'
    expanded, optional normalisation, 'Intercept' column prepended) but
    without separating a 'price' target — test files have none.
    """
    train_data = pd.read_csv(file)
    # BUG FIX: drop(label, 1) with a positional axis was removed in
    # pandas 2.0; use the explicit `columns=` keyword.
    train_data = train_data.drop(columns=['dummy', 'id'])
    day = []
    month = []
    year = []
    # dates are in U.S. month/day/year order
    for item in train_data['date']:
        month_s, day_s, year_s = item.split('/')
        day.append(day_s)
        month.append(month_s)
        year.append(year_s)
    day = np.asarray(day).astype(int)
    month = np.asarray(month).astype(int)
    year = np.asarray(year).astype(int)
    train_data.insert(loc=0, column='year', value=year)
    train_data.insert(loc=0, column='month', value=month)
    train_data.insert(loc=0, column='day', value=day)
    train_data = train_data.drop(columns=['date'])
    x_train = train_data
    # optional min-max normalisation (mean-centred, range-scaled)
    if normalization:
        x_train = (x_train - x_train.mean()) / (x_train.max() - x_train.min())
    # add the bias column
    ones = np.ones(x_train.shape[0])
    x_train.insert(loc=0, column='Intercept', value=ones)
    return x_train
def solve_lr(x_train,y_train,alpha,n_epoch):
    """Batch gradient descent for unregularised linear regression.

    Parameters: feature matrix ``x_train``, targets ``y_train``,
    learning rate ``alpha``, iteration cap ``n_epoch`` (multiplied by the
    module-level ``n_epoch_scale`` flag).  Returns ``(w, counters,
    sse_s)``: the weights, iteration indices, and the 0.5*MSE history.
    """
    # Option 1 --> set w as random values between 0 and 1
    w=np.random.rand(x_train.shape[1])
    # Option 2 --> set w as zero
    #w=np.zeros(x_train.shape[1])
    w=np.matrix(w).T
    X=np.matrix(x_train)
    y=np.matrix(y_train)
    grad_norm=10000
    counters = list()
    sse_s = list()
    counter=0
    # iterate until the gradient criterion falls below the threshold
    while grad_norm>0.1:
        e=X*w-y.T
        grad=np.multiply(e,X,dtype=np.float64) #float128
        grad=np.sum(grad,axis=0)/X.shape[0]
        # case 1 separate w0 and other w terms
        #w[0] = w[0] - alpha * e[0]
        #w[1:] = w[1:] - alpha * grad[1:]
        # case to follow the same implementation for all w terms
        # NOTE(review): w is a (D,1) column while grad is a (1,D) row;
        # verify this subtraction broadcasts as intended.
        w = w - alpha * grad
        sse=np.dot(e.T,e)[0,0]/X.shape[0]
        # NOTE(review): this is the square of (grad.T*grad)[0,0], not the
        # Euclidean gradient norm used in solve_lrn — confirm intent.
        grad_norm=np.square(grad.T*grad,dtype=np.float64)[0,0] #float128
        #print(0.5*sse)
        counter+=1
        counters.append(counter)
        sse_s.append(0.5*sse)
        if counter >= (n_epoch*n_epoch_scale):
            print('maximum iteration limit reached!')
            break
    return w,counters,sse_s
def solve_lrn(x_train,y_train,alpha,landa,n_epoch):
    """Batch gradient descent for L2-regularised linear regression.

    ``landa`` is the regularisation strength; the bias term ``w[0]`` is
    not regularised.  The iteration cap is ``n_epoch`` multiplied by the
    module-level ``n_epoch_scale``.  Returns ``(w, counters, sse_s)`` as
    in ``solve_lr``.
    """
    # Option 1 --> set w as random values between 0 and 1
    w=np.random.rand(x_train.shape[1])
    # Option 2 --> set w as zero
    #w=np.zeros(x_train.shape[1])
    w=np.matrix(w).T
    X=np.matrix(x_train)
    y=np.matrix(y_train)
    grad_norm=1000
    counters = list()
    sse_s = list()
    counter=0
    # iterate until the Euclidean gradient norm is small enough
    while grad_norm>0.01:
        e=X*w-y.T
        #print (e)
        #print (X)
        grad=np.multiply(e,X,dtype=np.float64) #float128
        grad=np.sum(grad,axis=0)/X.shape[0]
        # flatten to 1-d arrays so w[0] and w[1:] can be updated separately
        w = np.squeeze(np.asarray(w))
        grad = np.squeeze(np.asarray(grad, dtype=np.float64))
        #print(grad)
        # bias term: plain gradient step (no regularisation)
        w[0] = w[0] - alpha * grad[0]
        # remaining weights: gradient step plus L2 shrinkage
        w[1:] = w[1:] - alpha * grad[1:]-landa/X.shape[0]*w[1:]
        w=np.asmatrix(w).T
        grad=np.asmatrix(grad)
        # recompute the residual with the updated weights
        e=X*w-y.T
        # print(sum([x**2 for x in e])) cross-check for sse calculation
        sse=np.dot(e.T,e)[0,0]/X.shape[0]
        #print(np.dot(e.T,e))
        #print (sse)
        #sleep(5)
        #print(sum([x**2 for x in grad.T])) #cross-check for grad_norm calculation
        grad_norm=np.sum(np.multiply(grad,grad,dtype=np.float64), dtype=np.float64) #float128
        grad_norm=np.sqrt(grad_norm)
        #print(grad_norm)
        #print(0.5*sse)
        counter+=1
        counters.append(counter)
        sse_s.append(0.5*sse)
        if counter >= (n_epoch*n_epoch_scale):
            print('maximum iteration limit reached!')
            break
    return w,counters,sse_s
def test(w,x_train,y_train):
X=np.matrix(x_train)
y=np.matrix(y_train)
e=X*w-y.T
sse=np.dot(e.T,e)[0,0]/X.shape[0]
return sse
def predict(w, x_test):
    """Predict targets as X @ w for the given feature rows."""
    features = np.matrix(x_test)
    return features * w
# In[3]:
# ========== Part 0.(a) ================
# Load the training data and drop columns with no predictive signal.
train_data=pd.read_csv("resources/PA1_train.csv")
# Fix: drop(label, 1) used the positional `axis` argument, which was
# deprecated and removed in pandas 2.0; use the `columns` keyword.
train_data=train_data.drop(columns='dummy')
train_data=train_data.drop(columns='id')
# In[4]:
# ========== Part 0.(b) ================
# Split the 'date' column (month/day/year) into three numeric features.
day=list()
month=list()
year=list()
for item in train_data['date']:
    day.append(item.split('/')[1])
    month.append(item.split('/')[0])
    year.append(item.split('/')[2])
day=np.asarray(day).astype(int)
month=np.asarray(month).astype(int)
year=np.asarray(year).astype(int)
train_data.insert(loc=0, column='year', value=year)
train_data.insert(loc=0, column='month', value=month)
train_data.insert(loc=0, column='day', value=day)
train_data=train_data.drop(columns='date')
train_data.head()
# In[5]:
# ========== Part 0.(c) ================
# Summary statistics: category proportions for the categorical features,
# std/mean/range for the numeric ones.
print("Category proportions for categorical columns \n")
categs=['waterfront','view','condition','grade']
for item in categs:
    print(train_data.groupby(item).agg({'price':'count'})/train_data.shape[0]*100)
print("\nStandard deviation for numerical columns \n")
# Fix: .drop(categs, 0) used the positional `axis` argument, which was
# removed in pandas 2.0; use the `labels` keyword (axis 0 is the default).
print(train_data.std().drop(labels=categs))
print("\nMean for numerical columns \n")
print(train_data.mean().drop(labels=categs))
range_col=train_data.max()-train_data.min()
print("\nRange for numerical columns \n")
print(range_col.astype(float).drop(labels=categs))
# In[6]:
# ========== Part 0.(d) ================
# Scatter plots of price vs. living area and vs. bedroom count
# (only when the module-level plot_mode flag is enabled).
if plot_mode:
    plt.figure(0)
    plt.plot(train_data['sqft_living15'],train_data['price'],'ro')
    plt.xlabel('square footage')
    plt.ylabel('price')
    plt.figure(1)
    plt.plot(train_data['bedrooms'],train_data['price'],'ro')
    plt.xlabel('bedrooms')
    plt.ylabel('price')
# In[7]:
# ========== Part 0.(e) ================
# Separate features from the target and mean/range-normalize the features.
# Fix: drop('price', 1) used the positional `axis` argument, removed in
# pandas 2.0; use the `columns` keyword.
x_train=train_data.drop(columns='price')
y_train=train_data['price']
x_train=(x_train - x_train.mean()) / (x_train.max() - x_train.min())
# Prepend an all-ones intercept column.
ones=np.ones(x_train.shape[0])
x_train.insert(loc=0, column='Intercept', value=ones)
x_train.head()
w=np.random.rand(x_train.shape[1])
# In[9]:
print('Part 1 -------------------')
# ============= Part 1.a =======================s
data=gen_data("resources/PA1_dev.csv",normalization=True)
x_cross=data[0]
y_cross=data[1]
#alphas=[3,2,1.99,1.5,1.2,1.1,1,0.1,1e-2,1e-3,1e-4,1e-5,1e-6,1e-7]
alphas=[1,0.1,1e-2,1e-3,1e-4,1e-5,1e-6,1e-7]
if plot_mode:
plt.figure(3)
dev_sse=list()
for a in alphas:
results = solve_lr(x_train, y_train, alpha=a,n_epoch=1000)
if plot_mode:
plt.xlabel('Iterations')
plt.ylabel('SSE')
plt.plot(results[1],results[2])
if plot_mode:
plt.legend(['alpha= {}'.format(x) for x in alphas], loc='upper right')
plt.show()
# In[10]:
# ============= Part 1.b =======================s
# Final training vs. validation SSE for each learning rate.
training_sse=list()
dev_sse=list()
for a in alphas:
    results = solve_lr(x_train, y_train, alpha=a,n_epoch=1000)
    w=results[0]
    sse=test(w,x_cross,y_cross)
    dev_sse.append(sse)
    training_sse.append(results[2][-1])
print('training sse for all the alpha values are:\n {}:\n'.format(training_sse))
print('dev sse for all the alpha values are:\n {}:\n'.format(dev_sse))
if plot_mode:
    plt.figure(5)
    plt.xlabel('alpha')
    plt.ylabel('SSE')
    plt.plot(alphas,dev_sse)
    plt.plot(alphas,training_sse)
    plt.legend(['Validation SSE','Training SSE'], loc='upper right')
    plt.show()
# In[11]:
# ============= Part 1.c =======================s
# Report the learned weight for each feature name.
# NOTE(review): relies on the private pandas attribute _info_axis for the
# column names -- train_data.columns would be the supported spelling.
features=list(train_data._info_axis[:-1])
results = solve_lr(x_train, y_train, alpha=0.001,n_epoch=1000)
w=results[0]
features.insert(0,'Intercept')
# NOTE(review): indexes w as w[0,i] (row orientation) while len(w) of a
# column matrix would be the feature count -- confirm which orientation
# solve_lr returns.
for i in range(len(w)):
    print('weight of {} is {}'.format(features[i],w[0,i]))
# In[12]:
# ============= Part 2.a =======================s
# Regularization-strength sweep for the L2-penalized solver.
from math import log
landas=[1e-5,1e-3,1e-2,1e-1,1,10] # IF you add 100 to the batch SSE will explode. Use it in report
if plot_mode:
    plt.figure(5)
for l in landas:
    results = solve_lrn(x_train, y_train, alpha=0.001,landa=l,n_epoch=1000)
    if plot_mode:
        plt.xlabel('Iterations')
        plt.ylabel('SSE')
        plt.plot(results[1],results[2])
if plot_mode:
    plt.legend(['Lambda= {}'.format(x) for x in landas], loc='upper right')
    plt.show()
# Training vs. validation SSE per lambda (plotted on a log10 x-axis).
training_sse=list()
dev_sse=list()
for l in landas:
    results = solve_lrn(x_train, y_train, alpha=0.1,landa=l,n_epoch=100)
    w=results[0]
    sse=test(w,x_cross,y_cross)
    dev_sse.append(sse)
    training_sse.append(results[2][-1])
print('training sse for all the lambda values are:\n {}:\n'.format(training_sse))
print('dev sse for all the lambda values are:\n {}:\n'.format(dev_sse))
if plot_mode:
    plt.figure(6)
    plt.xlabel('lambda')
    plt.ylabel('SSE')
    plt.plot([log(landa,10) for landa in landas],dev_sse)
    plt.plot([log(landa,10) for landa in landas],training_sse)
    plt.legend(['Validation SSE','Training SSE'], loc='upper right')
    plt.show()
# In[128]:
# JUST CHECKING STUFF, NOT FOR THE HOMEWORK
# Dead scratch code comparing ndarray vs. np.matrix multiplication
# semantics; fully commented/quoted out, no runtime effect.
# a=np.arange(5)
'''
print('if a is an ndarray')
print('shape of a is {}'.format(a.shape))
print('a is {}'.format(a))
print('np.dot(a.T,a) is {}'.format(np.dot(a,a.T)))
print('a.T*a is {}'.format(a*a.T))
a=np.asmatrix(a)
print('\nif a is matrix')
print('shape of a is {}'.format(a.shape))
print('a is {}'.format(a))
print('np.dot(a.T,a) is {}'.format(np.dot(a,a.T)))
print('a.T*a is {}'.format(a*a.T))
'''
# In[14]:
# ============= Part 3.a =======================s
# Effect of the learning rate WITHOUT feature normalization.
data=gen_data("resources/PA1_train.csv",normalization=False)
x_train=data[0]
y_train=data[1]
# NOTE(review): the "dev" split below is reloaded from PA1_train.csv
# (Part 1 used PA1_dev.csv), so the validation SSE here is really
# training SSE -- confirm whether this was intentional.
data=gen_data("resources/PA1_train.csv",normalization=False)
x_cross=data[0]
y_cross=data[1]
#alphas=[1,1e-3,1e-6,1e-9,1e-15,1e-30,1e-100,0]
alphas=[1,1e-3,1e-6,1e-9,1e-15,0]
training_sse=list()
dev_sse=list()
for a in alphas:
    results = solve_lr(x_train, y_train, alpha=a,n_epoch=100)
    w=results[0]
    sse=test(w,x_cross,y_cross)
    dev_sse.append(sse)
    training_sse.append(results[2][-1])
print('training sse for all the alpha values are:\n {}:\n'.format(training_sse))
print('dev sse for all the alpha values are:\n {}:\n'.format(dev_sse))
if plot_mode:
    plt.figure(5)
    plt.xlabel('alpha')
    plt.ylabel('SSE')
    plt.plot(alphas,dev_sse)
    plt.plot(alphas,training_sse)
    plt.legend(['Validation SSE','Training SSE'], loc='upper right')
    plt.show()
# In[13]:
# Predictions:
# Fit regularized weights on the normalized training set and write the
# test-set predictions to resources/Predicted_y.csv.
data=gen_data("resources/PA1_train.csv",normalization=True)
x_train=data[0]
y_train=data[1]
x_test=gen_test_data("resources/PA1_test.csv",normalization=True)
print("x_train is:")
print(x_train)
print("y_train is:")
print(y_train)
results = solve_lrn(x_train, y_train, alpha=0.001,landa=0.001,n_epoch=10000)
print("results returned:")
print (results)
w=results[0]
y_test=predict(w,x_test)
# Collapse the (m, 1) prediction matrix to a flat vector for savetxt.
y_test=np.squeeze(np.asarray(y_test))
print("Predicted y is:")
print(y_test)
np.savetxt("resources/Predicted_y.csv", y_test, delimiter=",")
print(w)
# In[227]:
# More scratch experimentation: elementwise multiply of a (6, 2) matrix
# by a (6, 1) column broadcasts across columns; kept for reference only.
x1 = np.arange(12).reshape((6, 2))
x1 = np.asmatrix(x1)
print('shape of x1: {}'.format(x1.shape))
x2 = np.arange(6)
x2 = np.asmatrix(x2).T
print('shape of x2: {}'.format(x2.shape))
print('x1: \n')
print(x1)
print('x2: \n')
print(x2)
mult=np.multiply(x1, x2)
print('mult is: {}'.format(mult))
mult_sum=np.sum(mult,axis=0)
print('\nsum_mult is {}'.format(mult_sum))
mult_sum[0]
# In[ ]:
|
from weakref import WeakKeyDictionary
import pytest
from mock import Mock, patch
from nameko.containers import ServiceContainer, WorkerContext
from nameko.testing.services import dummy, entrypoint_hook
from nameko_sqlalchemy.database import (
DB_URIS_KEY,
Database,
Session,
)
from sqlalchemy import Column, String, create_engine
from sqlalchemy.engine import Engine
from sqlalchemy.ext.declarative import declarative_base
DeclBase = declarative_base(name='examplebase')
class ExampleModel(DeclBase):
    """Minimal key/value model used as the fixture table for these tests."""
    __tablename__ = 'example'
    key = Column(String, primary_key=True)
    value = Column(String)
@pytest.fixture
def config():
    """Nameko config mapping 'service:declarative-base-name' to an
    in-memory SQLite URI."""
    return {
        DB_URIS_KEY: {
            'exampleservice:examplebase': 'sqlite:///:memory:'
        }
    }
@pytest.fixture
def container(config):
    """Mocked ServiceContainer carrying the test config and service name."""
    return Mock(
        spec=ServiceContainer, config=config, service_name='exampleservice'
    )
@pytest.fixture
def dependency_provider(container):
    """Database dependency provider bound to the mocked container."""
    return Database(DeclBase).bind(container, 'database')
def test_setup(dependency_provider):
    # setup() resolves db_uri from the container config and builds an engine.
    dependency_provider.setup()
    assert dependency_provider.db_uri == 'sqlite:///:memory:'
    assert isinstance(dependency_provider.engine, Engine)
def test_stop(dependency_provider):
    # stop() disposes of the engine and removes the attribute entirely.
    dependency_provider.setup()
    assert dependency_provider.engine
    dependency_provider.stop()
    assert not hasattr(dependency_provider, 'engine')
class TestWorkerScopeSessionUnit:
    """Unit tests for the per-worker session cache kept in Database.dbs."""

    def test_get_dependency(self, dependency_provider):
        # The dependency is cached per worker context and creates its
        # session lazily on first access.
        dependency_provider.setup()
        worker_ctx = Mock(spec=WorkerContext)
        db = dependency_provider.get_dependency(worker_ctx)
        assert dependency_provider.dbs[worker_ctx] is db
        assert db._worker_session is None
        session = db.session
        assert isinstance(session, Session)
        assert db._worker_session is session

    def test_multiple_workers(self, dependency_provider):
        # Each worker context gets its own Database wrapper and session.
        dependency_provider.setup()
        worker_ctx_1 = Mock(spec=WorkerContext)
        db_1 = dependency_provider.get_dependency(worker_ctx_1)
        assert isinstance(db_1.session, Session)
        assert dependency_provider.dbs[worker_ctx_1].session is db_1.session
        worker_ctx_2 = Mock(spec=WorkerContext)
        db_2 = dependency_provider.get_dependency(worker_ctx_2)
        assert isinstance(db_2.session, Session)
        assert dependency_provider.dbs[worker_ctx_2].session is db_2.session
        assert dependency_provider.dbs == WeakKeyDictionary({
            worker_ctx_1: db_1,
            worker_ctx_2: db_2
        })

    def test_weakref(self, dependency_provider):
        # Cache entries disappear once the worker context is garbage
        # collected (dbs is a WeakKeyDictionary).
        dependency_provider.setup()
        worker_ctx = Mock(spec=WorkerContext)
        db = dependency_provider.get_dependency(worker_ctx)
        assert isinstance(db.session, Session)
        assert dependency_provider.dbs[worker_ctx].session is db.session
        del worker_ctx
        assert dependency_provider.dbs == WeakKeyDictionary({})

    def test_worker_teardown(self, dependency_provider):
        # worker_teardown drops the cache entry and closes the session,
        # rolling back anything uncommitted.
        dependency_provider.setup()
        worker_ctx = Mock(spec=WorkerContext)
        db = dependency_provider.get_dependency(worker_ctx)
        assert isinstance(db.session, Session)
        assert dependency_provider.dbs[worker_ctx].session is db.session
        db.session.add(ExampleModel())
        assert db.session.new
        dependency_provider.worker_teardown(worker_ctx)
        assert worker_ctx not in dependency_provider.dbs
        assert not db.session.new # session.close() rolls back new objects
class TestGetSessionContextManagerUnit:
    """Unit tests for Database.get_session() used as a context manager."""

    @pytest.fixture
    def db(self, dependency_provider):
        dependency_provider.setup()
        worker_ctx = Mock(spec=WorkerContext)
        return dependency_provider.get_dependency(worker_ctx=worker_ctx)

    # NOTE(review): method name has a typo ('comits'); left unchanged since
    # pytest discovers it by prefix regardless.
    @patch.object(Session, 'rollback')
    @patch.object(Session, 'commit')
    @patch.object(Session, 'close')
    def test_comits_and_closes(self, close, commit, rollback, db):
        # Normal exit commits and closes; rollback must not happen.
        with db.get_session() as session:
            assert isinstance(session, Session)
        commit.assert_called()
        rollback.assert_not_called()
        close.assert_called()

    @patch.object(Session, 'rollback')
    @patch.object(Session, 'commit')
    @patch.object(Session, 'close')
    def test_rolls_back_and_closes(self, close, commit, rollback, db):
        # An exception inside the block rolls back and closes, no commit.
        with pytest.raises(Exception):
            with db.get_session() as session:
                assert isinstance(session, Session)
                raise Exception('Yo!')
        commit.assert_not_called()
        rollback.assert_called()
        close.assert_called()
class BaseTestEndToEnd:
    """Shared fixtures: a file-backed SQLite DB and a started service
    container running the subclass's ExampleService."""

    @pytest.fixture
    def db_uri(self, tmpdir):
        return 'sqlite:///{}'.format(tmpdir.join("db").strpath)

    @pytest.fixture
    def container(self, container_factory, db_uri):
        # Create the schema up front, then start the service container.
        engine = create_engine(db_uri)
        ExampleModel.metadata.create_all(engine)
        config = {
            DB_URIS_KEY: {
                'exampleservice:examplebase': db_uri
            }
        }
        container = container_factory(self.ExampleService, config)
        container.start()
        return container
class TestGetSessionEndToEnd(BaseTestEndToEnd):
    """End-to-end: sessions obtained via get_session() and closed manually."""

    class ExampleService(object):
        name = 'exampleservice'
        db = Database(DeclBase)

        @dummy
        def write(self, key, value):
            """Persist a key/value row, committing and closing explicitly."""
            obj = ExampleModel(key=key, value=value)
            session = self.db.get_session()
            session.add(obj)
            session.commit()
            session.close()

        @dummy
        def read(self, key):
            """Return the stored value for `key`."""
            session = self.db.get_session()
            value = session.query(ExampleModel).get(key).value
            session.close()
            return value

    # Fix: the first parameter was misspelled 'slf'. pytest passes the
    # instance positionally so it happened to work, but it broke the
    # `self` convention and confused linters.
    def test_successful_write_and_read(self, container, db_uri):
        # write through the service
        with entrypoint_hook(container, 'write') as write:
            write(key='spam', value='ham')
        # verify changes written to disk
        entries = list(
            create_engine(db_uri).execute(
                'SELECT key, value FROM example LIMIT 1'))
        assert entries == [('spam', 'ham',)]
        # read through the service
        with entrypoint_hook(container, 'read') as read:
            assert read('spam') == 'ham'
class TestGetSessionContextManagerEndToEnd(BaseTestEndToEnd):
    """End-to-end: sessions managed by the get_session() context manager."""

    class ExampleService(object):
        name = 'exampleservice'
        db = Database(DeclBase)

        @dummy
        def write(self, key, value):
            """Persist a key/value row; commit/close handled by the CM."""
            with self.db.get_session() as session:
                obj = ExampleModel(key=key, value=value)
                session.add(obj)

        @dummy
        def read(self, key):
            """Return the stored value for `key`."""
            with self.db.get_session() as session:
                return session.query(ExampleModel).get(key).value

    # Fix: the first parameter was misspelled 'slf'. pytest passes the
    # instance positionally so it happened to work, but it broke the
    # `self` convention and confused linters.
    def test_successful_write_and_read(self, container, db_uri):
        # write through the service
        with entrypoint_hook(container, 'write') as write:
            write(key='spam', value='ham')
        # verify changes written to disk
        entries = list(
            create_engine(db_uri).execute(
                'SELECT key, value FROM example LIMIT 1'))
        assert entries == [('spam', 'ham',)]
        # read through the service
        with entrypoint_hook(container, 'read') as read:
            assert read('spam') == 'ham'
class TestWorkerScopeSessionEndToEnd(BaseTestEndToEnd):
    """End-to-end: the worker-scoped db.session property."""

    class ExampleService(object):
        name = 'exampleservice'
        db = Database(DeclBase)

        @dummy
        def write(self, key, value):
            """Persist a key/value row through the worker-scoped session."""
            obj = ExampleModel(key=key, value=value)
            self.db.session.add(obj)
            self.db.session.commit()

        @dummy
        def read(self, key):
            """Return the stored value for `key`."""
            return self.db.session.query(ExampleModel).get(key).value

    # Fix: the first parameter was misspelled 'slf'. pytest passes the
    # instance positionally so it happened to work, but it broke the
    # `self` convention and confused linters.
    def test_successful_write_and_read(self, container, db_uri):
        # write through the service
        with entrypoint_hook(container, 'write') as write:
            write(key='spam', value='ham')
        # verify changes written to disk
        entries = list(
            create_engine(db_uri).execute(
                'SELECT key, value FROM example LIMIT 1'))
        assert entries == [('spam', 'ham',)]
        # read through the service
        with entrypoint_hook(container, 'read') as read:
            assert read('spam') == 'ham'
|
from django.urls import path
from . import views
from .views import HouseListView, HouseDetailView, HouseCreateView, HouseUpdateView, HouseDeleteView
# URL routes for the house-listing app: CRUD views plus auth/profile pages.
urlpatterns = [
    path('', HouseListView.as_view(), name='home'),
    path('<int:pk>/', HouseDetailView.as_view(), name='detail'),
    # NOTE(review): route name contains a space; it works with {% url %}
    # but is unconventional -- consider 'new-listing'.
    path('new/', HouseCreateView.as_view(), name='new listing'),
    path('register/', views.register, name='register'),
    # NOTE(review): update/delete lack the trailing slash the other
    # routes use -- confirm APPEND_SLASH behavior is as intended.
    path('<int:pk>/update', HouseUpdateView.as_view(), name='update'),
    path('<int:pk>/delete', HouseDeleteView.as_view(), name='delete'),
    path('profile/', views.profile, name='profile'),
    path('article/', views.article, name='article'),
    path('email/', views.send_mail, name='email')
]
|
def main():
    """Solve the blocked-billboard tarp problem (USACO style).

    Reads two axis-aligned rectangles from billboard.in, one per line as
    'x1 y1 x2 y2' (first the lawn billboard, then the cow billboard), and
    writes to billboard.out the area of the smallest axis-aligned tarp
    that covers the part of the lawn billboard not hidden by the cow
    billboard.

    Fixes over the original: files are managed with `with` (the originals
    were never closed on the early-return path, and the input file never
    at all), the bare `except:` that swallowed every error now only
    handles I/O failures and reports them, and the unused `math` import
    is gone.
    """
    import numpy as np
    try:
        with open("billboard.in", "r") as filein, \
                open("billboard.out", "w") as fileout:
            # nums[i] = [left, bottom, right, top] of rectangle i
            # (row 0 = lawn billboard, row 1 = cow billboard).
            nums = np.zeros((2, 4))
            for i in range(0, 2):
                parts = filein.readline().split(' ')
                for j in range(0, len(parts)):
                    nums[i, j] = int(parts[j])
            lheight = nums[0, 3] - nums[0, 1]
            lwidth = nums[0, 2] - nums[0, 0]
            cheight = nums[1, 3] - nums[1, 1]
            cwidth = nums[1, 2] - nums[1, 0]
            theight = lheight
            twidth = lwidth
            # Case 1: the cow billboard spans the full HEIGHT of the lawn
            # billboard (top above, bottom below), so it can shave width
            # off the tarp from the left or right side.
            if cheight >= lheight and nums[1, 3] >= nums[0, 3] and nums[1, 1] <= nums[0, 1]:
                if nums[1, 0] <= nums[0, 0] and nums[1, 2] > nums[0, 0]:
                    # Overlaps the left edge: remove (cow right - lawn left).
                    twidth -= nums[1, 2] - nums[0, 0]
                elif nums[1, 2] >= nums[0, 2] and nums[1, 0] < nums[0, 2]:
                    # Overlaps the right edge: remove (lawn right - cow left).
                    twidth -= nums[0, 2] - nums[1, 0]
                if twidth <= 0:
                    # Lawn billboard fully hidden: no tarp needed.
                    fileout.write("0")
                    return
            # Case 2: the cow billboard spans the full WIDTH of the lawn
            # billboard, so it can shave height off the top or bottom.
            if cwidth >= lwidth and nums[1, 0] <= nums[0, 0] and nums[1, 2] >= nums[0, 2]:
                if nums[1, 3] >= nums[0, 3] and nums[1, 1] <= nums[0, 3]:
                    # Overlaps the top edge: remove (lawn top - cow bottom).
                    theight -= nums[0, 3] - nums[1, 1]
                elif nums[1, 1] <= nums[0, 1] and nums[1, 3] >= nums[0, 1]:
                    # Overlaps the bottom edge: remove (cow top - lawn bottom).
                    theight -= nums[1, 3] - nums[0, 1]
            fileout.write(str(int(twidth * theight)))
    except OSError as exc:
        # The original swallowed every exception silently; at least say
        # what failed for I/O problems.
        print("exception:", exc)


if __name__ == "__main__":
    main()
|
import imdb_crawler as ic

# Kick off the crawler for TV/mini series of one genre and page.
if __name__ == "__main__":
    generos = ['adventure', 'documentary', 'reality_tv', 'game_show']
    pag = 1
    # NOTE(review): 'url' and 'pag' are built but never used -- the crawler
    # presumably constructs its own URL from tipo/pagina. Confirm before
    # removing.
    url = 'http://www.imdb.com/search/title/?genres={}&title_type=' \
          'tv_series,mini_series&page={}&ref_=adv_nxt'.format(generos[0], 1)
    ic.crawler_tvseries(tipo="adventure", pagina=106)
|
'''
Nalu Zou, Yuming Tsang, Jerome Orille
3/13/20
Project Part 2 - Crew Member Data Wrangling
This is a test file used for testing similar code
in 'dept_info.py' and 'crew_info.py.'
'''
from test_util import assert_equals
from crew_dept_info import populate_department, total_department
import pandas as pd
from ast import literal_eval
def main():
    """Spot-check that per-department crew counts sum to the total crew
    size for one randomly sampled movie from crew_info.csv."""
    crew = pd.read_csv('crew_info.csv')
    sample = crew.sample()
    # The 'crew' cells are stringified lists of dicts; parse them back.
    member = sample['crew'].apply(literal_eval)
    # Department breakdown for the sampled movie.
    sample['department'] = member.apply(populate_department)
    count_departments = total_department(sample['department'])
    print(count_departments)
    tot_crew_types = count_departments['Count'].sum()
    # Total number of crew members for the same movie.
    sample['total_crew'] = member.apply(len)
    count_total_crew = sample[['title', 'total_crew']].rename(
        columns={'title': 'Movie', 'total_crew': 'Total Crew per Movie'})
    print(count_total_crew)
    tot_crew = count_total_crew['Total Crew per Movie'].sum()
    # The two tallies must agree.
    print('Checking number of crew types to total crew...')
    assert_equals(tot_crew_types, tot_crew)
    print('Total count from count_departments: ' + str(tot_crew_types))
    print('Total count from count_total_crew: ' + str(tot_crew))


if __name__ == '__main__':
    main()
import os

# Database connection settings, all sourced from the environment.
DB_HOST = os.environ.get('DB_HOST')
# NOTE(review): int() raises TypeError when DB_PORT is unset, because
# os.environ.get returns None -- confirm the deployment always sets it.
DB_PORT = int(os.environ.get('DB_PORT'))
DB_USER = os.environ.get('DB_USER')
DB_PASSWORD = os.environ.get('DB_PASSWORD')
DB_DATABASE = os.environ.get('DB_DATABASE')
DB_TABLE = os.environ.get('DB_TABLE')
import numpy


def take_input():
    """Interactively collect the stylization type, image filename, and
    light-source positions.

    Returns (typeI, filename, N, points) where points is an (N, 3) array
    of light-source coordinates entered as comma-separated floats.
    """
    menu = (
        'Please enter the number of type of stylization you want:\n'
        '1)normal stylization\n'
        '2)black and white\n'
        '3)line stylization \n'
    )
    typeI = int(input(menu))
    filename = str(input('Enter filename with extension :'))
    N = int(input('Enter number of light sources :'))
    points = numpy.zeros((N, 3))
    for idx in range(N):
        prompt = 'give co-ordinates of light source-' + str(idx + 1) + ' :'
        points[idx] = [float(coord) for coord in input(prompt).split(',')]
    return typeI, filename, N, points
|
import numpy as np
import matplotlib.pyplot as plt

# Demo: two Gaussian point clouds separated by the line y = -x.
# Create red points centered at (-2, -2)
red_points = np.random.randn(50, 2) - 2 * np.ones((50, 2))
# Create blue points centered at (2, 2)
blue_points = np.random.randn(50, 2) + 2 * np.ones((50, 2))
# plt.interactive(False)
# Plot the red and blue points
plt.scatter(red_points[:, 0], red_points[:, 1], color='red')
plt.scatter(blue_points[:, 0], blue_points[:, 1], color='blue')
# Plot a line y = -x
x_axis = np.linspace(-4, 4, 100)
y_axis = -x_axis
plt.plot(x_axis, y_axis)
plt.show(block=True)
import random
from corpustools.corpus.classes.lexicon import Corpus, Inventory, Segment, FeatureMatrix, Word
from corpustools import __version__ as currentPCTversion
def force_update2(corpus):
    """Rebuild a legacy corpus: re-wrap the corpus and every word with the
    current classes, then modernize the inventory and feature specifier."""
    corpus = Corpus(None, update=corpus)
    # Snapshot the iteration (list(...)) because words are removed/re-added.
    for word in [x for x in corpus]:
        word2 = Word(update=word)
        corpus.remove_word(word)
        corpus.add_word(word2)
    corpus.inventory = modernize_inventory_attributes(corpus.inventory)
    corpus.inventory, corpus.specifier = modernize_features(corpus.inventory, corpus.specifier)
    return corpus
def set_defaults(corpus):
    """Re-wrap the corpus, its inventory, and every word with the
    current-version classes so newer attributes get their defaults."""
    corpus = Corpus(corpus.name, update=corpus)
    corpus.inventory = Inventory(update=corpus.inventory)
    word_list = list()
    for word in corpus:
        word2 = Word(update=word)
        word_list.append(word2)
    corpus.update_wordlist(word_list)
    return corpus
def force_update(corpus):
    """Patch a corpus saved by an older PCT version up to the current format."""
    #This runs through known incompatibilities with previous version of PCT and tries to patch them all up. This gets
    #called from the LoadCorpusDialog.forceUpdate() in iogui.py
    corpus = set_defaults(corpus)
    if not hasattr(corpus.inventory, 'segs'):
        #setattr(corpus.inventory, 'segs', {'#': Segment('#')})
        setattr(corpus.inventory,
                'segs',
                {symbol: Segment(symbol) for symbol in Inventory.inventory_attributes['non_segment_symbols']})
    # Non-empty when the inventory already holds real (non-boundary) segments.
    has_segs = [seg for seg in corpus.inventory.segs if not seg in Inventory.inventory_attributes['non_segment_symbols']]
    segs = set()
    #some old copora have a different spelling/transcription attribute set-up
    for word in corpus:
        if not hasattr(word, '_transcription'):
            # Migrate the old public 'transcription' attribute to the new
            # private one.
            setattr(word, 'Transcription', getattr(word, 'transcription'))
            word._transcription = word.Transcription
            del word.transcription
        if not hasattr(word, '_spelling'):
            word._spelling = word.spelling
        word._corpus = corpus
        for seg in word.transcription:
            segs.add(seg)
        # NOTE(review): remove_word() is called with no argument, and the
        # rebuilt Word on the next line is never re-added to the corpus --
        # compare force_update2(), which does remove_word(word) followed by
        # add_word(word2). Confirm against the Corpus API.
        corpus.remove_word()
        word = Word(update=word)
    if not has_segs:
        # Populate the inventory from the segments actually observed.
        for seg in segs:
            corpus.inventory.segs[seg] = Segment(seg,corpus.specifier.specify(seg))
    corpus.inventory = modernize_inventory_attributes(corpus.inventory)
    corpus.inventory, corpus.specifier = modernize_features(corpus.inventory, corpus.specifier)
    corpus.inventory.isNew = False
    if not corpus.specifier.possible_values or len(corpus.specifier.possible_values) < 2:
        # Rebuild the set of possible feature values from the specified
        # segments ('#' is the word boundary and has no features).
        f_values = set()
        for seg in corpus.inventory:
            if seg == '#':
                continue
            features = corpus.specifier.specify(seg)
            f_values.update(features.values())
        f_values.add('n')
        corpus.specifier.possible_values = f_values
    return corpus
def need_update(corpus):
    """Return True (and stamp the current version on the corpus) when it
    was saved by a different PCT version; False when already current."""
    already_current = hasattr(corpus, '_version') and corpus._version == currentPCTversion
    if already_current:
        return False
    setattr(corpus, '_version', currentPCTversion)
    return True
def modernize_inventory_attributes(inventory):
    """Backfill newer Inventory attributes and data layout onto an old
    inventory object, leaving existing attributes untouched."""
    for attribute,default in Inventory.inventory_attributes.items():
        if not hasattr(inventory, attribute):
            setattr(inventory, attribute, default)
    has_segs = [s for s in inventory.segs if not s in Inventory.inventory_attributes['non_segment_symbols']]
    if not has_segs and inventory._data:
        #in an older version, inventory._data was a list of segs, but with the model/view set up,
        #this is changed
        inventory.segs = inventory._data.copy()
        inventory._data = list()
    if hasattr(inventory, 'vowel_feature'):
        #multiple vowel features are allowed, but earlier version only allowed a single one
        inventory.vowel_features = [inventory.vowel_feature]
        del inventory.vowel_feature
    return inventory
def modernize_specifier(specifier):
    """Convert an old-style FeatureMatrix so its cells hold feature dicts,
    then re-wrap it as a current FeatureMatrix."""
    #In older versions of PCT, the FeatureMatrix returns Segments, instead of feature dicts
    for seg in specifier.matrix.keys():
        if seg == '#':
            continue
        if isinstance(specifier.matrix[seg], Segment):
            specifier.matrix[seg] = specifier.matrix[seg].features
        #In some SPE matrices, uppercase [EXTRA] and [LONG] appear in specifier.features, but lower case [extra] and [long]
        #are used in the actual feature specifications. This next step forces the .features list to match the specifications
        # NOTE(review): _features is recomputed on every iteration, so the
        # final value comes from whichever segment the loop visits last --
        # presumably all segments share the same feature keys. Confirm.
        features = sorted(list(specifier.matrix[seg].keys()))
        setattr(specifier, '_features', features)
    return FeatureMatrix(specifier.name, specifier) # this adds new class methods too
def modernize_features(inventory, specifier):
    """Modernize the specifier, then re-point every inventory segment's
    features at the modernized specifier's feature dicts."""
    specifier = modernize_specifier(specifier)
    for seg in inventory:
        if seg == '#':
            continue
        # Old corpora stored a Segment object in .features; replace it with
        # the feature dict from the modernized specifier.
        if isinstance(seg.features, Segment):
            inventory[seg.symbol].features = specifier.matrix[seg.symbol]
    return inventory, specifier
import sys
from androguard.core.bytecodes import apk
from androguard.core.bytecodes import dvm
import pandas as pd
import serial
import time
dataset_perm_list = ["android.permission.BIND_WALLPAPER", "android.permission.FORCE_BACK", "android.permission.READ_CALENDAR", "android.permission.BODY_SENSORS", "android.permission.READ_SOCIAL_STREAM", "android.permission.READ_SYNC_STATS", "android.permission.INTERNET",
"android.permission.CHANGE_CONFIGURATION", "android.permission.BIND_DREAM_SERVICE", "android.permission.HARDWARE_TEST", "com.android.browser.permission.WRITE_HISTORY_BOOKMARKS", "com.android.launcher.permission.INSTALL_SHORTCUT", "android.permission.BIND_TV_INPUT",
"android.permission.BIND_VPN_SERVICE", "android.permission.BLUETOOTH_PRIVILEGED", "android.permission.WRITE_CALL_LOG", "android.permission.CHANGE_WIFI_MULTICAST_STATE", "android.permission.BIND_INPUT_METHOD", "android.permission.SET_TIME_ZONE", "android.permission.WRITE_SYNC_SETTINGS",
"android.permission.WRITE_GSERVICES", "android.permission.SET_ORIENTATION", "android.permission.BIND_DEVICE_ADMIN", "android.permission.MANAGE_DOCUMENTS", "android.permission.FORCE_STOP_PACKAGES", "android.permission.WRITE_SECURE_SETTINGS", "android.permission.CALL_PRIVILEGED",
"android.permission.MOUNT_FORMAT_FILESYSTEMS", "android.permission.SYSTEM_ALERT_WINDOW", "android.permission.ACCESS_LOCATION_EXTRA_COMMANDS", "android.permission.BRICK", "android.permission.DUMP", "android.permission.CHANGE_WIFI_STATE", "android.permission.RECORD_AUDIO",
"android.permission.MODIFY_PHONE_STATE", "android.permission.READ_PROFILE", "android.permission.ACCOUNT_MANAGER", "android.permission.SET_ANIMATION_SCALE", "android.permission.SET_PROCESS_LIMIT", "android.permission.CAPTURE_SECURE_VIDEO_OUTPUT", "android.permission.SET_PREFERRED_APPLICATIONS",
"android.permission.ACCESS_ALL_DOWNLOADS", "android.permission.SET_DEBUG_APP", "android.permission.STOP_APP_SWITCHES", "android.permission.BLUETOOTH", "android.permission.ACCESS_WIFI_STATE", "android.permission.SET_WALLPAPER_HINTS", "android.permission.BIND_NOTIFICATION_LISTENER_SERVICE",
"android.permission.MMS_SEND_OUTBOX_MSG", "android.permission.CONTROL_LOCATION_UPDATES", "android.permission.UPDATE_APP_OPS_STATS", "android.permission.REBOOT", "android.permission.BROADCAST_WAP_PUSH", "com.android.launcher3.permission.READ_SETTINGS", "android.permission.ACCESS_NETWORK_STATE",
"android.permission.STATUS_BAR", "android.permission.WRITE_USER_DICTIONARY", "com.android.browser.permission.READ_HISTORY_BOOKMARKS", "android.permission.BROADCAST_PACKAGE_REMOVED", "android.permission.RECEIVE_SMS", "android.permission.WRITE_CONTACTS", "android.permission.READ_CONTACTS",
"android.permission.BIND_APPWIDGET", "android.permission.SIGNAL_PERSISTENT_PROCESSES", "android.permission.INSTALL_LOCATION_PROVIDER", "android.permission.ACCESS_DOWNLOAD_MANAGER_ADVANCED", "android.permission.WRITE_SETTINGS", "android.permission.MASTER_CLEAR", "android.permission.READ_INPUT_STATE",
"android.permission.MANAGE_APP_TOKENS", "android.permission.BIND_REMOTEVIEWS", "com.android.email.permission.ACCESS_PROVIDER", "android.permission.BIND_VOICE_INTERACTION", "com.android.launcher.permission.WRITE_SETTINGS", "com.android.gallery3d.filtershow.permission.READ", "android.permission.BIND_PRINT_SERVICE",
"android.permission.MODIFY_AUDIO_SETTINGS", "android.permission.USE_SIP", "android.permission.WRITE_APN_SETTINGS", "android.permission.ACCESS_SURFACE_FLINGER", "android.permission.FACTORY_TEST", "android.permission.READ_LOGS", "android.permission.PROCESS_OUTGOING_CALLS", "android.permission.UPDATE_DEVICE_STATS",
"android.permission.SEND_DOWNLOAD_COMPLETED_INTENTS", "android.permission.WRITE_CALENDAR", "android.permission.NFC", "android.permission.MANAGE_ACCOUNTS", "android.permission.SEND_SMS", "android.permission.INTERACT_ACROSS_USERS_FULL", "android.permission.ACCESS_MOCK_LOCATION",
"android.permission.BIND_ACCESSIBILITY_SERVICE", "android.permission.CAPTURE_AUDIO_OUTPUT", "android.permission.WRITE_SMS", "android.permission.GET_TASKS", "android.permission.DELETE_PACKAGES", "android.permission.ACCESS_CHECKIN_PROPERTIES", "android.permission.SEND_RESPOND_VIA_MESSAGE",
"android.permission.MEDIA_CONTENT_CONTROL", "android.permission.DOWNLOAD_WITHOUT_NOTIFICATION", "android.permission.RECEIVE_BOOT_COMPLETED", "android.permission.VIBRATE", "android.permission.DIAGNOSTIC", "android.permission.WRITE_PROFILE", "android.permission.CALL_PHONE",
"android.permission.FLASHLIGHT", "android.permission.READ_PHONE_STATE", "android.permission.CHANGE_COMPONENT_ENABLED_STATE", "android.permission.CLEAR_APP_USER_DATA", "android.permission.BROADCAST_SMS", "android.permission.KILL_BACKGROUND_PROCESSES", "android.permission.READ_FRAME_BUFFER",
"android.permission.SUBSCRIBED_FEEDS_WRITE", "android.permission.CAMERA", "android.permission.RECEIVE_MMS", "android.permission.WAKE_LOCK", "android.permission.ACCESS_DOWNLOAD_MANAGER", "com.android.launcher3.permission.WRITE_SETTINGS", "android.permission.DELETE_CACHE_FILES",
"android.permission.RESTART_PACKAGES", "android.permission.GET_ACCOUNTS", "android.permission.SUBSCRIBED_FEEDS_READ", "android.permission.CHANGE_NETWORK_STATE", "android.permission.READ_SYNC_SETTINGS", "android.permission.DISABLE_KEYGUARD", "com.android.launcher.permission.UNINSTALL_SHORTCUT",
"android.permission.USE_CREDENTIALS", "android.permission.READ_USER_DICTIONARY", "android.permission.WRITE_MEDIA_STORAGE", "android.permission.ACCESS_COARSE_LOCATION", "com.android.email.permission.READ_ATTACHMENT", "android.permission.SET_POINTER_SPEED", "android.permission.BACKUP",
"android.permission.EXPAND_STATUS_BAR", "android.permission.BLUETOOTH_ADMIN", "android.permission.ACCESS_FINE_LOCATION", "android.permission.LOCATION_HARDWARE", "android.permission.PERSISTENT_ACTIVITY", "android.permission.REORDER_TASKS", "android.permission.BIND_TEXT_SERVICE",
"android.permission.DEVICE_POWER", "android.permission.SET_WALLPAPER", "android.permission.READ_CALL_LOG", "android.permission.WRITE_EXTERNAL_STORAGE", "android.permission.GET_PACKAGE_SIZE", "android.permission.WRITE_SOCIAL_STREAM", "android.permission.READ_EXTERNAL_STORAGE",
"android.permission.INSTALL_PACKAGES", "android.permission.AUTHENTICATE_ACCOUNTS", "com.android.launcher.permission.READ_SETTINGS", "com.android.alarm.permission.SET_ALARM", "android.permission.INTERNAL_SYSTEM_WINDOW", "android.permission.CLEAR_APP_CACHE", "android.permission.CAPTURE_VIDEO_OUTPUT",
"android.permission.GET_TOP_ACTIVITY_INFO", "android.permission.INJECT_EVENTS", "android.permission.SET_ACTIVITY_WATCHER", "android.permission.READ_SMS", "android.permission.BATTERY_STATS", "android.permission.GLOBAL_SEARCH", "android.permission.BIND_NFC_SERVICE", "android.permission.PACKAGE_USAGE_STATS",
"android.permission.SET_ALWAYS_FINISH", "android.permission.ACCESS_DRM", "android.permission.BROADCAST_STICKY", "android.permission.MOUNT_UNMOUNT_FILESYSTEMS"]
micro_dataset_perm_list = ["android.permission.ACCESS_WIFI_STATE", "android.permission.READ_LOGS", "android.permission.CAMERA", "android.permission.READ_PHONE_STATE", "android.permission.CHANGE_NETWORK_STATE", "android.permission.READ_SMS", "android.permission.CHANGE_WIFI_STATE", "android.permission.RECEIVE_BOOT_COMPLETED", "android.permission.DISABLE_KEYGUARD",
"android.permission.RESTART_PACKAGES", "android.permission.GET_TASKS", "android.permission.SEND_SMS", "android.permission.INSTALL_PACKAGES", "android.permission.SET_WALLPAPER", "android.permission.READ_CALL_LOG", "android.permission.READ_CONTACTS", "android.permission.WRITE_APN_SETTINGS", "android.permission.READ_EXTERNAL_STORAGE", "android.permission.WRITE_CONTACTS",
"com.android.browser.permission.READ_HISTORY_BOOKMARKS", "android.permission.WRITE_SETTINGS"]
def getPermissions(filename):
    '''
    Build a one-hot permission vector for the APK at filename[0].

    input: filename -- sequence whose first element is an APK path (or None)
    output: list of 0/1 flags, one per entry of micro_dataset_perm_list and
            in the same order as the dataset header; None when filename[0]
            is None (mirrors the original fall-through behaviour).
    '''
    if filename[0] is None:
        return None
    app = dvm.APK(filename[0])
    per = app.get_permissions()  # androguard: permissions declared by the APK
    # One flag per dataset column; length follows the list instead of the
    # previously hard-coded 21, so header changes stay in sync automatically.
    return [1 if p in per else 0 for p in micro_dataset_perm_list]
def getPermissions_from_csv(csv_file):
    '''
    Load permission vectors and class labels from a dataset CSV.

    input: csv_file -- sequence whose first element is a CSV path; the file
           must contain the columns in micro_dataset_perm_list plus "type"
           (0 = benign, 1 = malware).
    output: (input_perms, required_output) where input_perms is an array of
            permission rows and required_output is a list of one-hot pairs
            ([1, 0] for type 0, [0, 1] for type 1).
    raises: whatever pd.read_csv raises for an unreadable file.
    '''
    try:
        df = pd.read_csv(csv_file[0])
    except Exception:
        # The original swallowed the error and then crashed on an undefined
        # `df` (NameError); report the offending path and let the real
        # error propagate instead.
        print(csv_file)
        raise
    # Coerce every column to numeric; unparsable cells become NaN.
    for col in list(df.columns.values):
        df[col] = pd.to_numeric(df[col], errors='coerce')
    inputs = df[micro_dataset_perm_list]  # feature columns, dataset order
    labels = df[["type"]]                 # 0/1 class column
    input_perms = inputs.values
    required_output = []
    for label in labels.values:
        if label == 0:
            required_output.append([1, 0])
        elif label == 1:
            required_output.append([0, 1])
        # NOTE(review): rows with any other label are silently skipped,
        # which desynchronizes input_perms and required_output -- kept
        # as-is to preserve the original contract.
    return input_perms, required_output  # permissions and their types
def test_inp_vec_extractor(perm):  # systest
    '''
    Emit `perm` as a C array-initializer header (software-testing mode).

    input: perm -- list of 0/1 permission flags
    side effect: writes `float input[1][N] = {f1,f2,...};` plus a newline
                 to the header file named by sys.argv[3].
    '''
    # join() replaces the old append-then-trim loop, which was quadratic and
    # produced invalid C (`= };`) for an empty vector by stripping the `{`.
    body = ','.join(str(flag) for flag in perm)
    line = "float input[1][" + str(len(perm)) + "] = {" + body + "};"
    with open(sys.argv[3], 'w') as fh:  # header file output
        fh.write("{}\n".format(line))
def csv_test_inp(n):
    """Emit dataset vector `n` for hardware result checking (csvtest mode).

    Reads the CSV named in sys.argv[2:], writes row n's permission vector as
    a C header via test_inp_vec_extractor, and appends that row's expected
    malware flag to the file named by sys.argv[5].
    """
    perms, expected = getPermissions_from_csv(sys.argv[2:])
    idx = int(n)
    test_inp_vec_extractor(perms[idx])
    with open(sys.argv[5], 'a') as out_fh:  # output type
        out_fh.write("{}\n".format(expected[idx][1]))
def serial_communicator(): #serialtest
    """Classify one APK on an attached inference device over serial (demo).

    Extracts the permission vector of the APK named in sys.argv[2:], streams
    it as a space-separated line to the device, then polls until a reply with
    the class scores arrives and prints the verdict.
    """
    # NOTE(review): device path and baud rate are hard-coded -- confirm
    # /dev/ttyUSB1 @ 115200 matches the deployment before reuse.
    ser = serial.Serial('/dev/ttyUSB1', 115200, timeout=3)
    file = sys.argv[2:]
    perm = getPermissions(file)
    # Encode the 0/1 vector as "f1 f2 ... fN\n" for the device.
    message = ""
    for p in perm:
        message = message + str(p) + ' '
    message = message[:-1]  # drop the trailing space
    message = message + '\n'
    #print(message)
    ser.write(message.encode('utf-8'))
    # ser.close()
    # with serial.Serial('/dev/ttyUSB1', 115200, timeout=3) as sp:
    line = ""
    flag = True
    # polling -- the only cause of delay
    while(flag):
        time.sleep(0.1)
        # Re-send the vector each cycle until a full-length reply is read.
        ser.write(message.encode('utf-8'))
        try:
            line = ser.read(40)
            flag = False
        except:
            line = ""
        # Replies shorter than 30 bytes are treated as incomplete; keep polling.
        # NOTE(review): loops forever if the device never answers -- no
        # retry limit; confirm that is acceptable for the demo.
        if(len(line) < 30):
            flag = True
    out = line.split()
    benign = out[1].decode('utf-8')  # benign-class score (as text)
    mal = out[2].decode('utf-8')     # malware-class score (as text)
    # Lower malware score than benign score => classified benign.
    if(float(mal) < float(benign)):
        print("benign " + benign)
    else:
        print("malware "+ mal)
# Command-line entry point: mode selector in sys.argv[1].
#   serialtest -- classify an APK on the attached serial device (demo)
#   systest    -- dump an APK's permission vector as a C header
#   csvtest    -- emit dataset test vector sys.argv[4] for result checking
mode = str(sys.argv[1]) if len(sys.argv) > 1 else ""
if mode == "serialtest":
    serial_communicator()  # for showing demo
elif mode == "systest":
    file = sys.argv[2:]  # apk input
    perm = getPermissions(file)
    test_inp_vec_extractor(perm)  # for software testing
elif mode == "csvtest":
    csv_test_inp(sys.argv[4])  # for calculating result
else:
    # Previously a missing argument raised a bare IndexError and an unknown
    # mode failed silently; report usage instead.
    print("usage: serialtest <apk> | systest <apk> <header_out> | "
          "csvtest <csv> <n> <label_out>")
# NOTE: removed trailing dataset-viewer boilerplate ("Subsets and Splits" /
# "No community queries yet...") -- extraction residue, not Python code.