text stringlengths 38 1.54M |
|---|
# Advent-of-Code style register machine: each input line has the form
# "<reg> inc|dec <amount> if <reg> <cmp> <value>".
import re
from collections import defaultdict

# Registers default to 0 on first access.
reg = defaultdict(int)
# Keyword substitutions used to rewrite each instruction into Python source.
subt = {
    'inc': '+=',
    'dec': '-=',
    'if': 'if'
}
with open('input.txt') as file:
    for line in file:
        # Move the trailing condition to the front: "X if C" -> "if C:X".
        line = re.sub(r'^(.+)(if.+)$', r'\2:\1', line)
        # Keywords found in subt are kept (they contain no '{}', so .format()
        # is a no-op for them); any other lowercase word is treated as a
        # register name and becomes a reg["name"] lookup.
        line = re.sub(r'\b([a-z]+)\b', lambda m: subt.get(m.group(1), 'reg["{}"]').format(m.group(1)), line)
        # SECURITY: exec on file contents — only safe for trusted puzzle input.
        exec(line)
# Largest register value after running the whole program.
print(max(v for k, v in reg.items()))
from typing import List
from collections import defaultdict
class Solution:
    def countSubTrees(self, n: int, edges: List[List[int]], labels: str) -> List[int]:
        """Count, for every node, how many nodes in its subtree (itself
        included) carry the same label.

        The tree is rooted at node 0 and stored as an undirected adjacency
        map; a DFS snapshots the per-label running total before descending
        and diffs it afterwards.
        """
        adjacency = defaultdict(list)
        for u, v in edges:
            adjacency[u].append(v)
            adjacency[v].append(u)

        result = [0] * n
        label_totals = [0] * 26

        def visit(node: int = 0, parent: int = -1):
            idx = ord(labels[node]) - ord('a')
            # Total seen for this label before entering the subtree.
            seen_before = label_totals[idx]
            label_totals[idx] += 1
            for neighbour in adjacency[node]:
                if neighbour != parent:
                    visit(neighbour, node)
            # Everything added since the snapshot came from this subtree.
            result[node] = label_totals[idx] - seen_before

        visit()
        return result
|
# String/int concatenation demo.
first = "Josephia"
last = "Jones"
bros = 2
sises = 1
print(first + last)
print(bros+sises)
# this throws an error
# NOTE(review): str + int raises TypeError — intentional, per the comment above.
print(first + bros)
|
from django.shortcuts import render,HttpResponse
from rest_framework import generics
from .serializer import EmpSerializer,AccountSerializer
from .models import Emp,Account
def home(request):
    """Plain landing page for the REST project."""
    return HttpResponse("<h1>Welcome To Django RestProject</h1>")
# Insert Data
class CreateEmp(generics.CreateAPIView):
    # POST only; a pure create view needs just the serializer.
    serializer_class=EmpSerializer
# Display Data
class EmpListView(generics.ListAPIView):
    queryset=Emp.objects.all()
    serializer_class=EmpSerializer
# Insert and Display
class CreateEmpListView(generics.ListCreateAPIView):
    queryset=Emp.objects.all()
    serializer_class=EmpSerializer
# Retrieve a single Emp by pk
class GetEmp(generics.RetrieveAPIView):
    queryset=Emp.objects.all()
    serializer_class=EmpSerializer
# Update an Emp (PUT/PATCH)
class UpdateEmp(generics.UpdateAPIView):
    queryset=Emp.objects.all()
    serializer_class=EmpSerializer
# Delete an Emp
class DeleteEmp(generics.DestroyAPIView):
    queryset=Emp.objects.all()
    serializer_class=EmpSerializer
# Retrieve + update
class RUpdateEmp(generics.RetrieveUpdateAPIView):
    queryset=Emp.objects.all()
    serializer_class=EmpSerializer
# Retrieve + delete
class RDeleteEmp(generics.RetrieveDestroyAPIView):
    queryset=Emp.objects.all()
    serializer_class=EmpSerializer
# Retrieve + update + delete
class RUDEmp(generics.RetrieveUpdateDestroyAPIView):
    queryset=Emp.objects.all()
    serializer_class=EmpSerializer
# *************** Account *************************
# Insert Data
class CreateAccount(generics.CreateAPIView):
    serializer_class=AccountSerializer
# Display Data
class AccountListView(generics.ListAPIView):
    queryset = Account.objects.all()
    serializer_class=AccountSerializer
# Insert and Display
class CreateAccountListView(generics.ListCreateAPIView):
    queryset = Account.objects.all()
    serializer_class=AccountSerializer
# ModelViewSet - All CRUD operation
# NOTE: these imports are mid-file in the original module layout.
from rest_framework import viewsets
from .models import Singer,Songs
from .serializer import UserSerializer,SingerSerializer,SongsSerializer
from django.contrib.auth.models import User
class EmpViewSet(viewsets.ModelViewSet):
    queryset=Emp.objects.all()
    serializer_class=EmpSerializer
class UserViewSet(viewsets.ModelViewSet):
    queryset=User.objects.all()
    serializer_class=UserSerializer
class SingerViewSet(viewsets.ModelViewSet):
    queryset=Singer.objects.all()
    serializer_class=SingerSerializer
class SongsViewSet(viewsets.ModelViewSet):
    queryset=Songs.objects.all()
    serializer_class=SongsSerializer
|
class OMG :
    # NOTE(review): `print` takes no parameters, so calling it through an
    # instance (myStock.print()) raises TypeError — Python passes the
    # instance as the first positional argument.  Left as-is: the REPL
    # transcript that follows demonstrates exactly this mistake.
    def print() :
        print("Oh my god")
>>> >>> myStock = OMG() # OMG.print(mystock) 가 돼야함.
>>> myStock.print() |
#! /usr/bin/env python
# Flash a 1-pixel border on the newly focused i3 window, then hide it again.
import i3ipc
import time

# Seconds the border stays visible after a focus change.
hide_border_delay = .1
i3 = i3ipc.Connection()

def on_window_focus(i3, event):
    """Briefly show a 1px border on the window that just received focus."""
    window_id = event.container.props.id
    i3.command('[con_id=' + str(window_id) + '] border pixel 1')
    # NOTE(review): sleep blocks the i3 event loop for the whole delay.
    time.sleep(hide_border_delay)
    i3.command('[con_id=' + str(window_id) + '] border pixel 0')

i3.on('window::focus', on_window_focus)
i3.main()
|
from abc import ABC, abstractmethod
class Gate:
    """Base type for a parking gate: an id plus the attendant staffing it."""
    def __init__(self, gate_id, attendent):
        self.gate_id = gate_id
        # NOTE: "attendent" spelling kept — it is part of the public interface.
        self.attendent = attendent
class EntranceGate(Gate):
    """Gate vehicles use to enter the lot; issues tickets.

    The explicit __init__ that only forwarded its arguments to Gate was
    redundant, so the inherited constructor is used instead.
    """
    def process_ticket(self, vehicle):
        """Issue/validate a ticket for *vehicle* (not yet implemented)."""
        pass
class ExitGate(Gate):
    """Gate vehicles use to leave the lot; collects payment.

    The explicit __init__ that only forwarded its arguments to Gate was
    redundant, so the inherited constructor is used instead.
    """
    def process_payment(self, ticket, payment_type):
        """Settle *ticket* using *payment_type* (not yet implemented)."""
        pass
|
# This function is used to calculate the size of a given sbox
def SboxSize(sbox):
    """Return log2(len(sbox)) in bits, asserting the length is a power of two."""
    bits = format(len(sbox), "b")
    # A power of two has exactly one '1' in its binary expansion.
    assert bits.count("1") == 1
    return len(bits) - 1
# Return the value of the bitproduct function Pi_u(x)
def BitProduct(u, x):
    """Return 1 iff every bit set in u is also set in x (bit product Pi_u(x))."""
    if (u & x) == u:
        return 1
    else:
        return 0
# Retrieve the truth table of the boolean function Pi_u(y), where y = sbox(x)
def GetTruthTable(sbox, u):
    """Return the truth table of Pi_u(sbox(x)) as a list indexed by x.

    FIX: the table is materialised as a list.  Under Python 3 the original
    returned a lazy ``map`` object, which ProcessTable can neither len()
    nor index/mutate.
    """
    return [BitProduct(u, y) for y in sbox]
# Process the truth table to get the ANF of the boolean function
def ProcessTable(table):
    """In-place transform of a truth table into its ANF coefficient table
    (the standard truth-table-to-ANF butterfly transform).

    `table` must be a mutable, indexable sequence (a list) whose length is
    a power of two.  NOTE(review): a lazy `map` object (as returned by an
    unpatched GetTruthTable on Python 3) will not work here — confirm
    callers pass a list.
    """
    # we use table size to calculate the SBOXSIZE
    SBOXSIZE = SboxSize(table)
    for i in range(0, SBOXSIZE):
        for j in range(0, 2**i):
            for k in range(0, 2**(SBOXSIZE - 1 - i)):
                # Butterfly step: XOR the lower-half entry into the upper.
                table[k + 2**(SBOXSIZE - 1 - i) + j*(2**(SBOXSIZE - i))] =\
                table[k + 2**(SBOXSIZE - 1 - i) + j*(2**(SBOXSIZE - i))] ^\
                table[k + j*(2**(SBOXSIZE - i))]
# Return the ANF of the sbox, moreover, we also return the ANF of boolean function which
# is the product of some coordinates of the sbox output
def CreatANF(sbox):
    """Return, for every non-zero output mask i, the ANF support of Pi_i(sbox(x)).

    ANF[i] lists the exponents j whose monomial appears in the ANF of the
    bit-product function; ANF[0] stays empty (the constant function is
    never needed by CreateDivisionTrails).
    """
    ANF = [[] for i in range(0, len(sbox))]
    for i in range(1, len(sbox)):
        # list(...) keeps this working on Python 3, where map() is lazy and
        # ProcessTable must be able to index and mutate the table.
        table = list(GetTruthTable(sbox, i))
        ProcessTable(table)
        # Collect the indices of non-zero ANF coefficients.
        ANF[i] = [j for j in range(0, len(sbox)) if table[j] != 0]
    return ANF
# Return all the division trails of a given sbox
def CreateDivisionTrails(sbox):
    """Return all division trails of `sbox` as concatenated bit vectors.

    Each trail is a list of 2*SBOXSIZE ints: the input mask bits followed
    by the output mask bits (both MSB-first).  The all-zero trail is
    always included.
    """
    ANF = CreatANF(sbox)
    SBOXSIZE = SboxSize(sbox)
    INDP = []
    # add zero vector into the division trails
    INDP.append([0 for i in range(2 * SBOXSIZE)])
    # start from the non-zero input masks
    for i in range(1, len(sbox)):
        sqn = []
        # start from the non-zero output masks
        for j in range(1, len(sbox)):
            # j is reachable if some ANF monomial of Pi_j covers mask i.
            flag = False
            for entry in ANF[j]:
                if (i | entry) == entry:
                    flag = True
                    break
            if flag:
                # Keep sqn minimal: skip j if an existing element is covered
                # by j; otherwise drop the elements that cover j.
                sqn1 = []
                flag_add = True
                for t1 in sqn:
                    if (t1 | j) == j:
                        flag_add = False
                        break
                    elif (t1 | j) == t1:
                        sqn1.append(t1)
                if flag_add:
                    for t2 in sqn1:
                        sqn.remove(t2)
                    sqn.append(j)
        for num in sqn:
            # Convert masks to fixed-width bit vectors.  FIX for Python 3:
            # the original called reversed(map(...)), but reversed() needs a
            # sequence and map() is lazy there — materialise lists first.
            a = [int(c) for c in format(i, "0256b")]
            b = [int(c) for c in format(num, "0256b")]
            a.reverse()
            b.reverse()
            # Keep only the low SBOXSIZE bits, then restore MSB-first order.
            a = a[0:SBOXSIZE]
            b = b[0:SBOXSIZE]
            a.reverse()
            b.reverse()
            INDP.append((a+b))
    return INDP
# Write all division trails of an sbox into a file
def PrintfDivisionTrails(fileobj, sbox):
    """Write every division trail of `sbox` to the open writable `fileobj`,
    one trail (bit-vector list) per line, followed by a blank line."""
    INDP = CreateDivisionTrails(sbox)
    fileobj.write("Division Trails of sbox:\n")
    for l in INDP:
        fileobj.write(str(l) + "\n")
    fileobj.write("\n")
if __name__ == "__main__":
    # PRESENT Sbox
    sbox = [0xc, 0x5, 0x6, 0xb, 0x9, 0x0, 0xa, 0xd, 0x3, 0xe, 0xf, 0x8, 0x4, 0x7, 0x1, 0x2]
    filename = "DivisionTrails.txt"
    # Context manager guarantees the file is closed even if writing fails.
    with open(filename, "w") as fileobj:
        PrintfDivisionTrails(fileobj, sbox)
from datetime import date
from django.core.exceptions import ValidationError
from django.test import TestCase
from apps.general_services.validators import person_validation, id_validators, general_validation
class TestValidation(TestCase):
    """Unit tests for the general, person and id validators."""

    # GENERAL VALIDATION
    def test_valid_phone_number_1(self):
        self.assertIsNone(
            general_validation.validate_phone_number(
                "(+385)/91-104-9786"
            ))

    def test_valid_phone_number_4(self):
        self.assertIsNone(
            general_validation.validate_phone_number(
                " (+385)/91-104-9786 "
            ))

    def test_valid_phone_number_2(self):
        self.assertIsNone(
            general_validation.validate_phone_number(
                "0911049786"
            ))

    def test_valid_phone_number_3(self):
        self.assertIsNone(
            general_validation.validate_phone_number(
                "+385911049786"
            ))

    def test_invalid_phone_number_text(self):
        # FIX: pass the callable and its argument separately.  The original
        # invoked the validator inline, so the ValidationError was raised
        # while evaluating the arguments, before assertRaisesMessage ran.
        self.assertRaisesMessage(ValidationError,
                                 'Phone number must only contain numbers and +, () or / characters',
                                 general_validation.validate_phone_number,
                                 "+a85911049786")

    def test_invalid_phone_number_symbol(self):
        # FIX: same callable/args form as above.
        self.assertRaisesMessage(ValidationError,
                                 'Phone number must only contain numbers and +, () or / characters',
                                 general_validation.validate_phone_number,
                                 "+38*911049786")

    # AGE VALIDATION
    def test_valid_dob(self):
        self.assertIsNone(person_validation.validate_age(date(1997, 3, 16)))

    def test_invalid_dob_underage(self):
        self.assertRaises(ValidationError,
                          person_validation.validate_age, date(2005, 1, 1))

    def test_invalid_dob_below_zero(self):
        self.assertRaises(ValidationError,
                          person_validation.validate_age, date(2022, 1, 1))

    # OIB VALIDATION
    def test_valid_oib(self):
        self.assertIsNone(id_validators.validate_pid("38263212113"))

    def test_invalid_oib(self):
        self.assertRaisesMessage(ValidationError, 'PID is invalid, please check your input', id_validators.validate_pid,
                                 "38263212112")

    def test_oib_length_short(self):
        self.assertRaisesMessage(ValidationError, 'PID is of an invalid length', id_validators.validate_pid,
                                 "3826321211")

    def test_oib_length_long(self):
        self.assertRaisesMessage(ValidationError, 'PID is of an invalid length', id_validators.validate_pid,
                                 "382632121123")

    def test_oib_numeric_text(self):
        self.assertRaisesMessage(ValidationError, 'PID is not entirely numeric', id_validators.validate_pid,
                                 "382632121A3")

    def test_oib_numeric_space(self):
        self.assertRaisesMessage(ValidationError, 'PID is not entirely numeric', id_validators.validate_pid,
                                 "382632121 3")

    def test_oib_numeric_symbol(self):
        self.assertRaisesMessage(ValidationError, 'PID is not entirely numeric', id_validators.validate_pid,
                                 "382632121/3")

    def test_oib_trim_whitespace(self):
        self.assertIsNone(id_validators.validate_pid(" 38263212113 "))

    # BID VALIDATION
    def test_valid_bid(self):
        self.assertIsNone(id_validators.validate_bid("01130234"))

    def test_bid_trim_whitespace(self):
        self.assertIsNone(id_validators.validate_bid(" 01130234 "))

    def test_invalid_bid(self):
        self.assertRaisesMessage(ValidationError,
                                 'Business ID number is invalid',
                                 id_validators.validate_bid, "01130233")

    def test_bid_length_short(self):
        self.assertRaisesMessage(ValidationError,
                                 'Business ID number is of an invalid length',
                                 id_validators.validate_bid, "0113023")

    def test_bid_length_long(self):
        self.assertRaisesMessage(ValidationError,
                                 'Business ID number is of an invalid length',
                                 id_validators.validate_bid, "011302345")

    def test_bid_numeric_text(self):
        # NOTE: 'Businnes' matches the (misspelled) message the validator raises.
        self.assertRaisesMessage(ValidationError,
                                 'Businnes ID number is not entirely numeric',
                                 id_validators.validate_bid, "011a0234")

    def test_bid_numeric_space(self):
        self.assertRaisesMessage(ValidationError,
                                 'Businnes ID number is not entirely numeric',
                                 id_validators.validate_bid, "011302 3")

    def test_bid_numeric_symbol(self):
        self.assertRaisesMessage(ValidationError,
                                 'Businnes ID number is not entirely numeric',
                                 id_validators.validate_bid, "011334/3")

    # CITY_ID VALIDATION
    def test_cityid_valid(self):
        self.assertIsNone(id_validators.validate_city_id("03123"))

    def test_cityid_whitespace(self):
        self.assertIsNone(id_validators.validate_city_id(" 03123 "))

    def test_invalid_city_id(self):
        self.assertRaisesMessage(ValidationError,
                                 'City ID number is invalid',
                                 id_validators.validate_city_id, "03124")

    def test_city_id_length_short(self):
        self.assertRaisesMessage(ValidationError,
                                 'City ID number is of an invalid length',
                                 id_validators.validate_city_id, "3123")

    def test_city_id_length_long(self):
        self.assertRaisesMessage(ValidationError,
                                 'City ID number is of an invalid length',
                                 id_validators.validate_city_id, "031240")

    def test_city_id_numeric_text(self):
        self.assertRaisesMessage(ValidationError,
                                 'City ID number is not entirely numeric',
                                 id_validators.validate_city_id, "0312a")

    def test_city_id_numeric_space(self):
        self.assertRaisesMessage(ValidationError,
                                 'City ID number is not entirely numeric',
                                 id_validators.validate_city_id, "031 4")

    def test_city_id_numeric_symbol(self):
        self.assertRaisesMessage(ValidationError,
                                 'City ID number is not entirely numeric',
                                 id_validators.validate_city_id, "031/3")
|
#!/usr/bin/env python
import os
import sys
import commands
# Filesystem block size in bytes; all partition sizes are block-aligned.
BLOCK_SIZE = 4096
# Disabled memoization cache (see matching commented lines in
# AdjustPartitionSizeForVerity).
#AdjustPartitionSizeForVerity.results = {}
def GetVerityMetadataSize(partition_size):
    """Return (success, byte size) of the verity metadata for a partition.

    Shells out to build_verity_metadata.py; on failure the tool output is
    printed and (False, 0) returned.  (Python 2: `commands` module.)
    """
    cmd = "./bin/build_verity_metadata.py -s %d"
    cmd %= partition_size
    status, output = commands.getstatusoutput(cmd)
    if status:
        print output
        return False, 0
    return True, int(output)
def GetVerityFECSize(partition_size):
    """Return (success, byte size) of the forward-error-correction data
    needed to cover `partition_size` bytes, via the external fec tool."""
    cmd = "./bin/fec -s %d" % partition_size
    status, output = commands.getstatusoutput(cmd)
    if status:
        print output
        return False, 0
    return True, int(output)
def GetVeritySize(partition_size, fec_supported):
    """Total verity overhead (tree + metadata, plus FEC when enabled) in
    bytes for `partition_size`; 0 if any helper tool fails."""
    success, verity_tree_size = GetVerityTreeSize(partition_size)
    if not success:
        return 0
    success, verity_metadata_size = GetVerityMetadataSize(partition_size)
    if not success:
        return 0
    verity_size = verity_tree_size + verity_metadata_size
    if fec_supported:
        # FEC must cover the data plus the verity structures themselves.
        success, fec_size = GetVerityFECSize(partition_size + verity_size)
        if not success:
            return 0
        return verity_size + fec_size
    return verity_size
def GetVerityTreeSize(partition_size):
    """Return (success, byte size) of the verity hash tree for a partition,
    via the external build_verity_tree tool."""
    cmd = "./bin/build_verity_tree -s %d"
    cmd %= partition_size
    status, output = commands.getstatusoutput(cmd)
    if status:
        print output
        return False, 0
    return True, int(output)
def AdjustPartitionSizeForVerity(partition_size, fec_supported):
    """Modifies the provided partition size to account for the verity metadata.

    This information is used to size the created image appropriately.
    Args:
        partition_size: the size of the partition to be verified.
        fec_supported: truthy when FEC data must also fit in the partition.
    Returns:
        The size of the partition adjusted for verity metadata.
    """
    # key = "%d %d" % (partition_size, fec_supported)
    # if key in AdjustPartitionSizeForVerity.results:
    #     return AdjustPartitionSizeForVerity.results[key]
    hi = partition_size
    if hi % BLOCK_SIZE != 0:
        # Round the upper bound down to a whole number of blocks.
        hi = (hi // BLOCK_SIZE) * BLOCK_SIZE
    # verity tree and fec sizes depend on the partition size, which
    # means this estimate is always going to be unnecessarily small
    lo = partition_size - GetVeritySize(hi, fec_supported)
    result = lo
    # do a binary search for the optimal size
    while lo < hi:
        # Block-aligned midpoint of [lo, hi].
        i = ((lo + hi) // (2 * BLOCK_SIZE)) * BLOCK_SIZE
        size = i + GetVeritySize(i, fec_supported)
        if size <= partition_size:
            if result < i:
                result = i
            lo = i + BLOCK_SIZE
        else:
            hi = i
    # AdjustPartitionSizeForVerity.results[key] = result
    return result
def main(argv):
    """CLI entry point: argv[1] is the partition size in bytes; prints the
    verity-adjusted size (FEC assumed supported)."""
    size = int(argv[1])
    print(AdjustPartitionSizeForVerity(size, 1))
    # FIX: exit with status 0 on success; the original always exited 1,
    # signalling failure to callers even when it worked.
    sys.exit(0)
if __name__ == '__main__':
    main(sys.argv)
|
from math import*

# Dice-damage exercise: damage = p - (sqrt(5*D1) + pi**(D2/3)) + 1,
# truncated to int.  Prompts are Portuguese runtime strings, kept verbatim.
p=int(input("Insira a vida inicial:"))
D1=int(input("Insira o primeiro valor do dado:"))
D2=int(input("insira o segundo valor do dado:"))
a= (sqrt(5*D1)) + ((pi)**(D2/3))
dano= p-a+1
print(int(dano))
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 6 20:12:50 2020
@author: Erika Montana
"""
from ..utils import sql_utils as sql
def main(conn, label_config, table_name, start_date, end_date,
         preprocessing_prefix):
    """Create `table_name` holding the primary key and label per observation.

    Keyword Arguments:
        conn: open connection to the database
        label_config: config dict whose 'query' entry is label SQL containing
            {prefix}, {start_date} and {end_date} placeholders
        table_name: name of the resulting table
        start_date: 'YYYY-MM-DD' beginning of the temporal group
        end_date: 'YYYY-MM-DD' end of the temporal group
        preprocessing_prefix: prefix of the preprocessed tables
    """
    substitutions = (
        ('{prefix}', preprocessing_prefix),
        ('{start_date}', start_date),
        ('{end_date}', end_date),
    )
    label_sql = label_config['query']
    for placeholder, value in substitutions:
        label_sql = label_sql.replace(placeholder, value)
    # Rebuild the destination table from scratch.
    sql.run_sql_from_string(conn, f'drop table if exists {table_name};')
    sql.run_sql_from_string(conn, f'create table {table_name} as ({label_sql});')
|
import os
import csv
# You will be give a set of poll data called election_data.csv. The dataset is composed of three columns:
#Voter ID, County, and Candidate. Your task is to create a Python script that analyzes
#the votes and calculates each of the following:
#The total number of votes cast
#A complete list of candidates who received votes
#The percentage of votes each candidate won
#The total number of votes each candidate won
#The winner of the election based on popular vote.

# Path to the input CSV, relative to the working directory.
poll_data = os.path.join("Resources", "election_data.csv")
# NOTE(review): never populated below — the analysis is unfinished.
total_votes = []
with open(poll_data) as csvfile:
    csvreader = csv.reader(csvfile, delimiter=",")
    # Consume the header row before any data rows are read.
    header = next(csvreader)
def poll_results(poll_data):
    """Parse one CSV row (Voter ID, County, Candidate) and print a total.

    NOTE(review): this function is never called; `country` is presumably a
    typo for `county`; `sum(voter_id)` raises TypeError because voter_id is
    a single int, not an iterable; and `total_votes` shadows the
    module-level list.  Flagged rather than rewritten, since the intended
    aggregation (per-row vs whole-file) is ambiguous from this source.
    """
    voter_id = int(poll_data[0])
    country = str(poll_data[1])
    candidate = str(poll_data[2])
    total_votes = sum(voter_id)
    print(total_votes)
|
from typing import List
class Solution:
    def maxProfit(self, prices: List[int], fee: int) -> int:
        """Maximum profit with unlimited transactions and a per-sale fee.

        Dynamic programming over two states per day:
          hold -- best cash while holding one share
          free -- best cash while holding nothing
        Improvements over the original: O(1) space instead of two O(n)
        arrays, and an empty price list returns 0 instead of raising
        IndexError.
        """
        if not prices:
            return 0
        hold = -prices[0]  # bought on day 0
        free = 0           # did nothing on day 0
        for price in prices[1:]:
            # Tuple assignment uses the previous day's values for both
            # states, exactly mirroring the original array recurrence.
            hold, free = (max(hold, free - price),
                          max(free, hold + price - fee))
        return free
def main():
    """Run Solution.maxProfit on the two LeetCode sample cases."""
    solver = Solution()
    for case_prices, case_fee in (([1, 3, 2, 8, 4, 9], 2),
                                  ([1, 3, 7, 5, 10, 3], 3)):
        print(solver.maxProfit(prices=case_prices, fee=case_fee))

if __name__ == '__main__':
    main()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 16 13:06:18 2018
@author: jlaplaza
"""
|
# dictionaries: uses key-value pairs
myCat = {'size': 'fat', 'color': 'gray', 'disposition': 'loud'}
myCat['size'] # 'fat'
'My cat has' + myCat['color'] + 'fur.' # 'My cat has gray fur.'
spam = {12345: 'Luggage combination', 42: 'The Answer'}
# dictionaries are different than lists
[1, 2, 3] == [3, 2, 1] # False
# order matters
eggs = {'name': 'Zophie', 'species': 'cat', 'age': 8}
ham = {'species': 'cat', 'name': 'Zophie', 'age': 8}
eggs == ham # True
# order does not matter
# checking a key that does not exist in the dict
eggs['color'] # KeyError (intentional demonstration; execution stops here)
# to check if a key does exist:
'name' in eggs # True
'name' not in eggs # False
# dictionaries, like lists, are mutable (changeable)
list(eggs.keys()) # ['name', 'species', 'age']
list(eggs.values()) # ['Zophie', 'cat', 8]
list(eggs.items()) # [('name', 'Zophie'), ('species', 'cat'), ('age', 8)]
# using .items creates paired tuples
# print keys
for k in eggs.keys():
    print(k)
# name
# species
# age
for v in eggs.values():
    print(v)
# Zophie
# cat
# 8
for k, v in eggs.items():
    print(k, v)
# name Zophie
# species cat
# age 8
for i in eggs.items():
    print(i)
# ('name', 'Zophie')
# ('species', 'cat')
# ('age', 8)
# tuples are like lists except that they are immutable and use ()
# checking if a key or value is in a dictionary
'cat' in eggs.values() # True
# it's tedious to check if a key is in dict. Will crash program
# so we can use an 'if' statement
if 'color' in eggs:
    print(eggs['color'])  # FIX: was eggs['colors'] — raised KeyError even when 'color' existed
# but this is also tedious, so we should use the 'get' method
eggs.get('age', 0) # if age is not in, return 0
eggs.get('color', '') # this returns ''
picnicItems = {'apples': 5, 'cups': 2}
print('I am bringing ' + str(picnicItems.get('napkins', 0)) +
      ' to the picnic.')
# I am bringing 0 to the picnic
# without the 0 option, would result in a KeyError
# adding a key-value pair to a dict IF the key wasn't already in the dict
eggs = {'name': 'Zophie', 'species': 'cat', 'age': 8}
eggs.setdefault('color', 'black')
eggs # {'name': 'Zophie', 'species': 'cat', 'age': 8, 'color': 'black'}
# you can not change the color to orange with the same command
|
"""
Test suite for the django_project.context_processors module
"""
import pytest
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import RequestFactory
from django_project import context_processors
pytestmark = pytest.mark.django_db
class LDAPUserMock:
    # Minimal stand-in for the user object an LDAP backend returns.
    ldap_username = "ldap_user"
class LDAPBackendMock:
    # Mimics the custom LDAP backend: populate_user returning a user object
    # is what marks an account as LDAP-backed in the context processor.
    def populate_user(self, request):
        return LDAPUserMock()
class TestDjangoProjectContextProcessors:
    """Tests for the is_ldap_authenticated_user context processor."""

    @pytest.mark.usefixtures("import_default_users")
    @pytest.mark.usefixtures("import_default_vendors")
    def test_is_ldap_authenticated_user(self, settings, monkeypatch):
        test_user = User.objects.get(username="api")
        rf = RequestFactory()
        # LDAP disabled: flag must be present and False.
        request = rf.get(reverse("productdb:home"))
        request.user = test_user
        result = context_processors.is_ldap_authenticated_user(request)
        assert "IS_LDAP_ACCOUNT" in result, "Should provide a variable that indicates that the user is LDAP " \
                                            "authenticated"
        assert result["IS_LDAP_ACCOUNT"] is False
        # when using the LDAP integration, a custom LDAP backend exists for the user
        # if they are readable, the account is an LDAP account
        # LDAP enabled but the real backend can't resolve the user: still False.
        settings.LDAP_ENABLE = True
        request = rf.get(reverse("productdb:home"))
        request.user = test_user
        result = context_processors.is_ldap_authenticated_user(request)
        assert "IS_LDAP_ACCOUNT" in result
        assert result["IS_LDAP_ACCOUNT"] is False
        # mock the custom LDAP backend — now the account reads as LDAP-backed.
        monkeypatch.setattr(context_processors, "LDAPBackend", LDAPBackendMock)
        request = rf.get(reverse("productdb:home"))
        request.user = test_user
        result = context_processors.is_ldap_authenticated_user(request)
        assert "IS_LDAP_ACCOUNT" in result
        assert result["IS_LDAP_ACCOUNT"] is True
|
from datetime import datetime
class Util:
    @staticmethod
    def iso_to_unix(dt_format: str) -> int:
        """Convert an ISO-8601 UTC timestamp ('...T...sss Z' form) to whole
        Unix seconds (fractional seconds are truncated)."""
        parsed = datetime.strptime(dt_format, '%Y-%m-%dT%H:%M:%S.%fZ')
        epoch = datetime(1970, 1, 1)
        # Both datetimes are naive UTC, so their difference is the Unix time.
        return int((parsed - epoch).total_seconds())
|
'''
Test cases for the linear search algorithm
'''
import timeit
import numpy as np

# Each test case: (array to search, target element).
test = [[np.array([5, 8, 9, 3, 7, 6, 4, 2, 1]), 7], [np.array([1, 7, 6, 9, 10, 3, 4, 5]), 8], \
    [np.array([1, 18, 15, 7, 13, 11, 6, 2, 0, 9, 11, 10, 17, 15, 20, 3, 4, 5]), 20], \
    [np.array([9, 7, 14, 3, 4, 17, 20, 19, 11, 8, 3, 15, 14, 4, 1, 6, 16, 13]), 2]]
c = 0
for A, x in test:
    c += 1
    print('{:>30}{:<}'.format('Test №', c))
    print(f'{A}, шуканий елемент: {x}')
    # Linear scan; `count` tallies comparisons (two per loop iteration).
    n, i, count = len(A), 0, 0
    while i < n and A[i] != x:
        count += 2
        i += 1
    if i == n:
        # Element not found.
        print(f'Елемент {x} не знайдений, порівнянь було здійснено {count + 1}')
    else:
        # Element found at position i.
        print(
            f'Елемент {x} знайдений на позиції {i}, порівнянь було здійснено {count + 1 if i == len(A) - 1 else count + 2}')
# NOTE(review): this times an unrelated join expression, not the search above.
t = timeit.timeit('"-".join(str(n) for n in range(100))', number=10000)
print('Час виконання алгоритму пошуку: {:.5f}'.format(t))
|
import multiprocessing
import time
import gym
import gym3
import numpy as np
from gym.vector import make as make_vec_env
from procgen import ProcgenGym3Env
# Number of parallel environments (and of multiprocessing work items).
population_size = 112
# Environment steps executed per episode / benchmark run.
number_env_steps = 1000
def run_episode_full(u):
    """Run one random-action episode in a single procgen heist env.

    `u` is ignored; it only exists so the function fits Pool.map.
    Returns the episode's summed reward.
    """
    env = gym.make('procgen:procgen-heist-v0')
    obs = env.reset()
    reward = 0
    for _ in range(number_env_steps):
        action = env.action_space.sample()
        obs, rew, done, info = env.step(action)
        reward += rew
    return reward
def run_episode_vec_env(u):
    """Run population_size envs in lockstep via gym's async vector env.

    `u` is ignored (kept for a uniform call signature).
    Returns a numpy array of per-env summed rewards.
    """
    env = make_vec_env(id="procgen:procgen-heist-v0", num_envs=population_size, asynchronous=True)
    obs = env.reset()
    rewards = np.zeros(population_size)
    for _ in range(number_env_steps):
        action = env.action_space.sample()
        obs, rew, done, info = env.step(action)
        rewards += rew
    return rewards
def run_episode_gym3_vec_env(u):
    """Run population_size envs via procgen's native gym3 vector env.

    `u` is ignored (kept for a uniform call signature).
    Returns a numpy array of per-env summed rewards.
    """
    env = ProcgenGym3Env(num=population_size, env_name="heist")
    rewards = np.zeros(population_size)
    for _ in range(number_env_steps):
        # Sample one random action per environment.
        env.act(gym3.types_np.sample(env.ac_space, bshape=(env.num,)))
        rew, obs, first = env.observe()
        rewards += rew
    return rewards
def main():
    """Benchmark three ways of running procgen episodes in parallel."""
    inputs = np.zeros(population_size)
    # Multiprocessing
    pool = multiprocessing.Pool()
    t_start = time.time()
    result_mp = pool.map(run_episode_full, inputs)
    print("Multi-Processing map took: {:6.3f}s".format(time.time()-t_start))
    # Vectorized environment
    t_start = time.time()
    result_vec = run_episode_vec_env([])
    print("Vectorized environment took: {:6.3f}s".format(time.time()-t_start))
    # Gym3 Vectorized environment
    t_start = time.time()
    result_gym3_vec = run_episode_gym3_vec_env([])
    print("Gym3 vec environment took: {:6.3f}s".format(time.time()-t_start))
    # All three approaches must yield one reward per population member.
    assert (len(result_mp) == len(result_vec)
            and len(result_mp) == len(result_gym3_vec)
            and len(result_mp) == population_size)
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class PointLibResult(object):
    """Alipay point-library result: balance, library id/name, status and
    the accumulated point total.

    Plain data holder with the SDK's to/from-dict converters.
    """

    # Field names shared by the dict converters below (order preserved).
    _FIELDS = ('balance', 'library_id', 'library_name', 'status', 'sum_point')

    def __init__(self):
        self._balance = None
        self._library_id = None
        self._library_name = None
        self._status = None
        self._sum_point = None

    @property
    def balance(self):
        return self._balance

    @balance.setter
    def balance(self, value):
        self._balance = value

    @property
    def library_id(self):
        return self._library_id

    @library_id.setter
    def library_id(self, value):
        self._library_id = value

    @property
    def library_name(self):
        return self._library_name

    @library_name.setter
    def library_name(self, value):
        self._library_name = value

    @property
    def status(self):
        return self._status

    @status.setter
    def status(self, value):
        self._status = value

    @property
    def sum_point(self):
        return self._sum_point

    @sum_point.setter
    def sum_point(self, value):
        self._sum_point = value

    def to_alipay_dict(self):
        """Serialize the populated fields, recursing into nested SDK objects.

        FIX: fields are included whenever they are not None.  The original
        used truthiness, which silently dropped legitimate falsy values
        such as a zero balance or an empty name.
        """
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if value is None:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a PointLibResult from a dict; None for empty/missing input."""
        if not d:
            return None
        o = PointLibResult()
        for name in PointLibResult._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
|
def solve(x1, x2, v1, v2):
    """Return True iff the runner at x1 (speed v1) ever lands exactly on
    the runner at x2 (speed v2).

    They coincide at t = (x1 - x2) / (v2 - v1); catching up requires
    v1 > v2 and t to be a positive whole number.  Note t == 0 (equal
    starting positions) yields False, matching the original behaviour.
    """
    if v1 <= v2:
        return False
    t = float(x1 - x2) / float(v2 - v1)
    return t > 0 and t.is_integer()
# Python 2 CLI driver: reads "x1 v1 x2 v2" from stdin.  Note the argument
# order swap when calling solve (it expects x1, x2, v1, v2).
x1, v1, x2, v2 = map(int,raw_input().split())
print solve(x1, x2, v1, v2)
import pandas as pd
import os
from main import loadProjects,loadUsers
from build_network import filterProjects,controlGroupAffiliation,filterNodes,generateNework
def main():
    """Build the PI collaboration network and export Cytoscape edge data."""
    net_path = 'results/cytoscapeFiles'
    people = loadUsers()
    # Group affiliation per user, used to annotate both edge endpoints.
    groupAfilliation = controlGroupAffiliation(people)[['USERNAME','ML_GROUP']]
    pis = set(people[people['PERSON_TYPE'].isin(['Faculty/PI'])]['USERNAME'])
    projects = loadProjects()
    projects = filterProjects(projects)
    net = generateNework(projects,keepProjectData=True)
    # Restrict the network to PI nodes only.
    net = filterNodes(net,pis)
    # Attach group labels for the source (_s) and target (_t) ends.
    net = pd.merge(net,groupAfilliation.rename(columns={'USERNAME':'username_s','ML_GROUP':'group_s'}))
    net = pd.merge(net,groupAfilliation.rename(columns={'USERNAME':'username_t','ML_GROUP':'group_t'}))
    net.to_csv(os.path.join(net_path,'edges_2014-onwards_all-withprojects.csv'),index=False,encoding='utf-8')
if __name__ == '__main__':
    main()
from iinsta.entities.Asset import Asset
from mongoengine.queryset import DoesNotExist
from iinsta.mongo import db
class AssetFacade(object):
    """Query helpers around the Asset document / raw mongo collection."""

    @staticmethod
    def get_by_ids(ids):
        # NOTE(review): queries db.page rather than an asset collection —
        # confirm this is intentional and not copied from a page facade.
        return db.page.find({
            '_id': {'$in': ids}
        })

    @staticmethod
    def get(**kwargs):
        """Return the matching Asset, or None when no document matches."""
        try:
            return Asset.objects.get(**kwargs)
        except DoesNotExist:
            return None

    @staticmethod
    def get_all():
        """All assets, ordered by name."""
        return Asset.objects().order_by('name')

    @staticmethod
    def create(**kwargs):
        """Create, persist and return a new Asset."""
        c = Asset(**kwargs)
        c.save()
        return c

    @staticmethod
    def exists(**kwargs):
        """True if a matching Asset exists.

        NOTE(review): .get() raises MultipleObjectsReturned when more than
        one document matches; only DoesNotExist is handled here.
        """
        try:
            return Asset.objects.get(**kwargs) is not None
        except DoesNotExist:
            return False
|
import matplotlib.pyplot as plt

# Bar chart of programming-language popularity (percent share).
lang=['Java', 'Python', 'PHP', 'JavaScript', 'C#','C++']
popularity=[22.2,17.6,8.8,8,7.7,6.7]
# RGBA fill colour (alpha 0.6) with blue bar edges.
plt.bar(lang,popularity,color=(0.2, 0.4, 0.6,0.6),edgecolor='blue')
plt.xlabel('programing Languages')
plt.ylabel('Popularity(%)')
plt.title('Programing Language bar representation')
plt.show()
|
import sys
import argparse

# Python 2 script: computes precision@k and MRR of a k-best list read from
# stdin ("id ||| hyp ||| feats ||| score") against tab-separated references.
parser = argparse.ArgumentParser()
parser.add_argument('refs')
args = parser.parse_args()

ref_file = open(args.refs)
sent_id = None
ref = None  # NOTE(review): unused; `refs` (set per sentence below) is what's consulted
j = 0 # index into the kbest list
sent_count = 0 # count of sentences seen so far. This is usually equal to sent_id + 1, but not always!
done = False # Keep track of whether or not we've already hit the reference
sum_rr = 0.0
inf = float('inf')
indices = []
for line in sys.stdin:
    i, hyp, feats, score = [part.strip() for part in line.decode('utf-8').strip().split('|||')]
    i = int(i)
    if i != sent_id:
        # New sentence: if the previous one never matched, record a miss.
        # (This also appends one spurious inf before the very first
        # sentence, and the final sentence never gets its miss recorded;
        # both are harmless for the printed metrics since inf <= k is
        # never true.)
        if not done:
            indices.append(inf)
        sent_id = i
        refs = [ref.strip() for ref in ref_file.readline().decode('utf-8').split('\t')]
        j = 0
        sent_count += 1
        done = False
    j += 1
    # Only the first (highest-ranked) hit per sentence counts.
    if hyp in refs and not done:
        indices.append(j)
        sum_rr += 1.0 / j
        done = True
for k in [1, 5, 10]:
    print 'p@%d: %f' % (k, 1.0 * len([1 for i in indices if i <= k]) / sent_count)
print 'mrr: %f' % (sum_rr / sent_count)
|
from django.shortcuts import render,redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User, auth
from admin.song.models import Song
# Create your views here.
def register(request):
    """Render the signup form page."""
    return render(request,'userTemplates/signup/index.html')
def signup(request):
    """Create a new account from the POSTed registration form.

    Validates that every field is present, that the two passwords match and
    that username/email are unused; any failure flashes a message and
    returns to the register page, success redirects to the login page.
    """
    if request.method != 'POST':
        return redirect('user.register')
    # All of these must be present in the form data (the original repeated
    # this check six times verbatim).
    required = ('firstname', 'lastname', 'username', 'email',
                'password1', 'password2')
    for field in required:
        if field not in request.POST.keys():
            messages.error(request, "Parameters are missing!")
            return redirect('user.register')
    first_name = request.POST['firstname']
    last_name = request.POST['lastname']
    username = request.POST['username']
    email = request.POST['email']
    password1 = request.POST['password1']
    password2 = request.POST['password2']
    if password1 != password2:
        messages.error(request,'password not matching...')
        return redirect('user.register')
    if User.objects.filter(username=username).exists():
        messages.error(request,'Username Taken')
        return redirect('user.register')
    if User.objects.filter(email=email).exists():
        messages.error(request,'Email already exists')
        return redirect('user.register')
    # create_user hashes the password and persists the row; the original's
    # extra user.save() was redundant.
    User.objects.create_user(username=username, password=password1, email=email,
                             first_name=first_name, last_name=last_name)
    return redirect('user.login')
def login(request):
    """Render the login form page."""
    return render(request,'userTemplates/login/index.html')
def login_post(request):
    """Authenticate the POSTed credentials and start a session.

    Success redirects to the user home page; failure flashes
    'invalid credentials' and returns to the login page.
    """
    if request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']
        user = auth.authenticate(username=username,password=password)
        if user is not None:
            auth.login(request,user)
            return redirect('user.index')
        else:
            messages.info(request,'invalid credentials')
            return redirect('user.login')
    else:
        return redirect('user.login')
# Login requirement deliberately disabled for the home page:
# @login_required(login_url = 'user.login')
def index(request):
    """Home page: all songs, newest first."""
    data = Song.objects.order_by('-id')
    return render(request, 'userTemplates/index.html', {'data':data})
@login_required(login_url = 'user.login')
def logout(request):
    """End the current session and return to the login page."""
    auth.logout(request)
    return redirect('user.login')
#!/usr/bin/python3
""" Routing definitions for handling different urls """
from flask import Flask, render_template
# Single Flask application instance shared by all routes below.
app = Flask(__name__)
@app.route('/', strict_slashes=False)
def say_hello():
    """ Root route: returns a greeting string
    Returns:
        "Hello, HBNB!"
    """
    return "Hello, HBNB!"
@app.route('/hbnb', strict_slashes=False)
def print_hbnb():
    """ Returns HBNB
    Returns:
        "HBNB"
    """
    return "HBNB"
@app.route('/c/<text>', strict_slashes=False)
def display_text_after_c(text):
    """Display some text after the letter C, underscores shown as spaces.

    Returns:
        a string with a letter C followed by some text
    """
    # str.replace does the whole per-character substitution loop in one call.
    return "C {}".format(text.replace('_', ' '))
@app.route('/python', strict_slashes=False)
@app.route('/python/<text>', strict_slashes=False)
def display(text='is cool'):
    """Display some text after the word Python, underscores shown as spaces.

    Returns:
        Python <text after the last forward slash>
    """
    # str.replace does the whole per-character substitution loop in one call.
    return "Python {}".format(text.replace('_', ' '))
@app.route('/number/<int:n>', strict_slashes=False)
def show_number(n):
    """Confirm that the captured URL segment was an integer.

    Returns:
        "<n> is a number"
    """
    message = "{} is a number".format(n)
    return message
@app.route('/number_template/<int:n>', strict_slashes=False)
def show_html_template(n):
    """Render an HTML page for the integer n.

    Returns:
        html template
    """
    template_name = '5-number.html'
    return render_template(template_name, n=n)
if __name__ == "__main__":
app.run(host="0.0.0.0")
|
import random
import matplotlib.pyplot as plt
from ch9.knapsack.individual import Individual
from ch9.knapsack.random_set_generator import random_set_generator
from ch9.knapsack.toolbox import mutation_bit_flip
def mutate(ind):
    """Return a fresh Individual whose genes are a bit-flipped copy of *ind*'s."""
    flipped_genes = mutation_bit_flip(ind.gene_list)
    return Individual(flipped_genes)
if __name__ == '__main__':
    # Single seed for reproducibility. (The original called seed(1) and then
    # immediately seed(63); the first call was dead code, so it is removed.)
    random.seed(63)
    items = random_set_generator(1, 100, 0.1, 7, 200)
    Individual.set_items(items)
    Individual.set_max_weight(10)
    # Start from a known individual: only the listed item indices are packed.
    gene_set = [0] * len(items)
    inclusions = [2, 30, 34, 42, 48, 64, 85, 104, 113, 119, 157, 174]
    for i in inclusions:
        gene_set[i] = 1
    ind = Individual(gene_set)
    # Count how often a random bit-flip mutation "kills" the individual
    # (fitness 0, i.e. the knapsack constraint is violated — TODO confirm).
    alive = 0
    killed = 0
    for _ in range(1000):
        mutated = mutate(ind)
        if mutated.fitness == 0:
            killed += 1
        else:
            alive += 1
    print(f'Best individual: {ind.fitness}')
    labels = 'Killed', 'Alive'
    sizes = [killed, alive]
    plt.pie(sizes, labels = labels)
    plt.show()
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Profile
class StudentRegisterForm(UserCreationForm):
    """Signup form for students: standard user fields plus academic details."""
    email = forms.EmailField()
    # Extra fields not present on the User model; presumably persisted onto a
    # student profile by the registration view — confirm against the view code.
    department=forms.CharField()
    roll_no=forms.IntegerField()
    course=forms.CharField()
    year=forms.IntegerField()
    class Meta:
        model = User
        fields = ['username', 'email', 'password1', 'password2', 'department', 'roll_no', 'course', 'year']
class FacultyRegisterForm(UserCreationForm):
    """Signup form for faculty: standard user fields plus teacher details."""
    email = forms.EmailField()
    # Non-model fields; presumably copied to a faculty profile by the view.
    teacherId=forms.CharField()
    department=forms.CharField()
    class Meta:
        model = User
        fields = ['username', 'email', 'password1', 'password2', 'teacherId', 'department']
class UserUpdateForm(forms.ModelForm):
    """Edit form for the basic account fields of an existing User."""
    email = forms.EmailField()
    class Meta:
        model = User
        fields = ['username', 'email']
class ProfileUpdateForm(forms.ModelForm):
    """Edit form for the profile picture only."""
    class Meta:
        model = Profile
        fields = ['image']
|
from num_user_movie import *
from shuffle import *
from train_test import *
from id_map import *
from build_matrix import *
from fill_matrix import *
from column_mean import *
from normalize import *
from numpy.linalg import svd
from S_to_matrix import *
from trim_rating import *
from predict_ratings import *
from round_array import *
from os.path import *
from create_file import *
class SVD:
    """K-fold cross-validated movie-rating prediction via truncated SVD.

    Reads a ratings CSV, reduces the user-movie matrix to k dimensions,
    predicts held-out ratings and writes the RMSE table to _rmse.csv.
    """
    def __init__(self):
        # Stateless; all work happens in main().
        pass
    def main(self):
        """Run the full experiment for every (fold count, k) combination.

        NOTE(review): loadtxt, zeros, floor, copy, sqrt, dot, transpose,
        mean and savetxt are pulled in by the star imports above —
        presumably numpy; confirm.
        """
        # Read data
        print('Reading data...\n')
        filename1 = '_data.csv'
        filename2 = 'ratings.dat'
        if not exists(filename1):
            if not exists(filename2):
                print('Error: Please add file', filename2, 'into the path!')
                exit(1)
            else:
                # Build the CSV from the raw ratings file on first run.
                create_file(filename2, filename1)
        A = loadtxt(filename1, delimiter=',')
        # Initialize variables
        no_user = num_user(A)
        no_movie = num_movie(A)
        B = shuffle(A)
        # Set parameters
        k_set = [1, 3]
        fold_set = [3, 4]
        rmse = zeros((len(fold_set), len(k_set)))
        ratings_round = False  # set True to round predictions to whole ratings
        # Main algorithm
        for ff in range(len(fold_set)):
            num_fold = fold_set[ff]
            print(str(num_fold) + '-fold Cross Validation begins.\n')
            # assumes the dataset contains exactly 100000 ratings — TODO confirm
            num_test = int(floor(100000/num_fold))
            num_train = 100000 - num_test
            for kk in range(len(k_set)):
                k = k_set[kk]
                print('Reducing dimensions to', k, '.')
                error_each_fold = zeros((num_fold,1))
                for i in range(num_fold):
                    print('Fold ' + str(i+1) + '. Splitting train/test...')
                    tr, tt = train_test(B, i, num_test)
                    u, v = id_map(B)
                    # Build matrix R in the paper
                    print('Building matrix R...')
                    R_raw = build_matrix(tr, u, v, num_test, no_user, no_movie)
                    R_filled = fill_matrix(R_raw)
                    m = column_mean(R_filled)
                    R = normalize(R_filled, m)
                    # Dimensionality Reduction
                    print('Dimensionality Reduction...')
                    U, S, V = svd(R, full_matrices=False)
                    # Keep only the k largest singular values / vectors.
                    Ss = copy(S[0:k])
                    Sk = S_to_matrix(Ss)
                    Uk = copy(U[:, 0:k])
                    Vk = copy(V[0:k, :])
                    # Split sqrt(Sk) between the two factors: R ~ (U*sqrtS)(sqrtS*V).
                    sqrt_Sk = sqrt(Sk)
                    US = dot(Uk,transpose(sqrt_Sk))
                    SV = dot(sqrt_Sk, Vk)
                    # Predict the ratings
                    print('Predicting ratings...')
                    pr = predict_ratings(US, SV, m, u, v, tt, num_test)
                    if ratings_round == True:
                        pr = round_array(pr)
                    pr_trim = trim_rating(pr)
                    # Find error
                    print('Calculating error...')
                    real = copy(tt[:, 2])
                    error = pr_trim - real
                    error_each_fold[i] = sqrt(sum(error**2)/num_test)
                    print('End one fold.\n')
                rmse[ff, kk] = mean(error_each_fold)
        savetxt("_rmse.csv", rmse, fmt='%.4f', delimiter=",")
        print(rmse)
if __name__ == '__main__':
    # Guard the experiment so importing this module does not trigger it.
    S = SVD()
    S.main()
import argparse
import torch
def parse_args():
    """Parse the command line for merging per-modality checkpoints.

    The four input flags name the single-modality checkpoints; --out is the
    destination. (The original help strings were all the copy-pasted text
    'checkpoint file', which was misleading for --out.)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--text', help='text-branch checkpoint file')
    parser.add_argument('--image', help='image-branch checkpoint file')
    parser.add_argument('--video', help='video-branch checkpoint file')
    parser.add_argument('--audio', help='audio-branch checkpoint file')
    parser.add_argument('--out', help='path to write the merged checkpoint')
    parser.add_argument('--no_keep_head', default=False, action='store_true')
    return parser.parse_args()
def main():
    """Merge four single-modality checkpoints into one multi-branch state dict.

    Every key from modality X is stored under the prefix 'X_branch.'.
    """
    args = parse_args()
    merged = {}
    for branch in ('text', 'image', 'video', 'audio'):
        ckpt = torch.load(getattr(args, branch))
        # Some checkpoints wrap the weights inside a 'state_dict' entry.
        if 'state_dict' in ckpt.keys():
            ckpt = ckpt['state_dict']
        for key, val in ckpt.items():
            merged[branch + '_branch.' + key] = val
    torch.save(merged, args.out)
    print(f'Saved as {args.out}')
# Run the merge only when executed as a script.
if __name__ == '__main__':
    main()
|
class Solution:
    def wiggleSort(self, nums: List[int]) -> None:
        """
        Do not return anything, modify nums in-place instead.
        """
        # Interleave the sorted values from the top down: the biggest
        # half fills the odd slots, the remainder fills the even slots.
        descending = sorted(nums, reverse=True)
        half = len(nums) // 2
        nums[1::2] = descending[:half]
        nums[0::2] = descending[half:]
# S3 details
# Placeholder credentials — fill these in before running the test-suite.
accessKey = "<your-amazon-access-key>"
secretAccessKey = "<your-secret-amazon-access-key>"
# a test bucket for running software tests on (i.e. it doesn't matter if tests delete or alter data in it)
testBucket = "<your-test-bucket>"
|
#
# @lc app=leetcode.cn id=179 lang=python3
#
# [179] 最大数
#
# https://leetcode-cn.com/problems/largest-number/description/
#
# algorithms
# Medium (36.27%)
# Likes: 305
# Dislikes: 0
# Total Accepted: 32.1K
# Total Submissions: 88.3K
# Testcase Example: '[10,2]'
#
# 给定一组非负整数,重新排列它们的顺序使之组成一个最大的整数。
#
# 示例 1:
#
# 输入: [10,2]
# 输出: 210
#
# 示例 2:
#
# 输入: [3,30,34,5,9]
# 输出: 9534330
#
# 说明: 输出结果可能非常大,所以你需要返回一个字符串而不是整数。
#
#
# @lc code=start
class Solution:
    def largestNumber(self, nums: List[int]) -> str:
        """Arrange nums so that their concatenation is the largest number.

        Ordering rule: string a goes before b whenever a+b > b+a.
        """
        def compare(a, b):
            left, right = a + b, b + a
            if left > right:
                return 1
            if left < right:
                return -1
            return 0
        digits = sorted(map(str, nums), key=functools.cmp_to_key(compare), reverse=True)
        joined = ''.join(digits)
        # A leading '0' means every number was 0 — collapse to a single '0'.
        return '0' if joined[0] == '0' else joined
# @lc code=end
|
class Solution:
    def sortedSquares(self, A: List[int]) -> List[int]:
        """Return the squares of the sorted array A, in non-decreasing order.

        Two-pointer merge in O(n): `neg` walks the negative prefix right-to-left,
        `pos` walks the non-negative suffix left-to-right, and the smaller
        square is appended first.

        Bug fixed: the original scan left `neg` at 0 when A was all-negative
        (the break was never reached), so e.g. [-3, -2, -1] returned [9].
        """
        if len(A) == 0:
            return []
        # Advance pos to the first non-negative element (or past the end);
        # the last negative element is then always at pos - 1.
        pos = 0
        while pos < len(A) and A[pos] < 0:
            pos += 1
        neg = pos - 1
        res = []
        while neg >= 0 or pos < len(A):
            if neg < 0:
                res.append(A[pos] ** 2)
                pos += 1
            elif pos >= len(A):
                res.append(A[neg] ** 2)
                neg -= 1
            elif A[pos] >= -A[neg]:
                # |A[neg]| <= A[pos]: the negative side owns the smaller square.
                res.append(A[neg] ** 2)
                neg -= 1
            else:
                res.append(A[pos] ** 2)
                pos += 1
        return res
lista = []
# Collect ten integers typed by the user, then show them all at once.
for _ in range(10):
    valor = int(input('Digite o número: '))
    lista.append(valor)
print(lista)
#!/usr/bin/env python
"""
Install treediff lib in local env:
pip install requests git+https://github.com/learningequality/treediffer
then run
./ricecookerdifferpoc.py
to generate the detailed tree diff JSON and and print the diff in terminal.
"""
import argparse
from contextlib import redirect_stdout
import copy
import io
import json
import os
import subprocess
from treediffer import treediff
from treediffer.diffutils import print_diff
import pprint
import requests
# The two channel-tree snapshots to compare.
OLD_TREE_FILENAME = "ricecooker-medium-oldtree.json"
NEW_TREE_FILENAME = "ricecooker-medium-newtree.json"
# Trees are fetched from this server on first run and cached locally.
REMOTE_DOWNLOAD_DIR = "https://minireference.com/static/tmp/"
LOCAL_DOWNLOAD_DIR = "downloads"
if not os.path.exists(LOCAL_DOWNLOAD_DIR):
    os.mkdir(LOCAL_DOWNLOAD_DIR)
def ensure_filename_exists(filename):
    """Return the local path of *filename*, downloading it into
    LOCAL_DOWNLOAD_DIR first if it is not already cached."""
    local_path = os.path.join(LOCAL_DOWNLOAD_DIR, filename)
    if not os.path.exists(local_path):
        remote_path = os.path.join(REMOTE_DOWNLOAD_DIR, filename)
        response = requests.get(remote_path)
        with open(local_path, 'wb') as local_file:
            local_file.write(response.content)
    assert os.path.exists(local_path)
    return local_path
def get_trees():
    """Load the old and new tree JSON files, downloading them first if needed.

    Fix: the original used json.load(open(path)) and never closed the file
    handles; both files are now opened via context managers.
    """
    pathA = ensure_filename_exists(OLD_TREE_FILENAME)
    with open(pathA) as fA:
        treeA = json.load(fA)
    pathB = ensure_filename_exists(NEW_TREE_FILENAME)
    with open(pathB) as fB:
        treeB = json.load(fB)
    return treeA, treeB
if __name__ == '__main__':
    # No options yet; argparse kept so flags can be added later.
    parser = argparse.ArgumentParser(description='Ricecooker tree differ')
    args = parser.parse_args()
    treeA, treeB = get_trees()
    print('loaded old tree with ', len(treeA['children']), 'children in root')
    print('loaded new tree with ', len(treeB['children']), 'children in root')
    # Compute the structured diff and persist it as JSON for inspection.
    diff = treediff(treeA, treeB, preset="ricecooker", format="restructured")
    diff_filename = 'ricecooker_medium_tree_diff.json'
    with open(diff_filename, 'w') as jsonf:
        json.dump(diff, jsonf, indent=2, ensure_ascii=False)
    # Per-category node lists from the restructured diff.
    nodes_deleted = diff['nodes_deleted']
    nodes_added = diff['nodes_added']
    nodes_moved = diff['nodes_moved']
    nodes_modified = diff['nodes_modified']
    print('SUMMARY:')
    print('#'*80)
    print('nodes_added:', len(nodes_added))
    print('nodes_deleted:', len(nodes_deleted))
    print('nodes_moved:', len(nodes_moved))
    print('nodes_modified:', len(nodes_modified))
    print('\nRESTRUCTUREDDIFF:')
    print('#'*80)
    print_diff(diff,
        attrs=['title', 'kind'],
        ids=['node_id', 'parent_id']
    )
|
class RDF:
    """Streaming parser for an ODP/DMOZ content RDF dump (Python 2).

    Collects [serial, url, topic] rows and per-topic counts; usable as a
    context manager so the input file is always closed.
    """
    def __init__(self, filename=''):
        self.in_filename = filename
        self.fd = open(filename,'r')
        self.urls_serial = 0    # running id assigned to each kept URL
        self.read_urls = []     # rows of [serial, url, topic]
        self.topics = {}        # topic -> number of URLs collected
    def __enter__(self):
        #print 'Enter RDF'
        return self
    def __exit__(self, type, value, traceback):
        #print 'Exit RDF'
        self.fd.close()
    def add_url(self, url='', topic='', lower=True):
        """Record one URL under its top-level topic (World/Regional are skipped)."""
        # To turn Kids_and_Teens into Kids
        topic = topic.split('_')[0]
        # Commas would corrupt the CSV written by writeCSV().
        url = url.replace(',','')
        if lower:
            url = url.lower()
        # Baykan-2009 ignores those topics
        ignored_topics = ['World', 'Regional']
        if not topic in ignored_topics:
            self.urls_serial += 1
            self.read_urls.append([self.urls_serial, url, topic])
            if topic in self.topics:
                self.topics[topic] += 1
            else:
                self.topics[topic] = 1
    def showTopics(self):
        """Print every topic together with its URL count."""
        for topic in self.topics:
            print topic, ':', str(self.topics[topic])
    def getPageURL(self, line=''):
        """Extract the quoted URL from an <ExternalPage ...> line."""
        return line.split('"')[1]
    def getPageTopic(self):
        """Read forward to the closing </ExternalPage>, returning the last
        <topic> value seen (implicitly None if EOF is reached first)."""
        topic = ''
        #for line in self.fd.readlines():
        for line in self.fd:
            line = line.strip()
            #print 'getPageTopic:', line
            if line.startswith('<topic'):
                #print line
                topic = line.rsplit('<',1)[0].split('/')[1]
            elif line.startswith('</ExternalPage'):
                return topic
    def getPages(self):
        """Scan the whole dump, registering every ExternalPage found."""
        #for line in self.fd.readlines():
        for line in self.fd:
            line = line.strip()
            if line.startswith('<ExternalPage'):
                url = self.getPageURL(line)
                topic = self.getPageTopic()
                self.add_url(url, topic)
        print 'Read URLs:', len(self.read_urls), '\n'
    def writeCSV(self):
        """Dump the collected rows as <input-basename>.csv."""
        # (sic: "finename" — existing variable name kept unchanged)
        csv_finename = self.in_filename.split('.')[0] + '.csv'
        fd = open(csv_finename, 'w')
        for u in self.read_urls:
            line = '%d,%s,%s\n' % tuple(u)
            fd.write(line)
        fd.close()
def main(filename=''):
    """Parse an RDF dump, print topic counts, and write the URLs to CSV."""
    with RDF(filename) as dump:
        dump.getPages()
        dump.showTopics()
        dump.writeCSV()
if __name__ == '__main__':
    # Default input is the full ODP/DMOZ content dump.
    main(filename='content.rdf.u8')
def classify(values):
    """Label each number in *values* by parity.

    Returns a list of the same length containing 'even' or 'odd'.
    """
    labels = []
    for value in values:
        labels.append('even' if value % 2 == 0 else 'odd')
    return labels
|
"""
Downloads image thumbnails from google image search. Please note that this is a quite simple implementation with some limitations. First of all it is only possible to download low res thumbnails. Also, you can download 20 thumbnails at max.
If you need a more advanced solution please take a look at the serpAPI https://serpapi.com/images-results from google itself.
This program is published under the CC BY-SA 4.0 license.
By therealpeterpython https://github.com/therealpeterpython - 2021
"""
from pathlib import Path
import os
import shutil
import requests
def _create_url(query):
"""
Creates the google image search url for the search term query
"""
return "https://www.google.com/search?q={}&tbm=isch".format(query)
def _download_html(url):
    """
    Fetch the raw html behind *url*, presenting a desktop browser user agent.
    """
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"}
    response = requests.get(url, headers=headers)
    return response.text
def _get_image_links(html):
"""
Extracts the image links from the raw html.
"""
start_token = 'src="'
end_token = '&s'
offset_token = "/url?q="
offset = html.find(offset_token)
html = html[offset:]
links = list()
while True:
start = html.find(start_token)
end = html.find(end_token, start)
if start == -1: # nothing found
break
links.append(html[start+len(start_token):end])
html = html[end+1:]
return links
def _download_images(query, links, num, dir):
"""
Takes a list of image links and downloads the first num of them. If num is negativ all images will be downloaded. The dir parameter determines the folder to save the images in.
Returns the relative paths to the images.
"""
if num > 0:
links = links[:num]
os.makedirs(dir, exist_ok=True) # create image dir
image_paths = list()
headers={}
headers["User-Agent"] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
for i, link in enumerate(links):
try:
req = requests.get(link, headers=headers, stream=True)
try:
type_guess = req.headers["Content-Type"].split("/")[1]
except:
type_guess = "jpg"
file_name = "{}_{}.{}".format(query, i, type_guess).replace("/", "\\/") # escape stuff
file_name = str(Path(dir, file_name))
with open(file_name, "wb") as fp: # save image in file
req.raw.decode_content = True
shutil.copyfileobj(req.raw, fp)
image_paths.append(file_name)
except Exception as e:
print("Failed at image {} with -> {} <-!".format(file_name, str(e)))
return image_paths
def download(query, num=-1, dir="images"):
    """
    Main entry point. Searches google images for *query* and downloads up to
    max(num, 20) thumbnails into *dir*; a num < 1 downloads all 20.
    Returns the relative paths to the images.
    """
    search_url = _create_url(query)
    page_html = _download_html(search_url)
    thumbnail_links = _get_image_links(page_html)
    return _download_images(query, thumbnail_links, num=num, dir=dir)
def download_one(query):
    """
    Download a single image into the standard dir "images".
    Returns the relative path to the image.
    """
    paths = download(query, num=1)
    return paths[0]
from models.vae.cvae import CVAE
from models.contrastive_learning.cl_encoder import ContrastiveLearningEncoder
import torch
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm
from torch.utils.data import DataLoader
from torch.optim import Adam
import wandb
from tqdm import tqdm
import time
import random
def build_images_to_log(vae:CVAE, style_encoder:ContrastiveLearningEncoder, source, style):
    """Assemble a wandb image dict: originals, reconstructions, per-latent
    noise sweeps, style transfers, and fully generated samples."""
    log_dict = {"orig_imgs": [wandb.Image(img.transpose(0, 2).numpy()) for img in source.cpu()]}
    with torch.no_grad():
        z = vae.encode(source, style)
        # One style row per image, broadcast across the batch for transfer below.
        shifted_styles = [s.unsqueeze(0).expand_as(style) for s in style]
        # reconstructed images
        x_rec = vae.decode(z, style)
        log_dict["rec_imgs"] = [wandb.Image(img.transpose(0, 2).numpy()) for img in x_rec.cpu()]
        # with randomized z
        for i in range(len(z)):
            new_z = [zz for zz in z]
            imgs = []
            for _ in range(4):
                # Resample only latent i, to visualize what it controls.
                new_z[i] = torch.randn_like(new_z[i])
                x_rec = vae.decode(new_z, style)
                imgs.extend([wandb.Image(img.transpose(0, 2).numpy()) for img in x_rec.cpu()])
            log_dict[f"noised_{i+1}_imgs"] = imgs
        # Style transfer
        for i, s in enumerate(shifted_styles):
            x_rec = vae.decode(z, s)
            log_dict[f"transfer_to_{i}"] = [wandb.Image(img.transpose(0, 2).numpy()) for img in x_rec.cpu()]
        # generated
        new_z = [torch.randn_like(zz) for zz in z]
        x_rec = vae.decode(new_z, style)
        log_dict["gen_imgs"] = [wandb.Image(img.transpose(0, 2).numpy()) for img in x_rec.cpu()]
    return log_dict
def random_crop(x, size=256):
    """Cut a random size-by-size patch from the two trailing (spatial) dims of x."""
    top = random.randint(0, x.shape[-2] - size)
    left = random.randint(0, x.shape[-1] - size)
    return x[:, :, top:top + size, left:left + size]
def compute_style_shifted_loss(vae:CVAE, style_encoder:ContrastiveLearningEncoder, z, style):
    """Decode z under rotated styles and return the negative mean dot product
    between generated and target style embeddings (cosine similarity if the
    encoder outputs are unit-norm — confirm)."""
    style = torch.cat([style[1:], style[:1]], dim=0)  # shift styles by one
    x_trans = vae.decode(z, style)
    gen_style = style_encoder(random_crop(x_trans))
    with torch.no_grad():
        style = style_encoder.last(style)
    cos_dst = (gen_style * style).sum(-1)
    return -cos_dst.mean()  # simply maximize cos distance
def train_vae(vae:CVAE, style_encoder:ContrastiveLearningEncoder, dataset, dataloader_workers=8, lr=5e-5, kld_coef=0.1,
              cos_loss_coef=0.1, epochs=400, batches_per_epoch=1000, batch_size=6, log_images_every=10):
    """Train the conditional VAE against a frozen style encoder.

    Loss = (reconstruction + kld_coef * KLD) / image area
           + cos_loss_coef * style-shift loss.
    Losses and periodic sample grids are logged to wandb. Requires CUDA.
    """
    wandb.init(project="CST-GAN-2021-styled")
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=dataloader_workers, pin_memory=True)
    optim = Adam(vae.parameters(), lr)
    # Fixed probe images/styles for qualitative logging (hand-picked indices).
    test_images = torch.stack([dataset[10094][0], dataset[1282][0], dataset[25954][0], dataset[25513][0], dataset[7007][0]]).cuda()
    test_styles = torch.stack([dataset[10094][1], dataset[1282][1], dataset[25954][1], dataset[25513][1], dataset[7007][1]]).cuda()
    with torch.no_grad():
        test_styles = style_encoder.encode(test_styles)
    for i in range(epochs):
        data_iter = iter(dataloader)
        for k in tqdm(range(batches_per_epoch)):
            get_batch_time = 0.
            load_batch_time = 0.
            tmp = time.time()
            batch = next(data_iter)
            # NOTE(review): next() raises StopIteration rather than returning
            # None when the loader is exhausted — this guard may never fire; confirm.
            if batch is None:
                data_iter = iter(dataloader)
                batch = next(data_iter)
            x, style = batch
            get_batch_time += time.time() - tmp
            tmp = time.time()
            x = x.cuda()
            style = style.cuda()
            with torch.no_grad():
                style = style_encoder.encode(style)
            load_batch_time += time.time() - tmp
            z, kld = vae(x, style)
            x_rec = vae.decode(z, style)
            #x_rec, x_rec_noised = compute_with_noised(z, vae)
            #noised_losses = [((x - xr)**2).sum((-1, -2, -3)) for xr in x_rec_noised]
            #rec_noised_loss = torch.stack(noised_losses).mean()
            kld_loss = kld.mean()
            rec_loss = ((x - x_rec)**2).sum((-1, -2, -3)).mean()
            # Normalize the per-pixel-summed losses by image area.
            scale_factor = x.size(-1) * x.size(-2)
            if cos_loss_coef > 0.:
                style_loss = compute_style_shifted_loss(vae, style_encoder, z, style)
            else:
                style_loss = torch.scalar_tensor(0.).cuda()
            #loss = (rec_loss + kld_coef * kld_loss + noised_coef * rec_noised_loss) / scale_factor
            loss = (rec_loss + kld_coef * kld_loss) / scale_factor + style_loss * cos_loss_coef
            # Clearing .grad directly plus zero_grad() — only one is needed.
            for param in vae.parameters():
                param.grad = None
            optim.zero_grad()
            loss.backward()
            #clip_grad_norm(vae.parameters(), 100)
            optim.step()
            wandb.log({"kld_loss": kld_loss.detach().cpu().item(),
                       "rec_loss": rec_loss.detach().cpu().item(),
                       "style_loss": style_loss.detach().cpu().item(),
                       #"rec_noised_loss": rec_noised_loss.detach().cpu().item(),
                       "loss": loss.detach().cpu().item(),
                       "get_batch_time": get_batch_time,
                       "load_batch_time": load_batch_time,
                       "step": i*batches_per_epoch + k + 1}, step=i*batches_per_epoch + k + 1)
        if i % log_images_every == 0:
            wandb.log(build_images_to_log(vae, style_encoder, test_images, test_styles), step=(i+1) * batches_per_epoch)
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 25 17:19:58 2021
@author: Alvin
Set Manipulation - Part 1
Unlike the list and tuple types, Python's set type ships with quite a few
built-in features of its own.
Task: type the snippets from the "usage example" column into the live
code editor.
"""
# The .add() feature: insert a single element.
print(">>> Fitur .add()")
set_buah = {'Jeruk','Apel','Anggur'}
set_buah.add('Melon')
print(set_buah)
# The .clear() feature: empty the set in place.
print(">>> Fitur .clear()")
set_buah = {'Jeruk','Apel','Anggur'}
set_buah.clear()
print(set_buah)
# The .copy() feature: set_buah2 is an alias (same object),
# set_buah3 is an independent copy — adding via the alias also
# changes set_buah1, adding to the copy does not.
print(">>> Fitur .copy()")
set_buah1 = {'Jeruk','Apel','Anggur'}
set_buah2 = set_buah1
set_buah3 = set_buah1.copy()
set_buah2.add('Melon')
set_buah3.add('Kiwi')
print(set_buah1)
print(set_buah2)
# The .update() feature: in-place union with another set.
print(">>> Fitur .update()")
parcel1 = {'Anggur','Apel','Jeruk'}
parcel2 = {'Apel','Kiwi','Melon'}
parcel1.update(parcel2)
print(parcel1)
# The .pop() feature: remove and return an arbitrary element.
print(">>> Fitur .pop()")
parcel = {'Anggur','Apel','Jeruk'}
buah = parcel.pop()
print(buah)
print(parcel)
# The .remove() feature: remove a specific element (KeyError if absent).
print(">>> Fitur .remove()")
parcel = {'Anggur','Apel','Jeruk'}
parcel.remove('Apel')
print(parcel)
# Read the size, then print the two halves of the '+'-padded digit pattern.
n = int(input())
s = "123456789"
for i in range(n):
    # leading plus signs grow while the digit run shrinks
    print("+" * i + s[:n - i])
for i in range(n):
    # same digit runs, right-justified with '+' padding
    print(s[:n - i].rjust(n, "+"))
|
'''
2차원 평면 위의 점 N개가 주어진다.
좌표를 x좌표가 증가하는 순으로, x좌표가 같으면 y좌표가 증가하는 순서로 정렬한 다음 출력하는 프로그램을 작성하시오.
첫째 줄에 점의 개수 N (1 ≤ N ≤ 100,000)이 주어진다. 둘째 줄부터 N개의 줄에는 i번점의 위치 xi와 yi가 주어진다.
(-100,000 ≤ xi, yi ≤ 100,000) 좌표는 항상 정수이고, 위치가 같은 두 점은 없다.
solve)
x,y 좌표를 2차원 배열에 매칭 시켜서 순환해서 찍는 것도 방법인듯
그냥 sorted를 사용하면 알아서 순서대로 정렬해주지만 lambda사용법을 익혀야함
'''
import sys
# Read N points and print them sorted by x, ties broken by y
# (tuple comparison gives exactly that ordering).
N = int(sys.stdin.readline())
coordinate = []
for _ in range(N):
    point = tuple(map(int, sys.stdin.readline().split()))
    coordinate.append(point)
for point in sorted(coordinate):
    print(point[0], point[1])
from __future__ import print_function
import time
from learning.utils import *
from learning.log import *
from torch.autograd import Variable
import torch
def train_loop_npn(models, data_loader, optimizers, lr_schedulers, epoch, args):
    """One training epoch for the NPN regressor; returns the epoch mean MSE.

    Optimizes a variance-weighted MSE:
        (1 - lambda) * (mean - label)^2 / var + lambda * var^2.
    NOTE(review): uses Variable and loss.data[0], i.e. the pre-0.4 PyTorch API.
    """
    for model in models:
        model.train()
        set_dropout_mode(model, True)
    # Only the first model/optimizer/scheduler are actually used here.
    enc = models[0]
    opt_non_discr = optimizers[0]
    lr_scheduler_non = lr_schedulers[0]
    # schedule learning rate
    lr_scheduler_non.step()
    num_per_epoch = len(data_loader)
    loss_all = 0
    loss_mse_all = 0
    loss_var_all = 0
    loss_cnt = 0
    for idx, icml_data in enumerate(data_loader, 1):
        if idx > num_per_epoch:
            break
        input, labels, subject, wave = icml_data
        input = Variable(input.cuda())
        labels = Variable(labels.cuda())
        wave = Variable(wave.cuda())
        # Encoder emits predicted mean a_m and (co)variance-like term a_s.
        a_m, a_s = enc.forward(input)
        if not args.regression_delta:
            # Not predicting deltas: offset the mean by the first input feature.
            a_m = a_m + input[:, 0].unsqueeze(1)
        # loss = torch.sum((1 - args.lambda_) * (a_m - labels) ** 2 / (a_s + 1e-10) + args.lambda_ * torch.log(a_s))
        loss = torch.sum((1 - args.lambda_) * (a_m - labels) ** 2 / (a_s + 1e-10) + args.lambda_ * a_s ** 2)
        loss = loss / a_m.size(1) / a_m.size(0)
        mse_loss = torch.sum((a_m - labels) ** 2) / a_m.size(1) / a_m.size(0)
        var_loss = torch.sum(a_s ** 2) / a_m.size(1) / a_m.size(0)
        for model in models:
            model.zero_grad()
        loss.backward()
        opt_non_discr.step()
        loss_all += loss.data[0]
        loss_mse_all += mse_loss.data[0]
        loss_var_all += var_loss.data[0]
        loss_cnt += 1.0
    string_out = "{} epoch {}: train loss = {} certainty = {} mse_square_loss = {}\n" \
        .format(args.enc_type, epoch, loss_all/loss_cnt, (loss_var_all/loss_cnt) ** 0.5, loss_mse_all/loss_cnt)
    print(string_out)
    args.fp.write(string_out)
    return loss_mse_all/loss_cnt
def val_loop_npn(models, data_loader, epoch, args):
    """One validation pass; returns (mean MSE, mean absolute error).

    NOTE(review): the validation objective uses log(a_s) while the training
    loop above switched to a_s ** 2 — confirm the asymmetry is intentional.
    Uses the pre-0.4 PyTorch API (Variable, loss.data[0]).
    """
    for model in models:
        model.eval() ####depends
        set_dropout_mode(model, False)
    enc = models[0]
    num_per_epoch = len(data_loader)
    loss_all = 0
    abs_loss_all = 0
    loss_cnt = 0
    loss_mse_all = 0
    loss_var_all = 0
    for idx, icml_data in enumerate(data_loader, 1):
        if idx > num_per_epoch:
            break
        input, labels, subject, wave = icml_data
        input = Variable(input.cuda())
        labels = Variable(labels.cuda())
        wave = Variable(wave.cuda())
        # predict = enc.forward(input)
        # loss = full_mse_loss(predict, labels)
        a_m, a_s = enc.forward(input)
        if not args.regression_delta:
            # Not predicting deltas: offset the mean by the first input feature.
            a_m = a_m + input[:, 0].unsqueeze(1)
        loss = torch.sum((1 - args.lambda_) * (a_m - labels) ** 2 / (a_s + 1e-10) + args.lambda_ * torch.log(a_s))
        loss = loss / a_m.size(1) / a_m.size(0)
        mse_loss = torch.sum((a_m - labels) ** 2) / a_m.size(1) / a_m.size(0)
        var_loss = torch.sum(a_s ** 2) / a_m.size(1) / a_m.size(0)
        abs_loss = torch.sum(torch.abs(a_m - labels)) / a_m.size(1) / a_m.size(0)
        loss_all += loss.data[0]
        abs_loss_all += abs_loss.data[0]
        loss_mse_all += mse_loss.data[0]
        loss_var_all += var_loss.data[0]
        loss_cnt += 1.0
    string_out = "val loss = {} certainty_variance = {} mse_square_loss = {} meter error = {}\n".format(loss_all/loss_cnt,
                                                                  (loss_var_all/loss_cnt) ** 0.5,
                                                                  loss_mse_all/loss_cnt,
                                                                  abs_loss_all / loss_cnt)
    print(string_out)
    args.fp.write(string_out)
    return loss_mse_all/loss_cnt, abs_loss_all / loss_cnt
|
import os, json
from flask import Flask, request, Response, send_file, make_response, jsonify
from flask import render_template, url_for, redirect, send_from_directory
from werkzeug import secure_filename
from imanip import app, helpers
def allowed_file(filename):
    """Return True when *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in app.config['ALLOWED_EXTENSIONS']
@app.route('/', methods=['GET', 'POST'])
def get_index():
    # Serve the raw index page. Fix: the original left the file handle open;
    # a context manager closes it deterministically.
    with open('imanip/templates/index.html') as fp:
        return make_response(fp.read())
@app.route('/upload', methods=['GET', 'POST'])
def upload():
    """Accept an uploaded image plus a JSON list of effects, apply them, and
    report the stored filename. (Python 2 module — note the print statements.)"""
    if request.method == 'POST':
        print "in POST"
        # The effects arrive as a JSON-encoded form field.
        effects = request.form['effects']
        effects = json.loads(effects)
        file = request.files['file']
        print file
        if file and allowed_file(file.filename):
            # Sanitize, then make the name unique before saving + processing.
            filename = helpers.name_gen(secure_filename(file.filename))
            print filename
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            helpers.apply(os.path.join(app.config['UPLOAD_FOLDER'], filename), effects)
            return jsonify({ 'success': True, 'path': filename })
        # NOTE(review): unreachable after the return above; and when the file
        # check fails, 'filename' is unbound here -> NameError. Also GET
        # requests fall through with no response. Confirm intended behavior.
        return redirect(url_for('get_image', filename=filename))
@app.route('/image/<filename>')
def get_image(filename):
    """Serve a previously uploaded image from the configured upload folder."""
    upload_dir = app.config['UPLOAD_FOLDER']
    return send_from_directory(upload_dir, filename)
|
import typing
from typing import Any, Optional, Text, Dict, List, Type
from rasa.nlu.components import Component
from rasa.nlu.config import RasaNLUModelConfig
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from spellchecker import SpellChecker
# Gazetteer of Indian state/district spellings taught to the spell checker.
# The original module listed this exact list twice, verbatim (once for
# load_words, once for known); it is now defined once and shared.
# Entries (including duplicates) are preserved byte-for-byte.
_CUSTOM_WORDS = [
    "Andhra",
    "Pradesh",
    "Arunachal",
    "Assam",
    "Bihar",
    "Chhattisgarh",
    "Gujarat",
    "Haryana",
    "Himachal",
    "Jharkhand",
    "Karnataka",
    "Kerala",
    "Madhya",
    "Maharashtra",
    "Manipur",
    "Meghalaya",
    "Mizoram",
    "Nagaland",
    "Odisha",
    "Sikkim",
    "Nadu",
    "Telangana",
    "Tripura",
    "Uttarakhand",
    "Uttar",
    "Bengal",
    "WB",
    "MP",
    "UP",
    "ind",
    "Adilabad",
    "Agar",
    "Agatti",
    "Ahmednagar",
    "Akola",
    "Amravati",
    "Aurangabad",
    "Beed",
    "Bhandara",
    "Buldhana",
    "Chandrapur",
    "Dhule",
    "Gadchiroli",
    "Gondia",
    "Hingoli",
    "Jalgaon",
    "Jalna",
    "Kolhapur",
    "Latur",
    "Nagpur",
    "Nanded",
    "Nandurbar",
    "Nashik",
    "Osmanabad",
    "Palghar",
    "Parbhani",
    "Pune",
    "Raigad",
    "Ratnagiri",
    "Sangli",
    "Satara",
    "Sindhudurg",
    "Solapur",
    "Thane",
    "Wardha",
    "Washim",
    "Yavatmal",
    "Alipurduar",
    "District",
    "Bankura",
    "Basirhat",
    "Parganas",
    "Birbhum",
    "Bishnupur",
    "Bankura",
    "Cooch",
    "Behar",
    "COOCHBEHAR",
    "Dakshin",
    "Dinajpur",
    "Darjeeling",
    "Harbor",
    "Bardhaman",
    "Hoogly",
    "Howrah",
    "Jalpaiguri",
    "Jhargram",
    "Kalimpong",
    "Kolkata",
    "Malda",
    "Murshidabad",
    "Nadia",
    "Nandigram",
    "Medinipore",
    "Parganas",
    "Paschim",
    "Medinipore",
    "Purba",
    "Purulia",
    "Rampurhat",
    "Birbhum",
    "Uttar",
    "Dinajpur",
    "Bardhaman",
    "(Bankura)",
    "Bishnupur HD (Bankura)",
    "Cooch Behar",
    "(S 24 Parganas)",
    "(East Medinipore)",
    "(Birbhum)",
]
spell = SpellChecker()
# Teach the checker these proper nouns so correction() leaves them alone.
spell.word_frequency.load_words(_CUSTOM_WORDS)
# NOTE(review): known() is a query whose return value is discarded; the call
# is kept (on the same shared list) to preserve the module's behavior exactly
# — confirm it can simply be removed.
spell.known(_CUSTOM_WORDS)
if typing.TYPE_CHECKING:
from rasa.nlu.model import Metadata
class CorrectSpelling(Component):
"""A new component"""
# Which components are required by this component.
# Listed components should appear before the component itself in the pipeline.
@classmethod
def required_components(cls) -> List[Type[Component]]:
"""Specify which components need to be present in the pipeline."""
return []
# Defines the default configuration parameters of a component
# these values can be overwritten in the pipeline configuration
# of the model. The component should choose sensible defaults
# and should be able to create reasonable results with the defaults.
defaults = {}
# Defines what language(s) this component can handle.
# This attribute is designed for instance method: `can_handle_language`.
# Default value is None which means it can handle all languages.
# This is an important feature for backwards compatibility of components.
supported_language_list = ["en"]
# Defines what language(s) this component can NOT handle.
# This attribute is designed for instance method: `can_handle_language`.
# Default value is None which means it can handle all languages.
# This is an important feature for backwards compatibility of components.
not_supported_language_list = None
def __init__(self, component_config: Optional[Dict[Text, Any]] = None) -> None:
super().__init__(component_config)
def train(
self,
training_data: TrainingData,
config: Optional[RasaNLUModelConfig] = None,
**kwargs: Any,
) -> None:
"""Train this component.
This is the components chance to train itself provided
with the training data. The component can rely on
any context attribute to be present, that gets created
by a call to :meth:`components.Component.pipeline_init`
of ANY component and
on any context attributes created by a call to
:meth:`components.Component.train`
of components previous to this one."""
pass
def process(self, message: Message, **kwargs: Any) -> None:
"""Process an incoming message.
This is the components chance to process an incoming
message. The component can rely on
any context attribute to be present, that gets created
by a call to :meth:`components.Component.pipeline_init`
of ANY component and
on any context attributes created by a call to
:meth:`components.Component.process`
of components previous to this one."""
try:
textdata = message.data["text"]
# print("text :::" + textdata)
textdata = textdata.split()
new_message = " ".join(spell.correction(w) for w in textdata)
# print("after correction text :::" + new_message)
message.data["text"] = new_message
except KeyError:
pass
def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]:
"""Persist this component to disk for future loading."""
pass
    @classmethod
    def load(
        cls,
        meta: Dict[Text, Any],
        model_dir: Optional[Text] = None,
        model_metadata: Optional["Metadata"] = None,
        cached_component: Optional["Component"] = None,
        **kwargs: Any,
    ) -> "Component":
        """Load this component from file.

        Reuses ``cached_component`` when the pipeline already built one;
        otherwise constructs a fresh instance from the ``meta`` config.
        """
        if cached_component:
            return cached_component
        else:
            return cls(meta)
|
import pygame
import os, sys
import time
import csv
class bar(object):
    """A rectangular bar whose height tracks an amplitude over a frequency span."""

    def __init__(self, amplitude, min_frequency, max_frequency, screen_height, color=None):
        """
        Creates a rectangular bar object.

        amplitude: bar height in pixels (bar is anchored to the bottom edge).
        min_frequency/max_frequency: left/right x-coordinates of the bar.
        screen_height: window height, used to anchor the bar at the bottom.
        color: RGB list; defaults to dark grey.
        """
        # BUG FIX: the default used to be a shared mutable list ([50, 50, 50]);
        # use a None sentinel so each instance gets its own list.
        if color is None:
            color = [50, 50, 50]
        self.amplitude = amplitude
        self.min_frequency = min_frequency
        self.max_frequency = max_frequency
        self.color = color
        self.rect = pygame.Rect(min_frequency, (screen_height - self.amplitude), (self.max_frequency - self.min_frequency), self.amplitude)

    def update_bar(self, old_height):
        """Grow/shrink the rect in place by the height difference."""
        change = old_height - self.amplitude
        pygame.Rect.inflate_ip(self.rect, 0, change)

    def draw_bar(self, Surface):
        """Draw this bar as a filled rectangle onto the given Surface."""
        pygame.draw.rect(Surface, self.color, self.rect)
class bar_main():
    """Owns the pygame window and runs the demo animation loop."""
    def __init__(self, width=800,height=600):
        """Makes the window and displays it"""
        # initializes pygame
        pygame.init()
        pygame.display.init()
        # sets window size
        self.width = width
        self.height = height
        # creates the window
        self.window = pygame.display.set_mode((self.width, self.height))
    def main_loop(self):
        """Run forever, redrawing one bar that shrinks by 2 px each frame."""
        a = 300 # sets initial bar height
        while 1:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    sys.exit()
            # updates the Surface that everything is displaying on
            # NOTE(review): a fresh Surface is allocated every frame; reusing
            # one would be cheaper, though the visible behavior is the same.
            self.background = pygame.Surface(self.window.get_size())
            self.background = self.background.convert()
            self.background.fill((100,15,15))
            b = bar(a, 200, 300, self.height)
            b.draw_bar(self.background)
            self.window.blit(self.background, (0,0))
            # refreshes the display and makes all of the changes visible
            pygame.display.flip()
            time.sleep(0.01) #delay between stuff
            # NOTE(review): `a` decreases without bound, so the bar height goes
            # negative after ~150 frames -- confirm this is intended.
            a -= 2 #sets new bar height
if __name__ == "__main__":
newGame = bar_main()
newGame.main_loop()
|
#................................. image denoising .............................................................#
def imageDenoising(gray):
    """Denoise a grayscale image with OpenCV fast non-local means.

    Shows the result in a window, writes it to 'Denoised.png' in the
    working directory, and returns the denoised image array.
    """
    # only cv2 is actually used; the previous numpy/matplotlib imports were dead
    import cv2
    # h : parameter deciding filter strength. Higher h value removes noise better, but removes details of image also(4 by try and error)
    # templateWindowSize : should be odd. (recommended 7)
    # searchWindowSize : should be odd. (recommended 21)
    denoised = cv2.fastNlMeansDenoising(gray, None, 4, 7, 21)
    cv2.imshow('Denoised image', denoised)
    # NOTE(review): without a cv2.waitKey() call the window may never actually
    # render -- confirm the caller pumps the GUI event loop.
    cv2.imwrite('Denoised.png', denoised)
    return denoised
|
from setuptools import setup

# BUG FIX: read the README through a context manager with an explicit
# encoding; the previous bare open() leaked the handle and depended on the
# platform default encoding.
with open("README.rst", encoding="utf-8") as readme:
    long_description = readme.read()

setup(
    name="wimpy",
    version="0.6",
    description="Anti-copy-pasta",
    long_description=long_description,
    url="https://github.com/wimglenn/wimpy",
    author="Wim Glenn",
    author_email="hey@wimglenn.com",
    license="MIT",
    packages=["wimpy"],
    options={"bdist_wheel": {"universal": True}},
)
|
import numpy as np
from numba import njit
from numba.core.errors import TypingError
import unittest
from numba.tests.support import TestCase, force_pyobj_flags
def build_map():
    """Return a tiny constant dict; exercises the BUILD_MAP bytecode path."""
    pairs = ((0, 1), (2, 3))
    return dict(pairs)
def build_map_from_local_vars():
    """Build a dict whose key and value come from a local variable.

    There used to be a crash due to wrong IR generation for STORE_MAP.
    """
    x = TestCase
    return {0: x, x: 1}
class DictTestCase(TestCase):
    """Object-mode (pyobject) sanity checks for the dict-building helpers."""
    def test_build_map(self, flags=force_pyobj_flags):
        self.run_nullary_func(build_map, flags=flags)
    def test_build_map_from_local_vars(self, flags=force_pyobj_flags):
        self.run_nullary_func(build_map_from_local_vars, flags=flags)
class TestCompiledDict(TestCase):
    """Testing `dict()` and `{}` usage that are redirected to
    `numba.typed.Dict`.
    """
    def test_use_dict(self):
        # Test dict()
        @njit
        def foo():
            d = dict()
            d[1] = 2
            return d
        d = foo()
        self.assertEqual(d, {1: 2})
    def test_use_dict_iterable_args(self):
        # Test dict(iterable)
        @njit
        def dict_iterable_1(a, b):
            d = dict(zip(a, b))
            return d
        @njit
        def dict_iterable_2():
            # from python docs
            return dict([('sape', 4139), ('guido', 4127), ('jack', 4098)])
        inps = (
            ([1, 2, 3], [4, 5, 6]),
            (np.arange(4), np.arange(4)),
            ([1, 2, 3], 'abc'),
            ([1, 2, 3, 4], 'abc'),
        )
        for a, b in inps:
            d = dict_iterable_1(a, b)
            self.assertEqual(d, dict(zip(a, b)))
        self.assertEqual(dict_iterable_2(), dict_iterable_2.py_func())
    def test_ctor_iterable_tuple(self):
        # Test dict(tuple-of-pairs) with a duplicate key (last one wins).
        @njit
        def ctor():
            return dict(((1, 2), (1, 2)))
        expected = dict({1: 2})
        got = ctor()
        # BUG FIX: `assertEquals` is a deprecated alias removed in
        # Python 3.12 -- use the canonical `assertEqual`.
        self.assertEqual(expected, got)
    def test_unsupported_dict_usage(self):
        # Test dict(dict()) and other rejected ctor forms.
        from numba.core.typing.dictdecl import _message_dict_support
        @njit
        def ctor1():
            d = dict()
            d[1] = 2
            return dict(d)
        @njit
        def ctor2():
            return dict(((1, 2), (3, 'a')))
        @njit
        def ctor3():
            return dict((('a', 'b', 'c'), ('d', 'e', 'f')))
        @njit
        def ctor4():
            return dict((({}, 1), ({}, 2)))
        _non_iter_args = "Non-iterable args used in dict(iterable)"
        _dict_upd_item_len = "dictionary update sequence element has length 3;"
        _unhashable_type = "Unhashable type"
        inputs = [
            (ctor1, TypingError, _message_dict_support),
            (ctor2, TypingError, _non_iter_args),
            (ctor3, TypingError, _dict_upd_item_len),
            (ctor4, TypingError, _unhashable_type),
        ]
        for func, exc, msg in inputs:
            with self.assertRaises(exc) as raises:
                func()
            self.assertIn(msg, str(raises.exception))
    def test_use_curlybraces(self):
        # Test {} with empty args
        @njit
        def foo():
            d = {}
            d[1] = 2
            return d
        d = foo()
        self.assertEqual(d, {1: 2})
    def test_use_curlybraces_with_init1(self):
        # Test {} with 1 item
        @njit
        def foo():
            return {1: 2}
        d = foo()
        self.assertEqual(d, {1: 2})
    def test_use_curlybraces_with_initmany(self):
        # Test {} with many items
        @njit
        def foo():
            return {1: 2.2, 3: 4.4, 5: 6.6}
        d = foo()
        self.assertEqual(d, {1: 2.2, 3: 4.4, 5: 6.6})
    def test_curlybraces_init_with_coercion(self):
        # Type coercion at dict init is tested
        @njit
        def foo():
            return {1: 2.2, 3: 4, 5: 6}
        self.assertEqual(foo(), foo.py_func())
    def test_use_curlybraces_with_manyvar(self):
        # Test using variable in {}
        @njit
        def foo(x, y):
            return {x: 1, y: x + y}
        x, y = 10, 20
        self.assertEqual(foo(x, y), foo.py_func(x, y))
    def test_mixed_curlybraces_and_dict(self):
        # Test mixed use of {} and dict()
        @njit
        def foo():
            k = dict()
            k[1] = {1: 3}
            k[2] = {4: 2}
            return k
        self.assertEqual(foo(), foo.py_func())
    def test_dict_use_with_none_value(self):
        # Test that NoneType cannot be used as value for Dict
        @njit
        def foo():
            k = {1: None}
            return k
        with self.assertRaises(TypingError) as raises:
            foo()
        self.assertIn(
            "Dict.value_type cannot be of type none",
            str(raises.exception),
        )
    def test_dict_use_with_optional_value(self):
        # Test that Optional cannot be used as value for Dict
        @njit
        def foo(choice):
            optional = 2.5 if choice else None
            k = {1: optional}
            return k
        with self.assertRaises(TypingError) as raises:
            foo(True)
        self.assertIn(
            "Dict.value_type cannot be of type OptionalType(float64)",
            str(raises.exception),
        )
    def test_dict_use_with_optional_key(self):
        # Test that Optional cannot be used as a key for Dict
        @njit
        def foo(choice):
            k = {2.5 if choice else None: 1}
            return k
        with self.assertRaises(TypingError) as raises:
            foo(True)
        self.assertIn(
            "Dict.key_type cannot be of type OptionalType(float64)",
            str(raises.exception),
        )
    def test_dict_use_with_none_key(self):
        # Test that NoneType cannot be used as a key for Dict
        @njit
        def foo():
            k = {None: 1}
            return k
        with self.assertRaises(TypingError) as raises:
            foo()
        self.assertIn(
            "Dict.key_type cannot be of type none",
            str(raises.exception),
        )
if __name__ == '__main__':
unittest.main()
|
def check(r, c, size):
    """Return True when cell (r, c) lies inside a size x size grid."""
    return 0 <= r < size and 0 <= c < size
def DFS(r, c, n):
    """Flood-fill the connected component containing (r, c) with label n.

    Mutates the module-level grid ``arr`` in place, using 4-neighbour
    connectivity and the module-level size ``N``.
    NOTE(review): recursion depth can reach the component size; a large grid
    may exceed Python's default recursion limit -- confirm the input bounds.
    """
    arr[r][c] = n
    # row/column offsets for the four neighbours (right, left, up, down)
    x = [0, 0, -1, 1]
    y = [1, -1, 0, 0]
    for i in range(4):
        if check(r+x[i], c+y[i], N) and arr[r+x[i]][c+y[i]] == 1:
            DFS(r+x[i], c+y[i], n)
N = int(input())
# N lines of contiguous digits -> N x N grid of 0/1 cells
arr = [list(map(int, input())) for _ in range(N)]
# label each connected component of 1s with 2, 3, 4, ...
n = 2
for i in range(N):
    for j in range(N):
        if arr[i][j] == 1:
            DFS(i, j, n)
            n += 1
# flatten the grid so per-label counting is easy
arr = [arr[i][j] for i in range(N) for j in range(N)]
# number of components (labels run from 2 up to max(arr))
# NOTE(review): a grid containing no 1s prints -1 here -- confirm the input
# guarantees at least one occupied cell.
print(max(arr)-1)
# size of each component, printed in ascending order
house = list(map(arr.count, range(2, max(arr)+1)))
for x in sorted(house):
    print(x)
|
# -*- coding: utf-8 -*-
__author__ = 'florije'
from api.basic_service import BaseService
from api.models import TaskModel
from api.schemas import TaskSchema
from api.custom_exception import InvalidAPIUsage
class TaskService(BaseService):
    """CRUD helpers for TaskModel, serialized through TaskSchema.

    NOTE(review): relies on the marshmallow 2.x API, where ``dump()`` returns
    a result object with ``.data``/``.errors``; marshmallow 3 returns the
    data directly -- confirm the pinned dependency version.
    """
    def create_task(self, **params):
        """Create a task from 'title'/'content' params and return its dump."""
        new_task = TaskModel(title=params.get('title'), content=params.get('content'))
        self.db.add(new_task)
        self.flush()
        task_ma = TaskSchema().dump(new_task)
        if task_ma.errors:
            raise InvalidAPIUsage(message=task_ma.errors)
        return task_ma.data
    @staticmethod
    def get_tasks():
        """Return the serialized list of all tasks."""
        res_task = TaskModel.query.all()
        task_ma = TaskSchema().dump(res_task, many=True)
        if task_ma.errors:
            raise InvalidAPIUsage(message=task_ma.errors)
        return task_ma.data
    @staticmethod
    def get_task_by_id(task_id):
        """Return the serialized task with the given id."""
        res_task = TaskModel.query.filter(TaskModel.id == task_id).first()
        task_ma = TaskSchema().dump(res_task)
        if task_ma.errors:
            raise InvalidAPIUsage(message=task_ma.errors)
        return task_ma.data
# Save the face of the user in encoded form
# Import required modules
import time
import os
import sys
import json
import configparser
import builtins
import cv2
import numpy as np
from threading import Timer
import csv
import boto3
# Try to import dlib and give a nice error if we can't
# Add should be the first point where import issues show up
os.system("ps -ef | grep analyseface.py | head -1| awk '{print $2}' | xargs kill -USR1 ")
try:
import dlib
except ImportError as err:
print(err)
print("\nCan't import the dlib module, check the output of")
print("pip3 show dlib")
sys.exit(1)
# Get the absolute path to the current directory
path = os.path.abspath(__file__ + "/..")
# Test if at lest 1 of the data files is there and abort if it's not
if not os.path.isfile(path + "/../dlib-data/shape_predictor_5_face_landmarks.dat"):
print("Data files have not been downloaded, please run the following commands:")
print("\n\tcd " + os.path.realpath(path + "/../dlib-data"))
print("\tsudo ./install.sh\n")
sys.exit(1)
# Read config from disk
config = configparser.ConfigParser()
config.read(path + "/../config.ini")
if not os.path.exists(config.get("video", "device_path")):
print("Camera path is not configured correctly, please edit the 'device_path' config value.")
sys.exit(1)
use_cnn = config.getboolean("core", "use_cnn", fallback=False)
if use_cnn:
face_detector = dlib.cnn_face_detection_model_v1(path + "/../dlib-data/mmod_human_face_detector.dat")
else:
face_detector = dlib.get_frontal_face_detector()
pose_predictor = dlib.shape_predictor(path + "/../dlib-data/shape_predictor_5_face_landmarks.dat")
face_encoder = dlib.face_recognition_model_v1(path + "/../dlib-data/dlib_face_recognition_resnet_model_v1.dat")
if os.path.isfile('/lib/security/howdy/photo/student.jpg'):
print ("Previous file exists")
os.remove('/lib/security/howdy/photo/student.jpg')
print('Cleared')
else:
print ("Previous file not exist")
print("___________________________________")
print("Adding face model for the user ")
# Start video capture on the IR camera through OpenCV
video_capture = cv2.VideoCapture(config.get("video", "device_path"))
# Set the frame width and height if requested
fw = config.getint("video", "frame_width", fallback=-1)
fh = config.getint("video", "frame_height", fallback=-1)
if fw != -1:
video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, fw)
if fh != -1:
video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, fh)
# Request a frame to wake the camera up
video_capture.grab()
print("\nWe are adding your face now. Please look straight to the camera.")
# Give the user time to read
time.sleep(3)
frames = 0
dark_threshold = config.getfloat("video", "dark_threshold")
# Loop through frames till we hit a timeout
#
# def capture():
# print("\nFace found!")
# cv2.imwrite("face10.jpg", frame)
# a = Timer(2.0, capture)
while frames < 60:
# Grab a single frame of video
# Don't remove ret, it doesn't work without it
ret, frame = video_capture.read()
gsframe = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Create a histogram of the image with 8 values
hist = cv2.calcHist([gsframe], [0], None, [8], [0, 256])
# All values combined for percentage calculation
hist_total = np.sum(hist)
# If the image is fully black or the frame exceeds threshold,
# skip to the next frame
if hist_total == 0 or (hist[0] / hist_total * 100 > dark_threshold):
continue
frames += 1
# Get all faces from that frame as encodings
face_locations = face_detector(gsframe, 1)
# If we've found at least one, we can continue
if face_locations:
print("\nFace detected! Indexing...")
cv2.imwrite("/lib/security/howdy/photo/student.jpg", frame)
break
video_capture.release()
# If more than 1 faces are detected we can't know wich one belongs to the user
if len(face_locations) > 1:
print("Multiple faces detected, aborting")
sys.exit(1)
elif not face_locations:
print("No face detected, aborting")
sys.exit(1)
with open('/lib/security/howdy/admin2_credentials.csv', 'r') as input:
next(input)
reader = csv.reader(input)
for line in reader:
access_key_id = line[2]
secret_access_key = line[3]
photo = '/lib/security/howdy/photo/student.jpg'
user = builtins.howdy_user
eid = user
client = boto3.client('rekognition',
aws_access_key_id = access_key_id,
aws_secret_access_key = secret_access_key,
region_name = 'us-east-2')
with open(photo, 'rb') as source_image:
source_bytes = source_image.read()
response = client.index_faces(
CollectionId='c3',
DetectionAttributes=[
'DEFAULT'
],
Image={'Bytes': source_bytes},
ExternalImageId = eid,
MaxFaces=1,
)
if(response):
word = eid
# Substring is searched in 'eks for geeks'
position = word.find('-', 0)
length = len(word)
print(position)
print(length)
matric = word[0:position]
name = word[position + 1:length]
fullname = name.replace("_", " ")
print("Dear " + name + ", your face has been added successfully.")
os.system("ps -ef | grep analyseface.py | head -1| awk '{print $2}' | xargs kill -USR2 ")
else:
print("Face adding failed.")
os.system("ps -ef | grep analyseface.py | head -1| awk '{print $2}' | xargs kill -USR2 ")
#
#
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from torch import Tensor
from mmdet.models.roi_heads.mask_heads import HTCMaskHead
class TestHTCMaskHead(TestCase):
    """Forward-pass contract tests for HTCMaskHead on CPU and CUDA."""

    @parameterized.expand(['cpu', 'cuda'])
    def test_forward(self, device):
        if device == 'cuda':
            if not torch.cuda.is_available():
                # BUG FIX: `return unittest.skip(...)` merely returned a
                # decorator object, so the test silently PASSED instead of
                # being skipped. `self.skipTest` actually records a skip.
                self.skipTest('test requires GPU and torch+cuda')
        num_classes = 6
        mask_head = HTCMaskHead(
            with_conv_res=True,
            num_convs=1,
            in_channels=1,
            conv_out_channels=1,
            num_classes=num_classes)
        x = torch.rand((1, 1, 10, 10))
        res_feat = torch.rand((1, 1, 10, 10))
        # asking for neither logits nor features is a contract violation
        with self.assertRaises(AssertionError):
            mask_head(x, return_logits=False, return_feat=False)
        results = mask_head(x)
        self.assertEqual(len(results), 2)
        results = mask_head(x, res_feat=res_feat)
        self.assertEqual(len(results), 2)
        results = mask_head(x, return_logits=False)
        self.assertIsInstance(results, Tensor)
        results = mask_head(x, return_feat=False)
        self.assertIsInstance(results, Tensor)
        results = mask_head(x, res_feat=res_feat, return_logits=False)
        self.assertIsInstance(results, Tensor)
        results = mask_head(x, res_feat=res_feat, return_feat=False)
        self.assertIsInstance(results, Tensor)
|
from tkinter import *
from PIL import Image, ImageTk
root = Tk()
canvas = Canvas(width=500, height=500, bg='white')
canvas.pack()
image = Image.open("Trollface.jpg")
# Keep a module-level reference to the PhotoImage: Tkinter does not hold one
# itself, and the image would be garbage-collected (and vanish) otherwise.
photo = ImageTk.PhotoImage(image)
# place the image centered in the 500x500 canvas
canvas.create_image(250, 250, image=photo)
root.mainloop()
|
#Building DataFrames with broadcasting
'''
You can implicitly use 'broadcasting', a feature of NumPy, when creating pandas DataFrames. In this exercise, you're going to create a DataFrame of cities in Pennsylvania that contains the city name in one column and the state name in the second. We have imported the names of 15 cities as the list cities.
Your job is to construct a DataFrame from the list of cities and the string 'PA'.
#Instructions
100 XP
Make a string object with the value 'PA' and assign it to state.
Construct a dictionary with 2 key:value pairs: 'state':state and 'city':cities.
Construct a pandas DataFrame from the dictionary you created and assign it to df.
'''
# Code
# Make a string with the value 'PA': state
state = 'PA'
# Construct a dictionary: data
data = {'state':state, 'city':cities}
# Construct a DataFrame from dictionary data: df
df = pd.DataFrame(data)
# Print the DataFrame
print(df)
'''result
state city
0 PA Manheim
1 PA Preston park
2 PA Biglerville
3 PA Indiana
4 PA Curwensville
5 PA Crown
6 PA Harveys lake
7 PA Mineral springs
8 PA Cassville
9 PA Hannastown
10 PA Saltsburg
11 PA Tunkhannock
12 PA Pittsburgh
13 PA Lemasters
14 PA Great bend
''' |
from StringIO import StringIO
import re
from PIL import Image
from django.conf import settings
from django.db import models
from django.core.urlresolvers import reverse
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from south.modelsinspector import add_introspection_rules
class LongBlob(models.Field):
    """Custom Django model field stored as a MySQL LONGBLOB column."""
    def db_type(self, connection):
        # Raw column type emitted in migrations/DDL; MySQL-specific.
        return 'longblob'
add_introspection_rules([], ["^attachments\.models\.LongBlob"])
class Attachment(models.Model):
DOCUMENT = 1
IMAGE = 2
TYPES = (
(DOCUMENT, "Document"),
(IMAGE, "Image")
)
JPG = 'image/jpeg'
PNG = 'image/png'
GIF = 'image/gif'
PDF = 'application/pdf'
WORD = 'application/msword'
WORDX = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
EXCEL = 'application/vnd.ms-excel'
EXCELX = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
RTF = 'text/richtext'
BMP = 'image/x-ms-bmp'
MIME_TYPES = (
(JPG, 'JPEG Image'),
(PNG, 'PNG Image'),
(GIF, 'GIF Image'),
(PDF, 'PDF Document'),
(WORD, 'MS Word Document'),
(WORDX, 'MS Word Document'),
(EXCEL, 'Excel Document'),
(EXCELX, 'Excel Document'),
(RTF, 'Rich Text'),
(BMP, 'Bitmap Image')
)
#This will control how the mime type is set based on file extension
MIME_TYPE_EXTENSIONS = (
(re.compile(r'.+?\.(?i)jpg$'), JPG),
(re.compile(r'.+?\.(?i)jpeg$'), JPG),
(re.compile(r'.+?\.(?i)gif$'), GIF),
(re.compile(r'.+?\.(?i)png$'), PNG),
(re.compile(r'.+?\.(?i)pdf$'), PDF),
(re.compile(r'.+?\.(?i)doc$'), WORD),
(re.compile(r'.+?\.(?i)xls$'), EXCEL),
(re.compile(r'.+?\.(?i)docx$'), WORDX),
(re.compile(r'.+?\.(?i)xlsx$'), EXCELX),
(re.compile(r'.+?\.(?i)rtf'), RTF),
(re.compile(r'.+?\.(?i)bmp'), BMP),
)
#This is a cross reference list to map from mimetype to attachment type
MIME_TYPE_ATTACHMENT_TYPES = {
JPG:IMAGE,
PNG:IMAGE,
GIF:IMAGE,
PDF:DOCUMENT,
WORD:DOCUMENT,
EXCEL:DOCUMENT,
WORDX:DOCUMENT,
EXCELX:DOCUMENT,
RTF:DOCUMENT,
BMP:IMAGE,
}
#Define which model instance to attach to
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
attach_to = generic.GenericForeignKey()
#define what is being attached
mimetype = models.CharField(max_length=120, choices=MIME_TYPES)
attachment_type = models.IntegerField(choices=TYPES)
description = models.CharField(max_length=256, null=True, blank=True)
attachment = LongBlob()
file_name = models.CharField(max_length=256)
tag = models.CharField(max_length=50, blank=True, default="", db_index=True)
#define when it was attached
attached_at = models.DateTimeField(auto_now_add=True)
def thumb(self):
return self.get_attachment_url('thumbnail')
def preview(self):
return self.get_attachment_url('preview')
def get_attachment_url(self, image_url):
if self.attachment_type == self.IMAGE:
return reverse("attachments:%s" % image_url, kwargs={'identifier':self.pk})
elif self.mimetype == self.PDF:
return "%simages/icons/pdf_icon.gif" % settings.MEDIA_URL
elif self.mimetype in (self.WORD, self.WORDX):
return "%simages/icons/DOC_icon.jpg" % settings.MEDIA_URL
elif self.mimetype in (self.EXCEL, self.EXCELX):
return "%simages/icons/excel_icon.gif" % settings.MEDIA_URL
elif self.mimetype == self.RTF:
return "%simages/icons/rtf_icon.png" % settings.MEDIA_URL
def create_thumbnail(self, max_size):
stream = StringIO(self.attachment)
new_image = Image.open(stream)
new_image.thumbnail((max_size, max_size), Image.ANTIALIAS)
thumbnail = StringIO()
new_image.save(thumbnail, self.mimetype.split('/')[1])
return thumbnail.getvalue()
def get_mime_type(self, file_name):
for regex, mime_type in self.MIME_TYPE_EXTENSIONS:
if regex.match(file_name):
return mime_type
def save(self, force_insert=False, force_update=False, using=None):
if hasattr(self.attachment, 'name'):
self.file_name = self.attachment.name
for regex, mime_type in self.MIME_TYPE_EXTENSIONS:
if regex.match(self.file_name):
self.mimetype = mime_type
self.attachment_type = self.MIME_TYPE_ATTACHMENT_TYPES[mime_type]
break
if hasattr(self.attachment, 'open'):
self.attachment.open()
data = ''.join([chunk for chunk in self.attachment.chunks()])
self.attachment.close()
self.attachment = data
super(Attachment, self).save(force_insert, force_update, using)
@staticmethod
def get_attachments_for(model):
if model:
content_type = ContentType.objects.get_for_model(model)
return Attachment.objects.filter(content_type__pk=content_type.pk, object_id=model.pk).defer('attachment')
@staticmethod
def get_attachments_for_list(list_of_models):
for model in list_of_models:
for attachment in Attachment.get_attachments_for(model):
yield attachment |
#
# @lc app=leetcode.cn id=865 lang=python3
#
# [865] 具有所有最深节点的最小子树
#
# https://leetcode-cn.com/problems/smallest-subtree-with-all-the-deepest-nodes/description/
#
# algorithms
# Medium (64.22%)
# Likes: 122
# Dislikes: 0
# Total Accepted: 7.4K
# Total Submissions: 11.5K
# Testcase Example: '[3,5,1,6,2,0,8,null,null,7,4]'
#
# 给定一个根为 root 的二叉树,每个节点的深度是 该节点到根的最短距离 。
#
# 如果一个节点在 整个树 的任意节点之间具有最大的深度,则该节点是 最深的 。
#
# 一个节点的 子树 是该节点加上它的所有后代的集合。
#
# 返回能满足 以该节点为根的子树中包含所有最深的节点 这一条件的具有最大深度的节点。
#
#
#
# 注意:本题与力扣 1123
# 重复:https://leetcode-cn.com/problems/lowest-common-ancestor-of-deepest-leaves/
#
#
#
# 示例 1:
#
#
#
#
# 输入:root = [3,5,1,6,2,0,8,null,null,7,4]
# 输出:[2,7,4]
# 解释:
# 我们返回值为 2 的节点,在图中用黄色标记。
# 在图中用蓝色标记的是树的最深的节点。
# 注意,节点 5、3 和 2 包含树中最深的节点,但节点 2 的子树最小,因此我们返回它。
#
#
# 示例 2:
#
#
# 输入:root = [1]
# 输出:[1]
# 解释:根节点是树中最深的节点。
#
# 示例 3:
#
#
# 输入:root = [0,1,3,null,2]
# 输出:[2]
# 解释:树中最深的节点为 2 ,有效子树为节点 2、1 和 0 的子树,但节点 2 的子树最小。
#
#
#
# 提示:
#
#
# 树中节点的数量介于 1 和 500 之间。
# 0
# 每个节点的值都是独一无二的。
#
#
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def subtreeWithAllDeepest(self, root: TreeNode) -> TreeNode:
        """Return the smallest subtree containing all of the deepest nodes.

        Post-order walk: each call reports (candidate subtree root, height).
        The deeper side's candidate propagates upward; on a height tie the
        current node is the lowest common ancestor of both deepest sets.
        """
        def deepest(node):
            if node is None:
                return None, 0
            left_cand, left_h = deepest(node.left)
            right_cand, right_h = deepest(node.right)
            if left_h == right_h:
                # both sides equally deep: this node covers all deepest leaves
                return node, left_h + 1
            cand = left_cand if left_h > right_h else right_cand
            return cand, max(left_h, right_h) + 1
        return deepest(root)[0]
# @lc code=end
|
"""generic training helpers.
training.py and training_aux.py are mostly for CNN.
"""
import os.path
import os
from shlex import quote
from collections import OrderedDict
from tempfile import NamedTemporaryFile
import h5py
import numpy as np
import os.path
import time
from . import dir_dictionary
from . import io
from . import data_preprocessing
from .model_fitting_glm import suffix_fn as suffix_fn_glm, get_trainer as get_trainer_glm
from .model_fitting_cnnpre import suffix_fn as suffix_fn_cnnpre, get_trainer as get_trainer_cnnpre
from .model_fitting_gabor import get_trainer as get_trainer_gabor
from .io import load_split_dataset
from subprocess import run
from itertools import product
from .eval import eval_fn_corr_raw
from functools import partial
from .stimulus_classification import num_ot_dict
def eval_fn_particular_dtype(yhat: np.ndarray, y: np.ndarray, dtype):
    """Evaluate correlation after casting ground truth to the model's dtype.

    ``y`` must arrive as float64; it is cast to ``dtype`` (the dtype the
    model's predictions were produced in) so both arrays agree before the
    raw correlation is computed.
    """
    assert y.dtype == np.float64
    assert yhat.dtype == dtype
    y = y.astype(dtype, copy=False)
    return eval_fn_corr_raw(yhat, y, dtype)
validation_dict = {
'cnn': True,
'glm': True,
'gabor': False,
'cnnpre': True,
}
switch_val_test_dict = {
# False if last being val
# True if middle being val.
'cnn': False,
'glm': True,
'cnnpre': True,
'gabor': False,
}
def cnn_suffix_fn(x):
    # MLP-readout cnn subtypes use the 'linear' dataset suffix; others none.
    # NOTE(review): the old comment said "first strip @ if there is any", but
    # unlike `cnn_top_dim_fn` no stripping happens here -- the substring test
    # gives the same answer either way, so behavior is unaffected.
    if 'mlp' in x:
        return 'linear'
    else:
        return None
suffix_fn_dict = {
'cnn': cnn_suffix_fn,
'glm': lambda x: suffix_fn_glm(x),
'gabor': lambda x: None,
'cnnpre': lambda x: suffix_fn_cnnpre(x),
}
def cnn_top_dim_fn(x):
    """Extract the MLP top dimension from a cnn subtype string.

    A subtype of the form 'mlp.K' (optionally carrying an '@...' suffix)
    yields ``int(K)``; any other subtype yields None.
    """
    # drop an '@' suffix before inspecting the base name
    base = x.split('@')[0]
    if 'mlp' not in base:
        return None
    prefix, dim = base.split('.')
    assert prefix == 'mlp'
    return int(dim)
top_dim_fn_dict = {
'cnn': cnn_top_dim_fn,
'glm': lambda x: None,
'cnnpre': lambda x: None,
'gabor': lambda x: None
}
subtract_mean_dict = {
'cnn': False,
'glm': False,
'cnnpre': False,
'gabor': True,
}
split_steps_fn_dict = {
'cnn': lambda x: 50,
'glm': lambda x: 100,
'cnnpre': lambda x: 100,
# really slow.
'gabor': lambda x: 10,
}
eval_fn_dict = {
'cnn': partial(eval_fn_particular_dtype, dtype=np.float32),
'gabor': partial(eval_fn_particular_dtype, dtype=np.float32),
'glm': partial(eval_fn_particular_dtype, dtype=np.float64),
'cnnpre': partial(eval_fn_particular_dtype, dtype=np.float64),
}
# what portions of datasets to train.
training_portions_fn_dict = {
# only train one seed first.
'cnn': lambda x: {'seed_list': range(2),
# 'subset_list': ('all',
# 'OT'
# ),
# 'neural_dataset_to_process': ('MkE2_Shape',),
# 'neural_dataset_to_process': ('MkA_Shape',)
},
'glm': lambda x: {'seed_list': range(2), },
'cnnpre': lambda x: {'seed_list': range(2), 'train_percentage_list': (100,)},
'gabor': lambda x: {'seed_list': range(2), 'train_percentage_list': (100,)},
}
chunk_dict = {
'cnn': 5,
# 'cnn': None,
'glm': None,
'cnnpre': None,
'gabor': None,
}
assert (validation_dict.keys() == suffix_fn_dict.keys() ==
split_steps_fn_dict.keys() == training_portions_fn_dict.keys() ==
switch_val_test_dict.keys() == chunk_dict.keys() == eval_fn_dict.keys() ==
top_dim_fn_dict.keys() == subtract_mean_dict.keys())
_cache_vars = {'num_neuron_dict': None,
'num_im_dict': None}
def get_num_neuron(neural_dataset_key):
    """Return the neuron count for a dataset, memoized in ``_cache_vars``."""
    if _cache_vars['num_neuron_dict'] is None:
        # lazy load once; subsequent calls hit the cached dict
        _cache_vars['num_neuron_dict'] = io.get_num_neuron_all_datasets()
    return _cache_vars['num_neuron_dict'][neural_dataset_key]
def get_num_test_im(neural_dataset_key, subset):
    """Return the expected test-set image count for a dataset/subset pair.

    This is just a sanity check value used by callers; image counts are
    memoized in ``_cache_vars``.
    TODO this //5 is a magic number (test split = one fifth of the images),
    but should be true across the whole project.
    """
    if _cache_vars['num_im_dict'] is None:
        _cache_vars['num_im_dict'] = io.get_num_im_all_datasets()
    image_key = io.neural_dataset_dict[neural_dataset_key]['image_dataset_key']
    num_all = _cache_vars['num_im_dict'][image_key]
    if subset == 'all':
        return num_all // 5
    elif subset == 'OT':
        return num_ot_dict[image_key] // 5
    else:
        raise NotImplementedError
def get_trainer(model_type, model_subtype):
    """Fetch the training function for a model family and subtype.

    The returned trainer takes a dataset tuple and returns a dict with:
      1. 'y_test_hat'
      2. 'corr'
      3. (optional) 'attrs': a dict of attributes to store.
      4. (optional) 'model': contents for a 'model' subgroup with extras.
    """
    if model_type == 'glm':
        trainer = get_trainer_glm(model_subtype)
    elif model_type == 'cnn':
        # imported lazily; the cnn stack pulls in heavyweight dependencies
        from .model_fitting_cnn import get_trainer as get_trainer_cnn
        trainer = get_trainer_cnn(model_subtype)
    elif model_type == 'cnnpre':
        trainer = get_trainer_cnnpre(model_subtype)
    elif model_type == 'gabor':
        trainer = get_trainer_gabor(model_subtype)
    else:
        raise NotImplementedError
    return trainer
def dataset_spec_encode(neural_dataset_key, subset, percentage: int, seed: int):
    """Encode a dataset spec as one shell-safe, '@'-joined token."""
    fields = (neural_dataset_key, subset, str(percentage), str(seed))
    encoded = '@'.join(fields)
    # must survive a shell command line unquoted and be usable in a path
    assert encoded == quote(encoded) and '/' not in encoded
    # round-trip sanity check against the decoder
    assert dataset_spec_decode(encoded) == (neural_dataset_key, subset, percentage, seed)
    return encoded
def dataset_spec_decode(encoded):
    """Inverse of :func:`dataset_spec_encode`."""
    dataset_key, subset, percentage_str, seed_str = encoded.split('@')
    return dataset_key, subset, int(percentage_str), int(seed_str)
def neuron_spec_encode(neuron_start, neuron_end):
    """Encode a half-open neuron index range as a shell-safe 'start-end'."""
    encoded = '-'.join((str(neuron_start), str(neuron_end)))
    # shell-safe and path-safe
    assert encoded == quote(encoded) and '/' not in encoded
    # round-trip sanity check against the decoder
    assert neuron_spec_decode(encoded) == (neuron_start, neuron_end)
    return encoded
def neuron_spec_decode(encoded):
    """Inverse of :func:`neuron_spec_encode`."""
    start_str, end_str = encoded.split('-')
    return int(start_str), int(end_str)
def get_data_one_slice(datasets_all, idx_relative):
    """Select one response column from an (x, y, x, y, ...) dataset tuple.

    Entries at even positions (stimuli) and None entries pass through
    unchanged; every other y array is narrowed to the single column
    ``idx_relative``, kept 2-D via a length-1 slice.
    """
    col = slice(idx_relative, idx_relative + 1)
    sliced = []
    for pos, data in enumerate(datasets_all):
        keep_whole = (pos % 2 == 0) or (data is None)
        sliced.append(data if keep_whole else data[:, col])
    return tuple(sliced)
def train_one_case_generic_save_data(train_result: dict, key_this: str, f_out: h5py.File,
                                     y_test: np.ndarray, eval_fn):
    """Validate one neuron's training result and write it under ``key_this``.

    ``train_result`` must hold 'y_test_hat' and 'corr'; it may also hold
    'attrs' (saved as HDF5 attributes) and 'model' (saved as datasets in a
    'model' subgroup, or written by a callable for Gabor).
    """
    assert {'y_test_hat', 'corr'} <= train_result.keys() <= {'y_test_hat', 'corr', 'attrs', 'model'}
    # save
    y_test_hat = train_result['y_test_hat']
    assert np.all(np.isfinite(y_test_hat))
    # predictions must be a single (n_images, 1) column
    assert y_test_hat.ndim == 2 and y_test_hat.shape[1] == 1
    assert y_test.shape == y_test_hat.shape
    grp_this = f_out.create_group(key_this)
    grp_this.create_dataset('y_test_hat', data=y_test_hat)
    assert np.isscalar(train_result['corr']) and np.isfinite(train_result['corr'])
    # NOTE(review): exact float equality -- only valid because eval_fn is the
    # same deterministic code path that produced 'corr'; confirm intentional.
    assert eval_fn(y_test_hat, y_test) == train_result['corr']
    grp_this.create_dataset('corr', data=train_result['corr'])
    print('performance', train_result['corr'])
    if 'attrs' in train_result:
        # save attrs
        for k, v in train_result['attrs'].items():
            grp_this.attrs[k] = v
    if 'model' in train_result:
        grp_this_model = grp_this.create_group('model')
        if isinstance(train_result['model'], dict):
            for k_model, v_model in train_result['model'].items():
                grp_this_model.create_dataset(k_model, data=v_model)
        else:
            # for Gabor: a callable that writes into the group itself.
            assert callable(train_result['model'])
            train_result['model'](grp_this_model)
    f_out.flush()
def train_one_case_generic(model_type, model_subtype, dataset_spec, neuron_spec):
    """Fit one (model, dataset slice, neuron range) combination and save results.

    This is the function that slurm scripts will call. ``dataset_spec`` and
    ``neuron_spec`` are bash-safe strings (see the *_encode helpers). Results
    are written per neuron into an HDF5 file; neurons already present in the
    file are skipped, so re-runs resume cheaply.
    """
    neural_dataset_key, subset, percentage, seed = dataset_spec_decode(dataset_spec)
    neuron_start, neuron_end = neuron_spec_decode(neuron_spec)
    dir_to_save, file_name_base, key_to_save = file_and_key_to_save(model_type, model_subtype,
                                                                    neural_dataset_key, subset, percentage, seed,
                                                                    neuron_start, neuron_end)
    test_im_size = get_num_test_im(neural_dataset_key, subset)
    neuron_range = slice(neuron_start, neuron_end)
    os.makedirs(dir_to_save, exist_ok=True)
    # BUG FIX: pass an explicit 'a' (read/write, create) mode -- h5py 3.x
    # changed the default mode from 'a' to 'r', which would make this file
    # handle read-only and break the writes below.
    with h5py.File(os.path.join(dir_to_save, file_name_base), 'a') as f_out:
        trainer = get_trainer(model_type, model_subtype)
        # get dataset
        datasets_all = load_split_dataset(neural_dataset_key, subset, validation_dict[model_type],
                                          neuron_range, percentage=percentage,
                                          seed=seed, last_val=not switch_val_test_dict[model_type],
                                          suffix=suffix_fn_dict[model_type](model_subtype),
                                          top_dim=top_dim_fn_dict[model_type](model_subtype),
                                          subtract_mean=subtract_mean_dict[model_type])
        # then training one by one.
        for neuron_idx_relative, neuron_idx_real in enumerate(range(neuron_start, neuron_end)):
            key_this = key_to_save + '/' + str(neuron_idx_real)
            if key_this in f_out:
                print(f'{key_this} done before')
            else:
                print(f'{key_this} start')
                t1 = time.time()
                datasets_this = get_data_one_slice(datasets_all, neuron_idx_relative)
                train_result = trainer(datasets_this)
                # when val/test are switched, the held-out test y sits at
                # tuple index 5; otherwise at index 3.
                y_test = datasets_this[5] if switch_val_test_dict[model_type] else datasets_this[3]
                assert y_test.shape == (test_im_size, 1)
                train_one_case_generic_save_data(train_result, key_this, f_out, y_test, eval_fn_dict[model_type])
                t2 = time.time()
                print(f'{key_this} @ {t2-t1}sec')
def generate_one_script(header, model_type, model_subtype, dataset_spec, neuron_spec_or_spec_list):
    """Assemble one shell script that fits the given dataset/neuron spec(s).

    A single string spec yields one foreground invocation; a list of specs
    yields one backgrounded invocation each, followed by ``wait``.
    """
    env_setup = f"""
. activate tf15
# wait for a while. otherwise, it may not work... maybe some bug of conda.
sleep 2
cd {dir_dictionary['root']}
. ./setup_env_variables.sh
""".strip()
    # https://stackoverflow.com/questions/8577027/how-to-declare-a-long-string-in-python
    # https://stackoverflow.com/questions/363223/how-do-i-get-both-stdout-and-stderr-to-go-to-the-terminal-and-a-log-file
    run_template = ("PYTHONUNBUFFERED=1 python scripts/model_fitting/fitting_master.py "
                    f"{{model_type}} {{model_subtype}} {{dataset_spec}} {{neuron_spec}} "
                    f"2>&1 | tee {dir_dictionary['root']}/trash/"
                    f"model_fitting_{{model_type}}_{{model_subtype}}_{{dataset_spec}}_{{neuron_spec}}"
                    ).strip()
    # both values end up in shell commands and file names, so they must be
    # shell-safe and free of path separators
    assert isinstance(model_type, str) and quote(model_type) == model_type and '/' not in model_type
    assert isinstance(model_subtype, str) and quote(model_subtype) == model_subtype and '/' not in model_subtype
    parts = [header, '\n', env_setup, '\n\n\n']
    if isinstance(neuron_spec_or_spec_list, str):
        # single spec: one foreground command
        parts.append(run_template.format(
            model_type=model_type, model_subtype=model_subtype,
            dataset_spec=dataset_spec, neuron_spec=neuron_spec_or_spec_list) + '\n')
    else:
        # several specs: run them concurrently, then wait for the batch
        for one_spec in neuron_spec_or_spec_list:
            parts.append(run_template.format(
                model_type=model_type, model_subtype=model_subtype,
                dataset_spec=dataset_spec, neuron_spec=one_spec) + ' &\n')
        parts.append('wait\n')
    return ''.join(parts)
# https://stackoverflow.com/questions/434287/what-is-the-most-pythonic-way-to-iterate-over-a-list-in-chunks
def chunker(seq, size):
    """Yield consecutive slices of *seq* containing at most *size* items."""
    positions = range(0, len(seq), size)
    return (seq[start:start + size] for start in positions)
def check_training_portions(seed_list, subset_list, neural_dataset_to_process, train_percentage_list):
    """Assert every requested value is one that data_preprocessing declares."""
    scopes = (
        (seed_list, data_preprocessing.seed_list),
        (subset_list, data_preprocessing.subset_list),
        (neural_dataset_to_process, data_preprocessing.neural_dataset_to_process),
        (train_percentage_list, data_preprocessing.train_percentage_list),
    )
    for requested, allowed in scopes:
        for item in requested:
            assert item in allowed
    # TODO: check that portions are valid.
def _get_scope_to_try(name, model_type, model_subtype, override):
    """Resolve one scope list; precedence: override > per-model config > data_preprocessing default."""
    if name in override:
        return override[name]
    per_model_config = training_portions_fn_dict[model_type](model_subtype)
    module_default = getattr(data_preprocessing, name)
    return per_model_config.get(name, module_default)
def generate_all_scripts(header, model_type, model_subtype_list, override=None):
    """this is what those _sub files call. they provide header and subtypes.
    it will return a list of scripts.
    and then, the script should either run these scripts one by one,
    or sbatch them

    Returns an OrderedDict mapping (model_subtype, dataset_spec, neuron_spec)
    to the generated shell-script text.
    """
    # how many neuron-spec jobs get packed into one script (None = one each)
    chunk_option = chunk_dict[model_type]
    script_dict = OrderedDict()
    if override is None:
        override = dict()
    for model_subtype in model_subtype_list:
        # generate all datasets
        seed_list = _get_scope_to_try('seed_list', model_type, model_subtype, override)
        subset_list = _get_scope_to_try('subset_list', model_type, model_subtype, override)
        neural_dataset_to_process = _get_scope_to_try('neural_dataset_to_process', model_type, model_subtype, override)
        train_percentage_list = _get_scope_to_try('train_percentage_list', model_type, model_subtype, override)
        check_training_portions(seed_list, subset_list, neural_dataset_to_process, train_percentage_list)
        for neural_dataset_key, subset, percentage, seed in product(
                neural_dataset_to_process, subset_list, train_percentage_list, seed_list
        ):
            dataset_spec = dataset_spec_encode(neural_dataset_key, subset, percentage, seed)
            # generate chunks to process.
            neuron_fitting_pairs = get_neuron_fitting_pairs(get_num_neuron(neural_dataset_key),
                                                            split_steps_fn_dict[model_type](model_subtype))
            # convert every one into specs.
            neuron_fitting_pairs = [neuron_spec_encode(x, y) for (x, y) in neuron_fitting_pairs]
            if chunk_option is not None:
                specs_to_iter = chunker(neuron_fitting_pairs, chunk_option)
            else:
                specs_to_iter = neuron_fitting_pairs
            for neuron_spec in specs_to_iter:
                # a chunk is a list of specs; join them so the dict key stays hashable/stable
                if isinstance(neuron_spec, str):
                    script_name = (model_subtype, dataset_spec, neuron_spec)
                else:
                    script_name = (model_subtype, dataset_spec, '&'.join(neuron_spec))
                script_dict[script_name] = generate_one_script(header,
                                                               model_type,
                                                               model_subtype,
                                                               dataset_spec,
                                                               neuron_spec)
    return script_dict
def run_all_scripts(script_dict, slurm=True):
    """this is another function that those _sub files should call. this actually execute files

    With slurm=True each script is submitted via sbatch (after chdir into the
    global trash dir, where slurm drops its output files); otherwise each
    script is chmod'ed executable and run locally, blocking until it exits.
    """
    if slurm:
        trash_global = os.path.join(dir_dictionary['root'], 'trash')
        # sbatch writes slurm-*.out into the cwd
        os.chdir(trash_global)
    for script_name, script_content in script_dict.items():
        # make sure it will run.
        assert script_content.startswith('#!/usr/bin/env bash\n')
        # delete=False: the file must outlive this handle so run/sbatch can read it
        file_temp = NamedTemporaryFile(delete=False)
        file_temp.write(script_content.encode('utf-8'))
        file_temp.close()
        print(script_name, 'start')
        # print(script_content)
        # input('haha')
        if not slurm:
            os.chmod(file_temp.name, 0o755)
            # then run it.
            run(file_temp.name, check=True)
        else:
            run(['sbatch', file_temp.name], check=True)
        # NOTE(review): assumes sbatch copies the batch script at submission
        # time, so deleting it immediately is safe -- confirm for your slurm.
        os.remove(file_temp.name)
        print(script_name, 'done')
def get_neuron_fitting_pairs(n_neuron, step):
    """Partition [0, n_neuron) into consecutive half-open (start, end) ranges of width <= step."""
    assert n_neuron > 0
    return [(start, min(n_neuron, start + step))
            for start in range(0, n_neuron, step)]
def file_and_key_to_save(model_type: str, model_subtype: str,
                         neural_dataset_key, subset, percentage: int, seed: int,
                         start_neuron, end_neuron):
    """Return (output directory, hdf5 file name, hdf5 group key) for one fit.

    The directory nests model identifiers before the dataset spec, while the
    hdf5 key nests the dataset spec before the model identifiers.
    """
    assert model_type in validation_dict
    spec_parts = [neural_dataset_key, subset, str(percentage), str(seed)]
    dir_to_save = os.path.join(dir_dictionary['models'], model_type, model_subtype, *spec_parts)
    file_name_base = f'{start_neuron}_{end_neuron}.hdf5'
    key_to_save = '/'.join(spec_parts + [model_type, model_subtype])
    return dir_to_save, file_name_base, key_to_save
|
import sys
import csv
import StringIO
import json
# Usage: python process_kbp_data.py <input_file> <output_file>
input_filename = sys.argv[1]
output_filename = sys.argv[2]

# KBP relation name -> integer class id emitted in the JSON output.
mapping = {
    "per:country_of_death" : 0,
    "per:schools_attended" : 1,
    "per:other_family" : 2,
    "per:city_of_birth" : 3,
    "org:top_members/employees" : 4,
    "org:founded_by" : 5,
    "per:stateorprovinces_of_residence" : 6,
    "per:parents" : 7,
    "per:stateorprovince_of_death" : 8,
    "org:website" : 9,
    "per:stateorprovince_of_birth" : 10,
    "org:political/religious_affiliation" : 11,
    "per:age" : 12,
    "per:date_of_birth" : 13,
    "per:title" : 14,
    "per:member_of" : 15,
    "org:members" : 16,
    "org:city_of_headquarters" : 17,
    "per:origin" : 18,
    "per:alternate_names" : 19,
    "per:date_of_death" : 20,
    "per:children" : 21,
    "org:stateorprovince_of_headquarters" : 22,
    "org:member_of" : 23,
    "org:subsidiaries" : 24,
    "org:alternate_names" : 25,
    "per:religion" : 26,
    "per:spouse" : 27,
    "per:siblings" : 28,
    "per:cities_of_residence" : 29,
    "per:countries_of_residence" : 30,
    "org:country_of_headquarters" : 31,
    "org:number_of_employees/members" : 32,
    "per:cause_of_death" : 33,
    "per:charges" : 34,
    "org:shareholders" : 35,
    "per:country_of_birth" : 36,
    "per:employee_of" : 37,
    "org:dissolved" : 38,
    "org:parents" : 39,
    "org:founded" : 40,
    "per:city_of_death" : 41
}
# Process a single line of TSV and return the JSON string (historically printed to stdout)
def process_line(line):
    """Parse one tab-separated KBP example and return it as a JSON array string.

    Training rows carry three extra columns (18-20) with relation labels;
    test rows stop at column 17 and get an empty relations string.
    (Python 2 code: uses the StringIO module.)
    """
    strio = StringIO.StringIO(line)
    reader = csv.reader(strio, delimiter='\t', quoting=csv.QUOTE_NONE)
    # one input line == one CSV record; the function returns from inside the loop
    for row in reader:
        gloss = row[0]
        dependencies_conll = row[1]
        words = row[2]
        lemmas = row[3]
        pos_tags = row[4]
        ner_tags = row[5]
        subject_id = row[6]
        subject_entity = row[7]
        subject_link_score = row[8]
        subject_ner = row[9]
        object_id = row[10]
        object_entity = row[11]
        object_link_score = row[12]
        object_ner = row[13]
        subject_begin = int(row[14])
        subject_end = int(row[15])
        object_begin = int(row[16])
        object_end = int(row[17])
        known_relations = None
        incompatible_relations = None
        annotated_relation = None
        # training
        if len(row) > 18:
            known_relations = row[18]
            incompatible_relations = row[19]
            annotated_relation = row[20]
        relations = ''
        if len(row) > 18:
            # strip surrounding brackets, then map relation names to class ids
            known_relations = known_relations[1:-1]
            known_relations_list = known_relations.split(',')
            known_relations_list = [str(mapping[x]) for x in known_relations_list]
            relations = ','.join(known_relations_list) # 0,2,3 or something like that
        # words column is a bracketed, comma-separated list; protect quoted
        # literal commas before splitting
        words = words[1:-1]
        words = words.replace('\",\"', '~^~COMMA~^~')
        words = words.split(",")
        m1_begin = 0
        m2_begin = 0
        temp = []
        # Do we want to replace linked mention words ("Barack Obama") with their entity name (e.g. BarackObama)?
        replace_mention_words_with_entity_str = False
        for i, word in enumerate(words):
            cur = word
            if replace_mention_words_with_entity_str:
                if i == subject_begin:
                    cur = subject_entity
                    m1_begin = len(temp)
                if i == object_begin:
                    cur = object_entity
                    m2_begin = len(temp)
            # NOTE(review): interior mention tokens are dropped even when
            # replace_mention_words_with_entity_str is False, which shifts
            # positions in new_gloss relative to the emitted begin/end
            # indices -- confirm this is intended.
            if i > subject_begin and i < subject_end:
                continue
            if i > object_begin and i < object_end:
                continue
            if cur == '~^~COMMA~^~':
                cur = ','
            temp.append(cur)
        new_gloss = ' '.join(temp)
        if replace_mention_words_with_entity_str:
            # indices into the new gloss
            ind1 = min(m1_begin, m2_begin)
            ind2 = max(m1_begin, m2_begin)
            output = [new_gloss, str(ind1), str(ind2), relations]
            #print json.dumps(output)
            #sys.stdout.write(json.dumps(output))
            #sys.stdout.write("\n")
            #sys.stdout.flush()
            return json.dumps(output)
        else:
            output = [new_gloss, subject_entity, object_entity, str(subject_begin), str(subject_end), str(object_begin), str(object_end), relations]
            #sys.stdout.write(json.dumps(output))
            #sys.stdout.write("\n")
            #sys.stdout.flush()
            return json.dumps(output)
# Stream the TSV through process_line, writing one JSON array per line.
# Context managers guarantee both handles are closed even on error.
with open(input_filename, 'r') as f_in:
    with open(output_filename, 'w') as f_out:
        for line in f_in:
            # skip blank/whitespace-only lines: process_line returns None for
            # them and the original then crashed on f_out.write(None)
            if not line.strip():
                continue
            json_string = process_line(line)
            f_out.write(json_string)
            f_out.write('\n')
import gzip
import re
import csv
import pickle
import pprint
if __name__ == "__main__":
    # Yahoo R6 click-log days to process (one gzipped log per day).
    filenames = [
        "ydata-fp-td-clicks-v1_0.20090501",
        "ydata-fp-td-clicks-v1_0.20090502",
        "ydata-fp-td-clicks-v1_0.20090503",
        "ydata-fp-td-clicks-v1_0.20090504",
        "ydata-fp-td-clicks-v1_0.20090505",
        "ydata-fp-td-clicks-v1_0.20090506",
        "ydata-fp-td-clicks-v1_0.20090507",
        "ydata-fp-td-clicks-v1_0.20090508",
        "ydata-fp-td-clicks-v1_0.20090509",
        "ydata-fp-td-clicks-v1_0.20090510",
    ]
    for i in range(1, 11):
        # Get id_articles: pickled list of article ids observed on day i
        path = '/home/emanuele/GoogleDrive/Thompson Sampling/yahoo_dataset/'
        with open(path + 'day' + str(i) + '/id_articles.txt', "rb") as fp:
            id_articles = pickle.load(fp)
        path = "/media/emanuele/860EFA500EFA392F/Dataset Yahoo!/R6/"
        file_path = path + filenames[i-1] + ".gz"
        # context manager closes the gzip handle; `gz_file` avoids shadowing
        # the builtin `input` that the original used
        with gzip.GzipFile(file_path, 'rb') as gz_file:
            data = gz_file.read().decode("utf-8")
        print("Load file: " + filenames[i-1])
        features = {}
        for article_id in id_articles:
            # first occurrence of the article's 6-feature block; the 6th
            # feature is matched but not captured (constant bias term?)
            s = r'\|' + str(article_id) + r' \d\:(.{8}) \d\:(.{8}) \d\:(.{8}) \d\:(.{8}) \d\:(.{8}) \d\:.{8}'
            result = re.search(s, data)
            # identity test against the None singleton, not `!= None`
            if result is not None:
                features[article_id] = result.groups()
            else:
                features[article_id] = None
        print("Cut file: " + filenames[i-1])
        path = '/home/emanuele/GoogleDrive/Thompson Sampling/yahoo_dataset/'
        save_file_path = path + 'day' + str(i) + '.csv'
        # newline='' is required by the csv module to avoid blank rows on Windows
        with open(save_file_path, 'w', newline='') as csv_file:
            writer = csv.writer(csv_file)
            writer.writerow(['id_article', 'feature_1', 'feature_2', 'feature_3', 'feature_4', 'feature_5'])
            for key, value in features.items():
                row = [key]
                if value is not None:
                    row.extend(value)
                else:
                    row.append(None)
                writer.writerow(row)
|
from django.urls import path
from .views import register_user
from .views import login_user
from .views import edit_user
from .views import list_user
# Account endpoints: registration, login, per-user edit (by pk), and listing.
urlpatterns=[
    path('register/',register_user,name="register_user"),
    path('login/',login_user,name="login_user"),
    path('edit/<int:pk>/',edit_user,name="edit_user"),
    path('list/',list_user,name="list_user"),
]
from bs4 import BeautifulSoup
import requests
session = requests.Session()
def gather_spark_scala_html_files():
    """Download every org/apache/spark class page linked from the Scaladoc index.

    Pages are saved under download/ with '/' in the href flattened to '.'.
    Relies on the module globals `session` and `spark_scala_base_url`.
    """
    spark_scala_html = session.get('{}{}'.format(spark_scala_base_url, "index.html"))
    soup = BeautifulSoup(spark_scala_html.text, 'html.parser')
    for link in soup.find_all('a'):
        href = link.get('href')
        # anchors without an href attribute yield None; the original crashed
        # on `'...' in None`
        if not href:
            continue
        # only class/object entries carry a tplLink span; skip everything else
        # (the span's text was previously captured into an unused local)
        if link.find("span", class_="tplLink") is None:
            continue
        if 'org/apache/spark' in href:
            with open("download/{}".format(href.replace('/', '.')), 'wb') as outfile:
                outfile.write(session.get('{}{}'.format(spark_scala_base_url, href)).text.encode("utf-8"))
if __name__ == '__main__':
    # read the Scaladoc base URL; the context manager closes the handle
    # (the original leaked an open file object)
    with open('data.url') as url_file:
        spark_scala_base_url = url_file.read().strip()
    gather_spark_scala_html_files()
from __future__ import print_function
import sys
import Pyro4
# Python 2 compatibility: raw_input is py2's equivalent of input
if sys.version_info < (3, 0):
    input = raw_input

uri = input("enter async server object uri: ").strip()
proxy = Pyro4.Proxy(uri)

print("* normal call: (notice the delay)")
print("result=", proxy.divide(100, 5))

print("\n* async call:")
# from here on, calls on this proxy return a future-like result immediately
proxy._pyroAsync()
asyncresult = proxy.divide(100, 5)  # returns immediately
print("result value available?", asyncresult.ready)  # prints False because the server is still 'busy'
print("client can do other stuff here.")
print("getting result value...(will block until available)")
print("resultvalue=", asyncresult.value)  # blocks until the result is available

print("\n* async call, with normal call inbetween:")
normalproxy = Pyro4.Proxy(uri)
asyncresult = proxy.divide(100, 5)  # returns immediately
print("client does normal call: ", normalproxy.multiply(5, 20))
print("client does normal call: ", normalproxy.multiply(5, 30))
print("getting result value of async call...(will block until available)")
print("resultvalue=", asyncresult.value)  # blocks until the result is available

print("\n* async call with exception:")
asyncresult = proxy.divide(100, 0)  # will trigger a zero division error, 100//0
print("getting result value...")
try:
    # the remote exception is re-raised locally when .value is accessed
    value = asyncresult.value
    print("Weird, this shouldn't succeed!?... resultvalue=", value)
except ZeroDivisionError as x:
    print("got exception (expected):", repr(x))

print("\n* async call with timeout:")
asyncresult = proxy.divide(100, 5)
print("checking if ready within 2 seconds...")
ready = asyncresult.wait(2)  # wait for ready within 2 seconds but the server takes 3
print("status after waiting=", ready)  # should print False
print("checking again if ready within 5 seconds...(should be ok now)")
ready = asyncresult.wait(timeout=5)  # wait 5 seconds now (but server will be done within 1 more second)
print("status after waiting=", ready)
print("available=", asyncresult.ready)
print("resultvalue=", asyncresult.value)

print("\n* a few async calls at the same time:")
results = [
    proxy.divide(100, 7),
    proxy.divide(100, 6),
    proxy.divide(100, 5),
    proxy.divide(100, 4),
    proxy.divide(100, 3),
]
print("getting values...")
for result in results:
    # each .value blocks until that particular call has completed
    print("result=", result.value)
print("\ndone.")
|
import cv2
import numpy
# Threshold demo: binary threshold on BGR and on grayscale, plus adaptive Gaussian.
img = cv2.imread("4.jpeg")
# imread returns None instead of raising when the file is missing/unreadable
if img is None:
    raise IOError("could not read image '4.jpeg'")
retval, threshold = cv2.threshold(img, 12, 255, cv2.THRESH_BINARY)
grayimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
retval, threshold2 = cv2.threshold(grayimg, 12, 255, cv2.THRESH_BINARY)
# blockSize=115 (neighbourhood size), C=1 (constant subtracted from the weighted mean)
gaus = cv2.adaptiveThreshold(grayimg, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 1)
# (removed: a second, never-used grayscale conversion that duplicated grayimg)
cv2.imshow("orginal", img)
cv2.imshow("threshold2", threshold2)
cv2.imshow("gaus", gaus)
cv2.imshow("threshold", threshold)
cv2.waitKey(0)
cv2.destroyAllWindows()
# coding: utf-8

# In[10]:

#last digit of partial sum of Fibonacci number
# reads two integers m n from stdin (space-separated)
a = [int(x) for x in input().split()]
def Fn3(a):
    """Return (F_0 + F_1 + ... + F_a) reduced mod 60.

    Since the last digit of Fibonacci numbers repeats with period 60 (the
    Pisano period of 10) and 10 divides 60, the returned value has the same
    last digit as the true partial sum. Uses sum(F_0..F_n) = F(n+2) - 1.
    """
    # BUG FIX: the caller computes Fn3(m - 1); for m == 0 that is Fn3(-1),
    # which must be the empty sum 0 (the original returned -1, off by one).
    if a < 0:
        return 0
    if a <= 1:
        return a
    rem = a % 60
    if rem == 0:
        # F(62) mod 10 == 1, so the partial sum is divisible by 10 here
        return 0
    # iterate the recurrence mod 60; after the loop f1 == F(rem + 2) mod 60
    f0 = 0
    f1 = 1
    for i in range(2, rem + 3):
        f = (f0 + f1) % 60
        f0 = f1
        f1 = f
    return f1 - 1
# inclusive range sum F_m + ... + F_n as S(n) - S(m-1), reduced to the last digit
print ((Fn3(a[1])-Fn3(a[0]-1))%10)
|
from . import app, login_manager
from flask_restplus import Api, Resource
from flask import send_file, abort, request, Response, render_template
from werkzeug.datastructures import FileStorage
import logging
import os
import uuid
import subprocess
from OpenSSL.crypto import FILETYPE_PEM, Error as crypto_Error, load_certificate, dump_certificate
from .mycrypto import load_certificate_request
# Flask-RESTPlus API wrapping the app created in the package __init__.
api = Api(app)
app.logger.setLevel(logging.INFO)
log = logging.getLogger()

# request parsers shared by the resources below:
# 'req' -- an uploaded file (CSR or tls key)
file_arg = api.parser()
file_arg.add_argument('req', type=FileStorage, location='files', required=True)
# 'login' -- client certificate name, 'remote' -- OpenVPN server host
user_name_arg = api.parser()
user_name_arg.add_argument('login', type=str, location='args', required=True)
remote_host_arg = api.parser()
remote_host_arg.add_argument('remote', type=str, location='args', required=True)
def get_from_index():
    """Yield one record per line of the easy-rsa ``index.txt`` database.

    Each record is a dict with the certificate status ('type'), expiry,
    serial, file name, and the full subject name (which may contain spaces,
    hence the join over the remaining fields).
    """
    with open(os.path.join(app.config["EASYRSA_PKI"], "index.txt")) as index_fn:
        # iterate lazily instead of readlines(); split each line only once
        # (the original computed `parts` and then re-split the line twice)
        for line in index_fn:
            parts = line.split()
            # `cert_type` instead of `type`, which shadowed the builtin
            cert_type, expires, serial, file_name = parts[:4]
            subject_name = ' '.join(parts[4:])
            yield {
                'type': cert_type,
                'expires': expires,
                'serial': serial,
                'file_name': file_name,
                'subject_name': subject_name,
            }
def is_certificate_issued(subject_name):
    """Return True if index.txt already records a certificate with *subject_name*.

    The original fell off the end and returned None implicitly; an explicit
    bool keeps the truthiness contract while being self-documenting.
    """
    return any(index_rec['subject_name'] == subject_name
               for index_rec in get_from_index())
@api.route('/ca', methods=['GET'])
class Ca(Resource):
    """Serves the CA root certificate as a download."""
    @api.response(200, 'OK')
    @api.response(400, 'Bad Request')
    def get(self):
        # attachment_filename is the pre-Flask-2.0 parameter name (later: download_name)
        return send_file(os.path.join(app.config["EASYRSA_PKI"], "ca.crt"), attachment_filename='ca.crt')
@api.route('/server/sign', methods=['POST'])
class ServerSign(Resource):
    """Accepts a PEM CSR upload and returns the easy-rsa-signed server certificate."""
    @api.response(200, 'OK')
    @api.response(400, 'Bad Request')
    @api.response(409, 'Certificate already exist')
    @api.response(500, 'Server error')
    @api.expect(file_arg, validate=True)
    def post(self):
        args = file_arg.parse_args()
        # unique prefix so concurrent uploads with the same name cannot collide
        cert_uniq_prefix = str(uuid.uuid4())
        f_obj = args.req
        file_name = "server-%s-%s" % (cert_uniq_prefix, f_obj.filename)
        file_base = os.path.splitext(file_name)[0]
        f_obj.save(os.path.join(app.config["EASYRSA_PKI"], 'reqs', file_name))
        # re-read the saved CSR to validate it is parseable PEM
        with open(os.path.join(app.config["EASYRSA_PKI"], 'reqs', file_name)) as req_fn:
            try:
                req = load_certificate_request(FILETYPE_PEM, req_fn.read())
            except crypto_Error as e:
                return abort(400, "Certificate file '%s' could not be loaded: %s" % (file_name, e))
            except Exception as e:
                return abort(400, "Unknown error: '%s' %s" % (file_name, e))
        # subject_name comes from the project's mycrypto wrapper -- presumably
        # the full subject string as stored in index.txt; confirm in mycrypto.py
        subject = str(req.get_subject().subject_name)
        if is_certificate_issued(subject):
            return abort(409, "There is already a certificate for: %s" % subject)
        command = [
            "easyrsa",
            "sign-req",
            "server",
            file_base,
        ]
        process=subprocess.Popen(
            command,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        # easy-rsa prompts for interactive confirmation; answer "yes" on stdin
        stdoutdata, stderrdata = process.communicate(input=b"yes\n")
        rc = process.returncode
        if rc != 0:
            return abort(500, "RC: %s in command: %s" %(rc, ' '.join(command)))
        return send_file(os.path.join(app.config["EASYRSA_PKI"], "issued", "%s.crt" % file_base),
                         attachment_filename="%s.crt" % file_base)
@api.route('/server/tls', methods=['POST'])
class ServerTls(Resource):
    """Stores an uploaded OpenVPN tls-auth key for a given remote host."""
    @api.response(201, 'TLS key file stored')
    @api.response(409, 'TLS key file already exist')
    @api.response(400, 'Bad Request')
    @api.expect(remote_host_arg, file_arg, validate=True)
    def post(self):
        args = file_arg.parse_args()
        remote_host = request.args.get('remote')
        f_obj = args.req
        file_name = "%s.key" % remote_host
        # never overwrite an existing key -- issued client profiles embed it
        if os.path.isfile(os.path.join(app.config["EASYRSA_PKI"], 'tls', file_name)):
            return Response(status=409)
        f_obj.save(os.path.join(app.config["EASYRSA_PKI"], 'tls', file_name))
        return Response(status=201)
@api.route('/client/ovpn', methods=['GET'])
class GetClientFile(Resource):
    """Builds a ready-to-import .ovpn profile for *login* against *remote*.

    Issues the client key/cert via easy-rsa on first request (reused on later
    requests), then renders ovpn.tpl with key, cert, CA and tls-auth inlined.
    """
    @api.response(200, 'OK')
    @api.response(400, 'Bad Request')
    @api.response(500, 'Server error')
    @api.expect(remote_host_arg, user_name_arg, validate=True)
    def get(self):
        def _read_from_file(file_name):
            # helper: slurp a PEM/key file as text
            with open(file_name) as fn:
                return str(fn.read())
        def _validate_login(l):
            # 'ca' would collide with the CA's own key material
            if l == 'ca':
                abort(400, "Invalid value for login")
            return l
        user_login = _validate_login(request.args.get('login'))
        remote_host = request.args.get('remote')
        tls_key_file_name = "%s.key" % remote_host
        if not os.path.isfile(os.path.join(app.config["EASYRSA_PKI"], 'tls', tls_key_file_name)):
            return abort(400, "TLS auth key for server %s not found" % remote_host)
        # issue the client certificate only once; later requests reuse it
        if not os.path.isfile(os.path.join(app.config["EASYRSA_PKI"], "issued", "%s.crt" % user_login)):
            command = [
                "easyrsa",
                "build-client-full",
                user_login,
                "nopass",
            ]
            process = subprocess.Popen(
                command,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE
            )
            # give easy-rsa up to 60s to generate the key pair
            process.wait(60)
            rc = process.returncode
            if rc != 0:
                return abort(500, "RC: %s in command: %s" % (rc, ' '.join(command)))
        clent_cert = load_certificate(FILETYPE_PEM, _read_from_file(os.path.join(app.config["EASYRSA_PKI"], "issued", "%s.crt" % user_login)))
        r = Response(
            render_template(
                'ovpn.tpl',
                remote_server=remote_host,
                client_key=_read_from_file(os.path.join(app.config["EASYRSA_PKI"], "private", "%s.key" % user_login)),
                client_cert=dump_certificate(FILETYPE_PEM, clent_cert).decode(),
                ca=_read_from_file(os.path.join(app.config["EASYRSA_PKI"], "ca.crt")),
                tls_auth=_read_from_file(os.path.join(app.config["EASYRSA_PKI"], 'tls', tls_key_file_name)),
            ),
            mimetype='application/x-openvpn-profile',
        )
        r.headers["Content-Disposition"] = 'attachment; filename="client.ovpn"'
        return r
|
from django.contrib import admin
from dungeon.models import *
# Register the dungeon models with the default ModelAdmin.
for model in (Dungeon, Square, Character):
    admin.site.register(model, admin.ModelAdmin)
from socket import *
# Simple TCP server: accepts one client at a time and relays console input
# back to the client until the client disconnects.
server_socket = socket(AF_INET, SOCK_STREAM)
# allow immediate restart without "address already in use"
server_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
address = ("127.0.0.1", 4721)
server_socket.bind(address)
server_socket.listen(3)
while True:
    print("等待连接中。。")
    conn, addr = server_socket.accept()
    print("连接来自", addr)
    while True:
        data = conn.recv(1024)
        print("服务端接收的数据是", data)
        # empty bytes means the peer closed the connection
        if not data:
            break
        message = input("服务端发送的消息是")
        conn.send(bytes(message, 'utf-8'))
    # BUG FIX: close the finished connection before accepting the next one
    # (the original leaked one socket per client)
    conn.close()
|
from . import SentenceEvaluator, SimilarityFunction
from torch.utils.data import DataLoader
import torch
import logging
from tqdm import tqdm
from ..util import batch_to_device
import os
import csv
from sklearn.metrics.pairwise import paired_cosine_distances, paired_euclidean_distances, paired_manhattan_distances
from scipy.stats import pearsonr, spearmanr
import numpy as np
import scipy.spatial
class TranslationEvaluator(SentenceEvaluator):
    """
    Given two sets of sentences in different languages, e.g. (en_1, en_2, en_3...) and (fr_1, fr_2, fr_3, ...),
    and assuming that en_i = fr_i.
    Checks if vec(en_i) has the highest similarity to vec(fr_i). Computes the accuracy in both directions
    """
    def __init__(self, dataloader: DataLoader, main_similarity: SimilarityFunction = None, name: str = '', show_progress_bar: bool = None):
        """
        Constructs an evaluator based for the dataset

        The labels need to indicate the similarity between the sentences.

        :param dataloader:
            the data for the evaluation
        :param main_similarity:
            the similarity metric that will be used for the returned score
        :param name:
            optional tag embedded in the results CSV file name
        :param show_progress_bar:
            show a tqdm bar; defaults to on when the log level is INFO/DEBUG
        """
        self.dataloader = dataloader
        self.main_similarity = main_similarity
        # self.name keeps the raw value; only the csv file name gets the "_" prefix
        self.name = name
        if name:
            name = "_"+name
        if show_progress_bar is None:
            show_progress_bar = (logging.getLogger().getEffectiveLevel() == logging.INFO or logging.getLogger().getEffectiveLevel() == logging.DEBUG)
        self.show_progress_bar = show_progress_bar
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.csv_file = "translation_evaluation"+name+"_results.csv"
        self.csv_headers = ["epoch", "steps", "src2trg", "trg2src"]

    def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
        """Embed both sides, score nearest-neighbour retrieval in both
        directions, optionally append a CSV row, and return the mean of the
        two accuracies."""
        model.eval()
        embeddings1 = []
        embeddings2 = []
        labels = []
        if epoch != -1:
            if steps == -1:
                out_txt = " after epoch {}:".format(epoch)
            else:
                out_txt = " in epoch {} after {} steps:".format(epoch, steps)
        else:
            out_txt = ":"
        logging.info("Evaluation the model on "+self.name+" dataset"+out_txt)
        self.dataloader.collate_fn = model.smart_batching_collate
        iterator = self.dataloader
        if self.show_progress_bar:
            iterator = tqdm(iterator, desc="Convert Evaluating")
        for step, batch in enumerate(iterator):
            features, label_ids = batch_to_device(batch, self.device)
            with torch.no_grad():
                # each batch carries a (source, target) feature pair
                emb1, emb2 = [model(sent_features)['sentence_embedding'].to("cpu").numpy() for sent_features in features]
            # NOTE(review): `labels` is accumulated but never used afterwards
            labels.extend(label_ids.to("cpu").numpy())
            embeddings1.extend(emb1)
            embeddings2.extend(emb2)
        # negate cosine distance -> similarity; entry [i][j] = sim(src_i, trg_j)
        distances = -scipy.spatial.distance.cdist(embeddings1, embeddings2, "cosine")
        correct_src2trg = 0
        correct_trg2src = 0
        for i in range(len(distances)):
            max_idx = np.argmax(distances[i])
            if i == max_idx:
                correct_src2trg += 1
        # transpose to score retrieval in the opposite direction
        distances = distances.T
        for i in range(len(distances)):
            max_idx = np.argmax(distances[i])
            if i == max_idx:
                correct_trg2src += 1
        # the matrix is square, so len(distances) is the sentence count either way
        acc_src2trg = correct_src2trg / len(distances)
        acc_trg2src = correct_trg2src / len(distances)
        logging.info("Accuracy src2trg: {:.2f}".format(acc_src2trg*100))
        logging.info("Accuracy trg2src: {:.2f}".format(acc_trg2src*100))
        if output_path is not None:
            # append to an existing CSV; write headers only on first creation
            csv_path = os.path.join(output_path, self.csv_file)
            output_file_exists = os.path.isfile(csv_path)
            with open(csv_path, mode="a" if output_file_exists else 'w', encoding="utf-8") as f:
                writer = csv.writer(f)
                if not output_file_exists:
                    writer.writerow(self.csv_headers)
                writer.writerow([epoch, steps, acc_src2trg, acc_trg2src])
        return (acc_src2trg+acc_trg2src)/2
|
# Generated by Django 3.0.7 on 2021-01-10 09:01
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: relaxes jobad.max_salary and jobad.min_salary
    # to nullable CharField(max_length=100).

    dependencies = [
        ('jobads', '0021_auto_20210110_0848'),
    ]

    operations = [
        migrations.AlterField(
            model_name='jobad',
            name='max_salary',
            field=models.CharField(max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='jobad',
            name='min_salary',
            field=models.CharField(max_length=100, null=True),
        ),
    ]
|
import threading
import numpy as np
from lidar import Lidar
from gps import GPS
from cameras import Cameras
# py vesc import tty.usbserial
# load lidar
#lidar = Lidar('/dev/tty.SLAB_USBtoUART')
#load GPS
#gps = GPS('/dev/tty.usbserial-1A1330', 4800, 5)
# Camera device indices (1, 2, 0) -- presumably three capture devices; TODO
# confirm the mapping against cameras.py
cameras = Cameras(1, 2, 0)
# start() presumably begins capture (possibly on a background thread) -- verify
cameras.start()
#t1 = threading.Thread(target=lidar.start)
#t2 = threading.Thread(target=gps.start)
# starting thread 1
#t1.start()
# starting thread 2
#t2.start()
# wait until thread 1 is completely executed
#t1.join()
# wait until thread 2 is completely executed
#t2.join()
"""
Project:DeepRating
Author: Raphael Abbou
Version: python3
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Monthly records at/after this YYYYMM are past the sample horizon.
# NOTE(review): defined but not referenced in this file -- the 2016 split
# below is hard-coded; confirm which one is authoritative.
CUT_OFF_DATE = "201803"

# Column names for the pipe-separated origination file (no header row).
orig_col = ['fico','dt_first_pi','flag_fthb','dt_matr','cd_msa',"mi_pct",'cnt_units','occpy_sts',
            'cltv','dti','orig_upb','ltv','int_rt','channel','ppmt_pnlty','prod_type','st',
            'prop_type','zipcode','loan_purpose', 'orig_loan_term','cnt_borr','seller_name',
            'servicer_name', 'flag_sc']

# loan id (column 19) becomes the index
orig_data = pd.read_csv('sample_orig_2016.txt', header = None, sep = '|', index_col = 19)
orig_data.columns = orig_col

# Reference: column names of the monthly servicing file (kept for documentation).
#svcg_cols = ['id_loan','svcg_cycle','current_upb','delq_sts','loan_age','mths_remng', 'repch_flag',\
#             'flag_mod', 'cd_zero_bal', 'dt_zero_bal','current_int_rt','non_int_brng_upb',\
#             'dt_lst_pi','mi_recoveries', 'net_sale_proceeds','non_mi_recoveries','expenses','legal_costs',\
#             'maint_pres_costs','taxes_ins_costs','misc_costs','actual_loss', 'modcost', 'stepmod_ind', 'dpm_ind']
def get_training_output(mth_data):
    ''' Outputs management

    Arguments:
    mth_data -- raw data frame of monthly data for all loans ID

    Returns:
    Y_train, training outputs of shape (C = 7: number of classes, m: number of loans ID)

    Notes:
    9th column is the zero balance type
    10th column is the zero balance effective date
    '''
    ####Getting Training Ouputs
    #Selecting the data with monthly cycle strictly before 2016
    train_set = mth_data[np.around(mth_data[1]/100, decimals=1) < 2016]
    #Getting the list of defaulting loans IDs, by looking if the defaulting date is empty or not
    dflt_loans = train_set[train_set[9].notnull()][0]
    #Getting the associated zero balance code, when the defaulting date is not empty
    dflt_code = np.around(train_set[train_set[9].notnull()][8])
    #Non defaulting loans IDs (set membership: O(n) instead of the O(n^2) list scan)
    dflt_ids = set(dflt_loans.values)
    non_dflt_loans = [loan for loan in train_set[0].drop_duplicates().values if loan not in dflt_ids]
    non_dflt_loans = pd.DataFrame(data=[], index=non_dflt_loans)
    Y_train = pd.DataFrame(data=dflt_code.values, index=dflt_loans)
    #We add the code 0 for a non-defaulting loan over the time priod considered
    # (DataFrame.append was removed in pandas 2.0; concat is the supported spelling)
    Y_train = pd.concat([Y_train, non_dflt_loans]).fillna(0)
    Y_train.columns = ["outputs"]
    return Y_train
def get_test_output(mth_data):
    ''' Outputs management

    Arguments:
    mth_data -- raw data frame of monthly data for all loans ID

    Returns:
    Y_test, test outputs of shape (C = 7: number of classes, m: number of loans ID)

    Notes:
    9th column is the zero balance type
    10th column is the zero balance effective date
    '''
    ####Getting Test Ouputs
    #Selecting the data with monthly cycle from 2016 onward
    test_set = mth_data[np.around(mth_data[1]/100, decimals=1) >= 2016]
    #Getting the list of defaulting loans IDs
    dflt_loans = test_set[test_set[9].notnull()][0]
    #Getting the associated zero balance code
    dflt_code = np.around(test_set[test_set[9].notnull()][8])
    # set membership: O(n) instead of the O(n^2) list scan
    dflt_ids = set(dflt_loans.values)
    non_dflt_loans = [loan for loan in test_set[0].drop_duplicates().values if loan not in dflt_ids]
    non_dflt_loans = pd.DataFrame(data=[], index=non_dflt_loans)
    Y_test = pd.DataFrame(data=dflt_code.values, index=dflt_loans)
    #We add the code 0 for a non-defaulting loan over the time priod considered
    # (DataFrame.append was removed in pandas 2.0; concat is the supported spelling)
    Y_test = pd.concat([Y_test, non_dflt_loans]).fillna(0)
    Y_test.columns = ["outputs"]
    return Y_test
def formatting_ouput(Y):
    # Placeholder for output post-formatting; never implemented.
    # (Name typo "ouput" kept -- renaming would break any external callers.)
    pass
def aggregate(year):
    """Load and concatenate the quarterly performance files for *year*.

    Quarter files that are missing on disk are skipped silently (not every
    year ships all four quarters).
    """
    frames = [pd.read_csv("historical_data1_time_Q" + str(1) + str(year) + ".txt", header = None, sep = '|')]
    for quarter in range(2, 5):
        try:
            frames.append(pd.read_csv("historical_data1_time_Q" + str(quarter) + str(year) + ".txt", header = None, sep = '|'))
        except FileNotFoundError:
            # quarter file absent for this year -- skip it
            pass
    # BUG FIX: the original did `data.append(...)` and discarded the returned
    # frame (DataFrame.append was never in-place), so quarters 2-4 were lost.
    # concat also survives pandas 2.0, where DataFrame.append was removed.
    return pd.concat(frames)
def switch_to_binary(Y_train):
    """Collapse the multi-class zero-balance codes into a 0/1 default flag."""
    binary_flags = (Y_train['outputs'] != 0).astype(int)
    return pd.DataFrame(binary_flags, columns=['outputs'])
def get_training_output_binary(mth_data):
    """Training outputs collapsed to a binary default indicator."""
    return switch_to_binary(get_training_output(mth_data))
def get_test_output_binary(mth_data):
    """Test outputs collapsed to a binary default indicator."""
    return switch_to_binary(get_test_output(mth_data))
if __name__ == "__main__":
    # year = 2016
    # #mth_data = pd.read_csv('sample_svcg_2016.txt', header = None, sep = '|')
    # mth_data = aggregate(year)
    # Y_train = get_training_output(mth_data)
    # #print(Y_train.head())
    year = 2017
    mth_data = aggregate(year)
    Y_train = get_test_output(mth_data)
    # plot the class distribution of zero-balance codes
    hist = Y_train.hist()
    for x in hist[0]:
        x.set_xlabel("Default Types")
        x.set_ylabel("Number of Loans")
        x.set_title("Class repartition for " + str(year))
    Y_train = switch_to_binary(Y_train)
    # counts of defaulted vs non-defaulted loans (inspected interactively)
    dft = Y_train[Y_train['outputs'] == 1].count()
    ndft = Y_train[Y_train['outputs'] == 0].count()
import cStringIO
import csv
import re
from google.appengine.api import users
from google.appengine.ext import ndb
from models import Student, GradeEntry
import utils
import webapp2
class BulkStudentImportAction(webapp2.RequestHandler):
    """Handles the bulk roster-import form POST (GAE / webapp2, Python 2)."""
    def post(self):
        user = users.get_current_user()
        # non-empty form value means the "remove all" checkbox was ticked
        if len(self.request.get("remove_all_students")) > 0:
            utils.remove_all_students(user)
        imported_file = self.request.params["bulk-import-file"].value
        process_roster(imported_file, user)
        # bounce back to the page the form was submitted from
        self.redirect(self.request.referer)
def process_roster(imported_file, user):
    """Parse an uploaded CSV roster and create one Student entity per row.

    Header names are normalized (non-alphanumerics stripped, lowercased)
    before lookup. Raises Exception("Invalid CSV file") when the content does
    not sniff as CSV. (Python 2 code: uses cStringIO.)
    """
    try:
        csv_file = cStringIO.StringIO(imported_file)
        # Read the first kb to ensure the file is a valid CSV file.
        csv.Sniffer().sniff(csv_file.read(1024), ",")
        csv_file.seek(0)
        reader = csv.DictReader(csv_file, dialect="excel")
    except:
        # NOTE(review): the bare except also masks unexpected errors --
        # consider narrowing to csv.Error.
        raise Exception("Invalid CSV file")
    reader.fieldnames = [re.compile('[\W_]+', flags=re.UNICODE).sub('', field).lower()
                         for field in reader.fieldnames]
    for row in reader:
        rose_username = row.get("username", None)
        # the username doubles as the datastore entity id
        new_student = Student(parent=utils.get_parent_key(user),
                              id=rose_username,
                              first_name=row.get("first", None),
                              last_name=row.get("last", None),
                              team=row.get("team", None),
                              rose_username=rose_username)
        new_student.put()
class ExportCsvAction(webapp2.RequestHandler):
    """Streams a CSV grade export of the selected columns/assignments."""
    def post(self):
        user = users.get_current_user()
        # a non-empty form value means the column was requested
        export_student_name = len(self.request.get("student_name")) > 0
        export_rose_username = len(self.request.get("rose_username")) > 0
        export_team = len(self.request.get("team")) > 0
        urlsafe_assignment_keys = self.request.get_all("assignment_keys[]")
        csv_data = get_csv_export_lists(user, export_student_name, export_rose_username,
                                        export_team, urlsafe_assignment_keys)
        self.response.headers['Content-Type'] = 'application/csv'
        writer = csv.writer(self.response.out)
        for csv_row in csv_data:
            writer.writerow(csv_row)
def get_csv_export_lists(user, export_student_name, export_rose_username,
                         export_team, urlsafe_assignment_keys):
    """Build the CSV export as a list of rows (each row a list of cells).

    Row 0 is the header: optional identity columns (name / username / team)
    followed by one column per requested assignment.  Assignment cells start
    as "-" placeholders and are filled in from GradeEntry records; rows whose
    every assignment cell is still "-" are dropped at the end.
    """
    table_data = []
    student_row_index_map = {}  # Map of student_key to row in the table_data
    assignment_col_index_map = {}  # Map of assignment_key to column in the table_data
    header_row = []
    table_data.append(header_row)
    num_columns = 0
    # Student Header
    if export_student_name:
        header_row.append("First")
        header_row.append("Last")
        num_columns += 2
    if export_rose_username:
        header_row.append("Username")
        num_columns += 1
    if export_team:
        header_row.append("Team")
        num_columns += 1
    # Assignment Prep
    assignment_keys = []
    for urlsafe_assignment_key in urlsafe_assignment_keys:
        assignment_keys.append(ndb.Key(urlsafe=urlsafe_assignment_key))
    assignments = ndb.get_multi(assignment_keys)
    # NOTE(review): get_multi yields None for deleted keys; the sort key would
    # raise on None — confirm the submitted keys are always live.
    assignments.sort(key=lambda assignment: assignment.name)
    num_assignments_found = 0
    for assignment in assignments:
        if assignment:
            header_row.append(assignment.name)
            assignment_col_index_map[assignment.key] = num_columns
            num_columns += 1
            num_assignments_found += 1
    # Student Data + assignment placeholders
    num_rows = 1
    students = Student.query(ancestor=utils.get_parent_key(user)).order(Student.rose_username)
    for student in students:
        current_row = []
        if export_student_name:
            current_row.append(student.first_name)
            current_row.append(student.last_name)
        if export_rose_username:
            current_row.append(student.rose_username)
        if export_team:
            current_row.append(student.team)
        for i in range(num_assignments_found):
            current_row.append("-")  # placeholder until a grade fills it
        table_data.append(current_row)
        student_row_index_map[student.key] = num_rows
        num_rows += 1
    # Add the grades
    grade_query = GradeEntry.query(ancestor=utils.get_parent_key(user))
    for grade in grade_query:
        if grade.student_key in student_row_index_map and grade.assignment_key in assignment_col_index_map:
            row = student_row_index_map[grade.student_key]
            col = assignment_col_index_map[grade.assignment_key]
            table_data[row][col] = grade.score
    # Removing rows with no grades (allows for data merging)
    # NOTE(review): when zero assignments are selected, count("-") == 0 ==
    # num_assignments_found and every student row is removed — confirm intended.
    for row_index in reversed(range(1, num_rows)):
        row = table_data[row_index]
        blank_grades = row.count("-")
        if blank_grades == num_assignments_found:
            table_data.remove(row)
    return table_data
|
from pageObjects.LoginPage import LoginPage
from utilities.BaseTest import BaseTest
from utilities.TestData import TestData
class Test_001_Login(BaseTest):
    """Smoke tests for the nopCommerce admin login page."""
    def test_homepage_title(self):
        """Verify the login page title; save a screenshot on mismatch."""
        self.logger = self.get_logger()  # get_logger is designed in base test class
        self.logger.info('******************Test_001_Login****************')
        self.logger.info('*************verifying home page title***********')
        actual_title = self.driver.title
        if actual_title == 'Your store. Login':
            assert True
            self.logger.info('*************home page title test is passed')
        else:
            # NOTE(review): this test uses '../Reports/...' while test_login
            # uses '..\\Reports\\...' — confirm which path style the runner expects.
            self.driver.save_screenshot('../Reports/test_homePageTitle.png')
            self.logger.error('***************home page title test is failed*************')
            assert False
    def test_login(self):
        """Log in with TestData credentials and verify the dashboard title."""
        self.logger = self.get_logger()
        self.logger.info('************verifying login test')
        # create an object of LoginPage Class and access its methods
        self.login_page = LoginPage(self.driver)
        self.login_page.do_login(TestData.USERNAME, TestData.PASSWORD)
        actual_title = self.driver.title
        if actual_title == 'Dashboard / nopCommerce administration':
            assert True
            self.logger.info('***********login test is passed************')
        else:
            self.driver.save_screenshot('..\\Reports\\' + 'test_loginPageTitle.png')
            self.logger.error('***************login test is failed**************')
            assert False
|
from typing import List, Union
import sys
from collections import deque
class BrainFuckMachine:
    """A small Brainfuck interpreter.

    The data tape (``array``) grows on demand to the right.  ``stack`` holds
    the instruction-pointer positions of open ``[`` brackets; ``nf`` is set
    while skipping forward over a loop whose entry condition is zero.
    Program output accumulates in ``_stdout`` and, when ``do_print`` is true,
    is echoed to the real stdout as it is produced.
    """
    array: List          # the data tape of integer cells
    _pointer: int        # index of the current cell
    code: str            # program source being executed
    stack: deque         # saved ip positions of '[' brackets
    ip: int              # instruction pointer into ``code``
    nf: bool             # True while skipping a zero-condition loop body
    stdin: str           # buffered input characters
    stdin_p: int         # read position into ``stdin``
    stdout: str          # NOTE(review): instances actually store ``_stdout``
    do_print: bool       # echo output to the real stdout as it appears
    def __init__(self, do_print: bool = True):
        """Create a machine with a single zeroed tape cell and no program."""
        self.array = [0]
        self._pointer = 0
        self.code = ''
        self.stack = deque()
        self.ip = 0
        self.nf = False
        self.stdin = ''
        self.stdin_p = 0
        self._stdout = ''
        self.do_print = do_print
    @property
    def pointer(self):
        """Current tape index."""
        return self._pointer
    @pointer.setter
    def pointer(self, p):
        # Grow the tape by one cell when the pointer steps past the right end.
        # NOTE(review): moving left of 0 indexes from the tape's end (Python
        # negative indexing) — confirm that is the intended semantics.
        if p == len(self.array):
            self.array.append(0)
        self._pointer = p
    @property
    def stdout(self):
        """All program output produced so far."""
        return self._stdout
    @stdout.setter
    def stdout(self, n):
        # Echo only the newly appended suffix so output appears incrementally.
        if self.do_print:
            print(n[len(self.stdout):], end='')
        self._stdout = n
    def p_inc(self):
        """'>' — move the pointer right (growing the tape if needed)."""
        self.pointer += 1
    def p_dec(self):
        """'<' — move the pointer left."""
        self.pointer = self.pointer - 1
    def inc(self):
        """'+' — increment the current cell."""
        self.array[self.pointer] += 1
    def dec(self):
        """'-' — decrement the current cell."""
        self.array[self.pointer] -= 1
    def put(self):
        """'.' — append the current cell, as a character, to stdout."""
        c = chr(self.array[self.pointer])
        self.stdout += c
    def read(self):
        """',' — read one character into the current cell, prompting when
        the buffered input is exhausted (empty input counts as newline)."""
        if len(self.stdin) == self.stdin_p:
            a = input()
            if not a:
                a = '\n'
            self.stdin += a
        self.array[self.pointer] = ord(self.stdin[self.stdin_p])
        self.stdin_p += 1
    def jz_e(self):
        """'[' — remember this position; if the cell is zero, start skipping."""
        self.stack.append(self.ip)
        if not self.array[self.pointer]:
            self.nf = True
    def jz_s(self):
        """']' — jump back to the matching '[' while the cell is non-zero."""
        ip = self.stack.pop()
        self.nf = False
        if self.array[self.pointer]:
            self.ip = ip - 1
    def step(self) -> Union[bool, None]:
        """Execute one instruction.  Returns True once the program is done."""
        if self.ip >= len(self.code):
            return True
        c = self.code[self.ip]
        try:
            {
                '>': self.p_inc,
                '<': self.p_dec,
                '+': self.inc,
                '-': self.dec,
                '.': self.put,
                ',': self.read,
                '[': self.jz_e,
                ']': self.jz_s,
            }[c]()
        except KeyError:
            pass  # any non-command character is a comment in Brainfuck
        self.ip += 1
        # Skip comment characters, and (while nf is set) everything up to
        # the ']' that closes the zero-condition loop being skipped.
        try:
            while (self.nf and self.code[self.ip] != ']') or (self.code[self.ip] not in '><+-.,[]'):
                self.ip += 1
        except IndexError:
            return True
    def run(self, code):
        """Run *code* from the beginning until it terminates."""
        self.code = code
        self.ip = 0
        while not self.step():
            pass
if __name__ == '__main__':
    # Script entry point: run the Brainfuck program named on the command line.
    if len(sys.argv) == 1:
        sys.stderr.write('usage: python machine.py [filename] [-d --debug]')
        exit(1)
    filename = sys.argv[1]
    machine = BrainFuckMachine()
    # Use a context manager so the source file is closed promptly instead of
    # leaking the handle until interpreter shutdown.
    with open(filename) as source:
        program = source.read()
    machine.run(program)
|
class CraftyAPIRoutes(object):
    """URL paths for the Crafty controller's own v1 REST API."""
    HOST_STATS = '/api/v1/host_stats'
    SERVER_STATS = '/api/v1/server_stats'
    ADD_USER = '/api/v1/crafty/add_user'
    DEL_USER = '/api/v1/crafty/del_user'
    GET_LOGS = '/api/v1/crafty/get_logs'
    SEARCH_LOGS = '/api/v1/crafty/search_logs'
class MCAPIRoutes(object):
    """URL paths for the per-server (Minecraft) v1 REST API."""
    SEND_CMD = '/api/v1/server/send_command'
    GET_LOGS = '/api/v1/server/get_logs'
    SEARCH_LOGS = '/api/v1/server/search_logs'
    FORCE_BACKUP = '/api/v1/server/force_backup'
    START = '/api/v1/server/start'
    STOP = '/api/v1/server/stop'
    RESTART = '/api/v1/server/restart'
    LIST = '/api/v1/list_servers'
|
import os
from selenium import webdriver
import telebot
from telebot import types
from flask import Flask, request
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from config import *
import pickle
import schedule
import time
import random
import requests
import threading
# @siskiexpert
# -590852422 test group 2
# -506817497 test group 3
# -1001464385948 group_experts
bot = telebot.TeleBot(TOKEN)
server = Flask(__name__)
# Headless Chrome configured for a container/PaaS environment.
chrome_options = webdriver.ChromeOptions()
chrome_options.binary_location = os.environ.get('GOOGLE_CHROME_BIN')
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
# BUG FIX: the real Chrome switch is '--disable-dev-shm-usage'; the previous
# '--disable-dev-sh-usage' was an unknown flag and silently ignored.
chrome_options.add_argument('--disable-dev-shm-usage')
driver = webdriver.Chrome(executable_path=os.environ.get('CHROMEDRIVER_PATH'), chrome_options=chrome_options)
driver.implicitly_wait(4)
# Telegram chat ids (negative ids are group chats; see comments above).
group2 = -590852422
group3 = -506817497
group_experts = -1001464385948
launch = True    # keeps bot_schedule()'s loop running; cleared by /stop
link_girls = []  # cache of scraped picture URLs, refilled periodically
@bot.message_handler(commands=['start', 'help'])
def send_welcome(message):
    """/start, /help: ensure the link cache is filled, then show the menu."""
    # bot.send_message(message.from_user.id, "Bot telegram_photo_phrase works")
    len_girls = len(link_girls)
    if len_girls == 0:
        get_girl_links()
    bot.send_message(message.from_user.id, text='Хочешь сиськи?', reply_markup=x_keyboard())
    # bot.send_message(message.from_user.id, message.from_user.id)
@bot.message_handler(commands=['send'])
def send_girl(message):
    """/send: arm the scheduler flag and enter the scheduling loop.

    NOTE(review): three handlers in this module share the name ``send_girl``;
    each decorator registered its function before the name was rebound, so all
    three work, but distinct names would aid debugging.
    """
    bot.send_message(message.from_user.id, "Send Bot works")
    start_girl(message)
    bot_schedule()
@bot.message_handler(commands=['send2'])
def send_girl(message):
    """/send2: post one picture + phrase to the experts group immediately."""
    bot.send_message(message.from_user.id, "Send2 Bot works")
    len_girls = len(link_girls)
    if len_girls == 0:
        get_girl_links()
    girl_to_group_expert()
@bot.message_handler(commands=['send3'])
def send_girl(message):
    """/send3: post one picture + phrase to group2 immediately."""
    bot.send_message(message.from_user.id, "Send3 Bot works")
    len_girls = len(link_girls)
    if len_girls == 0:
        get_girl_links()
    girl_once_to_group2()
@bot.message_handler(commands=['stop'])
def stop_girl(message):
    """/stop: clear the flag that keeps bot_schedule()'s loop running."""
    global launch
    launch = False
    bot.send_message(message.from_user.id, "STOP is activated")
@bot.message_handler(content_types=["text"])
def repeat_all_messages(message):
    """Catch-all text handler: reply with a random canned phrase."""
    for_message = ['пока я туповат и никуя не понимаю', 'а вот и никуя', 'я просто эксперт по сиськам', 'да ладно?', 'отстань от меня человек', 'ой, всё', 'я в танке и ниипет', 'я устал, сегодня было много сисек']
    bot_message_random = random.randrange(0, len(for_message))
    bot_message = for_message[bot_message_random].capitalize()
    bot.send_message(message.chat.id, bot_message)
def x_keyboard():
    """Build the three-button inline keyboard shown by /start."""
    keyboard = types.InlineKeyboardMarkup()
    buttons = [
        ('Хочу', 'Want'),
        ('Оочень хочу', 'Very want'),
        ('Не надо, сегодня не стоит', 'Cancel'),
    ]
    # One add() per button keeps each option on its own keyboard row.
    for caption, payload in buttons:
        keyboard.add(types.InlineKeyboardButton(text=caption, callback_data=payload))
    return keyboard
@bot.callback_query_handler(func=lambda call: True)
def callback_worker(call):
    """Dispatch inline-keyboard presses: both affirmative buttons send a
    picture; Cancel sends a goodbye."""
    if call.data in ("Want", "Very want"):
        girl_once(call.message)
    elif call.data == "Cancel":
        bot.send_message(call.message.chat.id, 'Пока пока, заходи еще ;)')
# @bot.message_handler(func=lambda message: True)
# def send_girl_once(message):
#
# bot.send_message(message.from_user.id, "Please wait, I am looking for sisechki")
# girl_once(message)
def ping():
    """Scheduled heartbeat: post 'ping' to group2."""
    bot.send_message(group2, 'ping')
def start_girl(message):
    """Re-arm the scheduler flag and make sure the link cache is filled."""
    global launch
    launch = True
    bot.send_message(message.from_user.id, "Start is activated")
    len_girls = len(link_girls)
    if len_girls == 0:
        get_girl_links()
def get_girl_links():
    """Scrape a random listing page and refill the link_girls cache.

    Picks a random page number, retries until the page answers HTTP 200,
    then uses the shared Selenium driver to collect image URLs, keeping
    only those that are reachable and rendered wider than 50 px.
    """
    page_random = random.randrange(1, 10)
    URL = 'https://xxx.pics/category/cute/' + str(page_random) + '/'
    page = requests.get(URL)
    # Keep drawing random pages until one responds successfully.
    if page.status_code != 200:
        while page.status_code != 200:
            page_random = random.randrange(1, 10)
            URL = 'https://xxx.pics/category/cute/' + str(page_random) + '/'
            page = requests.get(URL)
    driver.get(URL)
    wait = WebDriverWait(driver, 10)
    all_pict_link = wait.until(
        expected_conditions.visibility_of_all_elements_located((By.CLASS_NAME, 'pcsrt-th-lightgallery-item')))
    # all_pict = len(path_to_pict)
    # Replace the cache contents in place (other code holds this list).
    link_girls.clear()
    for item in all_pict_link:
        pict = item.get_attribute('data-src')
        page = requests.get(pict)
        pict_width = item.size["width"]
        if page.status_code == 200 and pict_width > 50:
            link_girls.append(pict)
        else:
            pass
    # len_ = len(link_girls)
    # bot.send_message(group2, f'===================== {len_}')
def phrase():
    """Compose a random greeting phrase addressed to the whole group."""
    guys = ['парни', 'ребятушки', 'братушки', 'ребятки', 'мужики', 'перцы', 'эксперты', 'экспертное сообщество',
            'мои герои', 'сладкие мои', 'chicos', 'sexo masculino']
    greeting = ['здарова', 'хая', 'салам', 'салют', 'здравствуйте', 'шалом', 'бонжур', 'хэллоу', 'хей',
                'буэнос диас',
                'хола', 'доброго дня', 'добрый день', 'ассалам алейкум', 'hola', 'prosperadlo', 'hola mis queridos']
    phrases = ['как вам мои чики?', 'попробуйте меня', 'какая я вкусненькая', 'посмотрите на мои вишенки',
               'как вам мои изюминки?', 'я вся горю', 'початимся?', 'пообщаемся?',
               'ох, не смотри на меня так', 'мои булочки готовы для вас', 'рада тут побывать',
               'всегда готова, жду вас тут', 'порадуйте меня чем нибудь', 'я секси, да?', 'я конфетка, да?',
               'сейчас позову подружек не хуже меня', 'сегодня здесь будет жарко', 'я вся горю',
               'классный денек сегодня, да?', 'погодка не фонтан, согрейте меня', 'всем хорошего дня!',
               'всем классного дня!', 'заходите поглядеть на меня еще', 'хватит палитьтся на мои титьки',
               'как я вам?', 'оцените меня экспертно', 'не сломайте об меня глаза', 'сиськи заказывали?',
               'как вам мои шары?']
    emoji = ['$)', ':)', ';)', 'oO', ':**', ' ', '..', 'уух', 'мм;)']
    # Draw one entry from each pool and stitch them into a sentence.
    guys_random = random.randrange(0, len(guys))
    greeting_random = random.randrange(0, len(greeting))
    phrases_random = random.randrange(0, len(phrases))
    emoji_random = random.randrange(0, len(emoji))
    willing_phrase = f'{guys[guys_random].capitalize()} {greeting[greeting_random]}! {phrases[phrases_random].capitalize()} {emoji[emoji_random]}'
    return willing_phrase
def phrase_once():
    """Compose a random greeting phrase addressed to a single user."""
    guys = ['парень', 'кабан', 'братух', 'перец', 'мужик', 'эксперт', 'мой герой', 'сладкий мой', 'chico', 'парниш', 'крепыш']
    greeting = ['здарова', 'хая', 'салам', 'салют', 'здаров', 'шалом', 'бонжур', 'хэллоу', 'хей',
                'буэнос диас',
                'хола', 'доброго дня', 'добрый день', 'ассалам алейкум', 'hola', 'hola querido', 'эй']
    phrases = ['как тебе мои чики?', 'попробуй меня', 'какая я вкусненькая', 'посмотри на мои вишенки',
               'как тебе мои изюминки?', 'я вся горю', 'початимся?', 'пообщаемся?',
               'ох, не смотри на меня так', 'мои булочки готовы для тебя', 'рада тут побывать',
               'всегда готова, жду тебя тут', 'порадуй меня чем нибудь', 'я секси, да?', 'я конфетка, да?',
               'сейчас позову подружек не хуже меня', 'сегодня здесь будет жарко', 'я вся горю',
               'классный денек сегодня, да?', 'погодка не фонтан, согрей меня', 'хорошего дня!',
               'классного дня!', 'заходи поглядеть на меня еще', 'хватит палитьтся на мои титьки',
               'как я тебе?', 'оцени меня экспертно', 'не сломай об меня глаза', 'сиськи заказывал?',
               'как тебе мои шары?']
    emoji = ['$)', ':)', ';)', 'oO', ':**', ' ', '..', 'уух', 'мм;)']
    # Draw one entry from each pool and stitch them into a sentence.
    guys_random = random.randrange(0, len(guys))
    greeting_random = random.randrange(0, len(greeting))
    phrases_random = random.randrange(0, len(phrases))
    emoji_random = random.randrange(0, len(emoji))
    willing_phrase = f'{guys[guys_random].capitalize()} {greeting[greeting_random]}! {phrases[phrases_random].capitalize()} {emoji[emoji_random]}'
    return willing_phrase
def girl():
    """Post one random cached picture plus a phrase to group2, waiting
    (30 s at a time) until the scraper has filled link_girls."""
    # BUG FIX: re-check the length on every pass.  The old code snapshotted
    # len(link_girls) once before the loop, so an initially-empty cache made
    # this spin forever even after another thread filled it.
    while len(link_girls) == 0:
        time.sleep(30)
    pict = link_girls[random.randrange(0, len(link_girls))]
    try:
        bot.send_photo(group2, photo=pict)
    except Exception:
        # Retry with a fresh random pick; the retry sends its own phrase,
        # so return instead of falling through and sending a second one.
        girl()
        return
    phrase_to = phrase()
    bot.send_message(group2, phrase_to)
def girl_double():
    """Post the same random picture plus a phrase to both group2 and the
    experts group, waiting until the scraper has filled link_girls."""
    # BUG FIX: re-check the length each pass (a stale snapshot spun forever).
    while len(link_girls) == 0:
        time.sleep(30)
    pict_to_both = link_girls[random.randrange(0, len(link_girls))]
    try:
        bot.send_photo(group2, photo=pict_to_both)
        bot.send_photo(group_experts, photo=pict_to_both)
    except Exception:
        # Retry fully handles sending; avoid duplicating the phrase below.
        girl_double()
        return
    phrase_to = phrase()
    bot.send_message(group2, phrase_to)
    bot.send_message(group_experts, phrase_to)
def girl_to_group_expert():
    """Post one random cached picture plus a phrase to the experts group,
    waiting until the scraper has filled link_girls."""
    # BUG FIX: re-check the length each pass (a stale snapshot spun forever).
    while len(link_girls) == 0:
        time.sleep(30)
    pict = link_girls[random.randrange(0, len(link_girls))]
    try:
        bot.send_photo(group_experts, photo=pict)
    except Exception:
        # Retry fully handles sending; avoid duplicating the phrase below.
        girl_to_group_expert()
        return
    phrase_to = phrase()
    bot.send_message(group_experts, phrase_to)
def girl_once(message):
    """Reply to *message*'s chat with one random picture and a phrase."""
    # BUG FIX: re-check the length each pass (a stale snapshot spun forever).
    while len(link_girls) == 0:
        time.sleep(30)
    pict = link_girls[random.randrange(0, len(link_girls))]
    phrase_to = phrase_once()
    bot.send_photo(message.chat.id, photo=pict)
    bot.send_message(message.chat.id, phrase_to)
def girl_once_to_group2():
    """Post one random picture and a single-user phrase to group2."""
    bot.send_message(group2, 'test girl_once_to_group2')
    # BUG FIX: re-check the length each pass (a stale snapshot spun forever).
    while len(link_girls) == 0:
        time.sleep(30)
    pict = link_girls[random.randrange(0, len(link_girls))]
    phrase_to = phrase_once()
    bot.send_photo(group2, photo=pict)
    bot.send_message(group2, phrase_to)
# def send_ping_phrase():
# len_ = len(link_girls)
# bot.send_message(group2, f'ping + {len_}')
def bot_schedule():
    """Register the periodic jobs, then run the scheduler loop until the
    global ``launch`` flag is cleared by /stop."""
    schedule.every(5).minutes.do(run_threaded, ping)
    schedule.every(60).minutes.do(run_threaded, girl)
    # schedule.every(70).seconds.do(run_threaded, additional_check)
    schedule.every(180).minutes.do(run_threaded, girl_double)
    schedule.every(120).minutes.do(run_threaded, get_girl_links)
    # schedule.every(6).hours.do(girl)
    while launch:
        schedule.run_pending()
        time.sleep(1)
def run_threaded(job_func):
    """Fire *job_func* on a new background thread (used by the scheduler
    so slow jobs don't block run_pending)."""
    threading.Thread(target=job_func).start()
@server.route('/' + TOKEN, methods=['POST'])
def getMessage():
    """Telegram webhook endpoint: decode the raw update and hand it to telebot."""
    json_string = request.get_data().decode('utf-8')
    update = telebot.types.Update.de_json(json_string)
    bot.process_new_updates([update])
    return "it works", 200
@server.route("/")
def webhook():
    """Re-register the webhook; hitting the root URL re-points Telegram at us."""
    bot.remove_webhook()
    bot.set_webhook(url=APP_NAME + TOKEN)
    return "it worksssssssss", 200
if __name__ == "__main__":
    # Bind to the platform-assigned port (e.g. Heroku's $PORT), default 5000.
    server.run(host="0.0.0.0", port=int(os.environ.get('PORT', 5000)))
|
#!/usr/bin/env python3
def print_message(message):
    """Return *message* unchanged (identity helper; despite the name it
    does not print)."""
    return message
def sum(parcel1, parcel2):  # pragma: no cover
    """Return parcel1 + parcel2.

    NOTE(review): the function name shadows the builtin ``sum``; it is kept
    so existing callers keep working, but the redundant local variable that
    also shadowed the builtin has been removed.
    """
    return parcel1 + parcel2
def multiply(factor1, factor2):  # pragma: no cover
    """Return the product of the two factors."""
    product = factor1 * factor2
    return product
def main():
    """Script entry point: emit a placeholder line."""
    print("test")
# Run the demo only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|
def bfs(graph, startnode, visited):
    """Breadth-first traversal of *graph* starting at *startnode*.

    graph maps each node to the list of its adjacent nodes.  *visited*
    seeds the result (normally []); the caller's list is not mutated.
    Returns the nodes in first-visit (BFS) order.
    """
    from collections import deque
    order = list(visited)       # copy so the caller's list stays untouched
    queue = deque([startnode])
    while queue:
        node = queue.popleft()  # O(1); the old list.pop(0) was O(n)
        if node not in order:
            order.append(node)  # was rebuilt via list + [v], O(n) per visit
            queue.extend(graph[node])
    return order
# Interactively build an adjacency-list graph, then run BFS from the
# chosen start node and print the visit order.
graph = {}
num_nodes = int(input("Enter the number of nodes : "))
nodes = [x for x in input("Enter the nodes : ").split()]
startnode = input("Enter the start node : ")
for i in range(num_nodes):
    num_adj_nodes = int(input("Enter the number of nodes adjacent to "+nodes[i]+" : "))
    adj_nodes = [x for x in input("Enter the adjacent nodes : ").split()]
    graph[nodes[i]] = adj_nodes
print(bfs(graph,startnode,[]))
|
# read in the list
# create score matrix
# use a score matrix
import numpy as np
from common import loaders
def read_moves():
    """Load the puzzle input as a list of move strings."""
    return loaders.load_string()
def calculate_score(moves):
    """Total Rock-Paper-Scissors score for part 1.

    Each move is a string like "A Y": their play (A=Rock, B=Paper,
    C=Scissors) then mine (X=Rock, Y=Paper, Z=Scissors).  The matrix folds
    together the shape score (1/2/3) and the outcome score (0/3/6):
    rows are their play, columns are mine.
    """
    score_matrix = np.asarray([[4, 8, 3],
                               [1, 5, 9],
                               [7, 2, 6]])
    return sum(
        score_matrix[ord(theirs) - ord("A")][ord(mine) - ord("X")]
        for theirs, mine in (line.split(" ") for line in moves)
    )
def calculate_score_part2(moves):
    """Total score for part 2, where the second letter is the required
    outcome (X=lose, Y=draw, Z=win) rather than my shape.

    Matrix rows are the outcome letter, columns are their play; each cell
    folds together the shape score I must pick and the outcome score.
    """
    score_matrix = np.asarray([[3, 1, 2], [4, 5, 6], [8, 9, 7]])
    return sum(
        score_matrix[ord(outcome) - ord("X")][ord(theirs) - ord("A")]
        for theirs, outcome in (line.split(" ") for line in moves)
    )
if __name__ == "__main__":
    # Solve both parts against the real puzzle input and print the totals.
    moves = read_moves()
    score = calculate_score(moves)
    score2 = calculate_score_part2(moves)
    print(f"score is {score}")
    print(f"score 2 is {score2}")
|
import boto3
# Requester endpoints: sandbox for testing, prod for real (paid) HITs.
MTURK_SANDBOX = 'https://mturk-requester-sandbox.us-east-1.amazonaws.com'
MTURK_PROD = 'https://mturk-requester.us-east-1.amazonaws.com'
# NOTE(review): keep real credentials out of source control.
ak_ai2 = '<access-key>'
sak_ai2 = '<secret-access-key>'
mturk = boto3.client('mturk',
                     aws_access_key_id=ak_ai2,
                     aws_secret_access_key=sak_ai2,
                     region_name='us-east-1',
                     endpoint_url=MTURK_PROD
                     )
# Parenthesized prints work under both Python 2 and 3 (the originals were
# Python-2-only print statements).
print("I have " + mturk.get_account_balance()['AvailableBalance'] + " in my Sandbox account")
# Read the HIT question template ONCE and close the handle; the old code
# re-opened (and leaked) the same file on every loop iteration.
with open('questions_sample.xml', 'r') as question_file:
    question = question_file.read()
# Publish 30 identical HITs, 5 assignments each.
for qid in range(0, 30):
    new_hit = mturk.create_hit(
        Title='Sports question answers',
        Description='Please look at the hit and write a few question and answer based on the passages. It generally takes around 30-45 mins',
        Keywords='question answering',
        Reward='5.0',
        MaxAssignments=5,
        LifetimeInSeconds=172800,
        AssignmentDurationInSeconds=172800,
        AutoApprovalDelayInSeconds=259200,
        # AutoApprovalDelayInSeconds = 86400,
        Question=question,
        QualificationRequirements=[
            # Master Qualification Sandbox 2ARFPLSP75KLA8M8DH1HTEQVJT3SY6
            # Master Qualification for Prod 2F1QJWKUDD8XADTFD2Q0G6UTO95ALH
            # {
            #     'QualificationTypeId': '2F1QJWKUDD8XADTFD2Q0G6UTO95ALH',
            #     'Comparator': 'Exists',
            # },
            # Qualification restricting workers to the US
            {
                'QualificationTypeId': '00000000000000000071',
                'Comparator': 'EqualTo',
                'LocaleValues': [{
                    'Country': 'US'
                }]
            }
            ## Good tuker pool for NFL
            # {
            #     'QualificationTypeId':"3M3HXOD6K9394JG7PUA1AFZAEAR7IR",
            #     'Comparator': 'EqualTo',
            #     'IntegerValues':[100]
            # }
            ## Good tuker pool for History GT
            # {
            #     'QualificationTypeId':"3LRUXMYH0RF97HQYFONT1VJZK4J29O",
            #     'Comparator': 'EqualTo',
            #     'IntegerValues':[100]
            # }
        ],
    )
    # print("{0}\t{1}\t{2}".format("History_"+str(qid),"https://worker.mturk.com/mturk/preview?groupId=" + new_hit['HIT']['HITGroupId'], new_hit['HIT']['HITId']))
    print("{0}\t{1}\t{2}".format("NFL_"+str(qid),"https://worker.mturk.com/mturk/preview?groupId=" + new_hit['HIT']['HITGroupId'], new_hit['HIT']['HITId']))
print("I have " + mturk.get_account_balance()['AvailableBalance'] + " in my account")
|
import numpy as np, xarray as xr, rpxdock as rp, rpxdock.homog as hm
from rpxdock.search import hier_search, grid_search
from rpxdock.filter import filters
def make_cyclic_hier_sampler(monomer, hscore, **kw):
   '''
   :param monomer:
   :param hscore:
   :return: 6 DOF - 2: Sampling all 3D space + moving in and out from the origin
   getting resolutions from hscore
   OriCart1Hier_f4: 3D orientations + 1D cartesion direction, Hierarchical sampling grid (4x4), where f4 is float point
      [0,0]: cartesion lb
      [ncart * cart_resl]: cartesian ub (n cartesian cells * cartesian cell width)
      [ncart]: n top level cells for sampling
      ori_resl: orientation resolution for sampling
   returns "arrays of pos" to check for a given search resolution where pos are represented by matrices
   '''
   cart_resl, ori_resl = hscore.base.attr.xhresl
   # Enough cartesian cells to cover the monomer's full diameter.
   ncart = int(np.ceil(2 * monomer.radius_max() / cart_resl))
   return rp.sampling.OriCart1Hier_f4([0.0], [ncart * cart_resl], [ncart], ori_resl)
def make_cyclic_grid_sampler(monomer, cart_resl, ori_resl, **kw):
   """Flatten the base level of the hierarchical sampler into a plain grid:
   enumerate every level-0 cell and keep only the valid xforms."""
   ncart = int(np.ceil(2 * monomer.radius_max() / cart_resl))
   hiersampler = rp.sampling.OriCart1Hier_f4([0.0], [ncart * cart_resl], [ncart], ori_resl)
   isvalid, xforms = hiersampler.get_xforms(0, np.arange(hiersampler.size(0)))
   return xforms[isvalid]
# Dispatch table: pick the sampler factory matching the search strategy.
_default_samplers = {hier_search: make_cyclic_hier_sampler, grid_search: make_cyclic_grid_sampler}
def make_cyclic(monomer, sym, hscore, search=None, sampler=None, **kw):
   '''
   monomer and sym are the input single unit and symmetry
   hscore (hierarchical score) defines the score functions
      Contains scores for motifs at coarse --> fine levels of search resolution
   sampler enumerates positions
   search is usually hier_search but grid_search is also available
   '''
   kw = rp.Bunch(kw)
   t = rp.Timer().start()
   # BUG FIX: was '"C%i" % i' — `i` is undefined here and raised a NameError
   # whenever sym was passed as an int.
   sym = "C%i" % sym if isinstance(sym, int) else sym
   kw.nresl = hscore.actual_nresl if kw.nresl is None else kw.nresl
   kw.output_prefix = kw.output_prefix if kw.output_prefix else sym
   if search is None:
      if kw.docking_method not in 'hier grid'.split():
         raise ValueError(f'--docking_method must be either "hier" or "grid"')
      if kw.docking_method == 'hier':
         search = hier_search
      elif kw.docking_method == 'grid':
         search = grid_search
   if sampler is None: sampler = _default_samplers[search](monomer, hscore=hscore, **kw)
   evaluator = CyclicEvaluator(monomer, sym, hscore, **kw)
   xforms, scores, extra, stats = search(sampler, evaluator, **kw)
   ibest = rp.filter_redundancy(xforms, monomer, scores, **kw)
   tdump = _debug_dump_cyclic(xforms, monomer, sym, scores, ibest, evaluator, **kw)
   if kw.verbose:
      print(f"rate: {int(stats.ntot / t.total):,}/s ttot {t.total:7.3f} tdump {tdump:7.3f}")
      print("stage time:", " ".join([f"{t:8.2f}s" for t, n in stats.neval]))
      print("stage rate: ", " ".join([f"{int(n/t):7,}/s" for t, n in stats.neval]))
   if kw.filter_config:
      # Apply filters
      sbest, filter_extra = filters.filter(xforms[ibest], monomer, **kw)
      ibest = ibest[sbest]
   xforms = xforms[ibest]
   '''
   dump pickle: (multidimensional pandas df)
      body_: list of bodies/pos used in docking
      attrs: xarray of all global config args, timing stats, total time, time to dump, and sym
      scores: weighted combined score by modelid
      xforms: xforms pos by modelid
      rpx: rpxscore
      ncontact: ncontact score
      reslb/ub: lowerbound/upperbound of trimming
   '''
   # Rescore the surviving docks with rpx-only and ncontact-only weights.
   wrpx = kw.wts.sub(rpx=1, ncontact=0)
   wnct = kw.wts.sub(rpx=0, ncontact=1)
   rpx, extra = evaluator(xforms, kw.nresl - 1, wrpx)
   ncontact, _ = evaluator(xforms, kw.nresl - 1, wnct)
   data = dict(
      attrs=dict(arg=kw, stats=stats, ttotal=t.total, tdump=tdump, sym=sym),
      scores=(["model"], scores[ibest].astype("f4")),
      xforms=(["model", "hrow", "hcol"], xforms),
      rpx=(["model"], rpx.astype("f4")),
      ncontact=(["model"], ncontact.astype("f4")),
   )
   # Attach extra per-model arrays, labelling bare arrays with the model dim.
   for k, v in extra.items():
      if not isinstance(v, (list, tuple)) or len(v) > 3:
         v = ['model'], v
      data[k] = v
   if kw.filter_config:
      #add the filter data to data
      for k, v in filter_extra.items():
         if not isinstance(v, (list, tuple)) or len(v) > 3:
            v = ['model'], v
         data[k] = v
   return rp.Result(
      body_=None if kw.dont_store_body_in_results else [monomer],
      **data,
   )
class CyclicEvaluator:
   '''
   Takes a monomer position, generates a sym neighbor, and checks for "flat"-ish surface between the sym neighbors
   For trimming: does trimming thing and finds intersection until overlap isn't too overlappy/clashy anymore
   xforms: body.pos
   xsym: xforms of symmetrically related copy
   those two things get checked for intersections and clashes and scored by scorepos
   '''
   def __init__(self, body, sym, hscore, **kw):
      """Cache the body, score function source, and the CN symmetry rotation
      (360/N degrees about the z axis)."""
      self.kw = rp.Bunch(kw)
      self.body = body
      self.hscore = hscore
      self.symrot = hm.hrot([0, 0, 1], 360 / int(sym[1:]), degrees=True)
   # __call__ gets called if class if called like a fcn
   def __call__(self, xforms, iresl=-1, wts={}, **kw):
      """Score a batch of candidate xforms; returns (scores, Bunch of
      per-dock residue bounds).  NOTE(review): the mutable default wts={}
      is never mutated here, so it is safe, but a None sentinel would be
      more conventional."""
      kw = self.kw.sub(wts=wts)
      xeye = np.eye(4, dtype="f4")
      body, sfxn = self.body, self.hscore.scorepos
      xforms = xforms.reshape(-1, 4, 4)  # body.pos
      xsym = self.symrot @ xforms  # symmetrized version of xforms
      # check for "flatness"
      ok = np.abs((xforms @ body.pcavecs[0])[:, 2]) <= self.kw.max_longaxis_dot_z
      # check clash, or get non-clash range
      if kw.max_trim > 0:
         trim = body.intersect_range(body, xforms[ok], xsym[ok], **kw)
         trim, trimok = rp.search.trim_ok(trim, body.nres, **kw)
         ok[ok] &= trimok
      else:
         ok[ok] &= body.clash_ok(body, xforms[ok], xsym[ok], **kw)
         trim = [0], [body.nres - 1]
      # score everything that didn't clash
      scores = np.zeros(len(xforms))
      bounds = (*trim, -1, *trim, -1)
      '''
      bounds: valid residue ranges to score after trimming i.e. don't score resi that were trimmed
      sfxn: hscore.scorepos scores stuff from the hscore that got passed
         takes two pos of bodies (the same monomer in this case)
         xforms: not clashing xforms
         iresl: stage of hierarchical search (grid spacing: 4A --> 2A --> 1A --> 0.5A --> 0.25A)
            sampling at highest resl probably 0.6A due to ori + cart
         returns score # for each "dock"
      '''
      scores[ok] = sfxn(body, body, xforms[ok], xsym[ok], iresl, bounds, **kw)
      # record ranges used (trim data to return)
      lb = np.zeros(len(scores), dtype="i4")
      ub = np.ones(len(scores), dtype="i4") * (body.nres - 1)
      if trim: lb[ok], ub[ok] = trim[0], trim[1]
      return scores, rp.Bunch(reslb=lb, resub=ub)
def _debug_dump_cyclic(xforms, body, sym, scores, ibest, evaluator, **kw):
   """Write the top nout_debug docks as symmetric PDB files (with a score
   summary printed per file) and return the time spent dumping."""
   kw = rp.Bunch(kw)
   t = rp.Timer().start()
   nout_debug = min(10 if kw.nout_debug is None else kw.nout_debug, len(ibest))
   for iout in range(nout_debug):
      i = ibest[iout]
      body.move_to(xforms[i])
      # Re-score this single dock with rpx-only and ncontact-only weights.
      wrpx, wnct = (kw.wts.sub(rpx=1, ncontact=0), kw.wts.sub(rpx=0, ncontact=1))
      scr, extra = evaluator(xforms[i], kw.nresl - 1, wrpx)
      cnt, extra = evaluator(xforms[i], kw.nresl - 1, wnct)
      fn = kw.output_prefix + "_%02i.pdb" % iout
      print(
         f"{fn} score {scores[i]:7.3f} rpx {scr[0]:7.3f} cnt {cnt[0]:4}",
         f"resi {extra.reslb[0]}-{extra.resub[0]}",
      )
      rp.dump_pdb_from_bodies(fn, [body], rp.symframes(sym), resbounds=extra)
   return t.total
|
# For each word of input.txt (in order), write how many times that word has
# already appeared, space-separated, to output.txt.
# Context managers replace the manual open/close pair, and the source handle
# no longer shadows the builtin ``input``.
with open('input.txt', 'r') as source, open('output.txt', 'w') as output:
    text = dict()
    for elem in source.read().split():
        if elem not in text:
            text[elem] = 0          # first sighting: zero prior occurrences
        else:
            text[elem] += 1
        print(text[elem], end = ' ', file = output)
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 1 18:43:45 2020
@author: dennis
"""
from itertools import permutations

with open('data/day1.txt') as f:
    report = [int(x) for x in f.readlines()]

# Part 1: find two entries summing to 2020 and print their product.
for perm in permutations(report, 2):
    if sum(perm) == 2020:
        break
print(f'Part 1: {perm[0] * perm[1]}')

# Part 2: same for three entries.
for perm in permutations(report, 3):
    if sum(perm) == 2020:
        break
# BUG FIX: this line previously printed the label 'Part 1' twice.
print(f'Part 2: {perm[0] * perm[1] * perm[2]}')
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# further reading https://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/
# y = mx + c
m = 5000
c = 0
my_feature = [n for n in range(1, 10)]
my_label = [m*n + c for n in range(1, 10)]
label_name = f'y={m}x+{c}'
# NOTE(review): the three assignments below overwrite the synthetic data
# above — the y=mx+c dataset is dead code; confirm that is intended.
# input variable
my_feature = ([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0])
# thing were predicting
my_label = ([5.0, 8.8, 9.6, 14.2, 18.8, 19.5, 21.4, 26.8, 28.9, 32.0, 33.8, 38.2])
label_name = 'custom data'
epochs = 40
learning_rate = 0.008
bias_learning_rate = 0.04
bias_weight = 0.5
weight = 0.05
# Per-step histories recorded for the animation below.
we = []  # feature weight
bw = []  # bias weight
er = []  # per-epoch mean squared error
# Plain SGD on a single linear neuron, one sample at a time.
for epoch in range(epochs):
    costs = []
    for n, feat in enumerate(my_feature):
        we.append(weight)
        bw.append(bias_weight)
        inputs = np.array([feat, 1])  # append constant 1 for the bias term
        net = inputs.dot(np.array([weight, bias_weight]))
        error = net - my_label[n]
        costs.append(error)
        # chain rule
        # dE/dw = dE/d_net * d_net/d_w
        # dE/d_net = (net - prediction)
        # d_net/d_w = input
        dE_dw = error*inputs[0]
        weight = weight - learning_rate*dE_dw
        dE_db = (net - my_label[n]) * 1
        bias_weight = bias_weight - bias_learning_rate*dE_db
    # mean squared error
    mse = (1/len(my_feature))*(sum(np.array(costs)**2))
    er.append(mse)
print("FINAL WEIGHT:", weight)
print('FINAL BIAS WEIGHT:', bias_weight)
# Iterators consumed frame-by-frame by animate(); er has one entry per
# epoch while we/bw have one per sample, hence the scale factor.
zip_iter = iter(zip(we, bw))
num = iter(n for n in range(len(we)))
scale = len(we) / len(er)
fig = plt.figure(1, figsize=(8,8))
ax1 = fig.add_subplot(2, 2, 1)
ax2 = fig.add_subplot(2, 2, 2)
ax3 = fig.add_subplot(2, 2, 3)
def animate(i):
    """FuncAnimation callback: replay one recorded training step per frame.

    Draws the current fit line over the data (ax1), the weight histories
    (ax2), and the per-epoch MSE curve (ax3).  Frames beyond the recorded
    history are no-ops.
    """
    try:
        weight, bias_weight = next(zip_iter)
        n = next(num)
    except StopIteration:
        return  # history exhausted; leave the final frame on screen
    ax1.clear()
    x = np.linspace(min(my_feature), max(my_feature), 100)
    y = weight * x + bias_weight
    ax1.plot(x, y, '-g', label='linear regression', linewidth=3)
    ax1.scatter(my_feature, my_label, c='r', label=label_name, marker='x')
    ax1.set_ylim([min(my_label)-2, max(my_label)+2])
    ax1.legend(loc='best')
    plt.grid()
    ax2.clear()
    # NOTE(review): the comprehension variable shadows the frame counter n.
    ax2.plot([n for n in range(n)], we[:n], label='feature weight')
    ax2.plot([n for n in range(n)], bw[:n], label='bias weight')
    ax2.legend(loc='best')
    plt.grid()
    ax3.clear()
    ax3.plot([n for n in range(int(n/scale))], er[:int(n/scale)], label='mean squared error')
    ax3.legend(loc='best')
    # Zoom the error axis in as the loss shrinks.
    # NOTE(review): er[int(n/scale)] can index one past the end on the last
    # frame — confirm whether an IndexError is ever hit in practice.
    if er[int(n/scale)] < 3:
        ax3.set_ylim([0, 10])
    if er[int(n / scale)] < 0.3:
        ax3.set_ylim([0, 1])
    plt.grid()
# interval is ms between frames; keeping the FuncAnimation in a variable
# prevents it from being garbage-collected while the window is open.
ani = animation.FuncAnimation(fig, animate, interval=10)
plt.show()
|
import numpy as np
import cv2
import os
import sys
import time
import torch
from torch import nn
from models.yolo import *
from models.hrnet import *
from utils.detector import *
def predict(file_path, pred_path, module_dir, draw_bbox=False, box_tr=0.7):
    """Run YOLO detection + HRNet keypoints over an image or video file.

    file_path  - absolute path to the input file
    pred_path  - absolute path for the prediction output
    module_dir - path of the module folder holding the model weights
    draw_bbox  - whether to draw bounding boxes on the output
    box_tr     - confidence threshold for bboxes
    Returns the (possibly adjusted) prediction path; unknown formats are
    reported and the path returned unchanged.
    """
    image_formats = ['.jpg', '.png', '.jpeg', '.bmp']
    video_formats = ['.mp4', '.mov', '.avi', '.webm', '.mkv', '.m4v']
    # splitext is robust for paths without an extension (it yields ''),
    # whereas the previous rindex('.') call raised ValueError on them.
    file_format = os.path.splitext(file_path)[1].lower()
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    yolov5 = load_yolo_model(module_dir).to(device)
    keypoint_net = load_keypoint_net(module_dir).to(device)
    if file_format in image_formats:
        pred_path = predict_image(file_path, pred_path, yolov5, keypoint_net, device,
                                  draw_bbox=draw_bbox, box_tr=box_tr)
    elif file_format in video_formats:
        pred_path = predict_video(file_path, pred_path, yolov5, keypoint_net, device,
                                  draw_bbox=draw_bbox, box_tr=box_tr)
    else:
        print('Unknown file format')
    return pred_path
|
#LeetCode problem 692: Top K Frequent Words
class Solution:
    def topKFrequent(self, words: List[str], k: int) -> List[str]:
        """Return the k most frequent words, highest count first; equal
        counts are broken by lexicographic order."""
        freq = {}
        for word in words:
            freq[word] = freq.get(word, 0) + 1
        # Sort by descending count, then ascending word.
        ordered = sorted(freq.items(), key=lambda kv: (-kv[1], kv[0]))
        return [word for word, _ in ordered[:k]]
#
#Artificially Destined
#=============
#by SoapDictator
#
import pygame, sys, math
from pygame.locals import *
sys.path.append('/AD/obj')
sys.path.append('/AD/managers')
sys.path.append('/AD/utils')
from managers import event, window, input, unit, map
class Main(object):
    """Application entry point (singleton wrapper).

    Attribute access is forwarded to the single shared ``__Main`` instance,
    which wires up the manager objects and then runs the game loop.
    """
    #singleton implementation
    instance = None
    def __init__(self):
        if not Main.instance:
            Main.instance = Main.__Main()
    def __getattr__(self, name):
        # Delegate all attribute lookups to the singleton instance.
        return getattr(self.instance, name)
    class __Main():
        def __init__(self):
            # Managers are published as module-level globals so the test
            # helpers below (and the managers themselves) can reach them.
            global EVENT0, WINDOW0, INPUT0, UNIT0, MAP0
            EVENT0 = event.GameEventManager()
            WINDOW0 = window.WindowManager()
            INPUT0 = input.InputManager()
            UNIT0 = unit.UnitManager()
            MAP0 = map.MapManager()
            EVENT0.defineGlobals(EVENT0, MAP0, UNIT0)
            WINDOW0.defineGlobals(INPUT0, MAP0, UNIT0)
            INPUT0.defineGlobals(EVENT0, WINDOW0, MAP0, UNIT0)
            MAP0.defineGlobals(WINDOW0, UNIT0)
            UNIT0.defineGlobals(WINDOW0, MAP0, UNIT0)
            #regression(haha) tests
            #self.testTankAttack().testArtiAttack().testUnitCastAbility()
            #for manual testing
            self.testCreateUnits()
            pygame.key.set_repeat(50,50)
            # Main game loop: poll input and redraw until the process exits.
            while True:
                INPUT0.handleInput()
                WINDOW0.screenRefresh()
        #TESTS
        def testCreateUnits(self):
            """Spawn two artillery and two soldier units for manual testing."""
            EVENT0.eventAdd("EventUnitCreate", ("UA000", [-4, -3], "player1"))
            EVENT0.eventAdd("EventUnitCreate", ("UA000", [4, 3], "player2"))
            EVENT0.eventAdd("EventUnitCreate", ("US000", [-3, -4], "player1"))
            EVENT0.eventAdd("EventUnitCreate", ("US000", [3, 4], "player2"))
            EVENT0.eventHandle()
            return self
        def testUnitMove(self):
            """Create a tank at the origin and check it can path to [3, 3]."""
            EVENT0.eventAdd("EventUnitCreate", ("UT000", [0, 0], "player1"))
            EVENT0.eventHandle()
            # BUGFIX: `units` was referenced without ever being assigned
            # (NameError); fetch it the same way the sibling tests do.
            units = UNIT0.getAllUnits()
            tstUnit0 = units[len(units)-1]
            tstUnit0.setMoveQueue(MAP0.getPath(tstUnit0.getStatCur("SPD"), tstUnit0.getCoord(), [3, 3]))
            EVENT0.eventHandle()
            try:
                assert(tstUnit0.getCoord() == [3, 3])
            except AssertionError:  # narrowed from bare except
                print("Test Fail: UT000 horribly failed to move!")
            UNIT0.unitDestroy(tstUnit0)
            return self
        def testTankAttack(self):
            """Tank attacks an adjacent soldier; soldier should die."""
            EVENT0.eventAdd("EventUnitCreate", ("UT000", [0, 0], "player1"))
            EVENT0.eventAdd("EventUnitCreate", ("US000", [1, 1], "player2"))
            EVENT0.eventHandle()
            units = UNIT0.getAllUnits()
            tstUnit0 = units[len(units)-2]
            tstUnit1 = units[len(units)-1]
            UNIT0.setUnitTarget(tstUnit0, tstUnit1.getCoord())
            EVENT0.eventHandle()
            UNIT0.unitDestroy(tstUnit0)
            try:
                assert(tstUnit1.getStatCur("HP") <= 0)
            except AssertionError:  # narrowed from bare except
                print("Test Fail: Tank's attack failed horribly!")
            UNIT0.unitDestroy(tstUnit1)
            return self
        def testArtiAttack(self):
            """Artillery attacks a soldier at range; soldier should die."""
            EVENT0.eventAdd("EventUnitCreate", ("UA000", [0, 0], "player1"))
            EVENT0.eventAdd("EventUnitCreate", ("US000", [3, 3], "player2"))
            EVENT0.eventHandle()
            units = UNIT0.getAllUnits()
            tstUnit0 = units[len(units)-2]
            tstUnit1 = units[len(units)-1]
            UNIT0.setUnitTarget(tstUnit0, tstUnit1.getCoord())
            EVENT0.eventHandle()
            UNIT0.unitDestroy(tstUnit0)
            try:
                assert(tstUnit1.getStatCur("HP") <= 0)
            except AssertionError:  # narrowed from bare except
                print("Test Fail: Artillery's attack failed horribly!")
            UNIT0.unitDestroy(tstUnit1)
            return self
        def testUnitCastAbility(self):
            """Caster unit uses ability A001 on a friendly soldier."""
            EVENT0.eventAdd("EventUnitCreate", ("UE000", [0, 0], "player1"))
            EVENT0.eventAdd("EventUnitCreate", ("US000", [1, 1], "player1"))
            EVENT0.eventHandle()
            units = UNIT0.getAllUnits()
            tstUnit0 = units[len(units)-2]
            tstUnit1 = units[len(units)-1]
            tstUnit0.castAbility("A001", tstUnit1)
            EVENT0.eventHandle()
            UNIT0.unitDestroy(tstUnit0)
            UNIT0.unitDestroy(tstUnit1)
            return self
StartShenanigans = Main()
import numpy as np
from trajectory import Step, Trajectory
class MemoryBuffer:
    """
    Implementation of a transition memory buffer.

    A fixed-capacity ring buffer of environment steps: once
    ``max_memory_size`` entries have been written, the oldest entries are
    overwritten first. Storage is allocated lazily from the first step seen.
    """
    def __init__(self, max_memory_size):
        # max_memory_size: capacity of the ring buffer
        self.max_memory_size = max_memory_size
        self._top = 0   # index where the next entry will be written
        self._size = 0  # number of valid samples currently stored
    def create_memory_buffer(self, step: "Step"):
        """
        Dynamically creates a memory buffer with the right size, shape and dtypes.

        Layout (statistic names, per-statistic widths and dtypes) is inferred
        from the given example step.
        """
        self.shape_dtype_dict = {
            stat_name: (value.shape[-1], value.dtype)
            for stat_name, value in step.asdict().items()
        }
        self.statistics = self.shape_dtype_dict.keys()
        self._memory_buffer = {
            stat_name: np.zeros(shape=(self.max_memory_size, width), dtype=dtype)
            for stat_name, (width, dtype) in self.shape_dtype_dict.items()
        }
    def add_step(self, step: "Step"):
        """Append a single step, allocating the buffer on first use."""
        if self._size == 0:
            self.create_memory_buffer(step)
        error_msg = f"""Memory buffer and step have different statistics - step:{step.statistics},
                        buffer:{self.statistics}"""
        assert step.statistics == self.statistics, error_msg
        for key, value in step.asdict().items():
            self._memory_buffer[key][self._top] = value
        self._advance()
    def add_transitions(self, trajectory: "Trajectory"):
        """Append every step of a trajectory.

        BUGFIX: when the buffer was still empty, the statistics assertion
        below raised AttributeError because ``self.statistics`` did not exist
        yet; allocate from the trajectory's first step instead.
        """
        if self._size == 0:
            if not trajectory.step_history:
                return  # nothing to infer the layout from, nothing to add
            self.create_memory_buffer(trajectory.step_history[0])
        error_msg = f"""Memory buffer and trajectory have different statistics -
                        traj:{trajectory.step_statistics}, buffer:{self.statistics}"""
        assert trajectory.step_statistics == self.statistics, error_msg
        for step in trajectory.step_history:
            self.add_step(step)
    def _advance(self):
        # Move the write head, wrapping around at capacity.
        self._top = (self._top + 1) % self.max_memory_size
        if self._size < self.max_memory_size:
            self._size += 1
    def sample_batch_transitions(self, batch_size):
        """Sample ``batch_size`` stored steps uniformly (with replacement)."""
        assert batch_size <= self._size, "Not enough samples in buffer"
        indices = np.random.randint(0, self._size, batch_size)
        batch = {
            stat: self._memory_buffer[stat][indices] for stat in self.statistics
        }
        return batch
    @property
    def current_size(self):
        # Number of valid entries currently in the buffer.
        return self._size
|
# APPLE
"""
SOLVED -- LEETCODE#303
Create a class that initializes with a list of numbers and has one method called sum.
sum should take in two parameters, start_idx and end_idx and return the sum of the list
from start_idx (inclusive) to end_idx` (exclusive).
You should optimize for the sum method.
"""
class ListFastSum:
    """Answer range-sum queries in O(1) using precomputed prefix sums."""
    def __init__(self, nums):
        self.nums = nums
        self.initsum()
    def sum(self, start_idx, end_idx):
        """Return sum(nums[start_idx:end_idx]) — start inclusive, end
        exclusive. O(1) per query."""
        return self.partial_sum[end_idx] - self.partial_sum[start_idx]
    def initsum(self):
        """Build partial_sum so that partial_sum[i] == sum(nums[:i]).

        len(partial_sum) == len(nums) + 1; built in O(n) with the stdlib
        running-sum helper instead of a manual accumulator loop.
        """
        from itertools import accumulate  # local import keeps module deps unchanged
        self.partial_sum = list(accumulate(self.nums, initial=0))
print(ListFastSum([1, 2, 3, 4, 5, 6, 7]).sum(2, 5))
# 12 because 3 + 4 + 5 = 12
|
import numpy as np
from liegroups.numpy import SE3, SO3
from pyslam.problem import Options, Problem
from collections import OrderedDict, namedtuple
from pyslam.losses import L2Loss, TDistributionLoss, HuberLoss
from pyslam.residuals import PoseResidual, PoseToPoseResidual, PoseToPoseOrientationResidual
from pyslam.utils import invsqrt
import torch
import sys
import time
import pickle
import copy
class VisualInertialPipelineAbs():
    """Fuses per-frame IMU dead-reckoning with learned ('hydranet') absolute
    orientation measurements into a camera-frame trajectory.

    Results accumulate in ``self.T_w_c`` (fused) and ``self.T_w_c_imu``
    (IMU-only) as lists of SE3 poses.
    """
    def __init__(self, dataset, T_cam_imu, hydranet_output_file, first_pose=SE3.identity()):
        # dataset: KITTI-style dataset object with .oxts and .timestamps
        # T_cam_imu: SE3 extrinsic transform from the IMU to the camera frame
        # hydranet_output_file: torch-saved network rotations + covariances
        self.optimizer = PoseFusionSolverAbs()
        self.dataset = dataset
        self.dataset._load_timestamps()
        self.imu_Q = self.compute_imu_Q()
        self.T_w_c = [first_pose]
        self.T_w_c_imu = [first_pose]
        self.T_cam_imu = T_cam_imu
        self._load_hydranet_files(hydranet_output_file)
    def _load_hydranet_files(self, path):
        """Load the network outputs: rotation covariances, estimated and
        ground-truth rotations, all converted to numpy arrays."""
        hn_data = torch.load(path)
        self.Sigma_C_imu_w = hn_data['Sigma_21'].numpy()
        self.C_imu_w_hn = hn_data['Rot_21'].numpy()
        self.C_imu_w_gt = hn_data['Rot_21_gt'].numpy()
        #self.Sigma_21_hydranet_const = 1e-6*np.eye(3)#self.compute_rot_covar()
        #self.C_21_large_err_mask = self.compute_large_err_mask()
    # def compute_large_err_mask(self):
    #     phi_errs = np.empty((len(self.C_21_hydranet_gt)))
    #     for i in range(len(self.C_21_hydranet_gt)):
    #         C_21_est = SO3.from_matrix(self.C_21_hydranet[i], normalize=True)
    #         C_21_gt = SO3.from_matrix(self.C_21_hydranet_gt[i], normalize=True)
    #         phi_errs[i] = np.linalg.norm(C_21_est.dot(C_21_gt.inv()).log())
    #     return phi_errs > 0.2*np.pi/180.
    #
    # def compute_rot_covar(self):
    #     phi_errs = np.empty((len(self.C_21_hydranet_gt), 3))
    #     for i in range(len(self.C_21_hydranet_gt)):
    #         C_21_est = SO3.from_matrix(self.C_21_hydranet[i], normalize=True)
    #         C_21_gt = SO3.from_matrix(self.C_21_hydranet_gt[i], normalize=True)
    #         phi_errs[i] = C_21_est.dot(C_21_gt.inv()).log()
    #     return np.cov(phi_errs, rowvar=False)
    def compute_imu_Q(self):
        """Estimate the IMU process-noise covariance (6x6) by comparing
        integrated per-frame IMU motion against ground-truth pose deltas."""
        T_w_imu_gt = [SE3.from_matrix(o.T_w_imu) for o in self.dataset.oxts]
        xi_errs = np.empty((len(self.dataset.oxts) - 1, 6))
        for pose_i, oxt in enumerate(self.dataset.oxts):
            if pose_i == len(self.dataset.oxts) - 1:
                break
            dt = (self.dataset.timestamps[pose_i + 1] - self.dataset.timestamps[pose_i]).total_seconds()
            xi = -dt*self._assemble_motion_vec(oxt)
            T_21_imu = SE3.exp(xi)
            T_21_gt = T_w_imu_gt[pose_i+1].inv().dot(T_w_imu_gt[pose_i])
            # per-second error twist between integrated IMU and ground truth
            xi_errs[pose_i] = T_21_imu.dot(T_21_gt.inv()).log()/(dt)
        return np.cov(xi_errs, rowvar=False)
    def _assemble_motion_vec(self, oxt):
        """Pack one OXTS packet into a 6-vector [vf, vl, vu, wx, wy, wz]."""
        motion_vec = np.empty(6)
        motion_vec[0] = oxt.packet.vf
        motion_vec[1] = oxt.packet.vl
        motion_vec[2] = oxt.packet.vu
        motion_vec[3] = oxt.packet.wx
        motion_vec[4] = oxt.packet.wy
        motion_vec[5] = oxt.packet.wz
        return motion_vec
    def compute_vio(self):
        """Run the fusion over all frames, appending to T_w_c / T_w_c_imu."""
        num_poses = len(self.dataset.oxts)
        start = time.time()
        for pose_i, oxt in enumerate(self.dataset.oxts[:num_poses]):
            if pose_i == num_poses - 1:
                break
            T_w_c = self.T_w_c[-1]
            if pose_i % 100 == 0:
                end = time.time()
                print('Processing pose: {} / {}. Avg. proc. freq.: {:.3f} [Hz]'.format(pose_i, len(self.dataset.oxts),100.0/(end - start)))
                start = time.time()
            dt = (self.dataset.timestamps[pose_i+1] - self.dataset.timestamps[pose_i]).total_seconds()
            xi = -dt*self._assemble_motion_vec(oxt)
            # express the IMU motion increment in the camera frame
            T_21_imu = self.T_cam_imu.dot(SE3.exp(xi)).dot(self.T_cam_imu.inv())
            Ad_T_cam_imu = SE3.adjoint(self.T_cam_imu)
            Sigma_21_imu = Ad_T_cam_imu.dot(dt*dt*self.imu_Q).dot(Ad_T_cam_imu.transpose())
            # NOTE(review): self.Sigma_21_hydranet / self.C_21_hydranet are not
            # set anywhere in this class -- _load_hydranet_files() stores
            # self.Sigma_C_imu_w / self.C_imu_w_hn instead, so these two lines
            # look like they would raise AttributeError. Confirm intended names.
            Sigma_hn = self.Sigma_21_hydranet[pose_i]
            #Sigma_hn = self.Sigma_21_hydranet_const
            C_hn = SO3.from_matrix(self.C_21_hydranet[pose_i], normalize=True)
            self.optimizer.reset_solver()
            self.optimizer.add_costs(T_21_imu, invsqrt(Sigma_21_imu), C_hn, invsqrt(Sigma_hn))
            self.optimizer.set_priors(SE3.identity(), SE3.identity())
            # if self.C_21_large_err_mask[pose_i]:
            #T_21 = copy.deepcopy(T_21_imu)
            #T_21.rot = C_hn
            # else:
            T_21 = self.optimizer.solve()
            self.T_w_c.append(self.T_w_c[-1].dot(T_21.inv()))
            self.T_w_c_imu.append(self.T_w_c_imu[-1].dot(T_21_imu.inv()))
class PoseFusionSolverAbs(object):
    """Two-pose factor-graph solver fusing a relative-pose (odometry)
    measurement with an absolute-orientation measurement via pyslam."""
    def __init__(self):
        # Options
        self.problem_options = Options()
        self.problem_options.allow_nondecreasing_steps = True
        self.problem_options.max_nondecreasing_steps = 3
        self.problem_options.max_iters = 10
        self.problem_solver = Problem(self.problem_options)
        self.pose_keys = ['T_1_0', 'T_2_0']
        self.loss = L2Loss()
        # self.loss = HuberLoss(5.)
        # self.loss = TukeyLoss(5.)
        # self.loss = HuberLoss(.1)
        #self.loss = HuberLoss(10.0) # Kerl et al. ICRA 2013
    def reset_solver(self):
        """Discard all residuals by rebuilding the Problem."""
        self.problem_solver = Problem(self.problem_options)
    def set_priors(self, T_1_0, T_2_0):
        """Initialise both poses; the first pose is held fixed."""
        self.params_initial = {self.pose_keys[0]: T_1_0, self.pose_keys[1]: T_2_0}
        self.problem_solver.set_parameters_constant(self.pose_keys[0])
        self.problem_solver.initialize_params(self.params_initial)
    def add_costs(self, T_21_obs, odom_stiffness, C_imu_w_obs, rot_stiffness):
        """Add the odometry and orientation residual blocks.

        Stiffness arguments are inverse-square-root covariances.
        """
        residual_pose = PoseToPoseResidual(T_21_obs, odom_stiffness)
        # NOTE(review): OrientationResidual is not imported at the top of this
        # file (the imports bring in PoseToPoseOrientationResidual) -- unless
        # it is defined elsewhere this raises NameError. Confirm.
        residual_rot = OrientationResidual(C_imu_w_obs, rot_stiffness)
        self.problem_solver.add_residual_block(residual_pose, self.pose_keys)
        self.problem_solver.add_residual_block(residual_rot, self.pose_keys[1], loss=self.loss)
    def solve(self):
        """Optimise and return the relative pose T_2_1 between the two keys."""
        self.params_final = self.problem_solver.solve()
        #print(self.problem_solver.summary())
        #self.problem_solver.compute_covariance()
        T_1_0 = self.params_final[self.pose_keys[0]]
        T_2_0 = self.params_final[self.pose_keys[1]]
        T_2_1 = T_2_0.dot(T_1_0.inv())
        return T_2_1
#!/usr/bin/env python
import sys
import os
import re
import xml.dom.minidom
import StringIO
import random
from mmap import mmap
from multiprocessing import Process
from os import rename, listdir
def main():
    """Scan the current directory and report files whose names start with a
    three-digit "NNN " prefix, printing the stripped name.

    The actual rename (mv) is commented out, so this is a dry run only.
    Python 2 script (print statements).
    """
    fnames = listdir('.')
    for oldfname in fnames:
        if re.match("[0-9][0-9][0-9] .*", oldfname):
            # drop the 4-character "NNN " prefix
            newfname = oldfname[4:]
            print "%s %60s" %(oldfname, newfname)
            command = "mv \"" + oldfname + "\" \"" + newfname + "\""
            #os.system(command)
        elif re.match("[0-9].*", oldfname):
            # other digit-prefixed names are only reported
            print "file: %s" %(oldfname)
            #if oldfname.startswith("1"):
            #rename(oldfname, oldfname.replace(badprefix, '', 1))
# main
#
if __name__ == "__main__":
    main()
|
import unittest2
import responses
import dwollav2
class TokenShould(unittest2.TestCase):
    """Unit tests for the dwollav2 Token object.

    Constructor tests check that each keyword argument is stored; HTTP verb
    tests stub the API with the `responses` library (no network access) and
    check both the status code and the parsed JSON body.
    """
    # fixture values shared by all tests below
    client = dwollav2.Client(id='id', secret='secret')
    access_token = 'access token'
    refresh_token = 'refresh token'
    expires_in = 123
    scope = 'scope'
    account_id = 'account id'
    more_headers = {'idempotency-key': 'foo'}
    def test_sets_access_token(self):
        token = self.client.Token(access_token=self.access_token)
        self.assertEqual(self.access_token, token.access_token)
    def test_sets_refresh_token(self):
        token = self.client.Token(refresh_token=self.refresh_token)
        self.assertEqual(self.refresh_token, token.refresh_token)
    def test_sets_expires_in(self):
        token = self.client.Token(expires_in=self.expires_in)
        self.assertEqual(self.expires_in, token.expires_in)
    def test_sets_scope(self):
        token = self.client.Token(scope=self.scope)
        self.assertEqual(self.scope, token.scope)
    def test_sets_account_id(self):
        token = self.client.Token(account_id=self.account_id)
        self.assertEqual(self.account_id, token.account_id)
    def test_uses_new_session(self):
        # each Token gets its own session carrying its own auth header
        new_access_token = 'new access token'
        token1 = self.client.Token(access_token=self.access_token)
        token2 = self.client.Token(access_token=new_access_token)
        self.assertNotEqual(token1._session, token2._session)
        self.assertEqual(
            token1._session.headers['authorization'], 'Bearer %s' % self.access_token)
        self.assertEqual(
            token2._session.headers['authorization'], 'Bearer %s' % new_access_token)
    @responses.activate
    def test_get_success(self):
        responses.add(responses.GET,
                      self.client.api_url + '/foo',
                      body='{"foo": "bar"}',
                      status=200,
                      content_type='application/vnd.dwolla.v1.hal+json')
        token = self.client.Token(access_token=self.access_token)
        res = token.get('foo')
        self.assertEqual(200, res.status)
        self.assertEqual({'foo': 'bar'}, res.body)
    @responses.activate
    def test_get_success_leading_slash(self):
        # a leading slash in the path resolves to the same endpoint
        responses.add(responses.GET,
                      self.client.api_url + '/foo',
                      body='{"foo": "bar"}',
                      status=200,
                      content_type='application/vnd.dwolla.v1.hal+json')
        token = self.client.Token(access_token=self.access_token)
        res = token.get('/foo')
        self.assertEqual(200, res.status)
        self.assertEqual({'foo': 'bar'}, res.body)
    @responses.activate
    def test_get_success_full_url(self):
        # an absolute URL on the API domain is accepted as-is
        responses.add(responses.GET,
                      self.client.api_url + '/foo',
                      body='{"foo": "bar"}',
                      status=200,
                      content_type='application/vnd.dwolla.v1.hal+json')
        token = self.client.Token(access_token=self.access_token)
        res = token.get(self.client.api_url + '/foo')
        self.assertEqual(200, res.status)
        self.assertEqual({'foo': 'bar'}, res.body)
    @responses.activate
    def test_get_success_different_domain(self):
        responses.add(responses.GET,
                      self.client.api_url + '/foo',
                      body='{"foo": "bar"}',
                      status=200,
                      content_type='application/vnd.dwolla.v1.hal+json')
        token = self.client.Token(access_token=self.access_token)
        res = token.get('https://foo.com/foo')
        self.assertEqual(200, res.status)
        self.assertEqual({'foo': 'bar'}, res.body)
    @responses.activate
    def test_get_error(self):
        # non-2xx responses surface as dwollav2.Error
        responses.add(responses.GET,
                      self.client.api_url + '/foo',
                      body='{"error": "bad"}',
                      status=400,
                      content_type='application/vnd.dwolla.v1.hal+json')
        token = self.client.Token(access_token=self.access_token)
        with self.assertRaises(dwollav2.Error):
            token.get('foo')
    @responses.activate
    def test_get_with_headers_success(self):
        responses.add(responses.GET,
                      self.client.api_url + '/foo',
                      body='{"foo": "bar"}',
                      status=200,
                      content_type='application/vnd.dwolla.v1.hal+json')
        token = self.client.Token(access_token=self.access_token)
        res = token.get('foo', None, self.more_headers)
        self.assertEqual(200, res.status)
        self.assertEqual({'foo': 'bar'}, res.body)
    @responses.activate
    def test_post_success(self):
        responses.add(responses.POST,
                      self.client.api_url + '/foo',
                      body='{"foo": "bar"}',
                      status=200,
                      content_type='application/vnd.dwolla.v1.hal+json')
        token = self.client.Token(access_token=self.access_token)
        res = token.post('foo')
        self.assertEqual(200, res.status)
        self.assertEqual({'foo': 'bar'}, res.body)
    @responses.activate
    def test_post_with_headers_success(self):
        responses.add(responses.POST,
                      self.client.api_url + '/foo',
                      body='{"foo": "bar"}',
                      status=200,
                      content_type='application/vnd.dwolla.v1.hal+json')
        token = self.client.Token(access_token=self.access_token)
        res = token.post('foo', None, self.more_headers)
        self.assertEqual(200, res.status)
        self.assertEqual({'foo': 'bar'}, res.body)
    @responses.activate
    def test_delete_success(self):
        responses.add(responses.DELETE,
                      self.client.api_url + '/foo',
                      body='{"foo": "bar"}',
                      status=200,
                      content_type='application/vnd.dwolla.v1.hal+json')
        token = self.client.Token(access_token=self.access_token)
        res = token.delete('foo')
        self.assertEqual(200, res.status)
        self.assertEqual({'foo': 'bar'}, res.body)
    @responses.activate
    def test_delete_with_headers_success(self):
        responses.add(responses.DELETE,
                      self.client.api_url + '/foo',
                      body='{"foo": "bar"}',
                      status=200,
                      content_type='application/vnd.dwolla.v1.hal+json')
        token = self.client.Token(access_token=self.access_token)
        res = token.delete('foo', None, self.more_headers)
        self.assertEqual(200, res.status)
        self.assertEqual({'foo': 'bar'}, res.body)
|
import datetime
from collections import OrderedDict
import pandas as pd
from google.cloud import bigquery
CLIENT = None
PROJECT_ID = None
def insert_date_range(sql, date_range):
    """Append a WHERE clause to *sql* restricting `date` to *date_range*.

    *date_range* is a (start, end) pair; either bound may be None, meaning
    unbounded on that side. With both bounds None the query is unchanged.
    """
    start, end = date_range
    if start is not None and end is not None:
        clause = ' WHERE DATE("%s") <= `date` AND `date` <= DATE("%s")' % (start, end)
    elif end is not None:
        clause = ' WHERE `date` <= DATE("%s")' % end
    elif start is not None:
        clause = ' WHERE `date` >= DATE("%s")' % start
    else:
        clause = ''
    return sql + clause
# define helper fns:
def query_covariate_df_from_gbq(pid, date_range, covariate):
    """
    Query a table from Google BigQuery, via SQL.
    :param pid: patient id (str); also the BigQuery dataset name
    :param date_range: (start, end) date strings; either may be None
    :param covariate: `heartrate`, `steps`, `sleep`
    :return: DataFrame with columns UserID, Source, Value, date_time
             (plus n_sleep_seconds for the sleep covariate)
    """
    assert covariate in ['heartrate', 'steps', 'sleep']
    columns = ['Date', 'Time', 'Source', 'Value']
    if covariate != 'sleep':
        sql = """
        SELECT date, time, device, value
        FROM `%s.%s.%s`
        """ % (PROJECT_ID, pid, covariate)
    else:
        # sleep additionally carries the sleep-stage type and duration
        sql = """
        SELECT date, time, device, type, value
        FROM `%s.%s.%s`
        """ % (PROJECT_ID, pid, covariate)
        columns = ['Date', 'Time', 'Source', 'Value', 'n_sleep_seconds']
    sql = insert_date_range(sql, date_range)
    df = CLIENT.query(sql).to_dataframe()
    df.columns = columns
    try:
        # NOTE(review): at this point the frame has no 'date_time' column yet
        # (columns were just set above), so this branch always raises KeyError
        # and the except path below builds the column -- confirm and simplify.
        df['date_time'] = pd.to_datetime(df['date_time'])
    except KeyError:  # if there is SHIT it in the db
        df['date_time'] = df['date_time'] = ['%s %s' % (d, t) for d, t in zip(df['Date'].values, df['Time'].values)]
        df['date_time'] = pd.to_datetime(df['date_time'])
    df.drop(['Date', 'Time'], inplace=True, axis=1)
    # df = df.set_index('date_time').drop('Test', axis=0).reset_index()
    # df['date_time'] = pd.to_datetime(df['date_time'])
    df['UserID'] = pid
    if covariate == 'sleep':
        df = df[['UserID', 'Source', 'Value', 'n_sleep_seconds', 'date_time']]
        df['n_sleep_seconds'] = pd.to_numeric(df['n_sleep_seconds'])
    else:
        df = df[['UserID', 'Source', 'Value', 'date_time']]
        df['Value'] = pd.to_numeric(df['Value'])
    return df
def preprocess_covariate_df(pid, pid_df, covariate):
    """
    Preprocess a covariate dataframe:
    - expand data to 1 min resolution
    - expand sleep data
    :param pid: patient id (str)
    :param pid_df: raw frame from query_covariate_df_from_gbq
    :param covariate: `heartrate`, `steps` or `sleep`
    :return: frame indexed by (device, date_time)
    :raises OSError: if no device produced any rows
    """
    pid_df_expanded = []
    # do the following per device and concatenate afterwards.
    for device, ddf in pid_df.groupby('Source'):
        if covariate == 'sleep':
            # apple hk data
            if any(['InBed' in ddf['Value'].unique(), 'Asleep' in ddf['Value'].unique()]):
                ddf.columns = ['uid', 'device', 'sleep', 'date_time']
            elif ddf.empty:
                ddf.columns = ['uid', 'device', 'sleep', 'date_time']
                ddf = ddf.set_index('date_time').resample('T').median().reset_index()
                ddf['sleep'] = 0.
            # fitbit data
            # NOTE(review): 'alseep' below looks like a typo for 'asleep' --
            # confirm against the actual values stored in the DB.
            elif any(['rem' in ddf['Value'].unique(),
                      'awake' in ddf['Value'].unique(),
                      'wake' in ddf['Value'].unique(),
                      'deep' in ddf['Value'].unique(),
                      'restless' in ddf['Value'].unique(),
                      'alseep' in ddf['Value'].unique(),
                      'unknown' in ddf['Value'].unique(),
                      ]):
                # we need to expand: one row per minute of the recorded stage
                expanded_dfs = []
                for i, r in ddf.iterrows():
                    n_mins = r['n_sleep_seconds'] // 60
                    df = pd.DataFrame([r['Value']] * n_mins,
                                      index=pd.date_range(r['date_time'].round(freq='T'), periods=n_mins, freq='T'))
                    df['uid'] = r['UserID']
                    expanded_dfs.append(df)
                ddf = pd.concat(expanded_dfs, sort=True, axis=0)
                # delete dublicate indices:
                ddf = ddf.loc[~ddf.index.duplicated(keep='first')]
                ddf.reset_index(inplace=True)
                ddf.columns = ['date_time', 'sleep', 'uid'] # sort out the user ID
            else: # corrupted fitbit data
                ddf.columns = ['uid', 'device', 'sleep', 'date_time']
                uid = ddf['uid'].unique()[0]
                ddf['sleep'] = 0.
                ddf = ddf.set_index('date_time').resample('T').median().reset_index()
                ddf['uid'] = uid
                ddf['device'] = device
                ddf = ddf[['uid', 'device', 'sleep', 'date_time']]
                ddf['sleep'] = ddf['sleep'].astype(float)
        elif covariate == 'steps':
            # steps are summed-like counts; mean over each minute
            ddf.columns = ['uid', 'device', 'steps', 'date_time']
            ddf['steps'] = ddf['steps'].astype(float)
            ddf = ddf.set_index('date_time').resample('T').mean().reset_index()
        elif covariate == 'heartrate':
            ddf.columns = ['uid', 'device', 'heart_rate', 'date_time']
            ddf['heart_rate'] = ddf['heart_rate'].astype(float)
            ddf = ddf.set_index('date_time').resample('T').median().reset_index()
        ddf['uid'] = pid
        ddf['device'] = device
        ddf = ddf.loc[~ddf.index.duplicated(keep='first')]
        pid_df_expanded.append(ddf)
    try:
        pid_df = pd.concat(pid_df_expanded, axis=0)
    except ValueError:
        # pd.concat raises ValueError on an empty list of frames
        raise OSError('Empty input files!')
    pid_df = pid_df.set_index(['device', 'date_time']).sort_index()
    return pid_df
def get_PID_df_per_device(pid, dfs, devices=['fitbit'], ndays=1000):
    """
    This returns a pid_df per device in the input .csvs or .jsons
    Possible Devices:
    ['FB-Fitbit', # Fitbit
    'HK-Connect', # Garmin
    'HK-Health', # ??
    'HK-iPhone', # Phone -> Steps only
    'HK-Motiv', # motiv ring
    'HK-Apple', # apple watch
    'HK-Biostrap' # Biostrap
    ]
    :param pid: patient id, written into the 'uid' column
    :param dfs: dict of covariate -> frame indexed by (device, date_time);
                expected to hold exactly three covariates (see joins below)
    :param devices: device names to extract
    :param ndays: keep only the trailing ndays of data per device
    :return: OrderedDict device -> joined frame indexed by date_time
    """
    data_per_device = OrderedDict()
    for d in devices:
        p_dfs = []
        for covariate in dfs.keys():
            try:
                p_dfs.append(dfs[covariate].xs(d, level='device', drop_level=True).drop('uid', axis=1))
            except KeyError:
                # device absent for this covariate: substitute an empty frame
                print('No %s data found for %s' % (covariate, d))
                pdf = pd.DataFrame(columns=[covariate])
                pdf.index.name = 'date_time'
                p_dfs.append(pdf)
        # outer-join the three covariate frames on date_time
        # (indices 0/1/2 assume exactly three covariates in `dfs`)
        device_df = p_dfs[0].join(p_dfs[1], how='outer')
        device_df = device_df.join(p_dfs[2], how='outer')
        try:
            # trim to the most recent `ndays` window
            last_timestamp = device_df.index.values[-1]
            limit = last_timestamp - pd.Timedelta(days=ndays)
            device_df = device_df.loc[limit:last_timestamp]
        except IndexError:
            pass
        device_df['uid'] = pid
        if device_df.index.name != 'date_time':
            device_df.reset_index(inplace=True)
            device_df.set_index('date_time', inplace=True)
        # drop rows where both heart_rate and steps are missing
        device_df.dropna(subset=['heart_rate', 'steps',
                                 # 'sleep'
                                 ], axis=0, thresh=1, inplace=True)
        device_df[['heart_rate', 'steps']] = device_df[['heart_rate', 'steps']].astype(float)
        data_per_device[d] = device_df
    return data_per_device
def impute_PID_df(in_df, slen, granularity, **kwargs):
    """
    The main preprocessing function.
    IMPORTANT: As we reasample, we need to binarize the sleep before doing this.
    :param in_df: per-device frame indexed by date_time with a 'uid' column
    :param slen: expected number of samples per full day
    :param granularity: pandas resample frequency string, e.g. '5T'
    :return: imputed frame, or an empty frame if no day had enough data
    """
    uid = in_df['uid'].unique()
    assert len(uid) == 1, 'There must be exactly 1 ID per user.'
    # NOTE(review): drop() without inplace/reassignment is a no-op -- 'uid'
    # remains a column here; presumably `in_df = in_df.drop(...)` was meant.
    in_df.drop('uid', axis=1)
    in_df = in_df[in_df['heart_rate'] >= 20] # hard cut-off for HR as HR of 20 is non-realistic
    # binarize the sleep:
    in_df['sleep'] = in_df['sleep'].map(dict([('awake', 0),
                                              ('wake', 0),
                                              ('unknown', 1),
                                              ('light', 1),
                                              ('deep', 1),
                                              ('restless', 1),
                                              ('rem', 1),
                                              ('asleep', 1),
                                              ('Asleep', 1),
                                              ('InBed', 0),
                                              ('NaN', 0)]))
    sleep_df = in_df.copy()
    # where HR or steps exist, treat missing sleep as awake (0)
    sleep_df.loc[~sleep_df[['heart_rate', 'steps']].isnull().all(axis=1), 'sleep'] = sleep_df.loc[
        ~sleep_df[['heart_rate', 'steps']].isnull().all(axis=1), 'sleep'].fillna(0.)
    # resample
    in_df = in_df.resample(granularity).median()
    # NOTE(review): sleep_df.resample(...).max() is a whole DataFrame, not a
    # Series; assigning it to a single column may fail on recent pandas --
    # presumably sleep_df['sleep'].resample(granularity).max() was intended.
    in_df['sleep'] = sleep_df.resample(granularity).max()
    # set the steps to 0, where we have sleep == 1
    in_df.loc[in_df['sleep'] == 1, 'steps'] = 0
    # now extend the index of days that have x% of slen, and fill the nans w/ the average in sleep stratification
    in_df.dropna(thresh=1, axis=0, inplace=True)
    days = []
    for n, d in in_df.groupby(pd.Grouper(freq='D')):
        # NOTE(review): exclusioncounter is re-initialised every iteration and
        # never read -- it cannot count excluded days as presumably intended.
        exclusioncounter = 0
        if len(d.index.values) >= .5 * slen:
            # get the date and reindex:
            date = d.index[0].date()
            # create full range:
            full_day_index = pd.date_range(date, periods=slen, freq=granularity)
            d = d.reindex(full_day_index)
            days.append(d)
        else:
            exclusioncounter += 1
    try:
        in_df = pd.concat(days)
    except ValueError:
        # no day had enough samples
        return pd.DataFrame({'Empty': []})
    in_df, _, _ = fill_nans_w_stratified_average(in_df, slen, granularity)
    # This dropna is very important: Drop the hours for which we did not have data!!
    in_df.dropna(axis=0, inplace=True)
    # keep only complete days
    in_df = in_df.groupby(pd.Grouper(freq='D')).filter(lambda x: len(x.index.values) == slen)
    # binarize the sleep:
    s = in_df['sleep']
    in_df.loc[:, 'sleep'] = s.where(s == 0., 1.).values
    assert in_df.shape[0] / slen == float(in_df.shape[0] // slen)
    in_df['uid'] = uid[0]
    # ensure numeric:
    in_df[[c for c in in_df.columns if c != 'uid']] = in_df[[c for c in in_df.columns if c != 'uid']].apply(
        pd.to_numeric)
    return in_df
def get_average_per_granularity(df):
    """Compute the typical value for each 30-minute slot of the day.

    The frame is first resampled to 30-minute medians, the timestamps are
    reduced to their time-of-day component, and the medians are then taken
    across all days for each slot.

    :param df: input frame with a datetime index
    :return: frame indexed by `time_unit` (a datetime.time per 30-min slot)
    """
    per_slot = df.resample('30T').median()
    per_slot.index = [ts.time() for ts in per_slot.index]
    per_slot.index.name = 'time_unit'
    return per_slot.groupby('time_unit').median()
def get_stratified_average_per_granularity(df, slen, granularity, **kwargs):
    """
    Calculate the medians/means per granularity STRATIFIED BY SLEEP and return a df that holds these values.
    :param df: the input df to calculate the averages with
    :param slen: expected number of samples per full day
    :param granularity: resample frequency, e.g. '5T'
    :return: (dict sleep-value -> per-time_unit frame, list of NaN counts)
    """
    # stratify by sleep:
    dfs = dict()
    nulls = []
    for n, g in df.groupby('sleep'):
        if pd.isnull(n):
            continue
        # resample (will introduce 'NaNs' if no values
        res_df = g.resample('30T').mean()
        res_df.index = [h.time() for h in res_df.index]
        res_df.index.name = 'time_unit'
        # after the median NaNs migth be reduced but not resolved.
        # NOTE(review): uses mean() although the sibling helper uses median
        # and the original comments said "median" -- confirm intended.
        res_df = res_df.groupby('time_unit').mean()
        # now assert that res_df has all hours:
        if res_df.shape[0] < slen:
            # rebuild the full grid of time-of-day slots for this granularity
            time_units = []
            for i in range(0, 24):
                time_units.extend([
                    datetime.time(i, j) for j in range(0, 60, int(granularity.strip('T')))
                ])
            res_df = res_df.reindex(pd.Index(time_units))
            res_df.index.name = 'time_unit'
        nulls.append(sum(res_df.isnull().sum()))
        # fill whats left with the median of the res_df (as this is stratified as well)
        res_df = res_df.fillna(res_df.mean())
        assert sum(res_df.isnull().sum()) == 0
        dfs[n] = res_df
    return dfs, nulls
def fill_nans_w_stratified_average(df, slen, granularity, **kwargs):
    """
    Fills the NaNs by sleep distribution.
    :param df: frame with heart_rate/steps/sleep columns and datetime index
    :param slen: expected number of samples per full day
    :param granularity: resample frequency, e.g. '5T'
    :return: (filled frame, number of imputed values, NaN counts per stratum)
    """
    df = df.astype('float')
    impute_count = 0
    # ensure that sleep is binary:
    dfs, nulls = get_stratified_average_per_granularity(df.copy(), slen, granularity)
    imputed = []
    for n, g_df in df.groupby('sleep'):
        # NOTE(review): groupby() skips NaN group keys, so this branch looks
        # unreachable; if it ever fired, g_df would be appended here AND again
        # at the bottom of the loop (no `continue`). Confirm intent.
        if pd.isnull(n):
            imputed.append(g_df)
        # rows where both steps and heart_rate are missing get the
        # sleep-stratified slot average
        complete_missing = g_df.loc[g_df[['steps', 'heart_rate']].isnull().all(axis=1)].index
        for t_idx in complete_missing:
            impute_count += 2 # as we fill 2 values (steps + heart_rate)
            h = t_idx.time()
            g_df.loc[t_idx, ['steps', 'heart_rate']] = dfs[n].loc[h, ['steps', 'heart_rate']]
        # now fill the remaining NaNs (we might have had NaNs in the average_df:)
        for c in [c for c in g_df.columns if c != 'sleep']:
            for t in g_df.loc[g_df[c].isnull(), c].index:
                h = t.time()
                g_df.loc[t, c] = dfs[n].loc[h, c]
        imputed.append(g_df)
    imputed.append(df[df['sleep'].isnull()])
    del df
    df = pd.concat(imputed, axis=0)
    del imputed
    df.sort_index(inplace=True)
    # now, where sleep is missing, we fill by the median over the complete data including sleep:
    df = df.astype('float')
    average_df = get_average_per_granularity(df)
    daily_median_df = df.groupby(pd.Grouper(freq='D')).median() # the medians per day
    complete_missing = df.loc[df[df.columns].isnull().all(axis=1)].index
    for t_idx in complete_missing:
        impute_count += 3 # as we fill 3 values
        h = roundtime(t_idx.to_pydatetime(), 60 * 30).time()
        df.loc[t_idx, :] = average_df.loc[h]
    for c in df.columns:
        for t in df.loc[df[c].isnull(), c].index:
            # h = round_time(t.time(), 30*60)
            h = roundtime(t.to_pydatetime(), 60 * 30).time()
            d = t.date()
            if c != 'sleep':
                # prefer the slot average; fall back to the day's median
                if not pd.isnull(average_df.loc[h, c]):
                    df.loc[t, c] = average_df.loc[h, c]
                else:
                    df.loc[t, c] = daily_median_df.loc[d, c]
    return df, impute_count, nulls
def roundtime(dt=None, roundTo=60):
    """Round a datetime object to any time laps in seconds
    dt : datetime.datetime object, default now.
    roundTo : Closest number of seconds to round to, default 1 minute.
    Author: Thierry Husson 2012 - Use it as you want but don't blame me.
    """
    if dt is None:  # `is None`, not `== None` (PEP 8; robust to custom __eq__)
        dt = datetime.datetime.now()
    seconds = (dt - dt.min).seconds
    # // is a floor division, not a comment on following line:
    rounding = (seconds + roundTo / 2) // roundTo * roundTo
    return dt + datetime.timedelta(0, rounding - seconds, -dt.microsecond)
def upload_to_gpq(df, pid):
    """
    Upload a df of preprocessed data for pid to gbq

    Side effect: resets the index of `df` in place (moving date_time into a
    column) before uploading; the table `<pid>.preprocessed` is replaced.
    """
    # This pandas implementation is slow! @Diego: rewriting to native GBQ would be much faster!
    df.index.name = 'date_time'
    df.reset_index(inplace=True)
    # NOTE(review): project id is hard-coded here instead of using the
    # module-level PROJECT_ID configured by setup() -- confirm intended.
    df.to_gbq('%s.preprocessed' % pid,
              project_id='phd-project',
              chunksize=None,
              if_exists='replace')
def main(pid, date_range, slen, granularity, **kwargs):
    """Run the full per-patient pipeline: query, preprocess, impute, upload.

    :param pid: patient id / BigQuery dataset name
    :param date_range: (start, end) date strings; either may be None
    :param slen: samples per day; defaults to 288 (5-minute slots)
    :param granularity: resample frequency; defaults to '5T'
    """
    if slen is None: slen = 288
    if granularity is None: granularity = '5T'
    covariate_dfs = OrderedDict()
    for covariate in ['heartrate', 'steps', 'sleep']:
        try:
            covariate_df = query_covariate_df_from_gbq(pid, date_range, covariate)
            covariate_df = preprocess_covariate_df(pid, covariate_df, covariate)
            covariate_dfs[covariate] = covariate_df
        except NotImplementedError:
            # fall back to an empty frame for this covariate
            covariate_dfs[covariate] = pd.DataFrame(columns=['uid', covariate])
        except OSError:
            # raised by preprocess_covariate_df on empty input -- skip patient
            return
    pid_device_dfs = get_PID_df_per_device(pid, covariate_dfs, devices=['fitbit'], ndays=100)
    fitbit_df = pid_device_dfs['fitbit']
    imputed_fitbit_df = impute_PID_df(fitbit_df, slen, granularity)
    upload_to_gpq(imputed_fitbit_df, pid)
def setup(project_id):
    """Initialise the module-level BigQuery client and project id globals."""
    global CLIENT, PROJECT_ID
    CLIENT = bigquery.Client()
    PROJECT_ID = project_id
# Must return arguments to be passed onto training.
def preprocess(pid, args):
    """Per-patient entry point: configure BigQuery access, run the pipeline,
    and return the (currently empty) arguments for the training stage."""
    setup(args['project_id'])
    date_range = (args['start_date'], args['end_date'])
    main(pid, date_range, args['slen'], args['granularity'])
    return {}
|
from Tkinter import *
from os import *
class GUI(Frame):
    """Simple Tkinter window (Python 2): a scrollable file list with
    refresh/load buttons on the left, and a drawing canvas with three
    detector buttons plus an EXIT button."""
    def __init__(self, parent, *args, **kwargs):
        Frame.__init__(self, parent, *args, **kwargs)
        #VARIABLES
        self.filelist=[]
        self.file=""
        ##############################
        self.win = parent
        self.win.geometry("1024x800")
        self.win.title("GUI-V1")
        #FRAMES
        self.f1 = Frame(self.win) # TREE
        self.f1_B = Frame(self.f1) # buttons
        self.f1_L = Frame(self.f1) # list
        self.f2 = Frame(self.win) # canvas
        self.f2_B = Frame(self.f2) # buttons
        # BUTTONS
        self.b1 = Button(self.win, text="EXIT",bg="red", fg="white") # EXIT
        self.b2 = Button(self.f1_B, text="REFRESH") # TREE
        self.b3 = Button(self.f1_B, text="LOAD")
        self.b4 = Button(self.f2_B, text="DETECTOR 1") # CANVAS
        self.b5 = Button(self.f2_B, text="DETECTOR 2")
        self.b6 = Button(self.f2_B, text="SP")
        # LABELS
        # LISTBOX
        self.lb1 = Listbox(self.f1_L, height=5)
        # SCROLLBARS
        self.sb1 = Scrollbar(self.f1_L, orient=VERTICAL)
        # canvas
        self.canvas=Canvas(self.f2,width=600,height=600)
        ############################################
        #PACKING
        self.b2.pack(side=LEFT)
        self.b3.pack(side=LEFT)
        self.b4.pack(side=LEFT)
        self.b5.pack(side=LEFT)
        self.b6.pack(side=LEFT)
        self.sb1.pack(side=LEFT,fill=Y)
        self.lb1.pack(side=LEFT)
        self.f1_B.pack(side=TOP)
        self.f1_L.pack(side=TOP)
        self.f2_B.pack(side=BOTTOM)
        self.canvas.pack(side=TOP)
        #GRID SETUP
        self.b1.grid(row=25, column=25)
        self.f1.grid(row=0,column=0)
        self.f2.grid(row=1, column= 10)
        #Configuration
        self.b1.configure(command=self.EXIT)
        self.b2.configure(command=self.REFRESH)
        self.b3.configure(command=self.LOAD)
        self.b4.configure(command=self.D1)
        self.b5.configure(command=self.D2)
        self.b6.configure(command=self.SP)
        self.sb1.configure(command=self.lb1.yview)
        self.lb1.configure(yscrollcommand=self.sb1.set)
        self.canvas.configure(background='white')
        self.REFRESH()
    ##############################################
    #FUNCTIONS
    def LOAD(self): # for now it only stores the selected file name
        self.file=str( self.filelist [int(self.lb1.curselection()[0])] )
        print self.file
    def REFRESH(self):
        # rebuild the listbox with the files of the current directory,
        # excluding this script and its compiled form
        self.lb1.delete(0, END)
        self.filelist=[f for f in listdir(".") if path.isfile(path.join(".",f)) if f != "gui.py" if f != "gui.pyc" ]
        #print self.filelist
        for i in self.filelist:
            self.lb1.insert(END,i)
    #CANVAS PLOT
    def D1(self):
        self.canvas.delete("all")
        self.canvas.create_line(0, 0, 200, 100)#placeholder drawing for now
    def D2(self):
        self.canvas.delete("all")
        self.canvas.create_line(0, 400, 200, 120)#placeholder drawing for now
    def SP(self):
        self.canvas.delete("all")
        self.canvas.create_line(23, 0, 200, 400)#placeholder drawing for now
    def EXIT(self):
        exit()
if __name__ == "__main__":
    # Build the Tk root window, attach the GUI and enter the event loop.
    window = Tk()
    GUI(window)
    window.mainloop()
|
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import networkx as nx
import numpy as np
import logging as log
from mo.ops.op import Op
class FlattenONNX(Op):
    """Model Optimizer operation for the ONNX Flatten operator.

    Internally represented as a Reshape whose target 'dim' collapses
    every axis before 'axis' into one dimension and every remaining
    axis into a second one.
    """
    op = 'Flatten'
    enabled = True

    def __init__(self, graph: nx.MultiDiGraph, attrs: dict):
        mandatory_props = {
            'type': 'Reshape',
            'op': __class__.op,
            'infer': __class__.infer,
        }
        super().__init__(graph, mandatory_props, attrs)

    def supported_attrs(self):
        # 'dim' is serialized as a comma-separated list of its entries.
        return ['axis', ('dim', lambda node: ','.join(map(str, node['dim'])))]

    @staticmethod
    def infer(node):
        """Infer the output shape of a Flatten node as ONNX defines it.

        Args:
            node: graph flatten node
        """
        if not node.has_valid('axis'):
            log.debug('Can\'t calculate output shape for {} node due to missing axis attribute'.format(node.name))
            return
        input_shape = node.in_node(0).shape
        if input_shape is None:
            log.debug('Can\'t calculate output shape for {} node due to shape for input node is None'.format(node.name))
            return
        if len(node.in_nodes()) != 1:
            log.debug('Can\'t calculate output shape for {} node. Number of input nodes should be equal 1 instead of {}'.format(node.name, len(node.in_nodes())))
            return
        split = node.axis
        # Collapse to a 2-D shape: [prod(dims before axis), prod(dims from axis on)].
        flattened = [np.prod(input_shape[0:split]), np.prod(input_shape[split:])]
        node['dim'] = np.array(flattened)
        node.out_node().shape = np.array(flattened)
import random
class RockPaperScissors:
    """Extended rock-paper-scissors game with a persistent rating file.

    The full 15-option cycle is ordered so that each option beats the
    (len-1)//2 = 7 options that follow it in the list (wrapping around).
    Scores are read from 'rating.txt' ('<name> <score>' per line).
    """
    def __init__(self):
        self.all_options = ['rock', 'fire', 'scissors', 'snake', 'human', 'tree', 'wolf', 'sponge',
                            'paper', 'air', 'water', 'dragon', 'devil', 'lightning', 'gun']
        self.default_options = ['rock', 'scissors', 'paper']
        self.options = []

    def test_win(self, user_choice, auto_choice):
        """Score one round against the computer.

        Returns 50 for a draw, 100 if user_choice beats auto_choice,
        0 otherwise. A choice beats the half of all_options that
        cyclically follows it.
        """
        if user_choice == auto_choice:
            print(f"There is a draw ({auto_choice})")
            return 50
        # Collect the 7 options (cyclically) after user_choice: those lose to it.
        beaten = []
        i = self.all_options.index(user_choice) + 1
        while len(beaten) < (len(self.all_options) - 1) // 2:
            if i >= len(self.all_options):
                i = 0
            beaten.append(self.all_options[i])
            i += 1
        if auto_choice not in beaten:
            print(f"Sorry, but the computer chose {auto_choice}")
            return 0
        else:
            print(f"Well done. The computer chose {auto_choice} and failed")
            return 100

    @staticmethod
    def get_score(player):
        """Return the player's saved score from rating.txt, or 0.

        FIX: the file is now opened with a context manager (it leaked on
        a parse error before) and a missing rating file is treated as an
        empty rating table instead of crashing with FileNotFoundError.
        """
        players = {}
        try:
            with open('rating.txt') as f:
                for line in f:
                    parts = line.split()  # split once per line
                    players[parts[0]] = parts[1]
        except FileNotFoundError:
            return 0
        return int(players[player]) if player in players else 0

    def play(self):
        """Interactive loop: read a name, optional option list, then rounds.

        Commands: '!exit' quits, '!rating' shows the running score; any
        option in the active option set plays a round.
        """
        user_name = input("Enter your name: ")
        score = self.get_score(user_name)
        print("Hello,", user_name)
        user_options = input().strip()
        if user_options != '':
            self.options = user_options.split(',')
        else:
            self.options = self.default_options
        print("Okay, let's start")
        while True:
            user_input = input()
            if user_input == '!exit':
                print("Bye!")
                break
            elif user_input == '!rating':
                print(f"Your rating: {score}")
            elif user_input in self.options:
                score += self.test_win(user_input, random.choice(self.options))
            else:
                print("Invalid input")
# FIX: guard the entry point so importing this module no longer starts
# the interactive game (it previously blocked on input() at import time).
if __name__ == "__main__":
    my_game = RockPaperScissors()
    my_game.play()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.