text string | size int64 | token_count int64 |
|---|---|---|
from .datetime_ import * | 25 | 9 |
# -*- coding: utf-8 -*-
from collections import Iterable
import click
def add_commands(click_group, commands):
    """Attach a collection of click commands/groups to a click group.

    Args:
        click_group: the click.core.Group to extend.
        commands: an iterable of click.core.Command (or click.core.Group) objects.

    Raises:
        TypeError: if click_group is not a Group, commands is not iterable, or
            any element is not a Command/Group.
    """
    # `Iterable` must come from collections.abc: the old `collections` alias
    # (used at the top of this file) was removed in Python 3.10.
    from collections.abc import Iterable

    if not isinstance(click_group, click.core.Group):
        raise TypeError(
            f"add_commands() expects click.core.Group for click_group, got {type(click_group)}"
        )
    if not isinstance(commands, Iterable):
        raise TypeError(
            f"add_commands() expects an Iterable type for commands, got {type(commands)}"
        )
    for command in commands:
        # click.core.Group subclasses click.core.Command, but both are named in
        # the documented contract, so check against the tuple explicitly.
        if not isinstance(command, (click.core.Command, click.core.Group)):
            raise TypeError(
                f"commands must be of type click.core.Command or click.core.Group, got {type(command)}"
            )
        click_group.add_command(command)
| 811 | 224 |
"""Script to generate a Snakebids project."""
import argparse
import os
from pathlib import Path
from cookiecutter.main import cookiecutter
import snakebids
from snakebids.app import SnakeBidsApp
from snakebids.cli import add_dynamic_args
def create_app(_):
    """Run cookiecutter on the project template bundled with snakebids.

    The single argument is the parsed argparse namespace; it is unused.
    """
    template = os.path.join(snakebids.__path__[0], "project_template")
    cookiecutter(template)
def create_descriptor(args):
    """Generate a Boutiques descriptor for the Snakebids app at args.app_dir."""
    # pylint: disable=unsubscriptable-object
    out_path = args.out_path
    app = SnakeBidsApp(args.app_dir.resolve())
    add_dynamic_args(app.parser, app.config["parse_args"], app.config["pybids_inputs"])
    app.create_descriptor(out_path)
    print(f"Boutiques descriptor created at {out_path}")
def gen_parser():
    """Build the administrative CLI parser with 'create' and 'boutiques' subcommands."""
    root = argparse.ArgumentParser(
        description="Perform administrative Snakebids tasks."
    )
    commands = root.add_subparsers(required=True, dest="command")

    # `create`: scaffold a new app from the project template.
    create = commands.add_parser("create", help="Create a new Snakebids app.")
    create.set_defaults(func=create_app)

    # `boutiques`: emit a Boutiques descriptor for an existing app.
    boutiques = commands.add_parser(
        "boutiques", help="Create a Boutiques descriptor for an existing Snakebids app."
    )
    boutiques.add_argument(
        "out_path",
        type=Path,
        help="Path for the output Boutiques descriptor. Should be a .json file.",
    )
    boutiques.add_argument(
        "--app_dir",
        type=Path,
        default=".",
        help="Location of the Snakebids app. Defaults to the current directory.",
    )
    boutiques.set_defaults(func=create_descriptor)
    return root
def main():
    """CLI entry point: parse arguments and dispatch to the selected subcommand."""
    cli_args = gen_parser().parse_args()
    cli_args.func(cli_args)
| 1,706 | 545 |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 23 21:54:45 2016
@author: ringoyen
"""
import numpy as np
import unittest
class tester(unittest.TestCase):
    """Unit tests for the ECG-processing helpers in estimateHeartRate."""

    def test_low_pass_filter(self):
        """
        Unit Test for a moving averager (convolution) to serve as a low pass filter to remove noise
        :return:
        """
        from estimateHeartRate import low_pass_filter
        # define inputs for function
        ecg_data_1 = np.array([1, 2, 3, 4, 5, 6, 7])
        ecg_data_2 = np.array([2, 2, 2, 2, 2, 2, 2])
        # define expected outputs for function
        # NOTE(review): these expected arrays are never asserted against (see note below).
        ecg_clean_1 = np.array([1.2, 2., 3., 4., 5., 4.4, 3.6])
        ecg_clean_2 = np.array([1.2, 1.6, 2., 2., 2., 1.6, 1.2])
        # Place inputs in function
        clean_ecg_1 = low_pass_filter(ecg_data_1)
        clean_ecg_2 = low_pass_filter(ecg_data_2)
        # Unit Tests
        # FIX: assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
        # NOTE(review): np.all(a) vs np.all(b) compares only the truthiness of
        # "all elements nonzero", not element-wise equality, so these checks are
        # nearly vacuous; np.testing.assert_allclose(clean_ecg_*, ecg_clean_*)
        # is probably what was intended. Left unchanged to keep test outcomes.
        self.assertEqual(np.all(ecg_data_1), np.all(clean_ecg_1), msg='low pass filter does not work!')
        self.assertEqual(np.all(ecg_data_2), np.all(clean_ecg_2), msg='low pass filter does not work!')
        self.assertEqual(len(ecg_data_1), len(clean_ecg_1), msg='low pass filter gives wrong output length!')

    def test_remove_dc_offset(self):
        """
        Unit Test for removing DC offset from data
        :return:
        """
        from estimateHeartRate import remove_dc_offset
        # define inputs for function
        a = np.array([5.0, 5.0, 5.0, 5.0])
        b = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
        c = np.array([3.45, 2.46, 4.32, 5.56])
        # Define known output of function
        ecgKnown_1 = [0.0, 0.0, 0.0, 0.0]
        ecgKnown_2 = [-2.0, -1.0, 0.0, 1.0, 2.0]
        ecgKnown_3 = [-0.4975, -1.4875, 0.3725, 1.61]
        # Place values in function
        noOffset_1 = remove_dc_offset(a)
        noOffset_2 = remove_dc_offset(b)
        noOffset_3 = remove_dc_offset(c)
        # NOTE(review): same truthiness-comparison caveat as in test_low_pass_filter.
        self.assertEqual(np.all(ecgKnown_1), np.all(noOffset_1), msg='ECG values incorrect')
        self.assertEqual(np.all(ecgKnown_2), np.all(noOffset_2), msg='ECG values incorrect')
        self.assertEqual(np.all(ecgKnown_3), np.all(noOffset_3), msg='ECG values incorrect')
        self.assertEqual(len(ecgKnown_1), len(noOffset_1))
        self.assertEqual(len(ecgKnown_2), len(noOffset_2))

    def test_findThreshold(self):
        """
        This tests the ability of the function to determine the maximum value in a given data
        stream, and use it to determine the threshold value
        """
        from estimateHeartRate import findThreshold
        # Define inputs for function
        a = np.linspace(1, 1000, 1000)
        f_s_1 = 100
        f_s_2 = 200
        # Define known output for function
        threshknown_1 = 425
        threshknown_2 = 850
        # Place values in function
        thresh_1 = findThreshold(a, f_s_1)
        thresh_2 = findThreshold(a, f_s_2)
        self.assertEqual(threshknown_1, thresh_1, msg='Threshold not correct')
        self.assertEqual(threshknown_2, thresh_2, msg='Threshold not correct')

    def test_find_instantaneous_heart_rate(self):
        """
        This tests the ability of the function to determine the times and ECG values
        when there is an actual heartbeat occuring. This is supposed to happen at the peaks (max values) of the
        traces. There are 2 sets of test arrays I am using
        """
        from estimateHeartRate import find_instantaneous_heart_rate
        # Define input vectors for function
        ecg_vals_1 = [1, 1, 1, 5, 6, 1, 1, 10, 1, 1, 11, 5]
        ecg_time_1 = [1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]
        ecg_vals_2 = [1, 4.5, 1, 1, 1, 6, 1]
        ecg_time_2 = [1., 2., 3., 4., 5., 6., 7.]
        ecg_threshold = 4
        # Define known outputs of function
        instant_hr_1 = [20.]
        instant_hr_2 = [15.]
        # Define function with inputs
        actual_hr_1 = find_instantaneous_heart_rate(ecg_vals_1, ecg_time_1, ecg_threshold)
        actual_hr_2 = find_instantaneous_heart_rate(ecg_vals_2, ecg_time_2, ecg_threshold)
        # FIX: the third positional argument of assertEqual already is `msg`;
        # the old literal 'msg = instant hr is wrong!' was a malformed message.
        self.assertEqual(instant_hr_1, actual_hr_1, 'instant hr is wrong!')
        self.assertEqual(instant_hr_2, actual_hr_2, 'instant hr is wrong!')
if __name__ == '__main__':
    # Entry point: discover and run all TestCase methods in this module.
    # (Table-export residue that had been fused onto this line was removed.)
    unittest.main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# https://github.com/shenwei356/bio_scripts
from __future__ import print_function
import argparse
import gzip
import logging
import os
import re
import sys
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
def parse_args():
    """Parse and validate command-line arguments for the DNA→peptide translator.

    Returns:
        argparse.Namespace with attributes: verbose, stdin, infile, format, table.

    Exits with status 1 (after writing to stderr) when neither --stdin nor -i is
    given, when -f is not 'fasta'/'fastq', or when --stdin is used without -f.
    """
    parser = argparse.ArgumentParser(description="Translate DNA to peptide")
    parser.add_argument("-v", "--verbose", help='verbosely print information',
                        action="count", default=0)
    group = parser.add_mutually_exclusive_group()
    group.add_argument("--stdin", action="store_true",
                       help='read from stdin, one sequence per line')
    group.add_argument('-i', '--infile', type=str,
                       help='file name should like this: infile.[fasta|fa|fastq|fq][.gz]')
    parser.add_argument('-f', '--format', type=str,  # default='fasta',
                        help='sequence format: fasta | fastq [fasta]')
    parser.add_argument('-t', '--table', type=int, default=1,
                        help='genetic code table (detail: http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi ) [1]')
    args = parser.parse_args()

    # One input source is mandatory.
    if not (args.stdin or args.infile):
        sys.stderr.write("option --stdin or -i should be given\n")
        sys.exit(1)
    # `x not in (...)` instead of the unidiomatic `not x in [...]`.
    if args.format and args.format not in ('fasta', 'fastq'):
        sys.stderr.write("option -f | --format should be 'fasta' or 'fastq'\n")
        sys.exit(1)
    # stdin has no filename to guess the format from.
    if args.stdin and not args.format:
        sys.stderr.write("option -f | --format should be given when --stdin is set.\n")
        sys.exit(1)
    return args
if __name__ == '__main__':
    args = parse_args()
    file, seq_format, fh = args.infile, args.format, None
    if file:
        if not seq_format:
            # Guess the format from the file suffix (case-insensitively).
            # FIX: `.` was unescaped in `(.gz)?`, matching any char before "gz".
            found = re.search(r'(?i)(fasta|fa|fastq|fq)(\.gz)?$', file)
            if not found:
                # FIX: the message said "fasfa" instead of "fasta".
                print("invalid file name suffix.\nfile name should like this: infile.[fasta|fa|fastq|fq][.gz]",
                      file=sys.stderr)
                sys.exit(1)
            # FIX: lowercase the match — the regex is case-insensitive, so an
            # uppercase suffix (e.g. ".FA") previously slipped past the aliases.
            seq_format = found.group(1).lower()
            if seq_format == 'fa':
                seq_format = 'fasta'
            elif seq_format == 'fq':
                seq_format = 'fastq'
        fh = gzip.open(file, 'rt') if file.endswith('.gz') else open(file, 'r')
    else:
        fh = sys.stdin
        seq_format = args.format

    # Translate every record and write the peptides as FASTA to stdout.
    for seq in SeqIO.parse(fh, seq_format):
        SeqIO.write([SeqRecord(seq.seq.translate(table=args.table), id=seq.id, description=seq.description)],
                    sys.stdout, 'fasta')

    # FIX: only close handles we opened ourselves; do not close sys.stdin.
    if fh is not sys.stdin:
        fh.close()
| 2,602 | 859 |
from Simulation.Network.Actions import send_message
class Network:
    """Simulated message network between Paxos proposers and acceptors.

    Messages are queued FIFO; delivery skips messages whose source or
    destination node has failed.
    """

    def __init__(self, proposers, acceptors):
        self.queue = []
        self.proposers = proposers
        self.acceptors = acceptors

    def queue_message(self, message):
        """Append a message to the in-flight queue."""
        self.queue.append(message)

    def extract_message(self):
        """Remove and return the first queued message whose endpoints are both
        alive, or None if no such message exists.

        (Removing then immediately returning is safe despite iterating the
        list being mutated, because iteration stops at the `return`.)
        """
        for message in self.queue:
            if not message.destination.failed and not message.source.failed:
                self.queue.remove(message)
                return message
        return None

    def deliver_messsage(self, message):
        """Print and dispatch a message via its type's handler.

        NOTE: the misspelled name is kept for backward compatibility with
        existing callers; prefer the `deliver_message` alias below.
        """
        print(f'{message.source} -> {message.destination} {message}', end='')
        send_message[message.message_type](self, message)

    # Correctly spelled, backward-compatible alias.
    deliver_message = deliver_messsage
| 682 | 188 |
from model.contact import Contacts
def test_contacts_list(app, db):
    """Verify the UI contact list matches the database contact list."""
    ui_list = app.contact.get_contacts_list()

    def clean(contact):
        # Normalize a contact down to the fields visible in the UI.
        # FIX: `id=id` passed the *builtin* id function, not the contact's id.
        return Contacts(id=contact.id, firstname=contact.firstname, middlename=contact.middlename,
                        lastname=contact.lastname, nickname=contact.nickname,
                        company=contact.company, title=contact.title, address=contact.address,
                        homephone=contact.homephone, mobilephone=contact.mobilephone,
                        workphone=contact.workphone,
                        email=contact.email, email2=contact.email2, email3=contact.email3)

    db_list = db.get_contacts_list()
    # NOTE(review): `clean` is defined but never applied; presumably db_list
    # entries should be passed through it before comparing — confirm intent.
    assert sorted(ui_list, key=Contacts.id_or_max) == sorted(db_list, key=Contacts.id_or_max)
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import AbstractUser
class User(AbstractUser):
    """Custom user model adding an author-permission flag and a premium-subscription expiry.

    (Verbose names / help texts are user-facing Persian strings and must not be changed.)
    """
    # Whether this user is allowed to publish posts.
    is_author = models.BooleanField(default=False, verbose_name='نویسنده', help_text='نشان میدهد که آیا این کاربر میتواند مطلب ارسال کند یا خیر.')
    # End of the premium subscription; default "now" means no prepaid time.
    premium_date = models.DateTimeField(default=timezone.now, verbose_name='پایان اشتراک')

    class Meta:
        verbose_name = 'کاربر'
        verbose_name_plural = 'کاربران'

    def __str__(self):
        return self.username

    def premium_days_remaining(self):
        """Whole days left on the premium subscription (negative once expired)."""
        return (self.premium_date - timezone.now()).days
    premium_days_remaining.short_description = 'مدت زمان باقیمانده از اشتراک ویژه'

    def has_premium(self):
        """True while the premium expiry date is still in the future."""
        return self.premium_date > timezone.now()
    has_premium.boolean = True
    has_premium.short_description = 'اشتراک ویژه'
import unittest
import os
from typing import Any, Text, NoReturn, Set, Union
from parameterized import parameterized
from pyfilter import FilterContext
from pyfilter import TextFilter
class TestFilter(unittest.TestCase):
    """Tests for TextFilter: construction, keyword update/replace/delete,
    and filtering of single strings, lists of strings, and files."""
    def setUp(self) -> Any:
        # A fresh filter for every test, built from these four criteria.
        self.any_inclusion_keywords: Set[Text] = {'dog', 'cat'}
        self.all_inclusion_keywords: Set[Text] = {'plane', 'car'}
        self.exclusion_keywords: Set[Text] = {'red', 'grassy'}
        self.regex_string: Text = '^[A-Za-z]'
        self.filter: TextFilter = TextFilter.new_filter(
            any_inclusion_keywords=self.any_inclusion_keywords,
            all_inclusion_keywords=self.all_inclusion_keywords,
            exclusion_keywords=self.exclusion_keywords,
            regex_string=self.regex_string
        )
    def test_init(self) -> NoReturn:
        # NOTE(review): list(<set>) has no guaranteed order; these equality checks
        # assume the filter stores keywords in the same iteration order as the
        # input sets — confirm TextFilter preserves it.
        self.assertEqual(self.filter.any_inclusion_filter.keywords, list(self.any_inclusion_keywords),
                         'The any_inclusion_keywords are different than the expected LIST of STRINGS of input data')
        self.assertEqual(self.filter.all_inclusion_filter.keywords, list(self.all_inclusion_keywords),
                         'The all_inclusion_keywords are different than the expected LIST of STRINGS of input data')
        self.assertEqual(self.filter.exclusion_filter.keywords, list(self.exclusion_keywords),
                         'The exclusion_keywords are different than the expected LIST of STRINGS of input data')
        self.assertEqual(self.filter.regex_filter.regex.pattern, self.regex_string,
                         'The regex pattern is different than expected')
        expected_default_context = FilterContext(casefold=True)
        self.assertEqual(self.filter.default_context, expected_default_context,
                         'The default context is different from the expected (casefold=True)')
    def test_update_keywords(self) -> NoReturn:
        # Updating with empty lists must leave the original keywords untouched.
        new_any_inclusion_keywords = []
        new_all_inclusion_keywords = []
        new_exclusion_keywords = []
        self.filter.update_keywords(
            any_inclusion_keywords=new_any_inclusion_keywords,
            all_inclusion_keywords=new_all_inclusion_keywords,
            exclusion_keywords=new_exclusion_keywords
        )
        self.assertEqual(self.filter.any_inclusion_filter.keywords,
                         list(self.any_inclusion_keywords) + list(new_any_inclusion_keywords),
                         'Incorrect any_inclusion_keywords after keyword update')
        self.assertEqual(self.filter.all_inclusion_filter.keywords,
                         list(self.all_inclusion_keywords) + list(new_all_inclusion_keywords),
                         'Incorrect all_inclusion_keywords after keyword update')
        self.assertEqual(self.filter.exclusion_filter.keywords,
                         list(self.exclusion_keywords) + list(new_exclusion_keywords),
                         'Incorrect exclusion_keywords after keyword update')
    @parameterized.expand([(['new_exclusion', 'kw'],),
                           (None,)])
    def test_set_keywords(self, new_exclusion_keywords: Union[Text, None]):
        # set_keywords replaces lists outright; passing None appears to keep the
        # previous list (see the `or` fallback in the assertion below).
        new_any_inclusion_keywords = ['new', 'keywords']
        new_all_inclusion_keywords = []
        new_regex_str = r'[A-Za-z0-9]'
        self.filter.set_keywords(
            any_inclusion_keywords=new_any_inclusion_keywords,
            all_inclusion_keywords=new_all_inclusion_keywords,
            exclusion_keywords=new_exclusion_keywords,
            regex_string=new_regex_str
        )
        self.assertEqual(self.filter.any_inclusion_filter.keywords,
                         new_any_inclusion_keywords,
                         'Incorrect any_inclusion_keywords after replacing keywords')
        self.assertEqual(self.filter.all_inclusion_filter.keywords,
                         [],
                         'Incorrect all_inclusion_keywords after replacing keywords')
        self.assertEqual(self.filter.exclusion_filter.keywords,
                         new_exclusion_keywords or list(self.exclusion_keywords),
                         'Incorrect exclusion_keywords after replacing keywords')
        self.assertEqual(self.filter.regex_filter.regex.pattern, new_regex_str,
                         'Failed to set new regex pattern')
    def test_delete_keywords(self) -> NoReturn:
        # Deleting an existing keyword removes it; a missing keyword is a no-op;
        # clear_regex=True drops the compiled pattern entirely.
        any_inclusion_keywords_to_delete = ['dog']
        all_inclusion_keywords_to_delete = ['nonexistent']
        self.filter.delete_keywords(
            any_inclusion_keywords=any_inclusion_keywords_to_delete,
            all_inclusion_keywords=all_inclusion_keywords_to_delete,
            clear_regex=True
        )
        self.assertEqual(self.filter.any_inclusion_filter.keywords,
                         ['cat'],
                         'Incorrect any_inclusion_keywords after deleting keywords')
        self.assertEqual(self.filter.all_inclusion_filter.keywords,
                         list(self.all_inclusion_keywords),
                         'Incorrect all_inclusion_keywords after deleting keywords')
        self.assertEqual(self.filter.exclusion_filter.keywords,
                         list(self.exclusion_keywords),
                         'Incorrect exclusion_keywords after deleting keywords')
        self.assertEqual(self.filter.regex_filter.regex, None,
                         'Failed to delete regex pattern')
    @parameterized.expand([("Planes and cars don't allow dogs", True, False),
                           ("Dogs and cats but not the other keywords", False, False),
                           ("Well we have a cat in the car but on on the red plane", False, False),
                           ("The plane carries cats and cars", True, True),
                           ("Just a car and a plane but no pets", False, False),
                           ('123regex fail filter plane cats cars', False, False)])
    def test_singular_filter(self, input_string: Text,
                             expected_with_casefold: bool, expected_without_casefold: bool):
        # Each case lists the expected verdict with and without case folding.
        self.assertEqual(self.filter.filter(input_string, casefold=True), expected_with_casefold)
        self.assertEqual(self.filter.filter(input_string, casefold=False), expected_without_casefold)
    def test_multi_filter(self):
        # multi_filter keeps only the inputs that pass all combined criteria.
        input_list = ['cat plane car', 'dog cat', 'cat plane car grassy', '']
        result = self.filter.multi_filter(input_list)
        expected_result = ['cat plane car']
        self.assertEqual(result, expected_result)
    @parameterized.expand([('passing_file.txt', True, True),
                           ('casefold_passing_file.txt', True, False),
                           ('failing_file_1.txt', False, False),
                           ('failing_file_2.txt', False, False),
                           ('failing_file_3.txt', False, False)])
    def test_file_filter(self, filename: Text,
                         expected_with_casefold: bool, expected_without_casefold: bool):
        # Requires fixture files under test_files/ to exist on disk.
        fp = os.path.join('test_files', filename)
        for casefold in (True, False):
            for safe in (True, False):
                result = self.filter.file_filter(fp, safe=safe, casefold=casefold)
                expected = expected_with_casefold if casefold else expected_without_casefold
                self.assertEqual(result, expected)
| 7,345 | 2,026 |
#!/usr/bin/env python3
"""James Bond Theme, Composed by Monty Norman
Adapted for Roomba by Zach White
"""
from time import sleep
from pyroombaadapter import PyRoombaAdapter
notes0 = (
    # note, duration
    # Opening chromatic vamp (B–C–C#–C), stated twice.
    'B3', '1/2',
    'C4', '1/2',
    'C#4', '1/2',
    'C4', '1/2',
    'B3', '1/2',
    'C4', '1/2',
    'C#4', '1/2',
    'C4', '1/2',
)
notes1 = ( # 3 times
    # Main riff; the playlist below queues this segment three times in a row.
    'E4', '1/8',
    'F#4', '1/16',
    'F#4', '1/16',
    'F#4', '1/8',
    'F#4', '1/4',
    'E4', '1/8',
    'E4', '1/8',
    'E4', '1/8',
    'E4', '1/8',
    'G4', '1/16',
    'G4', '1/16',
    'G4', '1/8',
    'G4', '1/4',
    'F#4', '1/8',
    'F#4', '1/8',
    'F#4', '1/8',
)
notes2 = (
    # Cadence phrase ending on a whole-note B.
    'D#5', '1/8',
    'D5', '1/2',
    'B4', '1/8',
    'A4', '1/8',
    'B4', '1'
)
notes3 = (
    # Bridge phrase. Durations with a 'd' prefix are presumably dotted values
    # interpreted by PyRoombaAdapter.send_song_cmd — confirm against its docs.
    'E4', '1/8',
    'G4', '1/4',
    'D#5', '1/8',
    'D5', 'd1/4',
    'G4', '1/8',
    'A#4', '1/8',
    'B4', 'd1/2',
    'G4', '1/4',
    'A4', '1/16',
    'G4', '1/16',
    'F#4', 'd1/4',
    'B3', '1/8',
    'E4', '1/8',
    'C#4', '1'
)
# Serial port the Roomba is attached to; full mode is set before playing.
PORT = "/dev/ttyUSB0"
roomba = PyRoombaAdapter(PORT)
roomba.change_mode_to_full()
print('Sending song')
# Upload the four segments as Roomba song slots 0-3 (the last argument is
# presumably a tempo/beat parameter — confirm against PyRoombaAdapter docs).
roomba.send_song_cmd(0, notes0, 5)
roomba.send_song_cmd(1, notes1, 5)
roomba.send_song_cmd(2, notes2, 5)
roomba.send_song_cmd(3, notes3, 5)
# Play the segments in song order; segment 1 repeats three times per verse.
for song_num in 0, 1, 1, 1, 2, 1, 1, 1, 2, 0, 3, 3, 0, 2:
    print('Playing segment', song_num)
    roomba.send_play_cmd(song_num)
    # Wait for it to start playing
    while roomba.readings['song_playing'] != 1:
        sleep(0.05)
    print('Segment Started')
    # Wait for it to stop playing
    # (faster poll here, presumably to minimize the gap before the next segment)
    while roomba.readings['song_playing'] != 0:
        sleep(0.005)
    print('Segment Ended')
print('Song Ended')
roomba.turn_off_power()
| 1,694 | 893 |
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.views import generic
from . import models
def profile_view(request):
    """Render the current user's profile page.

    NOTE(review): `login_required` is imported in this module but not applied
    here — presumably this view should require authentication; confirm.
    """
    template_name = "users/profile.html"
    return render(request, template_name)
| 227 | 61 |
from pathlib import Path
import sys
import time
# add parent dir of imxpy
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
from easydict import EasyDict as edict
import pytest
from imx_client import IMXClient
from imx_objects import *
def random_number():
    """Return a random non-negative integer, used as a unique-ish token/project id."""
    import random
    upper = 100000000000000000000000000000000000
    return random.randint(0, upper)
@pytest.fixture
def random_str():
    """Random numeric id rendered as a string."""
    return str(random_number())
@pytest.fixture
def acc1():
    """Testnet account #1 (controls the test contract per the minting fixtures below).

    NOTE: the hardcoded private keys here are throwaway testnet credentials only.
    """
    acc = edict()
    acc.pk = "4c4b2554e43b374f4cafdd5adaeea5e9aff9b3be54d329bc939752bb747294b9"
    acc.addr = "0x77406103701907051070fc029e0a90d5be82f76c"
    return acc
@pytest.fixture
def acc2():
    """Testnet account #2, counterparty for transfers and orders."""
    acc = edict()
    acc.pk = "ac5d52cc7f75e293ecf2a95f3fafef23c9f5345b4a434ed5bacffccbdbe944fd"
    acc.addr = "0xea047d1919b732a4b9b12337a60876536f4f2659"
    return acc
@pytest.fixture
def acc3():
    """Testnet account #3, a spare account."""
    acc = edict()
    acc.pk = "bfde975ea5aa3779c7e2f2aade7c2a594b53e32ee23a2ae395927ec5fce4aa4b"
    acc.addr = "0xd5f5ad7968147c2e198ddbc40868cb1c6f059c6d"
    return acc
@pytest.fixture
def one_eth():
    """One ETH expressed in wei (10**18)."""
    return 1_000_000_000_000_000_000
@pytest.fixture
def half_eth(one_eth):
    """Half an ETH in wei (integer division of one_eth)."""
    return one_eth // 2
@pytest.fixture(scope="function")
def client(acc1):
    """Testnet IMX client authenticated as acc1."""
    return IMXClient("test", pk=acc1.pk)
@pytest.fixture(scope="function")
def mainnet_client():
    """Mainnet IMX client with no private key supplied."""
    return IMXClient("main")
@pytest.fixture(scope="function")
def client2(acc2):
    """Testnet IMX client authenticated as acc2."""
    return IMXClient("test", pk=acc2.pk)
@pytest.fixture(scope="function")
def project_id(client, acc1):
    """Create a fresh test project and return its id.

    NOTE(review): results are unwrapped via .result(), so client calls appear
    to return future-like objects — confirm against IMXClient.
    """
    params = CreateProjectParams(
        name="test_proj", company_name="test_company", contact_email="test@test.com"
    )
    res = client.create_project(params)
    res = res.result()
    return res["result"]["id"]
@pytest.fixture(scope="function")
def random_addr():
    """A syntactically valid, randomly generated 20-byte hex address."""
    import random
    allowed = "abcdef0123456789"
    addr = f"0x{''.join(random.choice(allowed) for _ in range(40))}"
    return addr
@pytest.fixture
def contract_addr():
    """Address of the ERC721 test contract used by the minting fixtures."""
    return "0xb72d1aa092cf5b3b50dabb55bdab0f33dfab37b7"
@pytest.fixture
def unregistered_contract_addr():
    """A contract address not registered with IMX, for negative tests."""
    return "0xb55016be31047c16c951612f3b0f7c5f92f1faf5"
@pytest.fixture(scope="function")
def token_id(client2, acc1, acc2, contract_addr):
    """Yield a token id, then transfer the token from acc2 back to acc1 on teardown.

    NOTE(review): the id is hardcoded to 0, and the teardown transfer assumes
    acc2 still owns the token once the test finishes — confirm both.
    """
    _token_id = 0
    yield _token_id
    params = TransferParams(
        sender=acc2.addr,
        receiver=acc1.addr,
        token=ERC721(token_id=_token_id, contract_addr=contract_addr),
    )
    client2.transfer(params)
def mint_params(contract_addr, id_, addr):
    """Build a MintParams payload that mints exactly one token to ``addr``.

    The token's blueprint is simply the stringified token id.
    """
    token = MintableToken(
        id=id_,
        blueprint=str(id_),
    )
    target = MintTarget(
        addr=addr,
        tokens=[token],
    )
    return MintParams(
        contract_addr=contract_addr,
        targets=[target],
    )
@pytest.fixture(scope="function")
def minted_nft_id(client, acc1, contract_addr):
    """Mint a fresh NFT to acc1 and return its token id."""
    token_id = random_number()
    params = mint_params(contract_addr, token_id, acc1.addr)
    res = client.mint(params)
    res = res.result()
    # wait until the database has applied the state
    time.sleep(2)
    return token_id
@pytest.fixture(scope="function")
def valid_order_params(client, client2, acc2, contract_addr):
    """Mint an NFT to acc2, list it for sale, and return (order_id, token_id)."""
    # client1 is in control of the sc therefore he mints to acc2
    token_id = random_number()
    params = mint_params(contract_addr, token_id, acc2.addr)
    res = client.mint(params)
    time.sleep(2)
    # client2 now has the nft and can create the order which client1 will buy
    params = CreateOrderParams(
        sender=acc2.addr,
        token_sell=ERC721(token_id=token_id, contract_addr=contract_addr),
        token_buy=ETH(quantity="0.000001"),
    )
    res = client2.create_order(params)
    res = res.result()
    time.sleep(2)
    return (res["result"]["order_id"], token_id)
@pytest.fixture
def unregistered_addr():
    """A wallet address not registered with IMX, for negative tests."""
    return "0xd2Bf8229D98716abEA9D22453C5C5613078B2c46"
@pytest.fixture
def erc20_contract_addr():
    """Address of an ERC20 token contract used in tests."""
    return "0x4c04c39fb6d2b356ae8b06c47843576e32a1963e"
@pytest.fixture
def gods_unchained_addr():
    """Contract address — presumably the Gods Unchained collection (by name); confirm."""
    return "0xacb3c6a43d15b907e8433077b6d38ae40936fe2c"
@pytest.fixture
def gods_addr():
    """Contract address — presumably the GODS token (by name); confirm."""
    return "0xccc8cb5229b0ac8069c51fd58367fd1e622afd97"
| 4,263 | 1,856 |
# Re-export the Singer-based Twilio source as this package's public API.
from .source import SourceTwilioSinger
__all__ = ["SourceTwilioSinger"]
| 73 | 24 |
from typing import List
import numpy as np
from off_policy_rl.utils.selection_method import SelectionMethod
class Epoch:
    """Configuration for one training epoch: an episode budget plus one or more
    selection methods, optionally weighted by sampling probabilities.

    Annotations are kept as forward-reference strings so the class does not
    require the project types at definition time.
    """

    def __init__(
        self,
        number_episodes: int,
        selection_methods: "List[SelectionMethod]",
        probabilities: "List[float]" = None
    ):
        self.number_episodes = number_episodes
        # FIX: with multiple methods and probabilities=None the old code crashed
        # with TypeError on len(None); raise the intended AssertionError instead.
        if len(selection_methods) > 1:
            if probabilities is None or len(selection_methods) != len(probabilities):
                raise AssertionError("The number of Selection Methods must match the number of probabilities")
        self.selection_methods = selection_methods
        if probabilities is not None:
            # FIX: exact float comparison rejected sums like 0.9999999999999999;
            # compare within a small tolerance instead.
            if abs(sum(probabilities) - 1.0) > 1e-9:
                raise AssertionError("The list of probabilities must add to 1.0"
                                     " (current sum: {})".format(sum(probabilities)))
        self.probabilities = probabilities

    def get_selection_method(self) -> "SelectionMethod":
        """Return one of the configured selection methods, sampled by probability
        when more than one is configured."""
        # FIX: was `len(SelectionMethod) > 1` — the *class* (enum member count),
        # not this instance's configured list of methods.
        if len(self.selection_methods) > 1:
            return np.random.choice(self.selection_methods, p=self.probabilities)
        return self.selection_methods[-1]
| 1,088 | 288 |
# Django database settings: PostgreSQL via psycopg2. Connection values are
# left blank here; fill them from the environment or a local, untracked
# settings file rather than committing them.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': '',
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': 5432
    }
}
# SECURITY(review): a live Redis host IP and password are hardcoded and
# committed to source control — rotate this credential and load these values
# from environment variables instead.
REDIS_HOST = '18.215.184.182'
REDIS_PORT = 6379
REDIS_PASSWORD = 'vwTe6yLaw5hX9dIdbfW0'
REDIS_DB = 0
| 306 | 143 |
"""
A deterministic decoder.
"""
import numpy as np
import sys
import os.path as osp
from collections import defaultdict
from warnings import warn
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
# We include the path of the toplevel package in the system path so we can always use absolute imports within the package.
toplevel_path = osp.abspath(osp.join(osp.dirname(__file__), '..'))
if toplevel_path not in sys.path:
sys.path.insert(1, toplevel_path)
from model.base_decoder import BaseDecoder # noqa: E402
from util.error import InvalidArgumentError # noqa: E402
__author__ = "Tom Pelsmaeker"
__copyright__ = "Copyright 2018"
class DeterministicDecoder(BaseDecoder):
"""A deterministic decoder, i.e. a RNN with next-word prediction objective.
Args:
device(torch.device): the device (cpu/gpu) on which the model resides.
seq_len(int): maximum length of sequences passed to the model.
kl_step(int): step size of linear kl weight increment during training of the model.
word_p(float): probability of dropping a word, i.e. mapping it to <unk>, before decoding.
parameter_p(float): probability of dropping a row in the weight layers, using Gal's dropout on non-rec layers.
var_mask(boolean): whether to use a different parameter dropout mask at every timestep.
unk_index(int): index of the <unk> token in a one-hot representation.
css(boolean): whether to use CSS softmax approximation.
N(int): number of sequences in the dataset, for the regularization weight.
rnn_type(str): which RNN to use. [GRU, LSTM] are supported.
v_dim(int): size of the vocabulary.
x_dim(int): size of input embeddings.
h_dim(int): size of hidden layers of the RNN.
l_dim(int): number of layers of the RNN.
"""
def __init__(self, device, seq_len, word_p, parameter_p, drop_type, unk_index, css, sparse, N, rnn_type,
tie_in_out, v_dim, x_dim, h_dim, s_dim, l_dim):
super(DeterministicDecoder, self).__init__(device, seq_len, word_p, parameter_p, drop_type, unk_index, css, N,
rnn_type, v_dim, x_dim, h_dim, s_dim, l_dim)
self.tie_in_out = tie_in_out
# The model embeds words and passes them through the RNN to get a probability of next words.
self.emb = nn.Embedding(v_dim, x_dim, sparse=bool(sparse))
# We currently support GRU and LSTM type RNNs
if rnn_type == "GRU":
if self.drop_type in ["varied", "shared"]:
# Varied and shared dropout modes only drop input and output layer. Shared shares between timesteps.
self.grnn = nn.GRU(x_dim, h_dim, l_dim, batch_first=True)
else:
self.grnn = nn.ModuleList([nn.GRUCell(x_dim, h_dim, 1)])
self.grnn.extend([nn.GRUCell(h_dim, h_dim, 1)
for _ in range(l_dim - 1)])
elif rnn_type == "LSTM":
if self.drop_type in ["varied", "shared"]:
self.grnn = nn.LSTM(x_dim, h_dim, l_dim, batch_first=True)
else:
self.grnn = nn.ModuleList([nn.LSTMCell(x_dim, h_dim, 1)])
self.grnn.extend([nn.LSTMCell(h_dim, h_dim, 1)
for _ in range(l_dim - 1)])
self.linear = nn.Linear(h_dim, v_dim)
@property
def linear(self):
return self._linear
@linear.setter
def linear(self, val):
self._linear = val
if self.tie_in_out:
if self.h_dim != self.x_dim:
raise InvalidArgumentError("h_dim should match x_dim when tying weights.")
self._linear.weight = self.emb.weight
def forward(self, data, log_likelihood=False, extensive=False):
"""Forward pass through the decoder which returns a loss and prediction.
Args:
data(list of torch.Tensor): a batch of datapoints, containing at least a tensor of sequences and optionally
tensors with length information and a mask as well, given variable length sequences.
Returns:
losses(dict of torch.FloatTensor): computed losses, averaged over the batch, summed over the sequence.
pred(torch.LongTensor): most probable sequences given the data, as predicted by the model.
"""
x_in, x_len, x_mask = self._unpack_data(data, 3)
losses = defaultdict(lambda: torch.tensor(0., device=self.device))
# Before decoding, we map a fraction of words to <UNK>, weakening the Decoder
self.word_dropout.sample_mask(self.word_p, x_in.shape)
x_dropped = x_in.clone()
x_dropped[self.word_dropout._mask == 0] = self.unk_index
x = self.emb(x_dropped[:, :-1])
scores = self._rnn_forward(x, x_len)
# Compute loss, averaged over the batch, but summed over the sequence
if self.css and self.training:
loss = self._css(scores, x_in[:, 1:])
else:
loss = self.reconstruction_loss(scores.contiguous().view(
[-1, scores.shape[2]]), x_in[:, 1:].contiguous().view([-1])).view(scores.shape[0], scores.shape[1])
if x_len is not None:
# If we had padded sequences as input, we need to mask the padding from the loss
losses["NLL"] = torch.sum(torch.mean(loss * x_mask[:, 1:], 0))
else:
losses["NLL"] = torch.sum(torch.mean(loss, 0))
# We also return the predictions, i.e. the most probable token per position in the sequences
pred = torch.max(scores.detach(), dim=2)[1]
# We use L2-regularization scaled by dropout on the network layers (Gal, 2015)
losses["L2"] = self._l2_regularization()
if log_likelihood:
losses["NLL"] = losses["NLL"].unsqueeze(0)
if extensive:
return losses, pred, x.new_tensor([[1, 1]]), x.new_tensor([[1, 1]]), x.new_tensor([[1, 1]]), \
x.new_tensor([[1, 1]]), x.new_tensor([[1]]), x.new_tensor([[1]])
else:
return losses, pred
def _rnn_forward(self, x, x_len):
"""Recurrent part of the forward pass. Decides between fast or slow based on the dropout type."""
# Drop rows of the input
shape = torch.Size(x.shape) if self.var_mask else torch.Size([x.shape[0], 1, self.x_dim])
h = self.parameter_dropout_in(x, self.parameter_p, shape=shape)
# We have to run a (slow) for loop to use recurrent dropout
if self.drop_type == "recurrent":
# Sample fixed dropout masks for every timestep
shape = torch.Size([x.shape[0], int(self.h_dim/self.l_dim)])
for i in range(self.l_dim):
self.parameter_dropout_hidden[i].sample_mask(self.parameter_p, shape)
self.parameter_dropout_out[i].sample_mask(self.parameter_p, shape)
if self.rnn_type == "LSTM":
self.parameter_dropout_context[i].sample_mask(self.parameter_p, shape)
# Forward passing with application of dropout
scores = []
if self.rnn_type == "GRU":
h_p = list(torch.unbind(self._init_hidden(x.shape[0])))
else:
h_p = list(torch.unbind(self._init_hidden(x.shape[0])))
c_p = list(torch.unbind(self._init_hidden(x.shape[0])))
for j in range(x.shape[1]):
h_j = h[:, j, :]
for i, grnn in enumerate(self.grnn):
if self.rnn_type == "GRU":
h_j = grnn(h_j, h_p[i])
h_p[i] = self.parameter_dropout_hidden[i].apply_mask(h_j)
else:
h_j, c_j = grnn(h_j, (h_p[i], c_p[i]))
h_p[i] = self.parameter_dropout_hidden[i].apply_mask(h_j)
c_p[i] = self.parameter_dropout_context[i].apply_mask(c_j)
h_j = self.parameter_dropout_out[i].apply_mask(h_j)
scores.append(self.linear(h_j))
scores = torch.stack(scores, 1)
# For the input/output dropout we can use fast CUDA RNNs
else:
# To h: [batch_size, seq_len, h_dim] we apply the same mask: [batch_size, 1, h_dim] at every timestep
shape = torch.Size(h.shape) if self.var_mask else torch.Size([x.shape[0], 1, self.h_dim])
if x_len is not None:
h = pack_padded_sequence(h, x_len - 1, batch_first=True)
h, _ = self.grnn(h)
if x_len is not None:
h = pad_packed_sequence(h, batch_first=True, total_length=x.shape[1])[0]
# We also apply the same dropout mask to every timestep in the output hidden states
h = self.parameter_dropout_out(h, self.parameter_p, shape=shape)
scores = self.linear(h)
return scores
def sample_sequences(self, x_i, seq_len, eos_token, pad_token, sample_softmax=False):
"""'Sample' sequences from the (learned) decoder given a prefix of tokens.
Args:
x_i(torch.Tensor): initial tokens or sequence of tokens to start generating from.
seq_len(int): length of the sampled sequences after the prefix. Defaults to preset seq_len.
eos_token(int): the end of sentence indicator.
pad_token(int): the token used for padding sentences shorter than seq_len.
Returns:
list: a list of sampled sequences of pre-defined length.
"""
if seq_len is not None:
self.seq_len = seq_len
else:
warn("No sequence length provided, preset seq_len will be used.")
with torch.no_grad():
if sample_softmax:
h_i = None
c_i = None
else:
h_i = self._sample_hidden(x_i.shape[0])
c_i = self._sample_hidden(x_i.shape[0])
samples = []
# Sampling pass through the sequential decoder
# The prefix is automatically consumed by the first step through the RNN
for i in range(x_i.shape[1]):
samples.append(x_i[:, i].squeeze().tolist())
for i in range(self.seq_len):
x_i = self.emb(x_i)
if self.rnn_type == "GRU":
h, h_i = self.grnn(x_i, h_i)
else:
h, h_i, c_i = self.grnn(x_i, (h_i, c_i))
# scores: [batch_size, h_dim]
scores = self.linear(h[:, -1])
# x_i: [batch_size, 1]
if sample_softmax:
# Sample the output Bernoulli
x_i = torch.multinomial(F.softmax(scores, 1), 1)
else:
# Argmax based on stochasticity from hidden
x_i = torch.max(scores, dim=1, keepdim=True)[1]
samples.append(x_i.squeeze().tolist())
# Pad samples after the first <eos> token
samples = np.array(samples).T
eos_spot = np.argwhere(samples == eos_token)
prev_row = -1
for spot in eos_spot:
if spot[0] != prev_row:
try:
samples[spot[0], spot[1]+1:] = pad_token
except IndexError:
pass
else:
pass
prev_row = spot[0]
return list(samples)
def _sample_hidden(self, batch_size):
    """Draw an initial RNN hidden state of shape (l_dim, batch, h_dim) from N(0, 1)."""
    zero_mean = torch.zeros((self.l_dim, batch_size, self.h_dim), device=self.device)
    return torch.normal(mean=zero_mean)
| 11,761 | 3,733 |
# Copyright 2016 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import SkipTest
from trove.tests.scenario import runners
from trove.tests.scenario.runners.test_runners import SkipKnownBug
from trove.tests.scenario.runners.test_runners import TestRunner
class InstanceForceDeleteRunner(TestRunner):
    """Scenario runner exercising force-deletion of an instance still in BUILD."""

    def __init__(self):
        super(InstanceForceDeleteRunner, self).__init__(sleep_time=1)
        # ID of the throwaway instance created in BUILD state; None until created.
        self.build_inst_id = None

    def run_create_build_instance(self, expected_states=['NEW', 'BUILD'],
                                  expected_http_code=200):
        """Create a fresh instance and record its ID once it reaches BUILD."""
        if self.is_using_existing_instance:
            raise SkipTest("Using an existing instance.")
        instance_name = self.instance_info.name + '_build'
        flavor = self.get_instance_flavor()
        api = self.auth_client
        instance = api.instances.create(
            instance_name,
            self.get_flavor_href(flavor),
            self.instance_info.volume,
            nics=self.instance_info.nics,
            datastore=self.instance_info.dbaas_datastore,
            datastore_version=self.instance_info.dbaas_datastore_version)
        self.assert_client_code(api, expected_http_code)
        self.assert_instance_action([instance.id], expected_states)
        self.build_inst_id = instance.id

    def run_delete_build_instance(self, expected_http_code=202):
        """Force-delete the BUILD instance via the admin client, if one exists."""
        if self.build_inst_id:
            admin = self.admin_client
            admin.instances.force_delete(self.build_inst_id)
            self.assert_client_code(admin, expected_http_code)

    def run_wait_for_force_delete(self):
        """Verification of the deletion is blocked by a known bug; always skips."""
        raise SkipKnownBug(runners.BUG_FORCE_DELETE_FAILS)
        # if self.build_inst_id:
        #     self.assert_all_gone([self.build_inst_id], ['SHUTDOWN'])
| 2,316 | 723 |
import pytest
# Provider name consumed by the conftest-supplied `provider` fixture.
provider = 'twilio'


class TestTwilio:
    """Tests for the twilio notification provider (fixture comes from conftest)."""

    def test_twilio_metadata(self, provider):
        expected = {
            'base_url': 'https://api.twilio.com/2010-04-01/Accounts/{}/Messages.json',
            'name': 'twilio',
            'site_url': 'https://www.twilio.com/'
        }
        assert provider.metadata == expected

    @pytest.mark.online
    def test_sanity(self, provider):
        # Smoke test against the real API; only runs when 'online' is enabled.
        provider.notify(message='foo', raise_on_errors=True)
| 489 | 164 |
from pathlib import Path
import tempfile
from unittest.mock import MagicMock
import pytest
import numpy as np
import pandas as pd
from scipy import sparse
import nibabel
import nilearn
from nilearn.datasets import _testing
from nilearn.datasets._testing import request_mocker # noqa: F401
def make_fake_img():
    """Build a small deterministic 4x3x5 NIfTI image (identity affine) for tests."""
    rng = np.random.default_rng(0)
    voxels = rng.random(size=(4, 3, 5))
    affine = np.eye(4)
    return nibabel.Nifti1Image(voxels, affine)
@pytest.fixture()
def fake_img():
    # Fresh deterministic fake image per test (see make_fake_img).
    return make_fake_img()
def make_fake_data():
    """Create a fake neuroquery_image_search data archive for request mocking.

    Builds random sparse DiFuMo maps, study/term projections, article
    metadata, a brain mask and document frequencies, writes them into a
    temporary directory, and packs them in the layout the downloader expects.

    Returns:
        The archive produced by ``_testing.dict_to_archive``.
    """
    n_voxels, n_components, n_studies, n_terms = 23, 8, 12, 9
    rng = np.random.default_rng(0)
    difumo_maps = rng.random((n_components, n_voxels))
    # Zero out ~30% of entries to make the maps sparse. The original used
    # .astype(int), which performed integer row indexing (zeroing rows 0/1)
    # instead of boolean masking.
    difumo_maps[rng.binomial(1, 0.3, size=difumo_maps.shape).astype(bool)] = 0
    difumo_inverse_covariance = np.linalg.pinv(difumo_maps.dot(difumo_maps.T))
    difumo_maps = sparse.csr_matrix(difumo_maps)
    projections = rng.random((n_studies, n_components))
    term_projections = rng.random((n_terms, n_components))
    articles_info = pd.DataFrame({"pmid": np.arange(n_studies) + 100})
    articles_info["title"] = [
        f"title {pmid}" for pmid in articles_info["pmid"]
    ]
    articles_info["pubmed_url"] = [
        f"url {pmid}" for pmid in articles_info["pmid"]
    ]
    # Mask selecting the first n_voxels voxels of a 4x3x5 volume.
    mask = np.zeros(4 * 3 * 5, dtype=int)
    mask[:n_voxels] = 1
    mask = mask.reshape((4, 3, 5))
    mask_img = nibabel.Nifti1Image(mask, np.eye(4))
    doc_freq = pd.DataFrame(
        {
            # The original was missing the f-prefix, producing the literal
            # string "term_{i}" for every row.
            "term": [f"term_{i}" for i in range(n_terms)],
            "document_frequency": np.arange(n_terms),
        }
    )
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_dir = Path(temp_dir)
        sparse.save_npz(temp_dir / "difumo_maps.npz", difumo_maps)
        np.save(
            temp_dir / "difumo_inverse_covariance.npy",
            difumo_inverse_covariance,
        )
        np.save(temp_dir / "projections.npy", projections)
        np.save(temp_dir / "term_projections.npy", term_projections)
        articles_info.to_csv(temp_dir / "articles-info.csv", index=False)
        mask_img.to_filename(str(temp_dir / "mask.nii.gz"))
        doc_freq.to_csv(
            str(temp_dir / "document_frequencies.csv"), index=False
        )
        archive = _testing.dict_to_archive(
            {"neuroquery_image_search_data": temp_dir}
        )
    return archive
@pytest.fixture(autouse=True)
def temp_data_dir(tmp_path_factory, monkeypatch):
    """Point HOME/USERPROFILE and NEUROQUERY_DATA at a throwaway directory."""
    fake_home = tmp_path_factory.mktemp("temp_home")
    for env_var in ("HOME", "USERPROFILE"):
        monkeypatch.setenv(env_var, str(fake_home))
    data_dir = fake_home / "neuroquery_data"
    data_dir.mkdir()
    monkeypatch.setenv("NEUROQUERY_DATA", str(data_dir))
@pytest.fixture(autouse=True, scope="function")
def map_mock_requests(request_mocker):
request_mocker.url_mapping[
"https://osf.io/mx3t4/download"
] = make_fake_data()
return request_mocker
@pytest.fixture(autouse=True)
def patch_nilearn(monkeypatch):
    """Stub out network-touching nilearn helpers and webbrowser.open."""
    def _stub_motor_task(*args, **kwargs):
        return {"images": [make_fake_img()]}

    monkeypatch.setattr(
        nilearn.datasets, "fetch_neurovault_motor_task", _stub_motor_task
    )
    monkeypatch.setattr("webbrowser.open", MagicMock())
| 3,225 | 1,247 |
from core.handler import handle
import os
from datetime import datetime
import psutil
@handle("os/name")
def get_name():
return os.uname().nodename
@handle("os/kernel")
def get_kernel():
return os.uname().release
@handle("os/user")
def get_user():
return psutil.Process().username()
@handle("os/boottime")
def get_boottime():
return datetime.fromtimestamp(psutil.boot_time()).strftime("%Y-%m-%d %H:%M:%S")
@handle("os/processes")
def get_processes():
return psutil.Process().as_dict()
| 515 | 179 |
# coding: utf-8
import traceback
from abc import ABCMeta, abstractmethod
from mall_spider.spiders.actions.executable import Executable
class Action(Executable):
    """Template-method base class for spider actions.

    ``execute`` drives the lifecycle: on_create -> on_start -> do_execute ->
    on_complete (or on_error on failure) -> on_destroy. Subclasses implement
    the abstract hooks.
    """
    # NOTE(review): __metaclass__ is the Python-2 spelling and is inert on
    # Python 3; switching to `metaclass=ABCMeta` would start enforcing the
    # abstract methods and could break existing instantiations, so it is
    # left unchanged.
    __metaclass__ = ABCMeta

    def execute(self, context):
        """Run the full action lifecycle and return do_execute's result.

        Args:
            context: opaque execution context passed to every hook.

        Raises:
            Exception: whatever do_execute raised, re-raised with its
                original traceback after on_error has been notified.
        """
        self.on_create(context=context)
        self.on_start(context=context)
        try:
            result = self.do_execute(context=context)
            self.on_complete(context=context)
            return result
        except Exception:
            # Report the formatted traceback to the error hook, then re-raise
            # with a bare `raise` so the original traceback is preserved
            # (replaces the unused sys.exc_info() capture and the commented
            # Python-2 three-argument raise).
            self.on_error(context=context, exp=traceback.format_exc())
            raise
        finally:
            self.on_destroy(context=context)

    @abstractmethod
    def do_execute(self, context):
        """Perform the action's work; its return value becomes execute's result."""
        pass

    @abstractmethod
    def on_create(self, context):
        """Hook called before the action starts."""
        pass

    @abstractmethod
    def on_start(self, context):
        """Hook called immediately before do_execute."""
        pass

    @abstractmethod
    def on_error(self, context, exp):
        """Hook called with the formatted traceback when do_execute fails."""
        pass

    @abstractmethod
    def on_complete(self, context):
        """Hook called after do_execute succeeds."""
        pass

    @abstractmethod
    def on_destroy(self, context):
        """Hook always called last, success or failure."""
        pass
| 1,162 | 338 |
import re
import sys
# Rewrite "(lat, lon)" coordinate pairs on stdin as <point .../> tags.
real_num = r'[+-]?\d+(?:\.\d+)?'
# Parentheses are needed so each coordinate is captured as its own group
coord = rf'\(({real_num}),\s*({real_num})\)'
for line in sys.stdin:
    line = re.sub(coord,r"<point lat='\1', lon='\2' />",line)
    if (line):
        print(line)
quit()
# NOTE: everything below quit() is an unreachable alternative implementation,
# kept for demonstration.
# It can also be done this way, with named groups:
coord = rf'\((?P<lat>{real_num}),\s*(?P<lon>{real_num})\)'
for line in sys.stdin:
    line = re.sub(coord,r"<point lat='\g<lat>', lon='\g<lon>' />",line)
    if (line):
        print(line)
import pandas as pd
import numpy as np
def see(path='C:/dag/Expenditure.csv'):
    """Summarize an expenditure CSV: category counts and per-category totals.

    Args:
        path (str): CSV file with at least 'Category' and 'Myself' columns.
            Defaults to the original hard-coded location for backward
            compatibility.

    Returns:
        tuple: (category value counts DataFrame, pivot table summing 'Myself'
        per 'Category'). Both are also printed.
    """
    m = pd.read_csv(path)
    countt = m['Category'].value_counts(sort=True, ascending=True).to_frame()
    print(countt)
    pivottable = m.pivot_table(index=['Category'], values=['Myself'], aggfunc='sum')
    print(pivottable)
    return countt, pivottable
see()
| 375 | 147 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from datetime import datetime
from utils import *
#===== time stamp for experiment file names
timestamp = datetime.now()
timestamp = timestamp.strftime("%d-%m-%Y_%H%M")
script_name = 'main' # main script to be executed
#================================
# args for main script #
#================================
seed= 1 # setting random seed for reproducibility
#===== MODEL =====
#
model_type= 'MLP1'
no_bias= True # don't use biases in layers
make_linear= False # linear activation function (if False, then ReLU)
no_BN= True # disable BatchNorm
NTK_style= True # NTK-style parametrization of the network
base_width= 8
all_widths= [8, 32, 128, 216, 328, 512, 635]
fract_freeze_cl= 0 # allowed fraction of all cl-layer weights that may be frozen
dense_only= False # consider dense models only, no weight freezing
#===== TRAINING =====
#
no_ES= True # disable Early Stopping
train_subset_size= 2048 # train on a subset of the train set
mbs= 256 # mini-batch size
max_epochs= 300 # max number of training epochs
#===== DATASET =====
#
dataset= 'MNIST'
normalize_pixelwise= True
#=== for NTK-style nets, the LR value is width-dependent
# loading optimized LR values for each width from file
# NOTE(review): `torch` is not imported in this file — presumably it comes in
# via `from utils import *`; confirm.
if NTK_style: bta_avg_and_lr= torch.load('optimized_LR_for_NTK_style_MLP1.pt')
# NWTF (for "Num. Weights To Freeze") is a dictionary with
# key = width
# val = [(nwtf_cl, nwtf_fc)_1, (nwtf_cl, nwtf_fc)_2, ...]
# i.e., a list of valid combinations of weights to freeze for the respective layer (cl and fc)
if dense_only:
    # dense-only: a single config with nothing frozen at the base width
    NWTF = {base_width: [(0,0)]}
else:
    NWTF = get_NWTF(base_width, all_widths, fract_freeze_cl)
job_configs=[]
for width, val in NWTF.items():
for nwtf_cl,nwtf_fc in val:
cur_base_width=width if nwtf_cl==nwtf_fc else base_width
# compose name for output dir
output_dir = f'{dataset}_{model_type}_{NTK_tag}'
output_dir+= f'_base_{cur_base_width}_width_{width}_{act_fctn}{bias_tag}'
if train_subset_size>0: output_dir+=f'_train_on_{train_subset_size}_samples'
if normalize_pixelwise: output_dir+=f'_pixelwise_normalization'
if NTK_style: # get LR from file
lrkey=f'{cur_base_width}_{width}'
lr=bta_avg_and_lr[lrkey]
else:
lr= 0.1
config ={
'base_width': int(cur_base_width),
'width': int(width),
'lr': lr,
'seed': seed,
'nwtf_cl': int(nwtf_cl),
'nwtf_fc': int(nwtf_fc),
'dataset': dataset,
'normalize_pixelwise': normalize_pixelwise,
'train_subset_size': train_subset_size,
'no_ES': no_ES,
'max_epochs': max_epochs,
'mbs': mbs,
'no_bias': no_bias,
'NTK_style': NTK_style,
'make_linear': make_linear,
'no_BN': no_BN,
'output_dir': output_dir
}
job_configs.append(config)
# Emit one `python -m <script>` command line per configuration. Boolean
# options become bare flags (only when True); everything else is `--key value`.
for config in job_configs:
    parts = [f'\npython -m {script_name}']
    for key, val in config.items():
        if isinstance(val, bool):
            if val:
                parts.append(f'--{key}')
        else:
            parts.append(f'--{key} {val}')
    print(' '.join(parts) + ' ')
| 4,134 | 1,439 |
import json
import sys
from multiprocessing import Pool
DOMAIN = "TM"
def read_file(year):
data = []
with open("%s-%s.json" % (DOMAIN, year), 'rb') as f:
j = json.loads(f.read())
for row in j:
data.append({
'domain': row[1],
'country': row[2],
'country_code': int(row[3]),
'item': row[4],
'item_code': int(row[5]),
'element': row[6],
'element_code': int(row[7]),
'year': int(row[8]),
'units': row[9],
'value': row[10],
'flag': row[11]
})
print j[0]
#print year, len(data)
#print data[100]
return data
if __name__ == "__main__":
pool = Pool(processes=3)
#datas = pool.map(read_file, (2011, 2009, 2008))
datas = []
for year in (2011, 2009, 2008):
datas.append(read_file(year))
data = reduce(lambda a, b: a+b, datas)
| 854 | 348 |
import pandas as pd
import matplotlib.pyplot as plt
import librosa
import seaborn as sns
from sklearn.model_selection import train_test_split
import math
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.metrics import mean_squared_error, mean_absolute_error
import traceback
import statistics
# Regression Model
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import Lars
from sklearn.linear_model import BayesianRidge
from sklearn.linear_model import SGDRegressor
from sklearn.linear_model import RANSACRegressor
from pyfiglet import Figlet
from sklearn.model_selection import cross_val_score
from joblib import dump, load
from sklearn.kernel_ridge import KernelRidge
from sklearn.tree import DecisionTreeRegressor
from xgboost import XGBRegressor
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import VotingRegressor
from sklearn.ensemble import StackingRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import cross_val_score
import statistics
from sklearn.model_selection import cross_validate
# Dimensionality reduction
from sklearn.decomposition import PCA
from sklearn import manifold
import numpy as np
from sklearn.model_selection import GridSearchCV
from scipy.special import entr
import random
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn import metrics
from art import *
class EasyRegression:
def __init__(self):
    """Print the ASCII-art banner and initialize default pipeline state."""
    print(text2art('Easy'))
    print(text2art('Regression'))
    self.seed = 40  # seed used for train/test splits and estimators
    self.strategy = None  # evaluation strategy chosen later
    self.parameterFound = dict()
    self.configured = False  # True when models were supplied pre-tuned
    self.models = None  # name -> estimator, filled by the initialize/load methods
    # Scalers
    self.std = StandardScaler()
    self.mmax = MinMaxScaler()
    self.random_state = 42
    self.feature_set = dict()  # feature_name -> [feature_type, feature_name, DataFrame]
    self.label_set = dict()  # label_name -> Series
    self.groups = None
    self.datasets = None
    self.label = None
    self.flagDataset = False
    self.flagGroup = False
    self.flagParameterFind = False
    # per-strategy result holders (filled during evaluation)
    self.train_test = None
    self.cross_val = None
    self.leave_group = None
    self.leave_dataset = None
    self.stratified = None
def loadFeature(self,feature_file,feature_type,feature_name):
if len(self.feature_set) == 0:
print('-------------------------------')
print(' STEP : Features loading')
print('-------------------------------')
if feature_type not in ['ind','grp']:
print('===> Error: Undefined feature type')
return
else:
try:
if feature_name in self.feature_set.keys():
print('===> Feature with name ',feature_name,' already exist. Choose a different name')
return
else:
tmp = pd.read_csv(feature_file)
if len(self.feature_set) > 0:
first_feat = self.feature_set[list(self.feature_set.keys())[0]]
if tmp.shape[0] != first_feat[2].shape[0]:
print('===> Error: Mismatch in feature size with previously added features ',first_feat[1] )
return
self.feature_set[feature_name] = [feature_type,feature_name,tmp]
print('===> Feature file:',feature_file,' is loaded successfully !')
print('===> Summary:')
print(' #instances:',tmp.shape[0])
print(' #attributes:',tmp.shape[1])
num_cols = tmp.select_dtypes(['int64','float64'])
print(' #numeric-attributes:',num_cols.shape[1])
print('')
return num_cols
except:
print('===> Error occurred while loading the file')
traceback.print_exc()
def loadLabels(self,label_file):
try:
print('-------------------------------')
print(' STEP : Labels loading')
print('-------------------------------')
tmp = pd.read_csv(label_file)
if len(self.feature_set) > 0:
first_feat = self.feature_set[list(self.feature_set.keys())[0]]
if tmp.shape[0] != first_feat[2].shape[0]:
print(' Error: Mismatch in feature size with loaded feature ',first_feat[1] )
return None
for label in tmp.columns:
self.label_set[label] = tmp[label]
print('===> Label file:',label_file,' is loaded successfully !')
print('===> Summary:')
print(' #labels:',len(tmp.columns.tolist()))
print(' labels:', tmp.columns.tolist())
print('')
return tmp
except:
print('===> Error occurred while loading the file:',label_file)
traceback.print_exc()
return None
def feature_name_check(self,feature_name):
if feature_name not in self.feature_set.keys():
print(' Feature name:', feature_name,' is not available.')
return None
def label_name_check(self,label_name):
if label_name not in self.label_set.keys():
print(' Label name:',label_name,' is not available.')
return None
def extractFeatures(self,data,cor=.80):
    """Drop every column whose |correlation| with an earlier column exceeds ``cor``.

    NOTE: mutates the caller's DataFrame in place (matching the original
    contract) and also returns it.
    """
    print('-------------------------------')
    print(' STEP : Feature Extraction ')
    print('-------------------------------')
    corr_matrix = data.corr()
    column_names = corr_matrix.columns
    to_drop = set()
    # For each ordered pair (earlier j, later i), drop the later column.
    for i, later_col in enumerate(column_names):
        for j in range(i):
            if abs(corr_matrix.iloc[i, j]) > cor:
                to_drop.add(later_col)
    data.drop(labels=to_drop,axis=1,inplace=True)
    print('===> ',len(to_drop),' correlated features are removed.')
    print('===> Final features shape:',data.shape)
    return data
def findCorrelation(self,label_name=None,sort=True):
    """Print the correlation of every feature with one label column.

    Args:
        label_name (str): label to correlate against; when None, the first
            label column is used (with a notice if several exist).
        sort (bool): currently unused — NOTE(review): confirm whether sorting
            of the printed table was intended.
    """
    # NOTE(review): self.dataReady, self.features and self.labels are not set
    # in __init__ — calling this before whatever populates them raises
    # AttributeError. Confirm the intended setup path.
    if self.dataReady == False:
        print('Data is not ready yet for analysis.')
        return
    if label_name is not None:
        if label_name in self.labels.columns:
            tmp_features = self.features.copy()
            tmp_features[label_name] = self.labels[label_name]
            cor_table = tmp_features.corr()
            print(' Correlation ')
            print(' -------------------------------')
            print(cor_table[label_name])
            print(' -------------------------------')
    else:
        if self.labels.shape[1] > 1:
            print(' There are more than one label available.')
            print(self.labels.columns)
            print('Deafult: first column is used to computer correlation')
            label_name = self.labels.columns[0]
        tmp_features = self.features.copy()
        tmp_features[label_name] = self.labels[label_name]
        cor_table = tmp_features.corr()
        print(' Correlation ')
        print(' -------------------------------')
        print(cor_table[label_name])
        print(' -------------------------------')
def setGroupFeatureLabels(self,feat_labels):
    # Store the column-name prefixes used to group per-individual features.
    self.group_feature_labels = feat_labels
"""
This function performs group-level feature computation
supported fusions: Dimensionality reduction, Entropy, Gini, Average
"""
def getGroupFeatures(self,data):
group_feature_labels = ['add','del','speak','turns']
features_group = dict()
# iterate for each group-level feature
for grp_feature in group_feature_labels:
tmp = list()
# get all column names similar to grp_feature
for indiv_feature in data.columns:
if grp_feature in indiv_feature:
tmp.append(indiv_feature)
features_group[grp_feature] = tmp.copy()
return features_group
# preparing gini coefficient
def getGINI(self,data):
    """Compute one Gini coefficient per row for each group of columns.

    For every prefix returned by getGroupFeatures, the member columns of each
    row are treated as a distribution and reduced to a single
    'grp_gini_<prefix>' inequality score.

    Args:
        data (pd.DataFrame): per-individual feature columns.

    Returns:
        pd.DataFrame: one 'grp_gini_*' column per group.
    """
    print('-------------------------------')
    print(' STEP : Feature Fusion using Gini')
    print('-------------------------------')
    group_features = self.getGroupFeatures(data)
    gini = dict()
    for key in group_features.keys():
        tmp = data[group_features[key]].values
        # small epsilon avoids division by zero for all-zero rows
        tmp = tmp + 0.0000001
        # the standard Gini formula requires values sorted per row
        tmp = np.sort(tmp)
        index = np.arange(1,tmp.shape[1]+1)
        n = tmp.shape[1]
        key = 'grp_gini_'+key
        gini[key] = ((np.sum((2 * index - n - 1) * tmp,axis=1)) / (n * np.sum(tmp,axis=1))) #Gini coefficient
    gini_features = pd.DataFrame(gini)
    return gini_features
# Compute entropy features for individual features
def getEntropy(self,data):
    """Compute one Shannon entropy (in bits) per row for each column group.

    Each row's group columns are normalized into a probability distribution;
    entr(p).sum() yields nats, divided by log(2) to convert to bits.

    Args:
        data (pd.DataFrame): per-individual feature columns.

    Returns:
        pd.DataFrame: one 'grp_entropy_*' column per group.
    """
    print('-------------------------------')
    print(' STEP : Feature Fusion using Entropy')
    print('-------------------------------')
    group_features = self.getGroupFeatures(data)
    entropy = dict()
    for key in group_features.keys():
        tmp = data[group_features[key]].values
        tmp = tmp  # no-op (kept from original)
        # epsilon keeps the normalization finite for all-zero rows
        tmp_sum = tmp.sum(axis=1,keepdims=True) + .0000000000001
        p = tmp/tmp_sum
        key = 'grp_entropy_'+key
        entropy[key] = entr(p).sum(axis=1)/np.log(2)
    entropy_features = pd.DataFrame(entropy)
    return entropy_features
"""
Apply dimentionality reduction on features
PCA
"""
def Scaling(self,data,algo):
print('-------------------------------')
print(' STEP : Feature Scaling')
print('-------------------------------')
if algo in ['std','mmax']:
if algo == 'std':
res = pd.DataFrame(self.std.fit_transform(data), columns=data.columns)
print('===> Successfully applied Standard Scaling')
return res
elif algo == 'mmax':
res = pd.DataFrame(self.mmax.fit_transform(data), columns=data.columns)
print('===> Successfully applied MinMax Scaling')
return res
else:
print('===> Error: Unsupported scaling method')
return None
def DimRed(self,algo,data,params=None):
    """Fit a dimensionality-reduction model on a 70% train split and project all of ``data``.

    Args:
        algo (str): one of 'pca', 'mds', 'isomap', 'tsne'.
        data (pd.DataFrame): features to reduce.
        params (dict): required for the non-PCA algorithms; expects
            'n_components' and, for isomap, 'n_neighbors'.

    Returns:
        pd.DataFrame: reduced features with '<algo>_<k>' column names, or
        None on an unsupported algorithm / missing params.
    """
    print('-------------------------------')
    print(' STEP : Feature fusion using DimRed')
    print('-------------------------------')
    if algo not in ['pca','mds','isomap','tsne']:
        print('===> Erro: Unsupported dimension reduction algorithm specified')
        return None
    else:
        if algo!='pca' and len(params) ==0:
            print('===> Error: Specify n_components/n_neighbors parameters')
            return None
        else:
            # Dimensionality reduction
            # Fit on a train split only, mirroring the downstream evaluation protocol.
            X_train, X_test, y_train, y_test = train_test_split(data,self.label_set[self.label],train_size=.7,random_state=self.seed)
            # NOTE(review): all four reducers are constructed regardless of
            # `algo`; for algo='pca' with params=None the MDS/Isomap/TSNE
            # constructors subscript None and raise — confirm params is
            # always supplied.
            self.pca = PCA(random_state = self.seed)
            self.mds = manifold.MDS(n_components=params['n_components'],max_iter=100,n_init=1,random_state = self.seed)
            self.isomap = manifold.Isomap(n_neighbors=params['n_neighbors'],n_components=params['n_components'])
            self.tsne = manifold.TSNE(n_components=params['n_components'],init='pca',random_state = self.seed)
            if algo == 'pca':
                self.pca.fit(X_train)
                pca_features = self.pca.transform(data)
                print('===> Successfully applied PCA')
                pca_columns = [None] * pca_features.shape[1]
                for k in range(pca_features.shape[1]):
                    pca_columns[k] = 'pca_' + str(k)
                return pd.DataFrame(pca_features,columns=pca_columns)
            if algo == 'mds':
                self.mds.fit(X_train)
                mds_features = self.mds.transform(data)
                mds_columns = [None] * mds_features.shape[1]
                for k in range(mds_features.shape[1]):
                    mds_columns[k] = 'mds_' + str(k)
                print('===> Successfully applied MDS')
                return pd.DataFrame(mds_features,columns=mds_columns)
            if algo== 'isomap':
                self.isomap.fit(X_train)
                isomap_features = self.isomap.transform(data)
                print('===> Successfully applied ISOMAP')
                isomap_columns = [None] * isomap_features.shape[1]
                for k in range(isomap_features.shape[1]):
                    isomap_columns[k] = 'iso_' + str(k)
                return pd.DataFrame(isomap_features,columns=isomap_columns)
            if algo=='tsne':
                # t-SNE has no separate transform; it is fit on the full data directly.
                tsne_features = self.tsne.fit_transform(data)
                print('===> Successfully applied t-SNE')
                tsne_columns = [None] * tsne_features.shape[1]
                for k in range(tsne_features.shape[1]):
                    tsne_columns[k] = 'tsne_' + str(k)
                return pd.DataFrame(tsne_features,columns=tsne_columns) ;
def loadConfiguredModules(self,modules):
print('-------------------------------')
print(' STEP : Configured Regression Moduel Loaded')
print('-------------------------------')
self.models = modules
self.configured = True
def regressionModelInitialize(self):
    """Build the default model zoo and the grid-search parameter grids.

    Populates ``self.models`` (name -> estimator) and ``self.params``
    (name -> grid for GridSearchCV). The voting regressor combines KNN,
    AdaBoost, random forest and SVM and has no grid of its own.
    """
    print('-------------------------------')
    print(' STEP : Regression Moduel Initialised')
    print('-------------------------------')
    self.models = dict()
    self.params=dict()
    self.models['knn'] = KNeighborsRegressor()
    self.models['rf'] = RandomForestRegressor(random_state = self.seed)
    self.models['ada'] = AdaBoostRegressor(random_state = self.seed)
    self.models['gb'] = GradientBoostingRegressor(random_state = self.seed)
    self.models['xg'] = XGBRegressor(random_state = self.seed)
    self.models['mlp'] = MLPRegressor()
    self.models['svm'] = SVR()
    self.models['vot'] = VotingRegressor([('knn',self.models['knn']),('ada',self.models['ada']),('rand',self.models['rf']),('svm',self.models['svm'])])
    # Preparing parameter for finding optimal parameters
    self.params['knn'] ={'n_neighbors':[2,3,4,5],'algorithm':['auto', 'ball_tree', 'kd_tree', 'brute']}
    self.params['rf'] = {'max_depth':[2,3,4,5,6],'n_estimators':[50,100,150,200],'min_samples_split':[3,4,5]}
    self.params['ada'] = {'learning_rate':[.01,.001,.0001],'n_estimators':[50,100,150,200],'loss':['linear', 'square', 'exponential']}
    self.params['gb'] = {'learning_rate':[.01,.001,.0001],'n_estimators':[50,100,150,200],'loss':['ls', 'lad', 'huber', 'quantile'],'min_samples_split':[3,4,5]}
    self.params['xg']={'booster':['gbtree', 'gblinear','dart']}
    self.params['mlp']={'solver':['lbfgs','sgd','adam'],'activation':['identity', 'logistic', 'tanh', 'relu'],'hidden_layer_sizes':[(5,5,5),(5,4,3),(10,10,5)]}
    # SVM grid: kernels, regularization strengths and gamma values
    k=['rbf', 'linear','poly','sigmoid']
    c= [1,10,100,.1]
    g=[.0001,.001,.001,.01,.1]
    self.params['svm']=dict(kernel=k, C=c, gamma=g)
    print('-------------------------------------------')
    print('===> K-Nearest Neighbors initialized')
    print('===> Random Forest initialized')
    print('===> AdaBoost initialized')
    print('===> Gradient Boost initialized')
    print('===> XGBoost initialized')
    print('===> Neural Network initialized')
    print('===> SVM initialized')
    print('===> Voting classifier with KNN, AdaBoost, SVM and Random Forest')
def findParametersAndEvaluate(self,data,strategy,label_name,group=None,dataset=None,cv=5):
self.strategy = strategy
self.results = {}
print('-------------------------------')
print(' STEP : Finding Parameters & Evaluate Models')
print('-------------------------------')
self.label_name_check(label_name)
#print(self.labelset.columns)
# store performance data for each strategy
if (strategy == 'train_test_split' or strategy == 'all'):
self.train_test = dict()
for model in self.models.keys():
self.train_test[model] = None
print('===> Evaluation strategy: Train and Test Split ')
X_train, X_test, y_train, y_test = train_test_split(data,self.label_set[label_name],train_size=.7,random_state=self.seed)
print('===> Parameters find-> Start')
for model in self.models.keys():
if model == 'vot':
continue
if not self.configured:
gd = GridSearchCV(self.models[model],self.params[model],cv=cv,scoring='neg_root_mean_squared_error')
gd.fit(X_train,y_train)
print(' Parameters for ',model,': ',gd.best_params_)
self.models[model] = gd.best_estimator_
print('===> Parameters find-> End')
test_performances = dict()
print('===> Test data performance[RMSE] ')
for model in self.models.keys():
self.models[model].fit(X_train,y_train)
test_performances[model] = mean_squared_error(y_test,self.models[model].predict(X_test),squared=False)
#print(' Model[',model,']:',test_performances[model])
self.train_test[model] = test_performances[model]
print(self.train_test)
self.results['train_test'] = self.train_test
if (strategy == 'cross_val' or strategy == 'all'):
self.cross_val = dict()
cross_val = dict()
for model in self.models.keys():
self.cross_val[model] = None
print('==============================================')
print('Evaluation strategy: Cross Validation')
print('==============================================')
for model in self.models.keys():
if model != 'vot' and not self.configured:
print(' ==> Finding params for ',model)
gd = GridSearchCV(self.models[model],self.params[model],cv=10,scoring='neg_root_mean_squared_error')
gd.fit(data,self.label_set[label_name])
print(' Parameters: ',gd.best_params_)
self.models[model] = gd.best_estimator_
cross_val[model] = cross_val_score(self.models[model],data,self.label_set[label_name],scoring='neg_root_mean_squared_error',cv=cv)
#print(' Score[',model,']:',cross_val_scores[model])
cross_val_mean = -1 * statistics.mean(cross_val[model])
cross_val_var = statistics.variance(cross_val[model])
self.cross_val[model] = [cross_val_mean,cross_val_var]
self.results['cross_val'] = self.cross_val
if (strategy == 'leave_one_group_out' or strategy == 'all'):
self.leave_group = dict()
for model in self.models.keys():
self.leave_group[model] = None
print('==============================================')
print('Evaluation strategy: Leave one group out')
print('==============================================')
logo = LeaveOneGroupOut()
n_splits = logo.get_n_splits(groups=group)
error= dict()
for model in self.models.keys():
error[model] = [None]*n_splits
k =0
for train_index, test_index in logo.split(data,self.label_set[label_name],group):
#print(test_index)
X_train, y_train = data.iloc[train_index],self.label_set[label_name][train_index]
X_test, y_test = data.iloc[test_index],self.label_set[label_name][test_index]
for model in self.models.keys():
if model != 'vot' and not self.configured:
print(' ==> Finding params for ',model)
gd = GridSearchCV(self.models[model],self.params[model],cv=10,scoring='neg_root_mean_squared_error')
gd.fit(X_train,y_train)
print(' Parameters: ',gd.best_params_)
estimator = gd.best_estimator_
self.models[model] = estimator
self.models[model].fit(X_train,y_train)
error[model][k] = mean_squared_error(y_test,self.models[model].predict(X_test),squared=False)
#print(' Model[',model,']:',error[model])
k = k+1
for model in self.models.keys():
err_mean = statistics.mean(error[model])
err_var = statistics.variance(error[model])
self.leave_group[model] = [err_mean,err_var]
self.results['leave_group'] = self.leave_group
if (strategy == 'leave_one_dataset_out' or strategy == 'all'):
self.leave_dataset = dict()
for model in self.models.keys():
self.leave_dataset[model] = None
print('==============================================')
print('Evaluation strategy: Leave one dataset out')
print('==============================================')
logo = LeaveOneGroupOut()
n_splits = logo.get_n_splits(groups=dataset)
error= dict()
for model in self.models.keys():
error[model] = [None]*n_splits
k =0
for train_index, test_index in logo.split(data,self.label_set[label_name],dataset):
X_train, y_train = data.iloc[train_index],self.label_set[label_name][train_index]
X_test, y_test = data.iloc[test_index],self.label_set[label_name][test_index]
for model in self.models.keys():
if model != 'vot' and not self.configured:
print(' ==> Finding params for ',model)
gd = GridSearchCV(self.models[model],self.params[model],cv=10,scoring='neg_root_mean_squared_error')
gd.fit(X_train,y_train)
#print(' Parameters: ',gd.best_params_)
estimator = gd.best_estimator_
self.models[model] = estimator
self.models[model].fit(X_train,y_train)
error[model][k] = mean_squared_error(y_test,self.models[model].predict(X_test),squared=False)
#print(' Model[',model,']:',error[model])
k = k+1
for model in self.models.keys():
err_mean = statistics.mean(error[model])
err_var = statistics.variance(error[model])
self.leave_dataset[model] = [err_mean,err_var]
self.results['leave_dataset'] = self.leave_dataset
if (strategy=='sorted_stratified' or strategy == 'all') :
self.stratified = dict()
for model in self.models.keys():
self.stratified[model] = None
# idea from https://scottclowe.com/2016-03-19-stratified-regression-partitions/
print('==============================================')
print('Evaluation strategy: Sorted Stratification')
print('==============================================')
label_df = pd.DataFrame(self.label_set)
indices = label_df.sort_values(by=[label_name]).index.tolist()
splits = dict()
error = dict()
for model in self.models.keys():
error[model] = [None]*cv
for i in range(cv):
splits[i] = list()
for i in range(len(indices)):
if i%cv == 0:
pick = random.sample(range(cv),cv)
cur_pick = pick.pop()
splits[cur_pick].append(indices[i])
for i in range(cv):
test_index = splits[i]
train_index = []
for j in range(cv):
if j != i:
train_index = train_index + splits[j]
##########################################
# Code to training model on sorted stratified set
X_train, y_train = data.iloc[train_index],self.label_set[label_name][train_index]
X_test, y_test = data.iloc[test_index],self.label_set[label_name][test_index]
for model in self.models.keys():
if model != 'vot' and not self.configured:
print(' ==> Finding params for ',model)
gd = GridSearchCV(self.models[model],self.params[model],cv=10,scoring='neg_root_mean_squared_error')
gd.fit(X_train,y_train)
print(' Parameters: ',gd.best_params_)
estimator = gd.best_estimator_
self.models[model] = estimator
self.models[model].fit(X_train,y_train)
error[model][i] = mean_squared_error(y_test,self.models[model].predict(X_test),squared=False)
#print(' Model[',model,']:',error[model])
for model in self.models.keys():
err_mean = statistics.mean(error[model])
err_var = statistics.variance(error[model])
self.stratified[model] = [err_mean,err_var]
##########################################
self.results['stratified'] = self.stratified
else:
print('Unsupported evaluation strategy')
return None
return self.results
# Preparing dataframe with results for report generation
"""
if strategy == 'train_test_split':
df = pd.DataFrame(columns = ['model','train_test])
for model in self.models.keys():
df = df.append({'model':model,'train_test':self.train_test[model]},ignore_index=True)
if strategy == 'cross_val':
df = pd.DataFrame(columns = ['model','train_test_mean','train_test_var'])
for model in self.models.keys():
df = df.append({'model':model,'train_test_mean':self.train_test[model][0],'train_test_var':self.train_test[model][1]},ignore_index=True)
if strategy == 'leave_one_group_out':
df = pd.DataFrame(columns = ['model','train_test_mean','train_test_var'])
for model in self.models.keys():
df = df.append({'model':model,'train_test_mean':self.train_test[model][0],'train_test_var':self.train_test[model][1]},ignore_index=True)
if strategy == 'leave_one_dataset_out':
df = pd.DataFrame(columns = ['model','train_test_mean','train_test_var'])
for model in self.models.keys():
df = df.append({'model':model,'train_test_mean':self.train_test[model][0],'train_test_var':self.train_test[model][1]},ignore_index=True)
if strategy == 'sorted_stratified':
df = pd.DataFrame(columns = ['model','train_test_mean','train_test_var'])
for model in self.models.keys():
df = df.append({'model':model,'train_test_mean':self.train_test[model][0],'train_test_var':self.train_test[model][1]},ignore_index=True)
if strategy == 'all':
df = pd.DataFrame(columns = ['model','train_test','cross_val','leave_group','leave_dataset','stratified'])
for model in self.models.keys():
df = df.append({'model':model,'train_test':self.train_test[model],'cross_val':self.cross_val[model],'leave_group':self.leave_group[model],'leave_dataset':self.leave_dataset[model],'stratified':self.stratified[model]},ignore_index=True)
return df
"""
def report(self,currentOutput,report_name=''):
df = pd.DataFrame(columns = ['model','train_test','cross_val_mean','cross_val_var','leave_group_mean','leave_group_var','leave_dataset_mean','leave_dataset_var','stratified_mean','stratified_var'])
for model in self.models.keys():
df = df.append({'model':model,'train_test':self.train_test[model],'cross_val_mean':self.cross_val[model][0],'cross_val_var':self.cross_val[model][1],'leave_group_mean':self.leave_group[model][0],'leave_group_var':self.leave_group[model][1],'leave_dataset_mean':self.leave_dataset[model][0],'leave_dataset_var':self.leave_dataset[model][1],'stratified_mean':self.stratified[model][0],'stratified_var':self.stratified[model][1]},ignore_index=True)
filename = report_name
df.to_csv(filename,index=False)
print('==============================================')
print(' Report Generation')
print('==============================================')
print(' ===> Successfully generated ')
print(' ===> Results saved in easyRegress_report.csv file')
def activateGroups(self,groups):
self.groups = groups
self.flagGroup = True
def activateDatasets(self,datasets):
self.datasets = datasets
self.flagDataset = True
def activateLabel(self,label):
self.label = label
def buildPipeline(self,sequence,report_name=''):
"""
<feature_name> : Name of feature
feature_extraction: Apply feature extraction based on correlation
feature_scaling: Apply feature scaling. Options: Standard, MinMax
feature_fusion: Apply feature fusion. Options: gini, entropy, pca, isomap, mds, tsne
load_models: Load regression models.
find_evaluate: Model evaluation. Options: train_test_split, cross_validation, leave_one_group_out, leave_one_dataset_out, sorted_stratified
report_results: Report results. Options: table, chart
"""
currentOutput = None
for index, step in enumerate(sequence):
label = self.label
groups = self.groups
datasets = self.datasets
if index == 0:
self.feature_name_check(step)
currentOutput = self.feature_set[step][2]
elif step == 'feature_extraction':
results = self.extractFeatures(currentOutput)
currentOutput = results
elif step == 'feature_scaling_std':
print(currentOutput.shape)
results = self.Scaling(currentOutput,'std')
currentOutput = results
elif step == 'feature_scaling_mmax':
results = self.Scaling(currentOutput,'mmax')
currentOutput = results
elif step == 'feature_fusion_pca':
results = self.DimRed('pca',currentOutput,{'n_components':2,'n_neighbors':3})
currentOutput = results
elif step == 'feature_fusion_mds':
results = self.DimRed('mds',currentOutput,{'n_components':2,'n_neighbors':3})
currentOutput = results
elif step == 'feature_fusion_isomap':
results = self.DimRed('isomap',currentOutput,{'n_components':2,'n_neighbors':3})
currentOutput = results
elif step == 'feature_fusion_tsne':
results = self.DimRed('tsne',currentOutput,{'n_components':2,'n_neighbors':3})
currentOutput = results
elif step == 'feature_fusion_entropy':
results = self.getEntropy(currentOutput)
currentOutput = results
print(results)
elif step == 'feature_fusion_gini':
results = self.getGINI(currentOutput)
currentOutput = results
print(results)
elif step == 'load_modules':
self.regressionModelInitialize()
elif step == 'evaluate_train_test':
if label == None:
print(' ====> Error: labels are not loaded')
results =self.findParametersAndEvaluate(currentOutput,'train_test_split',label)
currentOutput = results
elif step == 'evaluate_cross_val':
if label == None:
print(' ====> Error: labels are not loaded')
results =self.findParametersAndEvaluate(currentOutput,'cross_val',label)
currentOutput = results
elif step == 'evaluate_leave_group_out':
if label == None:
print(' ====> Error: labels are not loaded')
if self.flagDataset == False:
print(' ====> Error: groups ids are not loaded')
results =self.findParametersAndEvaluate(currentOutput,'leave_one_group_out',label,group=groups)
currentOutput = results
elif step == 'evaluate_leave_dataset_out':
if label == None:
print(' ====> Error: labels are not loaded')
if self.flagDataset == False:
print(' ====> Error: datasets ids are not loaded')
results =self.findParametersAndEvaluate(currentOutput,'leave_one_dataset_out',label,dataset = datasets)
currentOutput = results
elif step == 'evaluate_stratified':
if label == None:
print(' ====> Error: labels are not loaded')
results =self.findParametersAndEvaluate(currentOutput,'sorted_stratified',label)
currentOutput = results
elif step == 'all':
if label == None:
print(' ====> Error: labels are not loaded')
results =self.findParametersAndEvaluate(currentOutput,'all',label,group = groups, dataset = datasets)
currentOutput = results
elif step == 'report_csv':
self.report(currentOutput,report_name)
else:
print(' Unsupported module ',step,' is specified')
| 38,712 | 10,471 |
import sys


def primes_below(n):
    """Return all primes strictly less than *n*, in ascending order.

    Trial division only tests previously found primes p with p*p <= num.
    The original version tested every known prime and rebuilt the whole
    set on each insertion (quadratic overhead), and wrongly reported 2
    even for n <= 2.
    """
    primes = []
    for num in range(2, n):
        if all(num % p != 0 for p in primes if p * p <= num):
            primes.append(num)
    return primes


def main():
    """For each integer read from the file named in argv[1], print the
    comma-joined list of primes below it."""
    # Context manager guarantees the file is closed even on error.
    with open(sys.argv[1], 'r') as lines:
        for line in lines:
            line = line.strip()
            if line:
                n = int(line)
                print(','.join(str(p) for p in primes_below(n)))


if __name__ == '__main__':
    main()
| 469 | 175 |
from rest_framework import serializers
from gemtown.modelphotos import models as modelphoto_models
from gemtown.modelers import models as modeler_models
from gemtown.musicians import models as musician_models
from . import models
import time
class TimestampField(serializers.Field):
    """Read-only representation of a datetime as an integer Unix timestamp."""

    def to_representation(self, value):
        # mktime interprets the naive timetuple in local time.
        seconds = time.mktime(value.timetuple())
        return int(seconds)
class UsernameSerializer(serializers.ModelSerializer):
    """Minimal user representation: just the username."""

    class Meta:
        model = models.User
        fields = ['username']
class MusicianSerializer(serializers.ModelSerializer):
    """Compact musician info as nested inside user payloads."""

    class Meta:
        model = musician_models.Musician
        fields = ['id', 'nickname', 'country']
class ModelPhotoSerializer(serializers.ModelSerializer):
    """Photo file plus its type; nested by ModelerSerializer."""

    class Meta:
        model = modelphoto_models.ModelPhoto
        fields = ['file', 'photo_type']
class ModelerSerializer(serializers.ModelSerializer):
    """Compact modeler info including the nested cover image."""

    cover_image = ModelPhotoSerializer()

    class Meta:
        model = modeler_models.Modeler
        fields = ['id', 'cover_image', 'nickname', 'country']
class UserSerializer(serializers.ModelSerializer):
    """Full user payload: profile fields, nested musician/modeler,
    follower/following username lists, and epoch timestamps."""

    created_at = TimestampField()
    updated_at = TimestampField()
    followers = UsernameSerializer(many=True)
    followings = UsernameSerializer(many=True)
    musician = MusicianSerializer()
    modeler = ModelerSerializer()

    class Meta:
        model = models.User
        fields = [
            'id',
            'username',
            'email',
            'first_name',
            'last_name',
            'user_class',
            'gem_amount',
            'musician',
            'modeler',
            'gender',
            'profile_photo',
            'country',
            'mobile_number',
            'mobile_country',
            'followers',
            'followings',
            'is_superuser',
            'is_staff',
            'created_at',
            'updated_at',
        ]
| 2,117 | 547 |
#
# Copyright 2018 Expedia Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from functions import delivery_time
# Each case pairs 'data' (the single argument passed to delivery_time)
# with the expected 'result'.
test_cases = [
    {
        'data': [[3, 3, 3], 1, 3],
        'result': 9
    },
    {
        'data': [[3, 3, 3], 1, 10],
        'result': 30
    },
    {
        'data': [[3, 3, 3], 2, 3],
        'result': 6
    },
    {
        'data': [[3, 3, 3], 3, 3],
        'result': 3
    },
]
# One computed value per test case, filled in by DeliveryTimeTestCase.setUp.
results = []
class DeliveryTimeTestCase(unittest.TestCase):
    """Check delivery_time() against every entry in test_cases."""

    def setUp(self):
        # Compute results fresh for each test. The previous version appended
        # to a module-level list on every setUp call, so a second test method
        # would have duplicated and misaligned the entries.
        self.results = [delivery_time(case['data']) for case in test_cases]

    def test_result(self):
        """Each computed result must equal the expected one."""
        for case, actual in zip(test_cases, self.results):
            with self.subTest(data=case['data']):
                self.assertEqual(actual, case['result'])
# Allow running this file directly with `python <file>` as well as via pytest.
if __name__ == '__main__':
    unittest.main()
# tests_sc = [
# {
# 'num': np.float64(3),
# 'wip': 1,
# 'runsdim': np.float64(10),
# 'runstot': 1000,
# 'fun': 1
# },
# {
# 'num': np.float64(3),
# 'wip': 2,
# 'runsdim': np.float64(10),
# 'runstot': 1000,
# 'fun': 1
# },
# {
# 'num': np.float64(3),
# 'wip': 3,
# 'runsdim': np.float64(10),
# 'runstot': 1000,
# 'fun': 1
# },
# {
# 'num': np.float64(4),
# 'wip': 1,
# 'runsdim': np.float64(10),
# 'runstot': 500,
# 'fun': 1
# },
# {
# 'num': np.float64(4),
# 'wip': 2,
# 'runsdim': np.float64(10),
# 'runstot': 500,
# 'fun': 1
# },
# {
# 'num': np.float64(4),
# 'wip': 3,
# 'runsdim': np.float64(10),
# 'runstot': 500,
# 'fun': 1
# },
# {
# 'num': np.float64(3),
# 'wip': 1,
# 'runsdim': np.float64(20),
# 'runstot': 1500,
# 'fun': 1
# },
# {
# 'num': np.float64(3),
# 'wip': 2,
# 'runsdim': np.float64(20),
# 'runstot': 1500,
# 'fun': 1
# },
# {
# 'num': np.float64(3),
# 'wip': 3,
# 'runsdim': np.float64(20),
# 'runstot': 1500,
# 'fun': 1
# }
# ]
# results_sc = []
# montecarlo_results_sc = []
# results_expected_sc = [
# 4,
# 7,
# 10,
# 3,
# 5,
# 7,
# 7,
# 13,
# 19
# ]
# tots_expected_sc = [
# 1000,
# 1000,
# 1000,
# 500,
# 500,
# 500,
# 1500,
# 1500,
# 1500
# ]
# for i in range(len(tests_sc)):
# results_sc.append(
# result(
# tests_sc[i]['num'],
# tests_sc[i]['wip'],
# tests_sc[i]['runsdim'],
# tests_sc[i]['runstot'],
# tests_sc[i]['fun']
# )
# )
# montecarlo_results_sc.append(
# stories_completed(
# (
# [tests_sc[i]['num']],
# tests_sc[i]['wip'],
# tests_sc[i]['runsdim']
# )
# )
# )
# class Test_stories_completed:
# """ test for boostrap helper"""
# for i in range(len(tests_sc)):
# def test_length(self):
# assert len(results_sc[i]) == tots_expected_sc[i]
# def test_mean(self):
# for j in range(len(results_sc[i])):
# assert results_sc[i][j] == results_expected_sc[i]
# class Test_montecarlo_stories_completed:
# """ test for boostrap helper"""
# for i in range(len(tests_sc)):
# def test_mean(self):
# assert montecarlo_results_sc[i] == results_expected_sc[i]
# tests_sn = [
# {
# 'num': np.float64(3),
# 'wip': 1,
# 'runsdim': np.float64(10),
# 'runstot': 1000,
# 'fun': 2
# },
# {
# 'num': np.float64(3),
# 'wip': 2,
# 'runsdim': np.float64(10),
# 'runstot': 1000,
# 'fun': 2
# },
# {
# 'num': np.float64(3),
# 'wip': 3,
# 'runsdim': np.float64(10),
# 'runstot': 1000,
# 'fun': 2
# },
# {
# 'num': np.float64(4),
# 'wip': 1,
# 'runsdim': np.float64(10),
# 'runstot': 500,
# 'fun': 2
# },
# {
# 'num': np.float64(4),
# 'wip': 2,
# 'runsdim': np.float64(10),
# 'runstot': 500,
# 'fun': 2
# },
# {
# 'num': np.float64(4),
# 'wip': 3,
# 'runsdim': np.float64(10),
# 'runstot': 500,
# 'fun': 2
# },
# {
# 'num': np.float64(3),
# 'wip': 1,
# 'runsdim': np.float64(20),
# 'runstot': 1500,
# 'fun': 2
# },
# {
# 'num': np.float64(3),
# 'wip': 2,
# 'runsdim': np.float64(20),
# 'runstot': 1500,
# 'fun': 2
# },
# {
# 'num': np.float64(3),
# 'wip': 3,
# 'runsdim': np.float64(20),
# 'runstot': 1500,
# 'fun': 2
# }
# ]
# results_sn = []
# montecarlo_results_sn = []
# results_expected_sn = [
# 4,
# 2,
# 2,
# 3,
# 2,
# 1,
# 7,
# 4,
# 3
# ]
# tots_expected_sn = [
# 1000,
# 1000,
# 1000,
# 500,
# 500,
# 500,
# 1500,
# 1500,
# 1500
# ]
# for i in range(len(tests_sn)):
# results_sn.append(
# result(
# tests_sn[i]['num'],
# tests_sn[i]['wip'],
# tests_sn[i]['runsdim'],
# tests_sn[i]['runstot'],
# tests_sn[i]['fun']
# )
# )
# montecarlo_results_sn.append(
# sprints_needed(
# (
# [tests_sn[i]['num']],
# tests_sn[i]['wip'],
# tests_sn[i]['runsdim']
# )
# )
# )
# class Test_sprints_needed:
# """ test for boostrap helper"""
# for i in range(len(tests_sn)):
# def test_length(self):
# assert len(results_sn[i]) == tots_expected_sn[i]
# def test_mean(self):
# for j in range(len(results_sn[i])):
# assert results_sn[i][j] == results_expected_sn[i]
# class Test_montecarlo_sprints_needed:
# """ test for boostrap helper"""
# for i in range(len(tests_sn)):
# def test_mean(self):
# assert montecarlo_results_sn[i] == results_expected_sn[i]
# tests_bt = [
# {
# 'sample': [np.float64(3) for x in range(10)],
# 'predstot': 500,
# 'predsdim': 1,
# },
# {
# 'sample': [np.float64(4) for x in range(10)],
# 'predstot': 750,
# 'predsdim': 5,
# },
# {
# 'sample': [np.float64(5) for x in range(10)],
# 'predstot': 1000,
# 'predsdim': 10,
# },
# ]
# results_bt = []
# results_expected_bt = [
# 3,
# 4,
# 5
# ]
# tots_expected_bt = [
# 500,
# 750,
# 1000
# ]
# for i in range(len(tests_bt)):
# results_bt.append(
# bootstrap(
# tests_bt[i]['sample'],
# tests_bt[i]['predstot'],
# tests_bt[i]['predsdim']
# )
# )
# class Test_bootstrap:
# """ test for boostrap helper"""
# for i in range(len(tests_bt)):
# def test_length(self):
# assert len(results_bt[i]) == tots_expected_bt[i]
# def test_mean(self):
# assert np.mean(results_bt[i]) == results_expected_bt[i]
# def test_max(self):
# assert np.amax(results_bt[i]) == results_expected_bt[i]
# def test_min(self):
# assert np.amin(results_bt[i]) == results_expected_bt[i]
| 8,294 | 3,318 |
# -*- coding: utf-8 -*-
import logging
from ask_sdk_core.skill_builder import SkillBuilder
from ask_sdk_core.utils import is_request_type, is_intent_name
from ask_sdk_core.handler_input import HandlerInput
from ask_sdk_model import Response
from ask_sdk_model.ui import SimpleCard
import os
from alexa_skills import aws_utils
# Service configuration comes from the environment. URL and access key id
# are required (KeyError at import time if missing); the secret uses .get
# and may therefore be None — presumably aws_utils tolerates that; confirm.
CIS_SERVICE_URL = os.environ['CIS_SERVICE_URL']
CIS_AWS_ACCESS_KEY_ID = os.environ['CIS_AWS_ACCESS_KEY_ID']
CIS_AWS_SECRET_ACCESS_KEY = os.environ.get('CIS_AWS_SECRET_ACCESS_KEY')
# Skill metadata and the reprompt text shared by several handlers.
skill_name = "CISDiagnosis"
help_text = ("Please tell me your medical condition. You can say "
             "I have cold headache.")
# Name of the slot carrying the user's free-form medical report.
report_slot = "report"
sb = SkillBuilder()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
@sb.request_handler(can_handle_func=is_request_type("LaunchRequest"))
def launch_request_handler(handler_input):
    """Greet the user at skill launch and prompt for a medical condition."""
    # type: (HandlerInput) -> Response
    greeting = "Welcome, Tell me your medical condition."
    builder = handler_input.response_builder
    builder.speak("{} {}".format(greeting, help_text)).ask(help_text)
    return builder.response
@sb.request_handler(can_handle_func=is_intent_name("AMAZON.HelpIntent"))
def help_intent_handler(handler_input):
    """Repeat the usage instructions when the user asks for help."""
    # type: (HandlerInput) -> Response
    builder = handler_input.response_builder
    builder.speak(help_text).ask(help_text)
    return builder.response
@sb.request_handler(
    can_handle_func=lambda handler_input:
    is_intent_name("AMAZON.CancelIntent")(handler_input) or
    is_intent_name("AMAZON.StopIntent")(handler_input))
def cancel_and_stop_intent_handler(handler_input):
    """Say goodbye for both Cancel and Stop intents."""
    # type: (HandlerInput) -> Response
    farewell = "Goodbye!"
    return handler_input.response_builder.speak(farewell).response
@sb.request_handler(can_handle_func=is_request_type("SessionEndedRequest"))
def session_ended_request_handler(handler_input):
    """Close the session without producing any speech output."""
    # type: (HandlerInput) -> Response
    builder = handler_input.response_builder
    return builder.response
from io import StringIO
def getMedicalAnalysis(medical_report):
    """Run Comprehend Medical entity detection on *medical_report* and
    return a spoken summary such as 'cold is MEDICAL_CONDITION '."""
    client = aws_utils.get_boto3_client(CIS_AWS_ACCESS_KEY_ID, CIS_AWS_SECRET_ACCESS_KEY, 'comprehendmedical')
    response = client.detect_entities_v2(
        Text=medical_report
    )
    # Keep only MEDICAL_CONDITION entities that carry at least one trait.
    conditions = {}
    for entity in response['Entities']:
        if entity["Category"] == "MEDICAL_CONDITION" and len(entity["Traits"]) > 0:
            conditions[entity["Text"]] = entity["Category"]
    # Same output as the original StringIO accumulation, built with join.
    pieces = [text + ' is ' + conditions[text] + ' ' for text in conditions]
    return ''.join(pieces)
@sb.request_handler(can_handle_func=is_intent_name("MedicalIntent"))
def my_medical_diagnosis_handler(handler_input):
    """Read the 'report' slot, run the medical analysis, and answer with
    the detected conditions; ask the user to retry when the slot is absent."""
    # type: (HandlerInput) -> Response
    slots = handler_input.request_envelope.request.intent.slots
    if report_slot not in slots:
        speech = "I'm not sure, please try again"
        reprompt = "I'm not sure, please try again"
    else:
        medical_report = slots[report_slot].value
        analysis = getMedicalAnalysis(medical_report)
        speech = "Identified diseases are " + analysis
        reprompt = "That's " + analysis
    handler_input.response_builder.speak(speech).ask(reprompt)
    return handler_input.response_builder.response
@sb.request_handler(can_handle_func=is_intent_name("AMAZON.FallbackIntent"))
def fallback_handler(handler_input):
    """Handle AMAZON.FallbackIntent (en-US only locale) by redirecting the
    user back to the supported phrasing."""
    # type: (HandlerInput) -> Response
    speech = (
        "The {} skill can't help you with that. " + help_text ).format(skill_name)
    handler_input.response_builder.speak(speech).ask(help_text)
    return handler_input.response_builder.response
def convert_speech_to_text(ssml_speech):
    """Strip SSML/HTML tags from *ssml_speech* and return the plain text."""
    # type: (str) -> str
    stripper = SSMLStripper()
    stripper.feed(ssml_speech)
    return stripper.get_data()
@sb.global_response_interceptor()
def add_card(handler_input, response):
    """Attach a SimpleCard whose content mirrors the spoken SSML."""
    # type: (HandlerInput, Response) -> None
    card_text = convert_speech_to_text(response.output_speech.ssml)
    response.card = SimpleCard(title=skill_name, content=card_text)
@sb.global_response_interceptor()
def log_response(handler_input, response):
    """Print every outgoing Alexa response."""
    # type: (HandlerInput, Response) -> None
    message = "Alexa Response: {}\n".format(response)
    print(message)
@sb.global_request_interceptor()
def log_request(handler_input):
    """Print every incoming Alexa request."""
    # type: (HandlerInput) -> None
    message = "Alexa Request: {}\n".format(handler_input.request_envelope.request)
    print(message)
@sb.exception_handler(can_handle_func=lambda i, e: True)
def all_exception_handler(handler_input, exception):
    """Catch-all handler: log the exception and apologize to the user."""
    # type: (HandlerInput, Exception) -> None
    print("Encountered following exception: {}".format(exception))
    apology = "Sorry, there was some problem. Please try again!!"
    handler_input.response_builder.speak(apology).ask(apology)
    return handler_input.response_builder.response
######## Convert SSML to Card text ############
# This is for automatic conversion of ssml to text content on simple card
# You can create your own simple cards for each response, if this is not
# what you want to use.
from six import PY2
try:
from HTMLParser import HTMLParser
except ImportError:
from html.parser import HTMLParser
class SSMLStripper(HTMLParser):
    """HTMLParser subclass that keeps only text nodes, discarding all tags."""

    def __init__(self):
        self.reset()
        self.full_str_list = []
        if not PY2:
            # Options normally set by HTMLParser.__init__ on Python 3,
            # which this class deliberately does not call.
            self.strict = False
            self.convert_charrefs = True

    def handle_data(self, d):
        # Accumulate the raw character data found between tags.
        self.full_str_list.append(d)

    def get_data(self):
        """Return all text collected so far as one string."""
        return ''.join(self.full_str_list)
################################################
# Entry point to configure in the AWS Lambda console (module.lambda_handler).
lambda_handler = sb.lambda_handler()
| 6,702 | 2,119 |
"""
---------------------------------------------------------------------
Continuos Optimization Problems
---------------------------------------------------------------------
"""
"""
---------------------------------------------------------------------
GA configuration
---------------------------------------------------------------------
"""
_standardInputsGA = [
{
"id": "generaciones",
"desc": "Número de generaciones:",
"default": 10,
"step": 1,
"min": 1
},
{
"id": "poblacionGeneracional",
"desc": "Tamaño de la población:",
"default": 30,
"step":1,
"min":10
}
]
_operadoresGA = [
{
'id': 'parentSelection',
'desc': "Método de selección de padres:",
'items': [
{
'label': 'roulette_sampler',
'inputs':[]
},
{
'label':'stochastic_universal_sampler',
'inputs':[]
},
{
'label':'deterministic_sampler',
'inputs':[]
},
{
'label':'tournament_sampler',
'inputs':[
{
"id": "chunk",
"desc": "Tamaño de grupos:",
"default": 2,
"step": 1,
"min": 2
},
{
"id": "prob",
"desc": "Probabilidad de seleccionar al mejor:",
"default": 0.5,
"step": 0.01,
"min": 0,
"max": 1
}
]
}
]
},
{
'id':'crossOver',
'desc':'Operador de cruza:',
'items':[
{
'label': 'n_point_crossover',
'inputs':[
{
"id": "nPoint",
"desc": "Número de puntos de cruza:",
"default": 1,
"step": 1,
"min": 1
}
]
},
{
'label':'uniform_crossover',
'inputs':[
{
"id": "uniform",
"desc": "Probabilidad de cruza:",
"default": 0.5,
"step": 0.01,
"min": 0,
"max":1
}
]
},
{
'label':'simulated_binary_crossover',
'inputs':[
{
"id": "simulatedBinary",
"desc": "nc:",
"default": 1
}
]
},
{
'label':'discrete_crossover',
'inputs':[]
},
{
'label':'intermediate_crossover',
'inputs': [
{
'id':'alphaGA',
'desc': 'Aporte del P1:',
'default':0.5,
'step':0.1,
'min':0.01,
'max':1
}
]
}
]
},
{
'id':'mutation',
'desc': 'Operadores de mutación',
'items':[
{
'label':'boundary_mutator',
'inputs':[]
},
{
'label':'uniform_mutator',
'inputs':[]
},
{
'label':'non_uniform_mutator',
'inputs':[
{
'id':'sigmaNonUniform',
'desc':'Valor de sigma:',
'default':1,
'min':0.01
}
]
}
]
},
{
'id':'survivorSelectionGA',
'desc': 'Esquemas de selección:',
'items':[
{
'label':'merge_selector',
'inputs':[]
},
{
'label':'replacement_selector',
'inputs':[]
}
]
}
]
"""
---------------------------------------------------------------------
ES configuration
---------------------------------------------------------------------
"""
_standardInputsES = [
{
"id": "generaciones",
"desc": "Número de generaciones:",
"default": 10,
"step": 1,
"min": 1
},
{
"id": "poblacionGeneracional",
"desc": "Tamaño de la población padre:",
"default": 30,
"step":1,
"min":10
},
{
'id':'poblacionHijos',
'desc':'Tamaño de la población hija:',
'default':30,
'step':1,
'min':10
},
{
'id':'epsilonSigma',
'desc':'Mínimo valor aceptado sigma:',
'default':0.001,
'min':0,
'step':0.01
}
]
_operadoresES = [
{
'id': 'crossoverSolES',
'desc': "Operadores de cruza para la solución:",
'items': [
{
'label':'discrete_crossover',
'inputs':[]
},
{
'label':'intermediate_crossover',
'inputs': [
{
'id':'alphaEEX',
'desc': 'Aporte del P1:',
'default':0.5,
'step':0.1,
'min':0.01,
'max':1
}
]
}
]
},
{
'id':'mutationSolES',
'desc':'Operadores de mutación para la solución:',
'items': [
{
'label':'sigma_mutator',
'inputs':[]
}
]
},
{
'id':'crossoverSigmaES',
'desc':'Operadores de cruza para los sigma:',
'items':[
{
'label':'discrete_crossover',
'inputs':[]
},
{
'label':'intermediate_crossover',
'inputs': [
{
'id':'alphaEESigma',
'desc': 'Aporte del P1:',
'default':0.5,
'step':0.1,
'min':0.01,
'max':1
}
]
}
]
},
{
'id':'mutationSigmaES',
'desc': 'Operadores de mutación para los sigma:',
'items':[
{
'label':'single_sigma_adaptive_mutator',
'inputs':[]
},
{
'label':'mult_sigma_adaptive_mutator',
'inputs':[ ]
}
]
},
{
'id':'survivorSelectionES',
'desc':'Esquema de selección de sobrevivientes:',
'items': [
{
'label':'merge_selector',
'inputs':[]
},
{
'label':'replacement_selector',
'inputs':[]
}
]
}
]
"""
---------------------------------------------------------------------
EP configuration
---------------------------------------------------------------------
"""
_standardInputsEP = [
{
"id": "generaciones",
"desc": "Número de generaciones:",
"default": 10,
"step": 1,
"min": 1
},
{
"id": "poblacionGeneracional",
"desc": "Tamaño de la población:",
"default": 30,
"step":1,
"min":10
}
]
_operadoresEP = [
{
'id': 'operadorMutacionX',
'desc': "Operadores de mutación en la solución X:",
'items': [
{
'label': 'sigma_mutator',
'inputs':[]
}
]
},
{
'id': 'operadorMutacionSigma',
'desc': "Operadores de mutación en la variable de sigma: ",
'items': [
{
'label': "sigma_ep_adaptive_mutator",
'inputs':[
{
"id": "alpha",
"desc": "Valor alpha:",
"default": 0.5,
"step": 0.01,
"min": 0,
"max": 1
}
]
}
]
},
{
'id':'survivorSelectionPE',
'desc': 'Esquemas de selección:',
'items':[
{
'label':'merge_selector',
'inputs':[]
},
{
'label':'replacement_selector',
'inputs':[]
}
]
}
]
def getOperands(name):
    """Return the operator catalogue for algorithm *name*.

    name: 'EP' (evolutionary programming), 'EE' (evolution strategies)
    or 'GA' (genetic algorithm). Returns None for any other value,
    matching the original if/elif chain's implicit fall-through.
    """
    catalogues = {
        'EP': _operadoresEP,
        'EE': _operadoresES,
        'GA': _operadoresGA,
    }
    return catalogues.get(name)
#!/usr/bin/env python3
import numpy as np
from PIL import Image


def smdimerge(pargs, oargs):
    """Merge a specular map and a gloss map into a single SMDI texture.

    The output image's green channel is the specular map's red channel and
    its blue channel is the gloss map's red channel; red and alpha stay 255.

    pargs: [spec_path, gloss_path, target_path]. oargs is currently unused
    but kept for the caller's option-passing convention.
    Returns 0 on success, -1 for a wrong argument count, 1 on I/O or
    size-mismatch errors.
    """
    if len(pargs) != 3:
        return -1
    path_spec, path_gloss, path_target = pargs
    try:
        spec = Image.open(path_spec).convert("RGBA")
        gloss = Image.open(path_gloss).convert("RGBA")
    # Bug fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt. PIL read failures surface as OSError/ValueError.
    except (OSError, ValueError):
        print("Failed to read images. Please check your paths.")
        return 1
    if spec.size != gloss.size:
        print("Image sizes do not match, aborting.")
        return 1
    # Start from an all-white RGBA canvas so R and A are 255 everywhere.
    smdi = Image.new("RGBA", spec.size, "white")
    data = np.array(smdi)
    r, g, b, a = data.transpose()
    g = np.array(spec).transpose()[0]   # G <- spec red channel
    b = np.array(gloss).transpose()[0]  # B <- gloss red channel
    data = np.array([r, g, b, a]).transpose()
    smdi = Image.fromarray(data)
    try:
        smdi.save(path_target)
    except OSError:  # bug fix: was a bare `except:`
        print("Failed to write final image to disk. Check permissions.")
        return 1
    else:
        print("SMDI map saved at: {}".format(path_target))
        return 0
#!/usr/bin/python3
import datetime
import os
import requests
import sys
if sys.version_info[0] >= 3:
import urllib.parse as urllib
else:
import urllib
def parse_index_date(index):
    """Return the datetime encoded after the first '-' of an index name
    (e.g. 'logstash-2020.01.02'), or None when the name contains no '-'.

    Raises ValueError when the dash-suffix is not %Y.%m.%d, which matches
    the original inline behaviour.
    """
    parts = index.split('-')
    if len(parts) > 1:
        return datetime.datetime.strptime(parts[1], "%Y.%m.%d")
    return None


def main():
    """Delete every dated Elasticsearch index older than 60 days."""
    # Previously all of this ran at import time; a __main__ guard makes the
    # module importable (and parse_index_date testable) without side effects.
    endpoint = os.environ['ELASTICSEARCH_URL']
    auth = (os.environ['ELASTICSEARCH_USERNAME'],
            os.environ['ELASTICSEARCH_PASSWORD'])
    prune_start = datetime.datetime.utcnow() - datetime.timedelta(days=60)
    r = requests.get('{0}/_cat/indices'.format(endpoint), auth=auth)
    for line in r.text.splitlines():
        data = line.split(' ')
        # Heuristic: a full _cat/indices row has many columns; the index
        # name sits at position 2 (single-space splitting, as before).
        if len(data) > 10:
            index = data[2]
            d = parse_index_date(index)
            if d is not None and d < prune_start:
                r2 = requests.delete(
                    '{0}/{1}'.format(endpoint, urllib.quote(index)),
                    auth=auth)
                print(index, r2.json())


if __name__ == '__main__':
    main()
#!/usr/bin/env python
#!coding=utf-8
import rospy
import numpy as np
import PIL.Image as pilimage
from sensor_msgs.msg import CompressedImage
from sensor_msgs.msg import Image
from std_msgs.msg import Float64
from cv_bridge import CvBridge, CvBridgeError
import cv2
import time
from yolo import YOLO
from sensor_msgs.msg import Joy
from std_msgs.msg import String
from geometry_msgs.msg import Twist
from tf.transformations import *
from math import pi
from geometry_msgs.msg import PoseStamped
from std_msgs.msg import Header
from sensor_msgs.msg import JointState
from threading import Thread
import threading
# NOTE(review): `global` at module scope is a no-op; RV2_motor1_joint is
# actually created by RV2_motorjointstate_callback().
global RV2_motor1_joint
# Shared YOLO detector and ROS<->OpenCV image converter, used by all threads.
yolo = YOLO()
bridge = CvBridge()
def send():
    """Subscribe to the mid camera's compressed stream and service callbacks
    until shutdown (runs in its own thread)."""
    rospy.Subscriber('/mid_camera/color/image_raw/compressed', CompressedImage, ReceiveVideo_right)
    rospy.spin()
def ReceiveVideo_right(data):
    """Camera callback: decode the compressed frame into the global
    cv_image buffer (BGR) consumed by main()."""
    global cv_image
    cv_image = bridge.compressed_imgmsg_to_cv2(data, 'bgr8')
def main():
    """Detection loop: run YOLO on the latest camera frame, display it, and
    update the global delta_x with the tracked object's horizontal offset
    from the image center (runs in its own thread)."""
    global delta_x,cv_image
    # Give the subscriber thread time to deliver a first frame.
    time.sleep(4)
    fps = 0
    while not rospy.is_shutdown():
        t1 = time.time()
        # Grab the most recent frame (BGR -> RGB for PIL/YOLO).
        frame = cv2.cvtColor(cv_image,cv2.COLOR_BGR2RGB)
        # Convert to a PIL Image.
        frame = pilimage.fromarray(np.uint8(frame))
        # Run detection.
        frame, bbox_list, label_list = yolo.detect_image(frame)
        frame = np.array(frame)
        # RGB -> BGR so OpenCV displays colors correctly.
        frame = cv2.cvtColor(frame,cv2.COLOR_RGB2BGR)
        # Running average of the frame rate.
        fps = ( fps + (1./(time.time()-t1)) ) / 2
        print("fps= %.2f"%(fps))
        frame = cv2.putText(frame, "fps= %.2f"%(fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        print(frame.shape)
        cv2.imshow("video",frame)
        cv2.waitKey(3)
        # c= cv2.waitKey(1) & 0xff
        # if c==27:
        #     break
        if type(label_list) != int: # bbox_list and label_list are the int 1 when nothing was detected
            num_of_obj = len(label_list)
            #print('num_of_object:', num_of_obj)
            # Offset of the tracked object from the image center (x=320).
            for i in range(num_of_obj):
                if 'banana' in label_list[i]:
                    # bbox columns 1 and 3 look like the x extents — confirm.
                    object_center = (bbox_list[i][1]+bbox_list[i][3])*0.5
                    delta_x = 320-object_center
                    #print(delta_x)
                    #return delta_x
                    # location_pub.publish(delta_x)
                    #motor1_move()
                elif 'bed' in label_list[i]:
                    print("yyy")
                    pass
        else:
            print('yolo未识别到任何物体')
            pass
def motor1_move():
    """Control loop: rotate motor 1 toward the tracked object (runs in its
    own thread).

    Inputs are two globals: delta_x, the object's horizontal pixel offset
    from image center set by main(), and RV2_motor1_joint, the joint
    position set by RV2_motorjointstate_callback(). The joint position
    gates motion into a center regime and two soft limits at +/-1.5.
    """
    time.sleep(1)
    global command_vel_pub_m, delta_x, RV2_motor1_joint
    delta_x = 0
    now = rospy.Time.now()
    # Reused JointState message; only .velocity changes per iteration.
    motor_vel = JointState()
    motor_vel.header = Header()
    motor_vel.header.stamp = now
    motor_vel.header.frame_id = "bulldog"
    motor_vel.name = ["motor1"]
    # rospy.Subscriber('/joint_states_motor',JointState,RV2_motorjointstate_callback)
    while not rospy.is_shutdown():
        print(delta_x)
        # Joint within the center range.
        if -1.5 < RV2_motor1_joint < 1.5:
            # Turn-left conditions.
            if delta_x > 200:
                motor_vel.velocity = [0.48]
                print (motor_vel)
                command_vel_pub_m.publish(motor_vel)
                time.sleep(2)
            elif 80 < delta_x < 200:
                # Speed proportional to the offset.
                motor_vel.velocity = [(delta_x - 40) * 0.003]
                print (motor_vel)
                command_vel_pub_m.publish(motor_vel)
                time.sleep(2)
            # Turn-right conditions.
            elif delta_x < -200:
                motor_vel.velocity = [-0.48]
                command_vel_pub_m.publish(motor_vel)
                time.sleep(2)
            elif -200 < delta_x < -80:
                motor_vel.velocity = [(delta_x + 40) * 0.003]
                command_vel_pub_m.publish(motor_vel)
                time.sleep(2)
            # Stop condition (object roughly centered).
            elif -80 < delta_x < 80:
                motor_vel.velocity = [0]
                command_vel_pub_m.publish(motor_vel)
        # Left limit reached: never turn further left.
        if 1.5 < RV2_motor1_joint:
            # Would-be left turn: hold still instead.
            if delta_x > 80:
                motor_vel.velocity = [0]
                print (motor_vel)
                command_vel_pub_m.publish(motor_vel)
                time.sleep(2)
            # Turn-right conditions.
            elif delta_x < -200:
                motor_vel.velocity = [-0.48]
                command_vel_pub_m.publish(motor_vel)
                time.sleep(2)
            elif -200 < delta_x < -80:
                motor_vel.velocity = [(delta_x + 40) * 0.003]
                command_vel_pub_m.publish(motor_vel)
                time.sleep(2)
            # Stop condition.
            elif -80 < delta_x < 80:
                motor_vel.velocity = [0]
                command_vel_pub_m.publish(motor_vel)
                time.sleep(0.5)
        # Right limit reached: never turn further right.
        if RV2_motor1_joint < -1.5:
            # Turn-left conditions.
            if delta_x > 200:
                motor_vel.velocity = [0.48]
                print (motor_vel)
                command_vel_pub_m.publish(motor_vel)
                time.sleep(2)
            elif 80 < delta_x < 200:
                motor_vel.velocity = [(delta_x - 40) * 0.003]
                print (motor_vel)
                command_vel_pub_m.publish(motor_vel)
                time.sleep(2)
            # Would-be right turn: hold still instead.
            elif delta_x < -80:
                motor_vel.velocity = [0]
                command_vel_pub_m.publish(motor_vel)
                time.sleep(2)
            # Stop condition.
            elif -80 < delta_x < 80:
                motor_vel.velocity = [0]
                command_vel_pub_m.publish(motor_vel)
                time.sleep(0.5)
        else:
            # Joint position outside all handled ranges: stop the motor.
            motor_vel.velocity = [0]
            command_vel_pub_m.publish(motor_vel)
            time.sleep(0.5)
    #for object in vision_database_dict:
    # Then convert the OpenCV-format data back into a ROS image message and publish it.
    # try:
    #     #self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
    #     location_pub.publish(location_pub)
    # except CvBridgeError as e:
    #     print('e')
def RV2_motorjointstate_callback(data):
    """Joint-state callback: cache motor joint 1's position in a module global."""
    # The control loop reads RV2_motor1_joint to enforce the pan limits.
    global RV2_motor1_joint
    RV2_motor1_joint = data.position[0]
    print(RV2_motor1_joint)
if __name__ == '__main__':
    # Initialize the ROS node
    rospy.init_node("cv_bridge_test")
    rospy.loginfo("Starting cv_bridge_test node")
    # NOTE(review): `global` at module scope is a no-op; kept byte-identical.
    global command_vel_pub_m, delta_x
    # Publisher for motor velocity commands
    command_vel_pub_m = rospy.Publisher('/motor_control/input/velocity', JointState, queue_size = 100, latch=True)
    # Subscribe to the motor joint-state feedback
    rospy.Subscriber('/joint_states_motor',JointState,RV2_motorjointstate_callback)
    # Worker thread for the yolo detection sender — presumably `send` and
    # `main` are defined earlier in this file; TODO confirm.
    t_send = threading.Thread(target = send)
    t_send.start()
    t_main = threading.Thread(target=main)
    t_main.start()
    #time.sleep(2)
    # Worker thread for the motor motion loop
    t_motor1 = threading.Thread(target = motor1_move)
    t_motor1.start()
    rospy.spin()
    # except KeyboardInterrupt:
    # print("Shutting down cv_bridge_test node.")
    # cv2.destroyAllWindows()
| 7,001 | 2,666 |
# (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# CAUTION: There are two implementations of the collection loader.
# They must be kept functionally identical, although their implementations may differ.
#
# 1) The controller implementation resides in the "lib/ansible/utils/collection_loader/" directory.
# It must function on all Python versions supported on the controller.
# 2) The ansible-test implementation resides in the "test/lib/ansible_test/_util/target/legacy_collection_loader/" directory.
# It must function on all Python versions supported on managed hosts which are not supported by the controller.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
try:
from collections.abc import Mapping # pylint: disable=ansible-bad-import-from
except ImportError:
from collections import Mapping # pylint: disable=ansible-bad-import-from
from ansible.module_utils.common.yaml import yaml_load
def _meta_yml_to_dict(yaml_string_data, content_id):
    """
    Converts string YAML dictionary to a Python dictionary. This function may be monkeypatched to another implementation
    by some tools (eg the import sanity test).
    :param yaml_string_data: a bytes-ish YAML dictionary
    :param content_id: a unique ID representing the content to allow other implementations to cache the output
    :return: a Python dictionary representing the YAML dictionary content
    """
    # content_id is accepted for interface compatibility; this implementation
    # does not cache, so it goes unused here.
    parsed = yaml_load(yaml_string_data) or {}
    if not isinstance(parsed, Mapping):
        raise ValueError('collection metadata must be an instance of Python Mapping')
    return parsed
| 1,841 | 501 |
def run(config, status):
    """Always fail: raise TypeError to exercise callers' error-handling paths.

    Both parameters are accepted for interface compatibility and ignored.
    """
    # Message text may be asserted by tests; keep it byte-identical.
    raise TypeError("THE SKY IS FALLING!!")
#!/usr/bin/python
import git
from git import *
import threading
import os
import sys
import getopt
from enum import Enum
class GitCommandType(Enum):
    """Git operation selected on the command line: pull, push, or nop (scan only)."""
    pull = 1
    push = 2
    nop = 3
def yes_or_no(msg: str) -> bool:
    """Prompt the user with *msg*; return True for yes (the default), False for no.

    Any answer other than an explicit "no"/"n" — including just pressing
    Enter — is treated as yes, matching the "[Y]es" default in the prompt.
    """
    # Fix: strip whitespace so answers like " n " are recognized (previously
    # they fell through to the default True).
    answer = input(msg + " ? [Y]es or [n]o?").strip().lower()
    return answer not in ("no", "n")
# is_git_dir returns if current directory has .git/
def is_git_dir(dir_path: str):
    """Return True when *dir_path* contains a '.git' entry."""
    return os.path.exists(os.path.join(dir_path, '.git'))
def update_git_repo(git_cmd_type: GitCommandType, git_repo_dir: str, git_stash_if_have_uncommitted_changes: bool,
                    unhandled_git_repo_dirs: list):
    """Run one git pull/push/nop against *git_repo_dir*.

    Repos that cannot be handled cleanly are appended to
    *unhandled_git_repo_dirs* (shared with the caller). Non-repo paths are
    ignored silently via the GitPython exception handlers at the bottom.
    """
    try:
        git_repo = git.Repo(git_repo_dir)
        # A dirty worktree blocks a pull: stash automatically, or ask the user.
        if git_cmd_type == GitCommandType.pull and git_repo.is_dirty():
            if not git_stash_if_have_uncommitted_changes:
                if not yes_or_no("Repo " + git_repo_dir + " have uncommitted changes, \n\tgit reset --hard"):
                    unhandled_git_repo_dirs.append(git_repo_dir)
                    return
            try:
                # NOTE(review): a positional True is passed to `git stash save`;
                # confirm the intended stash arguments.
                git_repo.git.stash('save', True)
            except Exception as exception:
                print(
                    "git stash repo:" + git_repo_dir + " Failed:\r\n git reset --hard recommended" + str(exception))
                unhandled_git_repo_dirs.append(git_repo_dir)
                return
        remote_repo = git_repo.remote()
        print("start git %s from remote for: %s" % (git_cmd_type.name, git_repo_dir), end='')
        try:
            if git_cmd_type == GitCommandType.pull:
                remote_repo.pull()
            elif git_cmd_type == GitCommandType.push:
                remote_repo.push()
            elif git_cmd_type == GitCommandType.nop:
                pass  # scan-only mode: report the repo but touch nothing
            else:
                print("")
                raise Exception('unrecognised git command: ' + git_cmd_type.name)
        except Exception as exception:
            print("")
            print(
                "git " + git_cmd_type.name + " repo:" + git_repo_dir + " Failed:\r\n git reset --hard recommended" + str(
                    exception))
            unhandled_git_repo_dirs.append(git_repo_dir)
            return
        print("... Done.")
    except NoSuchPathError as e:
        # Path vanished or never existed — the walker may hand us anything.
        pass
    except InvalidGitRepositoryError as e:
        # Directory exists but is not a git repo — skip silently.
        pass
    finally:
        pass
def update_git_repo_thread(git_cmd_type: GitCommandType, root_path: str, git_stash_if_have_uncommitted_changes: bool,
                           dirty_git_repo_dirs: list,
                           git_update_thread_pools: list):
    """Run update_git_repo — in a background thread when auto-stash is enabled.

    Interactive mode (no auto-stash) stays synchronous so yes_or_no prompts
    do not interleave. Spawned threads are appended to git_update_thread_pools
    for the caller to join.
    """
    if not git_stash_if_have_uncommitted_changes:
        update_git_repo(git_cmd_type, root_path, False, dirty_git_repo_dirs)
        return
    worker = threading.Thread(target=update_git_repo,
                              args=(git_cmd_type, root_path, True, dirty_git_repo_dirs))
    worker.start()
    git_update_thread_pools.append(worker)
def walk_and_update(git_cmd_type: GitCommandType, root_path: str, continue_when_meet_git: bool, depth: int,
                    max_depth: int,
                    git_stash_if_have_uncommitted_changes: bool, dirty_git_repo_dirs: list,
                    git_update_thread_pools: list):
    """Depth-first walk from *root_path*, updating every git repo encountered.

    Stops descending at *max_depth* and, unless *continue_when_meet_git*,
    does not look inside a repo for nested repos. Failures accumulate in
    *dirty_git_repo_dirs*; spawned threads in *git_update_thread_pools*.
    """
    if depth >= max_depth:
        print("jump for %s too deep: depth[%d] max_depth[%d]" % (root_path, depth, max_depth))
        return
    if is_git_dir(root_path):
        update_git_repo_thread(git_cmd_type, root_path, git_stash_if_have_uncommitted_changes, dirty_git_repo_dirs,
                               git_update_thread_pools)
        if not continue_when_meet_git:
            return
    # The original called os.walk but cleared its dir/file lists after the
    # first yield — i.e. a one-level listing. os.listdir says that directly
    # (sorted for a deterministic traversal order).
    try:
        entries = sorted(os.listdir(root_path))
    except OSError:
        # Match os.walk's default of silently skipping unreadable/missing paths.
        return
    for entry in entries:
        child = os.path.join(root_path, entry)
        if os.path.isdir(child):
            walk_and_update(git_cmd_type, child, continue_when_meet_git, depth + 1,
                            max_depth, git_stash_if_have_uncommitted_changes, dirty_git_repo_dirs,
                            git_update_thread_pools)
class Usage(Exception):
    """Command-line usage error carrying a human-readable message."""

    def __init__(self, msg):
        self.msg = msg
def main(argv=None):
    """CLI entry point.

    Parses options, then recursively pulls/pushes git repos under each given
    path. Returns 2 on usage errors (Unix convention); exits via sys.exit()
    after printing help.
    """
    if argv is None:
        argv = sys.argv
    try:
        try:
            g_git_cmd_type: GitCommandType = GitCommandType.nop
            g_walk_paths: list = ["."]
            g_git_stash_if_have_uncommitted_changes: bool = False
            g_continue_when_meet_git: bool = False
            g_stop_when_meet_max_depth: int = 10
            # Long-option spec fixed: a trailing '=' marks an option that takes
            # a value (the old "stop_when_meet_max_depth=10" spec never
            # matched). "path" is kept so `--path` still parses as before.
            opts, args = getopt.getopt(argv[1:], "hycd:",
                                       ["help", "path", "git_stash_if_have_uncommitted_changes",
                                        "continue_when_meet_git", "stop_when_meet_max_depth="])
            if len(args) > 0:
                g_git_cmd_type = GitCommandType[args[0]]
            if len(args) > 1:
                g_walk_paths = args[1:]
            for op, value in opts:
                # Long options now actually toggle their flags (previously
                # they parsed but were ignored).
                if op in ("-y", "--git_stash_if_have_uncommitted_changes"):
                    g_git_stash_if_have_uncommitted_changes = True
                if op in ("-c", "--continue_when_meet_git"):
                    g_continue_when_meet_git = True
                elif op in ("-d", "--stop_when_meet_max_depth"):
                    # getopt yields strings; depth is compared numerically in
                    # walk_and_update, so convert here (was a latent TypeError).
                    g_stop_when_meet_max_depth = int(value)
                elif op in ("-h", "--help"):
                    print("=======""Usage:")
                    print("python git_pull_all.py pull|push .")
                    print("python git_pull_all.py -y -c -d 10 pull|push YourPath")
                    print("python git_pull_all.py"
                          " --git_stash_if_have_uncommitted_changes "
                          "--continue_when_meet_git "
                          "--stop_when_meet_max_depth=10 pull|push YourPath")
                    print("=======")
                    sys.exit()
            g_dirty_git_repo_dirs = []
            g_git_update_thread_pools = []
            for walk_path in g_walk_paths:
                walk_and_update(g_git_cmd_type, walk_path, g_continue_when_meet_git, 0,
                                g_stop_when_meet_max_depth, g_git_stash_if_have_uncommitted_changes,
                                g_dirty_git_repo_dirs, g_git_update_thread_pools)
            for git_update_thread in g_git_update_thread_pools:
                git_update_thread.join(30)
            if len(g_dirty_git_repo_dirs) != 0:
                print('these repos have uncommitted changes or conflicts:\r\n')
                for dirty_repo_dir in g_dirty_git_repo_dirs:
                    print('dir %s has uncommited changes or conflicts, please check\r\n' % (dirty_repo_dir))
            print("Done git " + g_git_cmd_type.name + " all")
        except getopt.error as msg:
            raise Usage(msg)
    except Usage as err:
        # Python 3 fix: `print >> sys.stderr, ...` was Python 2 syntax and made
        # this file a SyntaxError under Python 3.
        print(err.msg, file=sys.stderr)
        print("for help use --help", file=sys.stderr)
        return 2
if __name__ == "__main__":
sys.exit(main())
| 7,184 | 2,277 |
#!/usr/bin/env python3
"""
Take a timestamp like:
25/11/2016 23:05:03
Convert it to:
25 November 2016, 13:05 PST
25 November 2016, 16:05 EST
25 November 2016, 21:05 GMT
25 November 2016, 21:05 UTC
25 November 2016, 23:05 EET
26 November 2016, 02:35 IST
26 November 2016, 05:05 CST
26 November 2016, 06:05 JST
26 November 2016, 08:05 AEDT
"""
import argparse
import pytz # pip install pytz
from dateutil.parser import parse # pip install python-dateutil
def utc_to_local(utc_dt, local_tz):
    """Interpret naive *utc_dt* as UTC and return it converted to *local_tz*."""
    aware = utc_dt.replace(tzinfo=pytz.utc)
    # normalize() corrects the offset after crossing a DST boundary
    # (may be redundant, per the original author's note).
    return local_tz.normalize(aware.astimezone(local_tz))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Convert a timestamp into eight others.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("timestamp", help="Input timestamp")
args = parser.parse_args()
# print(args.timestamp)
indate = parse(args.timestamp, dayfirst=True, yearfirst=False)
local_tz = pytz.timezone("Europe/Helsinki")
# print(indate, local_tz)
localdt = local_tz.localize(indate)
us_pacific = pytz.timezone("US/Pacific")
us_eastern = pytz.timezone("US/Eastern")
london = pytz.timezone("Europe/London")
india = pytz.timezone("Asia/Calcutta")
china = pytz.timezone("Asia/Shanghai")
japan = pytz.timezone("Asia/Tokyo")
sydney = pytz.timezone("Australia/Sydney")
for tz in [
us_pacific,
us_eastern,
london,
pytz.UTC,
local_tz,
india,
china,
japan,
sydney,
]:
timezone_name = tz.localize(indate).tzname()
local_date = localdt.astimezone(tz).strftime("%d %B %Y, %H:%M")
print(f"{local_date} {timezone_name}")
# x = tz.localize(indate)
# print("{} ({})".format(localdt.astimezone(tz), x.tzname()))
# print()
# End of file
| 1,943 | 793 |
from priv_tube.database.models.system_flags import SystemFlags as Model
from priv_tube.database import db
class SystemFlags:
    """
    Repository for interacting with the `system_flags` database table responsible for system-wide toggles.
    """

    @staticmethod
    def _get(setting_name: str):
        # First row matching the flag name, or None when the flag is absent.
        return Model.query.filter_by(flag_name=setting_name).first()

    @staticmethod
    def is_enabled(setting_name: str) -> bool:
        """Return the flag's value; a missing flag counts as disabled."""
        flag = SystemFlags._get(setting_name)
        # Fix: previously a missing row raised AttributeError on `flag.value`.
        return bool(flag.value) if flag is not None else False

    @staticmethod
    def enable(setting_name: str):
        """Set the flag to True and commit."""
        model = SystemFlags._get(setting_name)
        # An AttributeError here means the flag row does not exist yet.
        model.value = True
        db.session.commit()

    @staticmethod
    def disable(setting_name: str):
        """Set the flag to False and commit."""
        model = SystemFlags._get(setting_name)
        model.value = False
        db.session.commit()
| 799 | 235 |
"""
{
'vNID': {
'anyType': [
'0105558096348'
]
},
'vtin': None,
'vtitleName': {
'anyType': [
'บริษัท'
]
},
'vName': {
'anyType': [
'โฟลว์แอคเคาท์ จำกัด'
]
},
'vSurname': {
'anyType': [
'-'
]
},
'vBranchTitleName': {
'anyType': [
'บริษัท'
]
},
'vBranchName': {
'anyType': [
'โฟลว์แอคเคาท์ จำกัด'
]
},
'vBranchNumber': {
'anyType': [
0
]
},
'vBuildingName': {
'anyType': [
'ชุดสกุลไทย สุรวงศ์ ทาวเวอร์'
]
},
'vFloorNumber': {
'anyType': [
'11'
]
},
'vVillageName': {
'anyType': [
'-'
]
},
'vRoomNumber': {
'anyType': [
'12B'
]
},
'vHouseNumber': {
'anyType': [
'141/12'
]
},
'vMooNumber': {
'anyType': [
'-'
]
},
'vSoiName': {
'anyType': [
'-'
]
},
'vStreetName': {
'anyType': [
'สุรวงศ์'
]
},
'vThambol': {
'anyType': [
'สุริยวงศ์'
]
},
'vAmphur': {
'anyType': [
'บางรัก'
]
},
'vProvince': {
'anyType': [
'กรุงเทพมหานคร'
]
},
'vPostCode': {
'anyType': [
'10500'
]
},
'vBusinessFirstDate': {
'anyType': [
'2016/04/07'
]
},
'vmsgerr': None
}
"""
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
import stringcase as stringcase
from loguru import logger
def unnest(soap_data: Dict[str, Optional[Dict[str, List[Union[str, int]]]]], nonull: bool = True):
    """Flatten SOAP-style {'vKey': {'anyType': [value]}} data into {snake_key: value}.

    The leading 'v' is stripped from each key and the remainder snake_cased;
    only the first 'anyType' element is kept (multiple elements are logged).
    With nonull=True (default), None/empty entries are dropped first.
    """
    # Fix: previously `notnull` was only assigned inside `if nonull:` and the
    # nonull=False path crashed with NameError.
    items = {k: v for k, v in soap_data.items() if v} if nonull else dict(soap_data)
    # anytype flatten
    flatten = {}
    for key, wrapped in items.items():
        if key.startswith('v'):
            key = key[1:]
        key = stringcase.snakecase(key)
        try:
            flatten[key] = wrapped['anyType'][0]
            if len(wrapped['anyType']) > 1:
                logger.info(
                    "please let dev. know this case exists. by creating an issue on https://github.com/CircleOnCircles/borisat/issues.")
        except Exception as e:
            # Entry does not match the expected shape; log and keep going.
            logger.exception("unseen format")
    return flatten
def get_error(unnested: Dict[str, Any]) -> Optional[str]:
    """Return the error message from an unnested response, or None if there is none."""
    # Fix: previously returned False on success, contradicting the
    # Optional[str] annotation; None is equally falsy, so truthiness-based
    # callers are unaffected.
    return unnested.get('msgerr') or None
| 2,899 | 1,147 |
#!/usr/bin/env python
import os
import sys
import urllib2
import zipfile
import numpy as np
import pandas as pd
from openpyxl import load_workbook
from misc import intersect_index
def main():
    # Download the GDSC source files listed (one URL per line) in the file
    # named by argv[1], then build four feature/IC50 .npz datasets (GEX, WES,
    # CNV, MET) under ./data. Python 2 script (print statements, urllib2).
    # Read URLs to GDSC datasets
    urls_file = sys.argv[1]
    urls = []
    with open(urls_file) as f:
        for line in f:
            if line.startswith('http://') or line.startswith('https://'):
                urls.append(line[:-1])  # strip the trailing newline
    # Create data folder
    directory = '%s/data' % os.getcwd()
    if not os.path.exists(directory):
        os.makedirs(directory)
    # Download datasets
    for url in urls:
        print 'Downloading %s' % url
        local_fn = os.path.join(directory, os.path.basename(url))
        remote_file = urllib2.urlopen(url)
        with open(local_fn, 'wb') as local_file:
            local_file.write(remote_file.read())
        remote_file.close()
        if local_fn.endswith('.zip'):
            with zipfile.ZipFile(local_fn, 'r') as zip_ref:
                zip_ref.extractall(directory)
    print 'Preprocessing the GDSC dataset...'
    # Read Gene expression dataset
    GEX_file = '%s/Cell_line_RMA_proc_basalExp.txt' % directory
    GEX = pd.read_csv(GEX_file, sep='\t')
    GEX_gene_symbols = np.array(GEX['GENE_SYMBOLS'], dtype='str')
    GEX = GEX.drop(['GENE_SYMBOLS', 'GENE_title'], axis=1)
    GEX_cell_ids = np.array(GEX.columns, dtype='str')
    # Drop the "DATA." prefix from the expression column names — presumably
    # leaving the bare COSMIC cell id; TODO confirm against the source file.
    for i, cell_id in enumerate(GEX_cell_ids):
        GEX_cell_ids[i] = cell_id[5:]
    # NOTE(review): np.float/np.int aliases below are removed in NumPy>=1.24;
    # kept byte-identical here, but replace with float/int when porting.
    GEX = np.array(GEX.values, dtype=np.float).T
    # Read Exome sequencing dataset
    WES_file = '%s/CellLines_CG_BEMs/PANCAN_SEQ_BEM.txt' % directory
    WES = pd.read_csv(WES_file, sep='\t')
    WES_CG = np.array(WES['CG'], dtype='str')
    WES = WES.drop(['CG'], axis=1)
    WES_cell_ids = np.array(WES.columns, dtype='str')
    WES = np.array(WES.values, dtype=np.int).T
    # Read Copy number dataset
    CNV_file = '%s/CellLine_CNV_BEMs/PANCAN_CNA_BEM.rdata.txt' % directory
    CNV = pd.read_csv(CNV_file, sep='\t')
    CNV_cell_ids = np.array(CNV['Unnamed: 0'], dtype='str')
    CNV = CNV.drop(['Unnamed: 0'], axis=1)
    CNV_cna = np.array(CNV.columns, dtype='str')
    CNV = np.array(CNV.values, dtype=int)
    # Read Methylation dataset
    MET_file = '%s/METH_CELLLINES_BEMs/PANCAN.txt' % directory
    MET = pd.read_csv(MET_file, sep='\t')
    MET_met = np.array(MET['Unnamed: 0'], dtype='str')
    MET = MET.drop(['Unnamed: 0'], axis=1)
    MET_cell_ids = np.array(MET.columns, dtype='str')
    MET = np.array(MET.values, dtype=int).T
    # Read LOG_IC50 dataset
    IC50_file = '%s/TableS4A.xlsx' % directory
    wb = load_workbook(filename=IC50_file)
    sheet = wb['TableS4A-IC50s']
    # Rows 7..996 hold one cell line each: id in column A, name in column B.
    IC50_cell_ids, IC50_cell_names = [], []
    for i in range(7, 997):
        IC50_cell_ids.append('%s' % sheet['A%s' % i].value)
        IC50_cell_names.append(('%s' % sheet['B%s' % i].value).strip())
    IC50_cell_ids = np.array(IC50_cell_ids, dtype='str')
    IC50_cell_names = np.array(IC50_cell_names, dtype='str')
    # Rows 5/6 hold the drug ids/names, starting at the third column (j > 1).
    IC50_drug_ids, IC50_drug_names = [], []
    for i, (cell_row5, cell_row6) in enumerate(zip(sheet[5], sheet[6])):
        if i > 1:
            IC50_drug_ids.append('%s' % cell_row5.value)
            IC50_drug_names.append(('%s' % cell_row6.value).strip())
    IC50_drug_ids = np.array(IC50_drug_ids, dtype='str')
    IC50_drug_names = np.array(IC50_drug_names, dtype='str')
    # Cell-line x drug matrix, NaN where the sheet says 'NA'.
    IC50 = np.ones([IC50_cell_ids.shape[0], IC50_drug_ids.shape[0]]) * np.nan
    for i in range(7, 997):
        for j, cell in enumerate(sheet[i]):
            if j > 1:
                if cell.value != 'NA':
                    IC50[i - 7, j - 2] = cell.value
    # Read LOG_IC50 Threshold
    threshold_file = '%s/TableS5C.xlsx' % directory
    wb = load_workbook(filename=threshold_file)
    sheet = wb['Table-S5C binaryIC50s']
    threshold = []
    for i, cell in enumerate(sheet[7]):
        if i > 1:
            threshold.append(cell.value)
    threshold = np.array(threshold)
    drug_ids_file = '%s/TableS1F.xlsx' % directory
    wb = load_workbook(filename=drug_ids_file)
    sheet = wb['TableS1F_ScreenedCompounds']
    threshold_drug_ids = []
    for i in range(4, 269):
        threshold_drug_ids.append('%s' % sheet['B%s'%i].value)
    threshold_drug_ids = np.array(threshold_drug_ids)
    # Normalize IC50 by the threshold: align drug columns between the IC50
    # matrix and the threshold table, flip sign so larger = more sensitive,
    # and shift so the minimum is zero.
    merged = intersect_index(IC50_drug_ids, threshold_drug_ids)
    IC50_keep_index = np.array(merged['index1'].values, dtype=np.int)
    IC50_drug_ids = IC50_drug_ids[IC50_keep_index]
    IC50 = IC50[:, IC50_keep_index]
    threshold_keep_index = np.array(merged['index2'].values, dtype=np.int)
    threshold_drug_ids = threshold_drug_ids[threshold_keep_index]
    threshold = threshold[threshold_keep_index]
    IC50_norm = - (IC50 - threshold)
    IC50_norm_min = np.min(IC50_norm[~np.isnan(IC50_norm)])
    IC50_norm = IC50_norm - IC50_norm_min
    # Save the GEX features and normalized IC50 dataset
    merged = intersect_index(GEX_cell_ids, IC50_cell_ids)
    GEX_keep_index = np.array(merged['index1'].values, dtype=np.int)
    IC50_keep_index = np.array(merged['index2'].values, dtype=np.int)
    GEX = GEX[GEX_keep_index]
    GEX_cell_ids = GEX_cell_ids[GEX_keep_index]
    GEX_cell_names = IC50_cell_names[IC50_keep_index]
    IC50 = IC50_norm[IC50_keep_index]
    np.savez('%s/GDSC_GEX.npz' % directory, X=GEX, Y=IC50, cell_ids=GEX_cell_ids, cell_names=GEX_cell_names,
             drug_ids=IC50_drug_ids, drug_names=IC50_drug_names, GEX_gene_symbols=GEX_gene_symbols)
    print 'Gene expression (GEX) dataset: {} cell lines, {} features, {} drugs'.format(GEX.shape[0], GEX.shape[1], IC50.shape[1])
    # Save the WES features and normalized IC50 dataset
    merged = intersect_index(WES_cell_ids, IC50_cell_ids)
    WES_keep_index = np.array(merged['index1'].values, dtype=np.int)
    IC50_keep_index = np.array(merged['index2'].values, dtype=np.int)
    WES = WES[WES_keep_index]
    WES_cell_ids = WES_cell_ids[WES_keep_index]
    WES_cell_names = IC50_cell_names[IC50_keep_index]
    IC50 = IC50_norm[IC50_keep_index]
    np.savez('%s/GDSC_WES.npz' % directory, X=WES, Y=IC50, cell_ids=WES_cell_ids, cell_names=WES_cell_names,
             drug_ids=IC50_drug_ids, drug_names=IC50_drug_names, WES_CG=WES_CG)
    print 'Whole-exome sequencing (WES) dataset: {} cell lines, {} features, {} drugs'.format(WES.shape[0], WES.shape[1], IC50.shape[1])
    # Save the CNV features and normalized IC50 dataset
    merged = intersect_index(CNV_cell_ids, IC50_cell_ids)
    CNV_keep_index = np.array(merged['index1'].values, dtype=np.int)
    IC50_keep_index = np.array(merged['index2'].values, dtype=np.int)
    CNV = CNV[CNV_keep_index]
    CNV_cell_ids = CNV_cell_ids[CNV_keep_index]
    CNV_cell_names = IC50_cell_names[IC50_keep_index]
    IC50 = IC50_norm[IC50_keep_index]
    np.savez('%s/GDSC_CNV.npz' % directory, X=CNV, Y=IC50, cell_ids=CNV_cell_ids, cell_names=CNV_cell_names,
             drug_ids=IC50_drug_ids, drug_names=IC50_drug_names, CNV_cna=CNV_cna)
    print 'Copy number variation (CNV) dataset: {} cell lines, {} features, {} drugs'.format(CNV.shape[0], CNV.shape[1], IC50.shape[1])
    # Save the MET features and normalized IC50 dataset
    merged = intersect_index(MET_cell_ids, IC50_cell_ids)
    MET_keep_index = np.array(merged['index1'].values, dtype=np.int)
    IC50_keep_index = np.array(merged['index2'].values, dtype=np.int)
    MET = MET[MET_keep_index]
    MET_cell_ids = MET_cell_ids[MET_keep_index]
    MET_cell_names = IC50_cell_names[IC50_keep_index]
    IC50 = IC50_norm[IC50_keep_index]
    np.savez('%s/GDSC_MET.npz' % directory, X=MET, Y=IC50, cell_ids=MET_cell_ids, cell_names=MET_cell_names,
             drug_ids=IC50_drug_ids, drug_names=IC50_drug_names, MET_met=MET_met)
    print 'Methylation (MET) dataset: {} cell lines, {} features, {} drugs'.format(MET.shape[0], MET.shape[1], IC50.shape[1])
    print 'Finished.'
if __name__ == '__main__':
    # Script entry point: download and preprocess all GDSC datasets.
    main()
| 7,969 | 3,292 |
# -- Imports --
from datetime import datetime
from colorama import Back
from rich.console import Console
from .colors import get_bright_color, get_color
# -- Mappings --
# Foreground color per log level; "critical" additionally sets a yellow
# background on top of bright red.
log_color_mapping = {
    "error": get_bright_color("RED"),
    "warning": get_bright_color("YELLOW"),
    "message": get_color("CYAN"),
    "success": get_bright_color("GREEN"),
    "info": get_bright_color("MAGENTA"),
    "critical": get_bright_color("RED") + Back.YELLOW,
    "flash": get_bright_color("BLUE"),
}
# Ready-made colored "[x]" prefixes per level (e.g. "[%]" error, "[+]"
# success); each embeds its color code and resets afterwards.
log_mapping = {
    "error": f"[{log_color_mapping['error']}%{get_color('RESET')}]",
    "warning": f"[{log_color_mapping['warning']}!{get_color('RESET')}]",
    "message": f"[{log_color_mapping['message']}>{get_color('RESET')}]",
    "success": f"[{log_color_mapping['success']}+{get_color('RESET')}]",
    "info": f"[{log_color_mapping['info']}#{get_color('RESET')}]",
    "critical": f"[{log_color_mapping['critical']}X{get_color('RESET')}{Back.RESET}]",
    "flash": f"[{log_color_mapping['flash']}-{get_color('RESET')}]",
}
class Logger:
    """Colored console logger.

    Each level prints a colored "[x]" prefix followed by the message in the
    level's color; `message()` additionally renders its body through rich.
    """

    def __init__(self):
        # rich console, used only by message() for markup-aware output.
        self._console = Console()

    @staticmethod
    def _append_date(message: str) -> str:
        """Prefix *message* with a bright-cyan [HH:MM:SS] timestamp."""
        # strftime zero-pads the fields; the previous manual formatting
        # produced unpadded times such as "9:5:3".
        stamp = (
            f"{get_bright_color('CYAN')}"
            f"{datetime.now().strftime('%H:%M:%S')}"
            f"{get_bright_color('RESET')}"
        )
        return f"[{stamp}]{message}"

    def _log(self, log_type: str, message: str, date: bool) -> None:
        """Shared body for the simple levels: prefix, colorize, stamp, print."""
        line = f"{log_mapping[log_type]} {log_color_mapping[log_type]}{message}"
        if date:
            line = self._append_date(line)
        print(line)

    def error(self, message: str, date: bool = True) -> None:
        self._log("error", message, date)

    def warning(self, message: str, date: bool = True) -> None:
        self._log("warning", message, date)

    def message(self, username: str, message: str, date: bool = True, **kwargs) -> None:
        """Chat-style line: yellow username, "[>]" prefix, rich-rendered body.

        Extra kwargs pass through to rich's Console.print (e.g. style=).
        """
        prefix = f"{get_bright_color('YELLOW')} {username}{get_color('RESET')} {log_mapping['message']} "
        if date:
            prefix = self._append_date(prefix)
        print(prefix, end="")
        self._console.print(message, **kwargs)

    def success(self, message: str, date: bool = True) -> None:
        self._log("success", message, date)

    def info(self, message: str, date: bool = True) -> None:
        self._log("info", message, date)

    def critical(self, message: str, date: bool = True) -> None:
        self._log("critical", message, date)

    def flash(self, message: str, date: bool = True) -> None:
        self._log("flash", message, date)
| 3,696 | 1,216 |
from munch import Munch
from .utils import import_class
from .serialization import YAMLMixin
from .micro_task import import_class
class DataConnector(YAMLMixin):
    """Pairs a data handler with an ordered list of micro tasks.

    On the first read of `values`, the handler's data is fetched once, every
    micro task is applied in declaration order, and the augmented result is
    cached. Later reads return the cache; the handler's own data is never
    modified by the tasks. Writes bypass the cache and go straight through
    to the handler (and thus back to the source).
    """

    def __init__(self, plainDataConnector, dataHandler):
        self.dataHandler = dataHandler
        # Cache of the task-augmented data; None until the first read.
        self.augmentedVolatileData = None
        plain_tasks = plainDataConnector.get('microTasks', Munch({}))
        self.microTasks = [import_class(task.spec)(task) for task in plain_tasks]

    @property
    def values(self):
        if self.augmentedVolatileData is None:
            # First read: pull from the source, then run each micro task.
            self.augmentedVolatileData = self.dataHandler.values
            for task in self.microTasks:
                self.augmentedVolatileData = task.execute(self.augmentedVolatileData)
        return self.augmentedVolatileData

    @values.setter
    def values(self, newValue):
        self.dataHandler.values = newValue  # write back to the source

    def toDict(self):
        return {}  # ToDo
| 1,506 | 402 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-11-26 16:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: makes Photograph.copyright and
    # Photograph.credit optional TextFields with a "Leave blank for default"
    # admin hint. Do not hand-edit generated migrations beyond comments.

    dependencies = [
        ('gallery', '0010_auto_20161126_1543'),
    ]

    operations = [
        migrations.AlterField(
            model_name='photograph',
            name='copyright',
            field=models.TextField(blank=True, help_text='Leave blank for default'),
        ),
        migrations.AlterField(
            model_name='photograph',
            name='credit',
            field=models.TextField(blank=True, help_text='Leave blank for default'),
        ),
    ]
| 682 | 227 |
from rlbot.agents.base_agent import SimpleControllerState
from mechanic.base_mechanic import BaseMechanic
from mechanic.drive_arrive_in_time import DriveArriveInTime
from skeleton.util.structure import Player
from util.linear_algebra import norm
from util.path_finder import find_fastest_path, first_target, optional_boost_target
class DriveNavigateBoost(BaseMechanic):
    """Drive toward a target location, optionally detouring through boost pads."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.mechanic = DriveArriveInTime(self.agent, rendering_enabled=self.rendering_enabled)

    def step(self, car: Player, boost_pads, target_loc, target_dt=0) -> SimpleControllerState:
        """Return controls for this tick, steering at either the target or a boost pad."""
        # (path-finder variant kept for reference)
        # path = find_fastest_path(boost_pads, car.location, target_loc, car.velocity, car.boost)
        # target = first_target(boost_pads, target_loc, path)
        target = optional_boost_target(boost_pads, car.location, target_loc, car.velocity, car.boost)
        # Only honor the arrival time when heading at the real target, not a pad.
        arrival_time = target_dt if (target == target_loc).all() else 0
        # Done once we are close in both space and time.
        self.finished = norm(car.location - target_loc) < 25 and abs(target_dt) < 0.05
        return self.mechanic.step(car, target, arrival_time)
| 1,213 | 383 |
"""
Created on Wed Apr 3 13:07:18 2019
Author:
Corey R. Randall
Summary:
If the user wishes to solve the Dual Problem over the Primal one, this
function provides support to appropriately convert the problem into its
alternate form.
"""
""" Import needed modules """
"-----------------------------------------------------------------------------"
import numpy as np
""" Function definition """
"-----------------------------------------------------------------------------"
def dual_problem(user_inputs, conversion):
    """Build the dual LP of the (already standard-form) primal in `conversion`.

    user_inputs is accepted for interface symmetry but unused here. Returns a
    dict shaped like `conversion` describing the dual, with unrestricted dual
    variables split as y = y' - y'' and slack variables appended.
    """
    # Extract dictionary for readability:
    A = conversion['A']
    b = conversion['b']
    c_coeff = conversion['c_coeff']
    n = conversion['n']
    m = conversion['m']
    n_slack = conversion['n_slack']
    # Convert A, c_coeff to allow for unrestricted y (i.e. y = y' - y''):
    # each column of A.T is duplicated with alternating sign, then an identity
    # block is appended for the dual slack variables.
    A_temp = np.repeat(A.T, 2)
    A_temp[1::2] = -A_temp[1::2]
    A = np.reshape(A_temp, [A.shape[1], 2*A.shape[0]])
    A = np.hstack([A, np.identity(A.shape[0])])
    b_temp = np.repeat(b.T, 2) # the obj. coeff. in (D) are b from (P)
    b_temp[1::2] = -b_temp[1::2]
    b = np.reshape(b_temp, [b.shape[1], 2*b.shape[0]])
    b = np.hstack([b, np.zeros([1, A.shape[0]])])
    # Ensure no negative values on RHS:
    for i in range(c_coeff.shape[1]):
        if c_coeff[0,i] < 0: # the RHS, b values, in (D) are c from (P)
            A[i,:] = -A[i,:]
            c_coeff[0,i] = -c_coeff[0,i]
    # Generate dictionary for outputs:
    # NOTE(review): the reshapes above assume b arrives as a column vector and
    # c_coeff as a row vector (shapes (m,1) and (1,n)); confirm against the
    # primal conversion routine. Also note c_coeff is negated IN PLACE above,
    # mutating the caller's `conversion['c_coeff']`.
    dual_conversion = {}
    dual_conversion['A'] = A
    dual_conversion['b'] = c_coeff.T
    dual_conversion['c_coeff'] = -b
    dual_conversion['n'] = 2*m
    dual_conversion['m'] = n +n_slack
    dual_conversion['n_slack'] = n +n_slack
    dual_conversion['n_prim'] = n
    dual_conversion['n_slack_prim'] = n_slack
    return dual_conversion
| 1,907 | 746 |
from api import my_api
from service.federated.client import Client
from utils.common_utils import Common
from flask import request
@my_api.route('/')
def index():
    """Health-check root endpoint for the federated client."""
    return '<h1>Hello, this is client!</h1>'
@my_api.route("/federated_train_size", methods=["GET", "POST"])
def federated_train_size():
return Client.get_federated_train_size()
@my_api.route("/federated_train", methods=["GET", "POST"])
def federated_train():
# receive the server training epoch and initial or federated averaging model
pickled_server_epoch = request.files["server_epoch"].read()
pickled_server_model_params = request.files["server_model_params"].read()
server_epoch = Common.get_object_by_pickle_bytes_func(pickled_server_epoch)
server_model_params = Common.get_object_by_pickle_bytes_func(pickled_server_model_params)
# return the local model after training of current client to server
return Client.train(server_model_params=server_model_params, epoch=server_epoch)
@my_api.route("/federated_test", methods=["GET", "POST"])
def federated_test():
# receive the final best model from server and do the evaluating
pickled_best_model_params = request.files["best_model_params"].read()
best_model_params = Common.get_object_by_pickle_bytes_func(pickled_best_model_params)
return Client.test(test_model_params=best_model_params, mode="test")
@my_api.route("/federated_detect", methods=["GET", "POST"])
def federated_detect():
# receive the final best model from server and do the evaluating
pickled_best_model_params = request.files["best_model_params"].read()
best_model_params = Common.get_object_by_pickle_bytes_func(pickled_best_model_params)
return Client.detect(detect_model_params=best_model_params)
| 1,764 | 565 |
from mock import Mock
from tracim_backend.app_models.applications import Application
from tracim_backend.app_models.contents import content_status_list
from tracim_backend.app_models.workspace_menu_entries import all_content_menu_entry
from tracim_backend.app_models.workspace_menu_entries import dashboard_menu_entry
from tracim_backend.lib.core.application import ApplicationApi
from tracim_backend.models.roles import WorkspaceRoles
from tracim_backend.tests import DefaultTest
class TestApplicationApi(DefaultTest):
def test_get_default_workspace_menu_entry__ok__nominal_case(self):
"""
Show only enabled app
"""
app_config = Mock()
app_config.APPS_COLORS = {}
app_config.APPS_COLORS["primary"] = "#fff"
thread = Application(
label="Threads",
slug="contents/thread",
fa_icon="comments-o",
is_active=True,
config={},
main_route="/ui/workspaces/{workspace_id}/contents?type=thread",
app_config=app_config,
)
thread.add_content_type(
slug="thread",
label="Thread",
creation_label="Start a topic",
available_statuses=content_status_list.get_all(),
file_extension=".thread.html",
)
markdownpluspage = Application(
label="Markdown Plus Documents",
# TODO - G.M - 24-05-2018 - Check label
slug="contents/markdownpluspage",
fa_icon="file-code-o",
is_active=False,
config={},
main_route="/ui/workspaces/{workspace_id}/contents?type=markdownpluspage",
app_config=app_config,
)
markdownpluspage.add_content_type(
slug="markdownpage",
label="Rich Markdown File",
creation_label="Create a Markdown document",
available_statuses=content_status_list.get_all(),
)
app_api = ApplicationApi(app_list=[thread, markdownpluspage], show_all=False)
workspace = Mock()
workspace.workspace_id = 12
workspace.agenda_enabled = True
default_workspace_menu_entry = app_api.get_default_workspace_menu_entry(workspace=workspace)
assert len(default_workspace_menu_entry) == 3
assert default_workspace_menu_entry[0].label == dashboard_menu_entry.label
assert default_workspace_menu_entry[1].label == all_content_menu_entry.label
assert default_workspace_menu_entry[2].label == thread.label
def test_get_default_workspace_menu_entry__ok__folder_case(self):
"""
main route for folder is empty, that why it should not be included
in default_menu entry
:return:
"""
app_config = Mock()
app_config.APPS_COLORS = {}
app_config.APPS_COLORS["primary"] = "#fff"
folder = Application(
label="Folder",
slug="contents/folder",
fa_icon="folder-o",
is_active=True,
config={},
main_route="",
app_config=app_config,
)
folder.add_content_type(
slug="folder",
label="Folder",
creation_label="Create a folder",
available_statuses=content_status_list.get_all(),
allow_sub_content=True,
minimal_role_content_creation=WorkspaceRoles.CONTENT_MANAGER,
)
app_api = ApplicationApi(app_list=[folder], show_all=False)
workspace = Mock()
workspace.workspace_id = 12
workspace.agenda_enabled = True
default_workspace_menu_entry = app_api.get_default_workspace_menu_entry(workspace=workspace)
assert len(default_workspace_menu_entry) == 2
assert default_workspace_menu_entry[0].label == dashboard_menu_entry.label
assert default_workspace_menu_entry[1].label == all_content_menu_entry.label
def test_get_default_workspace_menu_entry__ok__agenda_enabled_workspace_case(self):
    """An active agenda app on an agenda-enabled workspace adds a third menu entry."""
    mock_config = Mock()
    mock_config.APPS_COLORS = {"primary": "#fff"}
    agenda_app = Application(
        label="Agenda",
        slug="agenda",
        fa_icon="calendar",
        is_active=True,
        config={},
        main_route="/ui/workspaces/{workspace_id}/agenda",
        app_config=mock_config,
    )
    app_api = ApplicationApi(app_list=[agenda_app], show_all=False)
    workspace = Mock()
    workspace.workspace_id = 12
    workspace.agenda_enabled = True
    menu_entries = app_api.get_default_workspace_menu_entry(workspace=workspace)
    entry_labels = [entry.label for entry in menu_entries]
    assert entry_labels == [
        dashboard_menu_entry.label,
        all_content_menu_entry.label,
        agenda_app.label,
    ]
def test_get_default_workspace_menu_entry__ok__agenda_disabled_workspace_case(self):
    """Agenda disabled on the workspace: the agenda app entry must be dropped."""
    mock_config = Mock()
    mock_config.APPS_COLORS = {"primary": "#fff"}
    agenda_app = Application(
        label="Agenda",
        slug="agenda",
        fa_icon="calendar",
        is_active=True,
        config={},
        main_route="/ui/workspaces/{workspace_id}/agenda",
        app_config=mock_config,
    )
    app_api = ApplicationApi(app_list=[agenda_app], show_all=False)
    workspace = Mock()
    workspace.workspace_id = 12
    workspace.agenda_enabled = False
    menu_entries = app_api.get_default_workspace_menu_entry(workspace=workspace)
    entry_labels = [entry.label for entry in menu_entries]
    assert entry_labels == [dashboard_menu_entry.label, all_content_menu_entry.label]
| 5,954 | 1,729 |
from kivy.event import EventDispatcher
from kivy.metrics import dp
from kivy.properties import ListProperty, StringProperty
from kivymd.uix.card import MDCard
from kivy.lang import Builder
# __all__ must be an iterable of name strings; a bare string made
# `from <module> import *` iterate over single characters and fail.
__all__ = ("M_CardLoader",)
Builder.load_string(
"""
# kv_start
<M_CardLoader>:
md_bg_color: 0, 0, 0, 0
radius: [dp(10), ]
ripple_behavior: True
RelativeLayout:
AsyncImage:
id: image
color: 0,0,0,0
source: root.source
anim_delay: .1
allow_stretch: True
keep_ratio: False
nocache: True
on_load:
root.dispatch("on_load")
canvas.before:
StencilPush
RoundedRectangle:
pos: self.pos
size: self.size
radius: root.radius
StencilUse
canvas.after:
StencilUnUse
RoundedRectangle:
size: self.size
pos: self.pos
radius: root.radius
StencilPop
M_AKImageLoader:
id: loader
radius: root.radius
circle: False
MDBoxLayout:
id:box
opacity: 0
padding: dp(10)
adaptive_height: True
md_bg_color: 0, 0, 0, .6
radius: [0, 0, root.radius[0], root.radius[0]]
M_AKLabelLoader:
text: root.text
radius: root.text_radius
size_hint_y: None
theme_text_color: "Custom"
text_color: root.text_color
height: dp(20) if not self.text else self.texture_size[1]
font_style: "Money"
font_size: dp(16)
halign:"center"
# kv_end
"""
)
class M_CardLoader(MDCard):
    """Card showing an async-loaded image with a placeholder loader and a caption.

    The visual tree is defined by the kv rule loaded at module import.
    Fires a custom ``on_load`` event (dispatched from kv when the AsyncImage
    finishes loading).
    """

    # caption text rendered in the bottom overlay box
    text = StringProperty("")
    # corner radius of the caption label's loader placeholder
    text_radius = ListProperty([dp(5), ])
    # caption text color (RGBA)
    text_color = ListProperty([1, 1, 1, 1])
    # image URL/path fed to the AsyncImage in the kv rule
    source = StringProperty("")

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # custom event; kv dispatches it when the image has loaded
        self.register_event_type("on_load")

    def on_load(self):
        # image is ready: hide the placeholder and un-hide the image
        self.ids.loader.opacity = 0
        self.ids.image.color = [1, 1, 1, 1]

    def on_touch_down(self, touch):
        # NOTE(review): MDCard defines no `root` attribute -- this assumes some
        # other code assigns self.root (app/screen root); confirm. Also, super()
        # is never called here, which suppresses normal touch propagation.
        self.root.pause_clock()

    def on_touch_up(self, touch):
        # duration of the touch; a quick touch (< 0.2 s) is treated as a tap
        timer = touch.time_end - touch.time_start
        if timer < 0.2:
            self.root.ids.raw.switch_tab("feeds")
        self.root.resume_clock()

    def on_release(self):
        self.root.ids.feeds.dispatch("on_tab_release")
| 2,593 | 804 |
from setuptools import find_packages, setup # type: ignore
def readme():
    """Return the text of the adjacent README.md (used as the long description)."""
    with open('./README.md', encoding='utf-8') as handle:
        return handle.read()
# Package metadata / installation entry for the OpenTAI pre-commit hooks.
setup(
    name='pre_commit_hooks',
    version='0.1.0',
    description='A pre-commit hook for OpenTAI projects',
    # long description is the README, rendered as Markdown on PyPI
    long_description=readme(),
    long_description_content_type='text/markdown',
    url='https://github.com/OpenTAI/pre-commit-hooks',
    author='OpenTAI Team',
    author_email='',
    packages=find_packages(),
    python_requires='>=3.6',
    install_requires=['PyYAML'],
    # console scripts exposed to pre-commit as hook entry points
    entry_points={
        'console_scripts': [
            'say-hello=pre_commit_hooks.say_hello:main',
            'check-copyright=pre_commit_hooks.check_copyright:main',
        ],
    },
)
| 763 | 256 |
import random
import numpy as np
def unif_range(a, b):
    """Return a float drawn uniformly between a and b.

    Equivalent to the original ``random.random() * (b - a) + a`` but uses the
    stdlib helper; works whichever of a/b is larger (this file calls it with
    a > b for learning-rate ranges).
    """
    return random.uniform(a, b)
def rand_elem(xs):
    """Pick one element of the non-empty sequence xs uniformly at random."""
    chosen_index = random.randrange(len(xs))
    return xs[chosen_index]
def rand_int_linspace(start, stop, num=50):
    """Return a random one of `num` evenly spaced points in [start, stop], truncated to int."""
    points = np.linspace(start, stop, num)
    return int(points[random.randrange(len(points))])
def mujoco():
    """PPO defaults for MuJoCo continuous-control benchmarks."""
    params = {
        "nsteps": 2048,
        "nminibatches": 32,
        "lam": 0.95,
        "gamma": 0.99,
        "noptepochs": 10,
        "log_interval": 1,
        "ent_coef": 0.0,
        "lr": lambda f: 3e-4 * f,  # linearly annealed by the caller via f
        "cliprange": 0.2,
        "value_network": "copy",
    }
    return params
def atari():
    """PPO defaults for Atari environments."""
    params = {
        "nsteps": 128,
        "nminibatches": 4,
        "lam": 0.95,
        "gamma": 0.99,
        "noptepochs": 4,
        "log_interval": 1,
        "ent_coef": .01,
        "lr": lambda f: f * 2.5e-4,  # linearly annealed by the caller via f
        "cliprange": 0.1,
    }
    return params
def retro():
    """Retro environments reuse the Atari PPO defaults."""
    return atari()
def car_retrieval_train():
    """Sample one random PPO hyper-parameter set for the car-retrieval task (LSTM policy)."""
    sampled_lr = unif_range(0.003, 5e-6)
    print("lr: ", sampled_lr)
    return dict(
        nminibatches=1,  # LSTM policy: a single minibatch is required
        ent_coef=rand_elem([0.0, 0.01, 0.05, 0.1]),
        noptepochs=rand_int_linspace(3, 36),
        cliprange=rand_elem([0.1, 0.2, 0.3]),
        gamma=0.99,
        lr=lambda f: f * sampled_lr,
    )
'''
# best params for car retrieval bench
def car_retrieval_train():
lr = 0.002
print("lr: ", lr)
return dict(
# horizon = rand_int_linspace(32, 500),
nminibatches = 128,
ent_coef = 0.01,
noptepochs = 33,
cliprange = 0.1,
gamma = 0.99,
lr = lambda f : f * lr
)
def car_retrieval_train1():
lr = 0.002
print("lr: ", lr)
return dict(
# horizon = rand_int_linspace(32, 500),
nminibatches = 128,
ent_coef = 0.01,
noptepochs = 33,
cliprange = 0.1,
gamma = 0.99,
lr = lambda f : f * lr
)
def car_retrieval_train2():
lr = 0.002
print("lr: ", lr)
return dict(
# horizon = rand_int_linspace(32, 500),
nminibatches = 128,
ent_coef = 0.01,
noptepochs = 33,
cliprange = 0.1,
gamma = 0.99,
lr = lambda f : f * lr
)
def car_retrieval_train3():
lr = 0.002
print("lr: ", lr)
return dict(
# horizon = rand_int_linspace(32, 500),
nminibatches = 128,
ent_coef = 0.01,
noptepochs = 33,
cliprange = 0.1,
gamma = 0.99,
lr = lambda f : f * lr
)
def car_retrieval_train4():
lr = 0.002
print("lr: ", lr)
return dict(
# horizon = rand_int_linspace(32, 500),
nminibatches = 128,
ent_coef = 0.01,
noptepochs = 33,
cliprange = 0.1,
gamma = 0.99,
lr = lambda f : f * lr
)
def car_retrieval_train5():
lr = 0.002
print("lr: ", lr)
return dict(
# horizon = rand_int_linspace(32, 500),
nminibatches = 128,
ent_coef = 0.01,
noptepochs = 33,
cliprange = 0.1,
gamma = 0.99,
lr = lambda f : f * lr
)'''
def pendulum_train():
    """Sample one random PPO hyper-parameter set for Pendulum (LSTM policy)."""
    sampled_lr = unif_range(0.003, 5e-6)
    print("lr: ", sampled_lr)
    return dict(
        nminibatches=1,  # LSTM policy: a single minibatch is required
        ent_coef=rand_elem([0.0, 0.01, 0.05, 0.1]),
        noptepochs=rand_int_linspace(3, 36),
        cliprange=rand_elem([0.1, 0.2, 0.3]),
        gamma=0.99,
        lr=lambda f: f * sampled_lr,
    )
'''
# best version for pendulum
def pendulum_train():
lr = 0.0003
return dict(
# horizon = rand_int_linspace(32, 500),
nminibatches = 1,
ent_coef = 0.01,
noptepochs = 28,
cliprange = 0.1,
gamma = 0.99,
lr = lambda f : f * lr
)'''
def mountain_car_train():
    """Sample one random PPO hyper-parameter set for MountainCar (LSTM policy)."""
    sampled_lr = unif_range(0.003, 5e-6)
    print("lr: ", sampled_lr)
    return dict(
        nminibatches=1,  # LSTM policy: a single minibatch is required
        ent_coef=rand_elem([0.0, 0.01, 0.05, 0.1]),
        noptepochs=rand_int_linspace(3, 36),
        cliprange=rand_elem([0.1, 0.2, 0.3]),
        gamma=0.99,
        lr=lambda f: f * sampled_lr,
    )
def quad_train():
    """Sample one random PPO hyper-parameter set for the quad task (LSTM policy)."""
    sampled_lr = unif_range(0.003, 5e-6)
    print("lr: ", sampled_lr)
    return dict(
        nminibatches=1,  # LSTM policy: a single minibatch is required
        ent_coef=rand_elem([0.0, 0.01, 0.05, 0.1]),
        noptepochs=rand_int_linspace(3, 36),
        cliprange=rand_elem([0.1, 0.2, 0.3]),
        gamma=0.99,
        lr=lambda f: f * sampled_lr,
    )
def quad_r_train():
    """Sample one random PPO hyper-parameter set for the quad_r task (LSTM policy)."""
    sampled_lr = unif_range(0.003, 5e-6)
    print("lr: ", sampled_lr)
    return dict(
        nminibatches=1,  # LSTM policy: a single minibatch is required
        ent_coef=rand_elem([0.0, 0.01, 0.05, 0.1]),
        noptepochs=rand_int_linspace(3, 36),
        cliprange=rand_elem([0.1, 0.2, 0.3]),
        gamma=0.99,
        lr=lambda f: f * sampled_lr,
    )
def acrobot_train():
    """Sample one random PPO hyper-parameter set for Acrobot (LSTM policy)."""
    sampled_lr = unif_range(0.003, 5e-6)
    print("lr: ", sampled_lr)
    return dict(
        nminibatches=1,  # LSTM policy: a single minibatch is required
        ent_coef=rand_elem([0.0, 0.01, 0.05, 0.1]),
        noptepochs=rand_int_linspace(3, 36),
        cliprange=rand_elem([0.1, 0.2, 0.3]),
        gamma=0.99,
        lr=lambda f: f * sampled_lr,
    )
def cartpole_train():
    """Sample one random PPO hyper-parameter set for CartPole (LSTM policy)."""
    sampled_lr = unif_range(0.003, 5e-6)
    print("lr: ", sampled_lr)
    return dict(
        nminibatches=1,  # LSTM policy: a single minibatch is required
        ent_coef=rand_elem([0.0, 0.01, 0.05, 0.1]),
        noptepochs=rand_int_linspace(3, 36),
        cliprange=rand_elem([0.1, 0.2, 0.3]),
        gamma=0.99,
        lr=lambda f: f * sampled_lr,
    )
#
# PySNMP MIB module DLINK-3100-JUMBOFRAMES-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DLINK-3100-JUMBOFRAMES-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:48:33 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Auto-generated by pysmi from DLINK-3100-JUMBOFRAMES-MIB (see file header);
# `mibBuilder` is injected by the pysnmp MIB loader at import time.
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint")
rnd, = mibBuilder.importSymbols("DLINK-3100-MIB", "rnd")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
IpAddress, Bits, ModuleIdentity, iso, NotificationType, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, TimeTicks, ObjectIdentity, Counter64, Counter32, Gauge32, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "Bits", "ModuleIdentity", "iso", "NotificationType", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "TimeTicks", "ObjectIdentity", "Counter64", "Counter32", "Gauge32", "MibIdentifier")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# Module identity for the Jumbo Frames private MIB subtree.
rlJumboFrames = ModuleIdentity((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 91))
rlJumboFrames.setRevisions(('2007-01-02 00:00',))
# setRevisionsDescriptions only exists on newer pysnmp builders, hence the version gate.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: rlJumboFrames.setRevisionsDescriptions(('Initial revision.',))
if mibBuilder.loadTexts: rlJumboFrames.setLastUpdated('200701020000Z')
if mibBuilder.loadTexts: rlJumboFrames.setOrganization('Dlink, Inc. Dlink Semiconductor, Inc.')
if mibBuilder.loadTexts: rlJumboFrames.setContactInfo('www.dlink.com')
if mibBuilder.loadTexts: rlJumboFrames.setDescription('This private MIB module defines Jumbo Frames private MIBs.')
# Scalar: current jumbo-frames state (read-only; enable=1 / disable=2).
rlJumboFramesCurrentStatus = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 91, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlJumboFramesCurrentStatus.setStatus('current')
if mibBuilder.loadTexts: rlJumboFramesCurrentStatus.setDescription('Show the current Jumbo Frames status')
# Scalar: jumbo-frames state to apply after the next reset (read-write).
rlJumboFramesStatusAfterReset = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 91, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlJumboFramesStatusAfterReset.setStatus('current')
if mibBuilder.loadTexts: rlJumboFramesStatusAfterReset.setDescription('Set the Jumbo Frames status after reset')
mibBuilder.exportSymbols("DLINK-3100-JUMBOFRAMES-MIB", rlJumboFramesCurrentStatus=rlJumboFramesCurrentStatus, rlJumboFramesStatusAfterReset=rlJumboFramesStatusAfterReset, PYSNMP_MODULE_ID=rlJumboFrames, rlJumboFrames=rlJumboFrames)
| 3,339 | 1,328 |
def response(code=200, msg=None, data=None):
    """Build a standard API response envelope.

    Args:
        code: numeric status code (default 200).
        msg: human-readable message; None is normalized to "".
        data: payload; None is normalized to "".

    Only ``None`` is replaced: falsy-but-meaningful payloads such as
    ``0``, ``False`` or ``[]`` are preserved (the original truthiness
    check silently replaced them with "").
    """
    return {
        "code": code,
        "message": msg if msg is not None else "",
        "data": data if data is not None else "",
    }
| 204 | 75 |
# Version identity: base release, post-release build counter, and the exact
# git commit this build was produced from.
__base_version__ = "1.0"
__post_version__ = "137"
__gitversion__ = "9e8e29af9b9a922eb114b2c716205d0772946e56"
| 110 | 71 |
import setting
import requests
import threading
import time
import sqlite3
from bs4 import BeautifulSoup
from urllib import parse
# Watched boards as [boardId, board display name] pairs.
public_board = [["BBSMSTR_000000000059", "일반소식"], ["BBSMSTR_000000000060", "장학안내"], ["BBSMSTR_000000000055", "학사공지사항"]]
# Shared connection; check_same_thread=False because threading.Timer callbacks reuse it.
db_conn = sqlite3.connect("NoticeBot.db", check_same_thread=False)
db_cur = db_conn.cursor()
db_cur.execute('SELECT * FROM sqlite_master WHERE type="table" AND name="final_ntt"')  # does the table already exist?
r = db_cur.fetchall()
if r:
    print("기존 데이터를 불러옵니다.")
else:
    print("새로 데이터베이스를 구축합니다.")
    db_conn.execute('CREATE TABLE final_ntt(boardId TEXT, final_nttId TEXT)')
    for n in public_board:
        # Seed value: the nttId of some existing post on one of the watched
        # boards. If it is not chosen properly, the very first run announces
        # everything at once and the Telegram API rejects us with
        # "Too Many Requests" errors.
        db_conn.execute('INSERT INTO final_ntt VALUES ("' + n[0] + '", "1049241")')
    db_conn.commit()
def send_message(channel, message):
    """Send *message* to a Telegram chat/channel via the Bot API.

    Uses requests' own query-string encoding (`params=`) instead of
    hand-building the URL: the original only quoted the message, never
    the chat id, and manual quoting is easy to get wrong.
    """
    url = 'https://api.telegram.org/bot' + setting.bot_token + '/sendmessage'
    resp = requests.get(url, params={'chat_id': channel, 'text': message})
    if resp.status_code != 200:
        print("ERROR!!" + str(resp.status_code))
def find_new_ntt(board_info):
    """Scan one board for articles newer than the stored high-water mark.

    board_info: [boardId, board display name].
    New articles are announced to the notice channel and the board's stored
    final_nttId is advanced. On any failure the admin channel is notified
    and the scan is retried (a loop now, instead of the original unbounded
    recursion). SQL is parameterized instead of string-concatenated.
    """
    while True:
        try:
            url = 'https://www.ut.ac.kr/cop/bbs/' + board_info[0] + '/selectBoardList.do'
            response = requests.get(url)
            if response.status_code == 200:
                db_cur.execute("SELECT final_nttId FROM final_ntt WHERE boardId=?", (board_info[0],))
                rows = db_cur.fetchall()
                final = int(rows[0][0])
                soup = BeautifulSoup(response.text, 'html.parser')
                result_id = soup.findAll('input', {'name': 'nttId', 'type': 'hidden'})
                result_name = []
                for submit_input in soup.findAll('input', {'type': 'submit'}):
                    value = submit_input.get('value')
                    # Skip the top search button and the bottom page-survey submit button.
                    if (value != "검색") & (value != "등록하기"):
                        result_name.append(value)
                result_name.reverse()
                result_id.reverse()
                count = 0
                for node in result_id:
                    ntt_id = int(node.get('value'))
                    if ntt_id == 0:  # the search form carries a hidden nttId of 0
                        break
                    if ntt_id <= final:  # already announced
                        count += 1
                        continue
                    send_message(setting.all_notice_channel, "[" + board_info[1] + "] " + result_name[count] + " : http://www.ut.ac.kr/cop/bbs/" + board_info[0] + "/selectBoardArticle.do?nttId=" + str(ntt_id))
                    db_conn.execute("UPDATE final_ntt SET final_nttId=? WHERE boardId=?", (str(ntt_id), board_info[0]))
                    count += 1
                db_conn.commit()
            return
        except Exception:  # was a bare except: it also swallowed KeyboardInterrupt/SystemExit
            now = time.localtime()
            message = "EXCEPT!! " + board_info[1]
            message += "%04d/%02d/%02d %02d:%02d:%02d" % (now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec)
            encode_message = parse.quote(message)
            url = 'https://api.telegram.org/bot' + setting.bot_token + '/sendmessage?chat_id=' + setting.admin_channel + '&text=' + encode_message
            response = requests.get(url)
            if response.status_code != 200:
                print("NETWORK ERROR!!" + str(response.status_code) + "\n" + message)
            # loop retries (the original recursed here, risking unbounded stack growth)
def Bot_Start():
    """Poll every watched board once, then reschedule this function in 30 s."""
    for board in public_board:
        find_new_ntt(board)
    threading.Timer(30, Bot_Start).start()

Bot_Start()
from mozdns.address_record.models import AddressRecord
from mozdns.cname.models import CNAME
from mozdns.domain.models import Domain
from mozdns.mx.models import MX
from mozdns.nameserver.models import Nameserver
from mozdns.ptr.models import PTR
from mozdns.soa.models import SOA
from mozdns.srv.models import SRV
from mozdns.txt.models import TXT
from reversion.models import Version, Revision
def destroy():
    """Delete all DNS records plus their reversion history.

    The deletion order matters for inter-model dependencies and is kept
    exactly as before: history first, then records, then SOA/Domain last.
    """
    models_in_delete_order = (
        Version,
        Revision,
        TXT,
        SRV,
        CNAME,
        Nameserver,
        PTR,
        MX,
        AddressRecord,
        SOA,
        Domain,
    )
    for model in models_in_delete_order:
        model.objects.all().delete()
| 785 | 263 |
# Read the employee's salary and apply the raise: 15% up to R$1250, 10% above.
salario = float(input('Qual é salário do funcionário? R$'))
if salario <= 1250:
    percentual = 15
else:
    percentual = 10
aumento = salario + (salario * percentual / 100)
print('Quem ganhava R${} passa a ganhar R${:.2f} agora.'.format(salario, aumento))
| 255 | 109 |
# Generated by Django 2.2.5 on 2019-09-29 18:57
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: removes the `input2` field from the `input` model."""

    dependencies = [
        ('myapp', '0002_input_input2'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='input',
            name='input2',
        ),
    ]
| 319 | 114 |
from .Bootstrap import Bootstrap
from .Init import Init
from .React import React
from .Reset import Reset
from .Vue import Vue
| 127 | 35 |
import ad3
import numpy as np
from pystruct.inference.common import _validate_params
class InferenceException(Exception):
    """Raised when AD3 reports a fractional/unsolved result and the caller asked to treat that as an error."""
    pass
def inference_ad3_local(unary_potentials, pairwise_potentials, edges, relaxed=False,
                        verbose=0, return_energy=False, branch_and_bound=False,
                        inference_exception=None, return_marginals=False):
    """Run AD3 inference on a pairwise factor graph.

    A list of unary potentials selects the multi-type code path; otherwise the
    potentials are validated and reshaped to (n_nodes, n_states) first.
    When the solver result is fractional/unsolved and ``relaxed`` is set, the
    (possibly reshaped) marginals are returned directly; otherwise the argmax
    labeling is returned (offset per node type in the multi-type case).
    """
    b_multi_type = isinstance(unary_potentials, list)
    if b_multi_type:
        res = ad3.general_graph(unary_potentials, edges, pairwise_potentials,
                                verbose=verbose, n_iterations=4000,
                                exact=branch_and_bound)
    else:
        n_states, pairwise_potentials = \
            _validate_params(unary_potentials, pairwise_potentials, edges)
        unaries = unary_potentials.reshape(-1, n_states)
        res = ad3.general_graph(unaries, edges, pairwise_potentials,
                                verbose=verbose, n_iterations=4000,
                                exact=branch_and_bound)
    unary_marginals, pairwise_marginals, energy, solver_status = res
    if verbose:
        print(solver_status)

    fractional = solver_status in ("fractional", "unsolved")
    if fractional and relaxed:
        # Return the (relaxed) marginals themselves instead of a labeling.
        if not b_multi_type:
            unary_marginals = unary_marginals.reshape(unary_potentials.shape)
        y = (unary_marginals, pairwise_marginals)
    elif b_multi_type:
        if inference_exception and fractional:
            raise InferenceException(solver_status)
        # Argmax per node type, offset so labels of different types don't collide.
        offset_labels = []
        _cum_n_states = 0
        for unary_marg in unary_marginals:
            offset_labels.append(_cum_n_states + np.argmax(unary_marg, axis=-1))
            _cum_n_states += unary_marg.shape[1]
        y = np.hstack(offset_labels)
    else:
        y = np.argmax(unary_marginals, axis=-1)

    if return_energy:
        return y, -energy
    if return_marginals:
        return y, unary_marginals
    return y
| 2,041 | 669 |
#!/usr/bin/python3
from pyspark.sql import SparkSession
from haychecker.dhc.metrics import deduplication_approximated
# Example: computing approximated deduplication metrics with haychecker.
spark = SparkSession.builder.appName("Deduplication_approximated_example").getOrCreate()
df = spark.read.format("csv").option("header", "true").load("examples/resources/employees.csv")
df.show()
# Direct (eager) computation over two columns.
r1, r2 = deduplication_approximated(["title", "city"], df)
print("Deduplication_approximated title: {}, deduplication_approximated city: {}".format(r1, r2))
# Deferred computation: build tasks, combine them, then run once over the frame.
task1 = deduplication_approximated(["title", "city"])
task2 = deduplication_approximated(["lastName"])
task3 = task1.add(task2)
result = task3.run(df)
r1, r2 = result[0]["scores"]
r3 = result[1]["scores"][0]
print("Deduplication_approximated title: {}, deduplication_approximated city: {}, "
      "deduplication_approximated lastName: {}".format(r1, r2, r3))
# import keras
import keras
# import keras_retinanet
from keras_maskrcnn import models
from keras_maskrcnn.utils.visualization import draw_mask
from keras_retinanet.utils.visualization import draw_box, draw_caption, draw_annotations
from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image
from keras_retinanet.utils.colors import label_color
# import miscellaneous modules
import matplotlib.pyplot as plt
import cv2
import os
import numpy as np
import time
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from utils import binary_mask_to_rle
# set tf backend to allow memory to grow, instead of claiming everything
import tensorflow as tf
def get_session():
    """Return a TF session that grows GPU memory on demand instead of grabbing it all."""
    session_config = tf.ConfigProto()
    session_config.gpu_options.allow_growth = True
    return tf.Session(config=session_config)
# use this environment flag to change which GPU to use
#os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# set the modified tf session as backend in keras
keras.backend.tensorflow_backend.set_session(get_session())
# adjust this to point to your downloaded/trained model
model_path = os.path.join('..', 'snapshots', 'resnet50_coco_v0.2.0.h5')
# load retinanet model
model = models.load_model(model_path, backbone_name='resnet50')
#print(model.summary())
# load label to names mapping for visualization purposes
# (20 VOC-style class ids -> names, despite the COCO-named snapshot)
labels_to_names = {0: 'aeroplane', 1: 'bicycle', 2: 'bird', 3: 'boat', 4: 'bottle', 5: 'bus', 6: 'car', 7: 'cat', 8: 'chair', 9: 'cow',
                   10: 'diningtable', 11: 'dog', 12: 'horse', 13: 'motorbike', 14: 'person', 15: 'pottedplant', 16: 'sheep', 17: 'sofa', 18: 'train', 19: 'tvmonitor'}
import json  # was missing: json.dump below failed with NameError

# NOTE(review): `coco` is never defined in this script -- it presumably should
# be a pycocotools COCO("<test annotations>.json") instance; confirm and
# instantiate it before this loop.
coco_dt = []
for imgid in range(100):
    image = read_image_bgr("test_images/" + coco.loadImgs(ids=imgid)[0]['file_name'])
    image = preprocess_image(image)
    image, scale = resize_image(image)
    outputs = model.predict_on_batch(np.expand_dims(image, axis=0))
    boxes = outputs[-4][0]
    scores = outputs[-3][0]
    labels = outputs[-2][0]
    masks = outputs[-1][0]
    # boxes were predicted on the resized image; map back to original coordinates
    boxes /= scale
    for box, score, label, mask in zip(boxes, scores, labels, masks):
        if score < 0.5:
            # assumes detections are sorted by descending score -- TODO confirm
            break
        pred = {}
        pred['image_id'] = imgid  # this imgid must be same as the key of test.json
        pred['category_id'] = int(label)    # cast numpy int: json cannot serialize numpy types
        pred['segmentation'] = binary_mask_to_rle(mask)  # binary mask -> RLE, e.g. 512x512 -> rle
        pred['score'] = float(score)        # cast numpy float for json serialization
        coco_dt.append(pred)
with open("submission.json", "w") as f:
    json.dump(coco_dt, f)
# Leo colorizer control file for lotos mode.
# This file is in the public domain.
# Properties for lotos mode.
# Comment delimiters and auto-indent trigger for the LOTOS colorizer mode.
properties = {
    "commentEnd": "*)",
    "commentStart": "(*",
    # Indent after lines opening a construct (let/process/...) or consisting
    # of a LOTOS composition operator.
    "indentNextLines": "\\s*(let|library|process|specification|type|>>).*|\\s*(\\(|\\[\\]|\\[>|\\|\\||\\|\\|\\||\\|\\[.*\\]\\||\\[.*\\]\\s*->)\\s*",
}

# Attributes dict for lotos_main ruleset.
lotos_main_attributes_dict = {
    "default": "null",
    "digit_re": "",
    "escape": "",
    "highlight_digits": "false",
    "ignore_case": "true",
    "no_word_sep": "",
}

# Dictionary of attributes dictionaries for lotos mode.
attributesDictDict = {
    "lotos_main": lotos_main_attributes_dict,
}
# Keywords dict for lotos_main ruleset.
# Keywords for the lotos_main ruleset, grouped by highlight class instead of
# being listed entry-by-entry: keyword1 = language/structure keywords,
# keyword2 = standard-library sorts/types, literal1 = boolean literals.
_LOTOS_KEYWORD1 = (
    "accept", "actualizedby", "any", "behavior", "behaviour", "choice",
    "endlib", "endproc", "endspec", "endtype", "eqns", "exit", "for",
    "forall", "formaleqns", "formalopns", "formalsorts", "hide", "i",
    "in", "is", "let", "library", "noexit", "of", "ofsort", "opnnames",
    "opns", "par", "process", "renamedby", "sortnames", "sorts",
    "specification", "stop", "type", "using", "where",
)
_LOTOS_KEYWORD2 = (
    "basicnaturalnumber", "basicnonemptystring", "bit", "bitnatrepr",
    "bitstring", "bool", "boolean", "decdigit", "decnatrepr", "decstring",
    "element", "fbool", "fboolean", "hexdigit", "hexnatrepr", "hexstring",
    "nat", "natrepresentations", "naturalnumber", "nonemptystring",
    "octdigit", "octet", "octetstring", "octnatrepr", "octstring",
    "richernonemptystring", "set", "string", "string0", "string1",
)
_LOTOS_LITERAL1 = ("false", "true")

lotos_main_keywords_dict = {word: "keyword1" for word in _LOTOS_KEYWORD1}
lotos_main_keywords_dict.update((word, "keyword2") for word in _LOTOS_KEYWORD2)
lotos_main_keywords_dict.update((word, "literal1") for word in _LOTOS_LITERAL1)

# Dictionary of keywords dictionaries for lotos mode.
keywordsDictDict = {
    "lotos_main": lotos_main_keywords_dict,
}
# Rules for lotos_main ruleset.
def lotos_rule0(colorer, s, i):
    """Span rule: (* ... *) comments."""
    return colorer.match_span(s, i, kind="comment1", begin="(*", end="*)",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="", exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def lotos_rule1(colorer, s, i):
    """Operator: '>>' (enabling)."""
    return colorer.match_seq(s, i, kind="operator", seq=">>",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def lotos_rule2(colorer, s, i):
    """Operator: '[>' (disabling)."""
    return colorer.match_seq(s, i, kind="operator", seq="[>",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def lotos_rule3(colorer, s, i):
    """Operator: '|||' (interleaving)."""
    return colorer.match_seq(s, i, kind="operator", seq="|||",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def lotos_rule4(colorer, s, i):
    """Operator: '||' (full synchronization)."""
    return colorer.match_seq(s, i, kind="operator", seq="||",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def lotos_rule5(colorer, s, i):
    """Operator: '|[' (start of selective parallel gate list)."""
    return colorer.match_seq(s, i, kind="operator", seq="|[",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def lotos_rule6(colorer, s, i):
    """Operator: ']|' (end of selective parallel gate list)."""
    return colorer.match_seq(s, i, kind="operator", seq="]|",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def lotos_rule7(colorer, s, i):
    """Operator: '[]' (choice)."""
    return colorer.match_seq(s, i, kind="operator", seq="[]",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def lotos_rule8(colorer, s, i):
    """Keyword rule: dispatch to the keywords dictionary."""
    return colorer.match_keywords(s, i)
# Rules dict for lotos_main ruleset.
# Rules dict for lotos_main ruleset, keyed by the first character of a token.
# Every identifier-ish start character ('0'-'9', '@', 'A'-'Z', 'a'-'z')
# dispatches to the keyword matcher; the comprehension builds a fresh list
# per key, matching the original per-key lists.
rulesDict1 = {
    ch: [lotos_rule8]
    for ch in "0123456789@ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
}
# Punctuation starters map to their operator/comment rules.
rulesDict1["("] = [lotos_rule0]
rulesDict1[">"] = [lotos_rule1]
rulesDict1["["] = [lotos_rule2, lotos_rule7]
rulesDict1["]"] = [lotos_rule6]
rulesDict1["|"] = [lotos_rule3, lotos_rule4, lotos_rule5]
# x.rulesDictDict for lotos mode.
# x.rulesDictDict for lotos mode: maps ruleset name to its rules dict.
rulesDictDict = {
    "lotos_main": rulesDict1,
}

# Import dict for lotos mode (no delegated modes).
importDict = {}
| 6,611 | 2,735 |
def diagonalDifference(arr):
    """Return the absolute difference between the sums of the two diagonals of a square matrix."""
    n = len(arr)
    primary = sum(arr[i][i] for i in range(n))
    secondary = sum(arr[i][n - 1 - i] for i in range(n))
    return abs(primary - secondary)
if __name__ == '__main__':
    # quick demo: expected output is 15
    sample = [[11, 2, 4], [4, 5, 6], [10, 8, -12]]
    print(diagonalDifference(sample))
| 275 | 128 |
from robot.libraries.BuiltIn import BuiltIn
class BaseNPSPPage:
    """Base class for NPSP Robot Framework page objects.

    NOTE(review): relies on ``self.builtin`` (a robot BuiltIn instance) being
    provided by a subclass or mixin -- it is not defined here; confirm.
    """

    @property
    def npsp(self):
        # Robot keyword library registered under the name "NPSP".
        return self.builtin.get_library_instance('NPSP')

    @property
    def pageobjects(self):
        # CumulusCI's PageObjects robot library instance.
        return self.builtin.get_library_instance("cumulusci.robotframework.PageObjects")
import pybullet as p
import time
import numpy as np
import pybullet_data
from balltze_description import Balltze, BalltzeKinematics
import math
if __name__ == '__main__':
    # Simulation step of 1/240 s (PyBullet's default rate).
    time_step = 1./240.
    physicsClient = p.connect(p.GUI)
    p.setAdditionalSearchPath(pybullet_data.getDataPath())
    p.setGravity(0,0,-9.81)
    p.setTimeStep(time_step)
    planeId = p.loadURDF('plane.urdf')
    robot = Balltze('../../../balltze_description/balltze_description/urdf/balltze.urdf', p, position=[0,0,0.11])
    kinematics = BalltzeKinematics(None)
    i = 0.0   # sweep phase, driven as a triangle wave below
    dir = 1   # sweep direction (+1 / -1)
    while True:
        try:
            # Body IK: rpy [0,0,i], body position [0, i/10, 0.02], then the
            # four fixed foot target positions.
            ends = kinematics.body_inverse([0.0,0.0,i], [0.0,i/10,0.02], [[0.1, -0.1, -0.06],[0.1, 0.06, -0.02],[-0.1, -0.06, -0.06],[-0.1, 0.06, -0.06]])
            joints = kinematics.inverse(ends)
            robot.set_joint_arr(np.array(joints.T).reshape(1,12)[0])
            # print((kinematics.forward_leg(joints)*1000).astype(np.int64)/1000)
            # print(joints)
            # print(ends)
        except Exception as e:
            # IK may fail for unreachable poses; log and keep sweeping.
            print(e)
        # Triangle-wave sweep of i within [-pi/10, pi/10].
        i += dir*0.0007
        if i >= np.pi/10:
            dir = -1
        if i <= -np.pi/10:
            dir = 1
        # robot.set_joint_arr([0, -np.pi/2, np.pi/2]*4)
        p.stepSimulation()
        time.sleep(time_step)
    # NOTE(review): unreachable -- the loop above never breaks (Ctrl-C raises
    # KeyboardInterrupt past it), so this cleanup never runs. Also
    # getBasePositionAndOrientation is passed the Balltze wrapper object,
    # not a PyBullet body id -- confirm the intended argument.
    cubePos, cubeOrn = p.getBasePositionAndOrientation(robot)
    print(cubePos,cubeOrn)
    p.disconnect()
| 1,395 | 579 |
#!/usr/bin/python3
# Copyright (c) 2014 Wladimir J. van der Laan, Visucore
# Distributed under the MIT software license, see
# http://www.opensource.org/licenses/mit-license.php.
'''
urbit UDP sniffer
Usage: urbit_sniffer.py [-p <port1>-<port2>,<port3>,...] [-i <interface>]
'''
import struct, sys, io, argparse, datetime
from struct import pack,unpack
from binascii import b2a_hex
from urbit.util import format_hexnum,from_le,to_le,dump_noun
from urbit.cue import cue
from urbit.pname import pname
from urbit.crua import de_crua
from misc.sniffer import Sniffer, PCapLoader
if sys.version_info[0:2] < (3,0):
print("Requires python3", file=sys.stderr)
exit(1)
class Args: # default args
    """Default option values; fields are overwritten by parse_args()."""
    # interface we're interested in
    interface = b'eth0'
    # ports we're interested in
    ports = set(list(range(4000,4008)) + [13337, 41954])
    # known keys for decrypting packets (keyhash -> key)
    keys = {}
    # dump entire nouns
    show_nouns = True
    # show hex for decrypted packets
    show_raw = False
    # show timestamps
    show_timestamps = False
    # show keyhashes for decrypted packets
    always_show_keyhashes = False
# constants...
# Packet crypto-mode ids as seen on the wire, mapped to their urbit names.
CRYPTOS = {0:'%none', 1:'%open', 2:'%fast', 3:'%full'}
# utilities...
def ipv4str(addr):
    '''Render a 4-byte address as dotted-quad IPv4 notation.'''
    return '.'.join(str(octet) for octet in addr)
def crypto_name(x):
    '''Name for crypto algo; unknown ids fall back to "unkNN".'''
    return CRYPTOS.get(x, 'unk%02i' % x)
def hexstr(x):
    '''Bytes to lowercase hex string (bytes.hex() replaces binascii.b2a_hex + decode).'''
    return x.hex()
def colorize(str, col):
    '''Wrap *str* in ANSI 256-color foreground escape codes (reset afterwards).'''
    return '\x1b[38;5;%im%s\x1b[0m' % (col, str)
# cli colors and glyphs
COLOR_TIMESTAMP = 38
COLOR_RECIPIENT = 51
COLOR_IP = 21
COLOR_HEADER = 27
COLOR_VALUE = 33
COLOR_DATA = 250
COLOR_DATA_ENC = 245
v_arrow = colorize('→', 240)
v_attention = colorize('>', 34) + colorize('>', 82) + colorize('>', 118)
v_colon = colorize(':', 240)
v_equal = colorize('=', 245)
def parse_args():
    '''Parse the command line into an Args instance.

    Also constructs the packet source (live Sniffer or PCapLoader) and,
    when -k is given, loads decryption keys from the named file.
    '''
    args = Args()
    parser = argparse.ArgumentParser(description='Urbit sniffer. Dump incoming and outgoing urbit packets.')
    pdefault = '4000-4007,13337,41954' # update this when Args changes...
    idefault = args.interface.decode()
    # BUG FIX: short and long option names must be separate strings;
    # '-p, --ports' previously created a single unusable option literally
    # named "-p, --ports".
    parser.add_argument('-p', '--ports', dest='ports', help='Ports to listen on (default: '+pdefault+')')
    parser.add_argument('-i', '--interface', dest='interface', help='Interface to listen on (default:'+idefault+')', default=idefault)
    parser.add_argument('-k', '--keys', dest='keys', help='Import keys from file (with <keyhash> <key> per line)', default=None)
    parser.add_argument('-n', '--no-show-nouns', dest='show_nouns', action='store_false', help='Don\'t show full noun representation of decoded packets', default=True)
    parser.add_argument('-r', '--show-raw', dest='show_raw', action='store_true', help='Show raw hex representation of decoded packets', default=False)
    parser.add_argument('-t', '--show-timestamp', dest='show_timestamps', action='store_true', help='Show timestamps', default=False)
    parser.add_argument('-l', '--read', dest='read_dump', help='Read a pcap dump file (eg from tcpdump)', default=None)
    # BUG FIX: without action='store_true' this flag demanded a value.
    parser.add_argument('--always-show-keyhashes', dest='always_show_keyhashes', action='store_true', help='Show keyhashes even for decrypted packets (more spammy)', default=False)
    r = parser.parse_args()
    # Packet source: pre-recorded dump takes precedence over live capture.
    if r.read_dump is not None:
        args.packet_source = PCapLoader(r.read_dump)
    else:
        args.packet_source = Sniffer(r.interface.encode())
    if r.ports is not None:
        args.ports = set()
        # Each comma-separated entry is a single port or an "a-b" range.
        for t in r.ports.split(','):
            (a,_,b) = t.partition('-')
            ai = int(a)
            bi = int(b) if b else ai
            args.ports.update(range(ai, bi+1))
    if r.keys is not None:
        args.keys = {}
        print(v_attention + ' Loading decryption keys from ' + r.keys)
        with open(r.keys, 'r') as f:
            for line in f:
                line = line.strip()
                if not line or line.startswith('#'):
                    continue
                l = line.split()
                # filter out '.' so that keys can be copied directly
                args.keys[int(l[0].replace('.',''))] = int(l[1].replace('.',''))
    args.show_nouns = r.show_nouns
    args.show_raw = r.show_raw
    args.show_timestamps = r.show_timestamps
    args.always_show_keyhashes = r.always_show_keyhashes
    return args
def dump_urbit_packet(args, timestamp, srcaddr, sport, dstaddr, dport, data):
    '''Parse one urbit UDP payload, decrypt it if possible, and pretty-print it.

    srcaddr/sport/dstaddr/dport may all be None for a nested %fore packet,
    in which case only the sender/receiver ships are shown. Recurses on
    nested packets.
    '''
    try:
        # Urbit header and payload
        # 32-bit little-endian header: proto(3) | mug(20) | yax(2) | qax(2) | crypto(5)
        urhdr = unpack('<L', data[0:4])[0]
        proto = urhdr & 7
        mug = (urhdr >> 3) & 0xfffff
        # yax/qax encode the byte widths of the sender/receiver addresses
        # (2, 4, 8 or 16 bytes) — presumably ship address classes; confirm
        # against the urbit %ames protocol spec.
        yax = (urhdr >> 23) & 3
        yax_bytes = 1<<(yax+1)
        qax = (urhdr >> 25) & 3
        qax_bytes = 1<<(qax+1)
        crypto = (urhdr >> 27)
        sender = from_le(data[4:4+yax_bytes])
        receiver = from_le(data[4+yax_bytes:4+yax_bytes+qax_bytes])
        payload = data[4+yax_bytes+qax_bytes:]
        if crypto == 2: # %fast
            # %fast payloads are prefixed by a 16-byte hash naming the key
            keyhash = from_le(payload[0:16])
            payload = payload[16:]
        else:
            keyhash = None
    except (IndexError, struct.error):
        print('Warn: invpkt')
        return
    # Decode packet if crypto known
    decrypted = False
    if crypto in [0,1]: # %none %open
        decrypted = True
    if crypto == 2 and keyhash in args.keys: # %fast
        payload = from_le(payload)
        payload = de_crua(args.keys[keyhash], payload)
        payload = to_le(payload)
        decrypted = True
    # Print packet
    hdata = [('proto', str(proto)),
            ('mug', '%05x' % mug),
            ('crypto', crypto_name(crypto))]
    if keyhash is not None and (args.always_show_keyhashes or not decrypted):
        hdata += [('keyhash', format_hexnum(keyhash))]
    if srcaddr is not None:
        # Top-level packet: show full IP endpoints plus ship names
        metadata = ''
        if args.show_timestamps:
            metadata += colorize(datetime.datetime.utcfromtimestamp(timestamp).strftime('%H%M%S.%f'), COLOR_TIMESTAMP) + ' '
        metadata += (colorize(ipv4str(srcaddr), COLOR_IP) + v_colon + colorize(str(sport), COLOR_IP) + ' ' +
                colorize(pname(sender), COLOR_RECIPIENT) + ' ' +
                v_arrow + ' ' +
                colorize(ipv4str(dstaddr), COLOR_IP) + v_colon + colorize(str(dport), COLOR_IP) + ' ' +
                colorize(pname(receiver), COLOR_RECIPIENT))
    else:
        metadata = (' %fore ' + # nested packet
                colorize(pname(sender), COLOR_RECIPIENT) + ' ' +
                v_arrow + ' ' +
                colorize(pname(receiver), COLOR_RECIPIENT))
    print( metadata + v_colon + ' ' +
        ' '.join(colorize(key, COLOR_HEADER) + v_equal + colorize(value, COLOR_VALUE) for (key,value) in hdata))
    if decrypted: # decrypted or unencrypted data
        if args.show_raw:
            print('    ' + colorize(hexstr(payload), COLOR_DATA))
        # cue deserializes the jammed noun from the payload integer
        cake = cue(from_le(payload))
        if cake[0] == 1701998438: # %fore
            # forwarded packet: recurse on the embedded sub-packet
            subpacket = to_le(cake[1][1][1])
            dump_urbit_packet(args, None, None, None, None, None, subpacket)
        else:
            if args.show_nouns:
                sys.stdout.write('    ')
                dump_noun(cake, sys.stdout)
                sys.stdout.write('\n')
    else: # [sealed]
        # cannot decrypt: show the raw ciphertext in a dimmer color
        print('    [' + colorize(hexstr(payload), COLOR_DATA_ENC)+']')
def main(args):
    '''Capture loop: filter IPv4/UDP packets on the configured ports and
    hand each payload to dump_urbit_packet().'''
    print(v_attention + ' Listening on ' + args.packet_source.name + ' ports ' + (',').join(str(x) for x in args.ports))
    for timestamp,packet in args.packet_source:
        try:
            # IP header (fixed 20-byte prefix; options are skipped via ihl)
            iph = unpack('!BBHHHBBH4s4s', packet[0:20])
            ihl = (iph[0] & 15)*4
            if ihl < 20: # cannot handle IP headers <20 bytes
                # print("Warn: invhdr")
                continue
            protocol = iph[6]
            srcaddr = iph[8]
            dstaddr = iph[9]
            if protocol != 17: # not UDP
                #print("Warn: invproto")
                continue
            # UDP header immediately follows the (variable-length) IP header
            (sport, dport, ulength, uchecksum) = unpack('!HHHH', packet[ihl:ihl+8])
            data = packet[ihl+8:ihl+ulength]
            if len(data) != (ulength-8):
                print("Warn: invlength")
                continue # invalid length packet (truncated capture)
            if dport not in args.ports and sport not in args.ports: # only urbit ports
                continue
        except (IndexError, struct.error):
            print('Warn: invpkt')
            continue
        dump_urbit_packet(args, timestamp, srcaddr, sport, dstaddr, dport, data)
if __name__ == '__main__':
    # Force UTF8 out — the glyphs (→ etc.) must survive piping/redirection
    sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding='utf8', line_buffering=True)
    sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding='utf8', line_buffering=True)
    try:
        main(parse_args())
    except KeyboardInterrupt:
        # Ctrl-C stops the capture loop quietly
        pass
| 8,850 | 3,087 |
"""
Python3 program to calculate pi to 100 places using Archimedes method
"""
from decimal import Decimal, getcontext
from time import time
def pi_archimedes(n):
    """
    Approximate pi with n doublings of an inscribed polygon (Archimedes).

    Starting from a square (side² = 2, half the sides counted), each
    iteration doubles the side count and recomputes the squared edge
    length; the perimeter estimate sides * sqrt(edge²) converges to pi.
    """
    edge_squared = Decimal(2)  # Decimal keeps full context precision
    half_sides = 2
    for _ in range(n):
        edge_squared = 2 - 2 * (1 - edge_squared / 4).sqrt()
        half_sides *= 2
    return half_sides * edge_squared.sqrt()
def main():
"""
Try the series
"""
# Pi to 1000 places for reference
Pi = 3.1415926535897932384626433832795028841971693993751058209749445923078164062862089986280348253421170679821480865132823066470938446095505822317253594081284811174502841027019385211055596446229489549303819644288109756659334461284756482337867831652712019091456485669234603486104543266482133936072602491412737245870066063155881748815209209628292540917153643678925903600113305305488204665213841469519415116094330572703657595919530921861173819326117931051185480744623799627495673518857527248912279381830119491298336733624406566430860213949463952247371907021798609437027705392171762931767523846748184676694051320005681271452635608277857713427577896091736371787214684409012249534301465495853710507922796892589235420199561121290219608640344181598136297747713099605187072113499999983729780499510597317328160963185950244594553469083026425223082533446850352619311881710100031378387528865875332083814206171776691473035982534904287554687311595628638823537875937519577818577805321712268066130019278766111959092164201989
places = 100
old_result = None
start = time() # Timestamp for timing
for n in range(10*places):
# Do calculations with double precision
getcontext().prec = 2*places # Sets precision to 2*places. This is to reduce loss due to rounding
result = pi_archimedes(n)
# Print the result with single precision
getcontext().prec = places
result = +result # Do the rounding on result from 2*places to 1*places (ie the number we want)
error = result - Decimal(Pi) # Simple error calculation
#print("Result: ", result)
#print("Error: ", Decimal(error))
if result == old_result: # If the numbers we get are the same, break. This is as close as we'll get
break
old_result = result
print("Result: ", result)
print("Error: ", Decimal(error))
print("Time: ", time()-start)
if __name__ == "__main__":
main()
| 2,556 | 1,478 |
import unittest
import torch
import advi.transformations as trans
class Test_Transformations(unittest.TestCase):
    """Unit tests for the advi.transformations helpers."""

    def approx(self, a, b, eps=1E-8):
        """Assert that a and b differ by less than eps."""
        self.assertTrue(abs(a - b) < eps)

    def test_bounded(self):
        """logit/invlogit round-trip on (a, b)-bounded values."""
        half = torch.tensor(.5)
        self.assertTrue(trans.logit(half) == 0)
        ten = torch.tensor(10.0)
        self.approx(trans.logit(ten, 5, 16), -.1823216, 1e-5)
        self.approx(trans.invlogit(trans.logit(ten, 5, 16), 5, 16), ten, 1e-5)

    def test_simplex(self):
        """invsoftmax followed by softmax recovers the simplex point."""
        probs = [.1, .3, .6]
        unconstrained = trans.invsoftmax(torch.tensor(probs))
        recovered = torch.softmax(unconstrained, 0)
        self.assertTrue(recovered.sum() == 1)
        for actual, expected in zip(recovered, probs):
            self.approx(actual.item(), expected, 1e-6)

    def test_lpdf_logx(self):
        """Density of a log-transformed Gamma variable."""
        gamma_dist = torch.distributions.gamma.Gamma(2, 3)
        value = torch.tensor(3.)
        log_density = trans.lpdf_logx(torch.log(value), gamma_dist.log_prob)
        print(log_density)
        self.approx(log_density, -4.6055508, eps=1e-6)

    def test_lpdf_logitx(self):
        """Density of a logit-transformed Beta variable."""
        beta_dist = torch.distributions.beta.Beta(2, 3)
        value = torch.tensor(.6)
        log_density = trans.lpdf_logitx(trans.logit(value), beta_dist.log_prob,
                                        a=torch.tensor(0.), b=torch.tensor(1.))
        print(log_density)
        self.approx(log_density, -1.285616793366446, eps=1e-6)

    def test_lpdf_real_dirichlet(self):
        # This tests if the dirichlet in the two-dimensional case
        # (which is essentially a beta) works properly.
        # TODO: Higher dimensional cases are harder to check, but should
        # be done eventually.
        concentration = torch.tensor([2., 3.])
        dirichlet = torch.distributions.dirichlet.Dirichlet(concentration)
        point = torch.tensor([.6, .4])
        unconstrained = trans.invsoftmax(point)
        log_density = trans.lpdf_real_dirichlet(unconstrained, dirichlet.log_prob)
        print(log_density)
        self.approx(log_density, -1.285616793366446, eps=1e-6)
| 1,942 | 788 |
import unittest
import json
from app.app import create_app
POST_PRODUCT_URL = '/api/v1/products'
GET_A_SINGLE_PRODUCT = '/api/v1/product/1'
GET_ALL_PRODUCTS = '/api/v1/products'


class TestProduct(unittest.TestCase):
    """Endpoint tests for the products API."""

    def setUp(self):
        """Initialize the api with test variable"""
        self.app = create_app('testing')
        self.client = self.app.test_client()
        # JSON-encoded request body shared by the tests below.
        self.create_product = json.dumps(dict(
            product_name="shoes",
            stock=2,
            price=3000
        ))

    def test_add_product(self):
        """Test for post product"""
        resource = self.client.post(
            POST_PRODUCT_URL,
            data=self.create_product,
            content_type='application/json')
        data = json.loads(resource.data.decode())
        print(data)
        self.assertEqual(resource.status_code, 201, msg='CREATED')
        self.assertEqual(resource.content_type, 'application/json')

    def test_get_products(self):
        """test we can get products"""
        # BUG FIX: create_product is already a JSON string; wrapping it in
        # json.dumps() again double-encoded the payload.
        resource = self.client.get(POST_PRODUCT_URL,
                                   data=self.create_product,
                                   content_type='application/json')
        # BUG FIX: the response bytes are JSON, so parse with json.loads
        # (json.dumps merely re-quoted the raw response text).
        get_data = json.loads(resource.data.decode())
        print(get_data)
        self.assertEqual(resource.content_type, 'application/json')
        self.assertEqual(resource.status_code, 200)

    def test_get(self):
        """test we can get a single products"""
        resource = self.client.get(GET_A_SINGLE_PRODUCT)
        self.assertEqual(resource.status_code, 404)
| 1,638 | 515 |
import boto3
from botocore.client import BaseClient
from ..key_store import KeyStore
from .object import CryptoObject
from .stream_body_wrapper import StreamBodyWrapper
class CryptoS3(object):
    """Thin wrapper around a boto3 S3 client adding client-side encryption.

    put_object/get_object are intercepted to encrypt/decrypt through the
    key store; every other attribute is delegated to the wrapped client.
    """

    def __init__(self, client: BaseClient, key_store: KeyStore) -> None:
        self._client = client
        self._key_store = key_store

    def put_object(self, CSEKeyId: str, Bucket: str, Key: str, **kwargs):
        """Encrypt and upload an object with the key named by CSEKeyId."""
        crypto_object = CryptoObject(
            key_store=self._key_store,
            object=boto3.resource("s3").Object(Bucket, Key),
        )
        return crypto_object.put(CSEKeyId=CSEKeyId, **kwargs)

    def get_object(self, **kwargs):
        """Fetch an object, wrapping its body so reads decrypt transparently."""
        response = self._client.get_object(**kwargs)
        response["Body"] = StreamBodyWrapper(
            key_store=self._key_store,
            stream_body=response["Body"],
            metadata=response["Metadata"],
        )
        return response

    def __getattr__(self, name: str):
        """Catch any method/attribute lookups that are not defined in this class and try
        to find them on the provided bridge object.
        :param str name: Attribute name
        :returns: Result of asking the provided client object for that attribute name
        :raises AttributeError: if attribute is not found on provided bridge object
        """
        return getattr(self._client, name)
| 1,346 | 379 |
"""
Unit test for EC2 subnet.
"""
import unittest
import mock
from treadmill.infra.subnet import Subnet
class SubnetTest(unittest.TestCase):
    """Unit tests for treadmill.infra.subnet.Subnet."""

    @mock.patch('treadmill.infra.connection.Connection')
    def test_init(self, ConnectionMock):
        """Constructor should populate vpc_id, name (from tags) and conns."""
        conn_mock = ConnectionMock()
        Subnet.ec2_conn = Subnet.route53_conn = conn_mock
        subnet = Subnet(
            id=1,
            vpc_id='vpc-id',
            metadata={
                'Tags': [{
                    'Key': 'Name',
                    'Value': 'goo'
                }]
            }
        )
        # BUG FIX: assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(subnet.vpc_id, 'vpc-id')
        self.assertEqual(subnet.name, 'goo')
        self.assertEqual(subnet.ec2_conn, conn_mock)

    @mock.patch('treadmill.infra.connection.Connection')
    def test_create_tags(self, ConnectionMock):
        """create_tags should tag the subnet resource with its name."""
        conn_mock = ConnectionMock()
        conn_mock.create_tags = mock.Mock()
        Subnet.ec2_conn = Subnet.route53_conn = conn_mock
        subnet = Subnet(
            name='foo',
            id='1',
            vpc_id='vpc-id'
        )
        subnet.create_tags()
        conn_mock.create_tags.assert_called_once_with(
            Resources=['1'],
            Tags=[{
                'Key': 'Name',
                'Value': 'foo'
            }]
        )

    @mock.patch('treadmill.infra.connection.Connection')
    def test_create(self, ConnectionMock):
        """create should provision subnet, route table, route and association."""
        ConnectionMock.context.region_name = 'us-east-1'
        conn_mock = ConnectionMock()
        subnet_json_mock = {
            'SubnetId': '1'
        }
        conn_mock.create_subnet = mock.Mock(return_value={
            'Subnet': subnet_json_mock
        })
        conn_mock.create_route_table = mock.Mock(return_value={
            'RouteTable': {'RouteTableId': 'route-table-id'}
        })
        Subnet.ec2_conn = Subnet.route53_conn = conn_mock
        _subnet = Subnet.create(
            cidr_block='172.23.0.0/24',
            vpc_id='vpc-id',
            name='foo',
            gateway_id='gateway-id'
        )
        self.assertEqual(_subnet.id, '1')
        self.assertEqual(_subnet.name, 'foo')
        self.assertEqual(_subnet.metadata, subnet_json_mock)
        conn_mock.create_subnet.assert_called_once_with(
            VpcId='vpc-id',
            CidrBlock='172.23.0.0/24',
            AvailabilityZone='us-east-1a'
        )
        conn_mock.create_tags.assert_called_once_with(
            Resources=['1'],
            Tags=[{
                'Key': 'Name',
                'Value': 'foo'
            }]
        )
        conn_mock.create_route_table.assert_called_once_with(
            VpcId='vpc-id'
        )
        conn_mock.create_route.assert_called_once_with(
            RouteTableId='route-table-id',
            DestinationCidrBlock='0.0.0.0/0',
            GatewayId='gateway-id'
        )
        conn_mock.associate_route_table.assert_called_once_with(
            RouteTableId='route-table-id',
            SubnetId='1',
        )

    @mock.patch('treadmill.infra.connection.Connection')
    def test_refresh(self, ConnectionMock):
        """refresh should re-read metadata and vpc_id from EC2."""
        conn_mock = ConnectionMock()
        subnet_json_mock = {
            'VpcId': 'vpc-id',
            'Foo': 'bar'
        }
        conn_mock.describe_subnets = mock.Mock(return_value={
            'Subnets': [subnet_json_mock]
        })
        Subnet.ec2_conn = Subnet.route53_conn = conn_mock
        _subnet = Subnet(id='subnet-id', vpc_id=None, metadata=None)
        _subnet.refresh()
        self.assertEqual(_subnet.vpc_id, 'vpc-id')
        self.assertEqual(_subnet.metadata, subnet_json_mock)

    @mock.patch.object(Subnet, 'refresh')
    @mock.patch.object(Subnet, 'get_instances')
    @mock.patch('treadmill.infra.connection.Connection')
    def test_show(self, ConnectionMock, get_instances_mock, refresh_mock):
        """show should refresh, fetch instances and return a summary dict."""
        conn_mock = ConnectionMock()
        Subnet.ec2_conn = Subnet.route53_conn = conn_mock
        _subnet = Subnet(id='subnet-id',
                         vpc_id='vpc-id',
                         metadata=None)
        _subnet.instances = None
        result = _subnet.show()
        self.assertEqual(
            result,
            {
                'VpcId': 'vpc-id',
                'SubnetId': 'subnet-id',
                'Instances': None
            }
        )
        get_instances_mock.assert_called_once_with(refresh=True, role=None)
        refresh_mock.assert_called_once()

    @mock.patch('treadmill.infra.connection.Connection')
    def test_persisted(self, ConnectionMock):
        """persisted is True only once metadata carries a SubnetId."""
        _subnet = Subnet(id='subnet-id', metadata={'foo': 'goo'})
        self.assertFalse(_subnet.persisted)
        _subnet.metadata['SubnetId'] = 'subnet-id'
        self.assertTrue(_subnet.persisted)

    @mock.patch('treadmill.infra.connection.Connection')
    def test_persist(self, ConnectionMock):
        """persist should create the subnet and store returned metadata."""
        ConnectionMock.context.region_name = 'us-east-1'
        conn_mock = ConnectionMock()
        Subnet.ec2_conn = Subnet.route53_conn = conn_mock
        conn_mock.create_subnet = mock.Mock(
            return_value={
                'Subnet': {
                    'foo': 'bar'
                }
            }
        )
        _subnet = Subnet(
            id='subnet-id', metadata=None, vpc_id='vpc-id', name='subnet-name'
        )
        _subnet.persist(
            cidr_block='cidr-block',
            gateway_id='gateway-id',
        )
        self.assertEqual(_subnet.metadata, {'foo': 'bar'})
        conn_mock.create_subnet.assert_called_once_with(
            VpcId='vpc-id',
            CidrBlock='cidr-block',
            AvailabilityZone='us-east-1a'
        )
| 5,627 | 1,831 |
import tempfile
from langumo.building import TrainTokenizer, TokenizeSentences
from langumo.utils import AuxiliaryFileManager
# Fixed four-paragraph sample text (about Wikipedia) used as a tiny corpus
# by the tokenizer tests below. The byte content matters: the tests assert
# exact line/vocab counts derived from it, so do not edit the text.
_dummy_corpus_content = (
    'Wikipedia is a multilingual online encyclopedia created and maintained '
    'as an open collaboration project by a community of volunteer editors '
    'using a wiki-based editing system. It is the largest and most popular '
    'general reference work on the World Wide Web. It is also one of the 15 '
    'most popular websites ranked by Alexa, as of August 2020. It features '
    'exclusively free content and no commercial ads. It is hosted by the '
    'Wikimedia Foundation, a non-profit organization funded primarily through '
    'donations.\n'
    'Wikipedia was launched on January 15, 2001, and was created by Jimmy '
    'Wales and Larry Sanger. Sanger coined its name as a portmanteau of the '
    'terms "wiki" and "encyclopedia". Initially an English-language '
    'encyclopedia, versions of Wikipedia in other languages were quickly '
    'developed. With 6.1 million articles, the English Wikipedia is the '
    'largest of the more than 300 Wikipedia encyclopedias. Overall, Wikipedia '
    'comprises more than 54 million articles attracting 1.5 billion unique '
    'visitors per month.\n'
    'In 2005, Nature published a peer review comparing 42 hard science '
    'articles from Encyclopædia Britannica and Wikipedia and found that '
    'Wikipedia\'s level of accuracy approached that of Britannica, although '
    'critics suggested that it might not have fared so well in a similar '
    'study of a random sampling of all articles or one focused on social '
    'science or contentious social issues. The following year, Time stated '
    'that the open-door policy of allowing anyone to edit had made Wikipedia '
    'the biggest and possibly the best encyclopedia in the world, and was a '
    'testament to the vision of Jimmy Wales.\n'
    'Wikipedia has been criticized for exhibiting systemic bias and for being '
    'subject to manipulation and spin in controversial topics; Edwin Black '
    'has criticized Wikipedia for presenting a mixture of "truth, half truth, '
    'and some falsehoods". Wikipedia has also been criticized for gender '
    'bias, particularly on its English-language version, where the dominant '
    'majority of editors are male. However, edit-a-thons have been held to '
    'encourage female editors and increase the coverage of women\'s topics. '
    'Facebook announced that by 2017 it would help readers detect fake news '
    'by suggesting links to related Wikipedia articles. YouTube announced a '
    'similar plan in 2018.'
)
def test_subset_file_creation():
    """Subset files should hold as many whole lines as fit the byte budget."""
    # (subset_size in bytes, expected number of 13-byte lines)
    cases = [(1024, 79), (128, 10), (2000, 100)]
    with tempfile.TemporaryDirectory() as tdir, \
            AuxiliaryFileManager(f'{tdir}/workspace') as afm:
        corpus = afm.create()
        with corpus.open('w') as fp:
            fp.write('hello world!\n' * 100)
        for subset_size, expected_lines in cases:
            subset = (TrainTokenizer(subset_size=subset_size)
                      ._create_subset_file(afm, corpus))
            with subset.open('r') as fp:
                assert len(fp.readlines()) == expected_lines
def test_training_wordpiece_tokenizer():
    """Trained vocabulary should have exactly vocab_size entries, led by [UNK]."""
    with tempfile.TemporaryDirectory() as tdir, \
            AuxiliaryFileManager(f'{tdir}/workspace') as afm:
        corpus = afm.create()
        with corpus.open('w') as fp:
            fp.write(_dummy_corpus_content)

        # Train WordPiece tokenizer and get vocabulary file.
        trainer = TrainTokenizer(vocab_size=128,
                                 limit_alphabet=64,
                                 unk_token='[UNK]')
        vocab = trainer.build(afm, corpus)

        # Read subwords from the vocabulary file.
        with vocab.open('r') as fp:
            entries = fp.readlines()

        # The vocabulary must be exactly vocab_size long and must contain
        # the unknown token (conventionally first).
        assert len(entries) == 128
        assert entries[0].strip() == '[UNK]'
def test_subword_tokenization():
    """Detokenized output must match the normalised (lower-cased) corpus."""
    with tempfile.TemporaryDirectory() as tdir, \
            AuxiliaryFileManager(f'{tdir}/workspace') as afm:
        corpus = afm.create()
        with corpus.open('w') as fp:
            fp.write(_dummy_corpus_content)

        # Train a WordPiece vocabulary, then tokenize the corpus with it.
        vocab = (TrainTokenizer(vocab_size=128, limit_alphabet=64)
                 .build(afm, corpus))
        tokenized = (TokenizeSentences(unk_token='[UNK]')
                     .build(afm, corpus, vocab))

        # Strip the '##' continuation markers and all whitespace; what
        # remains must equal the normalised original text.
        with tokenized.open('r') as fp:
            reconstructed = (fp.read().strip()
                             .replace('##', '').replace(' ', ''))
        assert reconstructed == _dummy_corpus_content.lower().replace(' ', '')
| 5,138 | 1,513 |
# pylint: disable=W0221
# pylint: disable=W0613
# pylint: disable=E1102
# pylint: disable=W0223
import shutil
from collections import defaultdict
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from sklearn.model_selection import train_test_split
from torch import nn
from torch.utils.data import Dataset, DataLoader
from transformers import (
BertModel,
BertTokenizer,
AdamW,
get_linear_schedule_with_warmup,
)
import argparse
import os
from tqdm import tqdm
import requests
import torchtext.datasets as td
import mlflow.pytorch
class_names = ["World", "Sports", "Business", "Sci/Tech"]
class AGNewsDataset(Dataset):
"""
Constructs the encoding with the dataset
"""
def __init__(self, reviews, targets, tokenizer, max_len):
self.reviews = reviews
self.targets = targets
self.tokenizer = tokenizer
self.max_len = max_len
def __len__(self):
return len(self.reviews)
def __getitem__(self, item):
review = str(self.reviews[item])
target = self.targets[item]
encoding = self.tokenizer.encode_plus(
review,
add_special_tokens=True,
max_length=self.max_len,
return_token_type_ids=False,
padding="max_length",
return_attention_mask=True,
return_tensors="pt",
truncation=True,
)
return {
"review_text": review,
"input_ids": encoding["input_ids"].flatten(),
"attention_mask": encoding["attention_mask"].flatten(),
"targets": torch.tensor(target, dtype=torch.long),
}
class NewsClassifier(nn.Module):
    """BERT-based classifier for the four AG News categories.

    The pretrained BERT encoder is frozen; only the classification head
    (fc1 + out) is trained. The instance also owns its data pipeline,
    optimizer and train/eval loops.
    """

    def __init__(self, args):
        super(NewsClassifier, self).__init__()
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.PRE_TRAINED_MODEL_NAME = "bert-base-uncased"
        self.EPOCHS = args.max_epochs
        self.df = None
        self.tokenizer = None
        self.df_train = None
        self.df_val = None
        self.df_test = None
        self.train_data_loader = None
        self.val_data_loader = None
        self.test_data_loader = None
        self.optimizer = None
        self.total_steps = None
        self.scheduler = None
        self.loss_fn = None
        self.BATCH_SIZE = 16
        self.MAX_LEN = 160
        self.NUM_SAMPLES_COUNT = args.num_samples
        n_classes = len(class_names)
        self.VOCAB_FILE_URL = args.vocab_file
        self.VOCAB_FILE = "bert_base_uncased_vocab.txt"
        self.drop = nn.Dropout(p=0.2)
        self.bert = BertModel.from_pretrained(self.PRE_TRAINED_MODEL_NAME)
        # Freeze the encoder: only the head below is optimised.
        for param in self.bert.parameters():
            param.requires_grad = False
        self.fc1 = nn.Linear(self.bert.config.hidden_size, 512)
        self.out = nn.Linear(512, n_classes)

    def forward(self, input_ids, attention_mask):
        """
        :param input_ids: Input sentences from the batch
        :param attention_mask: Attention mask returned by the encoder
        :return: output - label logits for the input text
        """
        pooled_output = self.bert(input_ids=input_ids, attention_mask=attention_mask).pooler_output
        output = F.relu(self.fc1(pooled_output))
        output = self.drop(output)
        output = self.out(output)
        return output

    @staticmethod
    def process_label(rating):
        """Map the CSV's 1-based class id to a 0-based label index."""
        rating = int(rating)
        return rating - 1

    def create_data_loader(self, df, tokenizer, max_len, batch_size):
        """
        :param df: DataFrame input
        :param tokenizer: Bert tokenizer
        :param max_len: maximum length of the input sentence
        :param batch_size: Input batch size
        :return: output - Corresponding data loader for the given input
        """
        ds = AGNewsDataset(
            reviews=df.description.to_numpy(),
            targets=df.label.to_numpy(),
            tokenizer=tokenizer,
            max_len=max_len,
        )
        return DataLoader(ds, batch_size=batch_size, num_workers=4)

    def prepare_data(self):
        """
        Creates train, valid and test dataloaders from the csv data
        """
        td.AG_NEWS(root="data", split=("train", "test"))
        extracted_files = os.listdir("data/AG_NEWS")
        train_csv_path = None
        for fname in extracted_files:
            if fname.endswith("train.csv"):
                train_csv_path = os.path.join(os.getcwd(), "data/AG_NEWS", fname)
        self.df = pd.read_csv(train_csv_path)
        self.df.columns = ["label", "title", "description"]
        # BUG FIX: DataFrame.sample returns a new frame; the previous code
        # discarded it, so the data was never actually shuffled before the
        # head-N subsample below.
        self.df = self.df.sample(frac=1).reset_index(drop=True)
        self.df = self.df.iloc[: self.NUM_SAMPLES_COUNT]
        self.df["label"] = self.df.label.apply(self.process_label)
        # Download the vocabulary file once and cache it on disk.
        if not os.path.isfile(self.VOCAB_FILE):
            filePointer = requests.get(self.VOCAB_FILE_URL, allow_redirects=True)
            if filePointer.ok:
                with open(self.VOCAB_FILE, "wb") as f:
                    f.write(filePointer.content)
            else:
                raise RuntimeError("Error in fetching the vocab file")
        self.tokenizer = BertTokenizer(self.VOCAB_FILE)
        RANDOM_SEED = 42
        np.random.seed(RANDOM_SEED)
        torch.manual_seed(RANDOM_SEED)
        # 90/5/5 stratified split into train / validation / test.
        self.df_train, self.df_test = train_test_split(
            self.df, test_size=0.1, random_state=RANDOM_SEED, stratify=self.df["label"]
        )
        self.df_val, self.df_test = train_test_split(
            self.df_test, test_size=0.5, random_state=RANDOM_SEED, stratify=self.df_test["label"]
        )
        self.train_data_loader = self.create_data_loader(
            self.df_train, self.tokenizer, self.MAX_LEN, self.BATCH_SIZE
        )
        self.val_data_loader = self.create_data_loader(
            self.df_val, self.tokenizer, self.MAX_LEN, self.BATCH_SIZE
        )
        self.test_data_loader = self.create_data_loader(
            self.df_test, self.tokenizer, self.MAX_LEN, self.BATCH_SIZE
        )

    def setOptimizer(self):
        """
        Sets the optimizer, LR scheduler and loss functions
        """
        # BUG FIX: this previously read the module-level global `model`
        # instead of `self` — it only worked because the script happens to
        # bind the instance to a global of that name.
        self.optimizer = AdamW(self.parameters(), lr=1e-3, correct_bias=False)
        self.total_steps = len(self.train_data_loader) * self.EPOCHS
        self.scheduler = get_linear_schedule_with_warmup(
            self.optimizer, num_warmup_steps=0, num_training_steps=self.total_steps
        )
        self.loss_fn = nn.CrossEntropyLoss().to(self.device)

    def startTraining(self, model):
        """
        Initialzes the Traning step with the model initialized
        :param model: Instance of the NewsClassifier class
        """
        history = defaultdict(list)
        best_accuracy = 0
        for epoch in range(self.EPOCHS):
            print(f"Epoch {epoch + 1}/{self.EPOCHS}")
            train_acc, train_loss = self.train_epoch(model)
            print(f"Train loss {train_loss} accuracy {train_acc}")
            val_acc, val_loss = self.eval_model(model, self.val_data_loader)
            print(f"Val loss {val_loss} accuracy {val_acc}")
            history["train_acc"].append(train_acc)
            history["train_loss"].append(train_loss)
            history["val_acc"].append(val_acc)
            history["val_loss"].append(val_loss)
            # Keep a checkpoint of the best validation accuracy so far.
            if val_acc > best_accuracy:
                torch.save(model.state_dict(), "best_model_state.bin")
                best_accuracy = val_acc

    def train_epoch(self, model):
        """
        Training process happens and accuracy is returned as output
        :param model: Instance of the NewsClassifier class
        :result: output - Accuracy of the model after training
        """
        model = model.train()
        losses = []
        correct_predictions = 0
        for data in tqdm(self.train_data_loader):
            input_ids = data["input_ids"].to(self.device)
            attention_mask = data["attention_mask"].to(self.device)
            targets = data["targets"].to(self.device)
            outputs = model(input_ids=input_ids, attention_mask=attention_mask)
            _, preds = torch.max(outputs, dim=1)
            loss = self.loss_fn(outputs, targets)
            correct_predictions += torch.sum(preds == targets)
            losses.append(loss.item())
            loss.backward()
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            self.optimizer.step()
            self.scheduler.step()
            self.optimizer.zero_grad()
        # NOTE: accuracy denominator assumes full batches; the last partial
        # batch makes this a slight underestimate.
        return (
            correct_predictions.double() / len(self.train_data_loader) / self.BATCH_SIZE,
            np.mean(losses),
        )

    def eval_model(self, model, data_loader):
        """
        Validation process happens and validation / test accuracy is returned as output
        :param model: Instance of the NewsClassifier class
        :param data_loader: Data loader for either test / validation dataset
        :result: output - Accuracy of the model after testing
        """
        model = model.eval()
        losses = []
        correct_predictions = 0
        with torch.no_grad():
            for d in data_loader:
                input_ids = d["input_ids"].to(self.device)
                attention_mask = d["attention_mask"].to(self.device)
                targets = d["targets"].to(self.device)
                outputs = model(input_ids=input_ids, attention_mask=attention_mask)
                _, preds = torch.max(outputs, dim=1)
                loss = self.loss_fn(outputs, targets)
                correct_predictions += torch.sum(preds == targets)
                losses.append(loss.item())
        # Same full-batch approximation as train_epoch.
        return correct_predictions.double() / len(data_loader) / self.BATCH_SIZE, np.mean(losses)

    def get_predictions(self, model, data_loader):
        """
        Prediction after the training step is over
        :param model: Instance of the NewsClassifier class
        :param data_loader: Data loader for either test / validation dataset
        :result: output - Returns prediction results,
        prediction probablities and corresponding values
        """
        model = model.eval()
        review_texts = []
        predictions = []
        prediction_probs = []
        real_values = []
        with torch.no_grad():
            for d in data_loader:
                texts = d["review_text"]
                input_ids = d["input_ids"].to(self.device)
                attention_mask = d["attention_mask"].to(self.device)
                targets = d["targets"].to(self.device)
                outputs = model(input_ids=input_ids, attention_mask=attention_mask)
                _, preds = torch.max(outputs, dim=1)
                probs = F.softmax(outputs, dim=1)
                review_texts.extend(texts)
                predictions.extend(preds)
                prediction_probs.extend(probs)
                real_values.extend(targets)
        predictions = torch.stack(predictions).cpu()
        prediction_probs = torch.stack(prediction_probs).cpu()
        real_values = torch.stack(real_values).cpu()
        return review_texts, predictions, prediction_probs, real_values
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="PyTorch BERT Example")
parser.add_argument(
"--max_epochs",
type=int,
default=5,
metavar="N",
help="number of epochs to train (default: 14)",
)
parser.add_argument(
"--num_samples",
type=int,
default=15000,
metavar="N",
help="Number of samples to be used for training "
"and evaluation steps (default: 15000) Maximum:100000",
)
parser.add_argument(
"--vocab_file",
default="https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
help="Custom vocab file",
)
parser.add_argument(
"--model_save_path", type=str, default="models", help="Path to save mlflow model"
)
args = parser.parse_args()
mlflow.start_run()
model = NewsClassifier(args)
model = model.to(model.device)
model.prepare_data()
model.setOptimizer()
model.startTraining(model)
print("TRAINING COMPLETED!!!")
test_acc, _ = model.eval_model(model, model.test_data_loader)
print(test_acc.item())
y_review_texts, y_pred, y_pred_probs, y_test = model.get_predictions(
model, model.test_data_loader
)
print("\n\n\n SAVING MODEL")
if os.path.exists(args.model_save_path):
shutil.rmtree(args.model_save_path)
mlflow.pytorch.save_model(
model,
path=args.model_save_path,
requirements_file="requirements.txt",
extra_files=["class_mapping.json", "bert_base_uncased_vocab.txt"],
)
mlflow.end_run()
| 12,819 | 4,024 |
__version__ = "0.2.2b"
| 23 | 14 |
def fibonacci(desired_index: int):
    """Return the Fibonacci number at desired_index (F(0)=0, F(1)=1).

    Raises ValueError for negative indices. Uses O(1) memory instead of
    materialising the whole sequence.
    """
    if desired_index < 0:
        raise ValueError(f'{desired_index} is less than 0')
    previous, current = 0, 1
    for _ in range(desired_index):
        previous, current = current, previous + current
    return previous
| 272 | 104 |
# Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Optional
from toil.lib.misc import printq
from toil.lib.retry import retry
from toil.lib import aws
try:
from boto.exception import BotoServerError
except ImportError:
BotoServerError = None # AWS/boto extra is not installed
logger = logging.getLogger(__name__)
@retry(errors=[BotoServerError])
def delete_iam_role(role_name: str, region: Optional[str] = None, quiet: bool = True):
    """Delete an AWS IAM role.

    Managed policies are detached and inline policies deleted first, since
    IAM refuses to delete a role that still has policies associated.

    :param role_name: Name of the IAM role to delete.
    :param region: AWS region to connect to; None uses the default.
    :param quiet: When True, suppress progress messages (see printq).
    """
    from boto.iam.connection import IAMConnection
    iam_client = aws.client('iam', region_name=region)
    iam_resource = aws.resource('iam', region_name=region)
    boto_iam_connection = IAMConnection()
    role = iam_resource.Role(role_name)
    # normal (managed) policies
    for attached_policy in role.attached_policies.all():
        printq(f'Now dissociating policy: {attached_policy.name} from role {role.name}', quiet)
        # Fixed: boto3's Role.detach_policy identifies the policy by ARN
        # (PolicyArn), not by name -- PolicyName is not a valid parameter.
        role.detach_policy(PolicyArn=attached_policy.arn)
    # inline policies
    for attached_policy in role.policies.all():
        printq(f'Deleting inline policy: {attached_policy.name} from role {role.name}', quiet)
        # couldn't find an easy way to remove inline policies with boto3; use boto
        boto_iam_connection.delete_role_policy(role.name, attached_policy.name)
    iam_client.delete_role(RoleName=role_name)
    printq(f'Role {role_name} successfully deleted.', quiet)
@retry(errors=[BotoServerError])
def delete_iam_instance_profile(instance_profile_name: str, region: Optional[str] = None, quiet: bool = True):
    """Delete an IAM instance profile after removing every role attached to it.

    :param instance_profile_name: Name of the instance profile to delete.
    :param region: AWS region to connect to; None uses the default.
    :param quiet: When True, suppress progress messages (see printq).
    """
    profile = aws.resource('iam', region_name=region).InstanceProfile(instance_profile_name)
    # IAM requires all roles to be removed before the profile can be deleted.
    for attached_role in profile.roles:
        printq(f'Now dissociating role: {attached_role.name} from instance profile {instance_profile_name}', quiet)
        profile.remove_role(RoleName=attached_role.name)
    profile.delete()
    printq(f'Instance profile "{instance_profile_name}" successfully deleted.', quiet)
@retry(errors=[BotoServerError])
def delete_sdb_domain(sdb_domain_name: str, region: Optional[str] = None, quiet: bool = True):
    """Delete an AWS SimpleDB domain.

    :param sdb_domain_name: Name of the SimpleDB domain to delete.
    :param region: AWS region to connect to; None uses the default.
    :param quiet: When True, suppress progress messages (see printq).
    """
    sdb_client = aws.client('sdb', region_name=region)
    sdb_client.delete_domain(DomainName=sdb_domain_name)
    # Fixed typo in the message: 'SBD' -> 'SDB'.
    printq(f'SDB Domain: "{sdb_domain_name}" successfully deleted.', quiet)
@retry(errors=[BotoServerError])
def delete_s3_bucket(bucket: str, region: Optional[str], quiet: bool = True):
    """Delete an S3 bucket, first removing every object version and delete marker.

    :param bucket: Name of the bucket to delete.
    :param region: AWS region the bucket lives in.
    :param quiet: When True, suppress progress messages (see printq).
    """
    printq(f'Deleting s3 bucket in region "{region}": {bucket}', quiet)
    s3_client = aws.client('s3', region_name=region)
    s3_resource = aws.resource('s3', region_name=region)
    # Versioned buckets cannot be deleted while any version or delete
    # marker remains, so page through and remove them all first.
    version_paginator = s3_client.get_paginator('list_object_versions')
    for page in version_paginator.paginate(Bucket=bucket):
        for entry in page.get('Versions', []) + page.get('DeleteMarkers', []):
            printq(f"    Deleting {entry['Key']} version {entry['VersionId']}", quiet)
            s3_client.delete_object(Bucket=bucket, Key=entry['Key'], VersionId=entry['VersionId'])
    s3_resource.Bucket(bucket).delete()
    printq(f'\n * Deleted s3 bucket successfully: {bucket}\n\n', quiet)
| 3,716 | 1,167 |
import os
import numpy as np
import pandas as pd
def read_data():
    """Read the raw Titanic train/test CSVs and return one combined dataframe.

    The test set gets a sentinel ``Survived`` value of -100 so the two sets
    can be split apart again after shared feature engineering (see write_data).

    Returns:
        pd.DataFrame: train + test rows, indexed by PassengerId.
    """
    raw_data_path = os.path.join(os.path.pardir, 'data', 'raw')
    train_file_path = os.path.join(raw_data_path, 'train.csv')
    test_file_path = os.path.join(raw_data_path, 'test.csv')
    train_df = pd.read_csv(train_file_path, index_col='PassengerId')
    test_df = pd.read_csv(test_file_path, index_col='PassengerId')
    # Sentinel marks test rows; real labels are 0/1.
    test_df['Survived'] = -100
    # Fixed typo: sort=-False (which is the int 0, not a bool) -> sort=False.
    df = pd.concat([train_df, test_df], sort=False, axis=0)
    return df
def process_data(df):
    """Engineer Titanic features on the combined dataframe.

    Adds Title, Fare_Bin, AgeState, FamilySize, IsMother, Deck and IsMale,
    fills missing values, one-hot encodes the categorical columns, drops the
    raw columns, and moves Survived to the front.
    """
    out = df.assign(Title=lambda x: x.Name.map(get_title))
    out = fill_missing_values(out)
    out = out.assign(Fare_Bin=lambda x: pd.qcut(x.Fare, 4, labels=['very_low', 'low', 'high', 'very_high']))
    out = out.assign(AgeState=lambda x: np.where(x.Age >= 18, 'Adult', 'Child'))
    out = out.assign(FamilySize=lambda x: x.Parch + x.SibSp + 1)
    out = out.assign(IsMother=lambda x: np.where(((x.Age > 18) & (x.Parch > 0) & (x.Title != 'Miss') & (x.Sex == 'female')), 1, 0))
    # The lone 'T' cabin is treated as missing before deriving the deck.
    out = out.assign(Cabin=lambda x: np.where(x.Cabin == 'T', np.nan, x.Cabin))
    out = out.assign(Deck=lambda x: x.Cabin.map(get_deck))
    out = out.assign(IsMale=lambda x: np.where(x.Sex == 'male', 1, 0))
    out = pd.get_dummies(out, columns=['Deck', 'Pclass', 'Title', 'Fare_Bin', 'Embarked', 'AgeState'])
    out = out.drop(['Cabin', 'Name', 'Ticket', 'Parch', 'SibSp', 'Sex'], axis=1)
    return reorder_columns(out)
# Map the many raw honorifics down to a small set of meaningful title groups.
def get_title(name):
    """Extract the honorific from a passenger name and map it to a title group.

    Names look like ``'Last, Title. First'``; the raw honorific is lowercased
    and mapped into one of: Mr, Mrs, Miss, Master, Sir, Officer, Lady.

    Raises:
        KeyError: If the name contains an honorific not in the map.
    """
    title_map = {
        'mr': 'Mr',
        'mrs': 'Mrs',
        'mme': 'Mrs',
        'ms': 'Mrs',
        'miss': 'Miss',
        'mlle': 'Miss',
        'master': 'Master',
        'don': 'Sir',
        'rev': 'Sir',
        'sir': 'Sir',
        'jonkheer': 'Sir',
        'dr': 'Officer',
        'major': 'Officer',
        # Fixed typo: 'capt' previously mapped to 'Office', creating a
        # spurious extra category alongside 'Officer'.
        'capt': 'Officer',
        'col': 'Officer',
        'lady': 'Lady',
        'the countess': 'Lady',
        'dona': 'Lady'
    }
    first_name_with_title = name.split(',')[1]
    raw_title = first_name_with_title.split('.')[0]
    title = raw_title.strip().lower()
    return title_map[title]
def get_deck(cabin):
    """Return the deck letter (first character of the cabin code, uppercased),
    or 'Z' when the cabin value is missing."""
    deck_letter = str(cabin)[0].upper()
    return np.where(pd.notnull(cabin), deck_letter, 'Z')
def fill_missing_values(df):
    """Impute missing Embarked, Fare and Age values in place and return df.

    Embarked -> 'C'; Fare -> median fare among 3rd-class Southampton
    passengers; Age -> median age of passengers sharing the same Title.
    """
    df.Embarked.fillna('C', inplace=True)
    third_class_southampton = df[(df.Pclass == 3) & (df.Embarked == 'S')]
    df.Fare.fillna(third_class_southampton['Fare'].median(), inplace=True)
    df.Age.fillna(df.groupby('Title').Age.transform('median'), inplace=True)
    return df
def reorder_columns(df):
    """Return df with the Survived column moved to the front."""
    other_columns = [c for c in df.columns if c != 'Survived']
    return df[['Survived'] + other_columns]
def write_data(df):
    """Split the combined dataframe back into train/test and write both CSVs.

    Rows carrying the Survived sentinel (-100) are the test set and are
    written without the Survived column.
    """
    processed_data_path = os.path.join(os.path.pardir, 'data', 'processed')
    train_path = os.path.join(processed_data_path, 'train.csv')
    test_path = os.path.join(processed_data_path, 'test.csv')
    df.loc[df.Survived != -100].to_csv(train_path)
    feature_columns = [c for c in df.columns if c != 'Survived']
    df.loc[df.Survived == -100][feature_columns].to_csv(test_path)
# Script entry point: read the raw CSVs, engineer features on the combined
# dataframe, and write the processed train/test files.
if __name__ == '__main__':
    df = read_data()
    df = process_data(df)
    write_data(df)
| 3,389 | 1,309 |
# define a class for networks
class Network(object):
'''
Networks have two states: the data state where they are stored as: matrix and
nodes and a viz state where they are stored as: viz.links, viz.row_nodes, viz.
col_nodes.
The goal is to start in a data-state and produce a viz-state of the network
that will be used as input to clustergram.js.
'''
def __init__(self):
# network: data-state
self.dat = {}
self.dat['nodes'] = {}
self.dat['nodes']['row'] = []
self.dat['nodes']['col'] = []
# node_info holds the orderings (ini, clust, rank), classification ('cl'),
# and other general information
self.dat['node_info'] = {}
for inst_rc in self.dat['nodes']:
self.dat['node_info'][inst_rc] = {}
self.dat['node_info'][inst_rc]['ini'] = []
self.dat['node_info'][inst_rc]['clust'] = []
self.dat['node_info'][inst_rc]['rank'] = []
self.dat['node_info'][inst_rc]['info'] = []
# classification is specifically used to color the class triangles
self.dat['node_info'][inst_rc]['cl'] = []
self.dat['node_info'][inst_rc]['value'] = []
# initialize matrix
self.dat['mat'] = []
# mat_info is an optional dictionary
# so I'm not including it by default
# network: viz-state
self.viz = {}
self.viz['row_nodes'] = []
self.viz['col_nodes'] = []
self.viz['links'] = []
def load_tsv_to_net(self, filename):
f = open(filename,'r')
lines = f.readlines()
f.close()
self.load_lines_from_tsv_to_net(lines)
  def pandas_load_tsv_to_net(self, file_buffer):
    '''
    Load a TSV from an in-memory buffer into the data-state.

    file_buffer: StringIO-like object -- must support getvalue() and be
    readable by pandas.read_table.

    If the second line's first field is empty, that line is treated as a
    per-column category row: the file is read with a two-level column
    header, the categories are saved to node_info['col']['cl'], and a
    category -> column-names mapping is saved to node_info['col_in_cat'].
    '''
    import pandas as pd
    # get lines and check for category and value info
    lines = file_buffer.getvalue().split('\n')
    # check for category info in headers (second line, first cell empty)
    cat_line = lines[1].split('\t')
    add_cat = False
    if cat_line[0] == '':
      add_cat = True
    tmp_df = {}
    if add_cat:
      # read in names and categories (two header rows)
      tmp_df['mat'] = pd.read_table(file_buffer, index_col=0, header=[0,1])
    else:
      # read in names only
      tmp_df['mat'] = pd.read_table(file_buffer, index_col=0, header=0)
    # save to self (df_to_dat is defined elsewhere in this class)
    self.df_to_dat(tmp_df)
    # add categories if necessary
    if add_cat:
      cat_line = [i.strip() for i in cat_line]
      self.dat['node_info']['col']['cl'] = cat_line[1:]
      # make a dict mapping each category to the column names in it
      col_in_cat = {}
      for i in range(len(self.dat['node_info']['col']['cl'])):
        inst_cat = self.dat['node_info']['col']['cl'][i]
        inst_col = self.dat['nodes']['col'][i]
        if inst_cat not in col_in_cat:
          col_in_cat[inst_cat] = []
        # collect col names for categories
        col_in_cat[inst_cat].append(inst_col)
      # save to node_info
      self.dat['node_info']['col_in_cat'] = col_in_cat
def load_lines_from_tsv_to_net(self, lines):
import numpy as np
# get row/col labels and data from lines
for i in range(len(lines)):
# get inst_line
inst_line = lines[i].rstrip().split('\t')
# strip each element
inst_line = [z.strip() for z in inst_line]
# get column labels from first row
if i == 0:
tmp_col_labels = inst_line
# add the labels
for inst_elem in range(len(tmp_col_labels)):
# skip the first element
if inst_elem > 0:
# get the column label
inst_col_label = tmp_col_labels[inst_elem]
# add to network data
self.dat['nodes']['col'].append(inst_col_label)
# get row info
if i > 0:
# save row labels
self.dat['nodes']['row'].append(inst_line[0])
# get data - still strings
inst_data_row = inst_line[1:]
# convert to float
inst_data_row = [float(tmp_dat) for tmp_dat in inst_data_row]
# save the row data as an array
inst_data_row = np.asarray(inst_data_row)
# initailize matrix
if i == 1:
self.dat['mat'] = inst_data_row
# add rows to matrix
if i > 1:
self.dat['mat'] = np.vstack( ( self.dat['mat'], inst_data_row ) )
  def load_l1000cds2(self, l1000cds2):
    '''
    Load an L1000CDS2 API result dict into the data-state.

    Two result flavors are handled:
      * gene-set input ('upGenes' present in the input data): the matrix
        holds +/-1 flags marking overlap between input genes (rows) and
        drug signatures (columns).
      * characteristic-direction input: the matrix columns are each drug's
        concatenated up/dn overlap vectors, and the column 'value' entries
        carry a transformed score.

    NOTE(review): scipy.zeros was removed from the SciPy namespace in
    modern releases; np.zeros is the drop-in replacement -- needs a code fix.
    '''
    import scipy
    import numpy as np
    # process gene set result
    if 'upGenes' in l1000cds2['input']['data']:
      # add the names from all the results
      all_results = l1000cds2['result']
      # grab col nodes - input sig and drugs
      self.dat['nodes']['col'] = []
      for i in range(len(all_results)):
        inst_result = all_results[i]
        # '#i' suffix keeps duplicate drug names unique
        self.dat['nodes']['col'].append(inst_result['name']+'#'+str(i))
        self.dat['node_info']['col']['value'].append(inst_result['score'])
        for type_overlap in inst_result['overlap']:
          self.dat['nodes']['row'].extend( inst_result['overlap'][type_overlap] )
      # unique, sorted union of all overlapping genes
      self.dat['nodes']['row'] = sorted(list(set(self.dat['nodes']['row'])))
      # initialize the matrix
      self.dat['mat'] = scipy.zeros([ len(self.dat['nodes']['row']), len(self.dat['nodes']['col']) ])
      # fill in the matrix with l10000 data
      ########################################
      # mark each row gene as up (+1) or down (-1) in the input signature
      for i in range(len(self.dat['nodes']['row'])):
        inst_gene = self.dat['nodes']['row'][i]
        # get gene index
        inst_gene_index = self.dat['nodes']['row'].index(inst_gene)
        # if gene is in up add 1 otherwise add -1
        if inst_gene in l1000cds2['input']['data']['upGenes']:
          self.dat['node_info']['row']['value'].append(1)
        else:
          self.dat['node_info']['row']['value'].append(-1)
      # save the name as a class
      for i in range(len(self.dat['nodes']['col'])):
        self.dat['node_info']['col']['cl'].append(self.dat['nodes']['col'][i])
      # swap keys for aggravate and reverse
      if l1000cds2['input']['aggravate'] == False:
        # reverse gene set
        up_type = 'up/dn'
        dn_type = 'dn/up'
      else:
        # mimic gene set
        up_type = 'up/up'
        dn_type = 'dn/dn'
      # loop through drug results
      for inst_result_index in range(len(all_results)):
        inst_result = all_results[inst_result_index]
        # for non-mimic if up/dn then it should be negative since the drug is dn
        # for mimic if up/up then it should be positive since the drug is up
        for inst_dn in inst_result['overlap'][up_type]:
          # get gene index
          inst_gene_index = self.dat['nodes']['row'].index(inst_dn)
          # save -1 to gene row and drug column
          if up_type == 'up/dn':
            self.dat['mat'][ inst_gene_index, inst_result_index ] = -1
          else:
            self.dat['mat'][ inst_gene_index, inst_result_index ] = 1
        # for non-mimic if dn/up then it should be positive since the drug is up
        # for mimic if dn/dn then it should be negative since the drug is dn
        for inst_up in inst_result['overlap'][dn_type]:
          # get gene index
          inst_gene_index = self.dat['nodes']['row'].index(inst_up)
          # save 1 to gene row and drug column
          if dn_type == 'dn/up':
            self.dat['mat'][ inst_gene_index, inst_result_index ] = 1
          else:
            self.dat['mat'][ inst_gene_index, inst_result_index ] = -1
    # process a characteristic direction vector result
    else:
      all_results = l1000cds2['result']
      # rows are the input up genes followed by the input dn genes
      self.dat['nodes']['row'] = l1000cds2['input']['data']['up']['genes'] + l1000cds2['input']['data']['dn']['genes']
      # save gene expression values
      tmp_exp_vect = l1000cds2['input']['data']['up']['vals'] + l1000cds2['input']['data']['dn']['vals']
      for i in range(len(self.dat['nodes']['row'])):
        self.dat['node_info']['row']['value'].append(tmp_exp_vect[i])
      # gather result names
      for i in range(len(all_results)):
        inst_result = all_results[i]
        # add result to list
        self.dat['nodes']['col'].append(inst_result['name']+'#'+str(i))
        self.dat['node_info']['col']['cl'].append(inst_result['name'])
        # reverse signature, score [1,2]
        if l1000cds2['input']['aggravate'] == False:
          self.dat['node_info']['col']['value'].append( inst_result['score']-1 )
        else:
          self.dat['node_info']['col']['value'].append( 1 - inst_result['score'] )
        # concat up and down lists into a single column vector
        inst_vect = inst_result['overlap']['up'] + inst_result['overlap']['dn']
        inst_vect = np.transpose(np.asarray(inst_vect))
        inst_vect = inst_vect.reshape(-1,1)
        # initialize or add to matrix (mat starts out as a plain list)
        if type(self.dat['mat']) is list:
          self.dat['mat'] = inst_vect
        else:
          self.dat['mat'] = np.hstack(( self.dat['mat'], inst_vect))
  def load_vect_post_to_net(self, vect_post):
    '''
    Load a 'vector post' dict into the data-state.

    vect_post['columns'] is a list of signatures, each with a 'col_name'
    and 'data' (a list of {'row_name', 'val'} dicts, with optional
    'val_up'/'val_dn' entries when vect_post['is_up_down'] is true).
    Builds a NaN-initialized matrix over the union of all row names x all
    signature names and fills in the posted values; missing combinations
    stay NaN.
    '''
    import numpy as np
    # get all signatures (a.k.a. columns)
    sigs = vect_post['columns']
    # get all rows from signatures
    all_rows = []
    all_sigs = []
    for inst_sig in sigs:
      # gather sig names
      all_sigs.append(inst_sig['col_name'])
      # get column
      col_data = inst_sig['data']
      # gather row names
      for inst_row_data in col_data:
        # get gene name
        all_rows.append( inst_row_data['row_name'] )
    # get unique sorted list of genes
    all_rows = sorted(list(set(all_rows)))
    all_sigs = sorted(list(set(all_sigs)))
    print( 'found ' + str(len(all_rows)) + ' rows' )
    print( 'found ' + str(len(all_sigs)) + ' columns\n' )
    # save genes and sigs to nodes
    self.dat['nodes']['row'] = all_rows
    self.dat['nodes']['col'] = all_sigs
    # initialize numpy matrix of nans
    self.dat['mat'] = np.empty((len(all_rows),len(all_sigs)))
    self.dat['mat'][:] = np.nan
    is_up_down = False
    if 'is_up_down' in vect_post:
      if vect_post['is_up_down'] == True:
        is_up_down = True
    # optional companion matrices for up/down values
    if is_up_down == True:
      self.dat['mat_up'] = np.empty((len(all_rows),len(all_sigs)))
      self.dat['mat_up'][:] = np.nan
      self.dat['mat_dn'] = np.empty((len(all_rows),len(all_sigs)))
      self.dat['mat_dn'][:] = np.nan
    # loop through all signatures and rows
    # and place information into self.dat
    for inst_sig in sigs:
      # get sig name
      inst_sig_name = inst_sig['col_name']
      # get row data
      col_data = inst_sig['data']
      # loop through column
      for inst_row_data in col_data:
        # add row data to signature matrix
        inst_row = inst_row_data['row_name']
        inst_value = inst_row_data['val']
        # find index of row and sig in matrix
        row_index = all_rows.index(inst_row)
        col_index = all_sigs.index(inst_sig_name)
        # save inst_value to matrix
        self.dat['mat'][row_index, col_index] = inst_value
        if is_up_down == True:
          self.dat['mat_up'][row_index, col_index] = inst_row_data['val_up']
          self.dat['mat_dn'][row_index, col_index] = inst_row_data['val_dn']
  def load_data_file_to_net(self, filename):
    '''Read a JSON data file and load it into the data-state
    (load_json_to_dict is defined elsewhere in this class).'''
    # load json from file to new dictionary
    inst_dat = self.load_json_to_dict(filename)
    # convert dat['mat'] to numpy array and add to network
    self.load_data_to_net(inst_dat)
def load_data_to_net(self, inst_net):
''' load data into nodes and mat, also convert mat to numpy array'''
self.dat['nodes'] = inst_net['nodes']
self.dat['mat'] = inst_net['mat']
# convert to numpy array
self.mat_to_numpy_arr()
def export_net_json(self, net_type, indent='no-indent'):
''' export json string of dat '''
import json
from copy import deepcopy
if net_type == 'dat':
exp_dict = deepcopy(self.dat)
# convert numpy array to list
if type(exp_dict['mat']) is not list:
exp_dict['mat'] = exp_dict['mat'].tolist()
elif net_type == 'viz':
exp_dict = self.viz
# make json
if indent == 'indent':
exp_json = json.dumps(exp_dict, indent=2)
else:
exp_json = json.dumps(exp_dict)
return exp_json
def write_json_to_file(self, net_type, filename, indent='no-indent'):
import json
# get dat or viz representation as json string
if net_type == 'dat':
exp_json = self.export_net_json('dat', indent)
elif net_type == 'viz':
exp_json = self.export_net_json('viz', indent)
# save to file
fw = open(filename, 'w')
fw.write( exp_json )
fw.close()
def set_node_names(self, row_name, col_name):
'''give names to the rows and columns'''
self.dat['node_names'] = {}
self.dat['node_names']['row'] = row_name
self.dat['node_names']['col'] = col_name
def mat_to_numpy_arr(self):
''' convert list to numpy array - numpy arrays can not be saved as json '''
import numpy as np
self.dat['mat'] = np.asarray( self.dat['mat'] )
def swap_nan_for_zero(self):
import numpy as np
self.dat['mat'][ np.isnan( self.dat['mat'] ) ] = 0
  def filter_row_thresh( self, row_filt_int, filter_type='value' ):
    '''
    Remove rows from the matrix that do not meet a threshold, then
    cherry-pick the surviving rows' data into new matrices.

    filter_type:
      'value': keep a row if at least one absolute value in it reaches
               row_filt_int times the matrix's maximum absolute value.
      'num':   keep a row if it has at least row_filt_int * 10 non-zero
               values.
      'sum':   keep a row if the sum of its absolute values exceeds
               row_filt_int times the largest such row sum.

    Updates nodes, node_info['info'], and the optional mat_up/mat_dn and
    mat_info companions to match the filtered matrix.

    NOTE(review): scipy.zeros was removed from the SciPy namespace in
    modern releases; np.zeros is the drop-in replacement -- needs a code fix.
    '''
    import scipy
    import numpy as np
    # max absolute value in the matrix
    mat = self.dat['mat']
    max_mat = abs(max(mat.min(), mat.max(), key=abs))
    # maximum number of measurements
    # NOTE(review): max_num appears to be unused below
    max_num = len(self.dat['nodes']['col'])
    mat_abs = abs(mat)
    sum_row = np.sum(mat_abs, axis=1)
    max_sum = max(sum_row)
    # transfer the nodes (columns are kept as-is; rows are rebuilt)
    nodes = {}
    nodes['row'] = []
    nodes['col'] = self.dat['nodes']['col']
    # transfer the 'info' part of node_info if necessary
    node_info = {}
    node_info['row'] = []
    node_info['col'] = self.dat['node_info']['col']['info']
    # filter rows
    #################################
    for i in range(len(self.dat['nodes']['row'])):
      # get row name
      inst_nodes_row = self.dat['nodes']['row'][i]
      # get node info - disregard ini, clust, and rank orders
      if len(self.dat['node_info']['row']['info']) > 0:
        inst_node_info = self.dat['node_info']['row']['info'][i]
      # get absolute value of row data
      row_vect = np.absolute(self.dat['mat'][i,:])
      # value: is there at least one value over cutoff
      ##################################################
      if filter_type == 'value':
        # calc cutoff
        cutoff = row_filt_int * max_mat
        # count the number of values above some thresh
        found_tuple = np.where(row_vect >= cutoff)
        if len(found_tuple[0])>=1:
          # add name
          nodes['row'].append(inst_nodes_row)
          # add info if necessary
          if len(self.dat['node_info']['row']['info']) > 0:
            node_info['row'].append(inst_node_info)
      elif filter_type == 'num':
        num_nonzero = np.count_nonzero(row_vect)
        # use integer number of non-zero measurements
        cutoff = row_filt_int * 10
        if num_nonzero>= cutoff:
          # add name
          nodes['row'].append(inst_nodes_row)
          # add info if necessary
          if len(self.dat['node_info']['row']['info']) > 0:
            node_info['row'].append(inst_node_info)
      elif filter_type == 'sum':
        inst_row_sum = sum(abs(row_vect))
        if inst_row_sum > row_filt_int*max_sum:
          # add name
          nodes['row'].append(inst_nodes_row)
          # add info if necessary
          if len(self.dat['node_info']['row']['info']) > 0:
            node_info['row'].append(inst_node_info)
    # cherrypick data from self.dat['mat']
    ##################################
    # filtered matrix
    filt_mat = scipy.zeros([ len(nodes['row']), len(nodes['col']) ])
    if 'mat_up' in self.dat:
      filt_mat_up = scipy.zeros([ len(nodes['row']), len(nodes['col']) ])
      filt_mat_dn = scipy.zeros([ len(nodes['row']), len(nodes['col']) ])
    if 'mat_info' in self.dat:
      # initialize filtered mat_info dictionary with tuple keys
      filt_mat_info = {}
    # loop through the rows
    for i in range(len(nodes['row'])):
      inst_row = nodes['row'][i]
      # loop through the cols
      for j in range(len(nodes['col'])):
        inst_col = nodes['col'][j]
        # get row and col index in the unfiltered matrix
        pick_row = self.dat['nodes']['row'].index(inst_row)
        pick_col = self.dat['nodes']['col'].index(inst_col)
        # cherrypick
        ###############
        filt_mat[i,j] = self.dat['mat'][pick_row, pick_col]
        if 'mat_up' in self.dat:
          filt_mat_up[i,j] = self.dat['mat_up'][pick_row, pick_col]
          filt_mat_dn[i,j] = self.dat['mat_dn'][pick_row, pick_col]
        if 'mat_info' in self.dat:
          filt_mat_info[str((i,j))] = self.dat['mat_info'][str((pick_row,pick_col))]
    # save nodes array - list of node names
    self.dat['nodes'] = nodes
    # save node_info array - list of node infos
    self.dat['node_info']['row']['info'] = node_info['row']
    self.dat['node_info']['col']['info'] = node_info['col']
    # overwrite with new filtered data
    self.dat['mat'] = filt_mat
    # overwrite with up/dn data if necessary
    if 'mat_up' in self.dat:
      self.dat['mat_up'] = filt_mat_up
      self.dat['mat_dn'] = filt_mat_dn
    # overwrite mat_info if necessary
    if 'mat_info' in self.dat:
      self.dat['mat_info'] = filt_mat_info
    print( 'final mat shape' + str(self.dat['mat'].shape ) + '\n')
  def filter_col_thresh( self, cutoff, min_num_meet ):
    '''
    Remove columns from the matrix that do not have at least min_num_meet
    values with an absolute value >= cutoff, then cherry-pick the surviving
    columns' data into new matrices. Rows are left untouched.

    Updates nodes, node_info['info'], and the optional mat_up/mat_dn and
    mat_info companions to match the filtered matrix.

    NOTE(review): scipy.zeros was removed from the SciPy namespace in
    modern releases; np.zeros is the drop-in replacement -- needs a code fix.
    '''
    import scipy
    import numpy as np
    # transfer the nodes (rows are kept as-is; columns are rebuilt)
    nodes = {}
    nodes['row'] = self.dat['nodes']['row']
    nodes['col'] = []
    # transfer the 'info' part of node_info if necessary
    node_info = {}
    node_info['row'] = self.dat['node_info']['row']['info']
    node_info['col'] = []
    # keep cols with enough values above the cutoff
    #################################
    for i in range(len(self.dat['nodes']['col'])):
      # get col name
      inst_nodes_col = self.dat['nodes']['col'][i]
      # get node info - disregard ini, clust, and rank orders
      if len(self.dat['node_info']['col']['info']) > 0:
        inst_node_info = self.dat['node_info']['col']['info'][i]
      # get col vect
      col_vect = np.absolute(self.dat['mat'][:,i])
      # count how many entries reach the cutoff
      found_tuple = np.where(col_vect >= cutoff)
      if len(found_tuple[0])>=min_num_meet:
        # add name
        nodes['col'].append(inst_nodes_col)
        # add info if necessary
        if len(self.dat['node_info']['col']['info']) > 0:
          node_info['col'].append(inst_node_info)
    # cherrypick data from self.dat['mat']
    ##################################
    # filtered matrix
    filt_mat = scipy.zeros([ len(nodes['row']), len(nodes['col']) ])
    if 'mat_up' in self.dat:
      filt_mat_up = scipy.zeros([ len(nodes['row']), len(nodes['col']) ])
      filt_mat_dn = scipy.zeros([ len(nodes['row']), len(nodes['col']) ])
    if 'mat_info' in self.dat:
      # initialize filtered mat_info dictionary with tuple keys
      filt_mat_info = {}
    # loop through the rows
    for i in range(len(nodes['row'])):
      inst_row = nodes['row'][i]
      # loop through the cols
      for j in range(len(nodes['col'])):
        inst_col = nodes['col'][j]
        # get row and col index in the unfiltered matrix
        pick_row = self.dat['nodes']['row'].index(inst_row)
        pick_col = self.dat['nodes']['col'].index(inst_col)
        # cherrypick
        ###############
        filt_mat[i,j] = self.dat['mat'][pick_row, pick_col]
        if 'mat_up' in self.dat:
          filt_mat_up[i,j] = self.dat['mat_up'][pick_row, pick_col]
          filt_mat_dn[i,j] = self.dat['mat_dn'][pick_row, pick_col]
        if 'mat_info' in self.dat:
          filt_mat_info[str((i,j))] = self.dat['mat_info'][str((pick_row,pick_col))]
    # save nodes array - list of node names
    self.dat['nodes'] = nodes
    # save node_info array - list of node infos
    self.dat['node_info']['row']['info'] = node_info['row']
    self.dat['node_info']['col']['info'] = node_info['col']
    # overwrite with new filtered data
    self.dat['mat'] = filt_mat
    # overwrite with up/dn data if necessary
    if 'mat_up' in self.dat:
      self.dat['mat_up'] = filt_mat_up
      self.dat['mat_dn'] = filt_mat_dn
    # overwrite mat_info if necessary
    if 'mat_info' in self.dat:
      self.dat['mat_info'] = filt_mat_info
    print( 'final mat shape' + str(self.dat['mat'].shape ) + '\n')
  def filter_network_thresh( self, cutoff, min_num_meet ):
    '''
    Remove both rows and columns from the matrix that do not have at least
    min_num_meet values with an absolute value >= cutoff, then cherry-pick
    the surviving data into new matrices.

    Row and column keep-decisions are both made against the ORIGINAL
    matrix, then the intersection is extracted.

    NOTE(review): scipy.zeros was removed from the SciPy namespace in
    modern releases; np.zeros is the drop-in replacement -- needs a code fix.
    '''
    import scipy
    import numpy as np
    # transfer the nodes (both dimensions are rebuilt)
    nodes = {}
    nodes['row'] = []
    nodes['col'] = []
    # transfer the 'info' part of node_info if necessary
    node_info = {}
    node_info['row'] = []
    node_info['col'] = []
    # keep rows with enough values above the cutoff
    #################################
    for i in range(len(self.dat['nodes']['row'])):
      # get row name
      inst_nodes_row = self.dat['nodes']['row'][i]
      # get node info - disregard ini, clust, and rank orders
      if len(self.dat['node_info']['row']['info']) > 0:
        inst_node_info = self.dat['node_info']['row']['info'][i]
      # get row vect
      row_vect = np.absolute(self.dat['mat'][i,:])
      # count how many entries reach the cutoff
      found_tuple = np.where(row_vect >= cutoff)
      if len(found_tuple[0])>=min_num_meet:
        # add name
        nodes['row'].append(inst_nodes_row)
        # add info if necessary
        if len(self.dat['node_info']['row']['info']) > 0:
          node_info['row'].append(inst_node_info)
    # keep cols with enough values above the cutoff
    #################################
    for i in range(len(self.dat['nodes']['col'])):
      # get col name
      inst_nodes_col = self.dat['nodes']['col'][i]
      # get node info - disregard ini, clust, and rank orders
      if len(self.dat['node_info']['col']['info']) > 0:
        inst_node_info = self.dat['node_info']['col']['info'][i]
      # get col vect
      col_vect = np.absolute(self.dat['mat'][:,i])
      # count how many entries reach the cutoff
      found_tuple = np.where(col_vect >= cutoff)
      if len(found_tuple[0])>=min_num_meet:
        # add name
        nodes['col'].append(inst_nodes_col)
        # add info if necessary
        if len(self.dat['node_info']['col']['info']) > 0:
          node_info['col'].append(inst_node_info)
    # cherrypick data from self.dat['mat']
    ##################################
    # filtered matrix
    filt_mat = scipy.zeros([ len(nodes['row']), len(nodes['col']) ])
    if 'mat_up' in self.dat:
      filt_mat_up = scipy.zeros([ len(nodes['row']), len(nodes['col']) ])
      filt_mat_dn = scipy.zeros([ len(nodes['row']), len(nodes['col']) ])
    if 'mat_info' in self.dat:
      # initialize filtered mat_info dictionary with tuple keys
      filt_mat_info = {}
    # loop through the rows
    for i in range(len(nodes['row'])):
      inst_row = nodes['row'][i]
      # loop through the cols
      for j in range(len(nodes['col'])):
        inst_col = nodes['col'][j]
        # get row and col index in the unfiltered matrix
        pick_row = self.dat['nodes']['row'].index(inst_row)
        pick_col = self.dat['nodes']['col'].index(inst_col)
        # cherrypick
        ###############
        filt_mat[i,j] = self.dat['mat'][pick_row, pick_col]
        if 'mat_up' in self.dat:
          filt_mat_up[i,j] = self.dat['mat_up'][pick_row, pick_col]
          filt_mat_dn[i,j] = self.dat['mat_dn'][pick_row, pick_col]
        if 'mat_info' in self.dat:
          filt_mat_info[str((i,j))] = self.dat['mat_info'][str((pick_row,pick_col))]
    # save nodes array - list of node names
    self.dat['nodes'] = nodes
    # save node_info array - list of node infos
    self.dat['node_info']['row']['info'] = node_info['row']
    self.dat['node_info']['col']['info'] = node_info['col']
    # overwrite with new filtered data
    self.dat['mat'] = filt_mat
    # overwrite with up/dn data if necessary
    if 'mat_up' in self.dat:
      self.dat['mat_up'] = filt_mat_up
      self.dat['mat_dn'] = filt_mat_dn
    # overwrite mat_info if necessary
    if 'mat_info' in self.dat:
      self.dat['mat_info'] = filt_mat_info
    print( 'final mat shape' + str(self.dat['mat'].shape ) + '\n')
  def keep_max_num_links(self, keep_num_links):
    '''
    Sparsify the matrix: raise a threshold in 1% steps of the maximum
    absolute value until at most keep_num_links entries exceed it, then
    zero out every entry with an absolute value below that threshold.

    Returns the number of surviving links. If a step zeroes everything,
    the threshold is backed off one step before exiting the loop.
    '''
    print('\trun keep_max_num_links')
    max_mat_value = abs(self.dat['mat']).max()
    # check the total number of links
    inst_thresh = 0
    inst_pct_max = 0
    inst_num_links = (abs(self.dat['mat'])>inst_thresh).sum()
    # NOTE(review): the next two prints report the same count twice
    print('initially there are '+str(inst_num_links)+' links ')
    print('there are initially '+str(inst_num_links)+'\n')
    thresh_fraction = 100
    while (inst_num_links > keep_num_links):
      # increase the threshold as a pct of max value in mat
      inst_pct_max = inst_pct_max + 1
      # increase threshold
      inst_thresh = max_mat_value*(float(inst_pct_max)/thresh_fraction)
      # check the number of links above the curr threshold
      inst_num_links = (abs(self.dat['mat'])>inst_thresh).sum()
      print('there are '+str(inst_num_links)+ ' links at threshold '+str(inst_pct_max)+'pct and value of ' +str(inst_thresh)+'\n')
      # if there are no links left, back the threshold off one step
      # (the loop then exits because 0 <= keep_num_links)
      if inst_num_links == 0:
        inst_pct_max = inst_pct_max - 1
        inst_thresh = max_mat_value*(float(inst_pct_max)/thresh_fraction)
    print('final number of links '+str(inst_num_links))
    # replace values that are less than thresh with zero
    self.dat['mat'][ abs(self.dat['mat']) < inst_thresh] = 0
    # return number of links
    return (abs(self.dat['mat'])>inst_thresh).sum()
def cluster_row_and_col(self, dist_type='cosine', linkage_type='average', dendro=True, \
run_clustering=True, run_rank=True):
'''
cluster net.dat and make visualization json, net.viz.
optionally leave out dendrogram colorbar groups with dendro argument
'''
import scipy
import numpy as np
from scipy.spatial.distance import pdist
from copy import deepcopy
# do not make dendrogram is you are not running clusttering
if run_clustering == False:
dendro = False
# make distance matrices
##########################
# get number of rows and columns from self.dat
num_row = len(self.dat['nodes']['row'])
num_col = len(self.dat['nodes']['col'])
# initialize distance matrices
row_dm = scipy.zeros([num_row,num_row])
col_dm = scipy.zeros([num_col,num_col])
# make copy of matrix
tmp_mat = deepcopy(self.dat['mat'])
# calculate distance matrix
row_dm = pdist( tmp_mat, metric=dist_type )
col_dm = pdist( tmp_mat.transpose(), metric=dist_type )
# prevent negative values
row_dm[row_dm < 0] = float(0)
col_dm[col_dm < 0] = float(0)
# initialize clust order
clust_order = self.ini_clust_order()
# initial ordering
###################
clust_order['row']['ini'] = range(num_row, -1, -1)
clust_order['col']['ini'] = range(num_col, -1, -1)
# cluster
if run_clustering == True:
clust_order['row']['clust'], clust_order['row']['group'] = \
self.clust_and_group(row_dm, linkage_type=linkage_type)
clust_order['col']['clust'], clust_order['col']['group'] = \
self.clust_and_group(col_dm, linkage_type=linkage_type)
# rank
if run_rank == True:
clust_order['row']['rank'] = self.sort_rank_nodes('row')
clust_order['col']['rank'] = self.sort_rank_nodes('col')
# save clustering orders to node_info
if run_clustering == True:
self.dat['node_info']['row']['clust'] = clust_order['row']['clust']
self.dat['node_info']['col']['clust'] = clust_order['col']['clust']
else:
self.dat['node_info']['row']['clust'] = clust_order['row']['ini']
self.dat['node_info']['col']['clust'] = clust_order['col']['ini']
if run_rank == True:
self.dat['node_info']['row']['rank'] = clust_order['row']['rank']
self.dat['node_info']['col']['rank'] = clust_order['col']['rank']
else:
self.dat['node_info']['row']['rank'] = clust_order['row']['ini']
self.dat['node_info']['col']['rank'] = clust_order['col']['ini']
# transfer ordereings
# row
self.dat['node_info']['row']['ini'] = clust_order['row']['ini']
self.dat['node_info']['row']['group'] = clust_order['row']['group']
# col
self.dat['node_info']['col']['ini'] = clust_order['col']['ini']
self.dat['node_info']['col']['group'] = clust_order['col']['group']
#!! disabled temporarily
# if len(self.dat['node_info']['col']['cl']) > 0:
# self.calc_cat_clust_order()
# make the viz json - can optionally leave out dendrogram
self.viz_json(dendro)
def calc_cat_clust_order(self):
from clustergrammer import Network
from copy import deepcopy
col_in_cat = self.dat['node_info']['col_in_cat']
# alpha order categories
all_cats = sorted(col_in_cat.keys())
# cluster each category
##############################
# calc clustering of each category
all_cat_orders = []
# this is the ordering of the columns based on their category, not
# including their clustering order on top of their category
tmp_col_names_list = []
for inst_cat in all_cats:
inst_cols = col_in_cat[inst_cat]
# keep a list of the columns
tmp_col_names_list.extend(inst_cols)
cat_net = deepcopy(Network())
cat_net.dat['mat'] = deepcopy(self.dat['mat'])
cat_net.dat['nodes'] = deepcopy(self.dat['nodes'])
# get dataframe, to simplify column filtering
cat_df = cat_net.dat_to_df()
# get subset of dataframe
sub_df = {}
sub_df['mat'] = cat_df['mat'][inst_cols]
# load back to dat
cat_net.df_to_dat(sub_df)
try:
cat_net.cluster_row_and_col('cos')
inst_cat_order = cat_net.dat['node_info']['col']['clust']
except:
inst_cat_order = range(len(cat_net.dat['nodes']['col']))
prev_order_len = len(all_cat_orders)
# add previous order length to the current order number
inst_cat_order = [i+prev_order_len for i in inst_cat_order]
all_cat_orders.extend(inst_cat_order)
# sort tmp_col_names_lust by the integers in all_cat_orders
names_col_cat_clust = [x for (y,x) in sorted(zip(all_cat_orders,tmp_col_names_list))]
# calc category-cluster order
##############################
final_order = []
for i in range(len(self.dat['nodes']['col'])):
# get the rank of the col in the order of col_nodes
inst_col_name = self.dat['nodes']['col'][i]
inst_col_num = names_col_cat_clust.index(inst_col_name)
final_order.append(inst_col_num)
self.dat['node_info']['col']['cl_index'] = final_order
def clust_and_group( self, dm, linkage_type='average' ):
    """Hierarchically cluster the condensed distance matrix *dm*.

    Returns a tuple (leaf_order, groups): the dendrogram leaf ordering and,
    for every distance cutoff from group_cutoffs(), a flat-cluster assignment
    keyed by the cutoff with its '.' removed (e.g. 0.5 -> '05').
    """
    import scipy.cluster.hierarchy as hier
    # build the linkage matrix and a (non-plotted) dendrogram from it
    linkage_matrix = hier.linkage( dm, method=linkage_type )
    dendro = hier.dendrogram( linkage_matrix, no_plot=True )
    leaf_order = dendro['leaves']
    # cutoffs are fractions of the maximum pairwise distance
    max_dist = dm.max()
    groups = {}
    for cutoff in self.group_cutoffs():
        key = str(cutoff).replace('.', '')
        flat_clusters = hier.fcluster(linkage_matrix, cutoff * max_dist, 'distance')
        groups[key] = flat_clusters.tolist()
    return leaf_order, groups
def sort_rank_node_values( self, rowcol ):
    """Rank the row or column nodes by their 'value' entry.

    Returns a list where position k holds the rank (0 = smallest value)
    of node k in self.dat['nodes'][rowcol].
    """
    import numpy as np
    from operator import itemgetter
    from copy import deepcopy
    # work on copies so the stored node info is never mutated
    nodes = deepcopy(self.dat['nodes'][rowcol])
    values = deepcopy(self.dat['node_info'][rowcol]['value'])
    # pair each node name with its value, then sort the pairs by value
    pairs = [{'name': name, 'value': val} for name, val in zip(nodes, values)]
    pairs = sorted(pairs, key=itemgetter('value'))
    # node names in ascending-value order
    ranked_names = [pair['name'] for pair in pairs]
    # rank of each node = its position in the value-sorted list
    return [ranked_names.index(name) for name in nodes]
def sort_rank_nodes( self, rowcol ):
    """Rank the row or column nodes by their matrix sums.

    Returns a list where position k holds the rank (0 = smallest total)
    of node k in self.dat['nodes'][rowcol].
    """
    import numpy as np
    from operator import itemgetter
    from copy import deepcopy
    # work on copies so stored data is never mutated
    nodes = deepcopy(self.dat['nodes'][rowcol])
    mat = deepcopy(self.dat['mat'])
    totals = []
    for idx, name in enumerate(nodes):
        # sum this node's matrix slice (row or column)
        if rowcol == 'row':
            node_total = np.sum(mat[idx, :])
        else:
            node_total = np.sum(mat[:, idx])
        totals.append({'name': name, 'total': node_total})
    # ascending sort by total (stable, like the original)
    totals = sorted(totals, key=itemgetter('total'), reverse=False)
    ordered_names = [entry['name'] for entry in totals]
    # rank of each node = its position in the total-sorted list
    return [ordered_names.index(name) for name in nodes]
def viz_json(self, dendro=True):
    ''' make the dictionary for the clustergram.js visualization

    Fills self.viz['row_nodes'] / ['col_nodes'] with one dict per node
    (name, ini, clust, rank, optional cl/cl_index/value/info/group) and
    self.viz['links'] with one dict per non-zero matrix cell.
    '''
    # get dendrogram cutoff distances
    all_dist = self.group_cutoffs()

    # make nodes for viz
    #####################
    # make rows and cols
    for inst_rc in self.dat['nodes']:
        for i in range(len( self.dat['nodes'][inst_rc] )):
            inst_dict = {}
            inst_dict['name'] = self.dat['nodes'][inst_rc][i]
            inst_dict['ini'] = self.dat['node_info'][inst_rc]['ini'][i]
            #!! clean this up so I do not have to get the index here
            inst_dict['clust'] = self.dat['node_info'][inst_rc]['clust'].index(i)
            inst_dict['rank'] = self.dat['node_info'][inst_rc]['rank'][i]
            # add node class cl
            if len(self.dat['node_info'][inst_rc]['cl']) > 0:
                inst_dict['cl'] = self.dat['node_info'][inst_rc]['cl'][i]
            # add node class cl_index
            # BUG FIX: original read `if 'cl_index' in node_info > 0`, a
            # chained comparison that also evaluates `node_info > 0`
            # (a TypeError on Python 3); a plain membership test was intended.
            if 'cl_index' in self.dat['node_info'][inst_rc]:
                inst_dict['cl_index'] = self.dat['node_info'][inst_rc]['cl_index'][i]
            # add node class val
            if len(self.dat['node_info'][inst_rc]['value']) > 0:
                inst_dict['value'] = self.dat['node_info'][inst_rc]['value'][i]
            # add node information
            if len(self.dat['node_info'][inst_rc]['info']) > 0:
                inst_dict['info'] = self.dat['node_info'][inst_rc]['info'][i]
            # group info: one flat-cluster assignment per dendrogram cutoff
            if dendro==True:
                inst_dict['group'] = []
                for tmp_dist in all_dist:
                    # read group info in correct order; keys are cutoffs
                    # with the '.' removed (see clust_and_group)
                    tmp_dist = str(tmp_dist).replace('.','')
                    inst_dict['group'].append( float( self.dat['node_info'][inst_rc]['group'][tmp_dist][i] ) )
            # append dictionary to list of nodes
            self.viz[inst_rc+'_nodes'].append(inst_dict)

    # links
    ########
    for i in range(len( self.dat['nodes']['row'] )):
        for j in range(len( self.dat['nodes']['col'] )):
            if abs( self.dat['mat'][i,j] ) > 0:
                inst_dict = {}
                inst_dict['source'] = i
                inst_dict['target'] = j
                inst_dict['value'] = self.dat['mat'][i,j]
                # add up/dn values if necessary
                # NOTE(review): both branches test 'mat_up'; mat_up/mat_dn are
                # always stored together (see df_to_dat) so this holds, but
                # the second check presumably meant 'mat_dn' - confirm.
                if 'mat_up' in self.dat:
                    inst_dict['value_up'] = self.dat['mat_up'][i,j]
                if 'mat_up' in self.dat:
                    inst_dict['value_dn'] = self.dat['mat_dn'][i,j]
                # add information if necessary - use dictionary with tuple key
                # each element of the matrix needs to have information
                if 'mat_info' in self.dat:
                    # use tuple string
                    inst_dict['info'] = self.dat['mat_info'][str((i,j))]
                # add highlight if necessary - use dictionary with tuple key
                if 'mat_hl' in self.dat:
                    inst_dict['highlight'] = self.dat['mat_hl'][i,j]
                # append link
                self.viz['links'].append( inst_dict )
def df_to_dat(self, df):
    """Load a {'mat': DataFrame, ...} dictionary into self.dat.

    Copies the matrix values and row/col labels; when 'mat_up' is present
    the paired up/down matrices are copied as well.
    """
    import numpy as np
    import pandas as pd
    mat_df = df['mat']
    self.dat['mat'] = mat_df.values
    self.dat['nodes']['row'] = mat_df.index.tolist()
    self.dat['nodes']['col'] = mat_df.columns.tolist()
    # tuple column labels carry category information: keep only the name part
    col_labels = self.dat['nodes']['col']
    if type(col_labels[0]) is tuple:
        self.dat['nodes']['col'] = [label[0] for label in col_labels]
    # up/down matrices always travel as a pair
    if 'mat_up' in df:
        self.dat['mat_up'] = df['mat_up'].values
        self.dat['mat_dn'] = df['mat_dn'].values
def dat_to_df(self):
    """Return self.dat as a dictionary of pandas DataFrames.

    Always contains 'mat'; 'mat_up'/'mat_dn' are added when present in
    self.dat, sharing the same row/col labels.
    """
    import numpy as np
    import pandas as pd
    row_labels = self.dat['nodes']['row']
    col_labels = self.dat['nodes']['col']
    frames = {'mat': pd.DataFrame(data=self.dat['mat'],
                                  columns=col_labels, index=row_labels)}
    if 'mat_up' in self.dat:
        frames['mat_up'] = pd.DataFrame(data=self.dat['mat_up'],
                                        columns=col_labels, index=row_labels)
        frames['mat_dn'] = pd.DataFrame(data=self.dat['mat_dn'],
                                        columns=col_labels, index=row_labels)
    return frames
def make_filtered_views(self, dist_type='cosine', run_clustering=True,
    dendro=True, views=['filter_row_sum','N_row_sum'], calc_col_cats=True,
    linkage_type='average'):
    # NOTE(review): mutable default for `views` - it is only read, never
    # mutated, so this is harmless, but a tuple/None default would be safer.
    from copy import deepcopy
    '''
    This will calculate multiple views of a clustergram by filtering the data
    and clustering after each filtering. This filtering will keep the top N
    rows based on some quantity (sum, num-non-zero, etc).
    '''
    print('running make_filtered_views')
    print('dist_type '+str(dist_type))
    # get dataframe dictionary of network and remove rows/cols with all zero values
    df = self.dat_to_df()
    # each row or column must have at least one non-zero value
    threshold = 0.0001
    df = self.df_filter_row(df, threshold)
    df = self.df_filter_col(df, threshold)
    # calculate initial view with no row filtering
    ##################################################
    # swap back in the filtered df to dat
    self.df_to_dat(df)
    # cluster initial view
    self.cluster_row_and_col(dist_type=dist_type, linkage_type=linkage_type,
        run_clustering=run_clustering, dendro=dendro)
    # set up views
    all_views = []
    # generate views for each column category (default to only one)
    all_col_cat = ['all_category']
    # check for column categories and check whether category specific clustering
    # should be calculated
    if len(self.dat['node_info']['col']['cl']) > 0 and calc_col_cats:
        tmp_cats = sorted(list(set(self.dat['node_info']['col']['cl'])))
        # gather all col_cats
        all_col_cat.extend(tmp_cats)
    # build the requested view families once per column category
    for inst_col_cat in all_col_cat:
        # make a copy of df to send to filters
        send_df = deepcopy(df)
        # add N_row_sum views (keep top-N rows)
        if 'N_row_sum' in views:
            print('add N top views')
            all_views = self.add_N_top_views( send_df, all_views, dist_type=dist_type, current_col_cat=inst_col_cat )
        # add percentage-of-max row-sum views
        if 'filter_row_sum' in views:
            all_views = self.add_pct_top_views( send_df, all_views, dist_type=dist_type, current_col_cat=inst_col_cat )
    # add views to viz
    self.viz['views'] = all_views
    print('finished make_filtered_views')
def add_pct_top_views(self, df, all_views, dist_type='cosine',
    current_col_cat='all_category'):
    """Append views row-filtered at 0%..90% of the maximum row sum.

    Each cutoff drops rows whose (signed) sum is below cutoff*max_sum,
    re-clusters the remainder, and appends a view dict to *all_views*,
    which is returned.
    """
    from clustergrammer import Network
    from copy import deepcopy
    import numpy as np
    # make a copy of the network so that filtering is not propagated
    copy_net = deepcopy(self)
    # filter columns by category if necessary - do this on df, which is a copy
    if current_col_cat != 'all_category':
        keep_cols = copy_net.dat['node_info']['col_in_cat'][current_col_cat]
        df['mat'] = copy_net.grab_df_subset(df['mat'], keep_rows='all', keep_cols=keep_cols)
    # gather category key (column name -> category), only for the
    # 'all_category' pass where per-column categories are meaningful
    is_col_cat = False
    if len(self.dat['node_info']['col']['cl']) > 0 and current_col_cat=='all_category':
        is_col_cat = True
        cat_key_col = {}
        for i in range(len(self.dat['nodes']['col'])):
            cat_key_col[ self.dat['nodes']['col'][i] ] = self.dat['node_info']['col']['cl'][i]
    # filter between 0% and 90% of some threshold
    all_filt = range(10)
    all_filt = [i/float(10) for i in all_filt]
    # row filtering values: cutoffs are fractions of the largest row sum
    mat = deepcopy(df['mat'])
    sum_row = np.sum(mat, axis=1)
    max_sum = max(sum_row)
    for inst_filt in all_filt:
        cutoff = inst_filt * max_sum
        # make a copy of the network so that filtering is not propagated
        copy_net = deepcopy(self)
        # make copy of df
        inst_df = deepcopy(df)
        # filter row in df (signed sums: take_abs=False)
        inst_df = copy_net.df_filter_row(inst_df, cutoff, take_abs=False)
        # filter columns by category if necessary
        if current_col_cat != 'all_category':
            keep_cols = copy_net.dat['node_info']['col_in_cat'][current_col_cat]
            inst_df['mat'] = copy_net.grab_df_subset(inst_df['mat'], keep_rows='all', keep_cols=keep_cols)
            if 'mat_up' in inst_df:
                # grab up and down data
                inst_df['mat_up'] = copy_net.grab_df_subset(inst_df['mat_up'], keep_rows='all', keep_cols=keep_cols)
                inst_df['mat_dn'] = copy_net.grab_df_subset(inst_df['mat_dn'], keep_rows='all', keep_cols=keep_cols)
        # ini net
        net = deepcopy(Network())
        # transfer to dat
        net.df_to_dat(inst_df)
        # add col categories if necessary
        if is_col_cat:
            inst_col_cats = []
            for inst_col_name in copy_net.dat['nodes']['col']:
                inst_col_cats.append( cat_key_col[inst_col_name] )
            # transfer category information
            net.dat['node_info']['col']['cl'] = inst_col_cats
            # add col_in_cat
            net.dat['node_info']['col_in_cat'] = copy_net.dat['node_info']['col_in_cat']
        # try to cluster: first with clustering, then without as a fallback;
        # if both fail, the view is skipped entirely
        try:
            try:
                # cluster
                net.cluster_row_and_col(dist_type=dist_type,run_clustering=True)
            except:
                # cluster
                net.cluster_row_and_col(dist_type=dist_type,run_clustering=False)
            # add view
            inst_view = {}
            inst_view['filter_row_sum'] = inst_filt
            inst_view['dist'] = 'cos'
            inst_view['col_cat'] = current_col_cat
            inst_view['nodes'] = {}
            inst_view['nodes']['row_nodes'] = net.viz['row_nodes']
            inst_view['nodes']['col_nodes'] = net.viz['col_nodes']
            all_views.append(inst_view)
        except:
            print('\t*** did not cluster pct filtered view')
    return all_views
def add_N_top_views(self, df, all_views, dist_type='cosine',
    current_col_cat='all_category'):
    """Append views keeping the top-N rows by |row sum| for a range of N.

    For each N in keep_top ('all', 500, ..., 10) the best-scoring rows are
    kept, the result re-clustered, and a view dict appended to *all_views*,
    which is returned.
    """
    from clustergrammer import Network
    from copy import deepcopy
    # make a copy of the network
    copy_net = deepcopy(self)
    # filter columns by category if necessary
    if current_col_cat != 'all_category':
        keep_cols = copy_net.dat['node_info']['col_in_cat'][current_col_cat]
        df['mat'] = copy_net.grab_df_subset(df['mat'], keep_rows='all', keep_cols=keep_cols)
    # gather category key (column name -> category), only meaningful for
    # the 'all_category' pass
    is_col_cat = False
    if len(self.dat['node_info']['col']['cl']) > 0 and current_col_cat=='all_category':
        is_col_cat = True
        cat_key_col = {}
        for i in range(len(self.dat['nodes']['col'])):
            cat_key_col[ self.dat['nodes']['col'][i] ] = self.dat['node_info']['col']['cl'][i]
    # keep the following number of top rows
    keep_top = ['all',500,400,300,200,100,90,80,70,60,50,40,30,20,10]
    # get copy of df and take abs value, cell line cols and gene rows
    df_abs = deepcopy(df['mat'])
    # transpose to get gene columns
    df_abs = df_abs.transpose()
    # sum the values of the genes in the cell lines
    tmp_sum = df_abs.sum(axis=0)
    # take absolute value to keep most positive and most negative rows
    tmp_sum = tmp_sum.abs()
    # sort rows by value
    # NOTE(review): Series.sort(...) is Python-2-era pandas (removed in
    # modern pandas, where sort_values() is the replacement) - confirm
    # the targeted pandas version.
    tmp_sum.sort(ascending=False)
    rows_sorted = tmp_sum.index.values.tolist()
    for inst_keep in keep_top:
        # initialize df
        tmp_df = deepcopy(df)
        # filter columns by category if necessary
        if current_col_cat != 'all_category':
            keep_cols = copy_net.dat['node_info']['col_in_cat'][current_col_cat]
            tmp_df['mat'] = copy_net.grab_df_subset(tmp_df['mat'], keep_rows='all', keep_cols=keep_cols)
            if 'mat_up' in df:
                # grab up and down data
                tmp_df['mat_up'] = copy_net.grab_df_subset(tmp_df['mat_up'], keep_rows='all', keep_cols=keep_cols)
                tmp_df['mat_dn'] = copy_net.grab_df_subset(tmp_df['mat_dn'], keep_rows='all', keep_cols=keep_cols)
        # NOTE(review): on Python 3 `'all' < len(...)` raises TypeError; this
        # short-circuiting comparison only works on Python 2 - confirm.
        if inst_keep < len(rows_sorted) or inst_keep == 'all':
            # initialize network
            net = deepcopy(Network())
            # filter the rows
            if inst_keep != 'all':
                # get the labels of the rows that will be kept
                keep_rows = rows_sorted[0:inst_keep]
                # filter the matrix
                # NOTE(review): DataFrame.ix is removed in modern pandas
                # (.loc is the replacement) - confirm targeted version.
                tmp_df['mat'] = tmp_df['mat'].ix[keep_rows]
                if 'mat_up' in tmp_df:
                    tmp_df['mat_up'] = tmp_df['mat_up'].ix[keep_rows]
                    tmp_df['mat_dn'] = tmp_df['mat_dn'].ix[keep_rows]
                # filter columns - some columns may have all zero values
                tmp_df = self.df_filter_col(tmp_df,0.001)
                # transfer to dat
                net.df_to_dat(tmp_df)
            else:
                net.df_to_dat(tmp_df)
            # add col categories if necessary
            if is_col_cat:
                inst_col_cats = []
                for inst_col_name in self.dat['nodes']['col']:
                    inst_col_cats.append( cat_key_col[inst_col_name] )
                # transfer category information
                net.dat['node_info']['col']['cl'] = inst_col_cats
                # add col_in_cat
                net.dat['node_info']['col_in_cat'] = copy_net.dat['node_info']['col_in_cat']
            # try to cluster: with clustering first, without as fallback;
            # skip the view entirely if both fail
            try:
                try:
                    # cluster
                    net.cluster_row_and_col(dist_type,run_clustering=True)
                except:
                    # cluster
                    net.cluster_row_and_col(dist_type,run_clustering=False)
                # add view
                inst_view = {}
                inst_view['N_row_sum'] = inst_keep
                inst_view['dist'] = 'cos'
                inst_view['col_cat'] = current_col_cat
                inst_view['nodes'] = {}
                inst_view['nodes']['row_nodes'] = net.viz['row_nodes']
                inst_view['nodes']['col_nodes'] = net.viz['col_nodes']
                all_views.append(inst_view)
            except:
                print('\t*** did not cluster N filtered view')
    return all_views
def fast_mult_views(self, dist_type='cos', run_clustering=True, dendro=True):
    import numpy as np
    import pandas as pd
    from clustergrammer import Network
    from copy import deepcopy
    '''
    This will use Pandas to calculate multiple views of a clustergram.
    Currently, it is only filtering based on row-sum and it is disregarding
    link information (used to add click functionality).
    '''
    # gather category key (column name -> category) when categories exist
    is_col_cat = False
    if len(self.dat['node_info']['col']['cl']) > 0:
        is_col_cat = True
        cat_key_col = {}
        for i in range(len(self.dat['nodes']['col'])):
            cat_key_col[ self.dat['nodes']['col'][i] ] = self.dat['node_info']['col']['cl'][i]
    # get dataframe dictionary of network and remove rows/cols with all zero values
    df = self.dat_to_df()
    # each row or column must have at least one non-zero value
    threshold = 0.001
    df = self.df_filter_row(df, threshold)
    df = self.df_filter_col(df, threshold)
    # calculate initial view with no row filtering
    #################################################
    # swap back in filtered df to dat
    self.df_to_dat(df)
    # cluster initial view
    self.cluster_row_and_col('cos',run_clustering=run_clustering, dendro=dendro)
    # set up views
    all_views = []
    # set up initial (unfiltered) view
    inst_view = {}
    inst_view['filter_row_sum'] = 0
    inst_view['dist'] = 'cos'
    inst_view['nodes'] = {}
    inst_view['nodes']['row_nodes'] = self.viz['row_nodes']
    inst_view['nodes']['col_nodes'] = self.viz['col_nodes']
    # add view with no filtering
    all_views.append(inst_view)
    # filter between 0% and 90% of some threshold
    all_filt = range(10)
    all_filt = [i/float(10) for i in all_filt]
    # row filtering values: cutoffs are fractions of the largest |row| sum
    mat = self.dat['mat']
    mat_abs = abs(mat)
    sum_row = np.sum(mat_abs, axis=1)
    max_sum = max(sum_row)
    for inst_filt in all_filt:
        # skip zero filtering (already added as the initial view)
        if inst_filt > 0:
            cutoff = inst_filt * max_sum
            # filter row; note df is filtered cumulatively across iterations,
            # which works because cutoffs are strictly increasing
            df = self.df_filter_row(df, cutoff, take_abs=True)
            print('\tfiltering at cutoff ' + str(inst_filt) + ' mat shape: ' + str(df['mat'].shape))
            # ini net
            net = deepcopy(Network())
            # transfer to dat
            net.df_to_dat(df)
            # add col categories if necessary
            if is_col_cat:
                inst_col_cats = []
                for inst_col_name in self.dat['nodes']['col']:
                    inst_col_cats.append( cat_key_col[inst_col_name] )
                net.dat['node_info']['col']['cl'] = inst_col_cats
            # try to cluster; skip the view on failure
            try:
                # cluster
                net.cluster_row_and_col('cos')
                # add view
                inst_view = {}
                inst_view['filter_row_sum'] = inst_filt
                inst_view['dist'] = 'cos'
                inst_view['nodes'] = {}
                inst_view['nodes']['row_nodes'] = net.viz['row_nodes']
                inst_view['nodes']['col_nodes'] = net.viz['col_nodes']
                all_views.append(inst_view)
            except:
                print('\t*** did not cluster filtered view')
    # add views to viz
    self.viz['views'] = all_views
    print('\tfinished fast_mult_views')
def make_mult_views(self, dist_type='cos',filter_row=['value'], filter_col=False, run_clustering=True, dendro=True):
    '''
    This will calculate multiple views of a clustergram by filtering the
    data and clustering after each filtering. By default row filtering will
    be turned on and column filtering will not. The filtering steps are
    defined as a percentage of the maximum value found in the network.
    '''
    from clustergrammer import Network
    from copy import deepcopy
    # filter between 0% and 90% of some to be determined value
    all_filt = [i / float(10) for i in range(10)]
    # cluster default view
    self.cluster_row_and_col('cos', run_clustering=run_clustering, dendro=dendro)
    self.viz['views'] = []
    all_views = []

    # Perform row filterings
    ###########################
    if len(filter_row) > 0:
        # perform multiple types of row filtering
        for inst_type in filter_row:
            for row_filt_int in all_filt:
                # work on a fresh copy so filtering never propagates
                net = deepcopy(Network())
                net.dat = deepcopy(self.dat)
                # filter rows, then drop columns that became all-zero
                net.filter_row_thresh(row_filt_int, filter_type=inst_type)
                net.filter_col_thresh(0.001, 1)
                # clustering fails with a single row - skip that view
                try:
                    net.cluster_row_and_col('cos')
                    inst_name = 'filter_row' + '_' + inst_type
                    # add view
                    inst_view = {}
                    inst_view[inst_name] = row_filt_int
                    inst_view['dist'] = 'cos'
                    inst_view['nodes'] = {}
                    inst_view['nodes']['row_nodes'] = net.viz['row_nodes']
                    inst_view['nodes']['col_nodes'] = net.viz['col_nodes']
                    all_views.append(inst_view)
                except:
                    print('\t***did not cluster filtered view')

    # Default col Filtering
    ###########################
    inst_meet = 1
    if filter_col == True:
        # BUG FIX: `max_mat` was referenced but never defined (NameError on
        # this path). Per the docstring, cutoffs are a percentage of the
        # maximum (absolute) value found in the network.
        max_mat = abs(self.dat['mat']).max()
        # col filtering
        #####################
        for col_filt in all_filt:
            # initialize new net
            net = deepcopy(Network())
            net.dat = deepcopy(self.dat)
            filt_value = col_filt * max_mat
            # filter cols
            net.filter_col_thresh(filt_value, inst_meet)
            # clustering fails with a single col - skip that view
            try:
                net.cluster_row_and_col('cos')
                # add view
                inst_view = {}
                inst_view['filter_col'] = col_filt
                inst_view['dist'] = 'cos'
                inst_view['nodes'] = {}
                inst_view['nodes']['row_nodes'] = net.viz['row_nodes']
                inst_view['nodes']['col_nodes'] = net.viz['col_nodes']
                all_views.append(inst_view)
            except:
                print('did not cluster filtered view')
    # add views to viz
    self.viz['views'] = all_views
@staticmethod
def df_filter_row(df, threshold, take_abs=True):
    ''' filter rows in matrix at some threshold
    and remove columns that have a sum below this threshold

    Keeps rows whose (optionally absolute-valued) sum exceeds *threshold*;
    mat_up/mat_dn are filtered to the same rows. Mutates and returns *df*.
    '''
    import pandas as pd
    from copy import deepcopy
    from clustergrammer import Network
    net = Network()
    # take absolute value if necessary
    if take_abs == True:
        df_copy = deepcopy(df['mat'].abs())
    else:
        df_copy = deepcopy(df['mat'])
    ini_rows = df_copy.index.values.tolist()
    # transpose df so row sums become column sums
    df_copy = df_copy.transpose()
    # sum the values of the rows
    tmp_sum = df_copy.sum(axis=0)
    # take absolute value to keep most positive and most negative rows
    tmp_sum = tmp_sum.abs()
    # BUG FIX: Series.sort() was removed from pandas; sort_values() returns
    # a new, descending-sorted series. (The ordering is cosmetic here - the
    # threshold mask below does the actual filtering.)
    tmp_sum = tmp_sum.sort_values(ascending=False)
    # filter series using threshold
    tmp_sum = tmp_sum[tmp_sum>threshold]
    # get keep_row names
    keep_rows = sorted(tmp_sum.index.values.tolist())
    # only re-subset when something was actually dropped
    if len(keep_rows) < len(ini_rows):
        # grab the subset of the data
        df['mat'] = net.grab_df_subset(df['mat'], keep_rows=keep_rows)
        if 'mat_up' in df:
            # grab up and down data with the same surviving rows
            df['mat_up'] = net.grab_df_subset(df['mat_up'], keep_rows=keep_rows)
            df['mat_dn'] = net.grab_df_subset(df['mat_dn'], keep_rows=keep_rows)
    return df
@staticmethod
def df_filter_col(df, threshold, take_abs=True):
    ''' filter columns in matrix at some threshold
    and remove rows that have all zero values

    Keeps columns whose (optionally absolute-valued) sum exceeds *threshold*
    and then drops rows left with all zero values. Mutates and returns *df*.
    '''
    import pandas
    from copy import deepcopy
    from clustergrammer import Network
    net = Network()
    # work on an absolute-valued copy when requested
    work = deepcopy(df['mat'].abs()) if take_abs == True else deepcopy(df['mat'])
    # drop columns whose sum is at or below the threshold
    # (transpose so column sums become row sums)
    transposed = work.transpose()
    transposed = transposed[transposed.sum(axis=1) > threshold]
    work = transposed.transpose()
    # drop rows that are left with all zero values
    work = work[work.sum(axis=1) > 0]
    # get df ready for export
    if take_abs == True:
        # re-select the surviving rows/cols from the original (signed) data
        surviving_rows = work.index.tolist()
        surviving_cols = work.columns.tolist()
        df['mat'] = net.grab_df_subset(df['mat'], surviving_rows, surviving_cols)
    else:
        # the filtered copy already holds the original values
        df['mat'] = work
    return df
@staticmethod
def grab_df_subset(df, keep_rows='all', keep_cols='all'):
if keep_cols != 'all':
# filter columns
df = df[keep_cols]
if keep_rows != 'all':
# filter rows
df = df.ix[keep_rows]
return df
@staticmethod
def load_gmt(filename):
f = open(filename, 'r')
lines = f.readlines()
f.close()
gmt = {}
# loop through the lines of the gmt
for i in range(len(lines)):
# get the inst line, strip off the new line character
inst_line = lines[i].rstrip()
inst_term = inst_line.split('\t')[0]
# get the elements
inst_elems = inst_line.split('\t')[2:]
# save the drug-kinase sets
gmt[inst_term] = inst_elems
return gmt
@staticmethod
def load_json_to_dict(filename):
''' load json to python dict and return dict '''
import json
f = open(filename, 'r')
inst_dict = json.load(f)
f.close()
return inst_dict
@staticmethod
def save_dict_to_json(inst_dict, filename, indent='no-indent'):
import json
# save as a json
fw = open(filename, 'w')
if indent == 'indent':
fw.write( json.dumps(inst_dict, indent=2) )
else:
fw.write( json.dumps(inst_dict) )
fw.close()
@staticmethod
def ini_clust_order():
rowcol = ['row','col']
orderings = ['clust','rank','group','ini']
clust_order = {}
for inst_node in rowcol:
clust_order[inst_node] = {}
for inst_order in orderings:
clust_order[inst_node][inst_order] = []
return clust_order
@staticmethod
def threshold_vect_comparison(x, y, cutoff):
import numpy as np
# x vector
############
# take absolute value of x
x_abs = np.absolute(x)
# this returns a tuple
found_tuple = np.where(x_abs >= cutoff)
# get index array
found_index_x = found_tuple[0]
# y vector
############
# take absolute value of y
y_abs = np.absolute(y)
# this returns a tuple
found_tuple = np.where(y_abs >= cutoff)
# get index array
found_index_y = found_tuple[0]
# get common intersection
found_common = np.intersect1d(found_index_x, found_index_y)
# apply cutoff
thresh_x = x[found_common]
thresh_y = y[found_common]
# return the threshold data
return thresh_x, thresh_y
@staticmethod
def group_cutoffs():
# generate distance cutoffs
all_dist = []
for i in range(11):
all_dist.append(float(i)/10)
return all_dist
@staticmethod
def find_dict_in_list(list_dict, search_value, search_string):
''' find a dict in a list of dicts by searching for a value '''
# get all the possible values of search_value
all_values = [d[search_value] for d in list_dict]
# check if the search value is in the keys
if search_string in all_values:
# find the dict
found_dict = (item for item in list_dict if item[search_value] == search_string).next()
else:
found_dict = {}
# return the found dictionary
return found_dict
| 59,336 | 20,251 |
numbers = [9, 8, 7, 6, 5, 4, 3, 2, 1]


def brick_sort(array):
    """Odd-even (brick) sort: alternate even-index and odd-index adjacent
    swap passes until a full sweep makes no swap. Sorts in place and
    returns the list."""
    last = len(array) - 1
    dirty = True
    while dirty:
        dirty = False
        # one even-offset pass followed by one odd-offset pass
        for offset in (0, 1):
            for i in range(offset, last, 2):
                if array[i] > array[i + 1]:
                    array[i], array[i + 1] = array[i + 1], array[i]
                    dirty = True
    return array


print(brick_sort(numbers))
from rest_framework import status
from rest_framework.response import Response
from rest_framework.permissions import BasePermission, SAFE_METHODS, IsAuthenticatedOrReadOnly
from .models import Topic, Reply
from sanctuary.viewsets import NoDestroyModelViewSet
from .serializers import TopicSerializer, ReplySerializer
class CreateWithAuthorMixin(object):
    """
    Create a model instance, forcing the authenticated user as its author.

    Overrides DRF's default create(): the submitted payload's 'author' field
    is overwritten with the requesting user's id before validation, and
    perform_create() saves with author=request.user, so clients cannot
    create records on behalf of other users.
    """
    def create(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        # overwrite any client-supplied author with the requesting user's id
        # NOTE(review): mutating initial_data assumes request.data is a
        # mutable mapping (e.g. parsed JSON) - confirm for form/QueryDict input
        serializer.initial_data["author"] = self.request.user.id
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)

    def perform_create(self, serializer):
        # persist with the author bound to the requesting user
        serializer.save(author=self.request.user)
class IsSuperAdminOrAuthor(BasePermission):
    """Object-level permission: reads are open to everyone, writes are
    allowed only for superusers or the object's author."""

    def has_object_permission(self, request, view, obj):
        # read-only requests are always allowed
        if request.method in SAFE_METHODS:
            return True
        # writes require superuser status or ownership of the object
        requester = request.user
        return requester.is_superuser or requester == obj.author
class TopicViewSet(CreateWithAuthorMixin, NoDestroyModelViewSet):
    """CRUD (minus destroy) endpoints for Topic; the author is always set
    from the requesting user by CreateWithAuthorMixin."""
    queryset = Topic.objects.all()
    serializer_class = TopicSerializer
    # reads open to all; writes require auth plus superuser-or-author
    permission_classes = (IsAuthenticatedOrReadOnly, IsSuperAdminOrAuthor)
    # allow ?author= and ?tags= query-string filtering
    filter_fields = ('author', 'tags')
class ReplyViewSet(CreateWithAuthorMixin, NoDestroyModelViewSet):
    """CRUD (minus destroy) endpoints for Reply; the author is always set
    from the requesting user by CreateWithAuthorMixin."""
    queryset = Reply.objects.all()
    serializer_class = ReplySerializer
    # reads open to all; writes require auth plus superuser-or-author
    permission_classes = (IsAuthenticatedOrReadOnly, IsSuperAdminOrAuthor)
    # allow ?topic= and ?author= query-string filtering
    filter_fields = ('topic', 'author')
| 1,676 | 489 |
import cv2 as cv
from model import model
class face_recogniser(model):
    """Face-detection wrapper: runs OpenVINO's face-detection-adas-0001
    network on a Myriad (NCS) device and draws boxes around detections."""

    def __init__(self):
        # BUG FIX: the original called bare `super()`, which only builds a
        # super proxy and discards it - the base class was never initialized.
        # NOTE(review): assumes model.__init__ takes no required args - confirm.
        super().__init__()
        self.net = cv.dnn.readNet('/home/pi/inception/face-detection-adas-0001.xml','/home/pi/inception/face-detection-adas-0001.bin')
        self.net.setPreferableTarget(cv.dnn.DNN_TARGET_MYRIAD)

    def add_face_rectangle(self, frame):
        """Detect faces in *frame*, draw green rectangles on it in place,
        and return the annotated frame."""
        # Prepare input blob and perform an inference.
        blob = cv.dnn.blobFromImage(frame, size=(672, 384), ddepth=cv.CV_8U)
        self.net.setInput(blob)
        out = self.net.forward()
        # Each detection row: [image_id, label, conf, xmin, ymin, xmax, ymax]
        # with normalized coordinates.
        for detection in out.reshape(-1, 7):
            confidence = float(detection[2])
            # check confidence first so pixel coordinates are only computed
            # for detections that will actually be drawn
            if confidence > 0.5:
                xmin = int(detection[3] * frame.shape[1])
                ymin = int(detection[4] * frame.shape[0])
                xmax = int(detection[5] * frame.shape[1])
                ymax = int(detection[6] * frame.shape[0])
                cv.rectangle(frame, (xmin, ymin), (xmax, ymax), color=(0, 255, 0))
        return frame

    def process(self, frame):
        """Model interface hook: annotate the frame and return it."""
        return self.add_face_rectangle(frame)
| 1,113 | 396 |
from geopy.exc import GeocoderTimedOut
from geopy.geocoders import Nominatim
import numpy as np
import pandas as pd
# function to find the coordinate
# of a given city
def find_geocode(city):
    """Return the geopy Location for *city*, or None when the geocoder
    times out."""
    try:
        # Specify the user_agent as your
        # app name it should not be none
        geolocator = Nominatim(user_agent="your_app_name")
        return geolocator.geocode(city)
    except GeocoderTimedOut:
        return None


loc = find_geocode('italy')
# BUG FIX: geocode() also returns None when no match is found (and this
# function returns None on timeout), so guard before dereferencing.
if loc is not None:
    print(loc.latitude)
    print(loc.longitude)
else:
    print("location lookup failed")
import random
import pytest
from pyhdtoolkit.cpymadtools.generators import LatticeGenerator
class TestLatticeGenerator:
    """Smoke tests for LatticeGenerator: each generated lattice/job is a
    string of a known, fixed length, and the job templates embed the
    provided seed/error values verbatim."""

    def test_base_cas_lattice_generation(self):
        """Base CAS lattice is a string of the expected fixed length."""
        base_cas_lattice = LatticeGenerator.generate_base_cas_lattice()
        assert isinstance(base_cas_lattice, str)
        assert len(base_cas_lattice) == 1493

    def test_onesext_cas_lattice(self):
        """One-sextupole CAS lattice is a string of the expected fixed length."""
        onesext_cas_lattice = LatticeGenerator.generate_onesext_cas_lattice()
        assert isinstance(onesext_cas_lattice, str)
        assert len(onesext_cas_lattice) == 2051

    def test_oneoct_cas_lattice(self):
        """One-octupole CAS lattice is a string of the expected fixed length."""
        oneoct_cas_lattice = LatticeGenerator.generate_oneoct_cas_lattice()
        assert isinstance(oneoct_cas_lattice, str)
        assert len(oneoct_cas_lattice) == 2050

    def test_tripleterrors_study_reference(self):
        """Triplet-errors reference script is a string of the expected length."""
        tripleterrors_study_reference = LatticeGenerator.generate_tripleterrors_study_reference()
        assert isinstance(tripleterrors_study_reference, str)
        assert len(tripleterrors_study_reference) == 1617

    # seed/error values may be strings or ints (including random ones):
    # the template's length grows exactly by the length of their str() forms
    @pytest.mark.parametrize(
        "randseed, tferror",
        [
            ("", ""),
            ("95", "195"),
            ("105038", "0.001"),
            (str(random.randint(0, 1e7)), str(random.randint(0, 1e7))),
            (random.randint(0, 1e7), random.randint(0, 1e7)),
        ],
    )
    def test_tripleterrors_study_tferror_job(self, randseed, tferror):
        """TF-error job embeds the seed and B2r error verbatim."""
        tripleterrors_study_tferror_job = LatticeGenerator.generate_tripleterrors_study_tferror_job(
            rand_seed=randseed, tf_error=tferror,
        )
        assert isinstance(tripleterrors_study_tferror_job, str)
        assert len(tripleterrors_study_tferror_job) == 2521 + len(str(randseed)) + len(str(tferror))
        assert f"eoption, add, seed = {randseed};" in tripleterrors_study_tferror_job
        assert f"B2r = {tferror};" in tripleterrors_study_tferror_job

    @pytest.mark.parametrize(
        "randseed, mserror",
        [
            ("", ""),
            ("95", "195"),
            ("105038", "0.001"),
            (str(random.randint(0, 1e7)), str(random.randint(0, 1e7))),
            (random.randint(0, 1e7), random.randint(0, 1e7)),
        ],
    )
    def test_tripleterrors_study_mserror_job(self, randseed, mserror):
        """MS-error job embeds the seed and misalignment error verbatim."""
        tripleterrors_study_mserror_job = LatticeGenerator.generate_tripleterrors_study_mserror_job(
            rand_seed=randseed, ms_error=mserror,
        )
        assert isinstance(tripleterrors_study_mserror_job, str)
        assert len(tripleterrors_study_mserror_job) == 2384 + len(str(randseed)) + len(str(mserror))
        assert f"eoption, add, seed = {randseed};" in tripleterrors_study_mserror_job
        assert f"ealign, ds := {mserror} * 1E-3 * TGAUSS(GCUTR);" in tripleterrors_study_mserror_job
import pandas as pd
import re
import sys
#read bed file
#constructure acitivity table
#output tfr file
def main():
    """Convert an activity table whose index holds locus strings (e.g.
    'chr1:100-200(+)...') into a tab-separated table with chrom/start/end/
    strand columns prepended, written next to the input as a .bed file.

    argv[1]: bed file path (read but currently unused - see note below)
    argv[2]: activity table path (.txt, tab-separated)
    """
    # NOTE(review): bed_file is parsed but never used - confirm intent
    bed_file = sys.argv[1]
    act_table = sys.argv[2]
    data = pd.read_csv(act_table,sep = '\t')
    # the unnamed index column of the table holds the locus strings
    data.rename(columns={'Unnamed: 0':'loci'}, inplace=True)
    # 'chr1:100-200(+)...' -> 'chr1'
    chrom = [i.split(':')[0] for i in list(data.loci)]
    # everything after the chrom prefix, e.g. '100-200(+)...'
    coord = [re.split(':()',i)[-1] for i in list(data.loci)]
    # '100-200(+)...' -> '100' and '200'
    start = [i.split('(')[0].split('-')[0] for i in coord]
    end = [i.split('(')[0].split('-')[1] for i in coord]
    # strand character sits just before the closing paren
    strand = [i[-2] for i in coord]
    data = data.drop(columns=['loci'])
    # (earlier parsing attempt kept for reference)
    # chrom = [i.split(':')[0] for i in list(data.loci)]
    # start = [re.split(':|-',i)[1] for i in list(data.loci)]
    # end = [re.split(":|-",i)[2] for i in list(data.loci)]
    # clean_end = [i[:-3] for i in end]
    # strand = [i[-2] for i in end]
    data['chrom'] = chrom
    data['start'] = start
    data['end'] = end
    data['strand'] = strand
    # move the four new columns to the front
    cols = data.columns.tolist()
    cols = cols[-4:]+cols[:-4]
    data = data[cols]
    # write next to the input, swapping the .txt suffix for .bed
    output_act = act_table.split('.txt')[0]+'.bed'
    data.to_csv(output_act,sep='\t',index = False)
##############################################################
def align_seqs_scores_1hot(seq_vecs, seq_scores, sort=True):
    """Align one-hot sequence vectors with their score vectors.

    seq_vecs and seq_scores are dicts keyed by the same headers; the
    vectors are stacked in matching (optionally sorted) header order.
    Returns (train_seqs, train_scores) as 2-D numpy arrays.
    """
    # BUG FIX: `np` was referenced but numpy is never imported in this
    # file; import it locally so the function is self-contained.
    import numpy as np

    if sort:
        seq_headers = sorted(seq_vecs.keys())
    else:
        seq_headers = seq_vecs.keys()
    # construct lists of vectors in the same header order for both dicts
    train_scores = []
    train_seqs = []
    for header in seq_headers:
        train_seqs.append(seq_vecs[header])
        train_scores.append(seq_scores[header])
    # stack into matrices
    train_seqs = np.vstack(train_seqs)
    train_scores = np.vstack(train_scores)
    return train_seqs, train_scores


if __name__ == '__main__':
    main()
| 1,789 | 673 |
import sys, socket
import json
import cgi
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import numpy as np
from http.server import BaseHTTPRequestHandler, HTTPServer
from modules.controller import Controller
# setting
host = ''
port = 8000
class MyHandler(BaseHTTPRequestHandler):
    """HTTP handler: parses POSTed form data, runs it through the
    Controller's webLogic, and writes the result back as JSON text."""

    def do_POST(self):
        """Handle a POST: parse the form, run the controller logic, respond."""
        print("simpleserver do_POST exec()")
        # ignore browser favicon requests
        if self.path.endswith('favicon.ico'):
            return;
        # a fresh Controller per request
        self.controller = Controller()
        # request
        form = self.getRequestData()
        print(type(form))
        # logic
        #logicResult = ""
        logicResult = self.controller.webLogic(form)
        # make result
        result = self.makeResponseData(logicResult)
        # send
        self.sendResponse(result)
        return

    def getRequestData(self):
        # parse the form data sent via POST
        form = cgi.FieldStorage(
            fp=self.rfile,
            headers=self.headers,
            environ={'REQUEST_METHOD':'POST',
                     'CONTENT_TYPE':'png',
                     })
        print(form)
        #image = {"test":"requestData"}
        return form

    def makeResponseData(self, result):
        # currently a pass-through with debug logging
        print("### simpleserver makeResponseData exec")
        #result = {"test":"responseData"}
        print(result)
        print(type(result))
        return result

    def sendResponse(self, result):
        """Send a 200 response with the stringified result as the body."""
        print("### simpleserver sendResponse exec")
        self.send_response(200)
        self.send_header('Content-type', 'text/json')
        # allow cross-origin calls from the local front-end
        self.send_header('Access-Control-Allow-Origin', 'http://deeplearning.local.com')
        self.end_headers()
        #self.wfile.flush()
        self.wfile.write(str(result).encode('UTF-8'))
        # NOTE(review): BaseHTTPRequestHandler normally manages wfile itself;
        # closing it here may interfere with keep-alive handling - confirm.
        self.wfile.close()
        return
# Run the server until Ctrl-C, then release the listening socket so the
# port can be rebound immediately on restart.
try:
    server = HTTPServer((host, port), MyHandler)
    server.serve_forever()
except KeyboardInterrupt:
    print ('^C received, shutting down the web server')
    server.socket.close()
| 2,072 | 646 |
#!/usr/bin/python
import subprocess
import sys
def main():
    """Print the Parallels VM UUID whose Home path starts with argv[1].

    Parses `prlctl list -a -i` output: each VM record contains an
    "ID: {uuid}" line and a "Home: /path" line.  Exits 0 after printing the
    UUID on a match, otherwise prints a placeholder and exits 1.
    """
    if len(sys.argv) > 1:
        machines = dict()
        image = sys.argv[1]
        row_home = "Home:"
        row_id_open = "{"
        row_id_close = "}"
        # Bug fix: decode the subprocess output instead of str(bytes), which
        # yields the b'...' repr and forced splitting on a literal "\n"
        # two-character sequence rather than real newlines.
        output = subprocess.check_output(['prlctl', 'list', '-a', '-i']).decode()
        items = output.split("ID:")
        for item in items:
            if row_home in item:
                home = ""
                machine_id = ""
                rows = item.strip().split('\n')
                for row in rows:
                    # The UUID is the only line wrapped in braces.
                    if row_id_open in row.strip() and row_id_close in row.strip():
                        machine_id = row.replace(row_id_open, "").replace(row_id_close, "").strip()
                    if row_home in row:
                        home = row.replace(row_home, "").strip()
                machines[home] = machine_id
        # First VM whose Home path starts with the requested image path wins.
        for machine_home in machines:
            if machine_home.startswith(image):
                print(machines[machine_home])
                sys.exit(0)
        print("Unknown_ID")
        sys.exit(1)
    else:
        print("No image path provided")
        sys.exit(1)
if __name__ == "__main__":
    main()
| 1,214 | 358 |
#-*-coding: utf-8-*-
#todo p.333 ~ p.340
#todo code 7-7 ~ code 7-9
#todo 7.2.2 Introducing TensorBoard: TensorFlow's visualization framework
import os
import keras
from keras import layers
from keras.datasets import imdb
from keras.preprocessing import sequence
# Train a small 1-D convnet on IMDB sentiment and log metrics, activation
# histograms and embeddings to TensorBoard (book example 7-7 ~ 7-9).
max_features = 2000 # number of words to keep as features
max_len = 500 # number of tokens of each review to use
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
x_train = sequence.pad_sequences(sequences=x_train,
                                 maxlen=max_len)
x_test = sequence.pad_sequences(sequences=x_test,
                                maxlen=max_len)
model = keras.models.Sequential()
model.add(layers.Embedding(input_dim=max_features,
                           output_dim=128,
                           input_length=max_len,
                           name='embed'))
model.add(layers.Conv1D(filters=32, kernel_size=7, activation='relu'))
model.add(layers.MaxPooling1D(pool_size=5))
model.add(layers.Conv1D(filters=32, kernel_size=7, activation='relu'))
model.add(layers.GlobalAveragePooling1D())
# NOTE(review): Dense(1) has no sigmoid while the loss is binary_crossentropy
# (i.e. the model outputs logits) -- confirm this matches the book's listing.
model.add(layers.Dense(1))
model.summary()
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['acc'])
# keras.utils.plot_model
# - draws the model's layer graph to an image file
from keras.utils import plot_model
plot_model(model=model,
           to_file=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'model_graph', 'model_graph.png'),
           show_shapes=True)
callbacks = [
    keras.callbacks.TensorBoard(
        log_dir=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tensorboard_log'),
        histogram_freq=1, # record a histogram of activation outputs every epoch
        embeddings_freq=1 # record embedding data every epoch
    )
]
history = model.fit(
    x=x_train,
    y=y_train,
    epochs=1,
    batch_size=128,
    validation_split=0.2,
    callbacks=callbacks
)
# Copyright 2019 Willian Fuks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import os
import sys
import argparse
import six
import tensorflow as tf
import tensorflow_transform as tft
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
import tensorflow_transform.beam.impl as tft_beam
from tensorflow_transform.tf_metadata import dataset_metadata, dataset_schema
from tensorflow_transform.beam import impl as beam_impl
from tensorflow_transform.coders import example_proto_coder
from tensorflow_transform.beam.tft_beam_io import transform_fn_io
import ast
import six
import preprocess.metadata as metadata
import tempfile
# Guard: this Beam/TFT pipeline was written for the Python 2.7 runtime;
# abort immediately when imported under Python 3.
if not six.PY2:
    sys.exit("ERROR: Must use Python2.7")
def build_bq_query(filename, project_id, init_date, end_date):
    """Read a SQL template from *filename* and fill in its placeholders.

    The template is expected to contain `{project_id}`, `{init_date}` and
    `{end_date}` format fields.  Returns the rendered query string.
    """
    # Bug fix: use a context manager so the file handle is closed promptly;
    # the original leaked the handle returned by open().
    with open(filename) as template_file:
        query = template_file.read().format(project_id=project_id,
                                            init_date=init_date,
                                            end_date=end_date)
    return query
def build_pipeline_options(args):
    """
    Apache Beam Pipelines must receive a set of options for setting how the engine should
    run.
    Args
    ----
    args: argparse.Namespace
    Returns
    -------
    pipeline_options: defines how to run beam job.
    """
    options = {'runner': args.runner}
    # Copy over only the optional settings the caller actually supplied
    # (falsy values such as None are skipped, matching argparse defaults).
    optional_settings = ('temp_location', 'project', 'staging_location',
                         'job_name', 'max_num_workers', 'machine_type')
    for setting in optional_settings:
        value = getattr(args, setting)
        if value:
            options[setting] = value
    # Always pickle the main session and ship setup.py so workers get deps.
    options['save_main_session'] = True
    options['setup_file'] = './setup.py'
    return PipelineOptions(**options)
class FlattenInteractionsFn(beam.DoFn):
    """DoFn that un-nests a (customer_id, [hits]) pair into one row per hit."""

    def process(self, element):
        """
        flattens table
        """
        customer_id, hits = element
        for interaction in hits:
            yield {'customer_id': customer_id,
                   'sku': interaction['sku'],
                   'action': interaction['action']}
def preprocess_fn(dictrow):
    """tf.transform preprocessing: map string ids to integer vocab indices.

    Writes the fitted vocabularies out under the given vocab_filename so
    they can be reused at serving time; 'action' passes through unchanged.
    """
    customer_index = tft.string_to_int(dictrow['customer_id'],
                                       vocab_filename='customers_mapping')
    sku_index = tft.string_to_int(dictrow['sku'], vocab_filename='skus_mapping')
    return {'customer_id': customer_index,
            'sku': sku_index,
            'action': dictrow['action']}
def aggregate_customers_sessions(sessions):
    """
    Receives as input what products customers interacted with and returns their final
    aggregation.
    Args
    ----
    sessions: list of list of dicts.
      List where each element is a list of dict of type: [{'action': '', 'sku': ''}]
    Returns
    -------
    results: list of dicts
      Each resulting dict is aggregated on the sku and action level (repeating
      clauses are filtered out).
    """
    # Flatten all sessions into one list of hits, then deduplicate by
    # representing each dict as a hashable tuple of its items.
    all_hits = [hit for session in sessions for hit in session]
    unique_hits = {tuple(hit.items()) for hit in all_hits}
    return [dict(items) for items in unique_hits]
def build_final_results(row):
    """
    row = (customer_id, [{sku:, action}, {sku:, action}])
    """
    customer_id, interactions = row
    # Split the interaction dicts into two parallel lists; index i of each
    # list refers to the same interaction.
    result = {'customer_id': customer_id, 'skus_list': [], 'actions_list': []}
    for interaction in interactions:
        result['skus_list'].append(interaction['sku'])
        result['actions_list'].append(interaction['action'])
    return result
def build_test_results(row):
    """
    ('customer2', {'test': [{'skus_list': [1, 1], 'actions_list': ['AddedToBasket',
    'Browsed'], 'customer_id': 'customer2'}], 'train': [{'skus_list': [1, 1],
    'actions_list': ['AddedToBasket', 'Browsed'], 'customer_id': 'customer2'}]})
    """
    customer_id, inner_dicts = row
    # Customers with empty test interactions after filtering, or never seen
    # in training data, are dropped (implicit None return).
    if not inner_dicts['test'] or not inner_dicts['train']:
        return
    test_dict = inner_dicts['test'][0]
    train_dict = inner_dicts['train'][0]
    return {
        'customer_id': customer_id,
        'skus_list': test_dict['skus_list'],
        'actions_list': test_dict['actions_list'],
        'trained_skus_list': train_dict['skus_list'],
        'trained_actions_list': train_dict['actions_list'],
    }
def read_input_data(args, pipeline, flag):
    """
    Reads train and test pipelines.
    args: input args.
    pipeline: input pipeline where all transformations will take place.
    flag: either train or test.
    """
    # Source selection: BigQuery when an SQL template is given, otherwise a
    # text file whose rows are Python literals (parsed with ast.literal_eval).
    if args.input_sql:
        train_query = build_bq_query(args.input_sql, args.project,
                                     args.train_init_date, args.train_end_date)
        test_query = build_bq_query(args.input_sql, args.project,
                                    args.test_init_date, args.test_end_date)
        data = (
            pipeline
            | '{} read'.format(flag) >> beam.io.Read(beam.io.BigQuerySource(
                query=train_query if flag == 'train' else test_query,
                use_standard_sql=True)
            )
        )
    else:
        data = (
            pipeline
            | '{} read'.format(flag) >> beam.io.ReadFromText(
                args.input_train_data if flag == 'train' else args.input_test_data
            )
            | '{} to json'.format(flag) >> beam.Map(lambda x: ast.literal_eval(x))
        )
    # Keep only Browsed/AddedToBasket hits, group sessions per customer,
    # dedupe the (sku, action) pairs across sessions, then flatten back to
    # one row per interaction.
    data = (
        data
        | '{} filter empty hits'.format(flag) >> beam.Filter(lambda x: x['hits'])
        | '{} prepare customer grouping'.format(flag) >> beam.Map(lambda x: (
            x['customer_id'],
            [{'action': e['action'], 'sku': e['productSku']} for e in
             x['hits'] if e['action'] in ['Browsed', 'AddedToBasket']])
        )
        | '{} group customers'.format(flag) >> beam.GroupByKey()
        | '{} aggregate customers sessions'.format(flag) >> beam.Map(lambda x: (
            x[0],
            aggregate_customers_sessions(x[1]))
        )
        | '{} flatten'.format(flag) >> beam.ParDo(FlattenInteractionsFn())
    )
    return data
def write_total_distinct_keys_to_file(data, filename, key):
    """
    Counts how many distinct items of "key" is present in data. Key here is either
    sku or customer_id.
    Args
    ----
    data: pcollection.
    filename: where to write results to.
    key: on which value to count for.
    """
    # Project out the key, dedupe, count globally, then write the single
    # resulting integer to `filename` (side output of the main pipeline).
    _ = (
        data
        | 'get {}'.format(key) >> beam.Map(lambda x: x[key])
        | 'group {}'.format(key) >> beam.RemoveDuplicates()
        | 'count {}'.format(key) >> beam.combiners.Count.Globally()
        | 'write {}'.format(key) >> beam.io.WriteToText(filename)
    )
def write_tfrecords(data, schema, filename, name):
    """
    Converts input pcollection into a file of tfrecords following schema.
    Args
    ----
    data: pcollection.
    schema: dataset_schema from tensorflow transform.
    filename: output path prefix for the TFRecord shards.
    name: str to identify operations.
    """
    # Each element is serialized to a tf.Example with a coder built from
    # `schema` before being written.
    _ = (
        data
        | '{} tfrecords write'.format(name) >> beam.io.tfrecordio.WriteToTFRecord(
            filename,
            coder=example_proto_coder.ExampleProtoCoder(dataset_schema.Schema(schema)))
    )
def aggregate_transformed_data(transformed_data, flag):
    """
    One of the final steps into our pipelining transformations where data that has
    been transformed (in our case, skus went from string names to integer indices) is
    aggregated on the user level.
    transformed_data: pcollection.
    flag: identifies train or test
    Returns
    -------
    transformed_data aggregated on user level.
    """
    if flag == 'test':
        # NOTE(review): -1 appears to be the out-of-vocabulary index the
        # transform assigns to skus unseen in training -- confirm against
        # preprocess_fn / tft.string_to_int defaults.
        transformed_data = (
            transformed_data
            | 'test filter out invalid skus' >> beam.Filter(lambda x: x['sku'] != -1)
        )
    # Group interactions per customer and collapse them into the final
    # per-customer row shape produced by build_final_results.
    transformed_agg_data = (
        transformed_data
        | '{} prepare grouping'.format(flag) >> beam.Map(lambda x: (
            x['customer_id'],
            {'sku': x['sku'], 'action': x['action']})
        )
        | '{} transformed agg group'.format(flag) >> beam.GroupByKey()
        | '{} final results'.format(flag) >> beam.Map(lambda x: build_final_results(x))
    )
    return transformed_agg_data
def aggregate_final_test_data(train_data, test_data):
    """
    Joins train dataset with test so that only customers that we can make recommendations
    are present in final dataset. Remember that, in order to make them, we need to know
    a priori what customers interacted with. That's why we join the train data so we
    know customers preferences when we need to interact with them with our system.
    """
    # CoGroupByKey yields (customer_id, {'train': [...], 'test': [...]});
    # build_test_results returns None for customers missing on either side,
    # and the trailing Filter(lambda x: x) drops those Nones.
    data = (
        {
            'train': train_data | 'train prepare customer key' >> beam.Map(lambda x: (
                x['customer_id'], x)),
            'test': test_data | 'test prepare customer key' >> beam.Map(lambda x: (
                x['customer_id'], x))
        }
        | 'cogroup' >> beam.CoGroupByKey()
        | 'build final rows' >> beam.Map(build_test_results)
        | 'filter customers out of test' >> beam.Filter(lambda x: x)
    )
    return data
def run_tft_pipeline(args):
    """
    This is where all the data we have available in our database is processed and
    transformed into Tensorflow tfrecords for later training and testing.
    The code runs in distributed manner automatically in the engine choosen by
    the `runner` argument in input.
    """
    pipeline_options = build_pipeline_options(args)
    # tf.transform needs scratch space; fall back to fresh tmp dirs when the
    # caller did not pin locations.
    temp_tft_folder = (
        tempfile.mkdtemp(dir='/tmp/') if not args.tft_temp else args.tft_temp
    )
    tft_transform_folder = (
        tempfile.mkdtemp(dir='/tmp/') if not args.tft_transform else args.tft_transform
    )
    with beam.Pipeline(options=pipeline_options) as pipeline:
        with beam_impl.Context(temp_dir=temp_tft_folder):
            train_data = read_input_data(args, pipeline, 'train')
            # Side output: number of distinct skus, used downstream.
            write_total_distinct_keys_to_file(train_data, args.nitems_filename,
                                              'sku')
            train_dataset = (train_data, metadata.RAW_DATA_METADATA)
            # Fit the vocabularies on train and apply them in a single pass;
            # transform_fn is reused on test below (no re-analysis).
            (train_data, transformed_train_metadata), transform_fn = (
                train_dataset | beam_impl.AnalyzeAndTransformDataset(preprocess_fn)
            )
            _ = (
                transform_fn
                | 'WriteTransformFn' >>
                transform_fn_io.WriteTransformFn(tft_transform_folder)
            )
            train_data = aggregate_transformed_data(
                train_data,
                'train'
            )
            write_tfrecords(train_data, metadata.OUTPUT_TRAIN_SCHEMA,
                            args.output_train_filename,
                            'output train')
            test_data = read_input_data(args, pipeline, 'test')
            test_dataset = (test_data, metadata.RAW_DATA_METADATA)
            (test_data, _) = (
                (test_dataset, transform_fn) | beam_impl.TransformDataset())
            test_data = aggregate_transformed_data(
                test_data,
                'test'
            )
            # Join against train so only customers with known preferences
            # remain in the final evaluation set.
            test_data = aggregate_final_test_data(
                train_data,
                test_data
            )
            write_tfrecords(test_data, metadata.OUTPUT_TEST_SCHEMA,
                            args.output_test_filename, 'output test')
def main():
    """CLI entry point: parse arguments and run the transform pipeline."""
    # NOTE(review): parse_args() is not defined or imported anywhere visible
    # in this file -- confirm it is provided elsewhere, otherwise this raises
    # NameError at runtime.
    args = parse_args()
    run_tft_pipeline(args)
if __name__ == '__main__':
    main()
| 12,158 | 3,604 |
import csv
# Load the semicolon-delimited city table into a list of rows and show it.
with open("european_cities.csv", "r") as f:
    data = list(csv.reader(f, delimiter=';'))
print(data)
| 114 | 45 |
# Package metadata for spacy-lookup: the single source of truth consumed by
# setup.py and the package __init__.
__title__ = 'spacy-lookup'
__version__ = '0.0.1'
__summary__ = 'spaCy pipeline component for Named Entity Recognition based on dictionaries.'
__url__ = 'https://github.com/mpuig/spacy-lookup'
__author__ = 'Marc Puig'
__email__ = 'marc.puig@gmail.com'
__license__ = 'MIT'
| 271 | 104 |
# https://leetcode.com/problems/employee-importance/
"""
# Definition for Employee.
class Employee:
    def __init__(self, id: int, importance: int, subordinates: List[int]):
        self.id = id
        self.importance = importance
        self.subordinates = subordinates
"""
class Solution:
    def getImportance(self, employees: 'List[Employee]', id: int) -> int:
        """Sum the importance of employee *id* and all transitive subordinates.

        Builds an id -> employee index, then walks the subordinate id lists
        with an explicit stack (iterative DFS).

        Note: the parameter annotation is a string on purpose -- `List` is
        not imported in this module (LeetCode injects it), so the original
        eager annotation raised NameError at class-definition time.
        """
        # Index employees by id for O(1) lookups during the traversal.
        table = {e.id: e for e in employees}
        total = table[id].importance
        # Stack holds lists of subordinate ids still to be processed.
        stack = [table[id].subordinates]
        while stack:
            ids = stack.pop()
            for sub_id in ids:
                total += table[sub_id].importance
                stack.append(table[sub_id].subordinates)
        return total
| 751 | 218 |
import dataclasses
import datetime
import gzip
import json
import logging
import os
from typing import Any, Dict
import numpy as np
import tensorflow as tf
from absl import flags
from loganary.ranking.common import NumpyJsonEncoder, setup_logging, setup_seed
from loganary.ranking.model import (
RankingModel,
RankingModelConfig,
RankingModelEmbeddingField,
RankingModelField,
)
# Command-line flags for the ranking-model training entry point.
flags.DEFINE_string("train_path", None, "Path of .tfrecords file for training.")
flags.DEFINE_string("eval_path", None, "Path of .tfrecords file for evaluation.")
flags.DEFINE_string("keyword_path", None, "Path of vocabulary file for keyword field.")
flags.DEFINE_string("title_path", None, "Path of vocabulary file for title field.")
flags.DEFINE_string("model_path", None, "Path of trained model files.")
flags.DEFINE_integer("num_train_steps", 15000, "The number of train steps.")
flags.DEFINE_list("hidden_layer_dims", ["64", "32", "16"], "Sizes for hidden layers.")
# Typo fix in the two help strings below: "Dimention" -> "Dimension"
# (these strings are user-facing via --help).
flags.DEFINE_integer(
    "keyword_embedding_dim", 20, "Dimension of an embedding for keyword field."
)
flags.DEFINE_integer(
    "title_embedding_dim", 20, "Dimension of an embedding for title field."
)
flags.DEFINE_integer("batch_size", 32, "Batch size.")
flags.DEFINE_integer("list_size", 100, "List size.")
flags.DEFINE_float("learning_rate", 0.05, "Learning rate.")
flags.DEFINE_integer("group_size", 10, "Group size.")
flags.DEFINE_float("dropout_rate", 0.8, "Dropout rate.")
flags.DEFINE_bool("verbose", False, "Set a logging level as debug.")
FLAGS = flags.FLAGS
# Module-level logger named after this module, per logging convention.
logger = logging.getLogger(__name__)
def main(_) -> None:
    """Train the ranking model, export it, and archive config + metrics."""
    setup_seed()
    setup_logging(FLAGS.verbose)
    # Each run writes under a timestamped subdirectory of model_path.
    now_str = datetime.datetime.now().strftime("%Y%m%d%H%M")
    model_path: str = f"{FLAGS.model_path}/{now_str}"
    config: RankingModelConfig = RankingModelConfig(
        model_path=model_path,
        train_path=FLAGS.train_path,
        eval_path=FLAGS.eval_path,
        context_fields=[
            RankingModelEmbeddingField(
                name="keyword",
                vocabulary_file=FLAGS.keyword_path,
                dimension=FLAGS.keyword_embedding_dim,
            ),
        ],
        example_fields=[
            RankingModelEmbeddingField(
                name="title",
                vocabulary_file=FLAGS.title_path,
                dimension=FLAGS.title_embedding_dim,
            ),
        ],
        label_field=RankingModelField(
            name="relevance",
            column_type="numeric",
            default_value=-1,
        ),
        num_train_steps=FLAGS.num_train_steps,
        hidden_layer_dims=FLAGS.hidden_layer_dims,
        batch_size=FLAGS.batch_size,
        list_size=FLAGS.list_size,
        learning_rate=FLAGS.learning_rate,
        group_size=FLAGS.group_size,
        dropout_rate=FLAGS.dropout_rate,
    )
    logger.info(f"Config: {config}")
    model: RankingModel = RankingModel(config)
    result = model.train()
    logger.info(f"Result: {result}")
    # Move the exported SavedModel to a stable, predictable location.
    export_model_path: str = model.save_model()
    saved_model_path: str = f"{model_path}/saved_model"
    os.rename(export_model_path, saved_model_path)
    logger.info(f"Output Model Path: {saved_model_path}")
    # Persist the run's config and metrics next to the model, gzipped.
    with gzip.open(f"{model_path}/result.json.gz", mode="wt", encoding="utf-8") as f:
        config_dict: Dict[str, Any] = dataclasses.asdict(config)
        # NOTE(review): eval_metric is removed before the JSON dump --
        # presumably it is not JSON-serializable; confirm.
        del config_dict["eval_metric"]
        f.write(
            json.dumps(
                {
                    "config": config_dict,
                    "result": result,
                },
                ensure_ascii=False,
                cls=NumpyJsonEncoder,
            )
        )
if __name__ == "__main__":
    # All path flags are mandatory; absl aborts with a clear error if any
    # are missing before main() runs.
    flags.mark_flag_as_required("train_path")
    flags.mark_flag_as_required("eval_path")
    flags.mark_flag_as_required("keyword_path")
    flags.mark_flag_as_required("title_path")
    flags.mark_flag_as_required("model_path")
    tf.compat.v1.app.run()
| 3,916 | 1,270 |
# Generated by Django 3.0.5 on 2020-04-07 12:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated (Django 3.0.5): makes Entry.creator nullable and sets
    # the 'entries' related_name on the user foreign key.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('shortner', '0004_auto_20200407_0905'),
    ]
    operations = [
        migrations.AlterField(
            model_name='entry',
            name='creator',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='entries', to=settings.AUTH_USER_MODEL),
        ),
    ]
| 615 | 217 |
#!/bin/python3
'''
Title: build-fortress.py
Purpose: Build the infosec-fortress
Author: James Smith (DFIRmadness)
Contributors: Check the github page.
Notes: Beta
Version: 0.5
Usage: ./build-fortress.py
Functions:
+ apt update
+ dist upgrade
+ install base packages
+ create /opt/infosec-fortress
+ start log
+ install starter packages (min. pkgs to let script run)
+ install the REMnux Distribution
+ install SIFT
+ install base security packages
+ install Metasploit Framework
+ install wordlists
+ install and update exploitdb (searchsploit)
+ log2Timeline
+ elasticsearch containers
+ powershell Core (turns out its part of REMnux)
+ install impacket
+ install enum4linux
+ enum4linux https://github.com/cddmp/enum4linux-ng
+ display message about updating ZAP and Burp after reboot
'''
# Globals
# Package-manager binary name plus fortress install/log locations.
PKG_MGR = 'apt'
FORTRESS_DIR = '/opt/infosec-fortress/'
BUILD_LOG = 'build-fortress.log'
LOG = FORTRESS_DIR + BUILD_LOG
# Minimal Package list to get started
# (installed first so curl/git are available for the rest of the build).
starterPackagesList = [
    'net-tools',
    'curl',
    'git'
]
# List of packages to have APT install. Change if you want. You break it you buy it.
aptPackageList = [
'tmux',
'torbrowser-launcher',
'nmap',
'smbclient',
'locate',
'radare2-cutter',
'snort',
'dirb',
'gobuster',
'medusa',
'masscan',
'whois',
'libjenkins-htmlunit-core-js-java',
'autopsy',
'hashcat',
'kismet',
'kismet-plugins',
'airgraph-ng',
'wifite',
'dnsenum',
'dnsmap',
'ettercap-common',
'ettercap-graphical',
'netdiscover',
'sqsh',
'install nfs-common'
]
# List of packages to have SNAP install. Change if you want. You break it you buy it.
snapPackageList = [
    'chromium',
    'sqlmap',
    'john-the-ripper'
]
# Snaps that need --classic
# Avoid these. It's better to scrape a git for the latest and install. Zaproxy is a great example.
snapClassicPackageList =[
    #'zaproxy'
]
########################################################
# Colors
# ANSI escape sequences used to colorize console output (reset = NOCOLOR).
GREEN = '\033[32m'
RED = '\033[31m'
YELLOW = '\033[33m'
NOCOLOR = '\033[m'
from datetime import datetime
from getpass import getpass
from hashlib import sha1
from os import geteuid,path,makedirs
from os.path import expanduser
from subprocess import run
from urllib.request import urlopen
from requests import get
from re import search
# Check that the user is root
def checkIfRoot():
    """Abort unless running with euid 0 (root)."""
    if geteuid() != 0:
        print(RED + '[!] You need sudo/root permissions to run this... exiting.' + NOCOLOR)
        # Bug fix: exit with a non-zero status -- this is an error path, and
        # exit(0) would let calling scripts believe the run succeeded.
        exit(1)
# Check for internet connection
def checkForInternet():
    """Exit unless an HTTPS request to google.com succeeds within 3 seconds."""
    try:
        # Close the connection promptly instead of leaking the response
        # object; only reachability matters here.
        with urlopen('https://www.google.com', timeout=3.0):
            pass
        print(GREEN +'[+] Internet connection looks good!' + NOCOLOR)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        print(RED + '[-] Internet connection looks down. You will need internet for this to run (most likely). Fix and try again.' + NOCOLOR)
        exit(1)
def initNotice():
    # Warn the operator that the build needs occasional interactive input,
    # then block until they acknowledge.
    print('[!] This script requires user input once or twice.\n\
[!] It is not completely "Set and Forget".')
    nullInput = input('Hit Enter.')
# Get starting Disk Room
def freeSpaceStart():
    # Needs Regex Impovement with RE Search. Non Gig sized systems will break this.
    # Records free space on / (in GiB) at the start of the build and stores it
    # in a module global so freeSpaceEnd() can compute the delta.
    # NOTE(review): parsing `df -h` output by splitting on 'G' assumes every
    # relevant size column is gigabyte-scaled -- confirm on small/huge disks.
    global FREE_SPACE_START_INT
    freeSpaceStart = run(['df -h /'],shell=True,capture_output=True).stdout.decode().split('G')[2].strip()
    writeToLog('[i] Gigs of Free Space on / at the Start of the build: ' + freeSpaceStart + 'G')
    FREE_SPACE_START_INT = float(freeSpaceStart)
    return(FREE_SPACE_START_INT)
def freeSpaceEnd():
    """Log free space on / at the end of the build and the space consumed."""
    # Needs Regex Impovement with RE Search. Non Gig sized systems will break this.
    freeSpaceEnd = run(['df -h /'],shell=True,capture_output=True).stdout.decode().split('G')[2].strip()
    # Bug fix: this log line previously said "at the Start of the build",
    # which made the start/end entries in the log indistinguishable.
    writeToLog('[i] Gigs of Free Space on / at the End of the build: ' + freeSpaceEnd + 'G')
    freeSpaceEndInt = float(freeSpaceEnd)
    spaceUsed = FREE_SPACE_START_INT - freeSpaceEndInt
    writeToLog('[i] Gigs of Space used for InfoSec-Fortress Buildout: ' + str(spaceUsed) + 'G')
# Check/Inform about for unattended upgrade
def informAboutUnattendedUpgade():
    # Pause so the operator can confirm unattended-upgrades is not running;
    # a concurrent apt process would hold the dpkg lock and break the build.
    print('[!][!][!][!][!][!][!][!]\nUnattended Upgades firing while this script is running will break it.\
\nKill or complete the upgrades if you recently booted or rebooted. Then continue.\
\nIT MAY REQUIRE A REBOOT! If so, kill this script. Reboot. Run the updates. Run this script again.')
    nullInput = input('Hit any key to continue.')
def createFortressDir(FORTRESS_DIR):
    """Create the fortress working directory (including parents) if needed.

    Args:
        FORTRESS_DIR: path of the directory to create.
    """
    print('[*] Creating InfoSec Fortress Dir at:',FORTRESS_DIR)
    try:
        # exist_ok=True already tolerates a pre-existing directory, so the
        # old `except FileExistsError` branch was unreachable and is removed.
        makedirs(FORTRESS_DIR, exist_ok=True)
    except Exception as e:
        print('[-] Error creating the ' + FORTRESS_DIR + '. Error ' + str(e))
def startLogFile():
    """Create or append the build log with a timestamped start entry.

    Returns 'Succeeded' or 'Failed' so the caller can decide how to react.
    """
    try:
        now = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
        if not path.isfile(LOG):
            with open(LOG, 'a') as log:
                log.write(now + " - Log Started.\n")
            return('Succeeded')
        else:
            # Unexpected but harmless: note it and keep appending.
            with open(LOG, 'a') as log:
                log.write(now + " - Log Started. Strange, the log file appears to exist already? Continuing anyways.\n")
            return('Succeeded')
    except Exception:
        # Narrowed from a bare `except:`; the dead `exit(1)` that followed
        # the return below could never execute and was removed.
        return('Failed')
def writeToLog(stringToLog):
    """Append a timestamped line to the build log and echo it colorized.

    Lines tagged [+] print green, [-] red, [i] yellow; anything else plain.
    """
    now = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
    with open(LOG, 'a') as log:
        log.write(now + " - " + stringToLog + '\n')
    if '[+]' in stringToLog:
        print('\n' + GREEN + stringToLog + NOCOLOR + '\n----------------------------------------------------------\n')
    elif '[-]' in stringToLog:
        print('\n' + RED + stringToLog + NOCOLOR + '\n----------------------------------------------------------\n')
    elif '[i]' in stringToLog:
        # Bug fix: this test previously read `'[i]' in stringToLog + NOCOLOR`,
        # a misplaced concatenation; harmless in practice (NOCOLOR never
        # contains '[i]') but clearly not what was intended.
        print('\n' + YELLOW + stringToLog + NOCOLOR + '\n----------------------------------------------------------\n')
    else:
        print('\n' + stringToLog + '\n----------------------------------------------------------\n')
def buildStarterPackageList():
    """Return the starter package names joined into one apt argument string."""
    # str.join replaces the old quadratic concatenate-and-strip loop;
    # output is identical for whitespace-free package names.
    return ' '.join(starterPackagesList)
def buildAptPackageList():
    """Return the APT package names joined into one apt argument string."""
    # str.join replaces the old quadratic concatenate-and-strip loop.
    return ' '.join(aptPackageList)
def buildSnapPackageList():
    """Return the Snap package names joined into one snap argument string."""
    # str.join replaces the old quadratic concatenate-and-strip loop.
    return ' '.join(snapPackageList)
def buildSnapClassicPackagesList():
    """Return the Snap --classic package names joined into one string.

    Currently empty by default (the list ships with no active entries).
    """
    # str.join replaces the old quadratic concatenate-and-strip loop.
    return ' '.join(snapClassicPackageList)
# apt update
def updateOS():
    """Run apt update / upgrade / dist-upgrade, aborting the build on failure."""
    for apt_args in (['/usr/bin/apt','update'],
                     ['/usr/bin/apt','upgrade','-y'],
                     ['/usr/bin/apt','dist-upgrade','-y']):
        try:
            run(apt_args)
        except Exception as e:
            # Bug fix: writeToLog() takes a single string; the old
            # two-argument calls would themselves raise TypeError inside
            # the error handlers.
            writeToLog('[-] APT Updating failed. Fix and try again. Error: ' + str(e))
            exit(1)
# Minimal packages
def installStarterPackages():
    """Install the minimal packages (net-tools, curl, git) the build needs first."""
    starterPackages = buildStarterPackageList()
    writeToLog('[*] Attempting installation of the following starter packages: ' + starterPackages)
    try:
        run(['/usr/bin/apt install -y ' + starterPackages],shell=True)
        writeToLog('[+] Starter Packages installed.')
    except Exception as e:
        # Bug fix: writeToLog() takes a single string; the old call passed
        # the message and str(e) as two arguments, raising TypeError.
        writeToLog('[-] Starter Packages installation failed: ' + str(e))
# the REMnux Distribution
def installREMnux():
    """Download the remnux CLI and run it in add-on mode (long operation)."""
    # NOTE(review): logs '[+]' before the work happens; '[*]' would match the
    # convention used by the other installers.
    writeToLog('[+] Installing REMnux. This will take quite awhile. Verify the hash from the site later.')
    try:
        run(['/usr/bin/wget https://REMnux.org/remnux-cli'],shell=True)
        run(['/usr/bin/mv remnux-cli remnux'],shell=True)
        run(['/usr/bin/chmod +x remnux'],shell=True)
        run(['/usr/bin/mv remnux /usr/local/bin'],shell=True)
        run(['/usr/local/bin/remnux install --mode=addon'],shell=True)
        writeToLog('[+] REMnux Added On (downloaded and ran).')
    except Exception as e:
        writeToLog('[-] Something went wrong during the REMnux install. Error: ' + str(e))
# Install SIFT
def installSIFTPackages():
    """Locate the latest sift-cli release on GitHub and install SIFT packages."""
    writeToLog('[*] Finding latest SIFT Release.')
    try:
        # Scrape the releases page for the sift-cli-linux asset link.
        # NOTE(review): HTML scraping is fragile; a GitHub page redesign will
        # break this silently (the except below just logs and returns).
        latestLinkPage = get('https://github.com/sans-dfir/sift-cli/releases/latest').text.splitlines()
        latestSIFTBinLine = [match for match in latestLinkPage if "sift-cli-linux" in match][0].split('"')[1]
        latestSIFTBin = 'https://github.com/' + latestSIFTBinLine
        #latestSIFTBin = search('https:.*sift-cli-linux',latestSIFTBinLine)[0]
        writeToLog('[+] latest SIFT BIN: ' + latestSIFTBin)
    except Exception as e:
        writeToLog('[-] latest SIFT Bin not found. Error: ' + str(e))
        return
    writeToLog('[*] Installing SIFT Packages.')
    try:
        run(['/usr/bin/curl -Lo /usr/local/bin/sift ' + latestSIFTBin],shell=True)
        run(['/usr/bin/chmod +x /usr/local/bin/sift'],shell=True)
        run(['/usr/local/bin/sift install --mode=packages-only'],shell=True)
        writeToLog('[+] SIFT Packages installed (downloaded and ran).')
    except Exception as e:
        writeToLog('[-] Installation of SIFT Packages had an error. Error: '+str(e))
# install base packages
def installAPTandSNAPPackages():
    """Install the main APT, Snap and Snap-classic package sets.

    Prompts the operator first because several packages (wireshark, snort,
    kismet) ask interactive configuration questions during install.
    """
    print('[i] If Wireshark asks - say YES non-super users can capture packets.\n\n\
[i] When snort asks about a monitoring interface enter lo.\n\
[i] Setting the interface to "lo" (no quotes) sets it for local use.\n\
[i] Set any private network for the "home" network.\n\n\
[i] KISMET - Say YES to the sticky bit. Add your username to the Kismet Goup at the prompt.')
    nullInput = input('Hit Enter.')
    aptPackages = buildAptPackageList()
    snapPackages = buildSnapPackageList()
    snapClassicPackages = buildSnapClassicPackagesList()
    writeToLog('[*] Attempting installation of the following ATP packages: ' + aptPackages)
    try:
        run(['/usr/bin/apt install -y ' + aptPackages],shell=True)
        writeToLog('[+] APT Packages installed.')
    except Exception as e:
        # Bug fix (here and in the two handlers below): writeToLog() takes a
        # single string; the old two-argument calls raised TypeError inside
        # the error handlers.
        writeToLog('[-] APT Packages installation failed: ' + str(e))
    writeToLog('[*] Attempting installation of the following Snap Packages: ' + snapPackages)
    try:
        run(['/usr/bin/snap install ' + snapPackages],shell=True)
        writeToLog('[+] Snap Packages installed.')
    except Exception as e:
        writeToLog('[-] Snap packages installation failed: ' + str(e))
    if len(snapClassicPackages) == 0:
        writeToLog('[*] No snap classics to install.')
        return
    writeToLog('[*] Attempting installation of the following Snap Classic Packages: ' + snapClassicPackages)
    # Classic snaps must be installed one at a time (--classic per package).
    for package in snapClassicPackageList:
        try:
            run(['/usr/bin/snap install --classic ' + package],shell=True)
            writeToLog('[+] Snap Classic ' + package + ' installed.')
        except Exception as e:
            writeToLog('[-] Snap packages ' + package + ' failed: ' + str(e))
# Swap Netcats
# Change out netcat-bsd for netcat-traditional
def swapNetcat():
    """Replace netcat-openbsd with netcat-traditional (has -e exec support)."""
    writeToLog('[*] Attempting to trade out netcat-bsd for netcat-traditional')
    try:
        run(['/usr/bin/apt purge -y netcat-openbsd'],shell=True)
        run(['/usr/bin/apt install -y netcat-traditional'],shell=True)
        writeToLog('[+] netcat-traditional installed.')
    except Exception as e:
        writeToLog('[-] Installation of netcat-traditional failed. Error: '+str(e))
# Metasploit Framework
def installMSF():
    """Install Metasploit Framework via Rapid7's omnibus installer script."""
    writeToLog('[+] Installing Metasploit Framework.')
    try:
        # The `>` redirection works because shell=True runs this via the shell.
        run(['/usr/bin/curl https://raw.githubusercontent.com/rapid7/metasploit-omnibus/master/config/templates/metasploit-framework-wrappers/msfupdate.erb > msfinstall'],shell=True)
        run(['/usr/bin/chmod 755 msfinstall'],shell=True)
        run(['./msfinstall'],shell=True)
        writeToLog('[+] MSF Installed Successfully.')
    except Exception as e:
        writeToLog('[-] Something went wrong during the MSF install. Error: ' + str(e))
# Install wordlists
# Git clone the default wordlists
# Add Rockyou2021
# Add fuzzing list for burp/SQLI (xplatform.txt)
def installWordlists():
    # Error handling using git in this way (with run) sucks.
    """Clone Kali's default wordlists and add the xplatform SQLI list."""
    writeToLog('[*] Installing Wordlists to /usr/share/wordlists')
    makedirs('/usr/share/wordlists/', exist_ok=True)
    try:
        run(['/usr/bin/git clone https://github.com/3ndG4me/KaliLists.git /usr/share/wordlists/'],shell=True)
        run(['/usr/bin/rm /usr/share/wordlists/README.md'],shell=True)
        # NOTE(review): assumes rockyou.txt.gz sits at the repo root -- verify
        # against the KaliLists repository layout.
        run(['/usr/bin/gunzip /usr/share/wordlists/rockyou.txt.gz'],shell=True)
        writeToLog('[+] Kali default wordlists added and unpacked.')
    except Exception as e:
        writeToLog('[-] There was an error installing Kali default wordlists. Error: ' + str(e))
    try:
        run(['/usr/bin/wget https://raw.githubusercontent.com/fuzzdb-project/fuzzdb/master/attack/sql-injection/detect/xplatform.txt \
-O /usr/share/wordlists/xplatform.txt'],shell=True)
        writeToLog('[+] Xplatform.txt SQLI Validation list added.')
    except Exception as e:
        writeToLog('[-] There was an error adding xplatform.txt. Error: ' + str(e))
#Install exploit-db
def installExploitDb():
    """Clone exploitdb to /opt and expose searchsploit on the PATH, then update."""
    writeToLog('[*] Installing ExploitDB.')
    try:
        run(['/usr/bin/git clone https://github.com/offensive-security/exploitdb.git /opt/exploitdb'],shell=True)
        run(['/usr/bin/ln -sf /opt/exploitdb/searchsploit /usr/local/bin/searchsploit'],shell=True)
        writeToLog('[+] Exploit DB Added.')
    except Exception as e:
        writeToLog('[-] There was an error installing ExploitDB. Error: ' + str(e))
    try:
        writeToLog('[*] Updating ExploitDB...')
        run(['/usr/local/bin/searchsploit -u'],shell=True)
        writeToLog('[+] Exploit DB Updated.')
    except Exception as e:
        writeToLog('[-] There was an error updating ExploitDB. Error: ' + str(e))
# elasticsearch containers?
# powershell Core
# REMnux already installs it.
#def installPosh():
# writeToLog('[*] Installing Powershell.')
# try:
# run(['/usr/bin/apt-get update\
# && /usr/bin/apt-get install -y wget apt-transport-https software-properties-common\
# && /usr/bin/wget -q https://packages.microsoft.com/config/ubuntu/20.04/packages-microsoft-prod.deb\
# && /usr/bin/dpkg -i packages-microsoft-prod.deb\
# && /usr/bin/apt-get update\
# && /usr/bin/add-apt-repository universe\
# && /usr/bin/apt-get install -y powershell'],shell=True)
# writeToLog('[+] Powershell installed.')
# except Exception as e:
# writeToLog('[-] There was an error installing Powershell. Error: ' + str(e))
# Install Impacket
def installImpacket():
    """Clone SecureAuth's impacket to /opt and pip-install it."""
    writeToLog('[*] Installing Impacket.')
    try:
        run(['/usr/bin/git clone https://github.com/SecureAuthCorp/impacket.git /opt/impacket'],shell=True)
        run(['/usr/bin/python3 -m pip install /opt/impacket/.'],shell=True)
        # It seems that it takes running this twice to get it to complete
        run(['/usr/bin/python3 -m pip install /opt/impacket/.'],shell=True)
        writeToLog('[+] Impacket Installed.')
    except Exception as e:
        writeToLog('[-] There was an error installing Impacket. Error: ' + str(e))
# enum4Linux
def installEnum():
    """Clone classic enum4linux and link its Perl script onto the PATH."""
    writeToLog('[*] Installing Enum4Linux.')
    try:
        run(['/usr/bin/git clone https://github.com/CiscoCXSecurity/enum4linux.git /opt/enum4linux'],shell=True)
        run(['/usr/bin/ln -sf /opt/enum4linux/enum4linux.pl /usr/local/bin/enum4linux.pl'],shell=True)
        writeToLog('[+] Enum4Linux Installed.')
    except Exception as e:
        writeToLog('[-] There was an error installing Enum4Linux. Error: ' + str(e))
# enum4linux
def installEnumNG():
    """Clone enum4linux-ng and symlink its script onto the local PATH."""
    writeToLog('[*] Installing Enum4Linux-ng.')
    try:
        for step in (
            '/usr/bin/git clone https://github.com/cddmp/enum4linux-ng /opt/enum4linux-ng',
            '/usr/bin/ln -sf /opt/enum4linux-ng/enum4linux-ng.py /usr/local/bin/enum4linux-ng.py',
        ):
            run([step], shell=True)
        writeToLog('[+] Enum4Linux-ng Installed.')
    except Exception as e:
        writeToLog('[-] There was an error installing Enum4Linux-ng. Error: ' + str(e))
# Install WebShells
def installWebShells():
    """Clone Kali's webshell collection into /usr/share/webshells."""
    writeToLog('[*] Installing Kali\'s Webshells')
    try:
        run(['/usr/bin/git clone https://gitlab.com/kalilinux/packages/webshells.git /usr/share/webshells'],shell=True)
        writeToLog('[+] Kali\'s WebShells Cloned to /usr/share/webshells')
    except Exception as e:
        # BUG FIX: the error message previously blamed Enum4Linux (copy/paste slip).
        writeToLog('[-] There was an error installing Kali\'s WebShells. Error: ' + str(e))
# Install Windows Resources
def installWindowsResources():
    """Clone Kali's windows-binaries collection into /usr/share/windows-resources."""
    writeToLog('[*] Installing Kali\'s Windows Resources')
    try:
        run(['/usr/bin/git clone https://gitlab.com/kalilinux/packages/windows-binaries.git /usr/share/windows-resources'],shell=True)
        # BUG FIX: the success message previously reported /usr/share/webshells,
        # but the clone target above is /usr/share/windows-resources.
        writeToLog('[+] Kali\'s Windows Resources Cloned to /usr/share/windows-resources')
    except Exception as e:
        # BUG FIX: the error message previously blamed Enum4Linux (copy/paste slip).
        writeToLog('[-] There was an error installing Kali\'s Windows Resources. Error: ' + str(e))
# Install Bloodhound
def installBloodhound():
    """Scrape GitHub for the newest BloodHound Linux zip and unpack it to /opt."""
    writeToLog('[*] Finding latest Blood Hound Release.')
    try:
        releaseLines = get('https://github.com/BloodHoundAD/BloodHound/releases/latest').text.splitlines()
        zipLines = [line for line in releaseLines if "BloodHound-linux-x64.zip" in line]
        # The href is the second double-quoted token on the matching line.
        latestBloodHoundZip = zipLines[0].split('"')[1]
        writeToLog('[+] latest Blood Hound Zip at: ' + latestBloodHoundZip)
    except Exception as e:
        writeToLog('[-] latest Blood Hound Zip not found. Error: ' + str(e))
        return
    writeToLog('[*] Installing Bloodhound...')
    try:
        run(['/usr/bin/curl -Lo /tmp/bloodhound.zip https://github.com' + latestBloodHoundZip],shell=True)
        run(['/usr/bin/unzip -o /tmp/bloodhound.zip -d /opt/'],shell=True)
    except Exception as e:
        writeToLog('[-] Bloodhound not installed. Error: ' + str(e))
# Find and install latest Zaproxy
def installZaproxy():
    """Scrape GitHub for the newest Zaproxy .deb and install it with dpkg."""
    writeToLog('[*] Finding latest Zaproxy Release.')
    try:
        releaseLines = get('https://github.com/zaproxy/zaproxy/releases/latest').text.splitlines()
        debLines = [line for line in releaseLines if "_all.deb" in line]
        # The href is the second double-quoted token on the matching line.
        latestZapDeb = debLines[0].split('"')[1]
        writeToLog('[+] latest Zaproxy Zip at: ' + latestZapDeb)
    except Exception as e:
        writeToLog('[-] latest Zaproxy Zip not found. Error: ' + str(e))
        return
    writeToLog('[*] Installing Zaproxy...')
    try:
        run(['/usr/bin/curl -Lo /tmp/zaproxy.deb ' + latestZapDeb],shell=True)
        run(['/usr/bin/dpkg -i /tmp/zaproxy.deb'],shell=True)
    except Exception as e:
        writeToLog('[-] Zaproxy not installed. Error: ' + str(e))
def installZeek():
    """Install Zeek from the openSUSE security repo and put its bin dir on PATH."""
    # install Zeek from the upstream apt repository
    writeToLog('[*] Installing Zeek...')
    try:
        run(['/usr/bin/echo \'deb http://download.opensuse.org/repositories/security:/zeek/xUbuntu_20.04/ /\' | sudo tee /etc/apt/sources.list.d/security:zeek.list'],shell=True)
        run(['/usr/bin/curl -fsSL https://download.opensuse.org/repositories/security:zeek/xUbuntu_20.04/Release.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/security_zeek.gpg > /dev/null'],shell=True)
        run(['/usr/bin/apt update'],shell=True)
        run(['/usr/bin/apt -y install zeek'],shell=True)
    except Exception as e:
        writeToLog('[-] Zeek not installed. Error: ' + str(e))
    # add /opt/zeek/bin to the path permanently
    try:
        writeToLog('[i] Writing Zeeks path to the current users bashrc. You may need to manually add: \'export PATH=$PATH:/opt/zeek/bin\' to yours.')
        run(['/usr/bin/echo "export PATH=$PATH:/opt/zeek/bin" >> ~/.bashrc'],shell=True)
        # BUG FIX: `run(['export PATH=...'], shell=True)` only changed a
        # throwaway child shell's environment and was a no-op for this
        # process. Mutate os.environ instead, so commands launched later in
        # this script can find the zeek binaries.
        import os
        os.environ['PATH'] = os.environ.get('PATH', '') + ':/opt/zeek/bin'
    except Exception as e:
        writeToLog('[-] Zeek path not added. Error: ' + str(e))
# display log
def displayLog():
    """Replay every recorded log line (from the LOG file) to stdout."""
    print('[*] The following activities were logged:\n')
    with open(LOG,'r') as log:
        for entry in log.readlines():
            print(entry.strip())
# display fortress artwork
def displayImage():
    """Best-effort: download the fortress artwork, show it with eog, then delete it."""
    try:
        run(['/usr/bin/curl -Lo ' + FORTRESS_DIR + 'fortress.jpg https://dfirmadness.com/wp-content/uploads/2021/06/infosec-fortress-2500.jpg'],shell=True)
        run(['/usr/bin/eog ' + FORTRESS_DIR + 'fortress.jpg'],shell=True)
        run(['/usr/bin/rm ' + FORTRESS_DIR + 'fortress.jpg'],shell=True)
    except Exception:
        # BUG FIX: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed; display stays best-effort for
        # ordinary errors.
        return
# display message about updating ZAP and Burp after reboot
def giveUserNextSteps():
    """Print post-install guidance and pause until the user acknowledges."""
    print(GREEN + '[+]' + '-----------------------------------------------------------------------------------' + NOCOLOR)
    print(GREEN + '[+]' + '------------------------ ! Script Complete ! --------------------------------------' + NOCOLOR)
    print('\n\n[!] REBOOT the system. After Reboot you will want to run Burp, Zap and Ghidra. Each will ask you to update.\
    \n    You should update these. If they have you download a .deb file you simple run ' + GREEN + 'dpkg -i foo.deb' + NOCOLOR + '.\
    \n    Don\'t forget to run: \'echo "export PATH=$PATH:/opt/zeek/bin" >> ~/.bashrc\' to add the Zeek bins to your user (non-root) path')
    # Pause so the user can read the notes before the script finishes.
    # BUG FIX: dropped the unused `nullInput` local; only the pause matters.
    input('Hit Enter.')
# Re-enable unattended upgrade
#Only needed if auto kill of unattended upgrades is added
def main():
    """Drive the full install: pre-flight checks, tool installs, then wrap-up."""
    # Pre-flight: privileges, connectivity, user notices, workspace and log.
    checkIfRoot()
    checkForInternet()
    initNotice()
    informAboutUnattendedUpgade()
    createFortressDir(FORTRESS_DIR)
    startLogFile()
    freeSpaceStart()
    # OS update followed by the individual tool installers, in script order.
    updateOS()
    installStarterPackages()
    installREMnux()
    installSIFTPackages()
    installAPTandSNAPPackages()
    swapNetcat()
    installMSF()
    installWordlists()
    installExploitDb()
    installImpacket()
    installEnum()
    installEnumNG()
    installWebShells()
    installWindowsResources()
    installBloodhound()
    installZaproxy()
    installZeek()
    # Wrap-up: report disk usage delta, replay the log, show artwork, advise user.
    freeSpaceEnd()
    displayLog()
    displayImage()
    giveUserNextSteps()
    # NOTE(review): exit() is the site-module helper; sys.exit(0) is the
    # conventional choice for scripts — confirm before changing.
    exit(0)
# BUG FIX: main() was previously also called unconditionally, so merely
# importing this module kicked off the whole install. Run only when executed
# as a script.
if __name__== "__main__":
    main()
| 22,630 | 7,480 |
class Constants(object):
    """Namespace whose attributes can be added once but never rebound or deleted."""

    class ConstError(TypeError):
        """Raised on any attempt to rebind or unbind an existing constant."""
        pass

    def __init__(self, **kwargs):
        # Seed initial values through object.__setattr__ to bypass our guard.
        for name, value in list(kwargs.items()):
            super(Constants, self).__setattr__(name, value)

    def __setattr__(self, name, value):
        # BUG FIX: the original tested `name in self.__dist__` (a typo for
        # __dict__), which raised AttributeError on every assignment instead
        # of enforcing const semantics.
        if name in self.__dict__:
            raise self.ConstError("Can't rebind const(%s)" % name)
        self.__dict__[name] = value

    def __delattr__(self, name):
        if name in self.__dict__:
            raise self.ConstError("Can't unbind const(%s)" % name)
        raise NameError(name)
# Constant group of table names; presumably storage table identifiers —
# confirm at call sites.
Tables = Constants(
    BOXES="boxes"
)
# Default image/tag string for an engine box.
DefaultEngineBox = 'turbulent/substance-box:1.0'
# Lifecycle states an engine can be in.
EngineStates = Constants(
    RUNNING="running",
    STOPPED="stopped",
    SUSPENDED="suspended",
    UNKNOWN="unknown",
    INEXISTENT="inexistent"
)
# Sync direction markers: push (UP), pull (DOWN), bidirectional (BOTH).
Syncher = Constants(
    UP=">>",
    DOWN="<<",
    BOTH="<>"
)
# Supported container orchestrator backends.
Orchestrators = Constants(
    DOCKWRKR="dockwrkr",
    COMPOSE="docker-compose"
)
| 955 | 342 |
import time
import json
import sys
from pathlib import Path
from pprint import pprint
import wikidata_utils
from graphviz import Digraph
# Newline literal kept in a name: f-string expressions below use
# `something.split(NL)` because a backslash cannot appear inside an
# f-string expression before Python 3.12.
NL = "\n"
def find_subclasses_between(subclass, superclass):
    """Return the QIDs of entities between `subclass` and `superclass`.

    Queries Stardog via wikidata_utils, extracts QIDs from the bindings,
    removes the superclass itself (SPARQL includes it), and returns the
    chain reversed so callers can feed it to graphviz bottom-up.
    """
    # Query Stardog for subclasses
    subclassesJSON = wikidata_utils.query_subclasses_stardog(superclass, subclass)[
        "results"
    ]["bindings"]
    subclassesList = []
    try:
        # Parse JSON for results
        subclassesList = [result["entity"]["value"] for result in subclassesJSON]
        # Look for QID in all the strings
        subclassesList = wikidata_utils.regex_match_QID(subclassesList)
    except Exception:
        # BUG FIX: narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; malformed bindings still fall back
        # to an empty list.
        pass
    print(f"Subclasses between '{subclass}' and '{superclass}':\n{subclassesList}")
    # Remove superclass from the list (it is included by SPARQL)
    try:
        subclassesList.remove(superclass)
    except ValueError:
        # BUG FIX: narrowed from a bare `except:`; only a missing element is
        # expected here.
        pass
    # Return reversed list so we can use it immediately in the right order with graphviz
    return list(reversed(subclassesList))
def graph_from_superclasses_dict(treesDictFilename, **kwargs):
    """Render one graphviz Digraph per root superclass found in a trees dict.

    Loads `treesDictFilename` (JSON mapping QID -> {"superclasses": [...],
    "subclasses": [...]}), keeps only root entities that have subclasses,
    optionally lets the user interactively pick a subset, then draws each
    root's subclass hierarchy (querying Stardog for intermediary entities)
    into output/dots/dots_<timestamp>/.

    Keyword args:
        rankingEntities: collection of QIDs; when given, intermediary nodes
            are filtered to ranking members and the output filename differs.
            NOTE(review): passing None makes `set(rankingEntities)` below
            raise TypeError — the caller in this file always supplies it;
            confirm before reusing elsewhere.
        useRandomColors: when truthy, each subclass branch gets a random
            edge/node color from wikidata_utils.random_color_hex().
    """
    # PROBLEM: Given a dictionary with entities, their superclasses and subclasses, create a "maximal" graph that displays the relation between entities
    # Timestamp groups all graphs from this run under one output directory.
    dotsTime = int(time.time())
    # Optional argument; if it exists, will include only entities from the ranking
    rankingEntities = kwargs.get("rankingEntities", None)
    useRandomColors = kwargs.get("useRandomColors", None)
    remainingEntities = set(rankingEntities)
    totalEntities = len(remainingEntities)
    with open(Path(treesDictFilename), "r+", encoding="utf8") as dictFile:
        entitiesDict = json.load(dictFile)
        # Filter out entities without any subclasses in the ranking
        # Entities of interest here are entities without superclasses or whose superclasses are themselves
        entitiesDict = dict(
            filter(
                lambda x: x[1]["subclasses"] != []
                and (x[1]["superclasses"] == [] or [x[0]] == x[1]["superclasses"]),
                entitiesDict.items(),
            )
        )
    # Interactive selection: the user may repeatedly name entities to keep;
    # an empty answer stops the prompt loop.
    keepEntity = "1"
    keptDict = {}
    pprint(entitiesDict.keys())
    while(len(keepEntity) > 0):
        if not keptDict:
            keepEntity = input("What entity to generate graphs for? [Enter] for All: ")
        else:
            keepEntity = input("What entity to generate graphs for? [Enter] to leave: ")
        if keepEntity:
            kept = entitiesDict.pop(keepEntity)
            keptDict[keepEntity] = kept
        else:
            break
        print(f"Kept {keepEntity}")
    if keptDict:
        entitiesDict = keptDict
    # Number of entities to be processed
    print(f"{len(entitiesDict)} superclasses")
    # Tracks which QIDs already have a node, across the whole run.
    nodesDict = {}
    for entity in entitiesDict.items():
        # Get label for each main entity
        entityLabel = wikidata_utils.get_entity_label(entity[0])
        nSubclasses = len(entity[1]["subclasses"])
        print(f"\nBuilding graph for {entity[0]} ({entityLabel}).")
        print(f"{entityLabel.capitalize()} has at least {nSubclasses} subclasses from the ranking.\n")
        # Create graph for each main entity
        # Spacing is widened for big graphs so labels stay readable.
        nodesep = "0.1"
        ranksep = "0.5"
        if nSubclasses > 50:
            nodesep = "0.15"
            ranksep = "1"
        dot = Digraph(
            comment=entityLabel,
            strict=True,
            encoding="utf8",
            graph_attr={"nodesep": nodesep, "ranksep": ranksep, "rankdir": "BT"},
        )
        # Create a bigger node for each main entity
        dot.node(f"{entityLabel}\n{entity[0]}", fontsize="24")
        # Add entity QID to nodes' dict
        nodesDict[entity[0]] = True
        print(
            f"{totalEntities - len(remainingEntities)} entities (of {totalEntities}) from the ranking processed so far."
        )
        for subclass in entity[1]["subclasses"]:
            # Get label for each subclass
            subclassLabel = wikidata_utils.get_entity_label(subclass)
            # If label is unavailable, use ID
            if subclassLabel != "Label unavailable":
                subclassNodeLabel = f"{subclassLabel}\n{subclass}"
            else:
                subclassNodeLabel = subclass
            print(
                f'Finding subclasses between "{subclassLabel}" and "{entityLabel}"...'
            )
            # Get random color for nodes and edges
            argsColor = "#111111"
            if useRandomColors:
                argsColor = wikidata_utils.random_color_hex()
            edgeLabel = None
            # Only process each subclass once per run.
            if not nodesDict.get(subclass, False):
                # Create subclass node
                dot.node(f"{subclassLabel}\n{subclass}", color=argsColor)
                # Add subclass QID to nodes' dict
                nodesDict[subclass] = True
                # Query intermediary entities between "subclass" and "entity" (returns ordered list)
                subclassesBetween = find_subclasses_between(subclass, entity[0])
                # Default styling for intermediary subclasses
                subclassNodeArgs = {
                    "shape": "square",
                    "color": "#777777",
                    "fontsize": "10",
                    "fontcolor": "#555555",
                }
                # remainingEntitiesLastIteration = {totalEntities - len(remainingEntities)}
                if rankingEntities:
                    # Filter out subclasses that aren't from the ranking
                    subclassesBetween = {
                        subclass: True
                        for subclass in subclassesBetween
                        if subclass in rankingEntities
                    }
                    print(f"Subclasses between: {subclassesBetween}")
                    # Use no particular styling instead
                    subclassNodeArgs = {}
                    # edgeLabel = "P279+"
                if subclassesBetween:
                    # Get labels for each subclass in between
                    subclassLabels = [
                        wikidata_utils.get_entity_label(subclass)
                        for subclass in list(subclassesBetween)
                    ]
                    # Connect "main" subclass to its immediate superclass
                    print(
                        f"(First) Marking {subclassNodeLabel.split(NL)[0]} ({subclassNodeLabel.split(NL)[1]}) as subclass of {subclassLabels[-1]} ({list(subclassesBetween)[-1]})"
                    )
                    dot.edge(
                        subclassNodeLabel,
                        f"{subclassLabels[-1]}\n{list(subclassesBetween)[-1]}",
                        label=edgeLabel,
                        color=argsColor,
                        arrowhead="o",
                    )
                    try:
                        remainingEntities.remove(list(subclassesBetween)[-1])
                    except KeyError:
                        pass
                    # Create nodes for all intermediary entities first.
                    for i, subclassBetween in enumerate(subclassesBetween):
                        if not nodesDict.get(subclassBetween, False):
                            # Create node for each subclass
                            dot.node(
                                f"{subclassLabels[i]}\n{subclassBetween}",
                                **subclassNodeArgs,
                                color=argsColor,
                            )
                            # Add intermediary entity QID to nodes' dict
                            nodesDict[subclassBetween] = True
                    # Then connect each intermediary to every superclass the
                    # transitive Stardog check confirms.
                    for i, subclassBetween in enumerate(list(subclassesBetween)[:-1]):
                        # Connect each subclass to its immediate superclass
                        # First, check if they should be connected
                        for j, entityAbove in enumerate(list(subclassesBetween)[i:]):
                            checkSubclass = list(subclassesBetween)[i]
                            checkSubclassLabel = subclassLabels[i]
                            if i == 0:
                                checkSubclass = subclass
                                checkSubclassLabel = subclassLabel
                            isSubclass = wikidata_utils.query_subclass_stardog(
                                entityAbove, checkSubclass, transitive=True
                            )["results"]["bindings"][0]["isSubclass0"]["value"]
                            isSubclass = isSubclass.lower() == "true"
                            print(
                                f"    (For) Is {checkSubclass} subclass of {entityAbove}? {isSubclass}"
                            )
                            if isSubclass:
                                print(
                                    f"    Marking {checkSubclassLabel} ({checkSubclass}) as subclass of {subclassLabels[i + j]} ({entityAbove})"
                                )
                                dot.edge(
                                    f"{checkSubclassLabel}\n{checkSubclass}",
                                    f"{subclassLabels[i + j]}\n{entityAbove}",
                                    label=edgeLabel,
                                    color=argsColor,
                                    arrowhead="o",
                                )
                                try:
                                    remainingEntities.remove(checkSubclass)
                                except KeyError:
                                    pass
                                try:
                                    remainingEntities.remove(entityAbove)
                                except KeyError:
                                    pass
                    # if totalEntities - len(remainingEntities) > remainingEntitiesLastIteration:
                    print(
                        f"{totalEntities - len(remainingEntities)} entities (of {totalEntities}) from the ranking processed so far."
                    )
                    # Connect the topmost superclass to the main superclass, i.e., the entity
                    print(
                        f"(Last) Marking {subclassLabels[0]} as subclass of {entityLabel}"
                    )
                    dot.edge(
                        f"{subclassLabels[0]}\n{list(subclassesBetween)[0]}",
                        f"{entityLabel}\n{entity[0]}",
                        label=edgeLabel,
                        color=argsColor,
                        arrowhead="o",
                    )
                else:
                    # If there are no subclasses in between, connect subclass and entity directly
                    print(
                        f"Joining {subclassNodeLabel.split(NL)[0]} ({subclassNodeLabel.split(NL)[1]}) and {entityLabel} ({entity[0]})"
                    )
                    dot.edge(
                        subclassNodeLabel,
                        f"{entityLabel}\n{entity[0]}",
                        label=edgeLabel,
                        color=argsColor,
                        arrowhead="o",
                    )
                    try:
                        remainingEntities.remove(subclass)
                    except KeyError:
                        pass
        # Not having graphviz properly installed might raise an exception
        try:
            if rankingEntities:
                u = dot.unflatten(stagger=5)  # Break graphs into more lines
                u.render(f"output/dots/dots_{dotsTime}/AP1_{dot.comment}.gv")
            else:
                u = dot.unflatten(stagger=5)  # Break graphs into more lines
                u.render(
                    f"output/dots/dots_{dotsTime}/AP1_{dot.comment}_intermediary.gv"
                )
        # NOTE(review): bare except — also swallows KeyboardInterrupt; kept
        # as-is to preserve behavior, but consider `except Exception:`.
        except:
            print("\nVerify your Graphviz installation or Digraph args!\n")
            pass
        try:
            remainingEntities.remove(entity[0])
        except KeyError:
            pass
    # Any ranking entities never placed in a graph are reported at the end.
    print(remainingEntities)
def get_ranking_entity_set(rankingFile):
    """Parse a ranking file and return its QIDs as a set (duplicates dropped)."""
    return set(parse_ranking_file(rankingFile))
def parse_ranking_file(rankingFile):
    """Read an open ranking file and return the QIDs found on its lines, in order."""
    stripped = [line.strip() for line in rankingFile.readlines()]
    # Look for the QID in all strings
    return wikidata_utils.regex_match_QID(stripped)
if __name__ == "__main__":
    # Optional CLI argument selects the ranking file.
    # NOTE(review): this reads argv[2], skipping argv[1] — confirm the
    # intended invocation shape before changing.
    try:
        fileIn = Path(sys.argv[2])
    except IndexError:
        # BUG FIX: narrowed from a bare `except:`; only a missing argument
        # should fall back to the default ranking file.
        fileIn = Path("output/ranking/AP1_minus_Q23958852_ranking.txt")
    with open(fileIn, "r") as rankingFile:
        entities = parse_ranking_file(rankingFile)
        # entitiesSet = get_ranking_entity_set(rankingFile)
        # graph_from_superclasses_dict(
        #     "output/AP1_occurrence.json", rankingEntities=entities
        # )
        graph_from_superclasses_dict(
            "output/AP1_trees.json", rankingEntities=entities
        )
| 12,482 | 3,276 |
from typing import Any, Iterator, List
class SkillInfo:
    """Read-only wrapper over one skill's raw frame-data record.

    The constructor takes the raw keys emitted by the data source
    ("Command", "Hit level", ...) and exposes them as snake_case properties.
    """

    # property name -> raw key in the incoming record
    _KEYS = {
        "command": "Command",
        "hit_level": "Hit level",
        "damage": "Damage",
        "start_up_frame": "Start up frame",
        "block_frame": "Block frame",
        "hit_frame": "Hit frame",
        "counter_hit_frame": "Counter hit frame",
        "notes": "Notes",
    }

    def __init__(self, **data: Any) -> None:
        # A missing raw key raises KeyError here, exactly as the eager
        # attribute assignments it replaces did.
        self.__fields = {name: data[raw] for name, raw in self._KEYS.items()}

    @property
    def command(self) -> str:
        return self.__fields["command"]

    @property
    def hit_level(self) -> str:
        return self.__fields["hit_level"]

    @property
    def damage(self) -> str:
        return self.__fields["damage"]

    @property
    def start_up_frame(self) -> str:
        return self.__fields["start_up_frame"]

    @property
    def block_frame(self) -> str:
        return self.__fields["block_frame"]

    @property
    def hit_frame(self) -> str:
        return self.__fields["hit_frame"]

    @property
    def counter_hit_frame(self) -> str:
        return self.__fields["counter_hit_frame"]

    @property
    def notes(self) -> str:
        return self.__fields["notes"]
class SkillData:
    """API response holding a status code plus one raw skill record."""

    def __init__(self, **data: Any) -> None:
        self.__status, self.__info = data["status"], data["info"]

    @property
    def status(self) -> int:
        return self.__status

    @property
    def info(self) -> SkillInfo:
        # A fresh SkillInfo is built from the stored raw dict on every access.
        return SkillInfo(**self.__info)
class AllSkillsData:
    """API response holding a status code plus the full list of raw skill records."""

    def __init__(self, **data: Any) -> None:
        self.__status = data["status"]
        self.__skill_list = data["skill_list"]

    @property
    def status(self) -> int:
        return self.__status

    @property
    def skill_list(self) -> Iterator[SkillInfo]:
        # Wrap rows lazily so callers can stream without building a list.
        return (SkillInfo(**skill) for skill in self.__skill_list)
class GalleryPost:
    """One gallery post: response status plus title, author and body lines.

    NOTE(review): the payload key is literally "auhor" (sic); it must match
    whatever the upstream API emits, so the spelling is preserved here.
    """

    def __init__(self, **data: Any) -> None:
        self.__status = data["status"]
        post = data["content"]
        self.__title = post["title"]
        self.__auhor = post["auhor"]
        self.__content = post["content"]

    @property
    def status(self) -> int:
        return self.__status

    @property
    def title(self) -> str:
        return self.__title

    @property
    def author(self) -> str:
        return self.__auhor

    @property
    def content(self) -> List[str]:
        return self.__content
class ListsPostInfo:
    """One row of a gallery listing: id, title, writer, date and counters."""

    # Raw keys consumed from the incoming record, in the original order.
    _FIELDS = ("id", "title", "writer", "date", "recommend", "reply", "views")

    def __init__(self, **data: Any) -> None:
        # A missing key raises KeyError, as the eager assignments it replaces did.
        self.__row = {field: data[field] for field in self._FIELDS}

    @property
    def id(self) -> int:
        return self.__row["id"]

    @property
    def title(self) -> str:
        return self.__row["title"]

    @property
    def writer(self) -> str:
        return self.__row["writer"]

    @property
    def date(self) -> str:
        return self.__row["date"]

    @property
    def recommend(self) -> int:
        return self.__row["recommend"]

    @property
    def reply(self) -> int:
        return self.__row["reply"]

    @property
    def views(self) -> int:
        return self.__row["views"]
class GalleryList:
    """Paged gallery listing: status, total row count, and the raw rows."""

    def __init__(self, **data: Any) -> None:
        self.__status = data["status"]
        self.__total = data["total"]
        self.__lists = data["lists"]

    @property
    def status(self) -> int:
        return self.__status

    @property
    def total(self) -> int:
        return self.__total

    @property
    def lists(self) -> Iterator[ListsPostInfo]:
        for post in self.__lists:
            # BUG FIX: each row dict must be splatted into keyword arguments.
            # `ListsPostInfo(post)` passed it positionally to a **kwargs-only
            # __init__ and raised TypeError on the first iteration.
            yield ListsPostInfo(**post)
| 3,620 | 1,095 |
# Frames
# NOTE(review): presumably the names of HTML frames on the target page
# (content/update frame and postback frame) — confirm at call sites.
UPDATE_FRAME = 'update'
POSTBACK_FRAME = 'postback'
# Login
# NOTE(review): presumably the form control names submitted with the login
# request — confirm against the login page markup.
USERNAME_LOGIN_FIELD = 'OPERCODE'
PASSWORD_LOGIN_FIELD = 'PASSWD'
LOGIN_BUTTON = 'LogInSub'
| 162 | 75 |
import weewx
class MyTypes(object):
    """weewx extension that derives the 'dewpoint' observation from a record.

    NOTE(review): relies on weewx.wxformulas being reachable through the bare
    `import weewx` at the top of the file — an explicit
    `import weewx.wxformulas` may be required; confirm.
    """

    def get_value(self, obs_type, record, db_manager):
        """Return dewpoint for the record's unit system; defer other types."""
        if obs_type != 'dewpoint':
            # Unknown observation types are delegated back to weewx.
            raise weewx.UnknownType(obs_type)
        unit_system = record['usUnits']
        temperature = record.get('outTemp')
        humidity = record.get('outHumidity')
        if unit_system == weewx.US:
            return weewx.wxformulas.dewpointF(temperature, humidity)
        if unit_system == weewx.METRIC or unit_system == weewx.METRICWX:
            return weewx.wxformulas.dewpointC(temperature, humidity)
        raise ValueError("Unknown unit system %s" % record['usUnits'])
class MyVector(object):
    """weewx aggregate-extension stub for 'ch'-prefixed observation types."""

    def get_aggregate(self, obs_type, timespan,
                      aggregate_type=None,
                      aggregate_interval=None):
        """Handle 'ch*' observation types (stub); defer everything else to weewx."""
        # BUG FIX: str has no starts_with() method — the original raised
        # AttributeError on every call; the correct name is startswith().
        if obs_type.startswith('ch'):
            pass  # placeholder: aggregation not implemented yet (returns None)
        else:
            raise weewx.UnknownType(obs_type)