text stringlengths 38 1.54M |
|---|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
from django.db import models
from pygments.lexers import get_all_lexers
from pygments.styles import get_all_styles
from pygments.lexers import get_lexer_by_name
from pygments.formatters.html import HtmlFormatter
from pygments import highlight
from django.contrib.auth.models import User
class Snippet(models.Model):
    """A stored text snippet owned by a user, ordered oldest-first."""

    created = models.DateTimeField(auto_now_add=True)  # set once on insert
    title = models.CharField(max_length=100, blank=True, default='')
    message = models.TextField()
    # on_delete=models.CASCADE reproduces the historical default behaviour
    # and keeps the model valid on Django >= 2.0, where it is mandatory.
    owner = models.ForeignKey('auth.User', related_name='snippets',
                              on_delete=models.CASCADE)

    class Meta:
        # Oldest snippets first.
        ordering = ('created',)

    def save(self, *args, **kwargs):
        """Persist the snippet (no extra processing beyond the default)."""
        super(Snippet, self).save(*args, **kwargs)
class Friendship(models.Model):
    """A (possibly not yet approved) friend link between two users."""

    created = models.DateTimeField(auto_now_add=True, editable=False)
    approved = models.BooleanField(default=False)
    # on_delete=models.CASCADE reproduces the historical default behaviour
    # and keeps the model valid on Django >= 2.0, where it is mandatory.
    creator = models.ForeignKey(User, related_name="friends",
                                on_delete=models.CASCADE)
    friend = models.ForeignKey(User, related_name="friend_id",
                               on_delete=models.CASCADE)
from .filters import filters_rotated, filters_learnable
__all__ = ['filters_rotated', 'filters_learnable']
|
## png_to_copper_list.py
import png
import math
import colorsys
import codecs
filename_in = 'ilkke_font'
filename_out = '../../source/fonts'
def quantize_color_as_OCS(_color):
    """Quantize an (R, G, B) triple for the Amiga OCS palette.

    Each of the three channels is truncated to the nearest lower even
    value (losing the least-significant bit), and a new 3-element list
    is returned; the input is not modified.
    """
    return [2 * int(_color[channel] / 2) for channel in range(3)]
def main():
    """Convert a PNG bitmap font into C source/header tables.

    Reads ``<filename_in>.png`` (a palettised image whose first pixel row
    uses palette index 0 to mark glyph column boundaries) and writes
    ``<filename_out>.c`` / ``<filename_out>.h`` containing the glyph
    character list and the x position of each glyph.

    Returns 1 on success, 0 when the image cannot be processed.
    """
    print('Font converter')
    ## Creates the header
    print('Output in : ' + filename_out + '.c')
    f = codecs.open(filename_out + '.c', 'w')
    f.write('/* Font descriptor */' + '\n\n')
    f.write('const char ' + filename_in + '_glyph_array[] = ' + '\n')
    f.write('{' + '\n')
    alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXZ'
    ## \ = |
    ## `= ^
    out_str = ''
    _count = 0
    # Emit the glyphs as escaped C character literals, 20 per output line.
    for _letter in alphabet:
        _letter = _letter.replace('\\', '\\\\')
        _letter = _letter.replace('"', '\\"')
        _letter = _letter.replace('\'', '\\\'')
        # print(_letter)
        out_str += '\'' + _letter + '\', '
        _count += 1
        if _count >= 20:
            _count = 0
            f.write('\t' + out_str + '\n')
            out_str = ''
    # Flush the last (partial) line of glyphs, then the C string terminator.
    if out_str != '':
        f.write('\t' + out_str + '\n')
    f.write('\t\'' + '\\0' + '\'\n')
    f.write('};' + '\n')
    f.write('\n')
    print('Loading bitmap : ' + filename_in + '.png')
    ## Loads the PNG image
    png_buffer = png.Reader(filename = filename_in + '.png')
    b = png_buffer.read()
    # print(b)
    ## Get size & depth
    w = b[0]
    h = b[1]
    print('w = ' + str(w) + ', h = ' + str(h))
    print('bitdepth = ' + str(b[3]['bitdepth']))
    # Only palettised (indexed) images with <= 8 bits per pixel are supported.
    if b[3]['greyscale']:
        print('!!!Error, cannot process a greyscale image :(')
        return 0
    if b[3]['bitdepth'] > 8:
        print('!!!Error, cannot process a true color image :(')
        return 0
    original_palette = b[3]['palette']
    x_table = []
    buffer_in = list(b[2])
    # Scan the first pixel row: palette index 0 marks a glyph boundary.
    for x in range(0,w):
        current_pixel = buffer_in[0][x]
        if current_pixel == 0:
            x_table.append(x)
    x_table.append(w)
    f.write('const int ' + filename_in + '_x_pos_array[] = ' + '\n')
    f.write('{' + '\n')
    out_str = ''
    _count = 0
    # Emit the x positions, 20 values per output line.
    for x in x_table:
        out_str += str(x) + ', '
        _count += 1
        if _count >= 20:
            _count = 0
            f.write('\t' + out_str + '\n')
            out_str = ''
    if out_str != '':
        f.write('\t' + out_str + '\n')
    f.write('};' + '\n')
    f.close()
    # Write the matching header with extern declarations sized from the data.
    f = codecs.open(filename_out + '.h', 'w')
    f.write('/* Font descriptor headers */' + '\n\n')
    f.write('extern const char ' + filename_in + '_glyph_array[' + str(len(alphabet)) + '];' + '\n')
    f.write('extern const int ' + filename_in + '_x_pos_array[' + str(len(x_table)) + '];' + '\n')
    return 1
main() |
"""
TCP ๆๅก็ซฏ
"""
import socket
# ๅๅปบๆๅก็ซฏ้ไฟกๅฏน่ฑก
server = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
# p้
็ฝฎIP็ซฏๅฃ
SERRVERADDR = ("192.168.15.3", 6666)
server.bind(SERRVERADDR)
# ๅผๅฏ็ๅฌ
server.listen()
print("ๆๅก็ซฏๅฏๅจ")
# ่ทๅๅฎขๆท็ซฏsocket
client,clientaddr = server.accept()
print("ๅฎขๆท็ซฏ",clientaddr,"่ฟๆฅไธไบ")
# ๆฅๅๅฎขๆท็ซฏๅ้็ๆฐๆฎ
BUFFERSIZE = 1024
data = client.recv(BUFFERSIZE)
print(data.decode("utf-8"))
client.send("ไฝ ๅฅฝๅฎขๆท็ซฏ".encode("utf8"))
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from tempest.lib.common.utils import data_utils
from openstackclient.tests.functional.identity.v3 import common
SYSTEM_CLOUD = os.environ.get('OS_SYSTEM_CLOUD', 'devstack-system-admin')
class RegisteredLimitTestCase(common.IdentityTests):
    """Functional tests for the ``registered limit`` CLI commands.

    These run against a live identity service; mutating operations are
    issued through the system-admin cloud (``SYSTEM_CLOUD``).
    """

    def test_registered_limit_create_with_service_name(self):
        """Create a registered limit referring to the service by name."""
        self._create_dummy_registered_limit()

    def test_registered_limit_create_with_service_id(self):
        """Create a registered limit referring to the service by its ID."""
        service_name = self._create_dummy_service()
        raw_output = self.openstack(
            'service show' ' %(service_name)s' % {'service_name': service_name}
        )
        service_items = self.parse_show(raw_output)
        service_id = self._extract_value_from_items('id', service_items)
        raw_output = self.openstack(
            'registered limit create'
            ' --service %(service_id)s'
            ' --default-limit %(default_limit)s'
            ' %(resource_name)s'
            % {
                'service_id': service_id,
                'default_limit': 10,
                'resource_name': 'cores',
            },
            cloud=SYSTEM_CLOUD,
        )
        items = self.parse_show(raw_output)
        registered_limit_id = self._extract_value_from_items('id', items)
        # Always remove the limit, even if the assertion below fails.
        self.addCleanup(
            self.openstack,
            'registered limit delete'
            ' %(registered_limit_id)s'
            % {'registered_limit_id': registered_limit_id},
            cloud=SYSTEM_CLOUD,
        )
        self.assert_show_fields(items, self.REGISTERED_LIMIT_FIELDS)

    def test_registered_limit_create_with_options(self):
        """Create a registered limit with description and region options."""
        service_name = self._create_dummy_service()
        region_id = self._create_dummy_region()
        params = {
            'service_name': service_name,
            'resource_name': 'cores',
            'default_limit': 10,
            'description': 'default limit for cores',
            'region_id': region_id,
        }
        raw_output = self.openstack(
            'registered limit create'
            ' --description \'%(description)s\''
            ' --region %(region_id)s'
            ' --service %(service_name)s'
            ' --default-limit %(default_limit)s'
            ' %(resource_name)s' % params,
            cloud=SYSTEM_CLOUD,
        )
        items = self.parse_show(raw_output)
        registered_limit_id = self._extract_value_from_items('id', items)
        # Always remove the limit, even if the assertion below fails.
        self.addCleanup(
            self.openstack,
            'registered limit delete %(registered_limit_id)s'
            % {'registered_limit_id': registered_limit_id},
            cloud=SYSTEM_CLOUD,
        )
        self.assert_show_fields(items, self.REGISTERED_LIMIT_FIELDS)

    def test_registered_limit_show(self):
        """Show an existing registered limit and verify its fields."""
        registered_limit_id = self._create_dummy_registered_limit()
        raw_output = self.openstack(
            'registered limit show %(registered_limit_id)s'
            % {'registered_limit_id': registered_limit_id}
        )
        items = self.parse_show(raw_output)
        self.assert_show_fields(items, self.REGISTERED_LIMIT_FIELDS)

    def test_registered_limit_set_region_id(self):
        """Update a registered limit's region."""
        region_id = self._create_dummy_region()
        registered_limit_id = self._create_dummy_registered_limit()
        params = {
            'registered_limit_id': registered_limit_id,
            'region_id': region_id,
        }
        raw_output = self.openstack(
            'registered limit set'
            ' %(registered_limit_id)s'
            ' --region %(region_id)s' % params,
            cloud=SYSTEM_CLOUD,
        )
        items = self.parse_show(raw_output)
        self.assert_show_fields(items, self.REGISTERED_LIMIT_FIELDS)

    def test_registered_limit_set_description(self):
        """Update a registered limit's description."""
        registered_limit_id = self._create_dummy_registered_limit()
        params = {
            'registered_limit_id': registered_limit_id,
            'description': 'updated description',
        }
        raw_output = self.openstack(
            'registered limit set'
            ' %(registered_limit_id)s'
            ' --description \'%(description)s\'' % params,
            cloud=SYSTEM_CLOUD,
        )
        items = self.parse_show(raw_output)
        self.assert_show_fields(items, self.REGISTERED_LIMIT_FIELDS)

    def test_registered_limit_set_service(self):
        """Update the service a registered limit applies to."""
        registered_limit_id = self._create_dummy_registered_limit()
        service_name = self._create_dummy_service()
        params = {
            'registered_limit_id': registered_limit_id,
            'service': service_name,
        }
        raw_output = self.openstack(
            'registered limit set'
            ' %(registered_limit_id)s'
            ' --service %(service)s' % params,
            cloud=SYSTEM_CLOUD,
        )
        items = self.parse_show(raw_output)
        self.assert_show_fields(items, self.REGISTERED_LIMIT_FIELDS)

    def test_registered_limit_set_default_limit(self):
        """Update a registered limit's default limit value."""
        registered_limit_id = self._create_dummy_registered_limit()
        params = {
            'registered_limit_id': registered_limit_id,
            'default_limit': 20,
        }
        raw_output = self.openstack(
            'registered limit set'
            ' %(registered_limit_id)s'
            ' --default-limit %(default_limit)s' % params,
            cloud=SYSTEM_CLOUD,
        )
        items = self.parse_show(raw_output)
        self.assert_show_fields(items, self.REGISTERED_LIMIT_FIELDS)

    def test_registered_limit_set_resource_name(self):
        """Update a registered limit's resource name."""
        registered_limit_id = self._create_dummy_registered_limit()
        resource_name = data_utils.rand_name('resource_name')
        params = {
            'registered_limit_id': registered_limit_id,
            'resource_name': resource_name,
        }
        raw_output = self.openstack(
            'registered limit set'
            ' %(registered_limit_id)s'
            ' --resource-name %(resource_name)s' % params,
            cloud=SYSTEM_CLOUD,
        )
        items = self.parse_show(raw_output)
        self.assert_show_fields(items, self.REGISTERED_LIMIT_FIELDS)

    def test_registered_limit_list(self):
        """List registered limits and verify the table structure."""
        self._create_dummy_registered_limit()
        raw_output = self.openstack('registered limit list')
        items = self.parse_listing(raw_output)
        self.assert_table_structure(items, self.REGISTERED_LIMIT_LIST_HEADERS)

    def test_registered_limit_delete(self):
        """Delete a registered limit; a successful delete prints nothing."""
        registered_limit_id = self._create_dummy_registered_limit(
            add_clean_up=False
        )
        raw_output = self.openstack(
            'registered limit delete'
            ' %(registered_limit_id)s'
            % {'registered_limit_id': registered_limit_id},
            cloud=SYSTEM_CLOUD,
        )
        self.assertEqual(0, len(raw_output))
|
from sklearn.cluster import KMeans
from secure_kmeans import *
from timeit import Timer
import cProfile
def graph_performance(sk, naive, secure, range_start, range_end, step):
    """
    Utility function to plot time as function of how many data points
    are to be clustered

    :param sk: timings for scikit-learn KMeans, one per dataset size
    :param naive: timings for the naive implementation
    :param secure: timings for the secure implementation
    :param range_start: first dataset size
    :param range_end: stop value for the dataset sizes (exclusive)
    :param step: increment between successive dataset sizes
    """
    # NOTE(review): ``plt`` is not imported in this file; it is presumably
    # re-exported by ``from secure_kmeans import *`` — confirm, otherwise
    # ``import matplotlib.pyplot as plt`` is needed here.
    x = [i for i in range(range_start, range_end, step)]
    plt.plot(x, secure)
    plt.plot(x, naive)
    plt.plot(x, sk)
    plt.title("Comparison of baseline against naive and secure implementation.")
    plt.xlabel("No. of points in dataset")
    plt.ylabel("Time (s)")
    # Log scale: the implementations differ by orders of magnitude.
    plt.yscale('log')
    plt.legend(['secure k-means', 'naive k-means', 'scikit k-means', ])
    plt.show()
def graph_calls():
    """
    Utility function to create a callgraph for visualization in
    gprof2dot or snakeviz

    To generate call graph .png run:
    gprof2dot -f pstats performance.prof | dot -Tpng -o output.png

    NOTE(review): relies on module-level names ``k``, ``centroids``,
    ``epsilon``, ``max_iter`` and the star-imported ``gen_data`` /
    ``secure_kmeans`` being defined before this is called — it raises
    NameError if invoked before the ``__main__`` block sets them up.
    """
    data = gen_data(k, n_samples=1000)
    with cProfile.Profile() as pr:
        pr.run("secure_kmeans(data, centroids, k, epsilon, max_iter, False)")
        # Dump the collected stats for gprof2dot / snakeviz.
        pr.dump_stats("performance.prof")
if __name__ == '__main__':
    # Benchmark parameters: cluster count, privacy budget, iteration cap,
    # and the range of dataset sizes to time.
    k = 3
    epsilon = 1
    max_iter = 15
    range_start = 1000
    range_end = 33000
    step = 1000
    n_timings = 3  # each recorded timing is the total of 3 runs (Timer.timeit)
    # random_centroids / gen_data / naive_kmeans come from the
    # ``from secure_kmeans import *`` star import.
    centroids = random_centroids(k)
    # Baseline: scikit-learn KMeans at every dataset size.
    timings_sklearn = []
    for i in range(range_start, range_end, step):
        print(i)
        data = gen_data(k, n_samples=i)
        t = Timer(lambda: KMeans(n_clusters=k, max_iter=max_iter).fit(data))
        timings_sklearn.append(t.timeit(number=n_timings))
    print(timings_sklearn)
    # Naive (plaintext) implementation.
    timings_naive = []
    for i in range(range_start, range_end, step):
        print(i)
        data = gen_data(k, n_samples=i)
        t = Timer(lambda: naive_kmeans(data, centroids, k, epsilon, max_iter, False))
        timings_naive.append(t.timeit(number=n_timings))
    print(timings_naive)
    # Secure implementation.
    timings_secure = []
    for i in range(range_start, range_end, step):
        print(i)
        data = gen_data(k, n_samples=i)
        t = Timer(lambda: secure_kmeans(data, centroids, k, epsilon, max_iter, False))
        timings_secure.append(t.timeit(number=n_timings))
    print(timings_secure)
    # Recap of all three timing series before plotting them together.
    print(timings_sklearn)
    print(timings_naive)
    print(timings_secure)
    graph_performance(timings_sklearn, timings_naive, timings_secure, range_start, range_end, step)
|
print "iets"
x = 2
if x == 2:
print("dit is mijn eerste programma")
print("kijk opa jan ik kan al programmeren")
print "Hoofdstuk: %d" % x
print("en ik doe het met erwin")
print("ik vind het heel heel heel erg leuk om te doen")
print('gaaf he')
tekst = "dit heeft erwin gedaan"
if x == 2:
tekst = "ik kan het al heel goed"
print(tekst)
print(tekst + ", dus ik vind het heel leuk")
one = 1
two = 2
three = one + two
print("Hoofdstuk: %d" % three)
print 'Torre gaat het ook proberen'
|
# -*- coding: utf-8 -*-
r"""
๋จ์ด ์๋ฒ ๋ฉ: ์ดํ์ ์๋ฏธ๋ฅผ ์ธ์ฝ๋ฉํ๊ธฐ
===========================================
**๋ฒ์ญ**: `์์ฑ์ฐ <http://github.com/sylim2357>`_
๋จ์ด ์๋ฒ ๋ฉ(word embedding)์ด๋ ๋ง๋ญ์น(ํน์ ์ฝํผ์ค, corpus) ๋ด ๊ฐ ๋จ์ด์ ์ผ๋์ผ๋ก ๋์ํ๋ ๋ฐ์ง๋ ์ค์ ๋ฒกํฐ(dense vector)์ ์งํฉ, ํน์ ์ด ๋ฒกํฐ๋ฅผ
๊ตฌํ๋ ํ์๋ฅผ ๊ฐ๋ฆฌํต๋๋ค. ์ฃผ๋ก ๋จ์ด๋ฅผ ํผ์ฒ(feature)๋ก ์ฌ์ฉํ๋ ์์ฐ์ด ์ฒ๋ฆฌ ๋ถ์ผ์์๋ ๋จ์ด๋ฅผ ์ปดํจํฐ ์นํ์ ์ธ
ํํ๋ก ๋ฐ๊พธ์ด ์ฃผ๋ ์์
์ด ํ์์ ์
๋๋ค. ์ปดํจํฐ๊ฐ ๋จ์ด๋ฅผ ๋ฐ๋ก ์ดํดํ๊ธฐ๋ ์๋นํ ์ด๋ ต๊ธฐ ๋๋ฌธ์ด์ฃ .
๊ทธ๋ ๋ค๋ฉด, ๋จ์ด๋ฅผ ์ด๋ป๊ฒ ํํํ๋ ๊ฒ์ด ์ข์๊น์? ๋ฌผ๋ก ๊ฐ ๋ฌธ์์ ํด๋นํ๋ ASCII์ฝ๋๋ฅผ ์ฌ์ฉํ ์ ์๊ฒ ์ง๋ง,
ASCII์ฝ๋๋ ์ด ๋จ์ด๊ฐ *๋ฌด์* ์ธ์ง๋ฅผ ์๋ ค์ค ๋ฟ, ๋จ์ด๊ฐ ์ด๋ค *์๋ฏธ* ๋ฅผ ๊ฐ์ง๋์ง๋ ์๋ ค์ฃผ์ง ์์ต๋๋ค.
(๋ฃฐ๋ฒ ์ด์ค๋ก ์ด๋ฏธ ๋ฑ ๋ฌธ๋ฒ์ ํน์ง์ ํ์ฉํ๊ฑฐ๋ ์์ด์ ๊ฒฝ์ฐ ๋๋ฌธ์๋ฅผ ์ฌ์ฉํ ์ ์๊ฒ ์ง๋ง ์ถฉ๋ถํ์ง ์์ต๋๋ค.)
๋จ์ด๋ฅผ ์ด๋ป๊ฒ ํํํ ์ง ๋ฟ ์๋๋ผ, ์ด ํํ๋ฒ์ ์ด๋ ํ ๋ฐฉ์์ผ๋ก ์ฐ์ฐํด์ผ ํ ์ง ๋ํ ํฐ ๋ฌธ์ ์
๋๋ค.
๋ณดํต ์ด๋ฌํ ๋ฐ๋ ๋์ ๋ฒกํฐ๋ฅผ ์ป๊ธฐ ์ํด ์ฌ์ฉํ๋ ๋ด๋ด๋ท ๋ชจ๋ธ์ :math:`|V|` (๋ง๋ญ์น์ ๋จ์ด ๊ฐ์)์
ํฐ ์
๋ ฅ ์ฐจ์๊ณผ ๋ช ์๋๋ (ํ
์ค๋ฅผ ๋ถ๋ฅํ๋ ๋ฌธ์ ๋ผ๊ณ ํ ๊ฒฝ์ฐ) ์์ ์ถ๋ ฅ ์ฐจ์์ ๊ฐ์ง๋๋ค.
์ฆ, ๋จ์ด๋ค ๊ฐ์ ์ฐ์ฐ์ด ํ์์
๋๋ค. ์ด๋ป๊ฒ ์ด ํฐ ์ฐจ์์ ๊ณต๊ฐ์ ์์ ๊ณต๊ฐ์ผ๋ก ๋ณํ์ํฌ ์ ์์๊น์?
๋จผ์ , ์๊ธฐํ ASCII์ฝ๋ ๋์ ์ํซ ์ธ์ฝ๋ฉ(one-hot encoding)์ ์ฌ์ฉํด๋ณด๋ ๊ฒ์ ์ด๋จ๊น์? ์ํซ ์ธ์ฝ๋ฉ์ด๋
ํ๋์ ๋จ์ด :math:`w` ๋ฅผ ์๋์ ๋ฒกํฐ๋ก ํํํ๋ ๊ฒ์ ๋งํฉ๋๋ค.
.. math:: \overbrace{\left[ 0, 0, \dots, 1, \dots, 0, 0 \right]}^\text{|V| elements}
์ฌ๊ธฐ์ 1์ ํด๋น ๋ฒกํฐ๊ฐ ํํํ๊ณ ์ ํ๋ ๋จ์ด์ ํด๋นํ๋ ์์น 1๊ณณ์ ์๋ฆฌํฉ๋๋ค. (๋๋จธ์ง๋ ์ ๋ถ
0์
๋๋ค.) ๋ค๋ฅธ ๋จ์ด๋ฅผ ๋ํ๋ด๋ ๋ฒกํฐ์์ 1์ด ๋ค๋ฅธ ๊ณณ์ ์์นํด ์๊ฒ ์ฃ .
์ํซ ์ธ์ฝ๋ฉ์ ๋ง๋ค๊ธฐ๊ฐ ์ฝ๋ค๋ ์ฅ์ ์ด ์์ง๋ง, ๋จ์ํ ๋งํผ ๋จ์ ๋ ์์ต๋๋ค. ์ผ๋จ ๋จ์ด ๋ฒกํฐ ํ ๊ฐ๋
๋ชจ๋ ๋จ์ด๋ฅผ ํํํ ์ ์์ ๋งํ ํฌ๊ธฐ๊ฐ ๋์ด์ผ ํฉ๋๋ค. ์ฐ๋ฆฌ๊ฐ ์ผ๋ง๋ ๋ง์ ์ข
๋ฅ์ ๋จ์ด๋ฅผ
์ฌ์ฉํ๋์ง๋ฅผ ์๊ฐ ํ๋ค๋ฉด ์ด๋ง์ด๋งํ๊ฒ ํฐ ๋ฒกํฐ๋ผ๋ ๊ฒ์ ์ ์ ์์ฃ . ์ด ๋ฟ๋ง์ด ์๋๋๋ค.
์ํซ ๋ฒกํฐ๋ ๋ชจ๋ ๋จ์ด๋ฅผ ๋
๋ฆฝ์ ์ธ ๊ฐ์ฒด๋ก ๊ฐ์ ํ๋ ๊ฒ์ ๋ณผ ์ ์์ต๋๋ค. ์ฆ, ๊ณต๊ฐ์์์
์์ ํ ๋ค๋ฅธ ์ถ์ ์์นํด ์์ด์ ๋จ์ด๊ฐ์ ๊ด๊ณ๋ฅผ ๋ํ๋ผ ์๊ฐ ์์ต๋๋ค. ํ์ง๋ง ์ฐ๋ฆฌ๋ ๋จ์ด
์ฌ์ด์ *์ ์ฌ๋* ๋ฅผ ์ด๋ป๊ฒ๋ ๊ณ์ฐํ๊ณ ์ถ์๊ฑฐ์ฃ . ์ ์ ์ฌ๋๊ฐ ์ค์ํ๋๊ตฌ์? ๋ค์ ์์ ๋ฅผ ๋ด
์๋ค.
์ฐ๋ฆฌ์ ๋ชฉํ๊ฐ ์ธ์ด ๋ชจ๋ธ์ ๋ง๋๋ ๊ฒ์ด๋ผ๊ณ ๊ฐ์ ํ๊ณ ๋ค์์ ๋ฌธ์ฅ์ด ํ์ต ๋ฐ์ดํฐ๋ก์จ ์ฃผ์ด์ก๋ค๊ณ ํด๋ด
์๋ค.
* ์ํ์๊ฐ ๊ฐ๊ฒ๋ก ๋ฐ์ด๊ฐ๋ค.
* ๋ฌผ๋ฆฌํ์๊ฐ ๊ฐ๊ฒ๋ก ๋ฐ์ด๊ฐ๋ค.
* ์ํ์๊ฐ ๋ฆฌ๋ง ๊ฐ์ค์ ์ฆ๋ช
ํ๋ค.
๋ํ ํ์ต ๋ฐ์ดํฐ์๋ ์๋ ์๋ ๋ฌธ์ฅ์ด ์๋ค๊ณ ์๊ฐํด๋ด
์๋ค.
* ๋ฌผ๋ฆฌํ์๊ฐ ๋ฆฌ๋ง ๊ฐ์ค์ ์ฆ๋ช
ํ๋ค.
ASCII ์ฝ๋๋ ์ํซ ์ธ์ฝ๋ฉ ๊ธฐ๋ฐ ์ธ์ด ๋ชจ๋ธ์ ์ ๋ฌธ์ฅ์ ์ด๋์ ๋ ๋ค๋ฃฐ ์ ์๊ฒ ์ง๋ง, ๊ฐ์ ์ ์ฌ์ง๊ฐ ์์ง ์์๊น์?
๋จผ์ ์๋์ ๋ ์ฌ์ค์ ์๊ฐํด๋ด
์๋ค.
* '์ํ์'์ '๋ฌผ๋ฆฌํ์'๊ฐ ๋ฌธ์ฅ ๋ด์์ ๊ฐ์ ์ญํ ์ ๋งก๊ณ ์์ต๋๋ค. ์ด ๋ ๋จ์ด๋ ์ด๋ป๊ฒ๋ ์๋ฏธ์ ์ธ ์ฐ๊ด์ฑ์ด ์์ ๊ฒ๋๋ค.
* ์๋ก์ด ๋ฌธ์ฅ์์ '๋ฌผ๋ฆฌํ์'๊ฐ ๋งก์ ์ญํ ์ '์ํ์'๊ฐ ๋งก๋ ๊ฒ์ ํ์ต ๋ฐ์ดํฐ์์ ๋ณธ ์ ์ด ์์ต๋๋ค.
์ฐ๋ฆฌ ๋ชจ๋ธ์ด ์์ ์ฌ์ค์ ํตํด '๋ฌผ๋ฆฌํ์'๊ฐ ์ ๋ฌธ์ฅ์ ์ ๋ค์ด ๋ง๋๋ค๋ ๊ฒ์ ์ถ๋ก ํ ์
์๋ค๋ฉด ์ฐธ ์ข์ ๊ฒ์
๋๋ค. ์ด๊ฒ์ด ์์์ ์ธ๊ธํ ์ ์ฌ๋์ ์๋ฏธ์
๋๋ค. ์ฒ ์์ ์ ์ฌ๋ ๋ฟ
์๋๋ผ *์๋ฏธ์ ์ ์ฌ๋* ์ธ ๊ฒ์
๋๋ค. ์ด๊ฒ์ด์ผ๋ง๋ก ์ธ์ด ๋ฐ์ดํฐ์ ๋ด์ฌํ๋ ํฌ๋ฐ์ฑ(sparsity)์
๋ํ ์ฒ๋ฐฉ์ด ๋ ๊ฒ์
๋๋ค. ์ฐ๋ฆฌ๊ฐ ๋ณธ ๊ฒ๊ณผ ์์ง ๋ณด์ง ์์ ๊ฒ ์ฌ์ด๋ฅผ ์ด์ด์ฃผ๋ ๊ฒ์ด์ฃ .
์์ผ๋ก๋ ๋ค์์ ์ธ์ดํ์ ๊ธฐ๋ณธ ๋ช
์ ๋ฅผ ๊ฐ์ ํ๋๋ก ํฉ์๋ค. ๋ฐ๋ก ๋น์ทํ ๋งฅ๋ฝ์์ ๋ฑ์ฅํ๋
๋จ์ด๋ค์ ์๋ก ์๋ฏธ์ ์ฐ๊ด์ฑ์ ๊ฐ์ง๋ค๋ ๊ฒ์
๋๋ค. ์ธ์ดํ์ ์ผ๋ก๋ `๋ถ์ฐ ์๋ฏธ ๊ฐ์ค(distributional
hypothesis) <https://en.wikipedia.org/wiki/Distributional_semantics>`__ ์ด๋ผ๊ณ ๋ ํฉ๋๋ค.
๋ฐ์ง๋ ๋จ์ด ์๋ฒ ๋ฉ ๊ตฌํ๊ธฐ
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
์ด๋ป๊ฒ ๋จ์ด์ ์๋ฏธ์ ์ ์ฌ๋๋ฅผ ์ธ์ฝ๋ฉ ํ ์ ์์๊น์? ๋ค์ ๋งํด, ์ด๋ป๊ฒ ํด์ผ ๋จ์ด์ ์ ์ฌ๋๋ฅผ
๋จ์ด ๋ฒกํฐ์ ๋ฐ์ํ ์ ์์๊น์? ๋จ์ด ๋ฐ์ดํฐ์ ์๋ฏธ์ ์์ฑ(attribute)์ ๋ถ์ฌํ๋ ๊ฑด ์ด๋ค๊ฐ์?
์๋ฅผ ๋ค์ด '์ํ์'์ '๋ฌผ๋ฆฌํ์'๊ฐ ๋ชจ๋ ๋ธ ์ ์๋ค๋ฉด, ํด๋น ๋จ์ด์ '๋ธ ์ ์์' ์์ฑ์ ๋์ ์ ์๋ฅผ ์ฃผ๋ ๊ฒ๋๋ค.
๊ณ์ ํด๋ด
์๋ค. ๋ค๋ฅธ ๋จ์ด๋ค์ ๋ํด์๋ ์ด๋ ํ ์์ฑ์ ๋ง๋ค ์ ์์์ง ์๊ฐํด๋ด
์๋ค.
๋ง์ฝ ๊ฐ ์์ฑ์ ํ๋์ ์ฐจ์์ด๋ผ๊ณ ๋ณธ๋ค๋ฉด ํ๋์ ๋จ์ด์ ์๋์ ๊ฐ์ ๋ฒกํฐ๋ฅผ ๋ฐฐ์ ํ ์ ์์๊ฒ๋๋ค.
.. math::
q_\text{์ํ์} = \left[ \overbrace{2.3}^\text{๋ธ ์ ์์},
\overbrace{9.4}^\text{์ปคํผ๋ฅผ ์ข์ํจ}, \overbrace{-5.5}^\text{๋ฌผ๋ฆฌ ์ ๊ณต์}, \dots \right]
.. math::
q_\text{๋ฌผ๋ฆฌํ์} = \left[ \overbrace{2.5}^\text{๋ธ ์ ์์},
\overbrace{9.1}^\text{์ปคํผ๋ฅผ ์ข์ํจ}, \overbrace{6.4}^\text{๋ฌผ๋ฆฌ ์ ๊ณต์}, \dots \right]
๊ทธ๋ฌ๋ฉด ์๋์ ๊ฐ์ด ๋ ๋จ์ด ์ฌ์ด์ ์ ์ฌ๋๋ฅผ ๊ตฌํ ์ ์์ต๋๋ค. ('์ ์ฌ๋'๋ผ๋ ํจ์๋ฅผ ์ ์ํ๋ ๊ฒ๋๋ค)
.. math:: \text{์ ์ฌ๋}(\text{๋ฌผ๋ฆฌํ์}, \text{์ํ์}) = q_\text{๋ฌผ๋ฆฌํ์} \cdot q_\text{์ํ์}
๋ฌผ๋ก ๋ณดํต์ ์ด๋ ๊ฒ ๋ฒกํฐ์ ๊ธธ์ด๋ก ๋๋ ์ฃผ์ง๋ง์.
.. math::
\text{์ ์ฌ๋}(\text{๋ฌผ๋ฆฌํ์}, \text{์ํ์}) = \frac{q_\text{๋ฌผ๋ฆฌํ์} \cdot q_\text{์ํ์}}
{\| q_\text{๋ฌผ๋ฆฌํ์} \| \| q_\text{์ํ์} \|} = \cos (\phi)
:math:`\phi` ๋ ๋ ๋ฒกํฐ ์ฌ์ด์ ๊ฐ์
๋๋ค. ์ด๋ฐ ์์ด๋ฉด ์ ๋ง ๋น์ทํ ๋จ์ด๋ ์ ์ฌ๋ 1์ ๊ฐ๊ณ ,
์ ๋ง ๋ค๋ฅธ ๋จ์ด๋ ์ ์ฌ๋ -1์ ๊ฐ๊ฒ ์ฃ . ๋น์ทํ ์๋ฏธ๋ฅผ ๊ฐ์ง์๋ก ๊ฐ์ ๋ฐฉํฅ์ ๊ฐ๋ฆฌํค๊ณ ์์ ํ
๋๊น์.
์ด ๊ธ ์ด๋ฐ์ ๋์จ ํฌ๋ฐํ ์ํซ ๋ฒกํฐ๊ฐ ์ฌ์ค์ ์ฐ๋ฆฌ๊ฐ ๋ฐฉ๊ธ ์ ์ํ ์๋ฏธ ๋ฒกํฐ์
ํน์ด ์ผ์ด์ค๋ผ๋ ๊ฒ์ ๊ธ๋ฐฉ ์ ์ ์์ต๋๋ค. ๋จ์ด ๋ฒกํฐ์ ๊ฐ ์์๋ ๊ทธ ๋จ์ด์ ์๋ฏธ์ ์์ฑ์
ํํํ๊ณ , ๋ชจ๋ ๋จ์ด ์์ ์ ์ฌ๋๋ 0์ด๊ธฐ ๋๋ฌธ์ด์ฃ . ์์์ ์ ์ํ ์๋ฏธ ๋ฒกํฐ๋ *๋ฐ์ง* ๋์ด ์์ต๋๋ค.
์ฆ, ์ํซ ๋ฒกํฐ์ ๋นํด 0 ์์์ ์๊ฐ ์ ๋ค๊ณ ํ ์ ์์ต๋๋ค.
ํ์ง๋ง ์ด ๋ฒกํฐ๋ค์ ๊ตฌํ๊ธฐ๊ฐ ์ง์ง ์ด๋ ต์ต๋๋ค. ๋จ์ด๊ฐ์ ์ ์ฌ๋๋ฅผ ๊ฒฐ์ ์ง์ ๋งํ
์๋ฏธ์ ์์ฑ์ ์ด๋ป๊ฒ ๊ฒฐ์ ํ ๊ฒ์ด๋ฉฐ, ์์ฑ์ ๊ฒฐ์ ํ๋ค๊ณ ํ๋๋ผ๋ ๊ฐ ์์ฑ์
ํด๋นํ๋ ๊ฐ์ ๋๋์ฒด ์ด๋ ํ ๊ธฐ์ค์ผ๋ก ์ ํด์ผ ํ ๊น์? ์์ฑ๊ณผ ๊ฐ์ ๋ฐ์ดํฐ์ ๊ธฐ๋ฐํด
๋ง๋ค๊ณ ์๋์ผ๋ก ๋จ์ด ๋ฒกํฐ๋ฅผ ๋ง๋ค ์๋ ์์๊น์? ์์ต๋๋ค. ๋ฅ๋ฌ๋์ ์ฌ์ฉํ๋ฉด ๋ง์ด์ฃ .
๋ฅ๋ฌ๋์ ์ธ๊ณต์ ๊ฒฝ๋ง์ ์ด์ฉํ์ฌ ์ฌ๋์ ๊ฐ์
์์ด ์์ฑ์ ํํ ๋ฐฉ๋ฒ์ ์๋์ผ๋ก ํ์ตํฉ๋๋ค.
์ด๋ฅผ ์ด์ฉํด ๋จ์ด ๋ฒกํฐ๋ฅผ ๋ชจ๋ธ ๋ชจ์๋ก ์ค์ ํ๊ณ ๋ชจ๋ธ ํ์ต์์ ๋จ์ด ๋ฒกํฐ๋ ํจ๊ป ์
๋ฐ์ดํธ ํ๋ฉด
๋ ๊ฒ์
๋๋ค. ์ด๋ ๊ฒ ์ฐ๋ฆฌ ์ ๊ฒฝ๋ง ๋ชจ๋ธ์ ์ ์ด๋ ์ด๋ก ์์ผ๋ก๋ ์ถฉ๋ถํ ํ์ตํ ์ ์๋
*์ ์ฌ ์๋ฏธ ์์ฑ* ์ ์ฐพ์ ๊ฒ์
๋๋ค. ์ฌ๊ธฐ์ ๋งํ๋ ์ ์ฌ ์๋ฏธ ์์ฑ์ผ๋ก ์ด๋ฃจ์ด์ง ๋ฒกํฐ๋ ์ฌ๋์ด
ํด์ํ๊ธฐ ์๋นํ ์ด๋ ต๋ค๋ ์ ์ ๊ธฐ์ตํด ๋์ธ์. ์์์ ์ํ์์ ๋ฌผ๋ฆฌํ์์๊ฒ ์ปคํผ๋ฅผ ์ข์ํ๋ค๋
๋ฑ ์ฌ๋์ด ์์์ ์ผ๋ก ๋จ์ด์ ๋ถ์ฌํ ์์ฑ๊ณผ๋ ๋ฌ๋ฆฌ, ์ธ๊ณต์ ๊ฒฝ๋ง์ด ์๋์ผ๋ก ๋จ์ด์ ์์ฑ์ ์ฐพ๋๋ค๋ฉด
๊ทธ ์์ฑ๊ณผ ๊ฐ์ด ์๋ฏธํ๋ ๋ฐ๋ฅผ ์๊ธฐ๊ฐ ์ด๋ ค์ธ ๊ฒ์
๋๋ค. ์๋ฅผ ๋ค์ด์ ์ ๊ฒฝ๋ง ๋ชจ๋ธ์ด ์ฐพ์ '์ํ์'์
'๋ฌผ๋ฆฌํ์'์ ํํ ๋ฒกํฐ ๋ ๋ค ๋๋ฒ์งธ ์์๊ฐ ํฌ๋ค๊ณ ๊ฐ์ ํด ๋ด
์๋ค. ๋์ด ๋น์ทํ๋ค๋ ๊ฑด ์๊ฒ ์ง๋ง,
๋๋์ฒด ๋๋ฒ์งธ ์์๊ฐ ๋ฌด์์ ์๋ฏธํ๋์ง๋ ์๊ธฐ๊ฐ ๋งค์ฐ ํ๋ ๊ฒ์
๋๋ค. ํํ ๋ฒกํฐ ๊ณต๊ฐ์์์
๋น์ทํ๋ค๋ ์ ๋ณด ์ธ์๋ ์๋ง ๋ง์ ์ ๋ณด๋ฅผ ์ฃผ๊ธด ์ด๋ ค์ธ ๊ฒ์
๋๋ค.
์์ฝํ์๋ฉด, **๋จ์ด ์๋ฒ ๋ฉ์ ๋จ์ด์ *์๋ฏธ* ๋ฅผ ํํํ๋ ๋ฐฉ๋ฒ์ด๋ฉฐ, ์ฐจํ์ ์๋ฒ ๋ฉ์ ์ฌ์ฉํด์
ํ๊ณ ์ ํ๋ ๋ฌธ์ ์ ์ ์ฉํ ์๋ฏธ ์ ๋ณด๋ฅผ ํจ์จ์ ์ผ๋ก ์ธ์ฝ๋ฉํ ๊ฒ์
๋๋ค.** ํ์ฌ ํ๊ทธ, ํ์ค ํธ๋ฆฌ(parse tree) ๋ฑ
๋จ์ด์ ์๋ฏธ ์ธ์ ๋ค๋ฅธ ๊ฒ๋ ์ธ์ฝ๋ฉ ํ ์ ์์ต๋๋ค! ํผ์ฒ ์๋ฒ ๋ฉ์ ๊ฐ๋
์ ์ก๋ ๊ฒ์ด ์ค์ํฉ๋๋ค.
ํ์ดํ ์น์์ ๋จ์ด ์๋ฒ ๋ฉ ํ๊ธฐ
~~~~~~~~~~~~~~~~~~~~~~~~~~
์ค์ ๋ก ์ฝ๋์ ์์๋ฅผ ๋ณด๊ธฐ ์ ์, ํ์ดํ ์น๋ฅผ ๋น๋กฏํด ๋ฅ๋ฌ๋ ๊ด๋ จ ํ๋ก๊ทธ๋๋ฐ์ ํ ๋
๋จ์ด ์๋ฒ ๋ฉ์ ์ด๋ป๊ฒ ์ฌ์ฉํ๋์ง์ ๋ํด ์กฐ๊ธ ์์๋ด
์๋ค. ๋งจ ์์์ ์ํซ ๋ฒกํฐ๋ฅผ
์ ์ํ๋ ๊ฒ ์ฒ๋ผ, ๋จ์ด ์๋ฒ ๋ฉ์ ์ฌ์ฉํ ๋์๋ ๊ฐ ๋จ์ด์ ์ธ๋ฑ์ค๋ฅผ ๋ถ์ฌํด์ผ ํฉ๋๋ค.
์ด ์ธ๋ฑ์ค๋ฅผ ์ฐธ์กฐ ํ
์ด๋ธ(look-up table)์์ ์ฌ์ฉํ ๊ฒ์
๋๋ค. ์ฆ, :math:`|V| \times D` ํฌ๊ธฐ์ ํ๋ ฌ์
๋จ์ด ์๋ฒ ๋ฉ์ ์ ์ฅํ๋๋ฐ, :math:`D` ์ฐจ์์ ์๋ฒ ๋ฉ ๋ฒกํฐ๊ฐ ํ๋ ฌ์ :math:`i` ๋ฒ์งธ ํ์
์ ์ฅ๋์ด์์ด :math:`i` ๋ฅผ ์ธ๋ฑ์ค๋ก ํ์ฉํด ์๋ฒ ๋ฉ ๋ฒกํฐ๋ฅผ ์ฐธ์กฐํ๋ ๊ฒ์
๋๋ค.
์๋์ ๋ชจ๋ ์ฝ๋์์๋ ๋จ์ด์ ์ธ๋ฑ์ค๋ฅผ ๋งคํํด์ฃผ๋ ๋์
๋๋ฆฌ๋ฅผ word\_to\_ix๋ผ ์นญํฉ๋๋ค.
ํ์ดํ ์น๋ ์๋ฒ ๋ฉ์ ์์ฝ๊ฒ ์ฌ์ฉํ ์ ์๊ฒ torch.nn.Embedding์ ์์์ ์ค๋ช
ํ ์ฐธ์กฐ ํ
์ด๋ธ
๊ธฐ๋ฅ์ ์ง์ํฉ๋๋ค. ์ด ๋ชจ๋์ ๋จ์ด์ ๊ฐ์์ ์๋ฒ ๋ฉ์ ์ฐจ์, ์ด 2๊ฐ์ ๋ณ์๋ฅผ ์
๋ ฅ ๋ณ์๋ก ๋ฐ์ต๋๋ค.
torch.nn.Embedding ํ
์ด๋ธ์ ์๋ฒ ๋ฉ์ ์ฐธ์กฐํ๊ธฐ ์ํด์ torch.LongTensor ํ์
์ ์ธ๋ฑ์ค ๋ณ์๋ฅผ
๊ผญ ์ฌ์ฉํด์ผ ํฉ๋๋ค. (์ธ๋ฑ์ค๋ ์ค์๊ฐ ์๋ ์ ์์ด๊ธฐ ๋๋ฌธ์
๋๋ค.)
"""
# Author: Robert Guthrie
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
torch.manual_seed(1)  # fixed seed so the example output is reproducible
######################################################################
# Minimal embedding lookup: map each word to an integer index, then use
# that index to fetch the word's row from an nn.Embedding table.
word_to_ix = {"hello": 0, "world": 1}
embeds = nn.Embedding(2, 5)  # 2 words in vocab, 5 dimensional embeddings
lookup_tensor = torch.tensor([word_to_ix["hello"]], dtype=torch.long)
hello_embed = embeds(lookup_tensor)
print(hello_embed)
######################################################################
# Example: N-gram language modelling
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Recall that in an n-gram language model, given a sequence of words
# :math:`w`, we want to compute
#
# .. math:: P(w_i | w_{i-1}, w_{i-2}, \dots, w_{i-n+1} )
#
# where :math:`w_i` is the i-th word of the sequence.
#
# In this example, we compute the loss function on training data and
# update the parameters with backpropagation.
#
# NOTE(review): the original Korean comments were mojibake-damaged and
# split onto bare non-comment lines (a syntax error); rewritten in English.
CONTEXT_SIZE = 2      # number of preceding words used as context
EMBEDDING_DIM = 10
# We will use Shakespeare's Sonnet 2 as the training corpus.
test_sentence = """When forty winters shall besiege thy brow,
And dig deep trenches in thy beauty's field,
Thy youth's proud livery so gazed on now,
Will be a totter'd weed of small worth held:
Then being asked, where all thy beauty lies,
Where all the treasure of thy lusty days;
To say, within thine own deep sunken eyes,
Were an all-eating shame, and thriftless praise.
How much more praise deserv'd thy beauty's use,
If thou couldst answer 'This fair child of mine
Shall sum my count, and make my old excuse,'
Proving his beauty by succession thine!
This were to be new made when thou art old,
And see thy blood warm when thou feel'st it cold.""".split()
# The input should really be tokenised properly, but we keep it simple.
# Build a list of tuples; each tuple is ([word i-2, word i-1], target word).
trigrams = [([test_sentence[i], test_sentence[i + 1]], test_sentence[i + 2])
            for i in range(len(test_sentence) - 2)]
# Print the first 3 tuples to see what the data looks like.
print(trigrams[:3])
vocab = set(test_sentence)
word_to_ix = {word: i for i, word in enumerate(vocab)}
class NGramLanguageModeler(nn.Module):
    """Predict the next word from ``context_size`` preceding words.

    Embeds every context word, concatenates the embeddings, and feeds
    them through a two-layer MLP; returns log-probabilities over the
    whole vocabulary (shape ``(1, vocab_size)``).
    """

    def __init__(self, vocab_size, embedding_dim, context_size):
        super(NGramLanguageModeler, self).__init__()
        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.linear1 = nn.Linear(context_size * embedding_dim, 128)
        self.linear2 = nn.Linear(128, vocab_size)

    def forward(self, inputs):
        # Flatten the context embeddings into one (1, context*dim) row.
        flat = self.embeddings(inputs).view((1, -1))
        hidden = F.relu(self.linear1(flat))
        scores = self.linear2(hidden)
        return F.log_softmax(scores, dim=1)
# NOTE(review): the original Korean comments were mojibake-damaged and
# split onto bare non-comment lines (a syntax error); rewritten in English.
losses = []
loss_function = nn.NLLLoss()
model = NGramLanguageModeler(len(vocab), EMBEDDING_DIM, CONTEXT_SIZE)
optimizer = optim.SGD(model.parameters(), lr=0.001)
for epoch in range(10):
    total_loss = 0
    for context, target in trigrams:
        # Step 1. Prepare the model input (turn the context words into
        # integer indices and wrap them in a tensor).
        context_idxs = torch.tensor([word_to_ix[w] for w in context], dtype=torch.long)
        # Step 2. Gradients *accumulate* in torch; zero them before
        # feeding in a new instance.
        model.zero_grad()
        # Step 3. Forward pass: log-probabilities over the next word.
        log_probs = model(context_idxs)
        # Step 4. Compute the loss (the target word index must be
        # wrapped in a tensor).
        loss = loss_function(log_probs, torch.tensor([word_to_ix[target]], dtype=torch.long))
        # Step 5. Backpropagate and update the parameters.
        loss.backward()
        optimizer.step()
        # tensor.item() extracts the number from a one-element tensor.
        total_loss += loss.item()
    losses.append(total_loss)
print(losses)  # the loss should shrink on every iteration!
######################################################################
# Exercise: Computing Word Embeddings: Continuous Bag-of-Words
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The Continuous Bag-of-Words (CBOW) model is frequently used in NLP deep
# learning. It predicts a word given the surrounding context — a few
# words before and a few words after the target word. Unlike language
# modelling, it is neither sequential nor probabilistic.
#
# Typically, CBOW is used to quickly train word embeddings, which are
# then used to initialise the embeddings of a more complicated model.
# This is referred to as *pre-trained embeddings*, and it usually helps
# performance by a couple of percent.
#
# The CBOW model is as follows. Given a target word :math:`w_i` and an
# :math:`N` context window on each side,
# :math:`w_{i-1}, \dots, w_{i-N}` and :math:`w_{i+1}, \dots, w_{i+N}`
# (referring to all context words collectively as :math:`C`), CBOW
# tries to minimise
#
# .. math:: -\log p(w_i | C) = -\log \text{Softmax}(A(\sum_{w \in C} q_w) + b)
#
# where :math:`q_w` is the embedding for word :math:`w`.
#
# Implement this model in PyTorch by filling in the class template
# below. Some tips:
#
# * Think about which parameters you need to define.
# * Make sure you know what shape each operation expects; use .view()
#   to reshape tensors when needed.
#
CONTEXT_SIZE = 2  # 2 words to the left, 2 to the right
raw_text = """We are about to study the idea of a computational process.
Computational processes are abstract beings that inhabit computers.
As they evolve, processes manipulate other abstract things called data.
The evolution of a process is directed by a pattern of rules
called a program. People create programs to direct processes. In effect,
we conjure the spirits of the computer with our spells.""".split()

# Deduplicate the words by turning `raw_text` into a set.
vocab = set(raw_text)
vocab_size = len(vocab)
word_to_ix = {word: i for i, word in enumerate(vocab)}

# Each training example pairs the four surrounding words with the word
# they surround: ([w(i-2), w(i-1), w(i+1), w(i+2)], w(i)).
data = [
    ([raw_text[pos - 2], raw_text[pos - 1],
      raw_text[pos + 1], raw_text[pos + 2]], raw_text[pos])
    for pos in range(2, len(raw_text) - 2)
]
print(data[:5])
class CBOW(nn.Module):
    """Exercise template: implement the CBOW model here.

    Deliberately left unimplemented — fill in ``__init__`` (define the
    embedding table and the projection layer) and ``forward`` (sum the
    context embeddings and return log-probabilities over the vocab).
    """

    def __init__(self):
        pass

    def forward(self, inputs):
        pass
# Create your model and train it here.
# NOTE(review): the original Korean comment here was split onto a bare
# non-comment line (a syntax error); rewritten in English.

def make_context_vector(context, word_to_ix):
    """Turn a list of context words into a LongTensor of their indices."""
    idxs = [word_to_ix[w] for w in context]
    return torch.tensor(idxs, dtype=torch.long)

make_context_vector(data[0][0], word_to_ix)  # example
|
#!/usr/local/bin/python2.7
"""
replace low-frequency words in training file with '_RARE_'
and generate new training file - 'parse_train_rare.dat'
then execute
python count_cfg_freq.py parse_train_rare.dat > cfg_rare.counts
to generate new count file
"""
import sys, os
import numpy as np
import json
import types
import timeit
def create_rare_word_list_from_training_file(count_file):
    """
    Read a count file and return a list of rare words.

    A word is "rare" when its total count, summed over all of its
    part-of-speech tags, is below 5. Words are returned in order of
    first appearance, without duplicates.

    :param count_file: file created by running:
        python count_cfg_freqs.py parse_train.dat > cfg.counts
        (each UNARYRULE line looks like: "<count> UNARYRULE <tag> <word>")
    :return: a list of unique rare words
    """
    # Accumulate the total count per word in a single pass. The original
    # implementation re-scanned the whole word array once per word,
    # which was O(n^2) in the number of UNARYRULE lines.
    totals = {}
    order = []  # words in order of first appearance
    with open(count_file) as f:
        for line in f:
            fields = line.split()
            if fields[1] == 'UNARYRULE':
                count, word = int(fields[0]), fields[3]
                if word not in totals:
                    totals[word] = 0
                    order.append(word)
                totals[word] += count
    return [w for w in order if totals[w] < 5]
def edit_training_file(train_file, rare_file):
    """
    Replace rare words in ``train_file`` with '_RARE_' and write the
    result to ``rare_file``, one JSON-encoded parse tree per line.

    :param train_file: input file, one JSON parse tree per line
    :param rare_file: output file to create
    """
    # NOTE(review): the rare-word list is always built from "cfg.counts"
    # in the working directory, not from train_file — confirm intended.
    # A set makes the per-leaf membership test O(1) instead of O(n).
    rare_words_list = set(create_rare_word_list_from_training_file("cfg.counts"))

    def modify_leaf(tree):
        """
        Recursively find the leaf-level terminal words in a tree and
        replace them with "_RARE_" when the word is a rare word.

        :param tree: a parse tree (nested lists; element 0 is the label)
        :return: the tree with low-frequency leaf words modified in place
        """
        for idx, item in enumerate(tree):
            if idx != 0:  # element 0 is the rule/tag label, never a word
                # ``types.ListType`` was Python-2-only; ``list`` is
                # equivalent and works on both interpreters.
                if isinstance(item, list):
                    modify_leaf(item)
                elif item in rare_words_list:
                    tree[idx] = '_RARE_'
        return tree

    # Context managers guarantee both files are closed even on error.
    with open(train_file) as f, open(rare_file, 'w+') as newf:
        for line in f:
            tree = json.loads(line)
            newf.write(json.dumps(modify_leaf(tree)) + '\n')
# edit_training_file()
if __name__ == "__main__":
train_file = sys.argv[1]
rare_file = sys.argv[2]
edit_training_file(train_file, rare_file) |
# euler 37
def is_prime(n):
    """Return True when ``n`` is prime, using 6k±1 trial division."""
    if n <= 1:
        return False
    if n <= 3:
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    # Every prime > 3 has the form 6k ± 1; test only those candidates.
    divisor = 5
    while divisor * divisor <= n:
        if n % divisor == 0 or n % (divisor + 2) == 0:
            return False
        divisor += 6
    return True
# Project Euler 37: sum the eleven primes that remain prime under every
# left- and right-truncation (2, 3, 5 and 7 are excluded by definition).
sumx = 0
for x in range(11,1000001,2):  # even numbers > 2 can never be prime
    strx = str(x)
    all_prime = True
    for lenx in range(0, len(strx)):
        # Right-truncations: every prefix (e.g. 3, 37, 379, 3797) must be prime.
        if all_prime:
            test = int(strx[:lenx+1])
            all_prime = is_prime(test)
        # Left-truncations: every suffix (e.g. 3797, 797, 97, 7) must be prime.
        if all_prime:
            test = int(strx[lenx:])
            all_prime = is_prime(test)
    if all_prime:
        print(x)
        sumx += x
print('\nsum={}'.format(sumx))
# Expected output (the eleven truncatable primes and their sum):
# 23
# 37
# 53
# 73
# 313
# 317
# 373
# 797
# 3137
# 3797
# 739397
#
# sum=748317
|
import logging
from .stream import Stream
logger = logging.getLogger(__name__)
class TagManager:
    '''
    Builds, pushes and inspects prestashop/prestashop Docker image tags.
    '''

    def __init__(self, docker_api, docker_client, version_manager, cache, quiet):
        '''
        Constructor

        @param docker_api: Customer docker API
        @type docker_api: DockerApi
        @param docker_client: Docker client
        @type docker_client: docker
        @param version_manager: Version manager
        @type version_manager: VersionManager
        @param cache: Use cache
        @type cache: bool
        @param quiet: Quiet mode
        @type quiet: bool
        '''
        self.docker_api = docker_api
        self.docker_client = docker_client
        self.stream = Stream(quiet)
        self.version_manager = version_manager
        self.cache = cache
        # Lazily-fetched Docker Hub tag list; populated on first exists().
        self.tags = None

    def build(self, version=None, force=False):
        '''
        Build version on the current machine

        @param version: Optional version you want to build
        @type version: str
        @param force: Rebuild even when the tag already exists on Docker Hub
        @type force: bool
        '''
        versions = self.get_versions(version)
        for version, version_path in versions.items():
            print(
                'Building {}'.format(version)
            )
            if not force and self.exists(version):
                print('Image already exists')
                # Do not build images that already exists on Docker Hub
                continue
            log = self.docker_client.api.build(
                path=str(version_path),
                tag='prestashop/prestashop:' + version,
                rm=True,
                nocache=(not self.cache),
                decode=True
            )
            self.stream.display(log)
            # Tag the freshly built image with every configured alias.
            aliases = self.version_manager.get_aliases()
            if version in aliases:
                for alias in aliases[version]:
                    print(
                        'Create tag {}'.format(alias)
                    )
                    self.docker_client.api.tag(
                        'prestashop/prestashop:' + version,
                        'prestashop/prestashop',
                        alias
                    )

    def push(self, version=None, force=False):
        '''
        Push version on Docker Hub

        @param version: Optional version you want to build
        @type version: str
        @param force: Push even when the tag already exists on Docker Hub
        @type force: bool
        '''
        versions = self.get_versions(version)
        for version in versions.keys():
            print(
                'Pushing {}'.format(version)
            )
            if not force and self.exists(version):
                continue
            log = self.docker_client.api.push(
                repository='prestashop/prestashop',
                tag=version,
                decode=True,
                stream=True
            )
            self.stream.display(log)
            # Push every alias tag as well.
            aliases = self.version_manager.get_aliases()
            if version in aliases:
                for alias in aliases[version]:
                    print(
                        'Pushing tag {}'.format(alias)
                    )
                    log = self.docker_client.api.push(
                        repository='prestashop/prestashop',
                        tag=alias,
                        decode=True,
                        stream=True
                    )
                    self.stream.display(log)

    def exists(self, version):
        '''
        Test if a version is already on Docker Hub

        @param version: The version you want to check
        @type version: str
        @return: True if tag exists
        @rtype: bool
        '''
        # Fetch the tag list once and reuse it for subsequent checks.
        if self.tags is None:
            self.tags = self.docker_api.get_tags()
        for tag in self.tags:
            if tag['name'] == version:
                return True
        return False

    def get_versions(self, version):
        '''
        Version checker

        @param version: Version (None selects every known version)
        @type version: str
        @return: List of versions
        @rtype: dict
        '''
        if version is None:
            return self.version_manager.get_versions()
        return self.version_manager.parse_version(version)

    def get_aliases(self, version):
        '''
        Print all aliases of the selected version(s)

        @param version: Version
        @type version: str
        '''
        versions = self.get_versions(version)
        aliases = self.version_manager.get_aliases()
        for version in versions:
            if version in aliases:
                print('Aliases for {}'.format(version))
                # Plain loop instead of a side-effect list comprehension.
                for alias in aliases[version]:
                    print("\t{}".format(alias))
|
# Goal: build the list of even numbers in 0-10 (exclusive), three ways.
# NOTE(review): the original Chinese comments were mojibake-damaged and one
# was split onto a bare non-comment line; rewritten in English.
# 1. Simple list comprehension using range() with a step.
list1 = [i for i in range(0, 10, 2)]
print(list1)
# 2. A for loop plus if, building the list imperatively.
list2 = []
for i in range(10):
    if i % 2 == 0:
        list2.append(i)
print(list2)
# 3. The for+if code rewritten as a list comprehension with a condition.
list3 = [i for i in range(10) if i % 2 == 0]
print(list3)
|
# Python: use a dict as a replacement for a switch statement.
# NOTE(review): the original Chinese comments were mojibake-damaged and one
# was split onto a bare non-comment line; rewritten in English.
switcherDict = {
    0: 'Sunday',
    1: 'Monday',
    2: 'Tuesday'
}
day_name = switcherDict[0]  # direct lookup; raises KeyError if missing
print(day_name)
# .get() returns the default for a missing key ('Unkown' typo fixed).
day_name1 = switcherDict.get(5, 'Unknown')
print(day_name1)

# List comprehension: square every element.
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = [i**2 for i in a]
print(b)

student = {
    '่็': 18,
    '็ไบ': 19,
    '็ไธ': 20
}
# Collect the dict keys (insertion order is preserved on Python 3.7+).
bb = [key for key, value in student.items()]
print(bb)
#coding:utf-8
import smtplib # ๅ้้ฎไปถ
from email.mime.multipart import MIMEMultipart # ๅธฆ้ไปถ
from email.mime.text import MIMEText # ๆๅปบ้ฎไปถ
def sendemail_func(smtp_server, send_user, password, receive_user_list, subject, excel_path, run_num, pass_num, failed_num, pass_rate, failed_rate):
    """Send the API test-report mail with the result workbook attached.

    :param smtp_server: SMTP host; falls back to "smtp.163.com" when empty
    :param send_user: sender address; falls back to a built-in default when empty
    :param password: SMTP password; falls back to a placeholder when empty
    :param receive_user_list: comma-separated recipient string; a default
        single-recipient list is used when empty
    :param subject: mail subject; defaults to "api testing report"
    :param excel_path: path of the .xls report to attach
    :param run_num, pass_num, failed_num, pass_rate, failed_rate: summary
        numbers interpolated into the mail body
    """
    # Apply defaults for any connection parameter the caller left empty.
    # (The original `if x: x = x / else: x = default` chains collapsed.)
    if not smtp_server:
        smtp_server = "smtp.163.com"
    if not send_user:
        send_user = "17709816196@163.com"
    if not password:
        password = "xxxxxxxx"
    if receive_user_list:
        # Recipients arrive as one comma-separated string.
        receive_user_list = receive_user_list.split(",")
    else:
        receive_user_list = ["17709816196@163.com"]
    if not subject:
        subject = "api testing report"
    message = MIMEMultipart()
    message['Subject'] = subject
    message['From'] = send_user
    # The To: header wants a single joined string, while sendmail() below
    # wants the actual list of addresses.
    message['To'] = ', '.join(receive_user_list)
    content = "ๆญคๆฌกไธๅฑ่ฟ่กๆฅๅฃไธชๆฐไธบ%sไธช๏ผ้่ฟไธชๆฐไธบ%sไธช๏ผๅคฑ่ดฅไธชๆฐไธบ%s, ้่ฟ็ไธบ%s, ๅคฑ่ดฅ็ไธบ%s" % (run_num, pass_num, failed_num, pass_rate, failed_rate)
    message.attach(MIMEText(content, 'plain', 'utf-8'))
    # Attach the report; use a context manager so the file handle is closed
    # (the original leaked the handle from a bare open().read()).
    with open(excel_path, 'rb') as report_file:
        att = MIMEText(report_file.read(), 'base64', 'utf-8')
    att["Content-Type"] = 'application/octet-stream'
    att["Content-Disposition"] = 'attachment; filename="case.xls"'
    message.attach(att)
    server = smtplib.SMTP()
    server.connect(smtp_server, 25)
    server.login(send_user, password)
    server.sendmail(send_user, receive_user_list, message.as_string())
    server.quit()
|
# Demonstrates list comprehensions: mapping, filtering and nested loops.
# The source list is named `numbers` -- the original called it `list`,
# shadowing the builtin.
numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
print("Original List")
print(numbers)
# Map: double every element.
new_list = [x * 2 for x in numbers]
print(new_list)
# Filter: keep only the even elements.
new_even_list = [x for x in numbers if x % 2 == 0]
print(new_even_list)
# Nested loops: every pairwise sum (inner loop varies fastest).
list_same = [10, 100]
combine_add = [x + y for x in numbers for y in list_same]
print(combine_add)
|
def generate_config(context):
    """Build a Deployment Manager config for a single-instance zonal MIG.

    Reads `zone` and `templateId` from context.properties, derives the
    managed-instance-group name from the deployment name, and configures a
    proactive RECREATE update policy (maxSurge 0 / maxUnavailable 1).
    """
    name = 'mig-' + context.env['name']
    template_path = 'projects/{}/global/instanceTemplates/{}'.format(
        context.env['project'], context.properties['templateId'])
    update_policy = {
        'maxSurge': {'calculated': 0, 'fixed': 0},
        'maxUnavailable': {'calculated': 1, 'fixed': 1},
        'minReadySec': 0,
        'minimalAction': 'REPLACE',
        'replacementMethod': 'RECREATE',
        'type': 'PROACTIVE',
    }
    mig = {
        'name': name,
        'type': 'gcp-types/compute-v1:instanceGroupManagers',
        'properties': {
            'instanceTemplate': template_path,
            'name': name,
            'zone': context.properties['zone'],
            'targetSize': 1,
            'updatePolicy': update_policy,
        },
    }
    return {'resources': [mig], 'outputs': []}
|
from wikiinfo import *
from ...fields import FieldType
class RussianVerbStressField(FieldType):
    """Field that looks up the stress pattern of a Russian verb via wiki data."""

    def __init__(self, db, sdictPath):
        # WikiInfo wraps both the wiki database and the sdict dictionary.
        self.info = WikiInfo(db, sdictPath)

    def pull(self, word):
        """Return the stress information for *word*."""
        stress = self.info.getStress(word)
        return stress
|
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
import torch, random
from sklearn.manifold import TSNE
from mpl_toolkits import mplot3d
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import plot_precision_recall_curve
# according to the data, the final real number may be var.
# Nucleotide <-> integer code lookup tables ('N' = unknown base).
nc_dic = {'A':0, 'T':1, 'G':2, 'C':3, 'N':4}
cn_dic = {'0':'A', '1':'T', '2':'G','3':'C', '4':'N'}
# visualization of signal segments
def vis_segment(signal, events, basecall):
    """Plot the raw signal for the first 40 events with per-base segment
    boundaries (top panel) and the base-call quality values (bottom panel).

    Assumes each event is a tuple whose element [1] is the segment length
    and element [2] the base, and that basecall[3] is a FASTQ-style
    quality string -- TODO confirm against the upstream reader.
    """
    start, end = 0, 40
    # Cumulative event lengths give the breakpoint index of each segment.
    bks = np.cumsum([e[1] for e in events[start:end]])
    bks = [0] + bks.tolist()
    bases = [e[2] for e in events[start:end]]
    # Trim the signal to the plotted window.
    signal = signal[:bks[-1]+1]
    fig=plt.figure(figsize=(10,6))
    plt.subplot(211)
    plt.plot(range(len(signal)), signal)
    # Label the x axis with the base that starts each segment.
    plt.xticks(bks[:-1], bases, color="brown",fontsize=10)
    for bk in bks[:-1]:
        plt.axvline(bk, linestyle="-.", color="red")
    plt.subplot(212)
    # Phred quality: ASCII code minus 33.
    base_quality = [ord(s)-33 for s in basecall[3]]
    plt.plot(range(len(base_quality)), base_quality)
    plt.show()
    plt.close("all")
def plot_curve(train_loss, validate_loss, model_name, save_path):
    """Plot train/validation loss per epoch and save the figure.

    :param train_loss: sequence of per-epoch training losses
    :param validate_loss: sequence of per-epoch validation losses
    :param model_name: model name used in the plot title
    :param save_path: file path passed to plt.savefig
    """
    plt.plot(train_loss)
    plt.plot(validate_loss)
    plt.title(' %s training curve' % (model_name))
    # Fixed axis-label typo ("entorpty" -> "entropy").
    plt.ylabel('cross entropy loss')
    plt.xlabel('epoch')
    plt.legend(['train loss', 'validation loss'], loc='upper right')
    plt.savefig(save_path)
def plot_curve_GAN(train_loss, model_name, save_path):
    """Plot generator/discriminator loss per epoch and save the figure.

    :param train_loss: sequence of (generator_loss, discriminator_loss) pairs
    :param model_name: model name used in the plot title
    :param save_path: file path passed to plt.savefig
    """
    plt.plot([x[0] for x in train_loss])
    plt.plot([x[1] for x in train_loss])
    plt.title(' %s training curve' % (model_name))
    # Fixed axis-label typo ("entorpty" -> "entropy").
    plt.ylabel('cross entropy loss')
    plt.xlabel('epoch')
    plt.legend(['Generator Loss', 'Discriminator Loss'], loc='upper right')
    plt.savefig(save_path)
def plot_seq_signal_diff(s, meth, unmeth, save_path):
    """Two stacked boxplots of per-position signal: methylated vs unmethylated.

    s: sequence code decoded through cn_dic for the title and appended
    verbatim to save_path for the output file name -- assumed to be a str
    of digit characters; TODO confirm at call sites.
    Returns -1 when either group has <= 1 entries (nothing to plot).
    """
    if len(meth) <= 1 or len(unmeth) <= 1:
        return -1
    fig = plt.figure(figsize=(10,5))
    ax1 = fig.add_subplot(2, 1, 1)
    ax1.boxplot(meth)
    ax1.set_title('Methyl_signal')
    ax1.set_ylabel('pA')
    ax1.set_xlabel('position')
    ax2 = fig.add_subplot(2, 1, 2)
    ax2.boxplot(unmeth)
    ax2.set_title('unMethyl_signal')
    ax2.set_ylabel('pA')
    ax2.set_xlabel('position')
    plt.subplots_adjust(wspace=0.1, hspace=0.6)
    # NOTE(review): plt.title here applies to the current (last) axes, not the figure
    plt.title(' %s signals difference' %("".join([cn_dic[str(x)] for x in s])))
    plt.savefig(save_path + s + ".png")
def plot_seq_signal_diff_group(s, meth, unmeth, save_path):
    """Grouped seaborn boxplot of per-position signals, meth vs unmeth side by side.

    meth/unmeth: 2-D arrays (samples x positions) -- inferred from the
    .shape[1] usage; TODO confirm.
    Returns -1 when either group has <= 1 entries (nothing to plot).
    """
    if len(meth) <= 1 or len(unmeth) <= 1:
        return -1
    nLen = meth.shape[1]
    column_names = [str(x+1) for x in range(nLen)]
    # Long-format dataframe: one row per (sample, position), tagged by group.
    unmeth = pd.DataFrame(unmeth, columns=column_names).assign(s_type="unMeth")
    meth = pd.DataFrame(meth, columns=column_names).assign(s_type="Meth")
    combined = pd.concat([unmeth, meth])
    melted = pd.melt(combined, id_vars="s_type")
    fig = plt.figure(figsize=(10,5))
    sns.boxplot(x="variable", y="value", hue="s_type", order=column_names,palette=["g", "r"], data=melted)
    plt.title(' %s signals difference' %("".join([cn_dic[str(x)] for x in s])))
    plt.savefig(save_path + s + ".png")
# 20200608
def vis_signal_difference(data_generator, figSavePath="../experiment/figures/barplot.png"):
    """Boxplot signal distributions of methylated / unmethylated / simulated reads.

    data_generator is expected to yield (inputs, labels) batches where
    inputs[2] holds real signals, inputs[3] simulated signals, and a
    non-zero label marks methylation -- inferred from the indexing below;
    TODO confirm against the data loader.
    """
    meth_list, unMeth_list, sim_list = [],[],[]
    for i, data in enumerate(data_generator, 0):
        inputs, labels = data
        if len(labels) == 0: continue
        # row indices of methylated (label != 0) and unmethylated samples
        index1 = labels.nonzero()
        index0 = (labels == 0).nonzero()
        if len(index1) > 0:
            meth_list.append(inputs[2][index1,:,0].squeeze(1))
        if len(index0) > 0:
            unMeth_list.append(inputs[2][index0,:,0].squeeze(1))
        sim_list.append(inputs[3][:,:,0])
    # cat the data collected over all batches
    meth_df = torch.cat(meth_list, 0).cpu().numpy()
    unMeth_df = torch.cat(unMeth_list, 0).cpu().numpy()
    sim_df = torch.cat(sim_list, 0).cpu().numpy()
    # visualization: one boxplot panel per data source
    fig = plt.figure(figsize=(5,10))
    ax1 = fig.add_subplot(3, 1, 1)
    ax1.boxplot(meth_df)
    ax1.set_title('Methylation')
    ax2 = fig.add_subplot(3, 1, 2)
    ax2.boxplot(unMeth_df)
    ax2.set_title('un_Methylation')
    ax3 = fig.add_subplot(3, 1, 3)
    ax3.boxplot(sim_df)
    ax3.set_title('Simulation data')
    plt.subplots_adjust(wspace=0.1, hspace=0.6)
    plt.savefig(figSavePath)
# t-SNE visualization of feature vectors, colored by binary label.
def tSNE_plot(X, Y, file_save_path, max_num=-1, dim=2):
    """Embed X with t-SNE (2D or 3D) and save a scatter plot colored by Y.

    max_num > 0 subsamples at most that many points before embedding.
    """
    # Optional random subsample to keep t-SNE tractable.
    if 0 < max_num < len(Y):
        chosen = random.sample(range(len(Y)), max_num)
        X = X[chosen]
        Y = Y[chosen]
    plt.figure(figsize=(10, 10))
    ax = plt.axes(projection='3d') if dim == 3 else None
    embedded = TSNE(n_components=dim, random_state=0).fit_transform(X)
    # Class 0 in blue, class 1 in red (legend order below matches).
    for label, color in ((0, "blue"), (1, "red")):
        sel = np.where(Y == label)[0]
        if dim == 2:
            plt.scatter(embedded[sel, 0], embedded[sel, 1], c=color)
        elif dim == 3:
            ax.scatter3D(embedded[sel, 0], embedded[sel, 1], embedded[sel, 2], c=color)
    plt.legend(["unMeth", "Meth"])
    plt.savefig(file_save_path)
    plt.clf()
    plt.close()
|
# Read a non-negative integer and print its factorial, showing the full
# expansion "n x n-1 x ... x 1 = result".  Negative input re-prompts.
n = -1
while n < 0:
    n = int(input('Digite um nรบmero a saber seu fatorial: '))
    if n < 0:
        print('Valores negativos nรฃo sรฃo permitidos. Tente novamente.')
    else:
        if n == 0:
            # 0! is 1 by definition.
            print('O fatorial de {} รฉ igual a 1'.format(n))
        else:
            fatorial = n
            print('O fatorial de {} = {}! ='.format(n, n), end=' ')
            # Print the factors n, n-1, ..., 2 separated by ' x '.
            for contador in range(n, 1, -1):
                print(contador, end=' x ')
            # The final "1 = " is only needed when there were factors before it.
            if n != 1:
                print('1', end=' = ')
            # Multiply fatorial (= n) by n-1, n-2, ..., 1.
            contador = 1
            while contador != n:
                fatorial *= (n-contador)
                contador += 1
            print(fatorial)
|
import json
def isJson():
    """Load books.json and return the parsed data.

    Prints the error and exits the process with status 1 when the file is
    not valid JSON.  (Name kept for backward compatibility -- it returns
    the parsed data, not a boolean.)
    """
    with open('books.json') as json_data:
        try:
            # The original round-tripped through dumps()/loads(); a single
            # load yields an equal structure, so the round trip was removed
            # along with the redundant close() inside the with-block.
            return json.load(json_data)
        except ValueError as e:
            print(e)
            exit(1)
# Sum the character counts of each field over every book in books.json.
# Some fields are optional; missing keys are logged and skipped.
dataBooks = isJson()
# running totals, one per field
lenTitle = 0
lenIsbn = 0
lenPublished = 0
lenthumbnailUrl = 0
lenshortDescription = 0
lenlongDescription = 0
lenStatus = 0
lenAutors = 0
lenCategories = 0
for val in dataBooks:
    # title, status, authors and categories are assumed always present
    lenTitle += len(val['title'])
    try:
        lenIsbn += len(val['isbn'])
    except KeyError as e:
        print("ERROR isbn = ", e)
    try:
        # publishedDate is a nested {'$date': ...} object (MongoDB export)
        lenPublished += len(val['publishedDate']['$date'])
    except KeyError as e:
        print("ERROR publishedDate = ", e)
    try:
        lenthumbnailUrl += len(val['thumbnailUrl'])
    except KeyError as e:
        print("ERROR thumbnailUrl = ", e)
    try:
        lenshortDescription += len(val['shortDescription'])
    except KeyError as e:
        print("ERROR shortDescription = ", e)
    try:
        lenlongDescription += len(val['longDescription'])
    except KeyError as e:
        print("ERROR longDescription = ", e)
    lenStatus += len(val['status'])
    # authors/categories are lists, so len() counts entries, not characters
    lenAutors += len(val['authors'])
    lenCategories += len(val['categories'])
TOTAL = lenTitle + lenIsbn + lenPublished + lenthumbnailUrl + lenshortDescription + lenlongDescription + lenStatus + lenAutors + lenCategories
print("Le total des caractรจre est de ", TOTAL)
|
import csv
import random
def load_quotes():
    """Read quotes.csv and return a list of {'quote', 'author'} dicts.

    Blank rows in the CSV are skipped.
    """
    with open('quotes.csv', newline='') as quotes_file:
        reader = csv.reader(quotes_file)
        return [{'quote': row[0], 'author': row[1]} for row in reader if row]
def get_random_quote():
    """Pick one stored quote at random and return it as (quote, author)."""
    chosen = random.choice(load_quotes())
    return chosen['quote'], chosen['author']
def add_quote(quote, author):
    """Append a single quote/author row to quotes.csv."""
    with open('quotes.csv', mode="a", newline='') as quotes_file:
        csv.writer(quotes_file).writerow([quote, author])
|
import datetime
from unittest.mock import Mock, MagicMock, patch, call
import pytest
from testframework.checkers import bigquery_checker
from testframework.checkers.bigquery_checker import BigqueryChecker
from testframework.checkers.checker_message import CheckerMessage
from testframework.util.assertion import undecorated_module
from testframework.util.sql_handler import SqlHandler
RETRY_COUNT = 8  # number of retry attempts expected by the retry tests
ZERO_SECONDS = 0  # no wait between retries, keeps the tests fast
class TestBigqueryChecker:
    """Unit tests for BigqueryChecker.

    All collaborators (BigQuery helper, SQL handler, checker message) are
    mocked in setup_method; individual tests patch the checker's own
    methods to isolate the call under test.
    """

    def test_table_returns_row_for_callsGetMessagesAndReturnsFirstRow(self):
        is_partitioned = True
        where = "some_where_filter"
        returned_rows = ["row1", "row2"]
        self.bq_helper_mock.execute_sync_query.side_effect = [[1]]
        self.checker = BigqueryChecker(self.bq_helper_mock, self.sql_handler_mock)
        with patch.object(self.checker, 'get_messages', wraps=self.checker.message_found):
            self.checker.get_messages.return_value = returned_rows
            assert returned_rows[0] == self.checker.table_returns_row_for(self.dataset_name, self.table_name, self.message_mock, is_partitioned, where)
            self.checker.get_messages.assert_called_once_with(self.dataset_name, self.table_name, self.message_mock, is_partitioned, where, True, "loaded_at")

    def test_table_returns_row_for_retriesCallForMessageFound3Times(self):
        self.bq_helper_mock.execute_sync_query.side_effect = [[1]]
        self.checker = BigqueryChecker(self.bq_helper_mock, self.sql_handler_mock)
        with patch.object(self.checker, 'get_messages', wraps=self.checker.message_found):
            # No row is ever returned, so every attempt should retry and
            # the call should eventually raise.
            self.checker.get_messages.return_value = None
            with pytest.raises(Exception):
                self.checker.table_returns_row_for(self.dataset_name, self.table_name, self.message_mock, max_attempts=RETRY_COUNT, wait_seconds=ZERO_SECONDS)
            calls = [call(self.dataset_name, self.table_name, self.message_mock, True, None, True, "loaded_at")] * RETRY_COUNT
            self.checker.get_messages.assert_has_calls(calls)

    def test_table_has_row_for_callsMessageFound(self):
        is_partitioned = True
        where = "some_where_filter"
        self.bq_helper_mock.execute_sync_query.side_effect = [[1]]
        self.checker = BigqueryChecker(self.bq_helper_mock, self.sql_handler_mock)
        with patch.object(self.checker, 'message_found', wraps=self.checker.message_found):
            self.checker.message_found.return_value = True
            assert self.checker.table_has_row_for(self.dataset_name, self.table_name, self.message_mock, is_partitioned, where)
            self.checker.message_found.assert_called_once_with(self.dataset_name, self.table_name, self.message_mock, is_partitioned, where, True, "loaded_at")

    # Strip the tenacity retry decorator so the timeout path is hit directly.
    @undecorated_module(bigquery_checker, 'tenacity.retry')
    def test_tableHasRowFor_rowNotFound_raisesTimeoutError(self):
        self.bq_helper_mock.execute_sync_query.side_effect = [[1]]
        self.checker = bigquery_checker.BigqueryChecker(self.bq_helper_mock, self.sql_handler_mock)
        with patch.object(self.checker, 'message_found', wraps=self.checker.message_found):
            self.checker.message_found.return_value = False
            with pytest.raises(TimeoutError):
                self.checker.table_has_row_for(self.dataset_name, self.table_name, self.message_mock)

    def test_get_messages_with_is_partitioned_true_query_executed_with_correctly_formatted_query(self):
        # Uses a real SqlHandler so the fully formatted SQL can be asserted.
        utcnow = datetime.datetime(2017, 9, 14, 11, 47, 42)
        self.bq_client_mock.project = "testing"
        sql_handler = SqlHandler()
        with patch("datetime.datetime"), patch.object(sql_handler, 'build_query',
                                                      wraps=sql_handler.build_query) as sql_handler_build_query:
            datetime.datetime.utcnow.return_value = utcnow
            self.checker = BigqueryChecker(self.bq_helper_mock, sql_handler)
            self.checker.get_messages(self.dataset_name, self.table_name, self.message_mock)
            params_for_query = {"gcp_project_id": self.bq_client_mock.project,
                                "dataset_name": self.dataset_name,
                                "table_name": self.table_name}
            # Partition pruning window: +/- 1 day around the (mocked) current day.
            partition_filter_sql = ("(_PARTITIONTIME IS NULL OR _PARTITIONTIME" + \
                                    " BETWEEN TIMESTAMP_ADD(TIMESTAMP('{partition_day}'), INTERVAL -1 DAY)" + \
                                    " AND TIMESTAMP_ADD(TIMESTAMP('{partition_day}'), INTERVAL 1 DAY))").format(
                partition_day="2017-09-14")
            where_params = {
                "filter": "CAST(customer_id AS STRING) = '123'",
                "partition_filter": partition_filter_sql,
                "where": "loaded_at is not null"
            }
            expected_sql_to_run = \
                ("SELECT * FROM `{gcp_project_id}.{dataset_name}.{table_name}` " + \
                 "WHERE ({partition_filter}) " + \
                 "AND ({filter}) " + \
                 "AND ({where})").format(**{**params_for_query, **where_params})
            datetime.datetime.utcnow.assert_called_once()
            self.message_mock.get_unique_fields.assert_called_once()
            sql_handler_build_query.assert_has_calls(
                [call({**params_for_query, **where_params})])
            self.bq_helper_mock.execute_sync_query.assert_called_once_with(expected_sql_to_run)

    def test_get_messages_with_is_partitioned_false_query_executed_with_correctly_formatted_query(self):
        utcnow = datetime.datetime(2017, 9, 14, 11, 47, 42)
        self.bq_client_mock.project = "testing"
        self.sql_handler_mock.build_query.return_value = "valid_query"
        self.checker = BigqueryChecker(self.bq_helper_mock, self.sql_handler_mock)
        with patch("datetime.datetime"):
            datetime.datetime.utcnow.return_value = utcnow
            self.checker.get_messages(self.dataset_name, self.table_name, self.message_mock, False)
            params_for_query = {"gcp_project_id": self.bq_client_mock.project,
                                "dataset_name": self.dataset_name,
                                "table_name": self.table_name}
            # Unpartitioned tables use a no-op partition filter.
            where_params = {
                "filter": "CAST(customer_id AS STRING) = '123'",
                "partition_filter": "true",
                "where": "loaded_at is not null"
            }
            datetime.datetime.utcnow.assert_not_called()
            self.message_mock.get_unique_fields.assert_called_once()
            self.sql_handler_mock.build_query.assert_called_once_with({**params_for_query, **where_params})
            self.bq_helper_mock.execute_sync_query.assert_called_once_with("valid_query")

    def test_get_messages_with_is_partitioned_false_and_where_conditions_query_executed_with_correctly_formatted_query(
            self):
        utcnow = datetime.datetime(2017, 9, 14, 11, 47, 42)
        where = "loaded_at is not null"
        self.bq_client_mock.project = "testing"
        self.sql_handler_mock.build_query.return_value = "valid_query"
        self.checker = BigqueryChecker(self.bq_helper_mock, self.sql_handler_mock)
        with patch("datetime.datetime"):
            datetime.datetime.utcnow.return_value = utcnow
            self.checker.get_messages(self.dataset_name, self.table_name, self.message_mock, False)
            params_for_query = {"gcp_project_id": self.bq_client_mock.project,
                                "dataset_name": self.dataset_name,
                                "table_name": self.table_name}
            where_params = {
                "filter": "CAST(customer_id AS STRING) = '123'",
                "partition_filter": "true",
                "where": where
            }
            datetime.datetime.utcnow.assert_not_called()
            self.message_mock.get_unique_fields.assert_called_once()
            self.sql_handler_mock.build_query.assert_called_once_with({**params_for_query, **where_params})
            self.bq_helper_mock.execute_sync_query.assert_called_once_with("valid_query")

    def test_get_messages_returns_rows(self):
        self.bq_helper_mock.execute_sync_query.side_effect = [[1, 2]]
        self.checker = BigqueryChecker(self.bq_helper_mock, self.sql_handler_mock)
        assert [1, 2] == self.checker.get_messages(self.dataset_name, self.table_name, self.message_mock)

    def test_message_found_returns_results(self):
        self.bq_helper_mock.execute_sync_query.side_effect = [[1]]
        self.checker = BigqueryChecker(self.bq_helper_mock, self.sql_handler_mock)
        with patch.object(self.checker, 'get_messages', wraps=self.checker.get_messages):
            self.checker.get_messages.return_value = ['row1']
            assert self.checker.message_found(self.dataset_name, self.table_name, self.message_mock)
            self.checker.get_messages.assert_called_once_with(self.dataset_name, self.table_name, self.message_mock,
                                                              True, None, True, "loaded_at")

    def setup_method(self):
        # Fresh mocks for every test; the helper exposes its BQ client as
        # a private attribute, which the tests reach through directly.
        self.dataset_name = "test_dataset"
        self.table_name = "test_table"
        self.source = {self.dataset_name: [self.table_name]}
        self.message_mock = Mock(CheckerMessage)
        self.bq_helper_mock = Mock()
        self.bq_client_mock = self.bq_helper_mock._bq_client
        self.message_mock.get_unique_fields.return_value = {"customer_id": 123}
        self.bq_helper_mock.execute_sync_query = MagicMock()
        self.sql_handler_mock = Mock(SqlHandler)
|
import numpy as np
import random
# Prepare the web-Google edge list for DyRep: append a constant label
# column to every edge, shuffle, and write an 80/20 train/test split.
data_path = './data/web-Google.txt'
# Read the whole file; after the newline->comma substitution and split,
# each entry is one tab-separated edge. The first 5 entries are header lines.
with open(data_path, 'r') as f:
    data = f.read().replace("\n", ",").split(",")
del data[:5]
# Append a constant '1' label (the random 0/1 labelling was abandoned);
# a comprehension replaces the original per-index += concatenation loop.
data = [edge + '\t1' for edge in data]
# Shuffle, then split into 80% train / 20% test.
dataset = np.random.permutation(np.array(data))
test_size = int(len(data) * 0.2)
train_size = test_size * 4
test_data = dataset[:test_size]
train_data = dataset[test_size:]
# Save the two splits, one edge per line; with-blocks close the handles.
with open('./data/web-Google_dyrep_train.txt', 'w') as f:
    f.write('\n'.join(train_data))
with open('./data/web-Google_dyrep_test.txt', 'w') as f:
    f.write('\n'.join(test_data))
|
from collections import defaultdict
from logging import getLogger, NOTSET, basicConfig
from pkg_resources import resource_filename
from logging.config import fileConfig
import numpy as np
import scipy.stats
from statsmodels.sandbox.stats.multicomp import multipletests
# import matplotlib.pyplot as plt
import pandas as pd
import sklearn
import sklearn.ensemble
import calour as ca
from calour.util import _to_list
from calour.training import plot_scatter, plot_roc, plot_cm
try:
    # get the logger config file location from the package data
    log_file = resource_filename(__package__, 'log.cfg')
    # set the logger output according to log.cfg;
    # disable_existing_loggers=False lets other loggers keep printing
    fileConfig(log_file, disable_existing_loggers=False)
except Exception:
    # Best effort: narrowed from a bare `except:` so KeyboardInterrupt /
    # SystemExit are no longer swallowed; fall back to a basic config.
    print('failed to load logging config file')
    basicConfig(format='%(levelname)s:%(message)s')
logger = getLogger(__package__)
# mirror the calour module's log level if it is configured
try:
    clog = getLogger('calour')
    calour_log_level = clog.getEffectiveLevel()
    if calour_log_level != NOTSET:
        logger.setLevel(calour_log_level)
except Exception:
    print('calour module not found for log level setting. Level not set')
def equalize_groups(exp, group_field, equal_fields, random_seed=None):
    '''Normalize an experiment so all groups have the same number of samples from each equal_field

    Parameters
    ----------
    exp: calour.Experiment
    group_field: str
        the field by which samples are divided into groups (at least 2 groups)
    equal_fields: list of str
        list of fields for which each of the groups should have the same amount of samples for each value.
        if more than one supplied, the combination is created as a unique value
    random_seed: int or None, optional
        seed forwarded to downsample for reproducible subsampling

    Returns
    -------
    Experiment, with equal number of samples for each value of equal_fields in each group
    '''
    exp = exp.copy()
    # Join all equal_fields into one combined field so each unique
    # combination can be balanced as a single value.
    jfield = equal_fields[0]
    if len(equal_fields) > 1:
        cname = '__calour_joined'
        for cefield in equal_fields[1:]:
            exp = exp.join_metadata_fields(jfield, cefield, cname)
            jfield = cname
            cname += 'X'
    # Combined group+equal field: downsampling on it equalizes group sizes
    # within each equal_fields value.
    exp = exp.join_metadata_fields(group_field, jfield, '__calour_final_field', axis=0)
    samples = []
    for cval in exp.sample_metadata[jfield].unique():
        cexp = exp.filter_samples(jfield, cval)
        # skip values present in only one group -- nothing to balance
        if len(cexp.sample_metadata['__calour_final_field'].unique()) == 1:
            continue
        cexp = cexp.downsample('__calour_final_field', inplace=True, random_seed=random_seed)
        samples.extend(cexp.sample_metadata.index.values)
    res = exp.filter_ids(samples, axis='s')
    return res
def merge_general(exp, field, val1, val2, new_field=None, v1_new=None, v2_new=None):
    '''merge a field with multiple values into a new field with only two values

    All samples with values not in val1, val2 are filtered away

    Parameters
    ----------
    exp:
        calour.Experiment
    field : str
        the field to merge
    val1, val2: list of str
        the values to merge together
    new_field : str or None (optional)
        name of the new field. if None, new field will be field+"_merged"
    v1_new, v2_new: str or None, optional
        name of new values for merged val1, val2
        if None, will use "+".join(val) of the merged values

    Returns
    -------
    newexp: calour.Experiment, with values in 2 categories - v1_new/v2_new
    '''
    if new_field is None:
        new_field = field + '_merged'
    newexp = exp.copy()
    newexp.sample_metadata[new_field] = newexp.sample_metadata[field].copy()
    if v1_new is None:
        v1_new = '+'.join(map(str, val1))
    if v2_new is None:
        v2_new = '+'.join(map(str, val2))
    # Assign the replaced Series instead of Series.replace(inplace=True):
    # in-place replace on a selected column is a deprecated chained-assignment
    # pattern in modern pandas and may not write back to the dataframe.
    newexp.sample_metadata[new_field] = newexp.sample_metadata[new_field].replace(val1, v1_new)
    newexp.sample_metadata[new_field] = newexp.sample_metadata[new_field].replace(val2, v2_new)
    newexp = newexp.filter_samples(new_field, [v1_new, v2_new], inplace=True)
    return newexp
def get_ratios(exp, id_field, group_field, group1, group2, min_thresh=5):
    '''get a new experiment made of the ratios between different group_field values
    for the same id_field

    Parameters
    ----------
    exp : Experiment
    id_field: str
        name of the field containing the individual id. ratios are calculated
        for samples with the same id_field (i.e. the individual id)
    group_field: str
        name of the field with the two groups to calculate the ratio of
        (i.e. sample_site)
    group1: str
        value of group_field for group1 (nominator)
    group2: str
        value of group_field for group1 (denominator)
    min_thresh: float, optional
        values below min_thresh are clipped to min_thresh before the ratio,
        avoiding division by zero and unbounded log ratios

    Returns
    -------
    calour.Experiment
        with only samples from group1 that have group1 and group2 values.
        Data contains the log2 ratio of group1/group2
    '''
    data = exp.get_data(sparse=False)
    newexp = exp.copy()
    newexp.sparse = False
    keep = []
    for cid in exp.sample_metadata[id_field].unique():
        # sample positions of this individual in each group
        pos1 = np.where((exp.sample_metadata[id_field] == cid) & (exp.sample_metadata[group_field] == group1))[0]
        pos2 = np.where((exp.sample_metadata[id_field] == cid) & (exp.sample_metadata[group_field] == group2))[0]
        # a ratio is defined only when exactly one sample exists per group
        if len(pos1) != 1:
            print('not 1 sample for group1: %s' % cid)
            continue
        if len(pos2) != 1:
            print('not 1 sample for group2: %s' % cid)
            continue
        cdat1 = data[pos1, :]
        cdat2 = data[pos2, :]
        # clip low values so the log ratio stays bounded
        cdat1[cdat1 < min_thresh] = min_thresh
        cdat2[cdat2 < min_thresh] = min_thresh
        # store the log2 ratio in the group1 sample's row
        newexp.data[pos1, :] = np.log2(cdat1 / cdat2)
        keep.append(pos1[0])
    print('found %d ratios' % len(keep))
    # keep only the group1 samples for which a ratio was computed
    newexp = newexp.reorder(keep, axis='s')
    return newexp
def get_sign_pvals(exp, alpha=0.1, min_present=5):
    '''Sign-test each feature of a ratio experiment with FDR control.

    data should come from get_ratios() (log ratios, sign indicates direction).

    Parameters
    ----------
    exp: calour.Experiment
        data should be log ratios (output of get_ratios())
    alpha: float, optional
        FDR level for the Benjamini-Hochberg correction
    min_present: int, optional
        minimal number of non-zero ratios required to test a feature

    Returns
    -------
    calour.Experiment with only the significant features, ordered by signed
    (1 - pvalue); 'esize' and 'pval' columns are added to feature_metadata.
    '''
    exp = exp.copy()
    # get rid of features that don't have enough non-zero ratios
    keep = []
    for idx in range(exp.data.shape[1]):
        cdat = exp.data[:, idx]
        npos = np.sum(cdat > 0)
        nneg = np.sum(cdat < 0)
        if npos + nneg >= min_present:
            keep.append(idx)
    print('keeping %d features with enough ratios' % len(keep))
    exp = exp.reorder(keep, axis='f')
    pvals = []
    esize = []
    for idx in range(exp.data.shape[1]):
        cdat = exp.data[:, idx]
        npos = np.sum(cdat > 0)
        nneg = np.sum(cdat < 0)
        # two-sided sign test. NOTE: scipy.stats.binom_test is deprecated in
        # scipy>=1.10 (binomtest replaces it); kept for compatibility.
        pvals.append(scipy.stats.binom_test(npos, npos + nneg))
        # effect size: imbalance of positive vs negative ratios in [-1, 1]
        esize.append((npos - nneg) / (npos + nneg))
    reject = multipletests(pvals, alpha=alpha, method='fdr_bh')[0]
    index = np.arange(len(reject))
    esize = np.array(esize)
    pvals = np.array(pvals)
    exp.feature_metadata['esize'] = esize
    exp.feature_metadata['pval'] = pvals
    index = index[reject]
    okesize = esize[reject]
    # order the significant features by signed significance
    # (a dead duplicate ordering by effect size alone was removed)
    new_order = np.argsort((1 - pvals[reject]) * np.sign(okesize))
    newexp = exp.reorder(index[new_order], axis='f', inplace=False)
    print('found %d significant' % len(newexp.feature_metadata))
    return newexp
def show_wordcloud(exp, ignore_exp=None, server='http://127.0.0.1:5000'):
    '''open the wordcloud html page from dbbact for all sequences in exp

    The rendered HTML is saved to 'wordcloud.html' and opened in the browser.

    Parameters
    ----------
    exp: AmpliconExperiment
    ignore_exp: None or list of int, optional
        expids to ignore when drawing the wordcloud
    server: str, optional
        base URL of the dbbact sequence server
    '''
    import requests
    import webbrowser
    import os

    print('getting wordcloud for %d sequences' % len(exp.feature_metadata))
    params = {}
    params['sequences'] = list(exp.feature_metadata.index.values)
    params['ignore_exp'] = ignore_exp
    res = requests.post(server + '/sequences_wordcloud', json=params)
    if res.status_code != 200:
        print('failed')
        print(res.status_code)
        print(res.reason)
        # BUGFIX: bail out -- the original fell through and wrote/opened
        # the failed response body as if it were the wordcloud.
        return
    print('got output')
    with open('wordcloud.html', 'w') as fl:
        fl.write(res.text)
    webbrowser.open('file://' + os.path.realpath('wordcloud.html'), new=True)
def collapse_correlated(exp, min_corr=0.95):
    '''merge features that have very correlated expression profile
    useful after dbbact.sample_enrichment()
    all correlated featuresIDs are concatenated to a single id

    Parameters
    ----------
    exp: calour.Experiment
    min_corr: float, optional
        minimal Pearson correlation between two features to merge them

    Returns
    -------
    Experiment, with correlated features merged
    '''
    import numpy as np
    data = exp.get_data(sparse=False, copy=True)
    # feature-by-feature Pearson correlation matrix
    corr = np.corrcoef(data, rowvar=False)
    use_features = set(np.arange(corr.shape[0]))
    feature_ids = {}
    orig_ids = {}
    for idx, cfeature in enumerate(exp.feature_metadata.index.values):
        feature_ids[idx] = str(cfeature)
        orig_ids[idx] = str(cfeature)
    # differential-abundance effect size; of two merged features, the one
    # with the larger absolute effect keeps its position
    da = exp.feature_metadata['_calour_diff_abundance_effect']
    for idx in range(corr.shape[0]):
        if idx not in use_features:
            continue
        # all features correlated with this one above the threshold
        corr_pos = np.where(corr[idx, :] >= min_corr)[0]
        for idx2 in corr_pos:
            if idx2 == idx:
                continue
            if idx2 in use_features:
                id1 = orig_ids[idx]
                id2 = orig_ids[idx2]
                if abs(da[id1]) < abs(da[id2]):
                    pos1 = idx2
                    pos2 = idx
                else:
                    pos1 = idx
                    pos2 = idx2
                # concatenate the merged feature's id onto the kept one
                feature_ids[pos1] = feature_ids[pos1] + '; ' + feature_ids[pos2]
                use_features.remove(idx2)
                del feature_ids[idx2]
    keep_pos = list(use_features)
    newexp = exp.copy()
    newexp.data = data
    newexp = newexp.reorder(keep_pos, axis='f', inplace=True)
    # re-index by the concatenated feature ids
    feature_ids_list = [feature_ids[idx] for idx in keep_pos]
    newexp.feature_metadata['_featureid'] = feature_ids_list
    newexp.feature_metadata.set_index('_featureid', drop=False, inplace=True)
    return newexp
def plot_violin(exp, field, features=None, downsample=True, num_keep=None, **kwargs):
    '''Plot a violin plot for the distribution of frequencies for a (combined set) of features

    Parameters
    ----------
    exp: Experiment
    field: str
        Name of the field to plot for
    features: list of str or None, optional
        None to sum frequencies of all features. Otherwise sum frequencies of features in list.
    downsample: bool, optional
        True to run exp.downsample on the field so all groups have same number of samples.
    num_keep: int or None, optional
        The minimal group size for downsample, or None to use smallest group size
    **kwargs: additional parameters to pass to pyplot.violinplot

    Returns
    -------
    figure
    '''
    import matplotlib.pyplot as plt

    if downsample:
        exp = exp.downsample(field, num_keep=num_keep)
    if features is not None:
        exp = exp.filter_ids(features)
    # total frequency per sample, summed over the selected features
    data = exp.get_data(sparse=False).sum(axis=1)
    group_freqs = []
    group_names = []
    for cgroup in exp.sample_metadata[field].unique():
        group_names.append(cgroup)
        group_freqs.append(data[exp.sample_metadata[field] == cgroup])
    fig = plt.figure()
    plt.violinplot(group_freqs, **kwargs)
    # violinplot positions start at 1, so the ticks do too
    plt.xticks(np.arange(1, len(group_names) + 1), group_names)
    return fig
def splot(exp, field, **kwargs):
    '''Plot the experiment with its samples sorted by *field*.

    Extra keyword arguments are forwarded to Experiment.plot.
    '''
    ordered = exp.sort_samples(field)
    return ordered.plot(sample_field=field, gui='qt5', **kwargs)
def sort_by_bacteria(exp, seq, inplace=True):
    '''sort samples according to the frequency of a given bacteria

    Parameters
    ----------
    exp: calour.Experiment
    seq: str
        the feature (bacteria) id to sort the samples by
    inplace: bool, optional
        True to modify exp itself, False to work on a copy

    Returns
    -------
    calour.Experiment with samples sorted by the frequency of seq
    '''
    # BUGFIX: the docstring originally appeared *after* the import below,
    # so Python never treated it as a docstring; it now comes first.
    import numpy as np
    # column position of the requested feature
    spos = np.where(exp.feature_metadata.index.values == seq)[0][0]
    bf = exp.get_data(sparse=False, copy=True)[:, spos].flatten()
    if inplace:
        newexp = exp
    else:
        newexp = exp.copy()
    newexp.sample_metadata['bf'] = bf
    newexp = newexp.sort_samples('bf')
    return newexp
def metadata_enrichment(exp, field, val1, val2=None, ignore_vals=frozenset(['Unspecified', 'Unknown']), use_fields=None, alpha=0.05):
    '''Test for metadata enrichment over all metadata fields between the two groups

    Parameters
    ----------
    exp: Experiment
    field: str
        the field to divide the samples
    val1: str or list of str
        first group values for field
    val2: str or list of str or None, optional
        second group values or None to select all not in group1
    ignore_vals: collection of str
        the values in the metadata field to ignore
        (frozenset default: avoids the mutable-default-argument pitfall;
        only membership is tested, so callers are unaffected)
    use_fields: list of str or None, optional
        list of fields to test for enrichment on None to test all
    alpha: float
        the p-value cutoff

    Returns
    -------
    None; enriched field/value pairs are printed to stdout.
    '''
    exp1 = exp.filter_samples(field, val1)
    if val2 is None:
        # group2 is the complement of group1
        exp2 = exp.filter_samples(field, val1, negate=True)
    else:
        exp2 = exp.filter_samples(field, val2)
    tot_samples = len(exp.sample_metadata)
    s1 = len(exp1.sample_metadata)
    s2 = len(exp2.sample_metadata)
    if use_fields is None:
        use_fields = exp.sample_metadata.columns
    for ccol in use_fields:
        for cval in exp.sample_metadata[ccol].unique():
            if cval in ignore_vals:
                continue
            num1 = np.sum(exp1.sample_metadata[ccol] == cval)
            num2 = np.sum(exp2.sample_metadata[ccol] == cval)
            # skip rare values -- the binomial test is meaningless there
            if num1 + num2 < 20:
                continue
            # expected fraction of this value under no enrichment
            p0 = (num1 + num2) / tot_samples
            pv1 = scipy.stats.binom_test(num1, s1, p0)
            pv2 = scipy.stats.binom_test(num2, s2, p0)
            if (pv1 < alpha):
                print('column %s value %s enriched in group1. p0=%f, num1=%f/%f (e:%f) num2=%f/%f (e:%f). pval %f' % (ccol, cval, p0, num1, s1, s1 * p0, num2, s2, s2 * p0, pv1))
            if (pv2 < alpha):
                print('column %s value %s enriched in group2. p0=%f, num1=%f/%f (e:%f) num2=%f/%f (e:%f). pval %f' % (ccol, cval, p0, num1, s1, s1 * p0, num2, s2, s2 * p0, pv2))
def filter_singletons(exp, field, min_number=2):
    '''Keep only samples whose *field* value occurs at least *min_number* times.

    Used to remove singleton twins from the twinsuk study.
    '''
    value_counts = exp.sample_metadata[field].value_counts()
    frequent_values = value_counts[value_counts >= min_number]
    return exp.filter_samples(field, list(frequent_values.index.values))
def numeric_to_categories(exp, field, new_field, values, inplace=True):
    '''convert a continuous field to categories

    Parameters
    ----------
    exp: calour.Experiment
    field: str
        the continuous field name
    new_field: str
        name of the new categoriezed field name
    values: int or list of float
        the bins to categorize by. each number is the lowest number for the bin. a new bin is created for <first number
    inplace: bool, optional
        True (default) to modify exp itself, False to work on a copy

    Returns
    -------
    calour.Experiment with new metadata field new_field
        a numeric companion column new_field + '_num' is added as well;
        the temporary '_calour_<field>_num' helper column is left in place
    '''
    tmp_field = '_calour_' + field + '_num'
    # descending order: later (smaller) thresholds overwrite earlier labels
    values = np.sort(values)[::-1]
    if not inplace:
        exp = exp.copy()
    # keep only numeric values (all other are 0)
    exp.sample_metadata[tmp_field] = pd.to_numeric(exp.sample_metadata[field], errors='coerce')
    exp.sample_metadata[tmp_field] = exp.sample_metadata[tmp_field].fillna(0)
    new_field_num = new_field + '_num'
    sm = exp.sample_metadata
    # default bin: above the largest threshold
    exp.sample_metadata[new_field] = '>%s' % values[0]
    exp.sample_metadata[new_field_num] = values[0]
    for idx, cval in enumerate(values):
        if idx < len(values) - 1:
            # middle bins: "<next-lower-threshold>-<this-threshold>"
            exp.sample_metadata.loc[sm[tmp_field] <= cval, new_field] = '%s-%s' % (values[idx + 1], cval)
        else:
            # lowest bin: everything at or below the smallest threshold
            exp.sample_metadata.loc[sm[tmp_field] <= cval, new_field] = '<%s' % (values[idx])
        exp.sample_metadata.loc[sm[tmp_field] <= cval, new_field_num] = cval
    return exp
def taxonomy_from_db(exp):
    '''Attach dbBact-derived taxonomy to every feature.

    Features without a dbBact taxonomy entry get the value 'na'.

    Returns
    -------
    calour.Experiment
        with a 'taxonomy' column in feature_metadata
    '''
    exp = exp.add_terms_to_features('dbbact', get_taxonomy=True)
    tax_map = exp.exp_metadata['__dbbact_taxonomy']
    if not tax_map:
        print('did not obtain taxonomy from add_terms_to_features')
    # default for features missing from the dbBact taxonomy mapping
    exp.feature_metadata['taxonomy'] = 'na'
    for feature_id, tax_string in tax_map.items():
        exp.feature_metadata.loc[feature_id, 'taxonomy'] = tax_string
    return exp
def focus_features(exp, ids, inplace=False, focus_feature_field='_calour_util_focus'):
    '''Reorder the features so the requested ids appear first (top of the plot).

    Parameters
    ----------
    exp: calour.Experiments
    ids: str or list of str
        the feature ids to focus
    inplace: bool, optional
        True to reorder exp itself, False (default) to return a new experiment
    focus_feature_field: str, optional
        feature_metadata column marking each feature 'focus' or 'orig'

    Returns
    -------
    calour.Experiment
        reordered
    '''
    ids = _to_list(ids)
    index = exp.feature_metadata.index
    # positions of the focus ids that actually exist in this experiment
    focus_pos = [index.get_loc(cid) for cid in ids if cid in index]
    remaining = np.delete(np.arange(len(index)), focus_pos)
    new_order = focus_pos + list(remaining)
    newexp = exp.reorder(new_order, axis='f', inplace=inplace)
    # mark which features were focused vs. left in original order
    newexp.feature_metadata[focus_feature_field] = (
        ['focus'] * len(focus_pos) + ['orig'] * (len(new_order) - len(focus_pos)))
    return newexp
def alpha_diversity_as_feature(exp):
    '''Store a per-sample diversity-like score in sample_metadata['_alpha_div'].

    NOTE(review): the score is sum(x * log2(x)) over raw (not relative)
    frequencies, with values < 1 clipped to 1 -- confirm this un-normalized,
    un-negated quantity is the intended alpha-diversity measure.

    Returns
    -------
    calour.Experiment
        a copy of exp with the new '_alpha_div' sample metadata column
    '''
    dat = exp.get_data(sparse=False, copy=True)
    # clip to 1 so log2 is defined and zero-count features contribute nothing
    dat[dat < 1] = 1
    scores = [np.sum(row * np.log2(row)) for row in dat]
    newexp = exp.copy()
    newexp.sample_metadata['_alpha_div'] = scores
    return newexp
def filter_16s(exp, seq='TACG', minreads=5000):
    '''Keep only samples with at least minreads reads in features whose sequence starts with `seq`.

    Parameters
    ----------
    exp: calour.Experiment
    seq: str, optional
        required sequence prefix (default is the common 16S start 'TACG')
    minreads: int, optional
        minimal total reads (over the matching features) for a sample to be kept
    '''
    prefix_len = len(seq)
    # features that look 16S-like: sequence starts with the given prefix
    okseqs = [cseq for cseq in exp.feature_metadata.index.values if cseq[:prefix_len] == seq]
    matching = exp.filter_ids(okseqs)
    # total reads per sample counted over the matching features only
    reads_per_sample = matching.get_data(sparse=False).sum(axis=1)
    return exp.reorder(reads_per_sample >= minreads, axis='s')
def create_ko_feature_file(ko_file='ko00001.json', out_file='ko_feature_map.tsv'):
    '''Create a feature metadata file for kegg ontologies for picrust2

    Parameters
    ----------
    ko_file: str, optional
        name of the kegg ontology json file to import.
        get it from https://www.genome.jp/kegg-bin/get_htext?ko00001
    out_file: str, optional
        name of the feature mapping file to load into calour
        it contains level and name fields.
        NOTE: if term appears in several levels, it will just keep the first one.
    '''
    import json

    with open(ko_file) as f:
        tt = json.load(f)
    # KO ids already written (keep only the first occurrence of each)
    found = set()
    # fix: the output file was opened without ever being closed; use a
    # context manager so it is flushed and closed even on error
    with open(out_file, 'w') as outf:
        outf.write('ko\tname\tlevel1\tlevel2\tlevel3\n')
        for c1 in tt['children']:
            l1name = c1['name']
            for c2 in c1['children']:
                l2name = c2['name']
                for c3 in c2['children']:
                    l3name = c3['name']
                    # leaf KO entries live under level-3 categories only
                    if 'children' not in c3:
                        continue
                    for c4 in c3['children']:
                        # entry name is "<KO id> <description...>"
                        zz = c4['name'].split()
                        if zz[0] in found:
                            print('duplicate id %s' % c4['name'])
                            continue
                        found.add(zz[0])
                        outf.write(zz[0] + '\t')
                        outf.write(' '.join(zz[1:]) + '\t')
                        outf.write(l1name + '\t')
                        outf.write(l2name + '\t')
                        outf.write(l3name + '\n')
    print('saved to %s' % out_file)
def add_taxonomy(exp):
    '''Add dbBact-derived taxonomy to the sequences in the experiment (in place).

    The taxonomy is stored as exp.feature_metadata['taxonomy'].
    NOTE: may overwrite an existing taxonomy column, and also fills the
    experiment's dbbact metadata fields as a side effect.

    Parameters
    ----------
    exp: calour.Experiment

    Returns
    -------
    calour.Experiment
        the same object that was passed in (modified in place)
    '''
    exp.add_terms_to_features('dbbact', get_taxonomy=True)
    taxonomy = exp.databases['dbbact']['taxonomy']
    exp.feature_metadata['taxonomy'] = pd.Series(taxonomy)
    return exp
def plot_experiment_terms(exp, weight='binary', min_threshold=0.005, show_legend=False, sort_legend=True):
    '''Plot the distribution of most common terms in the experiment
    Using the dbbact annotations. For each sequence, take the strongest term (based on f-score) and plot the
    distribution of such terms for the entire set of sequences in the experiment

    Parameters
    ----------
    exp: calour.Experiment
    weight: str, optional
        how to weigh the frequency of each bacteria. options are:
        'binary': just count the number of bacteria with each term
        'linear': weigh by mean frequency of each bacteria
    min_threshold: float, optional
        Join together to 'other' all terms with < min_treshold of sequences containing them
    show_legend: bool, optional
        True to show legend with pie slice names, false to showin slices
    sort_legend: bool, optional
        True to sort the legend by the pie slice size

    Returns
    -------
    None
        draws a matplotlib pie chart into a new figure
    '''
    import matplotlib.pyplot as plt
    exp = exp.add_terms_to_features('dbbact')
    # NOTE(review): this value_counts() result is immediately overwritten by
    # the defaultdict below -- appears to be dead code
    ct = exp.feature_metadata['common_term'].value_counts()
    dat = exp.get_data(sparse=False)
    # total reads per feature (used for the 'linear' weighting)
    feature_sum = dat.sum(axis=0)
    terms = exp.feature_metadata['common_term']
    # term -> weighted count over all features
    ct = defaultdict(float)
    for idx, cseq in enumerate(exp.feature_metadata.index.values):
        cterm = terms[cseq]
        if weight == 'binary':
            ct[cterm] += 1
        elif weight == 'linear':
            ct[cterm] += feature_sum[idx]
        else:
            raise ValueError('weight=%s not supported. please use binary/linear' % weight)
    # convert to fraction
    all_sum = sum(ct.values())
    for cterm, ccount in ct.items():
        ct[cterm] = ct[cterm] / all_sum
    # join all terms < min_threshold
    c = {}
    c['other'] = 0
    for cterm, cval in ct.items():
        if cval < min_threshold:
            c['other'] += cval
        else:
            c[cterm] = cval
    plt.figure()
    labels = c.keys()
    values = []
    for clabel in labels:
        values.append(c[clabel])
    if show_legend:
        patches, texts = plt.pie(values, radius=0.5)
        # legend entries carry the percentage of each slice
        percent = np.array(values)
        percent = 100 * percent / percent.sum()
        labels = ['{0} - {1:1.2f} %'.format(i, j) for i, j in zip(labels, percent)]
        # sort according to pie slice size
        if sort_legend:
            patches, labels, dummy = zip(*sorted(zip(patches, labels, values), key=lambda x: x[2], reverse=True))
        # plt.legend(patches, labels, loc='left center', bbox_to_anchor=(-0.1, 1.), fontsize=8)
        plt.legend(patches, labels)
    else:
        plt.pie(values, labels=labels)
def read_qiime2(data_file, sample_metadata_file=None, feature_metadata_file=None, rep_seqs_file=None, **kwargs):
    '''Read a qiime2 generated table (even if it was run without the --p-no-hashedfeature-ids flag)

    This is a wrapper for calour.read_amplicon(), that can unzip and extract biom table,
    feature metadata, rep_seqs_file qza files generated by qiime2

    Parameters
    ----------
    data_file: str
        name of qiime2 deblur/dada2 generated feature table qza or biom table
    sample_metadata_file: str or None, optional
        name of tab separated mapping file
    feature_metadata_file: str or None, optional
        can be the taxonomy qza or tsv generated by qiime2 feature classifier
    rep_seqs_file: str or None, optional
        if not none, name of the qiime2 representative sequences qza file (the --o-representative-sequences file name in qiime2 dada2/deblur)
    **kwargs:
        to be passed to calour.read_amplicon

    Returns
    -------
    calour.AmpliconExperiment
    '''
    import tempfile

    with tempfile.TemporaryDirectory() as tempdir:
        # resolve each (possibly .qza) input to a plain readable file
        data_file = filename_from_zip(tempdir, data_file, 'data/feature-table.biom')
        feature_metadata_file = filename_from_zip(tempdir, feature_metadata_file, 'data/taxonomy.tsv')
        rep_seqs_file = filename_from_zip(tempdir, rep_seqs_file, 'data/dna-sequences.fasta')
        expdat = ca.read_amplicon(data_file, sample_metadata_file=sample_metadata_file, feature_metadata_file=feature_metadata_file, **kwargs)
        if rep_seqs_file is not None:
            seqs = []
            with open(rep_seqs_file) as rsf:
                for cline in rsf:
                    # take the sequence from the header lines only
                    if cline[0] != '>':
                        continue
                    # fix: strip the trailing newline -- previously every
                    # feature id ended with '\n', breaking id lookups
                    seqs.append(cline[1:].rstrip('\n'))
            # re-key the features by the sequences from the rep-seqs file
            expdat.feature_metadata['_orig_id'] = expdat.feature_metadata['_feature_id']
            expdat.feature_metadata['_feature_id'] = seqs
            expdat.feature_metadata = expdat.feature_metadata.set_index('_feature_id')
    return expdat
def filename_from_zip(tempdir, data_file, internal_data):
    '''Resolve a (possibly qiime2 .qza zipped) filename to a readable data file.

    Parameters
    ----------
    tempdir: str
        name of the directory to extract the zip into
    data_file: str or None
        original name of the file (could be '.qza' or not)
    internal_data: str
        the internal qiime2 qza file name (i.e. 'data/feature-table.biom' for biom table etc.)

    Returns
    -------
    str or None
        name of data file to read (extracted copy for .qza inputs, the
        original path otherwise; None passes through unchanged).

    Raises
    ------
    ValueError
        if a .qza file does not contain the requested internal file
    '''
    import zipfile

    # pass-through: nothing to extract
    if data_file is None or not data_file.endswith('.qza'):
        return data_file
    archive = zipfile.ZipFile(data_file)
    matches = [name for name in archive.namelist() if name.endswith(internal_data)]
    if not matches:
        raise ValueError('No biom table in qza file %s. is it a qiime2 feature table?' % data_file)
    # first match in archive order, same as the original early-break scan
    return archive.extract(matches[0], tempdir)
def genetic_distance(data, labels):
    '''Score features by negated within-group variation (std/mean).

    For each group of samples sharing a label (groups with >= 2 members only),
    subtract the per-feature std/mean computed across the group's samples.
    Used by get_genetic() as the test statistic for family association.

    Parameters
    ----------
    data: np.ndarray
        features x samples matrix
    labels: np.ndarray
        per-sample group label (e.g. family id)

    Returns
    -------
    np.ndarray
        one score per feature; more negative means more within-group variation
    '''
    scores = np.zeros(data.shape[0])
    for group in np.unique(labels):
        members = np.where(labels == group)[0]
        # singleton groups carry no within-group variation information
        if len(members) <= 1:
            continue
        group_data = data[:, members]
        scores -= np.std(group_data, axis=1) / np.mean(group_data, axis=1)
    return scores
def get_genetic(exp, field, alpha=0.1, numperm=1000, fdr_method='dsfdr'):
    '''Look for features that depend on family/genetics by comparing within family std/mean to random permutations

    Parameters
    ----------
    exp: calour.Experiment
    field: str
        the field that has the same value for members of same family
    alpha: float, optional
        dsFDR significance threshold
    numperm: int, optional
        number of label permutations for the null distribution
    fdr_method: str, optional
        FDR method passed to ca.dsfdr.dsfdr

    Returns
    -------
    calour.Experiment
        containing only the significant features
    '''
    cexp = exp.filter_abundance(0, strict=True)
    # features x samples matrix for the test statistic
    data = cexp.get_data(copy=True, sparse=False).transpose()
    # floor low counts to reduce noise in the std/mean statistic
    data[data < 4] = 4
    labels = exp.sample_metadata[field].values
    # remove samples that don't have similar samples w.r.t field
    remove_samps = []
    remove_pos = []
    for cval in np.unique(labels):
        pos = np.where(labels == cval)[0]
        if len(pos) < 2:
            remove_samps.append(cval)
            # fix: collect the actual sample positions; previously the index
            # into np.unique(labels) was used, deleting the wrong samples
            remove_pos.extend(pos)
    if len(remove_pos) > 0:
        labels = np.delete(labels, remove_pos)
        data = np.delete(data, remove_pos, axis=1)
        print('removed singleton samples %s' % remove_samps)
    print('testing with %d samples' % len(labels))
    keep, odif, pvals = ca.dsfdr.dsfdr(data, labels, method=genetic_distance, transform_type='log2data', alpha=alpha, numperm=numperm, fdr_method=fdr_method)
    print('Positive correlated features : %d. Negative correlated features : %d. total %d'
          % (np.sum(odif[keep] > 0), np.sum(odif[keep] < 0), np.sum(keep)))
    newexp = ca.analysis._new_experiment_from_pvals(cexp, exp, keep, odif, pvals)
    return newexp
def filter_contam(exp, field, blank_vals, negate=False):
    '''Filter suspected contaminants based on blank samples

    Filter by removing features that have lower mean in samples compared to blanks

    Parameters
    ----------
    exp: calour.AmpliconExperiment
    field: str
        name of the field identifying blank samples
    blank_vals: str or list of str
        the values for the blank samples in the field
    negate: bool, optional
        False (default) to remove contaminants, True to keep only contaminants

    Returns
    -------
    calour.AmpliconExperiment with only features that are not contaminants (if negate=False) or contaminants (if negate=True)
    '''
    bdata = exp.filter_samples(field, blank_vals).get_data(sparse=False)
    sdata = exp.filter_samples(field, blank_vals, negate=True).get_data(sparse=False)
    bmean = bdata.mean(axis=0)
    smean = sdata.mean(axis=0)
    # a feature is ok (non-contaminant) if its mean abundance in the real
    # samples exceeds its mean abundance in the blanks
    okf = smean > bmean
    # fix: report the number of suspected contaminants, not the number of
    # ok features
    print('found %d contaminants' % (len(okf) - okf.sum()))
    if negate:
        # fix: `okf is False` compared the array object's identity to False
        # (always False); invert the boolean mask element-wise instead
        okf = ~okf
    newexp = exp.reorder(okf, axis='f')
    return newexp
def order_samples(exp, field, order):
    '''Order samples by a custom value order in `field`.

    Samples whose value is not listed in `order` keep their relative order,
    after all listed values.

    Parameters
    ----------
    exp: Calour.Experiment
    field: str
        name of the field to order by
    order: list of str
        the requested order of values in the field

    Returns
    -------
    Calour.Experiment
    '''
    newexp = exp.copy()
    sort_key = '__order_field'
    # unlisted values sort after every explicit position
    newexp.sample_metadata[sort_key] = 999999
    for rank, value in enumerate(order):
        matching = newexp.sample_metadata[field] == value
        newexp.sample_metadata.loc[matching, sort_key] = rank
    return newexp.sort_samples(sort_key)
def test_picrust_enrichment(dd_exp, picrust_exp, **kwargs):
    '''find enrichment in picrust2 terms comparing 2 groups

    Parameters
    ----------
    dd_exp: calour.AmpliconExperiment
        the differential abundance results (on bacteria)
    picrust_exp: calour.Experiment
        The picrust2 intermediate file (EC/KO). load it using:
        picrust_exp=ca.read('./EC_predicted.tsv',data_file_type='csv',sample_in_row=True, data_table_sep='\t', normalize=None)
        NOTE: rows are KO/EC, columns are bacteria
    **kwargs:
        passed to diff_abundance. can include: alpha, method, etc.

    Returns
    -------
    ca.Experiment with the enriched KO/EC terms
        The original group the bacteria (column) is in the '__group' field

    Raises
    ------
    ValueError
        if dd_exp does not contain exactly two differential-abundance groups
    '''
    # the two direction labels from the differential abundance result
    vals = dd_exp.feature_metadata['_calour_direction'].unique()
    if len(vals) != 2:
        raise ValueError('Diff abundance groups contain !=2 values')
    # feature metadata of each group's bacteria
    id1 = dd_exp.feature_metadata[dd_exp.feature_metadata['_calour_direction'] == vals[0]]
    id2 = dd_exp.feature_metadata[dd_exp.feature_metadata['_calour_direction'] == vals[1]]
    # NOTE(review): '__picrust_test' is initialized but never used below;
    # the group assignment actually goes into '__group'
    picrust_exp.sample_metadata['__picrust_test'] = ''
    # tag each picrust "sample" (= bacterium) with its group
    picrust_exp.sample_metadata.loc[picrust_exp.sample_metadata.index.isin(id1.index), '__group'] = vals[0]
    picrust_exp.sample_metadata.loc[picrust_exp.sample_metadata.index.isin(id2.index), '__group'] = vals[1]
    tt = picrust_exp.filter_samples('__group', [vals[0], vals[1]])
    tt = tt.diff_abundance('__group', vals[0], vals[1], **kwargs)
    # attach the per-bacterium metadata from dd_exp to the result samples
    tt.sample_metadata = tt.sample_metadata.merge(dd_exp.feature_metadata, how='left', left_on='_sample_id', right_on='_feature_id')
    return tt
def uncorrelate(exp, normalize=False, random_seed=None):
    '''remove correlations between features in the experiment, by permuting samples of each bacteria

    Parameters
    ----------
    exp: calour.Experiment
        the experiment to permute
    normalize: False or int, optional
        if not False, normalize each sample after the uncorrelation
        (an int value is used as the target number of reads; True uses 10000)
    random_seed: int or None, optional
        if not None, seed the numpy random seed with it

    Returns
    -------
    calour.Experiment
        the permuted experiment (each feature randomly permuted along samples)
    '''
    exp = exp.copy()
    exp.sparse = False
    if random_seed is not None:
        # fix: np.random_seed does not exist (raised AttributeError);
        # the correct call is np.random.seed
        np.random.seed(random_seed)
    # permute each feature column independently across samples
    for idx in range(len(exp.feature_metadata)):
        exp.data[:, idx] = np.random.permutation(exp.data[:, idx])
    if normalize:
        # fix: the requested depth was ignored (always 10000); honor an int
        # value while keeping 10000 for normalize=True
        depth = 10000 if normalize is True else normalize
        exp.normalize(depth, inplace=True)
    return exp
def plot_dbbact_terms(exp, region=None, only_exact=False, collapse_per_exp=True, ignore_exp=None, num_terms=50, ignore_terms=[]):
    '''Summarize and plot the dbBact terms associated with the experiment's features.

    Builds a term x sequence matrix (counting, per sequence, in how many
    annotating experiments each term appears), keeps the num_terms most common
    terms (the rest pooled into 'other'), clusters the terms and then clusters
    the features by their term-cluster profile, and draws pie charts of the
    term distribution for each feature cluster.

    Parameters
    ----------
    exp: calour.Experiment
        dbBact annotations are fetched here via add_terms_to_features
    region: str or None, optional
        primer region to require when only_exact is True
    only_exact: bool, optional
        True to use only annotations whose primer matches `region`
    collapse_per_exp: bool, optional
        True to count each term at most once per originating dbBact experiment
    ignore_exp: collection of int or None, optional
        dbBact experiment ids to skip
    num_terms: int, optional
        number of most common terms to keep (the rest become 'other')
    ignore_terms: list of str, optional
        terms to drop entirely
        (NOTE(review): mutable default argument; converted to a local set and
        not mutated)

    Returns
    -------
    calour.Experiment
        the term-cluster x feature experiment (www)
    '''
    from sklearn.cluster import AffinityPropagation, OPTICS
    from sklearn import metrics
    import matplotlib.pyplot as plt
    logger.debug('plot_dbbact_terms for %d features' % len(exp.feature_metadata))
    ignore_terms = set(ignore_terms)
    exp = exp.add_terms_to_features('dbbact')
    # per-sequence {term: number of experiments mentioning it}
    terms_per_seq = {}
    # term -> total count over all sequences
    all_terms = defaultdict(float)
    sequences = exp.feature_metadata.index.values
    # sequences=sequences[:20]
    for cseq in sequences:
        anno = exp.exp_metadata['__dbbact_sequence_annotations'][cseq]
        # per-experiment term counts for this sequence
        expterms = {}
        for idx, cannoid in enumerate(anno):
            canno = exp.exp_metadata['__dbbact_annotations'][cannoid]
            # test if region is the same if we require exact region
            if only_exact:
                if region != canno['primer']:
                    continue
            if ignore_exp is not None:
                if canno['expid'] in ignore_exp:
                    continue
            # get the experiment from where the annotation comes
            # if we don't collapse by experiment, each annotation gets a fake unique expid
            if collapse_per_exp:
                cexp = canno['expid']
            else:
                cexp = idx
            if canno['annotationtype'] == 'contamination':
                canno['details'].append(('all', 'contamination'))
            for cdet in canno['details']:
                cterm = cdet[1]
                if cterm in ignore_terms:
                    continue
                # 'low' detail types are recorded as the negated term
                if cdet[0] in ['low']:
                    cterm = '-' + cterm
                if cexp not in expterms:
                    expterms[cexp] = defaultdict(float)
                expterms[cexp][cterm] += 1
        # count each term once per experiment it appeared in
        cseq_terms = defaultdict(float)
        for cexp, cterms in expterms.items():
            for cterm in cterms.keys():
                cseq_terms[cterm] += 1
        for cterm, ccount in cseq_terms.items():
            all_terms[cterm] += ccount
        terms_per_seq[cseq] = cseq_terms
    # keep the num_terms globally most common terms
    all_terms_sorted = sorted(all_terms, key=all_terms.get, reverse=True)
    use_terms = all_terms_sorted[:num_terms]
    use_terms_set = set(use_terms)
    # +1 since we have 'other'
    outmat = np.zeros([len(use_terms) + 1, len(sequences)])
    for seqidx, cseq in enumerate(sequences):
        for cterm in all_terms.keys():
            if cterm in use_terms_set:
                idx = use_terms.index(cterm)
            else:
                idx = len(use_terms)
            outmat[idx, seqidx] += terms_per_seq[cseq][cterm]
    term_names = use_terms + ['other']
    # build a calour experiment with terms as "samples" and sequences as features
    texp = ca.AmpliconExperiment(outmat, pd.DataFrame(term_names, columns=['term'], index=term_names), pd.DataFrame(sequences, columns=['_feature_id'], index=sequences))
    texp.sample_metadata['_sample_id'] = texp.sample_metadata['term']
    ww = texp.normalize()
    print('clustering')
    # cluster the terms by their per-sequence profiles
    af = AffinityPropagation().fit(ww.get_data(sparse=False))
    cluster_centers_indices = af.cluster_centers_indices_
    print('found %d clusters' % len(cluster_centers_indices))
    cluster_centers_indices = af.cluster_centers_indices_
    labels = af.labels_
    texp.sample_metadata['cluster'] = labels
    www = texp.aggregate_by_metadata('cluster')
    # now cluster the features
    bb = OPTICS(metric='l1')
    scaled_exp = www.scale(axis='f')
    fitres = bb.fit(scaled_exp.get_data(sparse=False).T)
    bbb = fitres.labels_
    www.feature_metadata['cluster'] = bbb
    www2 = www.aggregate_by_metadata('cluster', axis='f')
    # and plot the pie charts
    # prepare the labels (first 4 merged term-cluster ids per slice)
    ll = www.sample_metadata['_calour_merge_ids'].values
    labels = []
    for clabel in ll:
        clabel = clabel.split(';')
        clabel = clabel[:4]
        clabel = ';'.join(clabel)
        labels.append(clabel)
    plt.figure()
    # square grid of subplots, one pie per feature cluster
    sqplots = np.ceil(np.sqrt(len(www2.feature_metadata)))
    for idx, cid in enumerate(www2.feature_metadata.index.values):
        plt.subplot(sqplots, sqplots, idx + 1)
        ttt = www2.filter_ids([cid])
        num_features = len(ttt.feature_metadata['_calour_merge_ids'].values[0].split(';'))
        tttdat = ttt.get_data(sparse=False).T[0, :]
        # normalize the slice sizes to fractions (when nonzero)
        if np.sum(tttdat) > 0:
            tttdat = tttdat / np.sum(tttdat)
        plt.pie(tttdat, radius=1,counterclock=False)
        plt.title(num_features)
    # NOTE(review): this final figure re-plots only the last cluster (ttt from
    # the loop above) with the legend -- confirm this is intentional
    plt.figure()
    plt.pie(ttt.get_data(sparse=False).T[0,:], radius=1)
    plt.legend(labels)
    # # merge the original terms used in each cluster
    # details = []
    # for cmerge_ids in www.sample_metadata['_calour_merge_ids'].values:
    #     cdetails = ''
    #     cmids = cmerge_ids.split(';')
    #     for cmid in cmids:
    #         cdetails += ww.sample_metadata['term'][int(cmid)] + ', '
    #     details.append(cdetails)
    # www.sample_metadata['orig_terms'] = details
    # www = www.cluster_features()
    # www.plot(gui='qt5', xticks_max=None, sample_field='term')
    return www
    # ww=ww.cluster_data(axis=1,transform=ca.transforming.binarize)
    # ww=ww.cluster_data(axis=0,transform=ca.transforming.binarize)
    # ww.plot(gui='qt5', xticks_max=None,sample_field='term')
    # return texp
def trim_seqs(exp, new_len):
    '''Trim feature sequences to new_len bases, merging features that become identical.

    Parameters
    ----------
    exp: calour.AmpliconExperiment
        the experiment to trim the sequences (features)
    new_len: the new read length per sequence

    Returns
    -------
    new_exp: calour.AmpliconExperiment
        with trimmed sequences (reads of merged features are summed)
    '''
    trimmed = [full_seq[:new_len] for full_seq in exp.feature_metadata.index.values]
    newexp = exp.copy()
    newexp.feature_metadata['new_seq'] = trimmed
    # sum the reads of features that share a trimmed sequence
    newexp = newexp.aggregate_by_metadata('new_seq', axis='f', agg='sum')
    # re-key the feature metadata by the trimmed sequence
    newexp.feature_metadata = newexp.feature_metadata.reindex(newexp.feature_metadata['new_seq'])
    newexp.feature_metadata['_feature_id'] = newexp.feature_metadata['new_seq']
    return newexp
def filter_features_exp(exp, ids_exp, insert=True):
    '''Filter features of `exp` to match (and be ordered like) `ids_exp`.

    When insert is True, features present in ids_exp but missing from exp are
    inserted as blank features (via join_experiments).

    Parameters
    ----------
    exp: calour.Experiment
        the experiment to filter
    ids_exp: calour.Experiment
        the experiment used to get the ids to filter by
    insert: bool, optional
        True to also insert blank features if feature from ids_exp does not exist in exp

    Returns
    -------
    newexp: calour.Experiment
        exp, filtered and ordered according to ids_exp
    '''
    if insert:
        result = exp.join_experiments(ids_exp, field='orig_exp')
    else:
        result = exp.copy()
    result = result.filter_ids(ids_exp.feature_metadata.index)
    # keep only the samples that came from exp (not from ids_exp)
    result = result.filter_samples('orig_exp', 'exp')
    result.description = exp.description
    # drop sample-metadata columns introduced by the join
    added_cols = [col for col in result.sample_metadata.columns if col not in exp.sample_metadata.columns]
    result.sample_metadata.drop(added_cols, axis='columns', inplace=True)
    return result
def regress_fit(exp, field, estimator=sklearn.ensemble.RandomForestRegressor(), params=None):
    '''Fit a regression model predicting `field` from the experiment data.

    Parameters
    ----------
    exp: calour.Experiment
        samples x features data to fit on
    field : str
        column name in the sample metadata, which contains the variable we want to fit
    estimator : estimator object implementing `fit` and `predict`
        scikit-learn estimator. e.g. :class:`sklearn.ensemble.RandomForestRegressor`
    params: dict or None, optional
        parameters to supply to the estimator (None keeps the estimator defaults)

    Returns
    -------
    model: the model fit to the data
    '''
    features = exp.get_data(sparse=False)
    target = exp.sample_metadata[field]
    # clone the template estimator so repeated calls never share fitted state
    model = sklearn.base.clone(estimator)
    model = model.set_params(**(params if params is not None else {}))
    model.fit(features, target)
    return model
def regress_predict(exp, field, model):
    '''Predict `field` for the samples of exp with a fitted regressor and plot
    predicted vs. true values (single pass, CV column fixed to 0).

    Returns
    -------
    pd.DataFrame
        with 'Y_PRED', 'Y_TRUE', 'SAMPLE' and 'CV' columns (one row per sample)
    '''
    predictions = model.predict(exp.data)
    truth = exp.sample_metadata[field]
    df = pd.DataFrame({'Y_PRED': predictions, 'Y_TRUE': truth.values, 'SAMPLE': truth.index.values, 'CV': 0})
    plot_scatter(df, cv=False)
    return df
def classify_fit(exp, field, estimator=sklearn.ensemble.RandomForestClassifier()):
    '''Fit a classifier predicting `field` from the experiment data.

    Parameters
    ----------
    exp: calour.Experiment
        the experiment to classify
    field: str
        the field to classify
    estimator : estimator object implementing `fit` and `predict`
        scikit-learn estimator. e.g. :class:`sklearn.ensemble.RandomForestRegressor`

    Returns
    -------
    model: the model fit to the data
    '''
    features = exp.get_data(sparse=False)
    labels = exp.sample_metadata[field]
    # clone so the template estimator passed as a default is never mutated
    model = sklearn.base.clone(estimator)
    model.fit(features, labels)
    return model
def classify_predict(exp, field, model, predict='predict_proba', plot_it=True):
    '''Predict classes for the samples in exp using a fitted classifier.

    Parameters
    ----------
    exp: calour.Experiment
        the experiment holding the samples to predict
    field: str
        sample_metadata column holding the true labels
    model: fitted scikit-learn classifier
    predict: str, optional
        name of the prediction method to call on the model
        ('predict_proba' for per-class probabilities, 'predict' for labels)
    plot_it: bool, optional
        True to plot ROC / precision-recall / confusion-matrix summaries
        and print the ROC AUC

    Returns
    -------
    pd.DataFrame
        one row per sample, with per-class probability columns (or a single
        'Y_PRED' column), plus 'Y_TRUE', 'CV' and 'SAMPLE' columns
    '''
    X = exp.get_data(sparse=False)
    y = exp.sample_metadata[field]
    pred = getattr(model, predict)(X)
    if pred.ndim > 1:
        # probability matrix: one column per class
        df = pd.DataFrame(pred, columns=model.classes_)
    else:
        df = pd.DataFrame(pred, columns=['Y_PRED'])
    df['Y_TRUE'] = y.values
    df['CV'] = 1
    df['SAMPLE'] = y.index.values
    if plot_it:
        ca.training.plot_roc(df, cv=False)
        ca.training.plot_prc(df)
        ca.training.plot_cm(df)
        roc_auc = classify_get_roc(df)
        print(roc_auc)
    return df
def classify_get_roc(result):
    '''Get the ROC AUC for the given prediction results.

    Parameters
    ----------
    result: pd.DataFrame
        output of classify_predict(): per-class probability columns plus a
        'Y_TRUE' column with the true labels

    Returns
    -------
    float or None
        ROC AUC of the last class (in sorted order) whose curve could be
        computed, or None if no class yielded a valid curve
    '''
    from sklearn.metrics import roc_curve, auc

    classes = np.unique(result['Y_TRUE'].values)
    classes.sort()
    # fix: previously unbound if no class produced a curve (NameError)
    roc_auc = None
    for cls in classes:
        # one-vs-rest: current class is the positive label
        y_true = result['Y_TRUE'].values == cls
        fpr, tpr, thresholds = roc_curve(y_true.astype(int), result[cls])
        if np.isnan(fpr[-1]) or np.isnan(tpr[-1]):
            logger.warning(
                'The class %r is skipped because the true positive rate or '
                'false positive rate computation failed. This is likely because you '
                'have either no true positive or no negative samples for this class' % cls)
            # fix: the auc was previously still computed on the NaN curve
            continue
        roc_auc = auc(fpr, tpr)
    return roc_auc
def equalize_sample_groups(exp, field):
    '''Filter samples so every value of `field` keeps the same number of samples.

    Parameters
    ----------
    exp: calour.Experiment
        the experiment to equalize
    field: str
        the field to equalize by

    Returns
    -------
    newexp: calour.Experiment
        with similar number of samples for each field value
    '''
    # every group is cut down to the size of the smallest one
    num_keep = exp.sample_metadata[field].value_counts().min()
    logger.info('keeping %d samples with each value' % num_keep)
    seen = defaultdict(int)
    keep_positions = []
    # keep the first num_keep samples (in current order) for each value
    for pos, value in enumerate(exp.sample_metadata[field].values):
        if seen[value] < num_keep:
            seen[value] += 1
            keep_positions.append(pos)
    return exp.reorder(keep_positions, axis='s')
|
# Fetch the hotel listing for Seoul from the earthtory spot API and print the
# raw JSON response.
import requests
import json
# url = "https://www.earthtory.com/ko/city/seoul_310/hotel#1";
json_url = "https://www.earthtory.com/api/spot/get_spot_list"
# Form fields for the listing request. Presumably pl_ci is the city id (310 =
# Seoul per the commented url above), pl_category the spot category,
# min/max_price a price filter and order the sort key -- verify against the
# site's own XHR request.
data = {
    'pl_ci': '310',
    'member_srl': '0',
    'pl_category': '1',
    'cur_page': '1',
    'min_price': '92381',
    'max_price': '1068800',
    'star_rate': '',
    'from_lat': '',
    'from_lng': '',
    'order': 'pl_clip_cnt'
}
# headers = {
#     'Content-Type': 'application/x-www-form-urlencoded',
#     'charset':'UTF-8'
# }
# NOTE(review): module-level network call -- this POST runs on import
req = requests.post(json_url,data=data).text
print(req)
# resultlist = json.loads(req)
# name = resultlist['response_result']['result_code']
# print(name)
|
# Read "company -> employee_id" lines until "End"; collect the unique employee
# ids per company (insertion order preserved), then print each company and its
# ids, companies sorted by name.
companies_id = {}
while True:
    command = input()
    if command == "End":
        break
    company, employee_id = command.split(" -> ")
    # setdefault gives one list per company without a separate membership check
    employees = companies_id.setdefault(company, [])
    if employee_id not in employees:
        employees.append(employee_id)
# dict keys are unique, so plain sorting orders by company name;
# the redundant key=lambda x: x[0] and the f-string around a bare
# variable were removed
for company in sorted(companies_id):
    print(company)
    for employee_id in companies_id[company]:
        print(f"-- {employee_id}")
import pytorch_lightning as pl
from torchvision import transforms
from torchvision.datasets import CIFAR10
from torch.utils.data import DataLoader, random_split
"""
Sample DataModule for CIFAR10 Dataset.
"""
class CIFAR10Data(pl.LightningDataModule):
    """LightningDataModule wrapping torchvision's CIFAR10 dataset.

    Provides train/val/test dataloaders; the 50,000 training images are split
    into 46,000 train / 4,000 validation samples.
    """

    def __init__(self, data_dir='../../data', batch_size=128,
                 num_workers=2, shuffle_train=True):
        """
        Args:
            data_dir: directory where the CIFAR10 archive is (or will be) stored
            batch_size: samples per batch for every dataloader
            num_workers: worker processes per dataloader
            shuffle_train: whether to shuffle the training dataloader
        """
        super(CIFAR10Data, self).__init__()
        self.batch_size = batch_size
        self.data_dir = data_dir
        self.nb_workers = num_workers
        self.shuffle = shuffle_train
        # NOTE(review): these normalization constants are the ImageNet
        # means/stds, not CIFAR10's -- confirm this is intentional.
        self.train_transforms = transforms.Compose([
            transforms.ToTensor(),
            transforms.RandomHorizontalFlip(),
            transforms.RandomRotation(45),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
        self.test_transforms = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])

    def setup(self, stage=None):
        """Instantiate the datasets needed for the given stage."""
        if stage in ('fit', None):
            cifar_full = CIFAR10(self.data_dir,
                                 train=True,
                                 download=True,
                                 transform=self.train_transforms)
            # fix: CIFAR10 has 50,000 training images; the previous
            # [56000, 4000] split summed to 60,000 and made random_split raise
            self.train, self.val = random_split(cifar_full,
                                                [46000, 4000])
        if stage in ('test', None):
            # fix: prepare the test set for stage None as well, per the
            # Lightning convention (was keyed on 'fit' instead of None)
            self.test = CIFAR10(self.data_dir, train=False,
                                download=True, transform=self.test_transforms)

    def train_dataloader(self):
        return DataLoader(self.train, batch_size=self.batch_size,
                          shuffle=self.shuffle, num_workers=self.nb_workers)

    def val_dataloader(self):
        return DataLoader(self.val, batch_size=self.batch_size,
                          num_workers=self.nb_workers)

    def test_dataloader(self):
        return DataLoader(self.test, batch_size=self.batch_size,
                          num_workers=self.nb_workers)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 13 17:50:57 2020

@author: samarth

Plot fitted curves and forward projections (infections, deaths, recoveries)
for each district listed in entry_data_test.csv.
"""
import pandas as pd
from matplotlib import pyplot as plt
import os
from datetime import datetime as dt
from datetime import timedelta
import matplotlib.dates as mdates
import numpy as np
# number of days to project beyond the last observed date
projection_length = 67
# last date with actual data; projections start the day after
final_date = dt(year = 2020, month = 8, day = 15)
plt.figure(figsize = (6,4), dpi=300)
# one row per district: state, district name, population, number of data rows
districts_df = pd.read_csv("entry_data_test.csv", names = ['State', 'District', 'Population', 'Rows'])
size = len(districts_df)
for i in range(size):
state = districts_df.iloc[i]['State']
district = districts_df.iloc[i]['District']
population = districts_df.iloc[i]['Population']
dates_1 = pd.date_range(end = final_date, periods=districts_df.iloc[i]['Rows'])
dates_2 = pd.date_range(start =final_date+timedelta(days=1), periods=projection_length)
dates_1 = [d.strftime('%d-%b') for d in dates_1]
dates_2 = [d.strftime('%d-%b') for d in dates_2]
df_1 = pd.DataFrame()
df_1['Date'] = dates_1
df_2 = pd.DataFrame()
df_2['Date'] = dates_2
dates_df = pd.concat([df_1, df_2])
dates_df = dates_df.reset_index(drop=True)
dates_df['new_index'] = dates_df.index
start_index = (len(dates_df)-1)%40
dates_df = dates_df[start_index::40]
dates_df['new_Date'] = dates_df['Date']
index_ticks = dates_df['new_index'].tolist()
dates_ticks = dates_df['new_Date'].tolist()
filename = "./data/"+state+"/"+district+"_actual.csv"
actual_df = pd.read_csv(filename)
filename = "./data/"+state+"/"+district+"_fit.csv"
preds_df = pd.read_csv(filename)
filename = "./data/"+state+"/"+district+"_projections.csv"
projections_df = pd.read_csv(filename)
projections_df = projections_df[:projection_length]
size_1 = len(preds_df)
size_2 = len(projections_df)
#size_3 = len(latest_df)
init_date = dt(2020, 4, 26)
mid_date = init_date + timedelta(days=size_1)
end_date = mid_date + timedelta(days=size_2)
#x = [0, size_1, (size_1+size_2)]
#ticks = [str(init_date.date()), str(mid_date.date()), str(end_date.date())]
preds_df['IA_cumulative'] = 1 - (preds_df['Susceptible']/population)
projections_df['IA_cumulative'] = 1 - (projections_df['Susceptible']/population)
parent_folder = "./plots/"+state+"/"+district
if not os.path.exists(parent_folder):
os.makedirs(parent_folder)
plt.scatter(range(0, size_1, 5), actual_df[::5]['Infected'], label = 'Actual', color = 'red')
#plt.scatter(range(size_1, size_1+size_3), latest_df['Active_I'], color = 'red')
plt.plot(range(size_1), preds_df['Infected'], label = 'Fit', color = 'blue')
plt.plot(range(size_1, size_1+size_2), projections_df['Infected'], label = 'Projections', color = 'green', linestyle = '--')
plt.xticks(index_ticks,dates_ticks)
plt.title("Projections for active infections for "+district, fontsize = 10)
plt.axvline(size_1, color = 'black', linestyle='--')
plt.legend()
plt.grid()
filename = parent_folder+"/active_infs_projections.png"
plt.savefig(filename, format='png')
plt.clf()
plt.scatter(range(0, size_1, 5), actual_df[::5]['Deceased'], label = 'Actual', color = 'red')
#plt.scatter(range(size_1, size_1+size_3), latest_df['Deceased'], color = 'red')
plt.plot(range(size_1), preds_df['Deceased'], label = 'Fit', color = 'blue')
plt.plot(range(size_1, size_1+size_2), projections_df['Deceased'], label = 'Projections', color = 'green', linestyle = '--')
plt.xticks(index_ticks,dates_ticks)
plt.title("Projections for total Deceased for "+district, fontsize = 10)
plt.axvline(size_1, color = 'black', linestyle='--')
plt.legend()
plt.grid()
filename = parent_folder+"/deaths_projections.png"
plt.savefig(filename, format='png')
plt.clf()
plt.scatter(range(0, size_1, 5), actual_df[::5]['Recovered'], label = 'Actual', color = 'red')
#plt.scatter(range(size_1, size_1+size_3), latest_df['Recovered'], color = 'red')
plt.plot(range(size_1), preds_df['Recovered'], label = 'Fit', color = 'blue')
plt.plot(range(size_1, size_1+size_2), projections_df['Recovered'], label = 'Projections', color = 'green', linestyle = '--')
plt.xticks(index_ticks,dates_ticks)
plt.title("Projections for total Recovered for "+district, fontsize = 10)
plt.axvline(size_1, color = 'black', linestyle='--')
plt.legend()
plt.grid()
filename = parent_folder+"/recoveries_projections.png"
plt.savefig(filename, format='png')
plt.clf()
plt.scatter(range(0, size_1, 5), actual_df[::5]['I_c'], label = 'Actual', color = 'red')
#plt.scatter(range(size_1, size_1+size_3), latest_df['Confirmed'], color = 'red')
plt.plot(range(size_1), preds_df['I_c'], label = 'Fit', color = 'blue')
plt.plot(range(size_1, size_1+size_2), projections_df['I_c'], label = 'Projections', color = 'green', linestyle = '--')
plt.xticks(index_ticks,dates_ticks)
plt.title("Projections for cumulative infections for "+district, fontsize = 10)
plt.axvline(size_1, color = 'black', linestyle='--')
plt.legend()
plt.grid()
filename = parent_folder+"/cumulative_infs_projections.png"
plt.savefig(filename, format='png')
plt.clf()
plt.plot(range(size_1), preds_df['IA_cumulative'], label = 'Past', color = 'blue')
plt.plot(range(size_1, size_1+size_2), projections_df['IA_cumulative'], label = 'Projections', color = 'green', linestyle = '--')
plt.xticks(index_ticks,dates_ticks)
plt.title("I+A for "+district, fontsize = 10)
plt.axvline(size_1, color = 'black', linestyle='--')
plt.legend()
plt.grid()
filename = parent_folder+"/I+A.png"
plt.savefig(filename, format='png')
plt.clf()
plt.plot(range(size_1), preds_df['Asymptomatic']/preds_df['Infected'], label = 'Past', color = 'blue')
plt.plot(range(size_1, size_1+size_2), projections_df['Asymptomatic']/projections_df['Infected'], label = 'Projections', color = 'green', linestyle = '--')
plt.xticks(index_ticks,dates_ticks)
plt.title("I+A ratio for "+district, fontsize = 10)
plt.axvline(size_1, color = 'black', linestyle='--')
plt.legend()
plt.grid()
filename = parent_folder+"/ratio.png"
plt.savefig(filename, format='png')
plt.clf() |
from chatterbot import ChatBot
from chatterbot.adapters import Adapter
from tests.base_case import ChatBotTestCase
class AdapterValidationTests(ChatBotTestCase):
    """Verify that ChatBot rejects adapters supplied under the wrong
    category and accepts correctly-typed ones (string or dict form)."""

    def _make_bot(self, **overrides):
        # Build a bot from the base test kwargs plus the given overrides.
        kwargs = self.get_kwargs()
        kwargs.update(overrides)
        self.chatbot = ChatBot('Test Bot', **kwargs)

    def _assert_rejected(self, **overrides):
        # Construction must raise InvalidAdapterTypeException.
        with self.assertRaises(Adapter.InvalidAdapterTypeException):
            self._make_bot(**overrides)

    def _assert_accepted(self, **overrides):
        # Construction must NOT raise InvalidAdapterTypeException.
        try:
            self._make_bot(**overrides)
        except Adapter.InvalidAdapterTypeException:
            self.fail('Test raised InvalidAdapterException unexpectedly!')

    def test_invalid_storage_adapter(self):
        self._assert_rejected(storage_adapter='chatterbot.logic.LogicAdapter')

    def test_valid_storage_adapter(self):
        self._assert_accepted(storage_adapter='chatterbot.storage.SQLStorageAdapter')

    def test_invalid_logic_adapter(self):
        self._assert_rejected(logic_adapters=['chatterbot.storage.StorageAdapter'])

    def test_valid_logic_adapter(self):
        self._assert_accepted(logic_adapters=['chatterbot.logic.BestMatch'])

    def test_valid_adapter_dictionary(self):
        self._assert_accepted(storage_adapter={
            'import_path': 'chatterbot.storage.SQLStorageAdapter'
        })

    def test_invalid_adapter_dictionary(self):
        self._assert_rejected(storage_adapter={
            'import_path': 'chatterbot.logic.BestMatch'
        })
|
# Write a Python program that prints "Equal" if three numbers a, b, and c are equal.
# If at least one number is different, the program should print "Not Equal".

def classify_equality(a, b, c):
    """Return "Equal" when all three numbers match, otherwise "Not Equal".

    The original printed "Equal All" / "Not Equal ,Atleast One Different",
    which contradicted the stated spec above; the messages now match it.
    """
    return "Equal" if a == b == c else "Not Equal"

if __name__ == "__main__":
    # Prompt only when run as a script so importing this module is side-effect free.
    x = int(input("Enter First Number :"))
    y = int(input("Enter Second Number :"))
    z = int(input("Enter Third Number :"))
    print(classify_equality(x, y, z))
|
import logging
from queue import Queue
from threading import Thread

_logger = logging.getLogger(__name__)

class BackgroundWriter(Thread):
    """Runs write calls against a wrapped writer object on a dedicated thread."""

    class WriteAfterDone(Exception):
        '''Indicates when an action is taken after requested to stop.'''

    def __init__(self, writer, done_callback=None):
        '''Wraps a writer I/O object with background write calls.
        Optionally, will call the done_callback just before the thread stops (to allow caller to
        close/operate on the writer)
        '''
        super(BackgroundWriter, self).__init__()
        # NOTE: the original assigned logging.getLogger('s3tail.writer') to a
        # *local* variable here, which had no effect (the module-level _logger
        # is what every method uses); the dead assignment is removed.
        self._done = False
        self._done_callback = done_callback
        self._queue = Queue()
        self._writer = writer
        self.name = writer.name

    def write(self, data):
        # Queue data for the background thread; refuse once a stop was requested.
        if self._done:
            raise self.WriteAfterDone('Refusing to write when stopping ' + self.name)
        self._queue.put(data)

    def mark_done(self):
        # Idempotently ask run() to stop after draining queued writes.
        if not self._done:
            self._done = True
            _logger.debug('Asked to stop writing to %s', self.name)
            self._queue.put(True)  # sentinel: tells run() to exit

    def join(self, timeout=None):
        # Stop accepting writes, wait for the queue to drain, then join the thread.
        _logger.debug('Joining %s', self.name)
        self.mark_done()
        self._queue.join()
        super(BackgroundWriter, self).join(timeout)

    def run(self):
        # Drain the queue until the True sentinel arrives, then fire the
        # optional callback with the wrapped writer and exit.
        # NOTE(review): a literal True queued as *data* would be mistaken for
        # the sentinel; callers are expected to write str/bytes only.
        while True:
            data = self._queue.get()
            if data is True:
                _logger.debug('Stopping %s', self.name)
                self._queue.task_done()
                if self._done_callback:
                    self._done_callback(self._writer)
                return
            self._writer.write(data)
            self._queue.task_done()
|
import unittest
from changepoint_detector import linear_model as gm
import numpy as np
from scipy.stats import t
class TestLinearModel(unittest.TestCase):
    """Tests for the Bayesian linear model factory in changepoint_detector.

    The factory appears to regularize every fit with made-up prior data
    [-1, 0, 1] per variable — TODO confirm against DefaultLinearModelFactory.
    """

    def test_factory(self):
        """Posterior mean/n/beta for empty, 1-point, 4-point and 2-D inputs."""
        model_generator = gm.DefaultLinearModelFactory
        # requires time by datapoints data to be a numpy array
        self.assertRaises(ValueError, model_generator, [0])
        self.assertRaises(ValueError, model_generator, [[0]])
        self.assertRaises(ValueError, model_generator, np.array(0))
        self.assertRaises(ValueError, model_generator, np.array([0,1]))
        self.assertRaises(ValueError, model_generator, np.array([[]]))
        fake_priori_data = np.array([[-1],[0],[1]])
        empty_data = np.array([])
        empty_data.shape = (0,1)
        m_empty = model_generator(empty_data)
        # check no-input created correctly)
        # prior made up data is [-1,0,1]
        self.assertEqual(m_empty.post_mean,0)
        self.assertEqual(m_empty.post_n,3)
        self.assertEqual(m_empty.post_beta,np.sum(fake_priori_data**2)/2)
        sample_data = np.array([[1]])
        m_simple = model_generator(sample_data)
        # prior made up data is [-1,0,1], so total data is
        # [-1,0,1,1]
        self.assertEqual(m_simple.post_mean, 1/4)
        self.assertEqual(m_simple.post_n, 4)
        new_samp = np.concatenate((fake_priori_data,sample_data))
        self.assertEqual(m_simple.post_beta, sum((new_samp - np.mean(new_samp))**2)/2)
        sample_data = np.array([[1],[10],[10],[13]])
        regularized_data = np.concatenate((fake_priori_data,sample_data))
        m_bigger = model_generator(sample_data)
        # we're building a model on [1,10,10,13] using [0,1,2,3].
        # wolfram alpha tells us our model is 3.6x + 3.1, so our result for x=4 is 14.4 + 3.1 = 17.5.
        # we then compute the mean of (17.5 * 4) / 7
        self.assertAlmostEqual(m_bigger.post_mean[0], 17.5 * 4 / 7)
        self.assertEqual(m_bigger.post_n, np.size(regularized_data))
        #### Some day figure out if this is the ``right'' number
        self.assertAlmostEqual(m_bigger.post_beta[0], 304)
        sample_data = np.array([[1,2],[10,3],[10,4],[13,5]])
        regularized_data = np.concatenate((np.concatenate((fake_priori_data,fake_priori_data),1),sample_data))
        m_twod = model_generator(sample_data)
        # prior made up data is [-1,0,1], so total data is
        # [-1,0,1,1]
        self.assertAlmostEqual(m_twod.post_mean[0], 10)
        self.assertEqual(m_twod.post_mean[1], (6*4)/7)
        self.assertEqual(m_twod.post_n, regularized_data.shape[0])
        fict_data = np.array([-1,0,1,6,6,6,6])
        # ssd of fake data / 2
        expect_post_beta_1 = np.sum(np.square((fict_data - (24/7)))) /2
        #### Some day figure out if this is the ``right'' number
        self.assertAlmostEqual(m_twod.post_beta[0], 304)
        self.assertAlmostEqual(m_twod.post_beta[1], expect_post_beta_1)

    def test_probability(self):
        """Predictive probability of a no-data model matches the Student-t pdf;
        per-dimension probabilities multiply; malformed inputs raise ValueError."""
        empty_data = np.array([])
        empty_data.shape = (0,1)
        model_generator = gm.DefaultLinearModelFactory
        m_empty = model_generator(empty_data)
        prob1 = t.pdf(1,3,scale=np.sqrt(2 * 4/(3 * 3)))
        prob2 = t.pdf(2,3,scale=np.sqrt(2 * 4/(3 * 3)))
        self.assertAlmostEqual(m_empty.GetProbability(np.array([1])), prob1)
        self.assertAlmostEqual(m_empty.GetProbability(np.array([2])), prob2)
        empty_data.shape = (0,2)
        m_empty2 = model_generator(empty_data)
        self.assertAlmostEqual(m_empty2.GetProbability(np.array([1,2])), prob1 * prob2)
        self.assertRaises(ValueError, m_empty.GetProbability, 0)
        self.assertRaises(ValueError, m_empty.GetProbability, [0])
        self.assertRaises(ValueError, m_empty.GetProbability, np.array([]))
        self.assertRaises(ValueError, m_empty2.GetProbability, 0)
        self.assertRaises(ValueError, m_empty2.GetProbability, [0])
        self.assertRaises(ValueError, m_empty2.GetProbability, np.array([0]))

    def test_lagged_factory(self):
        """LinearPredictor.Fit with a probability lag shifts the prediction point."""
        num_variables = 1
        apriori_n = 3.0
        apriori_mu0 = np.zeros(num_variables)
        apriori_alpha = apriori_n/2
        apriori_beta = (np.ones(num_variables) * 2)/2
        probability_lag = 1
        lin_pred = gm.LinearPredictor(num_variables, apriori_n, apriori_mu0, apriori_alpha, apriori_beta, probability_lag)
        data = np.array([0,1,2])
        # since lagged 1, prediction should be 4, with ssd of idkwat
        data.shape = (3,1)
        model_generator = lin_pred.Fit
        m = model_generator(data)
        self.assertAlmostEqual(m.post_mean[0], (4 * 3 + 0 * 3)/6)
        probability_lag = 10
        lin_pred = gm.LinearPredictor(num_variables, apriori_n, apriori_mu0, apriori_alpha, apriori_beta, probability_lag)
        model_generator = lin_pred.Fit
        m = model_generator(data)
        self.assertAlmostEqual(m.post_mean[0], 13/2)
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    unittest.main()
|
import collections as co, operator
from string import ascii_lowercase
def count_polymer(data, ign=''):
    """Return the length of the polymer after all reactions complete.

    Adjacent units of the same letter but opposite case annihilate each
    other; `ign` (either case) removes that unit type entirely first.
    """
    remaining = []
    for unit in data:
        if unit == ign.lower() or unit == ign.upper():
            continue  # this unit type is being ignored for this run
        if remaining and remaining[-1] != unit and remaining[-1].lower() == unit.lower():
            # New unit reacts with the one on top of the stack: both vanish.
            remaining.pop()
        else:
            remaining.append(unit)
    return len(remaining)
# Read the puzzle input and report (1) the fully-reacted polymer length and
# (2) the shortest length achievable by removing one unit type entirely.
with open('input.txt') as f:  # context manager: the original leaked the handle
    data = f.readline().rstrip()
# NOTE: the unused `counter` defaultdict from the original was dead code; removed.
print(count_polymer(data))
print(min(count_polymer(data, c) for c in ascii_lowercase))
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 6 21:26:36 2017
@author: amado
"""
import h5py
import sys
import scipy.misc
import numpy as np
sys.path.append('../../')
from paths import getDropboxPath
data_path = getDropboxPath()+'data/ADEChallengeData2016/'
def createH5(params):
    """Create an HDF5 file (<name>.h5) of uniformly resized images and labels.

    params keys:
        name      -- split name; also the output file stem
        resize    -- square side length images and labels are resized to
        im_folder -- directory containing <id>.jpg images
        lb_folder -- directory containing <id>.png annotation masks
        data_list -- text file listing one image id per line
    Raises Exception for images that are not (convertible to) 3-channel
    or labels that are not single-channel.
    """
    output_file = params['name']+'.h5'
    # Read the id list with a context manager (the original leaked the handle).
    with open(params['data_list'], 'r') as list_file:
        files = list_file.read().splitlines()
    N = len(files)
    print('{} {} images found'.format(N,params['name']))
    F = h5py.File(output_file,"w")
    try:
        F.create_dataset("images",(N,params['resize'],params['resize'],3),dtype='uint8')
        F.create_dataset("labels",(N,params['resize'],params['resize']),dtype='uint8')
        for i in range(N):
            image = scipy.misc.imread(params['im_folder']+files[i]+'.jpg')
            if image.ndim == 2:
                # Grayscale source: replicate the channel to fake RGB.
                image = np.repeat(image[:,:,None],3,axis=2)
            if image.ndim != 3 or image.shape[2] != 3:
                raise Exception('Channel size error reading image {}'.format(files[i]))
            label = scipy.misc.imread(params['lb_folder']+files[i]+'.png')
            if len(label.shape) != 2:
                raise Exception('Channel size error reading label {}'.format(files[i]))
            F["images"][i] = scipy.misc.imresize(image,(params['resize'],params['resize']))
            F["labels"][i] = scipy.misc.imresize(label,(params['resize'],params['resize']))
            if i % 100 == 0:
                print('processing %d/%d (%.2f%% done)' % (i, N, i*100.0/N))
    finally:
        # try/finally guarantees the HDF5 file is closed even when imread or
        # imresize raises (the original only closed it on its two explicit checks).
        F.close()
    print('Created H5 dataset file: {}'.format(output_file))
if __name__ == '__main__':
    # Build the training split; the validation call is kept below for reference.
    params_train = {
        'name': 'training',
        'resize': 384,
        'im_folder': data_path+'images/training/',
        'lb_folder': data_path+'annotations/training/',
        'data_list': data_path+'training.txt'
    }
    params_val = {
        'name': 'validation',
        'resize': 384,
        'im_folder': data_path+'images/validation/',
        'lb_folder': data_path+'annotations/validation/',
        'data_list': data_path+'validation.txt'
    }
    createH5(params_train)
    # createH5(params_val)
|
# Ported to C# li_attribute_runme.cs
# Python 2 runtime test for SWIG %attribute wrappers: each section checks that
# a wrapped C++ getter/setter pair behaves like a plain Python attribute.
import li_attribute
aa = li_attribute.A(1, 2, 3)
# Plain read/write int attribute `a`.
if aa.a != 1:
    raise RuntimeError
aa.a = 3
if aa.a != 3:
    print aa.a
    raise RuntimeError
if aa.b != 2:
    print aa.b
    raise RuntimeError
aa.b = 5
if aa.b != 5:
    raise RuntimeError
# `d` tracks `b` — presumably an alias attribute; TODO confirm in the .i file.
if aa.d != aa.b:
    raise RuntimeError
# `c` is only ever read here (read-only attribute, per the commented lines).
if aa.c != 3:
    raise RuntimeError
#aa.c = 5
# if aa.c != 3:
#    raise RuntimeError
pi = li_attribute.Param_i(7)
if pi.value != 7:
    raise RuntimeError
pi.value = 3
if pi.value != 3:
    raise RuntimeError
# Attribute whose value is itself a wrapped object.
b = li_attribute.B(aa)
if b.a.c != 3:
    raise RuntimeError
# class/struct attribute with get/set methods using return/pass by reference
myFoo = li_attribute.MyFoo()
myFoo.x = 8
myClass = li_attribute.MyClass()
myClass.Foo = myFoo
if myClass.Foo.x != 8:
    raise RuntimeError
# class/struct attribute with get/set methods using return/pass by value
myClassVal = li_attribute.MyClassVal()
if myClassVal.ReadWriteFoo.x != -1:
    raise RuntimeError
if myClassVal.ReadOnlyFoo.x != -1:
    raise RuntimeError
myClassVal.ReadWriteFoo = myFoo
if myClassVal.ReadWriteFoo.x != 8:
    raise RuntimeError
if myClassVal.ReadOnlyFoo.x != 8:
    raise RuntimeError
# string attribute with get/set methods using return/pass by value
myStringyClass = li_attribute.MyStringyClass("initial string")
if myStringyClass.ReadWriteString != "initial string":
    raise RuntimeError
if myStringyClass.ReadOnlyString != "initial string":
    raise RuntimeError
myStringyClass.ReadWriteString = "changed string"
if myStringyClass.ReadWriteString != "changed string":
    raise RuntimeError
if myStringyClass.ReadOnlyString != "changed string":
    raise RuntimeError
# Check a proper AttributeError is raised for non-existent attributes, old versions used to raise unhelpful error:
# AttributeError: type object 'object' has no attribute '__getattr__'
try:
    x = myFoo.does_not_exist
    raise RuntimeError
except AttributeError, e:
    if str(e).find("does_not_exist") == -1:
        raise RuntimeError
|
import json
import lambda_db

# Local driver: load the sample event payload and invoke the Lambda handler.
with open('input.json') as json_data:  # context manager: the original leaked the handle
    event = json.load(json_data)
context = "context"  # the handler presumably ignores the context — TODO confirm
lambda_db.lambda_handler(event, context)
|
import requests
from bs4 import BeautifulSoup
HEADERS = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36', 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'}
HOST = 'https://technopoint.ru'
import csv
from multiprocessing import Pool
#get the site address
def get_html(url, params=None):
    """Fetch `url` with the shared browser headers and return the body text."""
    return requests.get(url, headers=HEADERS, params=params).text
#get a list of links to each phone
def get_all_links(html):
    """Return absolute product links for the first 10 catalog items on the page.

    Parses the catalog listing and joins each item's relative href with HOST.
    """
    soup = BeautifulSoup(html, 'html.parser')
    items = soup.find_all('div', class_='catalog-item')
    # Slice instead of the original manual counter: only 10 products are needed,
    # and this stops iterating the rest of the page entirely.
    return [
        HOST + item.find('a', class_='ui-link').get('href')
        for item in items[:10]
    ]
# make a list of data that we will take
def get_page_data(html):
    """Extract name, serial number, price and image URL from a product page."""
    soup = BeautifulSoup(html, 'html.parser')
    return {
        'name': soup.find('h1', class_='page-title price-item-title').get_text(strip=True),
        'serial': soup.find('div', class_='price-item-code').find_next('span').get_text(strip=True),
        'price': soup.find('div', class_='price_g').find_next('span').get_text(strip=True),
        'img': soup.find('div', class_='img').find('img').get('src'),
    }
#writing to a document
def write_csv(data):
    """Append one product row to list.csv, writing the header only once.

    The original wrote the header row before *every* data row, so the CSV
    alternated header/data lines; now the header is emitted only when the
    file is new/empty.
    """
    with open('list.csv','a', newline='') as f:
        writer = csv.writer(f, delimiter=';')
        # In append mode the position starts at EOF, so tell() == 0 means
        # the file is empty and still needs its header.
        if f.tell() == 0:
            writer.writerow(['ะฝะฐะธะผะตะฝะพะฒะฐะฝะธะต', 'ัะตัะธะนะฝัะน ะฝะพะผะตั', 'ัะตะฝะฐ', 'ัััะปะบะฐ ะฝะฐ ะธะทะพะฑัะฐะถะตะฝะธะต'])
        writer.writerow(( data['name'],data['serial'],data['price'],data['img']))
    print(data['name'], 'parsed')
# assembly of all the data we need and writing them to a file
def make_all(url):
    """Fetch one product page, scrape its fields and persist them to CSV."""
    write_csv(get_page_data(get_html(url)))
#collect all this nesting doll
def main():
    # Scrape the first 10 catalog products concurrently and append to list.csv.
    url = 'https://technopoint.ru/catalog/recipe/e351231ca6161134/2020-goda/'
    all_links = get_all_links(get_html(url))
    # for url in all_links:
    # print(url)
    # html = get_html(url)
    # data = get_page_data(html)
    # print(data)
    # write_csv(data)
    # One worker process per product page (10 links -> pool of 10).
    with Pool(10) as p:
        p.map(make_all, all_links)
if __name__ == '__main__':
    main()
|
class Solution:
    def findDisappearedNumbers(self, nums):
        """
        Return every value in [1, len(nums)] that is missing from nums.

        :type nums: List[int]
        :rtype: List[int]

        Marks presence in place by negating nums[v-1] for each seen value v,
        giving O(n) time and O(1) extra space. NOTE: nums is mutated.
        The original shadowed the builtin abs() with a nested def; the
        builtin is used directly now.
        """
        for value in nums:
            # value may already have been negated by an earlier pass; abs()
            # recovers the original entry.
            seen_index = abs(value) - 1
            nums[seen_index] = -abs(nums[seen_index])  # negative == "seen"
        # Indices still holding a positive value were never marked.
        return [i + 1 for i, v in enumerate(nums) if v > 0]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 27 01:02:34 2019
@author: yazi
"""
def launch(data, length=200):
    """Run Dijkstra from vertex 1 over vertices 1..length.

    data: adjacency dict, vertex -> list of (neighbor, weight).
    length: highest vertex number (default 200 preserves the original
            hard-coded graph size).
    Returns dict A mapping each vertex to its shortest distance from 1;
    1000000 acts as "infinity" for unreachable vertices.
    """
    u = [1]                                   # settled vertices
    v = [i for i in range(2, length + 1)]     # still-unsettled vertices
    A = {}                                    # vertex -> shortest distance
    A[1] = 0
    key_v = dict.fromkeys(range(2, length + 1), 1000000)
    dijikstra(u, v, A, key_v, data)
    return A
def dijikstra(u, v, A, key_v, data):
    """Settle every vertex in v via Dijkstra's greedy loop.

    u:     settled vertices (mutated, grows)
    v:     unsettled vertices (mutated, shrinks to [])
    A:     vertex -> shortest distance found (mutated)
    key_v: tentative distances for unsettled vertices (mutated)
    data:  adjacency dict, vertex -> list of (neighbor, weight)
    Returns (u, v, A, key_v) for compatibility with the original signature.
    """
    while v != []:
        # Relax every edge leaving the settled set. (Recomputing all of them
        # each round matches the original's behavior.)
        for vertice in u:
            # .get tolerates vertices with no outgoing edges, which would
            # have raised KeyError in the original.
            for neighbor, weight in data.get(vertice, []):
                if neighbor in v:
                    key_v[neighbor] = min(key_v[neighbor], A.get(vertice, 0) + weight)
        # Greedily settle the closest unsettled vertex.
        key, value = min(key_v.items(), key=lambda kv: kv[1])
        A[key] = value
        u.append(key)
        v.remove(key)
        key_v.pop(key)
    # NOTE: the original issued another recursive call here after the loop had
    # already emptied v — redundant at best and unboundedly recursive at
    # worst; removed. Results are unchanged.
    return u, v, A, key_v
if __name__=='__main__':
    # graph={1:[(2,1),(4,4)],2:[(3,2)],3:[(4,3)],4:[(2,5)]}
    # Parse a tab-separated adjacency file: each line is
    # "<vertex>\t<neighbor>,<weight>\t<neighbor>,<weight>..."
    path='/home/yazi/Downloads/shortestpath.txt'
    file = open(path,'r')
    data = file.readlines()
    i=0
    graph={}
    for line in data:
        text=line.strip().split('\t')
        graph[int(text[0])]=[]
        for element in text[1:]:
            key,value=element.split(",")
            graph[int(text[0])].append((int(key),int(value)))
    A=launch(graph)
    print(A)
#
|
import os
def make_img_list(img_dir):
    """Collect the paths of all .png images under img_dir (recursive walk)."""
    ext = ".png"
    return [
        os.path.join(current_dir, file_name)
        for current_dir, _dirs, file_names in os.walk(img_dir)
        for file_name in file_names
        if file_name.endswith(ext)
    ]
|
import pickle
import time
import urllib.request
import json
company_list = ['p0000745jr8u', 'p0003884x7lt', 'p0043611aoji', 'p0039557fvbf',
'p0090051h2oq', 'p0006679vz2s', 'p00425z4gu', 'p0009976ed7k',
'p0005859huep', 'p0089280mxzg', 'p0079383unbs', 'p00521862bvn',
'p0002406hlem', 'p0002233vdsm', 'p0096373arhm', 'p0039096va39',
'p0046598vm29', 'p0036394dcvf', 'p0081759udz2', 'p0079434mdig',
'p0014655asb9', 'p0043883wrzh']
def prod_check(entity):
    """Replay an entity's OCR test set against the deployed classification API
    and print per-sample predictions plus overall accuracy.

    Reads pickled features/labels/metadata from intermediate/, POSTs each OCR
    text to the service, and compares the top predicted expense type with the
    expected label.
    """
    # Context managers close each pickle file (the original leaked all three).
    with open("intermediate/" + entity + "_x_test.pkl", "rb") as fh:
        x_test = pickle.load(fh)
    with open("intermediate/" + entity + "_y_testET.pkl", "rb") as fh:
        y_test = pickle.load(fh)
    with open('intermediate/' + entity + '_info_test.pkl', 'rb') as fh:
        test_info = pickle.load(fh)
    # Call API running in prod
    ds_api_url = 'http://seapr1dsweb.concurasp.com:80/ds-webapi/service/expenseClassification/receiptTypeClassification'
    # Call API running in RQA
    # ds_api_url = 'http://10.24.25.120:80/ds-webapi/service/expenseClassification/receiptTypeClassification'
    request_type = {'Content-Type': 'application/json'}
    correct_count = 0
    call_count = 0
    # Call the API with each ds_request and compare the top prediction.
    for i, ocr in enumerate(x_test):
        payload = {"entityId": entity, "ocrText": ocr, "userId": test_info[i]['userid']}
        body = json.dumps(payload).encode('utf-8', 'ignore')
        call_count += 1
        req = urllib.request.Request(ds_api_url, body, request_type)
        # Close the HTTP response (the original leaked each connection).
        with urllib.request.urlopen(req) as resp:
            ds_response = json.loads(resp.read().decode('utf-8'))
        pred = ds_response['expenseTypes'][0]['type']
        print(y_test[i], " || ", pred)
        if pred == y_test[i]:
            correct_count += 1
    print("%s Accuracy: %0.3f" % (entity, (float(correct_count)) / call_count))
if __name__ == "__main__":
    # Evaluate the deployed endpoint against every company's test set.
    for company in company_list:
        prod_check(company)
    temp = input("pause")  # keep the console window open until a keypress
|
# Import libraries
import pandas as pd
import sklearn
import numpy as np
import random
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
import my_func
import time
from eye_identifier import EyeCenterIdentifier, GridSearch
from image_preprocess import imanorm, histeq, imaderiv
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
class BestModel():
    """Eye-center detector: trains a classifier on small image patches
    ("eye" vs "not eye") and slides it over test faces to locate eyes.

    Written against an older pandas API (positional `drop` axis,
    `DataFrame.append`/`Series.append`) — keep that in mind when upgrading.
    Training artifacts are stored as *class* attributes by process_data,
    so all instances share one processed dataset.
    """
    SIZE = 96          # face images are SIZE x SIZE pixels
    HEIGHT = 12        # patch height
    WIDTH = 20         # patch width
    half_HEIGHT = 6
    half_WIDTH = 10
    N_sub = 200        # number of training images actually used
    N_plots = 20       # patches sampled per image (half eyes, half random)
    def __init__(self, clf, step_size = (1, 1), N_steps = (8, 4)):
        # clf: scikit-learn style classifier; step_size/N_steps configure the
        # sliding-window search performed by EyeCenterIdentifier.
        self.step_size = step_size
        self.N_steps = N_steps
        self.clf = clf
        self.data_pred = None
        self.mse = None
    def process_data(self, location = r"..\data\training.csv"):
        """Load the Kaggle-style CSV, build the patch training set, and stash
        train/test splits on the class. Slow: iterates every training image."""
        # Import data
        data_ori = pd.read_csv(location)
        # use a subset of the data
        data = data_ori.iloc[:BestModel.N_sub]
        images = data.Image.map(my_func.str_split) # Transfer Image into arrays
        data = data.drop('Image', 1)
        data_pos = data[['left_eye_center_x', 'left_eye_center_y', 'right_eye_center_x', 'right_eye_center_y']]
        # Remove rows with nan positions
        nan_index = data_pos.index[data_pos.isnull().any(axis=1)]
        images = images.drop(nan_index, axis=0)
        data = data.drop(nan_index, axis=0)
        data_pos = data_pos.drop(nan_index, axis=0)
        # histeq transform
        images = images.apply(histeq)
        #images = images.apply(imaderiv)
        # Split the data into training set and testing set
        images_train, images_test, data_pos_train, data_pos_test = train_test_split(images, data_pos, test_size = 0.2, random_state = 312)
        # Get 20 subplots from each image, 1 right eye, 1 left eye, 2 randomly selected subplots
        # Create the eye training data set
        random.seed(123)
        col_names = ['pixel' + str(v) for v in range(0, BestModel.HEIGHT * BestModel.WIDTH)] + ['center_X', 'center_Y', 'is_eye']
        data_eye = pd.DataFrame(columns = col_names)
        for i in range(0, images_train.shape[0]):
            t1=time.time()
            center_X = np.empty(0)
            center_Y = np.empty(0)
            is_eye = np.empty(0)
            # Select the two eye subplots
            for _eye in ['left_eye_center', 'right_eye_center']:
                _eye_x = _eye + '_x'
                _eye_y = _eye + '_y'
                _x = data_pos_train.iloc[i][ _eye_x]
                _y = data_pos_train.iloc[i][ _eye_y]
                # Five jittered centers around each labelled eye position.
                _x = np.array([_x-2, _x, _x+2, _x, _x])
                _y = np.array([_y, _y, _y, _y-1, _y+1])
                center_X = np.append(center_X, _x)
                center_Y = np.append(center_Y, _y)
                is_eye = np.append(is_eye, [1] * int(BestModel.N_plots / 4))
            # randomly select two subplots
            for r in range(int(BestModel.N_plots / 2)):
                while True:
                    _x = random.uniform(0, BestModel.SIZE)
                    _y = random.uniform(0, BestModel.SIZE)
                    # do not want the random center to be too close to the eyes
                    if not (abs(_x - data_pos_train.iloc[i][ 'left_eye_center_x']) + abs(_y - data_pos_train.iloc[i][ 'left_eye_center_y']) < BestModel.HEIGHT + BestModel.WIDTH or abs(_x - data_pos_train.iloc[i][ 'right_eye_center_x']) + abs(_y - data_pos_train.iloc[i][ 'right_eye_center_y']) < BestModel.HEIGHT + BestModel.WIDTH):
                        break
                center_X = np.append(center_X, _x)
                center_Y = np.append(center_Y, _y)
                is_eye = np.append(is_eye, 0)
            # Cut each patch out of the image and append it as one training row.
            for j in range (0,len(center_X)):
                temp = my_func.cut_image(center_X[j], center_Y[j], BestModel.half_WIDTH, BestModel.half_HEIGHT)
                ima = pd.Series(images_train.iloc[i][temp[1]])
                ima = ima.append(pd.Series([center_X[j], center_Y[j], is_eye[j]]))
                ima.index = col_names
                data_eye = data_eye.append(ima, ignore_index = True)
        # Get the train_X and train_y
        BestModel.train_X = data_eye.drop(['center_X', 'center_Y', 'is_eye'], axis = 1)
        BestModel.train_y = data_eye.is_eye
        BestModel.train_images = images_train
        BestModel.train_pos = data_pos_train
        BestModel.test_X = images_test
        BestModel.test_pos = data_pos_test
        # A Benchmark
        # If use the mean center of the training set, what is the mse
        BestModel.mean_pos = {'left_eye_center_x': BestModel.train_pos.left_eye_center_x.mean(),
                              'left_eye_center_y': BestModel.train_pos.left_eye_center_y.mean(),
                              'right_eye_center_x': BestModel.train_pos.right_eye_center_x.mean(),
                              'right_eye_center_y': BestModel.train_pos.right_eye_center_y.mean()}
        self.data_pred = pd.DataFrame(columns = ('id', 'left_eye_center_x', 'left_eye_center_y', 'right_eye_center_x', 'right_eye_center_y'))
    def build_model(self):
        """Fit the sliding-window eye identifier on the patch training set."""
        self.eye_id = EyeCenterIdentifier(self.clf, self.step_size, self.N_steps)
        self.clf = self.eye_id.fit(BestModel.train_X, BestModel.train_y, BestModel.train_pos)
    def make_prediction(self, index):
        """Predict eye centers for test image `index`; returns the MSE and
        accumulates the prediction row in self.data_pred."""
        data_pred = self.eye_id.predict(BestModel.test_X.iloc[index], has_prob=True)
        mse = self.eye_id.get_mse(data_pred, BestModel.test_pos.iloc[index])
        data_pred['id'] = index
        self.data_pred = self.data_pred.append(data_pred)
        return mse
    def draw_face(self, index, size):
        """Return a (Figure, Axes) pair showing test image `index` in gray."""
        image=BestModel.test_X.iloc[index]
        f = Figure(figsize=(5,5), dpi=100)
        a = f.add_subplot(111)
        a.imshow(image.reshape((size, size)), cmap=plt.cm.gray)
        a.set_xlim(0, size)
        a.set_ylim(size, 0)
        return f, a
    def draw_results(self, index, size, draw_true=False, draw_mean=False):
        """Plot test image `index` via pyplot; the overlay markers for
        predicted/true/mean positions are currently commented out."""
        image=BestModel.test_X.iloc[index]
        pred_values = self.data_pred
        #true_values = BestModel.test_pos.iloc[index]
        #mean_values = BestModel.mean_pos
        plt.imshow(image.reshape((size, size)), cmap=plt.cm.gray)
        #pred_pos, = plt.plot(pred_values.left_eye_center_x, pred_values.left_eye_center_y, 'r.', label='Predicted Position')
        #plt.plot(pred_values.right_eye_center_x, pred_values.right_eye_center_y, 'r.')
        #if draw_true:
        #    true_pos, = plt.plot(true_values.left_eye_center_x, true_values.left_eye_center_y, 'g.', label='True Position')
        #    plt.plot(true_values.right_eye_center_x, true_values.right_eye_center_y, 'g.')
        #if draw_mean:
        #    mean_pos, = plt.plot(mean_values.left_eye_x_mean, mean_values.left_eye_y_mean, 'b.', label='Average Position')
        #    plt.plot(mean_values.right_eye_x_mean, mean_values.right_eye_y_mean, 'b.')
        plt.xlim([0,size])
        plt.ylim([size,0])
        return plt
if __name__ == '__main__':
    # Train the random-forest eye detector and report the MSE of one
    # test-image prediction.
    step_size = (1, 1)
    N_steps = (8, 4)
    clf = RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
                max_depth=None, max_features='auto', max_leaf_nodes=None,
                min_impurity_split=1e-07, min_samples_leaf=1,
                min_samples_split=2, min_weight_fraction_leaf=0.0,
                n_estimators=50, n_jobs=1, oob_score=False, random_state=312,
                verbose=0, warm_start=False)
    best_model= BestModel(clf, step_size, N_steps)
    best_model.process_data()
    best_model.build_model()
    mse = best_model.make_prediction(1)
    print (mse)
    #fig = best_model.draw_face(index=1, size=96)
|
import sys
sys.stdin=open("input.txt")
def dfs(i):
    # Depth-first traversal over the module-level adjacency list `node`,
    # marking each reachable vertex in the module-level `visited` array.
    for neighbour in node[i]:
        if visited[neighbour]:
            continue
        visited[neighbour] = 1
        dfs(neighbour)
# Test driver: for each test case, count connected components of an
# undirected graph with n vertices and m edges (input redirected from file).
for t in range(int(input())):
    n,m=map(int,input().split())
    node=[[] for _ in range(n+1)]  # adjacency lists, vertices 1..n
    cnt=0                          # connected-component count
    visited=[0]*(n+1)
    for i in range(m):
        a,b=map(int,input().split())
        node[a].append(b)
        node[b].append(a)
    for i in range(1,n+1):
        if not visited[i]:
            # Unvisited vertex starts a new component; flood it with dfs.
            visited[i]=1
            cnt+=1
            dfs(i)
    print("#{} {}".format(t+1,cnt))
# Enter your code here. Read input from STDIN. Print output to STDOUT
# Python 2 solution: for each query n, print the smallest triangular number
# having more than n divisors (Project Euler #12 style).
t = int(raw_input())
for a0 in xrange(t):
    n = int(raw_input().strip())
    if n == 1:
        print '3'
    else:
        # Starting index derived from sqrt(1 + 4n^2) — presumably chosen so
        # the first candidate triangular number is large enough; TODO confirm.
        x = 1 + (4 * n * n)
        x = x ** (.5)
        x = int((x - 1) / 2)
        y = 1
        i = x
        # print '========'
        while y == 1:
            # print '----'
            # print i
            s = (i * (i + 1)) / 2
            # print s
            # print '--------'
            divisors = 2  # 1 and s always divide s
            j = 2
            # Brute-force divisor count; O(s) per candidate.
            while j < s:
                if s % j == 0:
                    divisors = divisors + 1
                j = j + 1
            if divisors > n:
                print s
                break
            i = i + 1
|
import torch
from clusterers import base_clusterer
class Clusterer(base_clusterer.BaseClusterer):
    """Degenerate clusterer that assigns every sample to a single cluster."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Force a single cluster no matter what the config requested.
        self.k = 1
        print('Setting k to 1 regardless of config.')
    def get_labels(self, x, y):
        # randint(low=0, high=1) can only produce 0, so every label is 0
        # (one cluster), shaped like y, as int64 on the GPU.
        return torch.randint(low=0, high=1, size=y.shape).long().cuda()
|
import pytest
from typing import List
@pytest.mark.parametrize(argnames="phrase, norm_phrase",
                         argvalues=[("TestPhrase", "testphrase"),
                                    ("Test_phrase", "testphrase"),
                                    ("test_phrase", "testphrase")])
def test_normalized_string(phrase: str, norm_phrase: str):
    """normalized_string lowercases the phrase and drops underscores."""
    from SetGridField import normalized_string
    assert norm_phrase == normalized_string(phrase)
@pytest.mark.parametrize(
    argnames="before_dict, keys, max_keys, after_dict",
    argvalues=[
        ({'a': 1, 'b': 2}, ['a'], None, {'a': 1}),
        ({'a': 1, 'b': 2}, ['*'], 1, {'a': 1}),
        ({'a': 1, 'b': 2}, ['*'], 2, {'a': 1, 'b': 2}),
        ({'a': 1, 'b': [1, 2, 3]}, ['a'], None, {'a': 1}),
    ])
def test_filter_the_dict(before_dict: dict, keys: dict, max_keys: int, after_dict: dict):
    """filter_dict keeps only the requested keys; '*' keeps up to max_keys entries."""
    from SetGridField import filter_dict
    assert after_dict == filter_dict(dict_obj=before_dict,
                                     keys=keys,
                                     max_keys=max_keys)
@pytest.mark.parametrize(argnames="entry_context, keys, raise_exception, unpack_nested",
                         argvalues=[
                             ([{'a': 'val', 'b': 'val'}], ['a', 'b'], False, False),
                             ([{'a': [], 'b': 'val'}], ['a', 'b'], True, False),
                             ([{'a': [], 'b': 'val'}], ['b'], False, False),
                             (['a', 'b', 1, False], ['b'], False, False),
                             (['a', 'b', 1, False, []], ['*'], True, False),
                         ])
def test_validate_entry_context(capfd, entry_context: dict, keys: list, raise_exception: bool, unpack_nested: bool):
    """validate_entry_context raises ValueError for structures the grid cannot represent."""
    from SetGridField import validate_entry_context
    if raise_exception:
        # disabling the stdout check cause along with the exception, we write additional data to the log.
        with pytest.raises(ValueError), capfd.disabled():
            validate_entry_context(context_path='Path',
                                   entry_context=entry_context,
                                   keys=keys,
                                   unpack_nested_elements=unpack_nested)
    else:
        # Valid shapes must pass silently.
        validate_entry_context(context_path='Path',
                               entry_context=entry_context,
                               keys=keys,
                               unpack_nested_elements=unpack_nested)
@pytest.mark.parametrize(argnames="keys, columns, dt_response_json, expected_json, unpack_nested",
                         argvalues=[
                             (["name", "value"], ["col1", "col2"], "context_entry_list.json", "expected_list_grid.json",
                              False),
                             (["*"], ["col1", "col2"], "context_entry_dict.json", "expected_dict_grid.json", False),
                             (["*"], ["col1"], "context_entry_list_of_values.json", "expected_list_of_values_grid.json",
                              False),
                             (["*"], ["col1", "col2"], "context_entry_dict_with_elements.json",
                              "expected_dict_with_elements_grid.json", True),
                             (["firstname", "lastname", "email"], ["Fname", "Lname", "Email"],
                              "context_single_dict_with_keys.json", "expected_single_dict_with_keys_grid.json", False),
                             (["firstname", "lastname", "email"], ["Fname", "Lname", "Email"],
                              "context_entry_list_of_dicts.json", "expected_list_of_dicts_grid.json", False)
                         ])
def test_build_grid(datadir, mocker, keys: list, columns: list, dt_response_json: str, expected_json: str,
                    unpack_nested: bool):
    """Unit test
    Given
        - script args
        - a file
    When
        - build_grid command
    Then
        - Validate that the grid was created with the correct column names
    """
    import SetGridField
    import json
    import pandas as pd
    # demisto.dt is stubbed to return the fixture payload for any context query.
    mocker.patch.object(SetGridField, 'demisto')
    with open(datadir[dt_response_json]) as json_file:
        SetGridField.demisto.dt.return_value = json.load(json_file)
    with open(datadir[expected_json]) as json_file:
        expected_grid = json.load(json_file)
    assert pd.DataFrame(expected_grid).to_dict() == SetGridField.build_grid(
        context_path=mocker.MagicMock(), keys=keys, columns=columns, unpack_nested_elements=unpack_nested
    ).to_dict()
# A 24-char stem repeated 11 times -> 264-character column name, used to
# exercise handling of very long column names.
very_long_column_name = 11 * "column_name_OF_LEN_264__"
@pytest.mark.parametrize(argnames="keys, columns, unpack_nested_elements, dt_response_path, expected_results_path",
                         argvalues=[
                             (["name", "value"], ["col!@#$%^&*()ืข_1", very_long_column_name], False,
                              'context_entry_list_missing_key.json',
                              'expected_list_grid_none_value.json')
                         ])
def test_build_grid_command(datadir, mocker, keys: List[str], columns: List[str], unpack_nested_elements: bool,
                            dt_response_path: str, expected_results_path: str):
    """Unit test
    Given
        - script args
        - a file
    When
        - build_grid_command command
    Then
        - Validate that the grid was created with the correct column names
    """
    import json
    import SetGridField
    # Start from an empty grid and stub the demisto API surface.
    mocker.patch.object(SetGridField, 'get_current_table', return_value=[])
    mocker.patch.object(SetGridField, 'demisto')
    with open(datadir[dt_response_path]) as json_file:
        SetGridField.demisto.dt.return_value = json.load(json_file)
    results = SetGridField.build_grid_command(grid_id='test', context_path=mocker.MagicMock(), keys=keys,
                                              columns=columns, overwrite=True, sort_by=None,
                                              unpack_nested_elements=unpack_nested_elements)
    with open(datadir[expected_results_path]) as json_file:
        expected_results = json.load(json_file)
    assert json.dumps(results) == json.dumps(expected_results)
@pytest.mark.parametrize(argnames="keys, columns, unpack_nested_elements, dt_response_path, expected_results_path",
                         argvalues=[
                             (["firstname", "lastname", "email"], ["fname", "lname", "email"], False,
                              'context_entry_list_of_dicts_non_sorted.json', 'expected_entry_list_of_dicts_sorted.json')
                         ])
def test_build_grid_command_with_sort_by(datadir, mocker, keys: List[str], columns: List[str],
                                         unpack_nested_elements: bool, dt_response_path: str,
                                         expected_results_path: str):
    """Unit test
    Given
    - script args, including sort_by
    - a file
    When
    - build_grid_command command
    Then
    - Validate that the grid was created with the correct column names and sorted correctly
    """
    import json
    import SetGridField
    # Start from an empty existing grid and stub the demisto API object.
    mocker.patch.object(SetGridField, 'get_current_table', return_value=[])
    mocker.patch.object(SetGridField, 'demisto')
    with open(datadir[dt_response_path]) as json_file:
        SetGridField.demisto.dt.return_value = json.load(json_file)
    # Single sort column: rows are expected ordered by 'fname'.
    results = SetGridField.build_grid_command(grid_id='test', context_path=mocker.MagicMock(), keys=keys,
                                              columns=columns, overwrite=True, sort_by=['fname'],
                                              unpack_nested_elements=unpack_nested_elements)
    with open(datadir[expected_results_path]) as json_file:
        expected_results = json.load(json_file)
    assert json.dumps(results) == json.dumps(expected_results)
@pytest.mark.parametrize(argnames="keys, columns, unpack_nested_elements, dt_response_path, expected_results_path",
                         argvalues=[
                             (["col1", "col2"], ["col1", "col2"], False,
                              'context_entry_list_of_dicts_non_sorted_multi.json',
                              'expected_entry_list_of_dicts_sorted_multi.json')
                         ])
def test_build_grid_command_with_multi_sort_by(datadir, mocker, keys: List[str], columns: List[str],
                                               unpack_nested_elements: bool, dt_response_path: str,
                                               expected_results_path: str):
    """Unit test
    Given
    - script args, including multi sort_by cols
    - a file
    When
    - build_grid_command command
    Then
    - Validate that the grid was created with the correct column names and sorted correctly
    """
    import json
    import SetGridField
    # Start from an empty existing grid and stub the demisto API object.
    mocker.patch.object(SetGridField, 'get_current_table', return_value=[])
    mocker.patch.object(SetGridField, 'demisto')
    with open(datadir[dt_response_path]) as json_file:
        SetGridField.demisto.dt.return_value = json.load(json_file)
    # Two sort columns: rows ordered by 'col1' first, then 'col2'.
    results = SetGridField.build_grid_command(grid_id='test', context_path=mocker.MagicMock(), keys=keys,
                                              columns=columns, overwrite=True, sort_by=['col1', 'col2'],
                                              unpack_nested_elements=unpack_nested_elements)
    with open(datadir[expected_results_path]) as json_file:
        expected_results = json.load(json_file)
    assert json.dumps(results) == json.dumps(expected_results)
|
import aiomysql
import asyncio
async def select(loop, sql, pool):
    """Run a SELECT on a pooled connection and print the first row.

    ``loop`` is kept for interface compatibility but is not used here.
    """
    async with pool.acquire() as connection:
        async with connection.cursor() as cursor:
            await cursor.execute(sql)
            row = await cursor.fetchone()
            print(row)
async def insert(loop, sql, pool):
    """Run an INSERT on a pooled connection and commit it.

    ``loop`` is kept for interface compatibility but is not used here.
    """
    async with pool.acquire() as connection:
        async with connection.cursor() as cursor:
            await cursor.execute(sql)
            await connection.commit()
async def main(loop):
    """Create a connection pool, run two demo INSERTs concurrently, and
    return their results.

    Fix: the pool is now closed and drained on exit; the original leaked
    every pooled connection.
    """
    pool = await aiomysql.create_pool(
        host='192.168.122.205',
        port=3306,
        user='root',
        password='fengxiaoxiaoxi',  # NOTE(review): hard-coded credentials; move to env/config
        db='aiomysqltest',
        loop=loop)
    try:
        # c1 = select(loop=loop, sql='select * from minifw limit 1', pool=pool)
        c1 = insert(loop=loop, sql="insert into artitle_test(title) values ('hello')", pool=pool)
        c2 = insert(loop=loop, sql="insert into artitle_test(title) values ('heloko')", pool=pool)
        tasks = [asyncio.ensure_future(c1), asyncio.ensure_future(c2)]
        return await asyncio.gather(*tasks)
    finally:
        # Release all pooled connections instead of leaking them.
        pool.close()
        await pool.wait_closed()
if __name__ == '__main__':
    # Run the demo inserts to completion on the default event loop.
    cur_loop = asyncio.get_event_loop()
    cur_loop.run_until_complete(main(cur_loop))
|
from django.http import HttpResponse
from django.shortcuts import render, render_to_response
def homepage(request):
    """Render the TRQLive homepage.

    Fix: uses render() instead of render_to_response(), which was
    deprecated in Django 2.0 and removed in 3.0; render() also runs the
    template with a proper RequestContext.
    """
    return render(request, 'trqlive/homepage.html')


def homepage_static(request):
    """Render the static variant of the TRQLive homepage."""
    return render(request, 'trqlive/homepage_static.html')
# vi:ts=4:sw=4:expandtab
|
'''
Function:
Implementation of PSANet
Author:
Zhenchao Jin
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
try:
from mmcv.ops import PSAMask
except:
PSAMask = None
from ..base import BaseSegmentor
from ...backbones import BuildActivation, BuildNormalization
'''PSANet'''
class PSANet(BaseSegmentor):
    '''PSANet segmentor: augments the last backbone feature map with
    point-wise spatial attention ("collect", "distribute", or both
    directions) before decoding to per-class predictions.'''
    def __init__(self, cfg, mode):
        super(PSANet, self).__init__(cfg, mode)
        align_corners, norm_cfg, act_cfg, head_cfg = self.align_corners, self.norm_cfg, self.act_cfg, cfg['head']
        # build psa
        assert head_cfg['type'] in ['collect', 'distribute', 'bi-direction']
        mask_h, mask_w = head_cfg['mask_size']
        # Default normalization factor is the number of attention positions.
        if 'normalization_factor' not in self.cfg['head']:
            self.cfg['head']['normalization_factor'] = mask_h * mask_w
        # 1x1 conv reducing backbone channels to the PSA working width.
        self.reduce = nn.Sequential(
            nn.Conv2d(head_cfg['in_channels'], head_cfg['feats_channels'], kernel_size=1, stride=1, padding=0, bias=False),
            BuildNormalization(placeholder=head_cfg['feats_channels'], norm_cfg=norm_cfg),
            BuildActivation(act_cfg),
        )
        # Predicts one attention logit per mask position (mask_h * mask_w).
        self.attention = nn.Sequential(
            nn.Conv2d(head_cfg['feats_channels'], head_cfg['feats_channels'], kernel_size=1, stride=1, padding=0, bias=False),
            BuildNormalization(placeholder=head_cfg['feats_channels'], norm_cfg=norm_cfg),
            BuildActivation(act_cfg),
            nn.Conv2d(head_cfg['feats_channels'], mask_h * mask_w, kernel_size=1, stride=1, padding=0, bias=False),
        )
        if head_cfg['type'] == 'bi-direction':
            # Second ("distribute") branch mirroring the reduce/attention pair above.
            self.reduce_p = nn.Sequential(
                nn.Conv2d(head_cfg['in_channels'], head_cfg['feats_channels'], kernel_size=1, stride=1, padding=0, bias=False),
                BuildNormalization(placeholder=head_cfg['feats_channels'], norm_cfg=norm_cfg),
                BuildActivation(act_cfg),
            )
            self.attention_p = nn.Sequential(
                nn.Conv2d(head_cfg['feats_channels'], head_cfg['feats_channels'], kernel_size=1, stride=1, padding=0, bias=False),
                BuildNormalization(placeholder=head_cfg['feats_channels'], norm_cfg=norm_cfg),
                BuildActivation(act_cfg),
                nn.Conv2d(head_cfg['feats_channels'], mask_h * mask_w, kernel_size=1, stride=1, padding=0, bias=False),
            )
            # Non-compact mode needs mmcv's PSAMask op (module-level PSAMask
            # is None when mmcv is unavailable, so this would fail loudly).
            if not head_cfg['compact']:
                self.psamask_collect = PSAMask('collect', head_cfg['mask_size'])
                self.psamask_distribute = PSAMask('distribute', head_cfg['mask_size'])
        else:
            # Single-direction variant uses one PSAMask of the configured type.
            if not head_cfg['compact']:
                self.psamask = PSAMask(head_cfg['type'], head_cfg['mask_size'])
        # Projects PSA output back to the backbone channel width.
        # NOTE(review): kernel_size=1 with padding=1 grows H/W by 2 here; the
        # result is resized back to the identity size in forward(), so this
        # only adds a zero border before the conv -- confirm it is intentional.
        self.proj = nn.Sequential(
            nn.Conv2d(head_cfg['feats_channels'] * (2 if head_cfg['type'] == 'bi-direction' else 1), head_cfg['in_channels'], kernel_size=1, stride=1, padding=1, bias=False),
            BuildNormalization(placeholder=head_cfg['in_channels'], norm_cfg=norm_cfg),
            BuildActivation(act_cfg),
        )
        # build decoder
        self.decoder = nn.Sequential(
            nn.Conv2d(head_cfg['in_channels'] * 2, head_cfg['feats_channels'], kernel_size=3, stride=1, padding=1, bias=False),
            BuildNormalization(placeholder=head_cfg['feats_channels'], norm_cfg=norm_cfg),
            BuildActivation(act_cfg),
            nn.Dropout2d(head_cfg['dropout']),
            nn.Conv2d(head_cfg['feats_channels'], cfg['num_classes'], kernel_size=1, stride=1, padding=0)
        )
        # build auxiliary decoder
        self.setauxiliarydecoder(cfg['auxiliary'])
        # freeze normalization layer if necessary
        if cfg.get('is_freeze_norm', False): self.freezenormalization()
        # layer names for training tricks
        self.layer_names = [
            'backbone_net', 'reduce', 'attention', 'proj', 'decoder', 'auxiliary_decoder', 'reduce_p',
            'attention_p', 'psamask_collect', 'psamask_distribute', 'psamask',
        ]
    '''forward'''
    def forward(self, x, targets=None):
        img_size = x.size(2), x.size(3)
        # feed to backbone network
        backbone_outputs = self.transforminputs(self.backbone_net(x), selected_indices=self.cfg['backbone'].get('selected_indices'))
        # feed to psa
        identity = backbone_outputs[-1]
        shrink_factor, align_corners = self.cfg['head']['shrink_factor'], self.align_corners
        if self.cfg['head']['type'] in ['collect', 'distribute']:
            # Single-direction PSA path.
            out = self.reduce(backbone_outputs[-1])
            n, c, h, w = out.size()
            if shrink_factor != 1:
                # Downscale before attention; when neither side divides
                # evenly, round up and keep corner alignment.
                if h % shrink_factor and w % shrink_factor:
                    h = (h - 1) // shrink_factor + 1
                    w = (w - 1) // shrink_factor + 1
                    align_corners = True
                else:
                    h = h // shrink_factor
                    w = w // shrink_factor
                    align_corners = False
                out = F.interpolate(out, size=(h, w), mode='bilinear', align_corners=align_corners)
            y = self.attention(out)
            if self.cfg['head']['compact']:
                if self.cfg['head']['type'] == 'collect':
                    # Compact collect: transpose the (h*w, h*w) attention map.
                    y = y.view(n, h * w, h * w).transpose(1, 2).view(n, h * w, h, w)
            else:
                y = self.psamask(y)
            if self.cfg['head']['psa_softmax']:
                y = F.softmax(y, dim=1)
            # Aggregate features with the attention map, then normalize.
            out = torch.bmm(out.view(n, c, h * w), y.view(n, h * w, h * w)).view(n, c, h, w) * (1.0 / self.cfg['head']['normalization_factor'])
        else:
            # Bi-directional PSA: run collect and distribute branches in parallel.
            x_col = self.reduce(backbone_outputs[-1])
            x_dis = self.reduce_p(backbone_outputs[-1])
            n, c, h, w = x_col.size()
            if shrink_factor != 1:
                if h % shrink_factor and w % shrink_factor:
                    h = (h - 1) // shrink_factor + 1
                    w = (w - 1) // shrink_factor + 1
                    align_corners = True
                else:
                    h = h // shrink_factor
                    w = w // shrink_factor
                    align_corners = False
                x_col = F.interpolate(x_col, size=(h, w), mode='bilinear', align_corners=align_corners)
                x_dis = F.interpolate(x_dis, size=(h, w), mode='bilinear', align_corners=align_corners)
            y_col = self.attention(x_col)
            y_dis = self.attention_p(x_dis)
            if self.cfg['head']['compact']:
                y_dis = y_dis.view(n, h * w, h * w).transpose(1, 2).view(n, h * w, h, w)
            else:
                y_col = self.psamask_collect(y_col)
                y_dis = self.psamask_distribute(y_dis)
            if self.cfg['head']['psa_softmax']:
                y_col = F.softmax(y_col, dim=1)
                y_dis = F.softmax(y_dis, dim=1)
            x_col = torch.bmm(x_col.view(n, c, h * w), y_col.view(n, h * w, h * w)).view(n, c, h, w) * (1.0 / self.cfg['head']['normalization_factor'])
            x_dis = torch.bmm(x_dis.view(n, c, h * w), y_dis.view(n, h * w, h * w)).view(n, c, h, w) * (1.0 / self.cfg['head']['normalization_factor'])
            out = torch.cat([x_col, x_dis], 1)
        feats = self.proj(out)
        # Resize back to the backbone feature size and fuse with the identity.
        feats = F.interpolate(feats, size=identity.shape[2:], mode='bilinear', align_corners=align_corners)
        # feed to decoder
        feats = torch.cat([identity, feats], dim=1)
        predictions = self.decoder(feats)
        # forward according to the mode
        if self.mode == 'TRAIN':
            loss, losses_log_dict = self.forwardtrain(
                predictions=predictions,
                targets=targets,
                backbone_outputs=backbone_outputs,
                losses_cfg=self.cfg['losses'],
                img_size=img_size,
            )
            return loss, losses_log_dict
        return predictions
import gzip
import pickle
import matplotlib.cm as cm
import matplotlib.pyplot as plt
# Load the pickled MNIST dataset (train/valid/test splits). Python 2 script.
with gzip.open('mnist.pkl.gz', 'rb') as f:
    train_set, valid_set, test_set = pickle.load(f)
train_x, train_y = train_set
# Each image is a flat vector; reshape below shows it is 28*28 = 784 long.
print len(train_x[0])
# for l in range(len(train_x)):
for l in range(0,len(train_x)):
    # print l
    # Reshape the flat vector back to 28x28 and render it in greyscale.
    plt.imshow(train_x[l].reshape((28, 28)), cmap=cm.Greys_r)
    # plt.show()
    # Save each digit as imgs/foo_<index>.png
    s = "imgs/foo_"+str(l)+".png"
    # s=s.join(str(l))
    # s=s.join(".png")
    print s
    plt.savefig(s)
# plt.imshow(train_x[0].reshape((28, 28)), cmap=cm.Greys_r)
# plt.show()
# save the pic
# plt.savefig('foo.png')
from typing import List
class Solution:
    def numSubseq(self, nums: List[int], target: int) -> int:
        """Count non-empty subsequences whose min + max <= target.

        Sorts ``nums`` in place (as before) and sweeps two pointers: when
        nums[lo] + nums[hi] <= target, every subsequence containing
        nums[lo] plus any subset of nums[lo+1..hi] qualifies -- 2**(hi-lo)
        of them.  The count is returned modulo 10**9 + 7.

        Fix: powers of two are precomputed modulo 1e9+7 so the loop never
        builds arbitrarily large 2**(j-i) integers; each step is O(1) and
        the returned value is unchanged.
        """
        MOD = 10 ** 9 + 7
        nums.sort()
        n = len(nums)
        # pow2[k] == (2 ** k) % MOD
        pow2 = [1] * n
        for k in range(1, n):
            pow2[k] = pow2[k - 1] * 2 % MOD
        count = 0
        lo, hi = 0, n - 1
        while lo <= hi:
            if nums[lo] + nums[hi] > target:
                # Largest element too big to pair with the current smallest.
                hi -= 1
            else:
                count = (count + pow2[hi - lo]) % MOD
                lo += 1
        return count
|
from zeep import xsd
from .mappings import query_type_mapping
def query_filter(vm, field, value, query_type):
    """Build a single query filter for ``field`` wrapping ``value`` in the
    XSD/factory type matching the field's declared type.

    :param vm: client wrapper exposing ``query_factory`` and ``type_factory``
    :param field: dict with 'name' and 'type' keys
    :param value: raw value to filter on
    :param query_type: key into query_type_mapping (e.g. 'BEGINS_WITH')
    :raises Exception: when the field type is not recognised
    """
    query_function = query_type_mapping[query_type]
    # Fix: compare strings with `==`, not `is` -- identity comparison of
    # string literals only works by accident of CPython interning and is a
    # SyntaxWarning on Python >= 3.8.
    field_type = field['type']
    if field_type == 'String':
        query_filter = vm.query_factory[query_function](Field=field['name'], Value=xsd.AnyObject(xsd.String(), value))
    elif field_type == 'Id':
        query_filter = vm.query_factory[query_function](Field=field['name'], Value=vm.type_factory.Id(value))
    elif field_type == 'Long':
        query_filter = vm.query_factory[query_function](Field=field['name'], Value=xsd.AnyObject(xsd.Long(), value))
    elif field_type == 'Boolean':
        query_filter = vm.query_factory[query_function](Field=field['name'], Value=xsd.AnyObject(xsd.Boolean(), value))
    elif field_type == 'OsVersion':
        query_filter = vm.query_factory[query_function](Field=field['name'], Value=xsd.AnyObject(vm.query_factory.OsVersion, value))
    elif field_type == 'ClientState':
        query_filter = vm.query_factory[query_function](Field=field['name'], Value=xsd.AnyObject(vm.query_factory.ClientState, value))
    else:
        raise Exception("Can't determine Value type")
    return query_filter
def multi_query(vm, filters, join_type):
    """Combine several query filters with a boolean join.

    :param vm: client wrapper exposing ``query_factory``
    :param filters: iterable of query filter objects
    :param join_type: 'OR' or 'AND'
    :raises Exception: for any other join_type
    """
    query_array = vm.query_factory.ArrayOfQueryFilter(QueryFilter=filters)
    # Fix: compare strings with `==`, not `is` -- identity comparison of
    # string literals relies on CPython interning and is a SyntaxWarning
    # on Python >= 3.8.
    if join_type == 'OR':
        multi_filter = vm.query_factory.QueryFilterOr(Filters=query_array)
    elif join_type == 'AND':
        multi_filter = vm.query_factory.QueryFilterAnd(Filters=query_array)
    else:
        raise Exception('join_type must be either OR or AND')
    return multi_filter
def query(vm, field, value, page=1, query_type='BEGINS_WITH'):
    """Build a QueryDefinition for ``field``/``value``.

    A list ``value`` becomes an OR of per-item filters; a scalar becomes a
    single filter.
    """
    if isinstance(value, list):
        per_item = [query_filter(vm, field=field, value=item, query_type=query_type)
                    for item in value]
        chosen = multi_query(vm, per_item, 'OR')
    else:
        chosen = query_filter(vm, field=field, value=value, query_type=query_type)
    return vm.query_factory.QueryDefinition(Filter=chosen, Page=page)
def _collect_query_results(vm, field, value, query_type, query_function, **kwargs):
    """Page through ``query_function`` results and return all elements."""
    collected = []
    page = 1
    while True:
        definition = query(vm, field=field, value=value, page=page, query_type=query_type)
        response = query_function(queryDefinition=definition, **kwargs)
        # No elements on this page -- nothing more to collect.
        if response['Elements'] is None:
            break
        collected.extend(response['Elements']['anyType'])
        # Stop once the service reports no further pages.
        if not response['NextPageAvailable']:
            break
        page += 1
    return collected
|
from sys import stdin
import itertools
# First line: K (number of lists) and M (modulus); then one list per line,
# each formatted as "<count> v1 v2 ...".
A = input().split()
B = stdin.read().splitlines()
K = int(A[0])
M = int(A[1])

def pwr(values):
    """Return the squares of `values`.

    Fix: parameter renamed from `list`, which shadowed the builtin.
    """
    return [x ** 2 for x in values]

# Drop the leading per-line count, square the remaining values.
squared_lists = [pwr(list(map(int, B[i].split()))[1:]) for i in range(K)]
# Maximize (sum of one pick per list) mod M over the cartesian product;
# a generator avoids materializing every intermediate list.
print(max(sum(combo) % M for combo in itertools.product(*squared_lists)))
|
#-*- coding: utf-8 -*-
""" Console module container """
from __future__ import print_function
import sys
import time
from builtins import input
import colorama
import rl
from voiceplay import __title__
from voiceplay.utils.helpers import SingleQueueDispatcher
from voiceplay.utils.command import Command
class Console(object):
    """
    Console mode object
    """
    def __init__(self, banner='Welcome to {0} console!'.format(__title__)):
        self.name = __title__
        # Prompt template filled by format_prompt: time, app name, '>' marker.
        self.default_prompt = '%s [%s]%s '
        self.exit = False
        self.banner = banner
        # Maps command keyword -> {'method': handler, 'aliases': [str, ...]}.
        self.commands = {}
        self.queue = None
        self.dispatcher = None
    def set_queue(self, queue=None):
        """
        Pass command queue
        """
        self.queue = queue
    def set_exit(self):
        """
        Set exit flag
        """
        self.exit = True
        # Also stop the background queue dispatcher, if one is running.
        if self.dispatcher:
            self.dispatcher.set_exit()
    def add_handler(self, keyword, method, aliases=None):
        """
        Adds command handler to console
        """
        aliases = aliases if aliases else []
        self.commands[keyword] = {'method': method, 'aliases': aliases}
    @property
    def format_prompt(self):
        """
        Format command line prompt
        """
        result = self.default_prompt % (time.strftime('%H:%M:%S'),
                                        colorama.Fore.GREEN + colorama.Style.BRIGHT + self.name + colorama.Style.RESET_ALL,
                                        colorama.Fore.CYAN + colorama.Style.BRIGHT + '>' + colorama.Style.RESET_ALL)
        return result
    def parse_command(self, command):
        """
        Parse entered command
        """
        result = None
        should_be_printed = True
        orig_command = command.strip()
        # Match case-insensitively, but hand the original casing to handlers.
        command = orig_command.lower()
        for kwd in self.commands:
            # First keyword (or alias) that prefixes the input wins.
            if command.startswith(kwd) or [c for c in self.commands[kwd]['aliases'] if command.startswith(c)]:
                try:
                    result, should_be_printed = self.commands[kwd]['method'](orig_command)
                    break
                except KeyboardInterrupt:
                    # Ctrl-C aborts the handler but keeps the console alive.
                    pass
        return result, should_be_printed
    def quit_command(self, _):
        """
        Handle quit / exit / logout command
        """
        self.exit = True
        result = None
        should_be_printed = False
        return result, should_be_printed
    @staticmethod
    def clear_command(_):
        """
        Handle clear command
        """
        sys.stderr.flush()
        # ANSI escape: clear screen and move the cursor home.
        sys.stderr.write("\x1b[2J\x1b[H")
        result = None
        should_be_printed = False
        return result, should_be_printed
    def complete(self, _, state):
        """
        Provide autocompletion support (buggy)
        """
        text = rl.readline.get_line_buffer() # pylint:disable=no-member
        # NOTE(review): readline completers are expected to return None once
        # `state` runs past the last match; indexing raises IndexError here
        # instead (hence "buggy" above) -- apparently tolerated by readline.
        if not text:
            return [c + ' ' for c in self.commands][state]
        results = [c + ' ' for c in self.commands if c.startswith(text)]
        return results[state]
    @staticmethod
    def run_exit():
        """
        Finalize exit (invoked after self.quit_command)
        """
        print ('Goodbye!')
    def run_console(self):
        """
        Actual console runner
        """
        inp = None
        colorama.init()
        # FSCK! Details here: http://stackoverflow.com/questions/7116038/python-tab-completion-mac-osx-10-7-lion
        if 'libedit' in rl.readline.__doc__: # pylint:disable=unsupported-membership-test
            rl.readline.parse_and_bind("bind ^I rl_complete") # pylint:disable=no-member
        else:
            rl.readline.parse_and_bind("tab: complete") # pylint:disable=no-member
        rl.readline.set_completer(self.complete) # pylint:disable=no-member
        # Add handlers
        self.add_handler(Command.SHUTDOWN, self.quit_command, Command.SHUTDOWN_ALIASES)
        self.add_handler(Command.CLEAR, self.clear_command, Command.CLEAR_ALIASES)
        #
        if self.banner:
            print (self.banner)
        while True:
            print (self.format_prompt, end='')
            try:
                inp = input()
                # On Python 2 the entered text may be bytes; normalize to unicode.
                if sys.version_info.major == 2:
                    inp = inp.decode('utf-8')
            except KeyboardInterrupt:
                pass
            except EOFError:
                # Ctrl-D leaves the console gracefully.
                self.exit = True
                inp = None
            if inp:
                result, should_be_printed = self.parse_command(inp)
                if should_be_printed:
                    print (result)
            if self.exit:
                self.run_exit()
                break
    def run_bg_queue(self):
        """
        Run API commands background queue poller
        """
        if not self.queue:
            return
        self.dispatcher = SingleQueueDispatcher(queue=self.queue)
        while not self.exit:
            full_message = self.dispatcher.get_full_message()
            message = full_message.get('message')
            uuid = full_message.get('uuid')
            if not message:
                time.sleep(0.1)
                continue
            # do last.fm style normalization, i.e. replace + with space
            message = message.replace('+', ' ')
            print (message)
            result, should_be_printed = self.parse_command(message)
            # Send the handler's result back to the API caller keyed by uuid.
            self.dispatcher.put_message(uuid, result)
            if should_be_printed:
                print (result)
            time.sleep(0.1)
|
"""
CEASIOMpy: Conceptual Aircraft Design Software
Developed for CFS ENGINEERING, 1015 Lausanne, Switzerland
The script contains all the geometrical value required for the
weight unconventional analysis.
| Works with Python 2.7
| Author : Stefano Piccini
| Date of creation: 2018-11-26
| Last modifiction: 2019-02-20
"""
#=============================================================================
# IMPORTS
#=============================================================================
""" No import """
#=============================================================================
# CLASSES
#=============================================================================
class AircraftWingGeometry:
    """Container for the geometry values extracted for the wings.

    Every attribute starts empty (list) or zero and is filled in later by
    the unconventional-weight geometry analysis.  Lengths are in [m],
    areas in [m^2], volumes in [m^3].
    """

    def __init__(self):
        # --- Wing bookkeeping ---
        self.is_horiz = []               # per-wing flag: wing is horizontal
        self.w_nb = 0                    # number of wings
        self.wing_nb = 0                 # number of wings
        self.main_wing_index = 0         # index of the main wing
        self.wing_sym = []               # symmetry plane of each wing
        self.wing_sec_nb = []            # sections per wing
        self.wing_seg_nb = []            # segments per wing
        # --- Dimensions ---
        self.wing_span = []              # span of each wing
        self.wing_seg_length = 0         # segment lengths
        self.wing_sec_thicknes = 0       # section thicknesses
        self.wing_sec_mean_thick = []    # mean section thickness
        self.wing_max_chord = []         # chord at the fuselage connection
        self.wing_min_chord = []         # tip chord
        self.wing_mac = 0                # m.a.c. length and (x, y, z) position
        self.wing_center_seg_point = 0   # segment-center points (x, y, z)
        # --- Areas ---
        self.wing_plt_area = []          # planform area of each wing
        self.wing_plt_area_main = 0      # main wing planform area
        self.main_wing_surface = 0       # main wing wetted area
        self.tail_wings_surface = []     # tail wings wetted areas
        self.total_wings_surface = 0     # total wings wetted area
        # --- Volumes ---
        self.wing_seg_vol = 0            # per-segment volumes
        self.wing_vol = []               # per-wing volumes
        self.wing_tot_vol = 0            # total wing volume
        self.w_seg_sec = 0               # reordered segment/section mapping
        # --- Cabin and fuel ---
        self.cabin_span = 0              # cabin width
        self.y_max_cabin = 0             # maximum cabin height
        self.cabin_area = 0              # BWB passenger area
        self.fuse_vol = 0                # central ("fuselage") wing volume
        self.cabin_vol = 0               # cabin volume
        self.fuse_fuel_vol = 0           # fuselage volume usable for fuel
        self.wing_fuel_vol = 0           # fuel volume inside the wings
        self.fuel_vol_tot = 0            # total allowed fuel volume
class AircraftFuseGeometry:
    """Container for the geometry values extracted for the fuselage(s).

    Attributes start empty (list) or zero and are filled in later by the
    unconventional-weight geometry analysis.  Lengths in [m], areas in
    [m^2], volumes in [m^3].

    Fix: ``fuse_nose_length`` was assigned twice in the original
    ``__init__``; the duplicate assignment has been removed (no behavior
    change -- both assignments created a fresh empty list).
    """

    def __init__(self, f_nb):
        # General
        self.tot_length = 0              # aircraft total length
        # Fuselage
        self.f_nb = f_nb                 # number of fuselages
        self.fuse_nb = f_nb              # number of fuselages counting symmetry
        self.fuse_sym = []               # symmetry plane of each fuselage
        self.fuse_sec_nb = []            # sections per fuselage
        self.fuse_seg_nb = []            # segments per fuselage
        self.fuse_seg_index = 0          # segment index mapping
        self.cabin_nb = 0                # cabins per fuselage
        self.cabin_seg = 0               # 1 if a segment is a cabin segment, else 0
        self.fuse_length = []            # fuselage lengths
        self.fuse_sec_per = 0            # section perimeters/circumferences
        self.fuse_sec_width = 0          # section widths
        self.fuse_sec_abs_dist = 0       # section absolute distances
        self.fuse_seg_length = 0         # segment lengths
        self.fuse_sec_rel_dist = 0       # section distances from the start profile
        self.fuse_nose_length = []       # nose length of each fuselage
        self.fuse_cabin_length = []      # cabin length of each fuselage
        self.fuse_tail_length = []       # tail length of each fuselage
        self.fuse_mean_width = []        # mean fuselage width
        self.fuse_center_seg_point = 0   # segment-center points (x, y, z)
        self.fuse_center_sec_point = 0   # section-center points (x, y, z)
        self.cabin_area = 0              # cabin area of each fuselage
        self.fuse_surface = []           # wetted area of each fuselage
        self.fuse_seg_vol = 0            # per-segment volumes
        self.fuse_cabin_vol = []         # cabin volume of each fuselage
        self.fuse_fuel_vol = []          # fuselage volume usable as fuel tank
        self.fuse_vol = []               # fuselage volumes
        self.f_seg_sec = 0               # reordered segment/section mapping
#=============================================================================
# MAIN
#=============================================================================
if __name__ == '__main__':
    # Fix: `log` was used without ever being defined (this module has no
    # imports), so running the file directly raised NameError.  Create a
    # logger locally; the import is scoped to the guard so the module body
    # still has "No import".
    import logging
    log = logging.getLogger(__name__)
    log.warning('##########################################################')
    log.warning('############# ERROR NOT A STANDALONE PROGRAM #############')
    log.warning('##########################################################')
|
import powerbalance as p
# Import of our own module
# Pricing rule constants
# Cost rates used by makeCost below; per the names, normal vs "fast"
# (urgent) buy/sell rates -- units not stated in this module.
buyCost = 5
buyCostFast = 10
sellCost = 2
sellCostFast = 1
# Compute the cost of a single energy-balance outcome
def makeCost(value, adjust):
    """Cost of one energy-balance outcome ``value`` given a planned
    correction ``adjust``."""
    # Planned correction priced at the normal buy/sell rates.
    cost = -adjust * buyCost if adjust > 0 else adjust * sellCost
    # Residual imbalance after the correction, priced at the "fast" rates.
    diff = value + adjust
    cost += diff * sellCostFast if diff < 0 else -diff * buyCostFast
    return cost
# Compute the probability distribution of costs/profits
def costBalance(power, adjust):
    """Map each (value, probability) pair in ``power`` to
    (cost of that outcome under ``adjust``, probability)."""
    return [(makeCost(value, prob_pair_value), prob_pair_value)
            for (value, prob_pair_value) in power] if False else \
        [(makeCost(value, adjust), prob) for (value, prob) in power]
#ะัะพััะพะน ะธ ะณะปัะฟัะน ะฐะปะณะพัะธัะผ ะถะฐะดะฝะพะณะพ ัะฟััะบะฐ
def greed(a, b, c, x):
    """One step of greedy descent.

    Given neighbour values ``a`` (left), ``b`` (centre), ``c`` (right) and
    step size ``x``: return 0 when the centre is a strict local minimum,
    otherwise the signed step towards the smaller neighbour.
    """
    if b < a and b < c:
        # Centre is strictly the smallest -- converged.
        return 0
    if b > a:
        return -x
    return x if b < c else -x
# Find one of the minima of the power-system balance correction
def greedilyFindAdjust(net):
    """Greedy line search for the balance adjustment with minimal cost.

    Fix: the original ignored its ``net`` parameter and always used the
    global ``p.network``; the caller-supplied network is now used (the
    existing caller passes ``p.network``, so behavior is unchanged).
    """
    adjStep = 0.1
    adj = 0
    power = p.powerBalance(net)
    print('Preparations are complete')
    while True:
        g = greed(costBalance(power, adj - adjStep), costBalance(power, adj),
                  costBalance(power, adj + adjStep), adjStep)
        if g == 0:
            return adj
        # Log progress and take the greedy step.
        print(adj)
        adj += g
# Print the resulting adjustment
print(greedilyFindAdjust(p.network))
|
#! /usr/bin/env python
import bluetooth
import time
import os
import json
import threading
from threading import Thread
from DatabaseManager import DatabaseManager
from JSONParser import JSONParser
from DetectorsFileParser import DetectorsFileParser
class BluetoothReceiver():
    """Connects to every configured detector over Bluetooth RFCOMM and
    stores the received measurements in the database.

    Python 2 code (print statements; socket data handled as str).
    """
    # NOTE(review): class-level mutable default shared by all instances;
    # __init__ rebinds it per instance, so this default is effectively unused.
    detectors = []
    # Method receive all data from socket
    def receiveAll(self, socket, size):
        # recv() may return partial packets; loop until exactly `size`
        # bytes have arrived, or return None if the peer disconnects.
        data = ""
        while (len(data) < size):
            packet = socket.recv(size - len(data))
            if not packet:
                return None
            data += packet
        return data
    def connect(self, detector):
        # Open an RFCOMM socket to the detector and enter the receive loop.
        # NOTE(review): reconnection is implemented via recursive connect()
        # calls (also from startReceiving), so the call stack grows with
        # every reconnect -- a loop would be safer long-term.
        try:
            socket = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
            socket.connect((detector.address, detector.port))
            print detector.address + " - Connected"
            self.startReceiving(socket, detector)
        except Exception as e:
            print detector.address + " - Failed to connect"
            print e.args[0]
            time.sleep(80)
            print detector.address + " - Reconnecting..."
            self.connect(detector)
    def startReceiving(self, socket, detector):
        # Receive fixed-size 72-byte frames forever; on any error close the
        # socket and reconnect.
        data = ""
        while 1:
            try:
                data = self.receiveAll(socket, 72)
                time.sleep(0.5)
                print detector.address + " - " + data
                # Frames arrive with single quotes; normalize to valid JSON.
                jsonObject = json.loads(data.replace("'", '"'))
                jsonObject["detectorId"] = detector.id
                measurement = JSONParser().decodeMeasurement(jsonObject)
                DatabaseManager().saveMeasurement(measurement)
            except Exception as e:
                print e.args[0]
                socket.close()
                print detector.address + " - Socket closed"
                print detector.address + " - Reconnecting..."
                self.connect(detector)
    def __init__(self):
        # Load detector definitions, persist them, then spawn one receiver
        # thread per detector known to the database.
        self.detectors = DetectorsFileParser.parseFromFile("Detectors.json")
        DatabaseManager().saveDetectors(self.detectors)
        for detector in DatabaseManager().getDetectors():
            Thread(target=self.connect, args=[detector]).start()
if __name__ == '__main__':
    # Instantiating the receiver starts all detector threads.
    BluetoothReceiver()
|
# Time: O(n)
# Space: O(1)
# Given a singly linked list, return a random node's value from the linked list.
# Each node must have the same probability of being chosen.
#
# Follow up:
# What if the linked list is extremely large and its length is unknown to you?
# Could you solve this efficiently without using extra space?
#
# Example:
#
# // Init a singly linked list [1,2,3].
# ListNode head = new ListNode(1);
# head.next = new ListNode(2);
# head.next.next = new ListNode(3);
# Solution solution = new Solution(head);
#
# // getRandom() should return either 1, 2, or 3 randomly.
# Each element should have equal probability of returning.
# solution.getRandom();
from random import randint
class Solution(object):

    def __init__(self, head):
        """Store the list head.

        :type head: ListNode -- guaranteed non-null (at least one node).
        """
        self.__head = head

    # Reservoir sampling with k=1: after seeing the i-th node, keep its
    # value with probability 1/i, so each of the n nodes ends up selected
    # with probability 1/n -- no extra space, single pass, length unknown.
    def getRandom(self):
        """Return a uniformly random node's value.

        :rtype: int
        """
        chosen = -1
        node, seen = self.__head, 0
        while node:
            seen += 1
            if randint(1, seen) == 1:
                chosen = node.val
            node = node.next
        return chosen
# Your Solution object will be instantiated and called as such:
# obj = Solution(head)
# param_1 = obj.getRandom()
|
import math
import hashlib
import gym
from enum import IntEnum
import numpy as np
from gym import error, spaces, utils
from gym.utils import seeding
from .rendering import *
from copy import deepcopy
# Size in pixels of a tile in the full-scale human view
TILE_PIXELS = 32
# Map of color names to RGB values
COLORS = {
    'red' : np.array([255, 0, 0]),
    'green' : np.array([0, 255, 0]),
    'blue' : np.array([0, 0, 255]),
    'purple': np.array([112, 39, 195]),
    'yellow': np.array([255, 255, 0]),
    'grey' : np.array([100, 100, 100])
}
COLOR_NAMES = sorted(list(COLORS.keys()))
# Used to map colors to integers
COLOR_TO_IDX = {
    'red' : 0,
    'green' : 1,
    'blue' : 2,
    'purple': 3,
    'yellow': 4,
    'grey' : 5
}
# Inverse mapping: color index back to color name
IDX_TO_COLOR = dict(zip(COLOR_TO_IDX.values(), COLOR_TO_IDX.keys()))
# Map of object type to integers
OBJECT_TO_IDX = {
    'unseen' : 0,
    'empty' : 1,
    'wall' : 2,
    'floor' : 3,
    'door' : 4,
    'key' : 5,
    'ball' : 6,
    'box' : 7,
    'goal' : 8,
    'lava' : 9,
    'agent' : 10,
}
# Inverse mapping: object index back to object type name
IDX_TO_OBJECT = dict(zip(OBJECT_TO_IDX.values(), OBJECT_TO_IDX.keys()))
# Map of state names to integers (interpreted by WorldObj.decode for doors)
STATE_TO_IDX = {
    'open' : 0,
    'closed': 1,
    'locked': 2,
}
# Map of agent direction indices to vectors
DIR_TO_VEC = [
    # Pointing right (positive X)
    np.array((1, 0)),
    # Down (positive Y)
    np.array((0, 1)),
    # Pointing left (negative X)
    np.array((-1, 0)),
    # Up (negative Y)
    np.array((0, -1)),
]
class WorldObj:
    """
    Base class for grid world objects
    """

    def __init__(self, type, color):
        # Validate against the global registries before storing.
        assert type in OBJECT_TO_IDX, type
        assert color in COLOR_TO_IDX, color
        self.type = type
        self.color = color
        self.contains = None
        # Initial position of the object
        self.init_pos = None
        # Current position of the object
        self.cur_pos = None

    def can_overlap(self):
        """Can the agent overlap with this?"""
        return False

    def can_pickup(self):
        """Can the agent pick this up?"""
        return False

    def ma_can_pickup(self, agent_id):
        """Can an agent pick this up in a multi-agent env?"""
        return False

    def can_contain(self):
        """Can this contain another object?"""
        return False

    def see_behind(self):
        """Can the agent see behind this object?"""
        return True

    def toggle(self, env, pos):
        """Method to trigger/toggle an action this object performs"""
        return False

    def ma_toggle(self, env, agent_id, pos):
        """Method to trigger/toggle an action this object performs in a multi-agent env"""
        return False

    def ma_check_toggle(self, env, agent_id, pos):
        """Method to check if trigger/toggle action is allowed on this object in a multi-agent env"""
        return False

    def encode(self):
        """Encode the a description of this object as a 3-tuple of integers"""
        return (OBJECT_TO_IDX[self.type], COLOR_TO_IDX[self.color], 0)

    @staticmethod
    def decode(type_idx, color_idx, state):
        """Create an object from a 3-tuple state description"""
        obj_type = IDX_TO_OBJECT[type_idx]
        color = IDX_TO_COLOR[color_idx]
        if obj_type in ('empty', 'unseen'):
            return None
        if obj_type == 'door':
            # State, 0: open, 1: closed, 2: locked
            return Door(color, state == 0, state == 2)
        # Simple colored objects share a one-argument constructor.
        colored_types = {'wall': Wall, 'floor': Floor, 'ball': Ball, 'key': Key, 'box': Box}
        if obj_type in colored_types:
            return colored_types[obj_type](color)
        if obj_type == 'goal':
            return Goal()
        if obj_type == 'lava':
            return Lava()
        assert False, "unknown object type in decode '%s'" % obj_type

    def render(self, r):
        """Draw this object with the given renderer"""
        raise NotImplementedError
class Goal(WorldObj):
    """Green goal tile; the agent may step onto it (see can_overlap)."""

    def __init__(self):
        super().__init__('goal', 'green')

    def can_overlap(self):
        # The agent is allowed to stand on the goal square
        return True

    def render(self, img):
        fill_coords(img, point_in_rect(0, 1, 0, 1), COLORS[self.color])
class Floor(WorldObj):
    """
    Colored floor tile the agent can walk over
    """

    def __init__(self, color='blue'):
        super().__init__('floor', color)

    def can_overlap(self):
        return True

    def render(self, img):
        # Pale (half-intensity) version of the base color, inset from the
        # top/left grid lines
        fill_coords(img, point_in_rect(0.031, 1, 0.031, 1), COLORS[self.color] / 2)
class Lava(WorldObj):
    """Red/orange hazard tile; the agent can walk onto it (can_overlap)."""

    def __init__(self):
        super().__init__('lava', 'red')

    def can_overlap(self):
        return True

    def render(self, img):
        orange = (255, 128, 0)
        black = (0, 0, 0)

        # Background color
        fill_coords(img, point_in_rect(0, 1, 0, 1), orange)

        # Three rows of little black zig-zag waves
        for row in range(3):
            ylo = 0.3 + 0.2 * row
            yhi = 0.4 + 0.2 * row
            segments = (
                (0.1, ylo, 0.3, yhi),
                (0.3, yhi, 0.5, ylo),
                (0.5, ylo, 0.7, yhi),
                (0.7, yhi, 0.9, ylo),
            )
            for x0, y0, x1, y1 in segments:
                fill_coords(img, point_in_line(x0, y0, x1, y1, r=0.03), black)
class Wall(WorldObj):
    """Impassable wall segment that also blocks line of sight."""

    def __init__(self, color='grey'):
        super().__init__('wall', color)

    def see_behind(self):
        # Walls occlude everything behind them
        return False

    def render(self, img):
        fill_coords(img, point_in_rect(0, 1, 0, 1), COLORS[self.color])
class Door(WorldObj):
    """Door that can be open, closed, or locked (unlocked by a same-color Key)."""

    def __init__(self, color, is_open=False, is_locked=False):
        super().__init__('door', color)
        self.is_open = is_open
        self.is_locked = is_locked

    def can_overlap(self):
        """The agent can only walk over this cell when the door is open"""
        return self.is_open

    def see_behind(self):
        # A closed door occludes; an open one does not
        return self.is_open

    def toggle(self, env, pos):
        """Open/close the door; unlocking requires a matching-color carried Key."""
        if not self.is_locked:
            self.is_open = not self.is_open
            return True
        carried = env.carrying
        if isinstance(carried, Key) and carried.color == self.color:
            self.is_locked = False
            self.is_open = True
            return True
        return False

    def ma_toggle(self, env, agent_id, pos):
        """Multi-agent toggle: checks the toggling agent's carried object."""
        if not self.is_locked:
            self.is_open = not self.is_open
            return True
        carried = env.carrying_objects[agent_id]
        if isinstance(carried, Key) and carried.color == self.color:
            self.is_locked = False
            self.is_open = True
            return True
        return False

    def ma_check_toggle(self, env, agent_id, pos):
        """Multi-agent check: would a toggle by this agent succeed?"""
        if not self.is_locked:
            return True
        carried = env.carrying_objects[agent_id]
        return isinstance(carried, Key) and carried.color == self.color

    def encode(self):
        """Encode a description of this object as a 3-tuple of integers."""
        # State channel: 0 = open, 1 = closed, 2 = locked
        if self.is_open:
            state = 0
        elif self.is_locked:
            state = 2
        else:
            state = 1
        return (OBJECT_TO_IDX[self.type], COLOR_TO_IDX[self.color], state)

    def render(self, img):
        c = COLORS[self.color]
        black = (0, 0, 0)

        if self.is_open:
            # Thin open-door strip along the right edge of the tile
            fill_coords(img, point_in_rect(0.88, 1.00, 0.00, 1.00), c)
            fill_coords(img, point_in_rect(0.92, 0.96, 0.04, 0.96), black)
            return

        if self.is_locked:
            # Solid door with a darker inset and a key slot
            fill_coords(img, point_in_rect(0.00, 1.00, 0.00, 1.00), c)
            fill_coords(img, point_in_rect(0.06, 0.94, 0.06, 0.94), 0.45 * np.array(c))
            fill_coords(img, point_in_rect(0.52, 0.75, 0.50, 0.56), c)
        else:
            # Door frame, inner panel, and handle
            fill_coords(img, point_in_rect(0.00, 1.00, 0.00, 1.00), c)
            fill_coords(img, point_in_rect(0.04, 0.96, 0.04, 0.96), black)
            fill_coords(img, point_in_rect(0.08, 0.92, 0.08, 0.92), c)
            fill_coords(img, point_in_rect(0.12, 0.88, 0.12, 0.88), black)
            fill_coords(img, point_in_circle(cx=0.75, cy=0.50, r=0.08), c)
class Key(WorldObj):
    """Pickable key; unlocks a locked Door of the same color."""

    def __init__(self, color='blue'):
        super().__init__('key', color)

    def can_pickup(self):
        return True

    def ma_can_pickup(self, agent_id):
        # An agent may only pick up a key matching its own color
        return IDX_TO_COLOR[agent_id % len(COLOR_NAMES)] == self.color

    def render(self, img):
        c = COLORS[self.color]
        # Shaft (vertical quad)
        fill_coords(img, point_in_rect(0.50, 0.63, 0.31, 0.88), c)
        # Teeth
        fill_coords(img, point_in_rect(0.38, 0.50, 0.59, 0.66), c)
        fill_coords(img, point_in_rect(0.38, 0.50, 0.81, 0.88), c)
        # Ring (annulus: filled circle with a black hole)
        fill_coords(img, point_in_circle(cx=0.56, cy=0.28, r=0.190), c)
        fill_coords(img, point_in_circle(cx=0.56, cy=0.28, r=0.064), (0, 0, 0))
class Ball(WorldObj):
    """Pickable ball, rendered as a filled circle."""

    def __init__(self, color='blue'):
        super().__init__('ball', color)

    def can_pickup(self):
        return True

    def ma_can_pickup(self, agent_id):
        # An agent may only pick up a ball matching its own color
        return IDX_TO_COLOR[agent_id % len(COLOR_NAMES)] == self.color

    def render(self, img):
        fill_coords(img, point_in_circle(0.5, 0.5, 0.31), COLORS[self.color])
class Box(WorldObj):
    """Pickable box that may hold another object; toggling swaps it for its contents."""

    def __init__(self, color, contains=None):
        super().__init__('box', color)
        self.contains = contains

    def can_pickup(self):
        return True

    def ma_can_pickup(self, agent_id):
        # An agent may only pick up a box matching its own color
        return IDX_TO_COLOR[agent_id % len(COLOR_NAMES)] == self.color

    def render(self, img):
        c = COLORS[self.color]
        # Outline (colored square with black interior)
        fill_coords(img, point_in_rect(0.12, 0.88, 0.12, 0.88), c)
        fill_coords(img, point_in_rect(0.18, 0.82, 0.18, 0.82), (0, 0, 0))
        # Horizontal slit
        fill_coords(img, point_in_rect(0.16, 0.84, 0.47, 0.53), c)

    def toggle(self, env, pos):
        # Replace the box in the grid by its contents (possibly None)
        env.grid.set(*pos, self.contains)
        return True
class Grid:
    """
    Represent a grid and operations on it
    """

    # Static cache of pre-rendered tiles, shared across all Grid instances.
    # Keyed by (object encoding,) + (agent_dir, highlight, tile_size[, agent_id]).
    tile_cache = {}

    def __init__(self, width, height):
        self.width = width
        self.height = height
        # Flat row-major storage: cell (i, j) lives at index j * width + i
        self.grid = [None] * width * height

    def __contains__(self, key):
        """Membership test: a WorldObj instance (by identity) or a (color, type) tuple.

        A tuple of (None, type) matches any color of that type.
        """
        if isinstance(key, WorldObj):
            for e in self.grid:
                if e is key:
                    return True
        elif isinstance(key, tuple):
            for e in self.grid:
                if e is None:
                    continue
                if (e.color, e.type) == key:
                    return True
                if key[0] is None and key[1] == e.type:
                    return True
        return False

    def __eq__(self, other):
        # Grids are equal when their compact encodings are identical
        grid1 = self.encode()
        grid2 = other.encode()
        return np.array_equal(grid2, grid1)

    def __ne__(self, other):
        return not self == other

    def copy(self):
        """Return a deep copy of this grid."""
        from copy import deepcopy
        return deepcopy(self)

    def set(self, i, j, v):
        """Set the cell at column i, row j to v (a WorldObj or None)."""
        assert i >= 0 and i < self.width
        assert j >= 0 and j < self.height
        self.grid[j * self.width + i] = v

    def get(self, i, j):
        """Return the contents of the cell at column i, row j."""
        assert i >= 0 and i < self.width
        assert j >= 0 and j < self.height
        return self.grid[j * self.width + i]

    def horz_wall(self, x, y, length=None, obj_type=Wall):
        """Lay a horizontal run of obj_type from (x, y); defaults to the right edge."""
        if length is None:
            length = self.width - x
        for i in range(0, length):
            self.set(x + i, y, obj_type())

    def vert_wall(self, x, y, length=None, obj_type=Wall):
        """Lay a vertical run of obj_type from (x, y); defaults to the bottom edge."""
        if length is None:
            length = self.height - y
        for j in range(0, length):
            self.set(x, y + j, obj_type())

    def wall_rect(self, x, y, w, h):
        """Outline a w x h rectangle of walls with top-left corner at (x, y)."""
        self.horz_wall(x, y, w)
        self.horz_wall(x, y+h-1, w)
        self.vert_wall(x, y, h)
        self.vert_wall(x+w-1, y, h)

    def rotate_left(self):
        """
        Rotate the grid to the left (counter-clockwise)
        """
        grid = Grid(self.height, self.width)
        for i in range(self.width):
            for j in range(self.height):
                v = self.get(i, j)
                grid.set(j, grid.height - 1 - i, v)
        return grid

    def slice(self, topX, topY, width, height):
        """
        Get a subset of the grid

        Cells outside this grid's bounds are filled with Wall objects.
        """
        grid = Grid(width, height)
        for j in range(0, height):
            for i in range(0, width):
                x = topX + i
                y = topY + j
                if x >= 0 and x < self.width and \
                   y >= 0 and y < self.height:
                    v = self.get(x, y)
                else:
                    v = Wall()
                grid.set(i, j, v)
        return grid

    @classmethod
    def render_tile(
        cls,
        obj,
        agent_dir=None,
        highlight=False,
        tile_size=TILE_PIXELS,
        subdivs=3
    ):
        """
        Render a tile and cache the result
        """
        # Hash map lookup key for the cache
        key = (agent_dir, highlight, tile_size)
        key = obj.encode() + key if obj else key

        if key in cls.tile_cache:
            return cls.tile_cache[key]

        # Render at subdivs times the final resolution, then downsample
        img = np.zeros(shape=(tile_size * subdivs, tile_size * subdivs, 3), dtype=np.uint8)

        # Draw the grid lines (top and left edges)
        fill_coords(img, point_in_rect(0, 0.031, 0, 1), (100, 100, 100))
        fill_coords(img, point_in_rect(0, 1, 0, 0.031), (100, 100, 100))

        if obj is not None:
            obj.render(img)

        # Overlay the agent on top
        if agent_dir is not None:
            tri_fn = point_in_triangle(
                (0.12, 0.19),
                (0.87, 0.50),
                (0.12, 0.81),
            )
            # Rotate the agent based on its direction
            tri_fn = rotate_fn(tri_fn, cx=0.5, cy=0.5, theta=0.5*math.pi*agent_dir)
            fill_coords(img, tri_fn, (255, 0, 0))

        # Highlight the cell if needed
        if highlight:
            highlight_img(img)

        # Downsample the image to perform supersampling/anti-aliasing
        img = downsample(img, subdivs)

        # Cache the rendered tile
        cls.tile_cache[key] = img

        return img

    def render(
        self,
        tile_size,
        agent_pos=None,
        agent_dir=None,
        highlight_mask=None
    ):
        """
        Render this grid at a given scale
        :param tile_size: tile size in pixels
        """
        if highlight_mask is None:
            # dtype=bool: np.bool was a deprecated alias, removed in NumPy 1.24
            highlight_mask = np.zeros(shape=(self.width, self.height), dtype=bool)

        # Compute the total grid size
        width_px = self.width * tile_size
        height_px = self.height * tile_size

        img = np.zeros(shape=(height_px, width_px, 3), dtype=np.uint8)

        # Render the grid
        for j in range(0, self.height):
            for i in range(0, self.width):
                cell = self.get(i, j)
                agent_here = np.array_equal(agent_pos, (i, j))
                tile_img = Grid.render_tile(
                    cell,
                    agent_dir=agent_dir if agent_here else None,
                    highlight=highlight_mask[i, j],
                    tile_size=tile_size
                )
                ymin = j * tile_size
                ymax = (j+1) * tile_size
                xmin = i * tile_size
                xmax = (i+1) * tile_size
                img[ymin:ymax, xmin:xmax, :] = tile_img

        return img

    @classmethod
    def ma_render_tile(
        cls,
        obj,
        agent_id=None,
        agent_dir=None,
        num_agents=None,
        highlight=False,
        tile_size=TILE_PIXELS,
        subdivs=3
    ):
        """
        Render a tile and cache the result for a multi-agent environment
        """
        # Hash map lookup key for the cache (agent_id distinguishes agent colors)
        key = (agent_dir, highlight, tile_size, agent_id)
        key = obj.encode() + key if obj else key

        if key in cls.tile_cache:
            return cls.tile_cache[key]

        img = np.zeros(shape=(tile_size * subdivs, tile_size * subdivs, 3), dtype=np.uint8)

        # Draw the grid lines (top and left edges)
        fill_coords(img, point_in_rect(0, 0.031, 0, 1), (100, 100, 100))
        fill_coords(img, point_in_rect(0, 1, 0, 0.031), (100, 100, 100))

        if obj is not None:
            obj.render(img)

        # Overlay an agent on top, colored by its id
        if agent_dir is not None and num_agents is not None and agent_id is not None:
            tri_fn = point_in_triangle(
                (0.12, 0.19),
                (0.87, 0.50),
                (0.12, 0.81),
            )
            # Rotate the agent based on its direction
            tri_fn = rotate_fn(tri_fn, cx=0.5, cy=0.5, theta=0.5*math.pi*agent_dir)
            fill_coords(img, tri_fn, tuple(COLORS[IDX_TO_COLOR[agent_id % len(COLOR_NAMES)]]))

        # Highlight the cell if needed
        if highlight:
            highlight_img(img)

        # Downsample the image to perform supersampling/anti-aliasing
        img = downsample(img, subdivs)

        # Cache the rendered tile
        cls.tile_cache[key] = img

        return img

    def ma_render(
        self,
        tile_size,
        agent_poses=None,
        agent_dirs=None,
        highlight_mask=None
    ):
        """
        Render this grid at a given scale (multi-agent variant)
        :param tile_size: tile size in pixels
        """
        if highlight_mask is None:
            highlight_mask = np.zeros(shape=(self.width, self.height), dtype=bool)

        # Compute the total grid size
        width_px = self.width * tile_size
        height_px = self.height * tile_size

        img = np.zeros(shape=(height_px, width_px, 3), dtype=np.uint8)

        # Render the grid
        for j in range(0, self.height):
            for i in range(0, self.width):
                cell = self.get(i, j)

                # Find which agent (if any) stands on this cell
                agent_here = False
                agent_index = None
                if agent_poses is not None:
                    for agent_index, p in enumerate(agent_poses):
                        if np.all(np.equal(p, np.array((i, j)))):
                            agent_here = True
                            break

                tile_img = Grid.ma_render_tile(
                    cell,
                    agent_id=agent_index if agent_here else None,
                    agent_dir=agent_dirs[agent_index] if agent_here else None,
                    num_agents=len(agent_poses) if agent_here else None,
                    highlight=highlight_mask[i, j],
                    tile_size=tile_size
                )
                ymin = j * tile_size
                ymax = (j+1) * tile_size
                xmin = i * tile_size
                xmax = (i+1) * tile_size
                img[ymin:ymax, xmin:xmax, :] = tile_img

        return img

    def encode(self, vis_mask=None):
        """
        Produce a compact numpy encoding of the grid

        :param vis_mask: boolean visibility mask; cells that are not visible
            are left as zeros ('unseen')
        """
        if vis_mask is None:
            vis_mask = np.ones((self.width, self.height), dtype=bool)

        array = np.zeros((self.width, self.height, 3), dtype='uint8')

        for i in range(self.width):
            for j in range(self.height):
                if vis_mask[i, j]:
                    v = self.get(i, j)
                    if v is None:
                        array[i, j, 0] = OBJECT_TO_IDX['empty']
                        array[i, j, 1] = 0
                        array[i, j, 2] = 0
                    else:
                        array[i, j, :] = v.encode()

        return array

    def ma_encode(self, vis_mask=None, agent_poses=None):
        """
        Produce a compact numpy encoding of the grid for multiagent setting

        :param agent_poses: sequence of (position, color_index, direction)
            triples (as built by MultiAgentMiniGridEnv.hash); empty cells
            occupied by an agent are encoded as 'agent' objects
        """
        if vis_mask is None:
            vis_mask = np.ones((self.width, self.height), dtype=bool)

        array = np.zeros((self.width, self.height, 3), dtype='uint8')

        for i in range(self.width):
            for j in range(self.height):
                if not vis_mask[i, j]:
                    continue

                v = self.get(i, j)
                if v is not None:
                    array[i, j, :] = v.encode()
                    continue

                # Empty cell: it may still be occupied by an agent
                agent_id = None
                if agent_poses is not None:
                    for idx, agent_pos in enumerate(agent_poses):
                        if (np.array((i, j)) == agent_pos[0]).all():
                            agent_id = idx
                            break

                if agent_id is not None:
                    array[i, j, 0] = OBJECT_TO_IDX['agent']
                    array[i, j, 1] = agent_poses[agent_id][1] % len(COLOR_NAMES)
                    array[i, j, 2] = agent_poses[agent_id][2]
                else:
                    array[i, j, 0] = OBJECT_TO_IDX['empty']
                    array[i, j, 1] = 0
                    array[i, j, 2] = 0

        return array

    @staticmethod
    def decode(array):
        """
        Decode an array grid encoding back into a grid

        Returns (grid, vis_mask); vis_mask marks cells not encoded 'unseen'.
        """
        width, height, channels = array.shape
        assert channels == 3

        vis_mask = np.ones(shape=(width, height), dtype=bool)

        grid = Grid(width, height)
        for i in range(width):
            for j in range(height):
                type_idx, color_idx, state = array[i, j]
                v = WorldObj.decode(type_idx, color_idx, state)
                grid.set(i, j, v)
                vis_mask[i, j] = (type_idx != OBJECT_TO_IDX['unseen'])

        return grid, vis_mask

    def process_vis(grid, agent_pos):
        """Compute the visibility mask seeded at agent_pos.

        Sweeps row by row away from the agent, spreading visibility sideways
        and forward except past cells whose see_behind() is False; cells that
        end up invisible are cleared to None in place. Returns the mask.
        """
        mask = np.zeros(shape=(grid.width, grid.height), dtype=bool)
        mask[agent_pos[0], agent_pos[1]] = True

        for j in reversed(range(0, grid.height)):
            # Left-to-right pass: propagate visibility rightward/up
            for i in range(0, grid.width-1):
                if not mask[i, j]:
                    continue
                cell = grid.get(i, j)
                if cell and not cell.see_behind():
                    continue
                mask[i+1, j] = True
                if j > 0:
                    mask[i+1, j-1] = True
                    mask[i, j-1] = True

            # Right-to-left pass: propagate visibility leftward/up
            for i in reversed(range(1, grid.width)):
                if not mask[i, j]:
                    continue
                cell = grid.get(i, j)
                if cell and not cell.see_behind():
                    continue
                mask[i-1, j] = True
                if j > 0:
                    mask[i-1, j-1] = True
                    mask[i, j-1] = True

        # Hide everything that is not visible
        for j in range(0, grid.height):
            for i in range(0, grid.width):
                if not mask[i, j]:
                    grid.set(i, j, None)

        return mask
class MiniGridEnv(gym.Env):
    """
    2D grid world game environment
    """

    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second' : 10
    }

    # Enumeration of possible actions
    class Actions(IntEnum):
        # Turn left, turn right, move forward
        left = 0
        right = 1
        forward = 2
        # Pick up an object
        pickup = 3
        # Drop an object
        drop = 4
        # Toggle/activate an object
        toggle = 5
        # Done completing task
        done = 6

    def __init__(
        self,
        grid_size=None,
        width=None,
        height=None,
        max_steps=100,
        see_through_walls=False,
        seed=1337,
        agent_view_size=7
    ):
        """
        :param grid_size: side of a square grid (mutually exclusive with width/height)
        :param width: grid width in cells
        :param height: grid height in cells
        :param max_steps: step limit after which the episode is forced done
        :param see_through_walls: if True, skip the occlusion computation
        :param seed: seed for the environment RNG
        :param agent_view_size: odd number of cells in the agent's square view
        """
        # Can't set both grid_size and width/height
        if grid_size:
            assert width is None and height is None
            width = grid_size
            height = grid_size

        # Action enumeration for this environment
        self.actions = MiniGridEnv.Actions

        # Actions are discrete integer values
        self.action_space = spaces.Discrete(len(self.actions))

        # Number of cells (width and height) in the agent view
        assert agent_view_size % 2 == 1
        assert agent_view_size >= 3
        self.agent_view_size = agent_view_size

        # Observations are dictionaries containing an
        # encoding of the grid and a textual 'mission' string
        self.observation_space = spaces.Box(
            low=0,
            high=255,
            shape=(self.agent_view_size, self.agent_view_size, 3),
            dtype='uint8'
        )
        self.observation_space = spaces.Dict({
            'image': self.observation_space
        })

        # Range of possible rewards
        self.reward_range = (0, 1)

        # Window to use for human rendering mode
        self.window = None

        # Environment configuration
        self.width = width
        self.height = height
        self.max_steps = max_steps
        self.see_through_walls = see_through_walls

        # Current position and direction of the agent
        self.agent_pos = None
        self.agent_dir = None

        # Initialize the RNG
        self.seed(seed=seed)

        # Initialize the state
        self.reset()

    def reset(self):
        """Start a new episode and return the first observation."""
        # Current position and direction of the agent
        self.agent_pos = None
        self.agent_dir = None

        # Generate a new random grid at the start of each episode
        # To keep the same grid for each episode, call env.seed() with
        # the same seed before calling env.reset()
        self._gen_grid(self.width, self.height)

        # These fields should be defined by _gen_grid
        assert self.agent_pos is not None
        assert self.agent_dir is not None

        # Check that the agent doesn't overlap with an object
        start_cell = self.grid.get(*self.agent_pos)
        assert start_cell is None or start_cell.can_overlap()

        # Item picked up, being carried, initially nothing
        self.carrying = None

        # Step count since episode start
        self.step_count = 0

        # Return first observation
        return self.gen_obs()

    def seed(self, seed=1337):
        """Seed the random number generator."""
        self.np_random, _ = seeding.np_random(seed)
        return [seed]

    def hash(self, size=16):
        """Compute a hash that uniquely identifies the current state of the environment.
        :param size: Size of the hashing
        """
        sample_hash = hashlib.sha256()

        to_encode = [self.grid.encode(), self.agent_pos, self.agent_dir]
        for item in to_encode:
            sample_hash.update(str(item).encode('utf8'))

        return sample_hash.hexdigest()[:size]

    @property
    def steps_remaining(self):
        # Number of steps left before the episode is forcibly terminated
        return self.max_steps - self.step_count

    def __str__(self):
        """
        Produce a pretty string of the environment's grid along with the agent.
        A grid cell is represented by 2-character string, the first one for
        the object and the second one for the color.
        """
        # Map of object types to short string
        OBJECT_TO_STR = {
            'wall'  : 'W',
            'floor' : 'F',
            'door'  : 'D',
            'key'   : 'K',
            'ball'  : 'A',
            'box'   : 'B',
            'goal'  : 'G',
            'lava'  : 'V',
        }

        # Map agent's direction to short string
        AGENT_DIR_TO_STR = {
            0: '>',
            1: 'V',
            2: '<',
            3: '^'
        }

        # 'output' instead of 'str': don't shadow the builtin
        output = ''

        for j in range(self.grid.height):
            for i in range(self.grid.width):
                if i == self.agent_pos[0] and j == self.agent_pos[1]:
                    output += 2 * AGENT_DIR_TO_STR[self.agent_dir]
                    continue

                c = self.grid.get(i, j)

                if c is None:
                    output += '  '
                    continue

                if c.type == 'door':
                    if c.is_open:
                        output += '__'
                    elif c.is_locked:
                        output += 'L' + c.color[0].upper()
                    else:
                        output += 'D' + c.color[0].upper()
                    continue

                output += OBJECT_TO_STR[c.type] + c.color[0].upper()

            if j < self.grid.height - 1:
                output += '\n'

        return output

    def _gen_grid(self, width, height):
        """Generate the grid layout; must be implemented by each environment."""
        # NotImplementedError instead of assert: asserts vanish under `python -O`
        raise NotImplementedError("_gen_grid needs to be implemented by each environment")

    def _reward(self):
        """
        Compute the reward to be given upon success
        """
        # Decays linearly from 1 toward 0.1 as steps are consumed
        return 1 - 0.9 * (self.step_count / self.max_steps)

    def _rand_int(self, low, high):
        """
        Generate random integer in [low,high[
        """
        return self.np_random.randint(low, high)

    def _rand_float(self, low, high):
        """
        Generate random float in [low,high[
        """
        return self.np_random.uniform(low, high)

    def _rand_bool(self):
        """
        Generate random boolean value
        """
        return (self.np_random.randint(0, 2) == 0)

    def _rand_elem(self, iterable):
        """
        Pick a random element in a list
        """
        lst = list(iterable)
        idx = self._rand_int(0, len(lst))
        return lst[idx]

    def _rand_subset(self, iterable, num_elems):
        """
        Sample a random subset of distinct elements of a list
        """
        lst = list(iterable)
        assert num_elems <= len(lst)

        out = []
        while len(out) < num_elems:
            elem = self._rand_elem(lst)
            lst.remove(elem)
            out.append(elem)

        return out

    def _rand_color(self):
        """
        Generate a random color name (string)
        """
        return self._rand_elem(COLOR_NAMES)

    def _rand_pos(self, xLow, xHigh, yLow, yHigh):
        """
        Generate a random (x,y) position tuple
        """
        return (
            self.np_random.randint(xLow, xHigh),
            self.np_random.randint(yLow, yHigh)
        )

    def place_obj(self,
        obj,
        top=None,
        size=None,
        reject_fn=None,
        max_tries=math.inf
    ):
        """
        Place an object at an empty position in the grid

        :param top: top-left position of the rectangle where to place
        :param size: size of the rectangle where to place
        :param reject_fn: function to filter out potential positions
        """
        if top is None:
            top = (0, 0)
        else:
            top = (max(top[0], 0), max(top[1], 0))

        if size is None:
            size = (self.grid.width, self.grid.height)

        num_tries = 0
        while True:
            # This is to handle with rare cases where rejection sampling
            # gets stuck in an infinite loop
            if num_tries > max_tries:
                raise RecursionError('rejection sampling failed in place_obj')
            num_tries += 1

            pos = np.array((
                self._rand_int(top[0], min(top[0] + size[0], self.grid.width)),
                self._rand_int(top[1], min(top[1] + size[1], self.grid.height))
            ))

            # Don't place the object on top of another object
            if self.grid.get(*pos) is not None:
                continue

            # Don't place the object where the agent is
            if np.array_equal(pos, self.agent_pos):
                continue

            # Check if there is a filtering criterion
            if reject_fn and reject_fn(self, pos):
                continue

            break

        self.grid.set(*pos, obj)

        if obj is not None:
            obj.init_pos = pos
            obj.cur_pos = pos

        return pos

    def put_obj(self, obj, i, j):
        """
        Put an object at a specific position in the grid
        """
        self.grid.set(i, j, obj)
        obj.init_pos = (i, j)
        obj.cur_pos = (i, j)

    def place_agent(
        self,
        top=None,
        size=None,
        rand_dir=True,
        max_tries=math.inf
    ):
        """
        Set the agent's starting point at an empty position in the grid
        """
        self.agent_pos = None
        pos = self.place_obj(None, top, size, max_tries=max_tries)
        self.agent_pos = pos

        if rand_dir:
            self.agent_dir = self._rand_int(0, 4)

        return pos

    @property
    def dir_vec(self):
        """
        Get the direction vector for the agent, pointing in the direction
        of forward movement.
        """
        assert self.agent_dir >= 0 and self.agent_dir < 4
        return DIR_TO_VEC[self.agent_dir]

    @property
    def right_vec(self):
        """
        Get the vector pointing to the right of the agent.
        """
        dx, dy = self.dir_vec
        return np.array((-dy, dx))

    @property
    def front_pos(self):
        """
        Get the position of the cell that is right in front of the agent
        """
        return self.agent_pos + self.dir_vec

    def get_view_coords(self, i, j):
        """
        Translate and rotate absolute grid coordinates (i, j) into the
        agent's partially observable view (sub-grid). Note that the resulting
        coordinates may be negative or outside of the agent's view size.
        """
        ax, ay = self.agent_pos
        dx, dy = self.dir_vec
        rx, ry = self.right_vec

        # Compute the absolute coordinates of the top-left view corner
        sz = self.agent_view_size
        hs = self.agent_view_size // 2
        tx = ax + (dx * (sz-1)) - (rx * hs)
        ty = ay + (dy * (sz-1)) - (ry * hs)

        lx = i - tx
        ly = j - ty

        # Project the coordinates of the object relative to the top-left
        # corner onto the agent's own coordinate system
        vx = (rx*lx + ry*ly)
        vy = -(dx*lx + dy*ly)

        return vx, vy

    def get_view_exts(self):
        """
        Get the extents of the square set of tiles visible to the agent
        Note: the bottom extent indices are not included in the set
        """
        # Facing right
        if self.agent_dir == 0:
            topX = self.agent_pos[0]
            topY = self.agent_pos[1] - self.agent_view_size // 2
        # Facing down
        elif self.agent_dir == 1:
            topX = self.agent_pos[0] - self.agent_view_size // 2
            topY = self.agent_pos[1]
        # Facing left
        elif self.agent_dir == 2:
            topX = self.agent_pos[0] - self.agent_view_size + 1
            topY = self.agent_pos[1] - self.agent_view_size // 2
        # Facing up
        elif self.agent_dir == 3:
            topX = self.agent_pos[0] - self.agent_view_size // 2
            topY = self.agent_pos[1] - self.agent_view_size + 1
        else:
            assert False, "invalid agent direction"

        botX = topX + self.agent_view_size
        botY = topY + self.agent_view_size

        return (topX, topY, botX, botY)

    def relative_coords(self, x, y):
        """
        Check if a grid position belongs to the agent's field of view, and returns the corresponding coordinates
        """
        vx, vy = self.get_view_coords(x, y)

        if vx < 0 or vy < 0 or vx >= self.agent_view_size or vy >= self.agent_view_size:
            return None

        return vx, vy

    def in_view(self, x, y):
        """
        check if a grid position is visible to the agent
        """
        return self.relative_coords(x, y) is not None

    def agent_sees(self, x, y):
        """
        Check if a non-empty grid position is visible to the agent
        """
        coordinates = self.relative_coords(x, y)
        if coordinates is None:
            return False
        vx, vy = coordinates

        obs = self.gen_obs()
        obs_grid, _ = Grid.decode(obs['image'])
        obs_cell = obs_grid.get(vx, vy)
        world_cell = self.grid.get(x, y)

        return obs_cell is not None and obs_cell.type == world_cell.type

    def step(self, action):
        """Apply one action; return (obs, reward, done, info)."""
        self.step_count += 1

        reward = 0
        done = False

        # Get the position in front of the agent
        fwd_pos = self.front_pos

        # Get the contents of the cell in front of the agent
        fwd_cell = self.grid.get(*fwd_pos)

        # Rotate left
        if action == self.actions.left:
            self.agent_dir -= 1
            if self.agent_dir < 0:
                self.agent_dir += 4

        # Rotate right
        elif action == self.actions.right:
            self.agent_dir = (self.agent_dir + 1) % 4

        # Move forward
        elif action == self.actions.forward:
            if fwd_cell is None or fwd_cell.can_overlap():
                self.agent_pos = fwd_pos
            if fwd_cell is not None and fwd_cell.type == 'goal':
                done = True
                reward = self._reward()
            if fwd_cell is not None and fwd_cell.type == 'lava':
                done = True

        # Pick up an object
        elif action == self.actions.pickup:
            if fwd_cell and fwd_cell.can_pickup():
                if self.carrying is None:
                    self.carrying = fwd_cell
                    self.carrying.cur_pos = np.array([-1, -1])
                    self.grid.set(*fwd_pos, None)

        # Drop an object
        elif action == self.actions.drop:
            if not fwd_cell and self.carrying:
                self.grid.set(*fwd_pos, self.carrying)
                self.carrying.cur_pos = fwd_pos
                self.carrying = None

        # Toggle/activate an object
        elif action == self.actions.toggle:
            if fwd_cell:
                fwd_cell.toggle(self, fwd_pos)

        # Done action (not used by default)
        elif action == self.actions.done:
            pass

        else:
            assert False, "unknown action"

        if self.step_count >= self.max_steps:
            done = True

        obs = self.gen_obs()

        return obs, reward, done, {}

    def gen_obs_grid(self):
        """
        Generate the sub-grid observed by the agent.
        This method also outputs a visibility mask telling us which grid
        cells the agent can actually see.
        """
        topX, topY, botX, botY = self.get_view_exts()

        grid = self.grid.slice(topX, topY, self.agent_view_size, self.agent_view_size)

        for i in range(self.agent_dir + 1):
            grid = grid.rotate_left()

        # Process occluders and visibility
        # Note that this incurs some performance cost
        if not self.see_through_walls:
            vis_mask = grid.process_vis(agent_pos=(self.agent_view_size // 2, self.agent_view_size - 1))
        else:
            # dtype=bool: np.bool was a deprecated alias, removed in NumPy 1.24
            vis_mask = np.ones(shape=(grid.width, grid.height), dtype=bool)

        # Make it so the agent sees what it's carrying
        # We do this by placing the carried object at the agent's position
        # in the agent's partially observable view
        agent_pos = grid.width // 2, grid.height - 1
        if self.carrying:
            grid.set(*agent_pos, self.carrying)
        else:
            grid.set(*agent_pos, None)

        return grid, vis_mask

    def gen_obs(self):
        """
        Generate the agent's view (partially observable, low-resolution encoding)
        """
        grid, vis_mask = self.gen_obs_grid()

        # Encode the partially observable view into a numpy array
        image = grid.encode(vis_mask)

        assert hasattr(self, 'mission'), "environments must define a textual mission string"

        # Observations are dictionaries containing:
        # - an image (partially observable view of the environment)
        # - the agent's direction/orientation (acting as a compass)
        # - a textual mission string (instructions for the agent)
        obs = {
            'image': image,
            'direction': self.agent_dir,
            'mission': self.mission
        }

        return obs

    def get_obs_render(self, obs, tile_size=TILE_PIXELS//2):
        """
        Render an agent observation for visualization
        """
        grid, vis_mask = Grid.decode(obs)

        # Render the whole grid
        img = grid.render(
            tile_size,
            agent_pos=(self.agent_view_size // 2, self.agent_view_size - 1),
            agent_dir=3,
            highlight_mask=vis_mask
        )

        return img

    def render(self, mode='human', close=False, highlight=True, tile_size=TILE_PIXELS):
        """
        Render the whole-grid human view
        """
        if close:
            if self.window:
                self.window.close()
            return

        if mode == 'human' and not self.window:
            import gym_minigrid.window
            self.window = gym_minigrid.window.Window('gym_minigrid')
            self.window.show(block=False)

        # Compute which cells are visible to the agent
        _, vis_mask = self.gen_obs_grid()

        # Compute the world coordinates of the bottom-left corner
        # of the agent's view area
        f_vec = self.dir_vec
        r_vec = self.right_vec
        top_left = self.agent_pos + f_vec * (self.agent_view_size-1) - r_vec * (self.agent_view_size // 2)

        # Mask of which cells to highlight
        highlight_mask = np.zeros(shape=(self.width, self.height), dtype=bool)

        # For each cell in the visibility mask
        for vis_j in range(0, self.agent_view_size):
            for vis_i in range(0, self.agent_view_size):
                # If this cell is not visible, don't highlight it
                if not vis_mask[vis_i, vis_j]:
                    continue

                # Compute the world coordinates of this cell
                abs_i, abs_j = top_left - (f_vec * vis_j) + (r_vec * vis_i)

                if abs_i < 0 or abs_i >= self.width:
                    continue
                if abs_j < 0 or abs_j >= self.height:
                    continue

                # Mark this cell to be highlighted
                highlight_mask[abs_i, abs_j] = True

        # Render the whole grid
        img = self.grid.render(
            tile_size,
            self.agent_pos,
            self.agent_dir,
            highlight_mask=highlight_mask if highlight else None
        )

        if mode == 'human':
            self.window.show_img(img)
            self.window.set_caption(self.mission)

        return img

    def close(self):
        """Close the rendering window, if one was opened."""
        if self.window:
            self.window.close()
        return
class MultiAgentMiniGridEnv(gym.Env):
"""
2D grid world game environment with multi-agent support
"""
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : 10
}
# Enumeration of possible actions
    class Actions(IntEnum):
        """Discrete action set; values double as indices into the action space."""
        # Turn left, turn right, move forward
        left = 0
        right = 1
        forward = 2
        # Pick up an object
        pickup = 3
        # Drop an object
        drop = 4
        # Toggle/activate an object
        toggle = 5
        # Done completing task
        done = 6
def __init__(
self,
grid_size=None,
width=None,
height=None,
max_steps=100,
see_through_walls=False,
seed=1337,
agent_view_size=7
):
# Can't set both grid_size and width/height
if grid_size:
assert width == None and height == None
width = grid_size
height = grid_size
# Action enumeration for this environment
self.actions = MultiAgentMiniGridEnv.Actions
# Actions are discrete integer values
self.action_space = spaces.Discrete(len(self.actions))
# Number of cells (width and height) in the agent view
assert agent_view_size % 2 == 1
assert agent_view_size >= 3
self.agent_view_size = agent_view_size
# Observations are dictionaries containing an
# encoding of the grid and a textual 'mission' string
self.observation_space = spaces.Box(
low=0,
high=255,
shape=(self.agent_view_size, self.agent_view_size, 3),
dtype='uint8'
)
self.observation_space = spaces.Dict({
'image': self.observation_space
})
# Range of possible rewards
self.reward_range = (0, 1)
# Window to use for human rendering mode
self.window = None
# Environment configuration
self.width = width
self.height = height
self.max_steps = max_steps
self.see_through_walls = see_through_walls
# Current positions and directions of the agents
self.agent_poses = []
self.agent_dirs = []
# Initialize the RNG
self.seed(seed=seed)
# Initialize the state
self.reset()
    def reset(self):
        """Start a new episode (regenerate the grid) and return the first observation."""
        # Current positions and directions of the agents
        self.agent_poses = []
        self.agent_dirs = []
        # Generate a new random grid at the start of each episode
        # To keep the same grid for each episode, call env.seed() with
        # the same seed before calling env.reset()
        self._gen_grid(self.width, self.height)
        # These fields should be defined by _gen_grid
        assert self.agent_poses
        assert self.agent_dirs
        # Check that the agent doesn't overlap with an object
        for pos in self.agent_poses:
            start_cell = self.grid.get(*pos)
            assert start_cell is None or start_cell.can_overlap()
        # Item picked up, being carried, initially nothing for all agents
        self.carrying_objects = [None for i in self.agent_poses]
        # Step count since episode start
        self.step_count = 0
        # Return first observation
        obs = self.gen_obs()
        return obs
def seed(self, seed=1337):
# Seed the random number generator
self.np_random, _ = seeding.np_random(seed)
return [seed]
    def hash(self, size=16):
        """Compute a hash that uniquely identifies the current state of the environment.

        :param size: number of hex characters of the SHA-256 digest to keep
        :return: truncated hexadecimal digest string
        """
        sample_hash = hashlib.sha256()
        # Fold the encoded grid plus every agent's pose and direction into the digest
        agent_poses = [(pos, agent_id, self.agent_dirs[agent_id]) for agent_id, pos in enumerate(self.agent_poses)]
        to_encode = [self.grid.ma_encode(agent_poses=agent_poses), self.agent_poses, self.agent_dirs]
        for item in to_encode:
            # str() of each component is hashed; relies on stable reprs
            sample_hash.update(str(item).encode('utf8'))
        return sample_hash.hexdigest()[:size]
@property
def steps_remaining(self):
return self.max_steps - self.step_count
def __str__(self):
"""
Produce a pretty string of the environment's grid along with the agent.
A grid cell is represented by 2-character string, the first one for
the object and the second one for the color.
"""
# Map of object types to short string
OBJECT_TO_STR = {
'wall' : 'W',
'floor' : 'F',
'door' : 'D',
'key' : 'K',
'ball' : 'A',
'box' : 'B',
'goal' : 'G',
'lava' : 'V',
}
# Short string for opened door
OPENDED_DOOR_IDS = '_'
# Map agent's direction to short string
AGENT_DIR_TO_STR = {
0: '>',
1: 'V',
2: '<',
3: '^'
}
str = ''
for j in range(self.grid.height):
for i in range(self.grid.width):
if np.array((i, j)) in self.agent_poses:
str += 2 * AGENT_DIR_TO_STR[self.agent_dirs[self.agent_poses.index(np.array((i, j)))]]
continue
c = self.grid.get(i, j)
if c == None:
str += ' '
continue
if c.type == 'door':
if c.is_open:
str += '__'
elif c.is_locked:
str += 'L' + c.color[0].upper()
else:
str += 'D' + c.color[0].upper()
continue
str += OBJECT_TO_STR[c.type] + c.color[0].upper()
if j < self.grid.height - 1:
str += '\n'
return str
def _gen_grid(self, width, height):
assert False, "_gen_grid needs to be implemented by each environment"
def _reward(self):
"""
Compute the reward to be given upon success
"""
return 1 - 0.9 * (self.step_count / self.max_steps)
def _rand_int(self, low, high):
"""
Generate random integer in [low,high[
"""
return self.np_random.randint(low, high)
def _rand_float(self, low, high):
"""
Generate random float in [low,high[
"""
return self.np_random.uniform(low, high)
def _rand_bool(self):
"""
Generate random boolean value
"""
return (self.np_random.randint(0, 2) == 0)
def _rand_elem(self, iterable):
"""
Pick a random element in a list
"""
lst = list(iterable)
idx = self._rand_int(0, len(lst))
return lst[idx]
def _rand_subset(self, iterable, num_elems):
"""
Sample a random subset of distinct elements of a list
"""
lst = list(iterable)
assert num_elems <= len(lst)
out = []
while len(out) < num_elems:
elem = self._rand_elem(lst)
lst.remove(elem)
out.append(elem)
return out
    def _rand_color(self):
        """
        Generate a random color name (string), drawn from COLOR_NAMES.
        """
        return self._rand_elem(COLOR_NAMES)
def _rand_pos(self, xLow, xHigh, yLow, yHigh):
"""
Generate a random (x,y) position tuple
"""
return (
self.np_random.randint(xLow, xHigh),
self.np_random.randint(yLow, yHigh)
)
def place_obj(self,
obj,
top=None,
size=None,
reject_fn=None,
max_tries=math.inf
):
"""
Place an object at an empty position in the grid
:param top: top-left position of the rectangle where to place
:param size: size of the rectangle where to place
:param reject_fn: function to filter out potential positions
"""
if top is None:
top = (0, 0)
else:
top = (max(top[0], 0), max(top[1], 0))
if size is None:
size = (self.grid.width, self.grid.height)
num_tries = 0
while True:
# This is to handle with rare cases where rejection sampling
# gets stuck in an infinite loop
if num_tries > max_tries:
raise RecursionError('rejection sampling failed in place_obj')
num_tries += 1
pos = np.array((
self._rand_int(top[0], min(top[0] + size[0], self.grid.width)),
self._rand_int(top[1], min(top[1] + size[1], self.grid.height))
))
# Don't place the object on top of another object
if self.grid.get(*pos) != None:
continue
conflict = False
for p in self.agent_poses:
if np.all(np.equal(p, pos)):
conflict = True
break
if conflict:
continue
# Check if there is a filtering criterion
if reject_fn and reject_fn(self, pos):
continue
break
self.grid.set(*pos, obj)
if obj is not None:
obj.init_pos = pos
obj.cur_pos = pos
return pos
def put_obj(self, obj, i, j):
"""
Put an object at a specific position in the grid
"""
self.grid.set(i, j, obj)
obj.init_pos = (i, j)
obj.cur_pos = (i, j)
    def place_agent(
        self,
        top=None,
        size=None,
        rand_dir=True,
        max_tries=math.inf
    ):
        """
        Set an agent's starting point at an empty position in the grid

        Appends to self.agent_poses (and, when rand_dir, to self.agent_dirs),
        so each call registers one additional agent.

        :param top: top-left of the sampling rectangle (see place_obj)
        :param size: size of the sampling rectangle
        :param rand_dir: also append a random facing direction in [0, 4)
        :param max_tries: forwarded to place_obj
        :return: the chosen position
        """
        pos = self.place_obj(None, top, size, max_tries=max_tries)
        self.agent_poses.append(pos)
        # NOTE(review): when rand_dir is False no direction is appended, so
        # agent_poses and agent_dirs can diverge in length — confirm callers
        # always set the direction themselves in that case.
        if rand_dir:
            self.agent_dirs.append(self._rand_int(0, 4))
        return pos
def dir_vec(self, agent_id):
"""
Get the direction vector for an agent, pointing in the direction
of forward movement.
"""
assert agent_id < len(self.agent_dirs) and self.agent_dirs[agent_id] >= 0 and self.agent_dirs[agent_id] < 4
return DIR_TO_VEC[self.agent_dirs[agent_id]]
def right_vec(self, agent_id):
"""
Get the vector pointing to the right of an agent.
"""
dx, dy = self.dir_vec(agent_id)
return np.array((-dy, dx))
def front_pos(self, agent_id):
"""
Get the position of the cell that is right in front of an agent
"""
return self.agent_poses[agent_id] + self.dir_vec(agent_id)
    def get_view_coords(self, agent_id, i, j):
        """
        Translate and rotate absolute grid coordinates (i, j) into an
        agent's partially observable view (sub-grid). Note that the resulting
        coordinates may be negative or outside of the agent's view size.

        :param agent_id: index of the agent whose view frame to project into
        :param i: absolute grid x coordinate
        :param j: absolute grid y coordinate
        :return: (vx, vy) coordinates in the agent's view frame
        """
        ax, ay = self.agent_poses[agent_id]
        dx, dy = self.dir_vec(agent_id)
        rx, ry = self.right_vec(agent_id)
        # Compute the absolute coordinates of the top-left view corner
        sz = self.agent_view_size
        hs = self.agent_view_size // 2
        tx = ax + (dx * (sz-1)) - (rx * hs)
        ty = ay + (dy * (sz-1)) - (ry * hs)
        # Offset of (i, j) relative to the top-left view corner
        lx = i - tx
        ly = j - ty
        # Project the coordinates of the object relative to the top-left
        # corner onto the agent's own coordinate system
        vx = (rx*lx + ry*ly)
        vy = -(dx*lx + dy*ly)
        return vx, vy
def get_view_exts(self, agent_id):
"""
Get the extents of the square set of tiles visible to an agent
Note: the bottom extent indices are not included in the set
"""
# Facing right
if self.agent_dirs[agent_id] == 0:
topX = self.agent_poses[agent_id][0]
topY = self.agent_poses[agent_id][1] - self.agent_view_size // 2
# Facing down
elif self.agent_dirs[agent_id] == 1:
topX = self.agent_poses[agent_id][0] - self.agent_view_size // 2
topY = self.agent_poses[agent_id][1]
# Facing left
elif self.agent_dirs[agent_id] == 2:
topX = self.agent_poses[agent_id][0] - self.agent_view_size + 1
topY = self.agent_poses[agent_id][1] - self.agent_view_size // 2
# Facing up
elif self.agent_dirs[agent_id] == 3:
topX = self.agent_poses[agent_id][0] - self.agent_view_size // 2
topY = self.agent_poses[agent_id][1] - self.agent_view_size + 1
else:
assert False, "invalid agent direction"
botX = topX + self.agent_view_size
botY = topY + self.agent_view_size
return (topX, topY, botX, botY)
def relative_coords(self, agent_id, x, y):
"""
Check if a grid position belongs to an agent's field of view, and returns the corresponding coordinates
"""
vx, vy = self.get_view_coords(agent_id, x, y)
if vx < 0 or vy < 0 or vx >= self.agent_view_size or vy >= self.agent_view_size:
return None
return vx, vy
def in_view(self, agent_id, x, y):
"""
check if a grid position is visible to an agent
"""
return self.relative_coords(agent_id, x, y) is not None
def agent_sees(self, agent_id, x, y):
"""
Check if a non-empty grid position is visible to an agent
"""
coordinates = self.relative_coords(agent_id, x, y)
if coordinates is None:
return False
vx, vy = coordinates
obs = self.gen_obs()
obs_grid, _ = Grid.decode(obs['image'])
obs_cell = obs_grid.get(vx, vy)
world_cell = self.grid.get(x, y)
return obs_cell is not None and obs_cell.type == world_cell.type
    def collision_checker(self, curr_poses, fwd_poses, fwd_cells, next_poses, drop_locations, pickup_locations, open_door_locations, close_door_locations, actions, agent_id):
        """
        Check if action will be valid in current position

        Recursively resolves chains of agents: an agent may move into a cell
        currently occupied by another agent only if that agent's own action
        also validates (i.e. it will vacate the cell this step).

        :param curr_poses: dict mapping (x, y) tuple -> id of the agent there now
        :param fwd_poses: list of the cells directly in front of each agent
        :param fwd_cells: list of grid contents of those forward cells
        :param next_poses: dict mapping (x, y) tuple -> ids of agents that will
            occupy/keep that cell next step
        :param drop_locations: dict of cells agents attempt to drop into
        :param pickup_locations: dict of cells agents attempt to pick up from
        :param open_door_locations: dict of doors agents attempt to open
        :param close_door_locations: dict of doors agents attempt to close
        :param actions: list of actions, indexed by agent id
        :param agent_id: id of the agent whose action is being validated
        :return: True when the action can be executed without conflicts
        """
        # Unintruding action is always valid
        if actions[agent_id] in [self.actions.left, self.actions.right, self.actions.done]:
            return True
        # Forward action
        elif actions[agent_id] == self.actions.forward:
            # World allows agent to move forward
            if fwd_cells[agent_id] == None or fwd_cells[agent_id].can_overlap() or (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) in pickup_locations and len(pickup_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])]) == 1 or (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) in open_door_locations and len(open_door_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])]) == 1:
                # Other agents trying to access same location, so fail
                if len(next_poses[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])]) > 1 or (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) in drop_locations or (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) in close_door_locations:
                    return False
                # Other agent currently in spot, so have to recursively check if they will move
                elif (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) in curr_poses:
                    # Agents can't move forward when another agent is moving forward toward them in opposite directions
                    if (self.agent_poses[agent_id][0], self.agent_poses[agent_id][1]) in next_poses and curr_poses[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])] in next_poses[(self.agent_poses[agent_id][0], self.agent_poses[agent_id][1])]:
                        return False
                    # Recursively check validity of move at new position
                    return self.collision_checker(curr_poses, fwd_poses, fwd_cells, next_poses, drop_locations, pickup_locations, open_door_locations, close_door_locations, actions, curr_poses[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])])
                # Completely valid move forward
                else:
                    return True
            # Invalid attempt to move forward according to world
            else:
                return False
        # Drop action
        elif actions[agent_id] == self.actions.drop:
            # World allows agent to drop item
            if (not fwd_cells[agent_id] or (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) in pickup_locations and len(pickup_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])]) == 1) and self.carrying_objects[agent_id]:
                # Other agents trying to access same location, so fail
                if (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) in next_poses or len(drop_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])]) > 1:
                    return False
                # Other agent currently in spot, so have to recursively check if they will move
                elif (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) in curr_poses:
                    return self.collision_checker(curr_poses, fwd_poses, fwd_cells, next_poses, drop_locations, pickup_locations, open_door_locations, close_door_locations, actions, curr_poses[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])])
                # Completely valid move to drop item
                else:
                    return True
            # Invalid attempt to drop item according to world
            else:
                return False
        # Pickup action
        elif actions[agent_id] == self.actions.pickup:
            # Only one agent able to pickup item in world makes action valid
            if (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) in pickup_locations and agent_id in pickup_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])] and len(pickup_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])]) == 1:
                return True
            # Invalid attempt to pickup item from world
            else:
                return False
        # Toggle action
        elif actions[agent_id] == self.actions.toggle:
            # Only one agent able to close door in world makes action valid
            if (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) in close_door_locations and agent_id in close_door_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])] and len(close_door_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])]) == 1:
                # Other agents trying to access same location, so fail
                if (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) in next_poses:
                    return False
                # Other agent currently in spot, so have to recursively check if they will move
                elif (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) in curr_poses:
                    return self.collision_checker(curr_poses, fwd_poses, fwd_cells, next_poses, drop_locations, pickup_locations, open_door_locations, close_door_locations, actions, curr_poses[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])])
                # Completely valid move to close door
                else:
                    return True
            # Only one agent able to open door in world makes action valid
            elif (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) in open_door_locations and agent_id in open_door_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])] and len(open_door_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])]) == 1:
                return True
        # Invalid action
        return False
    def step(self, actions):
        """
        Advance the environment by one timestep.

        First gathers every agent's intent (forward target, drop/pickup/toggle
        cell), then validates each action with collision_checker, and finally
        applies the valid actions.

        :param actions: list of actions, one per agent
        :return: (obs, reward, done, info) tuple in the Gym convention
        """
        self.step_count += 1
        reward = 0
        done = False
        # Get the positions in front of the agents
        fwd_poses = [self.front_pos(agent_id) for agent_id in range(len(self.agent_poses))]
        # Get the contents of the cell in front of the agents
        fwd_cells = [self.grid.get(*fwd_pos) for fwd_pos in fwd_poses]
        # Get attempted next positions of all agents & dropped items
        curr_poses = {}
        next_poses = {}
        drop_locations = {}
        pickup_locations = {}
        open_door_locations = {}
        close_door_locations = {}
        for agent_id, pos in enumerate(self.agent_poses):
            # Store current positions in easily accessible dict
            curr_poses[(pos[0], pos[1])] = agent_id
            # Agent staying in its current location
            if actions[agent_id] != self.actions.forward:
                if (pos[0], pos[1]) not in next_poses:
                    next_poses[(pos[0], pos[1])] = []
                next_poses[(pos[0], pos[1])].append(agent_id)
            # Agent is attempting to drop item into env
            if actions[agent_id] == self.actions.drop:
                if (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) not in drop_locations:
                    drop_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])] = []
                drop_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])].append(agent_id)
            # Agent is attempting to pick up item from env
            elif actions[agent_id] == self.actions.pickup:
                if fwd_cells[agent_id] and fwd_cells[agent_id].ma_can_pickup(agent_id):
                    if (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) not in pickup_locations:
                        pickup_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])] = []
                    pickup_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])].append(agent_id)
            # Agent is attempting to toggle object in front of it
            elif actions[agent_id] == self.actions.toggle:
                if fwd_cells[agent_id] and fwd_cells[agent_id].ma_check_toggle(self, agent_id, fwd_poses[agent_id]):
                    if fwd_cells[agent_id].is_open:
                        if (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) not in close_door_locations:
                            close_door_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])] = []
                        close_door_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])].append(agent_id)
                    else:
                        if (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) not in open_door_locations:
                            open_door_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])] = []
                        open_door_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])].append(agent_id)
            # Agent is attempting to move forward
            else:
                if (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) not in next_poses:
                    next_poses[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])] = []
                next_poses[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])].append(agent_id)
        for agent_id, action in enumerate(actions):
            if self.collision_checker(curr_poses, fwd_poses, fwd_cells, next_poses, drop_locations, pickup_locations, open_door_locations, close_door_locations, actions, agent_id):
                # Rotate left
                if action == self.actions.left:
                    self.agent_dirs[agent_id] -= 1
                    if self.agent_dirs[agent_id] < 0:
                        self.agent_dirs[agent_id] += 4
                # Rotate right
                elif action == self.actions.right:
                    self.agent_dirs[agent_id] = (self.agent_dirs[agent_id] + 1) % 4
                # Move forward
                elif action == self.actions.forward:
                    if fwd_cells[agent_id] == None or fwd_cells[agent_id].can_overlap() or (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) in pickup_locations and len(pickup_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])]) == 1:
                        self.agent_poses[agent_id] = fwd_poses[agent_id]
                    if fwd_cells[agent_id] != None and fwd_cells[agent_id].type == 'goal':
                        done = True
                        reward = self._reward()
                    if fwd_cells[agent_id] != None and fwd_cells[agent_id].type == 'lava':
                        done = True
                # Pick up an object
                elif action == self.actions.pickup:
                    if (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) in pickup_locations and agent_id in pickup_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])] and len(pickup_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])]) == 1:
                        if self.carrying_objects[agent_id] is None:
                            self.carrying_objects[agent_id] = fwd_cells[agent_id]
                            self.carrying_objects[agent_id].cur_pos = np.array([-1, -1])
                            self.grid.set(*fwd_poses[agent_id], None)
                # Drop an object
                elif action == self.actions.drop:
                    if not fwd_cells[agent_id] and self.carrying_objects[agent_id] or (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) in pickup_locations and len(pickup_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])]) == 1:
                        self.grid.set(*fwd_poses[agent_id], self.carrying_objects[agent_id])
                        self.carrying_objects[agent_id].cur_pos = fwd_poses[agent_id]
                        self.carrying_objects[agent_id] = None
                # Toggle/activate an object
                elif action == self.actions.toggle:
                    if fwd_cells[agent_id]:
                        fwd_cells[agent_id].ma_toggle(self, agent_id, fwd_poses[agent_id])
                # Done action (not used by default)
                elif action == self.actions.done:
                    pass
                else:
                    assert False, "unknown action"
        if self.step_count >= self.max_steps:
            done = True
        obs = self.gen_obs()
        return obs, reward, done, {}
def gen_obs_grid(self, agent_id):
"""
Generate the sub-grid observed by an agent.
This method also outputs a visibility mask telling us which grid
cells the agent can actually see.
"""
topX, topY, botX, botY = self.get_view_exts(agent_id)
grid = self.grid.slice(topX, topY, self.agent_view_size, self.agent_view_size)
for i in range(self.agent_dirs[agent_id] + 1):
grid = grid.rotate_left()
# Process occluders and visibility
# Note that this incurs some performance cost
if not self.see_through_walls:
vis_mask = grid.process_vis(agent_pos=(self.agent_view_size // 2 , self.agent_view_size - 1))
else:
vis_mask = np.ones(shape=(grid.width, grid.height), dtype=np.bool)
# Make it so the agent sees what it's carrying
# We do this by placing the carried object at the agent's position
# in the agent's partially observable view
agent_pos = grid.width // 2, grid.height - 1
if self.carrying_objects[agent_id]:
grid.set(*agent_pos, self.carrying_objects[agent_id])
else:
grid.set(*agent_pos, None)
return grid, vis_mask
    def gen_obs(self):
        """
        Generate the viewable observations of all agents (partially observable, low-resolution encoding)

        :return: list with one observation dict per agent, each containing
            'image', 'direction' and 'mission' entries
        """
        combined_obs = []
        for agent_id in range(len(self.agent_poses)):
            grid, vis_mask = self.gen_obs_grid(agent_id)
            # Positions of all other agents, translated into this agent's view frame
            relative_agent_poses = [(self.get_view_coords(agent_id, pos[0], pos[1]), other_agent_id, self.agent_dirs[other_agent_id]) for other_agent_id, pos in enumerate(self.agent_poses) if agent_id != other_agent_id]
            # Encode the partially observable view into a numpy array
            image = grid.ma_encode(vis_mask=vis_mask, agent_poses=relative_agent_poses)
            assert hasattr(self, 'mission'), "environments must define a textual mission string"
            # Observations are dictionaries containing:
            # - an image (partially observable view of the environment)
            # - the agent's direction/orientation (acting as a compass)
            # - a textual mission string (instructions for the agent)
            obs = {
                'image': image,
                'direction': self.agent_dirs[agent_id],
                'mission': self.mission
            }
            combined_obs.append(obs)
        return combined_obs
    def get_obs_render(self, obs, tile_size=TILE_PIXELS//2):
        """
        Render an agent observation for visualization

        :param obs: encoded observation image array (as stored under 'image')
        :param tile_size: pixel size of each rendered tile
        :return: rendered image array
        """
        grid, vis_mask = Grid.decode(obs)
        # Render the whole grid
        img = grid.render(
            tile_size,
            agent_pos=(self.agent_view_size // 2, self.agent_view_size - 1),
            agent_dir=3,
            highlight_mask=vis_mask
        )
        return img
def render(self, mode='human', close=False, highlight=True, tile_size=TILE_PIXELS):
"""
Render the whole-grid human view
"""
if close:
if self.window:
self.window.close()
return
if mode == 'human' and not self.window:
import gym_minigrid.window
self.window = gym_minigrid.window.Window('gym_minigrid')
self.window.show(block=False)
# Mask of which cells to highlight
highlight_mask = np.zeros(shape=(self.width, self.height), dtype=np.bool)
for agent_id in range(len(self.agent_poses)):
# Compute which cells are visible to the agent
_, vis_mask = self.gen_obs_grid(agent_id)
# Compute the world coordinates of the bottom-left corner
# of the agent's view area
f_vec = self.dir_vec(agent_id)
r_vec = self.right_vec(agent_id)
top_left = self.agent_poses[agent_id] + f_vec * (self.agent_view_size-1) - r_vec * (self.agent_view_size // 2)
# For each cell in the visibility mask
for vis_j in range(0, self.agent_view_size):
for vis_i in range(0, self.agent_view_size):
# If this cell is not visible, don't highlight it
if not vis_mask[vis_i, vis_j]:
continue
# Compute the world coordinates of this cell
abs_i, abs_j = top_left - (f_vec * vis_j) + (r_vec * vis_i)
if abs_i < 0 or abs_i >= self.width:
continue
if abs_j < 0 or abs_j >= self.height:
continue
# Mark this cell to be highlighted
highlight_mask[abs_i, abs_j] = True
# Render the whole grid
img = self.grid.ma_render(
tile_size,
self.agent_poses,
self.agent_dirs,
highlight_mask=highlight_mask if highlight else None
)
if mode == 'human':
self.window.show_img(img)
self.window.set_caption(self.mission)
return img
def close(self):
if self.window:
self.window.close()
return
class CommunicativeMultiAgentMiniGridEnv(MultiAgentMiniGridEnv):
"""
2D grid world game environment with multi-agent support and communication between agents
"""
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : 10
}
def __init__(
self,
grid_size=None,
width=None,
height=None,
max_steps=100,
see_through_walls=False,
seed=1337,
agent_view_size=7
):
# Can't set both grid_size and width/height
if grid_size:
assert width == None and height == None
width = grid_size
height = grid_size
# Action enumeration for this environment
self.actions = MultiAgentMiniGridEnv.Actions
# Actions are discrete integer values
self.action_space = spaces.Discrete(len(self.actions))
# Number of cells (width and height) in the agent view
assert agent_view_size % 2 == 1
assert agent_view_size >= 3
self.agent_view_size = agent_view_size
# Observations are dictionaries containing an
# encoding of the grid and a textual 'mission' string
self.observation_space = spaces.Box(
low=0,
high=255,
# shape=(self.agent_view_size, self.agent_view_size, 3),
shape=(width, height, 3),
dtype='uint8'
)
self.observation_space = spaces.Dict({
'image': self.observation_space
})
# Range of possible rewards
self.reward_range = (0, 1)
# Window to use for human rendering mode
self.window = None
# Environment configuration
self.width = width
self.height = height
self.max_steps = max_steps
self.see_through_walls = see_through_walls
# Current positions and directions of the agents
self.agent_poses = []
self.agent_dirs = []
# Initialize the RNG
self.seed(seed=seed)
# Initialize the state
self.reset()
def reset(self):
# Current positions and directions of the agents
self.agent_poses = []
self.agent_dirs = []
# Generate a new random grid at the start of each episode
# To keep the same grid for each episode, call env.seed() with
# the same seed before calling env.reset()
self._gen_grid(self.width, self.height)
# These fields should be defined by _gen_grid
assert self.agent_poses
assert self.agent_dirs
# Check that the agent doesn't overlap with an object
for pos in self.agent_poses:
start_cell = self.grid.get(*pos)
assert start_cell is None or start_cell.can_overlap()
# Item picked up, being carried, initially nothing for all agents
self.carrying_objects = [None for i in self.agent_poses]
# Step count since episode start
self.step_count = 0
# Return first observation
obs, _ = self.gen_obs_comm()
# Store this obs in case communication occurs in next episode
self.orig_agent_poses = deepcopy(self.agent_poses)
self.past_obs = deepcopy(obs)
return obs
def step(self, actions):
self.step_count += 1
reward = 0
done = False
# Get the positions in front of the agents
fwd_poses = [self.front_pos(agent_id) for agent_id in range(len(self.agent_poses))]
# Get the contents of the cell in front of the agents
fwd_cells = [self.grid.get(*fwd_pos) for fwd_pos in fwd_poses]
# Get attempted next positions of all agents & dropped items
curr_poses = {}
next_poses = {}
drop_locations = {}
pickup_locations = {}
open_door_locations = {}
close_door_locations = {}
# Get physical actions from actions list
phys_actions = [action[0] for action in actions]
for agent_id, pos in enumerate(self.agent_poses):
# Store current positions in easily accessible dict
curr_poses[(pos[0], pos[1])] = agent_id
# Agent staying in its current location
if phys_actions[agent_id] != self.actions.forward:
if (pos[0], pos[1]) not in next_poses:
next_poses[(pos[0], pos[1])] = []
next_poses[(pos[0], pos[1])].append(agent_id)
# Agent is attempting to drop item into env
if phys_actions[agent_id] == self.actions.drop:
if (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) not in drop_locations:
drop_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])] = []
drop_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])].append(agent_id)
# Agent is attempting to pick up item from env
elif phys_actions[agent_id] == self.actions.pickup:
if fwd_cells[agent_id] and fwd_cells[agent_id].ma_can_pickup(agent_id):
if (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) not in pickup_locations:
pickup_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])] = []
pickup_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])].append(agent_id)
# Agent is attempting to toggle object in front of it
elif phys_actions[agent_id] == self.actions.toggle:
if fwd_cells[agent_id] and fwd_cells[agent_id].ma_check_toggle(self, agent_id, fwd_poses[agent_id]):
if fwd_cells[agent_id].is_open:
if (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) not in close_door_locations:
close_door_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])] = []
close_door_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])].append(agent_id)
else:
if (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) not in open_door_locations:
open_door_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])] = []
open_door_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])].append(agent_id)
# Agent is attempting to move forward
else:
if (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) not in next_poses:
next_poses[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])] = []
next_poses[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])].append(agent_id)
for agent_id, action in enumerate(phys_actions):
if self.collision_checker(curr_poses, fwd_poses, fwd_cells, next_poses, drop_locations, pickup_locations, open_door_locations, close_door_locations, phys_actions, agent_id):
# Rotate left
if action == self.actions.left:
self.agent_dirs[agent_id] -= 1
if self.agent_dirs[agent_id] < 0:
self.agent_dirs[agent_id] += 4
# Rotate right
elif action == self.actions.right:
self.agent_dirs[agent_id] = (self.agent_dirs[agent_id] + 1) % 4
# Move forward
elif action == self.actions.forward:
if fwd_cells[agent_id] == None or fwd_cells[agent_id].can_overlap() or (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) in pickup_locations and len(pickup_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])]) == 1:
self.agent_poses[agent_id] = fwd_poses[agent_id]
if fwd_cells[agent_id] != None and fwd_cells[agent_id].type == 'goal':
done = True
reward = self._reward()
if fwd_cells[agent_id] != None and fwd_cells[agent_id].type == 'lava':
done = True
# Pick up an object
elif action == self.actions.pickup:
if (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) in pickup_locations and agent_id in pickup_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])] and len(pickup_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])]) == 1:
if self.carrying_objects[agent_id] is None:
self.carrying_objects[agent_id] = fwd_cells[agent_id]
self.carrying_objects[agent_id].cur_pos = np.array([-1, -1])
self.grid.set(*fwd_poses[agent_id], None)
# Drop an object
elif action == self.actions.drop:
if not fwd_cells[agent_id] and self.carrying_objects[agent_id] or (fwd_poses[agent_id][0], fwd_poses[agent_id][1]) in pickup_locations and len(pickup_locations[(fwd_poses[agent_id][0], fwd_poses[agent_id][1])]) == 1:
self.grid.set(*fwd_poses[agent_id], self.carrying_objects[agent_id])
self.carrying_objects[agent_id].cur_pos = fwd_poses[agent_id]
self.carrying_objects[agent_id] = None
# Toggle/activate an object
elif action == self.actions.toggle:
if fwd_cells[agent_id]:
fwd_cells[agent_id].ma_toggle(self, agent_id, fwd_poses[agent_id])
# Done action (not used by default)
elif action == self.actions.done:
pass
else:
assert False, "unknown action"
if self.step_count >= self.max_steps:
done = True
# Get communication actions from actions list
comm_actions = [action[1] for action in actions]
obs, shared_obs = self.gen_obs_comm(comm_actions)
self.orig_agent_poses = deepcopy(self.agent_poses)
self.past_obs = deepcopy(obs)
return shared_obs, reward, done, {}
def gen_obs_grid_comm(self, agent_id):
"""
Generate the sub-grid observed by an agent.
This method also outputs a visibility mask telling us which grid
cells the agent can actually see.
"""
topX, topY, botX, botY = self.get_view_exts(agent_id)
if topX < 0:
topX = 0
if topY < 0:
topY = 0
if botX > self.grid.width:
botX = self.grid.width
if botY > self.grid.height:
botY = self.grid.height
grid = self.grid.slice(topX, topY, botX - topX, botY - topY)
for i in range(self.agent_dirs[agent_id] + 1):
grid = grid.rotate_left()
# Facing right
if self.agent_dirs[agent_id] == 0:
agent_rel_x = self.agent_poses[agent_id][1] - topY
# Facing down
elif self.agent_dirs[agent_id] == 1:
agent_rel_x = botX - 1 - self.agent_poses[agent_id][0]
# Facing left
elif self.agent_dirs[agent_id] == 2:
agent_rel_x = botY - 1 - self.agent_poses[agent_id][1]
# Facing up
elif self.agent_dirs[agent_id] == 3:
agent_rel_x = self.agent_poses[agent_id][0] - topX
else:
assert False, "invalid agent direction"
# Process occluders and visibility
# Note that this incurs some performance cost
if not self.see_through_walls:
vis_mask = grid.process_vis(agent_pos=(agent_rel_x , grid.height - 1))
else:
vis_mask = np.ones(shape=(grid.width, grid.height), dtype=np.bool)
# Rotate grid & mask back to original pose
for i in range(3 - self.agent_dirs[agent_id]):
grid = grid.rotate_left()
vis_mask = np.rot90(vis_mask, self.agent_dirs[agent_id] + 1)
# Fill in partial obs grid & mask into complete obs space grid & mask
final_grid = self.grid.copy()
final_vis_mask = np.zeros(shape=(self.grid.width, self.grid.height), dtype=np.bool)
for x in range(grid.width):
for y in range(grid.height):
final_grid.set(x + topX, y + topY, grid.get(x, y))
final_vis_mask[x + topX][y + topY] = vis_mask[x][y]
# Make it so the agent sees what it's carrying
# We do this by placing the carried object at the agent's position
if self.carrying_objects[agent_id]:
final_grid.set(*(self.agent_poses[agent_id]), self.carrying_objects[agent_id])
else:
final_grid.set(*(self.agent_poses[agent_id]), None)
return final_grid, final_vis_mask
def gen_obs_comm(self, comm_actions=None):
    """
    Generate per-agent observations, with optional communication sharing.

    Returns two parallel lists indexed by agent id:
    - combined_obs: each agent's own partially observable view.
    - shared_obs: deep copies of the same views, additionally patched with
      cells remembered by nearby communicating agents.

    :param comm_actions: optional per-agent truthy flags; agent k's past
        observation is merged into agent i's shared view only when
        comm_actions[k] is truthy and the two agents are within Euclidean
        distance 3 (measured on orig_agent_poses).
    """
    combined_obs = []
    shared_obs = []
    for agent_id in range(len(self.agent_poses)):
        # Observable sub-grid plus visibility mask for this agent.
        grid, vis_mask = self.gen_obs_grid_comm(agent_id)
        # Encode the partially observable view into a numpy array; the other
        # agents' poses/directions are passed so they can be drawn in.
        image = grid.ma_encode(vis_mask=vis_mask, agent_poses=[(pos, other_agent_id, self.agent_dirs[other_agent_id]) for other_agent_id, pos in enumerate(self.agent_poses) if agent_id != other_agent_id])
        assert hasattr(self, 'mission'), "environments must define a textual mission string"
        # Observations are dictionaries containing:
        # - an image (partially observable view of the environment)
        # - the agent's direction/orientation (acting as a compass)
        # - a textual mission string (instructions for the agent)
        obs = {
            'image': image,
            'direction': self.agent_dirs[agent_id],
            'mission': self.mission
        }
        # Deep copies: the shared_obs entry is mutated below and must not
        # alias the combined_obs entry.
        combined_obs.append(deepcopy(obs))
        shared_obs.append(deepcopy(obs))
        if comm_actions:
            for other_agent_id, communicate in enumerate(comm_actions):
                if agent_id != other_agent_id and communicate:
                    # Only share between agents within 3 cells of each other.
                    if np.sqrt( \
                            (self.orig_agent_poses[agent_id][0] - self.orig_agent_poses[other_agent_id][0])**2 + \
                            (self.orig_agent_poses[agent_id][1] - self.orig_agent_poses[other_agent_id][1])**2 \
                            ) < 3:
                        for i in range(grid.width):
                            for j in range(grid.height):
                                # Patch only cells this agent cannot see,
                                # and only with remembered cells whose type
                                # is neither 0 nor an agent marker.
                                if not vis_mask[i][j]:
                                    if self.past_obs[other_agent_id]['image'][i][j][0] not in [0, OBJECT_TO_IDX['agent']]:
                                        shared_obs[agent_id]['image'][i][j][0] = self.past_obs[other_agent_id]['image'][i][j][0]
                                        shared_obs[agent_id]['image'][i][j][1] = self.past_obs[other_agent_id]['image'][i][j][1]
                                        shared_obs[agent_id]['image'][i][j][2] = self.past_obs[other_agent_id]['image'][i][j][2]
    return combined_obs, shared_obs
|
from pyspark.conf import SparkConf
import argparse
import os
import numpy
import sys
import tensorflow as tf
import threading
import time
from datetime import datetime
from tensorflow.python.ops import variable_scope as vs
# from tensorflowonspark import TFCluster
# import pyspark.sql as sql_n #spark.sql
# from pyspark import SparkContext # pyspark.SparkContext dd
# from pyspark.conf import SparkConf #conf
#
# from pyspark.sql.types import *
# schema = StructType([
# StructField("id", StringType(), True),
# StructField("value", FloatType(), True),
# StructField("date", StringType(), True)]
# )
#
# os.environ['JAVA_HOME'] = "/tool_lf/java/jdk1.8.0_144/bin/java"
# os.environ["PYSPARK_PYTHON"] = "/root/anaconda3/bin/python"
# os.environ["HADOOP_USER_NAME"] = "root"
# conf=SparkConf().setMaster("spark://lf-MS-7976:7077")
# spark = sql_n.SparkSession.builder.appName("lf").config(conf=conf).getOrCreate()
# sc =spark.sparkContext
# sqlContext=sql_n.SQLContext(sparkContext=sc,sparkSession=spark)
#
# # ็ต้ๆฃๆฅ
# check="1"
# if(check=="0"):
# # os.environ['JAVA_HOME'] = conf.get(SECTION, 'JAVA_HOME')
# rd=sc.textFile("hdfs://127.0.0.1:9000/zd_data2/FQ/idea_ok/G_CFYH_2_035FQ001.txt").map(lambda x:str(x).split(",")) \
# .map(lambda x:[str(x[0]).replace("\'",""),x[1],str(x[2]).replace("\'","").lstrip()]) \
# .map(lambda x:[str(x[0]).replace("[",""),float(x[1]),str(x[2]).replace("]","")])
# df=sqlContext.createDataFrame(rd, "id:string,value:float,date:string")
# df.createOrReplaceTempView("table1")
# # df.filter(df.date=="2015-09-29 00:00:55").show()
# list1=sqlContext.sql("select value from table1 where date between '2015-11-01 11:00:00' and '2015-11-01 11:09:59' ").rdd.map(list).collect()
# print("a",list1)
# value=sum(numpy.array(list1))
# print("b",value)
# print("d1",list1.__len__())
#
# rd=sc.textFile("hdfs://127.0.0.1:9000/zd_data2/FQ/G_CFYH_2_035FQ001.txt").map(lambda x:str(x).split(",")) \
# .map(lambda x:[str(x[0]).replace("\'",""),x[1],str(x[2]).replace("\'","").lstrip()]) \
# .map(lambda x:[str(x[0]).replace("[",""),float(x[1]),str(x[2]).replace("]","")])
# df=sqlContext.createDataFrame(rd, "id:string,value:float,date:string")
# df.createOrReplaceTempView("table1")
# list2=sqlContext.sql("select value from table1 where date between '2015-11-01 11:00:00' and '2015-11-01 11:09:59' ").rdd.map(list).collect()
# # print(list2)
# list3=numpy.array(list2,dtype=float)[1:-1]
# list4=numpy.array(list2,dtype=float)[0:list2.__len__()-2]
# print("cat",list4-list3)
# print("c",list2)
# print("d",list2.__len__())
#
# rd=sc.textFile("hdfs://127.0.0.1:9000/zd_data2/FW/G_CFYH_2_035FW001.txt").map(lambda x:str(x).split(",")) \
# .map(lambda x:[str(x[0]).replace("\'",""),x[1],str(x[2]).replace("\'","").lstrip()]) \
# .map(lambda x:[str(x[0]).replace("[",""),float(x[1]),str(x[2]).replace("]","")])
# df=sqlContext.createDataFrame(rd, "id:string,value:float,date:string")
# df.createOrReplaceTempView("table2")
# list1=sqlContext.sql("select * from table2 where date between '2015-11-01 11:00:00' and '2015-11-01 11:09:59' ").rdd.map(list).collect()
# print(numpy.average(numpy.array(numpy.array(list1)[:,1],dtype=float)*60*10/3600))
#
# # ็ต้ๅข้ๆฃๆฅ
# check="1"
# if(check=="0"):
# rd=sc.textFile("hdfs://127.0.0.1:9000/zd_data2/FQ/idea_ok/G_CFYH_2_035FQ001.txt").map(lambda x:str(x).split(",")) \
# .map(lambda x:[str(x[0]).replace("\'",""),x[1],str(x[2]).replace("\'","").lstrip()]) \
# .map(lambda x:[str(x[0]).replace("[",""),float(x[1]),str(x[2]).replace("]","")])
# df=sqlContext.createDataFrame(rd, "id:string,value:float,date:string")
# df.createOrReplaceTempView("table1")
# # df.filter(df.date=="2015-09-29 00:00:55").show()
# # list1=sqlContext.sql("select max(value),min(value) from table1")
# # list1.show()
# # print(df.count())
# # print(df.filter("value>1000").count())
# import pyhdfs as pd
# fs = pd.HdfsClient("127.0.0.1", 9000)
# if(not fs.exists("/zd_data2/FQ/idea_ok/G_CFYH_2_035FQ001_1000.txt")):
# num_list=10
# def fuc(iterator):
# value_list=[]
# num=0
# value=''
# for i in iterator:
# if num%num_list==0:
# if(value==''):
# value=value+str(i)
# num=num+1
# else:
# value_list.append(value)
# value=str(i)
# num=1
# else:
# value=value+','+str(i)
# num=num+1
# return value_list
# df.filter("value<1000").filter("value>0").select("value")\
# .rdd.map(list).map(lambda x:str(x).replace("[","").replace("]","")).mapPartitions(fuc)\
# .saveAsTextFile("hdfs://127.0.0.1:9000/zd_data2/FQ/idea_ok/G_CFYH_2_035FQ001_1000.txt")
#
# print(sc.textFile("hdfs://127.0.0.1:9000/zd_data2/FQ/idea_ok/G_CFYH_2_035FQ001_1000.txt").take(10))
# print(sc.textFile("hdfs://127.0.0.1:9000/zd_data2/FQ/idea_ok/G_CFYH_2_035FQ001_1000.txt").count())
# Build the adversarial (GAN) model and run generator inference.
print("---------------------------------------------------------------")
check="0"
if(check=="0"):
    tf.reset_default_graph()
    import matplotlib.pyplot as plt
    import seaborn as sns # for pretty plots
    from scipy.stats import norm
    import pyhdfs as pd
    import numpy as np
    # List the HDFS directory holding the prepared CSV shards.
    fs = pd.HdfsClient("127.0.0.1", 9000)
    [filename]=fs.walk("/zd_data2/FQ/idea_ok/G_CFYH_2_035FQ001_1000_20_1.txt/")
    #[filename]=fs.walk("/lf/")
    files_list=list(filename)
    # walk() yields one (root, dirs, files) tuple; build hdfs:// URLs for
    # each file, skipping the first entry (presumably a marker file such as
    # _SUCCESS -- TODO confirm).
    files_local=[item for item in map(lambda x:str("hdfs://127.0.0.1:9000"+files_list[0])+str(x),list(files_list[2])[1:])]
    print(files_local)
    # Network/batch hyper-parameters.
    # MLP - used for D_pre, D1, D2, G networks
    M=20 # minibatch size (CSV columns per row / feature width)
    pitch=200  # rows per batch
    rato=0.5   # scale applied to D's middle-layer variables
    rato1=0.5  # scale applied to G's middle-layer variables
    # Discriminator variables: 20 -> 50 -> 25 -> 10 -> 1.  AUTO_REUSE lets
    # the graph be rebuilt without duplicate-variable errors.
    with tf.variable_scope("D", reuse=tf.AUTO_REUSE):
        # construct learnable parameters within local scope
        # NOTE(review): the middle-layer variable *tensors* are multiplied by
        # `rato` here at graph-construction time (not at apply time) --
        # confirm this scaling is intentional.
        w11=tf.get_variable("w10", [20, 50])
        b11=tf.get_variable("b10", [50])
        w21=tf.get_variable("w11", [50, 25])*rato
        b21=tf.get_variable("b11", [25])*rato
        w31=tf.get_variable("w12", [25, 10])*rato
        b31=tf.get_variable("b12", [10])*rato
        w41=tf.get_variable("w13", [10,1])
        b41=tf.get_variable("b13", [1])
def mlp_D1(input):
# construct learnable parameters within local scope
# w11=tf.get_variable("w10", [input.get_shape()[1], 150], initializer=tf.random_normal_initializer())
# b11=tf.get_variable("b10", [150], initializer=tf.constant_initializer(0.0))
# w21=tf.get_variable("w11", [150, 70], initializer=tf.random_normal_initializer())
# b21=tf.get_variable("b11", [70], initializer=tf.constant_initializer(0.0))
# w31=tf.get_variable("w12", [70, 35], initializer=tf.random_normal_initializer())
# b31=tf.get_variable("b12", [35], initializer=tf.constant_initializer(0.0))
# w41=tf.get_variable("w13", [35,1], initializer=tf.random_normal_initializer())
# b41=tf.get_variable("b13", [1], initializer=tf.constant_initializer(0.0))
fc11=tf.nn.sigmoid(tf.matmul(input,w11)+b11)
fc11 = tf.nn.dropout(fc11, keep_prob=0.5)
fc12=tf.nn.sigmoid(tf.matmul(fc11,w21)+b21)
fc12 = tf.nn.dropout(fc12, keep_prob=0.5)
fc13=tf.nn.sigmoid(tf.matmul(fc12,w31)+b31)
fc14=tf.nn.tanh(tf.matmul(fc13,w41)+b41)
return fc14, [w11,b11,w21,b21,w31,b31,w41,b41]
# D(x)
x_node=tf.placeholder(dtype=tf.float32, shape=(None,M))
# x_node=tf.placeholder(tf.float32, shape=(None,M)) # input M normally distributed floats
fc1,theta_d=mlp_D1(x_node) # output likelihood of being normally distributed
D1=tf.maximum(tf.minimum(fc1,.99), 0.01) # clamp as a probability
    # Generator variables: 20 -> 300 -> 150 -> 75 (maxout head added inside
    # mlp() below).  Middle layers are pre-scaled by rato1 at construction
    # time -- NOTE(review): confirm the scaling is intentional.
    with tf.variable_scope("G", reuse=tf.AUTO_REUSE):
        w1=tf.get_variable("w0", [20, 300])
        b1=tf.get_variable("b0", [300])
        w2=tf.get_variable("w1", [300, 150])*rato1
        b2=tf.get_variable("b1", [150])*rato1
        w3=tf.get_variable("w2", [150, 75])*rato1
        b3=tf.get_variable("b2", [75])*rato1
def mlp(input,output_dim,n_maxouts=5):
# construct learnable parameters within local scope
w1=tf.get_variable("w0", [input.get_shape()[1], 300], initializer=tf.random_normal_initializer())
b1=tf.get_variable("b0", [300], initializer=tf.constant_initializer(0.0))
w2=tf.get_variable("w1", [300, 150], initializer=tf.random_normal_initializer())
b2=tf.get_variable("b1", [150], initializer=tf.constant_initializer(0.0))
w3=tf.get_variable("w2", [150, 75], initializer=tf.random_normal_initializer())
b3=tf.get_variable("b2", [75], initializer=tf.constant_initializer(0.0))
#w4=tf.get_variable("w3", [75,output_dim], initializer=tf.random_normal_initializer())
#b4=tf.get_variable("b3", [output_dim], initializer=tf.constant_initializer(0.0))
# nn operators
fc1=tf.nn.tanh(tf.matmul(input,w1)+b1)
fc1= tf.nn.dropout(fc1, keep_prob=0.5)
fc2=tf.nn.tanh(tf.matmul(fc1,w2)+b2)
fc2= tf.nn.dropout(fc2, keep_prob=0.5)
fc3=tf.nn.tanh(tf.matmul(fc2,w3)+b3)
mo_list=[]
if n_maxouts>0 :
w = tf.get_variable('mo_w_0', [75,output_dim],initializer=tf.random_normal_initializer())
b = tf.get_variable('mo_b_0', [output_dim],initializer=tf.constant_initializer(0.0))
fc4 = tf.matmul(fc3, w) + b
mo_list.append(w)
mo_list.append(b)
for i in range(n_maxouts):
if i>0:
w = tf.get_variable('mo_w_%d' % i, [75,output_dim],initializer=tf.random_normal_initializer())
b = tf.get_variable('mo_b_%d' % i, [output_dim],initializer=tf.constant_initializer(0.0))
mo_list.append(w)
mo_list.append(b)
fc4=tf.stack([fc4,tf.matmul(fc3, w) + b],axis=-1)
fc4 = tf.reduce_max(fc4,axis=-1)
else:
fc4=tf.matmul(fc3,w4)+b4
return fc4, [w1,b1,w2,b2,w3,b3].extend(mo_list)
z_node=tf.placeholder(dtype=tf.float32, shape=(None,M))
# print(z_node)
G,theta_g=mlp(z_node,M) # generate normal transformation of Z
# with tf.device('/cpu:0'):
# prepair data--------------------------------------------------------------------------------------
def read_data(file_queue):
reader = tf.TextLineReader()
key, value = reader.read(file_queue)
defaults = [[0.0]]*M
# print(defaults)
list_value = tf.decode_csv(value, defaults)
list_value_tensor=tf.stack(list_value)
#ๅ ไธบไฝฟ็จ็ๆฏ้ธขๅฐพ่ฑๆฐๆฎ้๏ผ่ฟ้้่ฆๅฏนyๅผๅ่ฝฌๆข
return list_value_tensor
def create_pipeline(filename, batch_size, num_epochs=None):
file_queue = tf.train.string_input_producer(filename,num_epochs=num_epochs)
example= read_data(file_queue)
min_after_dequeue = 2000
capacity = min_after_dequeue + batch_size
example_batch= tf.train.shuffle_batch(
[example], batch_size=batch_size, capacity=capacity,min_after_dequeue=min_after_dequeue
)
# print(example_batch)
return example_batch
    # ---- restore checkpoint and emit generator samples --------------------
    x_train_batch= create_pipeline(files_local, pitch)
    saver = tf.train.Saver()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        # Restore previously trained weights; no variable initializer is run
        # here, so the checkpoint must cover every variable in the graph.
        saver.restore(sess, "/tool_lf/lf/model-last.ckpt")
        # sess.run(local_init_op)
        # print(np.reshape(np.random.random(pitch*M),(pitch,M)))
        # Start populating the filename queue.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        int_num=0
        print("ๅผๅง่พๅบ๏ผ")
        try:
            #while not coord.should_stop():
            while True:
                #x=np.reshape(np.ones(M*pitch)*100000+np.random.normal(size=M*pitch)*2,(pitch,M))
                # print(x)
                # x=np.reshape(np.ones(M*pitch)*999+np.abs(np.random.normal(size=pitch*M)*1000),(pitch,M))
                #x=sess.run(x_train_batch)#sampled m-batch from zd_data
                # print(x)
                #x=np.sort(x,axis=1)
                # print(x)
                print("---------------")
                # Noise input: sorted Gaussian samples centred on 6.
                z=np.ones(shape=(M*pitch),dtype=float)*6+np.random.normal(size=M*pitch)
                z=np.sort(np.reshape(z,(pitch,M)),axis=1)
                #print(sess.run(D1,feed_dict={x_node:x}))
                print(sess.run(G,feed_dict={z_node:z}))
                int_num=int_num+1
                # Emit only 10 batches of generated samples, then stop.
                if int_num==10:
                    break
        except tf.errors.OutOfRangeError:
            print ('Done reading')
        finally:
            # Always shut the queue-runner threads down cleanly.
            coord.request_stop()
        coord.join(threads)
        # save_path = saver.save(sess,"/tool_lf/lf/model-last.ckpt")
        sess.close()
    print ("--ok๏ผ--")
# -*- coding:utf-8 -*-
import time
from testcase import *

# Scenario: add a property-management company through the admin UI.
# NOTE(review): the phone number / password / form values below are live
# test fixtures hard-coded into the script.
login('15707256976', '1234567')
time.sleep(5)  # crude wait for the post-login page; no explicit wait used
# Navigate: side menu entry, then the "add company" button.
browser.find_element_by_xpath('//div/div/nav/ul/li[1]/ul/li[2]/a').click()
browser.find_element_by_xpath('//div[3]/div[2]/div/div[3]/div/div[2]/div[2]/button').click()
# Company name
browser.find_element_by_xpath('//div/div[2]/div/form/div[1]/div/input').send_keys('company')
# Company short name
browser.find_element_by_xpath('//div/div[2]/div/form/div[2]/div/input').send_keys('COM')
# Administrator account user name and password for the new company.
browser.find_element_by_xpath('//div/div[2]/div/form/div[9]/div[2]/div/input').send_keys('username')
browser.find_element_by_xpath('//div/div[2]/div/form/div[9]/div[4]/div/input').send_keys('password')
# Confirm/save the form.
browser.find_element_by_xpath('/html/body/div[5]/div/div/div[3]').click()
print('kkkk')  # completion marker
from django.core.management.base import BaseCommand
from django.db import transaction
from geofr.services.populate import populate_overseas
class Command(BaseCommand):
    """Populate overseas related perimeters."""

    @transaction.atomic
    def handle(self, *args, **options):
        # Run the whole population task in one transaction and report counts.
        result = populate_overseas()
        message = f"{result['created']} created, {result['updated']} updated."
        self.stdout.write(self.style.SUCCESS(message))
|
"""File-I/O cheat sheet: open modes, read/write methods, file attributes.

Fixed so every demo actually runs: the write demos now open the file in a
writable mode (they used the default read-only mode), the ``writelines``
typo (``wirtelines``) is corrected, ``file.fileno()`` used an undefined
name, and the placeholder names (filename, N, S, I) are defined.
"""
import os

os.system('touch /tmp/demo.txt')  # ensure the demo file exists
# NOTE(review): a shell-free alternative is open(filename, 'a').close()

filename = '/tmp/demo.txt'
N = 4                        # char count for a partial read
S = 'hello\n'                # sample string payload
I = ['alpha\n', 'beta\n']    # sample iterable of lines

# --- opening for input ---
infile = open(filename, 'r')    # default: text read
infile.close()
infile = open(filename, 'rb')   # binary read [ byte stream ]
infile.close()
infile = open(filename, 'r+')   # both input and output
infile.close()

# --- read methods ---
with open('/tmp/demo.txt') as f:
    f.read()         # whole file
    f.seek(0)
    f.read(N)        # at most N chars
    f.readline()     # one line
    f.readlines()    # => [line string, ...]
    f.seek(0)
    for line in f:   # => space saving, read on need
        pass

# --- opening for output ---
outfile = open(filename, 'w')   # write mode
outfile.close()
outfile = open(filename, 'wb')  # binary write mode
outfile.close()

# --- write methods (file must be opened writable) ---
with open('/tmp/demo.txt', 'w') as f:
    f.write(S)
    f.writelines(I)  # I is an iterable (fixed typo: was f.wirtelines)

# --- misc file methods ('r+' so truncate() is permitted) ---
with open('/tmp/demo.txt', 'r+') as f:
    f.tell()              # return the file's current position
    f.seek(0, 0)          # move to offset; whence 0=start, 1=current, 2=end
    f.isatty()
    f.fileno()            # (fixed: was file.fileno() with undefined `file`)
    f.flush()
    f.truncate(len(S))    # keep only the first write's payload

# --- file attributes ---
f.closed  # => True or False
f.mode    # => 'r' ...
f.name    # => '/tmp/demo.txt' ...
# -*- coding: utf-8 -*-
# __author__ = 'eacaen'
import csv

# Rows to serialize; dict keys must match the fieldnames handed to
# DictWriter below.  (See csv.DictReader for the reading counterpart.)
vall = [
    {'last': 'a', 'first': 'doc'},
    {'last': 'asdd', 'first': 'sss'},
    {'last': 'b', 'first': 'rosr'},
    {'last': 'qqqq', 'first': 'zcsadcs'},
    {'last': 'c', 'first': 'exic'}
]

# newline='' is required by the csv module so the writer controls line
# endings itself (without it, '\r\r\n' appears on Windows).
with open('vall.csv', 'w', newline='') as fout:
    cout = csv.DictWriter(fout, ['first', 'last'])
    cout.writeheader()
    cout.writerows(vall)
from django.http import Http404
from django.shortcuts import render, redirect, get_object_or_404

from .models import Magazine, Alumni_Article
def alumni_portal(request):
    """Render the alumni portal listing all published articles."""
    published = Alumni_Article.objects.published()
    return render(request, 'alumni_portal.html', {'articles': published})
def alumni_magazine(request):
    """Render all magazine issues ordered by date, newest first."""
    issues = Magazine.objects.order_by('-date')
    return render(request, 'alumni_magazine.html', {'magazines': issues})
def single_article(request, pk):
    """Display a single article; hidden articles are admin-only.

    Raises Http404 when the article is not viewable and the requesting
    user cannot administer it.  (The original fell through this case and
    implicitly returned None, which makes Django raise a ValueError.)
    """
    article = get_object_or_404(Alumni_Article, pk=pk)
    # can_administer() already returns the truth value we need.
    admin = article.can_administer(request.user)
    if article.show_article_before_experation or admin:
        # attachments = article.otherattachment_set
        # image_attachments = article.imageattachment_set
        return render(request, 'model/alumni_article.html', {
            'article': article,
            # 'attachments': attachments,
            # 'image_attachments': image_attachments,
            'can_administer': admin})
    raise Http404("Article is not available.")
def alumni_skugga(request):
    """Render the static 'shadow an alumnus' page."""
    template_name = 'alumni_skugga_en_alumn.html'
    return render(request, template_name)
def about(request):
    """Render the static 'about' page for the alumni section."""
    template_name = 'alumni_about.html'
    return render(request, template_name)
def mentorship_program(request):
    """Render the static mentorship-program page."""
    template_name = 'alumni_mentorship_program.html'
    return render(request, template_name)
def calendar(request):
    """Render the alumni calendar fed by the published articles."""
    published = Alumni_Article.objects.published()
    return render(request, 'alumni_calendar.html', context={'articles': published})
|
class Solution:
    """LeetCode 1909: remove one element to make the array strictly increasing."""

    def canBeIncreasing(self, nums: list[int]) -> bool:
        """Return True if removing exactly one element of *nums* can leave a
        strictly increasing sequence.

        Brute force: try removing each index and test the remainder with a
        pairwise comparison; O(n^2) overall, fine for the problem bounds.
        (Also fixes the original's inner comprehension, which reused the
        outer loop variable ``i``.)
        """
        for skip in range(len(nums)):
            remaining = nums[:skip] + nums[skip + 1:]
            if all(a < b for a, b in zip(remaining, remaining[1:])):
                return True
        return False
from utils import AverageMeter, ProgressMeter
import torch
# Determine 20 nearest neighbors with SimClR instance discrimination task
def SimCLR_train(dataloader, model, epoch, criterion, optimizer):
    """Run one epoch of SimCLR contrastive training.

    :param dataloader: yields (images, augmented_images, labels) batches
    :param model: embedding network producing projections z
    :param epoch: current epoch index (used only for progress display)
    :param criterion: contrastive loss taking (z_i, z_j)
    :param optimizer: optimizer over the model parameters
    :return: average loss over the epoch
    """
    # Record progress
    losses = AverageMeter('Loss', ':.4e')
    progress = ProgressMeter(len(dataloader), [losses], prefix="Epoch: [{}]".format(epoch))

    model.train()
    for i, (ims, aug_ims, lbls) in enumerate(dataloader):
        batch, channel, h, w = ims.size()
        # Flatten any extra leading dimension so each image is processed
        # independently; the batch size itself does not matter to the model.
        x_i = ims.unsqueeze(1).view(-1, channel, h, w)
        x_i = x_i.cuda(non_blocking=True)
        x_j = aug_ims.unsqueeze(1).view(-1, channel, h, w)
        x_j = x_j.cuda(non_blocking=True)
        # (fix) the labels are unused by the contrastive loss; the original
        # copied them to the GPU every step for nothing.

        z_i = model(x_i)
        z_j = model(x_j)
        loss = criterion(z_i, z_j)

        # update losses
        losses.update(loss.item())

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if i % 25 == 0:
            progress.display(i)

    return losses.avg
def SCAN_train(dataloader, model, epoch, criterion, optimizer, device):
    """Run one epoch of SCAN clustering training.

    :param dataloader: yields dict batches with 'anchorimg' and 'neighborimg'
    :param model: clustering network returning one output per head
    :param epoch: current epoch index (used only for progress display)
    :param criterion: SCAN loss over (anchor_logits, neighbor_logits)
    :param optimizer: optimizer over the model parameters
    :param device: torch device for the batch tensors
    :return: average SCAN loss over the epoch
    """
    # record progress
    losses = AverageMeter('SCAN Loss', ':.4e')
    progress = ProgressMeter(len(dataloader), [losses], prefix="Epoch: [{}]".format(epoch))

    model.train()
    for i, batch in enumerate(dataloader):
        # forward pass
        anchors = batch['anchorimg'].to(device, non_blocking=True)
        neighbors = batch['neighborimg'].to(device, non_blocking=True)

        # One output per clustering head, each of shape [batch, n_clusters].
        output_anchors = model(anchors)
        output_neighbors = model(neighbors)

        # One optimization step per head.
        # NOTE(review): backward() runs once per head over the shared encoder
        # graph; if heads share intermediate activations this needs
        # retain_graph=True or a summed loss -- confirm against the model.
        for anchor_out, neighbor_out in zip(output_anchors, output_neighbors):
            loss = criterion(anchor_out, neighbor_out)

            # (fix) record the scalar, not the tensor: storing the tensor
            # kept its autograd graph alive and was inconsistent with the
            # other training loops in this file, which use loss.item().
            losses.update(loss.item())

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        if i % 25 == 0:
            progress.display(i)

    return losses.avg
def selflabel_train(dataloader, model, epoch, criterion, optimizer, device):
    """Run one epoch of self-labelling fine-tuning: the prediction on the
    augmented image is trained against the (frozen) prediction on the
    original image.

    :return: average loss over the epoch
    """
    # record progress
    losses = AverageMeter('Self Label Loss', ':.4e')
    progress = ProgressMeter(len(dataloader), [losses], prefix="Epoch: [{}]".format(epoch))
    model.train()
    for i, (ims, aug_ims, lbls) in enumerate(dataloader):
        imgs = ims.to(device, non_blocking=True)
        aug_imgs = aug_ims.to(device, non_blocking=True)
        # Pseudo-target pass: no gradients flow through the clean images.
        with torch.no_grad():
            output_imgs = model(imgs)
            output_imgs = output_imgs[0]  # tensor size [batchsize, numClasses]
        # Trainable pass on the augmented images.
        # NOTE(review): placing this pass outside the no_grad block is
        # inferred from the backward() call below -- confirm against the
        # original formatting.
        output_aug = model(aug_imgs)
        output_aug = output_aug[0]  # tensor size [batchsize, numClasses]
        loss = criterion(output_imgs, output_aug)
        # update losses
        losses.update(loss.item())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if i % 10 == 0:
            progress.display(i)
    train_avgloss = losses.avg
    return train_avgloss
from jwt import encode, decode
from app import SECRET_KEY
def generate_token(payload):
    """Create a signed JWT for *payload* (e.g. {'id':..., 'names':..., 'rol': ''}).

    PyJWT < 2.0 returns bytes from encode() while >= 2.0 returns str; the
    original unconditionally called .decode() and crashes on PyJWT >= 2.0.
    Normalize to str either way.
    """
    token = encode(payload, SECRET_KEY, algorithm='HS256')
    if isinstance(token, bytes):
        token = token.decode('utf-8')
    return token
def decode_token(token):
    """Verify and decode a JWT produced by generate_token.

    Per the PyJWT API, ``algorithms`` must be a *list* of allowed
    algorithms; the original passed the bare string 'HS256'.
    """
    return decode(token, SECRET_KEY, algorithms=['HS256'])
#!/usr/bin/env python
#-*-coding:utf-8-*-
class person():
    # Base class: a named person with an age (Python 2 syntax).
    def __init__(self,name,age):
        self.name=name  # display name
        self.age=age    # age in years
        print 'person %s has been constructed.'%(self.name)
    def tell(self):
        # The trailing comma suppresses the newline (Python 2) so subclasses
        # can append to the same line.  NOTE(review): the literal prints
        # "I' <name>" -- looks like a typo for "I'm".
        print 'I\' %s \nmy age is %d'%(self.name,self.age),
class teacher(person):
    # A person with a salary.
    def __init__(self,name,age,salary):
        # Base-class constructors are not invoked automatically here:
        # call person.__init__ explicitly through the class, passing self
        # so the instance is forwarded to the method.
        person.__init__(self,name,age)
        self.salary=salary  # monthly salary
        print 'teacher %s has been contructed.'%(self.name)
    def tell(self):
        person.tell(self)
        print 'my salary is %d'%(self.salary)
class student(person):
    # A person with exam marks.
    def __init__(self,name,age,marks):
        person.__init__(self,name,age)
        self.marks=marks  # score out of 100 (presumably -- TODO confirm)
        print 'student %s has been constructed.'%(self.name)
    def tell(self):
        person.tell(self)
        print 'my marks is %d'%(self.marks)
# Demo: polymorphic dispatch -- each element's own tell() implementation runs.
t=teacher('leo',24,8000)
s=student("Lucy",16,90)
school=[t,s]
for p in school:
    p.tell()
|
import pygame
class Player(pygame.sprite.Sprite):
    """Keyboard-controlled player sprite with per-axis wall collision and a
    direction-dependent image."""

    # Constructor function
    def __init__(self, x, y,lenX,lenY):
        super().__init__()
        # Transparent surface of size (lenX, lenY).
        self.image = pygame.Surface([lenX, lenY],pygame.SRCALPHA,32)
        self.image.convert_alpha()
        # Make our top-left corner the passed-in location.
        self.rect = self.image.get_rect()
        self.rect.y = y
        self.rect.x = x
        self.key=False      # whether the player holds the key
        self.secret= False  # whether the secret has been found
        # velocity vector
        self.change_x = 0
        self.change_y = 0
        # Wall sprite group; must be assigned by the level before update().
        self.walls = None

    def definirImagem(self,IMF,IMB,IML,IMR):
        # Store the four facing images (front, back, left, right) and show
        # the front-facing one initially.
        self.image.blit(IMF,(0,0))
        self.imf = IMF
        self.imb = IMB
        self.iml = IML
        self.imr = IMR

    def changespeed(self, x, y):
        # Accumulate velocity deltas (typically called on key down/up).
        self.change_x += x
        self.change_y += y

    def update(self):
        # Refresh the sprite image to match the current movement direction.
        transparent = (0,0,0,0)
        if self.change_y > 0:
            self.image.fill(transparent)
            self.image.blit(self.imf,(0,0))
        elif self.change_y <0:
            self.image.fill(transparent)
            self.image.blit(self.imb,(0,0))
        else:
            if self.change_x >0:
                self.image.fill(transparent)
                self.image.blit(self.imr,(0,0))
            elif self.change_x <0:
                self.image.fill(transparent)
                self.image.blit(self.iml,(0,0))
        # Horizontal move, then resolve wall collisions on this axis only.
        self.rect.x += self.change_x
        # check whether we hit a wall
        block_hit_list = pygame.sprite.spritecollide(self, self.walls, False)
        for block in block_hit_list:
            if self.change_x > 0:
                self.rect.right = block.rect.left
            else:
                self.rect.left = block.rect.right
        # Same thing for the vertical axis.
        self.rect.y += self.change_y
        block_hit_list = pygame.sprite.spritecollide(self, self.walls, False)
        for block in block_hit_list:
            if self.change_y > 0:
                self.rect.bottom = block.rect.top
            else:
                self.rect.top = block.rect.bottom
from wx.lib.pubsub import pub
from game_display import *
from options_dialogs import *
class MainView(wx.Frame):
    """Top-level pyNES window: emulator display, menus, and status bar.

    Emulator control is decoupled via pubsub: this frame only publishes
    "Start/Stop/Pause/Unpause Emulation" messages; subscribers elsewhere
    act on them.
    """

    def __init__(self, *args, **kwargs):
        wx.Frame.__init__(self, *args, **kwargs)
        self.SetTitle("pyNES")

        # Display for emulator
        self.display = Display(parent=self)

        # Menu bar
        menu_bar = wx.MenuBar()
        file_menu = wx.Menu()
        m_load = file_menu.Append(id=wx.ID_OPEN, text="Load ROM\tCtrl-O",
                                  help="Load ROM into pyNES")
        m_exit = file_menu.Append(id=wx.ID_EXIT, text="Exit\tCtrl-Q",
                                  help="Exit pyNES.")
        conf_menu = wx.Menu()
        m_input = conf_menu.Append(id=wx.ID_ANY, text="Input...",
                                   help="Configure Input")
        # NOTE(review): the menus are appended to menu_bar further below,
        # *after* this call -- confirm this ordering is intended.
        self.SetMenuBar(menu_bar)

        # Status bar
        self.statusbar = self.CreateStatusBar()
        self.statusbar.SetFieldsCount(1)
        # self.statusbar.SetStatusWidths([-4, -3, -2])

        # Bind events which pause/unpause emulation
        self.Bind(wx.EVT_MENU_OPEN, self.RequestPause)
        self.Bind(wx.EVT_MENU_CLOSE, self.RequestUnpause)
        self.display.Bind(wx.EVT_KILL_FOCUS, self.RequestPause)
        self.display.Bind(wx.EVT_SET_FOCUS, self.RequestUnpause)

        # Bind file menu events
        self.Bind(wx.EVT_MENU, self.Kill, m_exit)
        self.Bind(wx.EVT_MENU, self.OnLoadRom, m_load)
        menu_bar.Append(file_menu, "&File")

        # Bind options menu events
        self.Bind(wx.EVT_MENU, self.OnOptionsInput, m_input)
        menu_bar.Append(conf_menu, "&Options")

        # Bind window behavior events
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.Bind(wx.EVT_CLOSE, self.Kill)

        # Configure layout with sizers
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.display, 1, flag=wx.EXPAND)
        self.SetSizer(sizer)
        self.Fit()
        self.Layout()

    def RequestStop(self, event):
        # Publish a stop request; the emulator controller subscribes.
        pub.sendMessage("Stop Emulation")

    def RequestStart(self, event):
        pub.sendMessage("Start Emulation")

    def RequestPause(self, event):
        pub.sendMessage("Pause Emulation")

    def RequestUnpause(self, event):
        pub.sendMessage("Unpause Emulation")

    def OnLoadRom(self, event):
        # Prompt for a .nes file and start emulation with its path.
        dlg = wx.FileDialog(parent=self, style=wx.FD_OPEN,
                            wildcard="NES files (*.nes) | *.nes")
        if dlg.ShowModal() == wx.ID_OK:
            pub.sendMessage("Start Emulation", rom_path=dlg.GetPath())
            # Update status bar
            self.statusbar.SetStatusText(dlg.GetFilename())
        dlg.Destroy()

    def OnOptionsInput(self, event):
        dlg = OptionsInput(parent=self, title="Input Settings")
        if dlg.ShowModal() == wx.ID_OK:
            pub.sendMessage("Push Options.Input")
        dlg.Destroy()

    def OnOptionsVideo(self, event):
        # NOTE(review): publishes "Push Options.Input" just like the input
        # dialog above -- looks like a copy/paste; confirm whether
        # "Push Options.Video" was intended.
        dlg = OptionsVideo(parent=self, title="Video Settings")
        if dlg.ShowModal() == wx.ID_OK:
            pub.sendMessage("Push Options.Input")
        dlg.Destroy()

    def OnSize(self, event):
        self.Layout()

    def Kill(self, event):
        # Stop emulation before tearing the window down.
        pub.sendMessage("Stop Emulation")
        self.Destroy()
|
# ๅฏผๅ
ฅ่ๅพ
from flask import Blueprint
# ๅๅปบ่ๅพ
api = Blueprint('api',__name__)
# ๆไฝฟ็จ่ๅพๅฏน่ฑก็ๆไปถ๏ผๅฏผๅ
ฅๅฐๅๅปบ่ๅพๅฏน่ฑก็ไธ้ข
from . import passport,users,house
# Request hook: runs after every request handled by this blueprint and
# normalizes the response type to JSON.
@api.after_request
def after_request(response):
    """Force application/json on responses Flask labelled as text/*.

    Guards against a missing Content-Type header: headers.get() returns
    None in that case, and the original call to .startswith on it would
    raise AttributeError.
    """
    content_type = response.headers.get('Content-Type')
    if content_type and content_type.startswith('text'):
        response.headers['Content-Type'] = 'application/json'
    return response
#!/usr/bin/env python3
import sys
class Generate:
    """Print the first CLI argument in alternating caps ("spongebob case"):
    characters at odd positions are upper-cased, the rest lower-cased."""

    def __init__(self):
        word = sys.argv[1].lower()
        styled = [
            ch.upper() if idx % 2 else ch
            for idx, ch in enumerate(word)
        ]
        print(''.join(styled))
# Script entry point: consumes sys.argv[1] and prints the stylized word.
g = Generate()
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
#
# Python script to create spatial database with rate-state friction parameters.
#
# Brad T. Aagaard, U.S. Geological Survey
#
# ----------------------------------------------------------------------
#
# PREREQUISITES: numpy, spatialdata
# ======================================================================
import numpy
from spatialdata.spatialdb.SimpleGridAscii import SimpleGridAscii
from spatialdata.geocoords.CSCart import CSCart
# Fault geometry and grid resolution (meters).
faultW = 18.0e+3  # down-dip width of the fault
faultL = 36.0e+3  # along-strike length
taperW = 3.0e+3   # width of the friction taper at the fault edges
dx = 100.0        # grid spacing
W = faultW - taperW  # half-extent of the uniform (untapered) interior
# ----------------------------------------------------------------------
def fnB(x, W, w):
    """Tapered boxcar: 1 for |x| <= W, 0 for |x| >= W+w, and a smooth
    tanh ramp in between.

    The original evaluated w/(|x|-W-w) + w/(|x|-W) at *every* point, which
    divides by zero at |x| == W and |x| == W+w and emits RuntimeWarnings
    (the masked products still gave the right values).  Denominators are
    now replaced with safe dummies outside the taper band, so the returned
    values are unchanged but no warnings are raised.
    """
    xabs = numpy.abs(x)
    inner = xabs <= W
    taper = numpy.bitwise_and(W < xabs, xabs < W + w)
    # Safe denominators: nonzero placeholders wherever taper is False.
    d1 = numpy.where(taper, xabs - W - w, -1.0)
    d2 = numpy.where(taper, xabs - W, 1.0)
    ramp = 0.5 * (1.0 + numpy.tanh(w / d1 + w / d2))
    return 1.0 * inner + ramp * taper
# ----------------------------------------------------------------------
# Build the grid of points covering the fault plane (x is the single
# fault-normal coordinate; y along strike; z down-dip, negative downward).
x = numpy.array([0.0], dtype=numpy.float64)
y = numpy.arange(-0.5*faultL, 0.5*faultL+0.5*dx, dx, dtype=numpy.float64)
z = numpy.arange(-faultW, 0.0+0.5*dx, dx, dtype=numpy.float64)
nx = x.shape[0]
ny = y.shape[0]
nz = z.shape[0]
npts = nx*ny*nz

xyz = numpy.zeros( (npts, 3), dtype=numpy.float64)
xyz[:,0] = x
# z varies fastest within each y column.
# NOTE(review): xrange is Python 2 only -- switch to range() to run this
# script under Python 3.
for iy in xrange(ny):
    xyz[iy*nz:(iy+1)*nz,1] = y[iy]
    xyz[iy*nz:(iy+1)*nz,2] = z

# Rate-state parameters: uniform f0, v0, b, L; `a` is raised outside the
# tapered patch (via the fnB taper in y and z) so the exterior is
# velocity-strengthening.
f0 = 0.6*numpy.ones( (npts,), dtype=numpy.float64)
v0 = 1.0e-6*numpy.ones( (npts,), dtype=numpy.float64)
a = 0.008 + 0.008*(1.0 - fnB(xyz[:,1], W, taperW)*fnB(-xyz[:,2]-7.5e+3,0.5*W,taperW))
b = 0.012*numpy.ones( (npts,), dtype=numpy.float64)
L = 0.02*numpy.ones( (npts,), dtype=numpy.float64)
cohesion = numpy.zeros( (npts,), dtype=numpy.float64)

# Initial state variable consistent with sliding at rate vi under the
# given shear/normal tractions.
vi = 1.0e-12
Tshear = 75.0e+6
Tnormal = -120.0e+6
theta0 = L/v0*numpy.exp(1.0/b*(-Tshear/Tnormal - f0 - a*numpy.log(vi/v0)))

# Write the spatial database file.
cs = CSCart()
cs.initialize()

writer = SimpleGridAscii()
writer.inventory.filename = "friction.spatialdb"
writer._configure()
writer.write({'points': xyz,
              'x': x,
              'y': y,
              'z': z,
              'coordsys': cs,
              'data_dim': 2,
              'values': [{'name': "reference-friction-coefficient",
                          'units': "none",
                          'data': f0},
                         {'name': "reference-slip-rate",
                          'units': "m/s",
                          'data': v0},
                         {'name': "characteristic-slip-distance",
                          'units': "m",
                          'data': L},
                         {'name': "constitutive-parameter-a",
                          'units': "none",
                          'data': a},
                         {'name': "constitutive-parameter-b",
                          'units': "none",
                          'data': b},
                         # NOTE(review): cohesion values are all zero, but
                         # the units tag reads "MPa" while everything else
                         # is SI -- check if nonzero cohesion is ever used.
                         {'name': "cohesion",
                          'units': "MPa",
                          'data': cohesion},
                         {'name': "state-variable",
                          'units': "s",
                          'data': theta0},
                         ]})

# End of file
|
# Build z: the words of x in reverse order, each word itself reversed
# (x[::-1] reverses the characters of the whole sentence; split() then
# yields the reversed words back-to-front).
names = ['siva', 'reddy', 'kumar', 'meghana']  # renamed: `list` shadowed the builtin
x = " ".join(names)
z = []
for word in x[::-1].split():
    z.append(word)
|
from typing import Union, Dict, Any, List
from struct import pack
from collections import OrderedDict
from functools import wraps
from starparse import config
import logging
# Module-level logger.  NOTE(review): several functions below log through
# the root `logging` module instead of this logger -- both reach the
# logging tree, but the source name differs.
logger = logging.getLogger(__name__)

# Any value type that can appear in a Starbound save structure.
SBT = Union[str, int, float, list, dict, OrderedDict]
class PackingError(Exception):
    """Raised when a value cannot be packed into the Starbound binary format."""
def coerce(f):
    """Decorator: ensure the single ``value`` argument matches the type
    named in *f*'s annotation, converting (and logging an error) when it
    does not.  ``List``/``Dict`` typing annotations map to the concrete
    list / (Ordered)dict types, honouring config.ORDERED_DICT."""
    @wraps(f)
    def wrapper(value):
        target = f.__annotations__['value']
        annotation_name = target.__name__
        if annotation_name == 'List':
            target = list
        elif annotation_name == 'Dict':
            target = OrderedDict if config.ORDERED_DICT else dict
        if not isinstance(value, target):
            logging.error('%s.%s expecting %s but got %s: %s',
                          f.__module__, f.__name__,
                          target.__name__, type(value).__name__, value)
            value = target(value)
        return f(value)
    return wrapper
def optional_arg_decorator(fn):
    """Allow *fn* to be used both as a bare decorator (@fn) and as a
    parameterized one (@fn(args...))."""
    def wrapped_decorator(*args):
        # Bare usage: the decorated callable arrives directly.
        if len(args) == 1 and callable(args[0]):
            return fn(args[0])

        # Parameterized usage: capture the args, return the real decorator.
        def real_decorator(decorate):
            return fn(decorate, *args)

        return real_decorator

    return wrapped_decorator
@coerce
def uint(value: int) -> bytearray:
    """
    Pack an unsigned varint to Starbound format: big-endian, 7 bits per
    byte, high bit set on every byte except the last.

    :param value: unsigned int
    :return: bytearray
    :raises PackingError: if value is negative
    """
    if value < 0:
        error = 'unsigned int cannot be negative: {0}'.format(value)
        logging.exception(error)
        raise PackingError(error)
    # Collect 7-bit groups least-significant first, then reverse.
    chunks = [value & 127]
    value >>= 7
    while value:
        chunks.append(value & 127 | 128)
        value >>= 7
    return bytearray(reversed(chunks))
@coerce
def int_(value: int) -> bytearray:
    """
    Pack a signed int to Starbound format: zig-zag encode (0, -1, 1, -2,
    2, ... -> 0, 1, 2, 3, 4, ...) and delegate to uint.

    :param value: int
    :return: bytearray
    """
    return uint(value * 2 if value >= 0 else -value * 2 - 1)
@coerce
def str_(value: str) -> bytearray:
    """
    Pack a string to Starbound format: uint byte-length prefix followed by
    the encoded bytes.

    Bug fix: the length prefix is now computed from the *encoded* byte
    count.  The original wrote ``uint(len(value))`` (character count)
    before encoding, which under-counted whenever the UTF-8 fallback
    produced multi-byte sequences, corrupting the stream.

    :param value: string
    :return: bytearray
    :raises PackingError: on non-ASCII input when config.UTF8 is disabled
    """
    try:
        encoded = bytearray(value, 'ascii')
    except UnicodeEncodeError:
        error = 'string ASCII encoding error: {0}'.format(value)
        if config.UTF8:
            logging.warning(error)
            encoded = bytearray(value, 'utf-8')
        else:
            logging.exception(error)
            raise PackingError(error)
    result = uint(len(encoded))
    result.extend(encoded)
    return result
@coerce
def bool_(value: bool) -> bytearray:
    """
    Pack a bool to Starbound format: a single 0x00/0x01 byte.

    :param value: bool
    :return: bytearray
    """
    return bytearray([1]) if value else bytearray([0])
# noinspection PyUnusedLocal
def none(value: Any=None) -> bytearray:
    """
    Pack None/unset to Starbound format: no payload bytes at all.

    :param value: unused
    :return: empty bytearray
    """
    return bytearray(0)
@coerce
def float_(value: float) -> bytearray:
    """
    Pack a float to Starbound format: big-endian IEEE-754 double.

    The original returned the raw ``bytes`` object from struct.pack even
    though the signature (and every sibling packer) promises ``bytearray``;
    wrap it so the annotation holds.

    :param value: float
    :return: bytearray
    """
    return bytearray(pack('>d', value))
def type_(value: type) -> bytearray:
    """
    Pack a value *type* tag to Starbound format (1..7, with OrderedDict
    sharing dict's code).

    :param value: type
    :return: bytearray
    :raises PackingError: for unsupported types
    """
    codes = {
        type(None): 1,
        float: 2,
        bool: 3,
        int: 4,
        str: 5,
        list: 6,
        dict: 7,
    }
    codes[OrderedDict] = codes[dict]
    try:
        return uint(codes[value])
    except KeyError:
        error = 'unsupported value type: {0}'.format(value)
        logger.exception(error)
        raise PackingError(error)
@coerce
def list_(value: List[SBT]) -> bytearray:
    """
    Pack a list to Starbound format: uint element count, then each element
    as a typed value.

    :param value: list
    :return: bytearray
    """
    result = uint(len(value))
    for item in value:
        result += typed(item)
    return result
@coerce
def dict_(value: Dict[str, SBT]) -> bytearray:
    """
    Pack a dict to Starbound format: uint entry count, then each entry as
    a packed string key followed by a typed value.

    :param value: dict
    :return: bytearray
    """
    result = uint(len(value))
    for key, item in value.items():
        result += str_(key)
        result += typed(item)
    return result
def typed(value: SBT) -> bytearray:
    """
    Pack type tag and value to Starbound format.

    :param value: value
    :return: bytearray
    """
    # Dispatch table from Python type to the matching packer.
    dispatch = {
        type(None): none,
        bool: bool_,
        int: int_,
        float: float_,
        list: list_,
        dict: dict_,
        OrderedDict: dict_,
        str: str_,
    }
    packer = dispatch[type(value)]
    return type_(type(value)) + packer(value)
def header(save_format: bytes, entity: str, flags: List[int]) -> bytearray:
    """
    Build a Starbound save header: format magic bytes, the entity name
    as a packed string, then the raw flag bytes.

    :param save_format: raw save-format magic bytes
    :param entity: entity name
    :param flags: flag byte values
    :return: bytearray
    """
    packed = bytearray(save_format)
    packed += str_(entity)
    packed += bytearray(flags)
    return packed
|
import asyncio
from pyppeteer import launch
from time import sleep
async def close_dialog(dialog):
    """Dismiss a JavaScript dialog (alert/confirm) raised by the page."""
    print("dialog popup")
    await dialog.dismiss()
async def main():
    """Log into the groupware site headlessly and open the attendance
    check-in popup, printing cookies and page content along the way.

    NOTE(review): credentials are hard-coded in the evaluate() script
    below -- move them to environment variables before sharing this file.
    """
    browser = await launch()
    page = await browser.newPage()
    await page.goto('http://gw.roigames.co.kr/')
    await page.screenshot({'path': 'example.png'})
    cookies = await page.cookies()
    print(cookies)
    # Fill the login form and trigger the site's own submit helper.
    await page.evaluate('''() => {
        document.getElementById("gw_user_id").value = "neosdc";
        document.getElementById("gw_user_pw").value = "vrmatrix3";
        encryptSubmit();
    }''')
    # BUG FIX: time.sleep() blocks the whole asyncio event loop (the
    # browser connection stalls); use the awaitable sleep instead.
    await asyncio.sleep(3)
    cookies = await page.cookies()
    print(cookies)
    # Ignore any dialog popups raised by the attendance page.
    page.on('dialog', lambda dialog: asyncio.ensure_future(close_dialog(dialog)))
    await page.goto('http://gw.roigames.co.kr/chtml/groupware/groupware_popup.php?file=gw_indolence_input&mode=attendance_in&employee_id=neosdc')
    await asyncio.sleep(3)
    #await page.screenshot({'path': 'example2.png'})
    print(await page.content())
    # >>> {'width': 800, 'height': 600, 'deviceScaleFactor': 1}
    await browser.close()
asyncio.get_event_loop().run_until_complete(main())
|
# -*- coding: utf-8 -*-
"""
this is a tool file .It has load_file,save_file,logistic,softmax function
"""
import pickle
import numpy as np
def read_file(path):
    """Read a file and return its content decoded as UTF-8."""
    with open(path, 'rb') as handle:
        return handle.read().decode('utf-8')
def writer_file(path, obj):
    """Write the string obj to path, encoded as UTF-8."""
    with open(path, 'wb') as handle:
        handle.write(obj.encode('utf-8'))
def read_file_encode(path, encode):
    """Read a file and decode its content with the given encoding."""
    with open(path, 'rb') as handle:
        return handle.read().decode(encode)
def writer_file_encode(path, obj, encode):
    """Write the string obj to path using the given encoding."""
    with open(path, 'wb') as handle:
        handle.write(obj.encode(encode))
def read_stopwords(stop_words_file):
    """Read a UTF-8 stop-word file and return its lines as a list.

    :param stop_words_file: path to a newline-separated word list
    :return: list of stop words (empty entries preserved)
    """
    # BUG FIX: the original opened in text mode and then called
    # .decode('utf-8') on the resulting str, which raises
    # AttributeError on Python 3. Read bytes and decode explicitly.
    with open(stop_words_file, 'rb') as f:
        stopwords = f.read().decode('utf-8')
    return stopwords.split('\n')
def read_bunch(bunch_path):
    """Unpickle and return the bunch object stored at bunch_path."""
    with open(bunch_path, 'rb') as handle:
        return pickle.load(handle)
def write_bunch(path, bunchobj):
    """Pickle bunchobj to path."""
    with open(path, 'wb') as handle:
        pickle.dump(bunchobj, handle)
def read_pickle(path):
    """Unpickle and return the object stored at path."""
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def write_pickle(path, obj):
    """Pickle obj to path."""
    with open(path, 'wb') as handle:
        pickle.dump(obj, handle)
def load_model(path):
    """Unpickle and return a model object (same as read_pickle)."""
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def jieba_init(setting):
    """Initialise the jieba tokenizer from a settings dict.

    Expects setting['JIEBA'] with optional keys:
      user_word: path to a user dictionary
      stop_word: path to a stop-word file (also cached as a frozenset
                 under setting['JIEBA']['stop_words'])
      tag:       path to a tab-separated "tag<TAB>word" file, replaced
                 in place by a {word: tag} dict

    No-op unless 'JIEBA' is present and setting['isJieba'] is truthy.
    """
    if 'JIEBA' not in setting:
        return
    if not setting.get('isJieba'):
        return
    import fileinput
    import jieba
    import jieba.analyse
    dic_jieba = setting['JIEBA']
    if 'user_word' in dic_jieba:
        jieba.load_userdict(dic_jieba['user_word'])
    if 'stop_word' in dic_jieba:
        jieba.analyse.set_stop_words(dic_jieba['stop_word'])
        # BUG FIX: the original called .decode('utf-8') on str objects
        # and .extend() on a filter object -- both crash on Python 3.
        # Read as UTF-8 text and build a real list instead.
        with open(dic_jieba['stop_word'], encoding='utf-8') as f:
            stopwords = [line.strip() for line in f if line.strip()]
        stopwords.extend([' ', '\t', '\n'])
        dic_jieba['stop_words'] = frozenset(stopwords)
    if 'tag' in dic_jieba:
        tag_file = dic_jieba['tag']
        dic_jieba['tag'] = {}
        for line in fileinput.input(tag_file):
            line = line.strip("\n").strip("\r")
            if not line:
                continue
            # word[0] = tag, word[1] = term (already str on Python 3,
            # so the original .decode('utf8') is gone as well).
            word = line.split('\t')
            dic_jieba['tag'][word[1]] = word[0]
class Logistic():
    """Binary logistic regression trained with batch gradient descent.

    Expects X of shape (dim, num_samples) and Y of shape
    (1, num_samples) (or broadcast-compatible).
    """

    def sigmoid(self, x):
        """Element-wise logistic function 1 / (1 + e^-x)."""
        return 1 / (1 + np.exp(-x))

    def init_w_b(self, dim):
        """Return zero-initialised weights of shape (dim, 1) and bias 0."""
        w = np.zeros((dim, 1))
        b = 0
        return w, b

    def propagate(self, w, b, X, Y):
        """One forward/backward pass.

        :param w: weights, shape (dim, 1)
        :param b: bias scalar
        :param X: features, shape (dim, num_samples)
        :param Y: labels, shape (1, num_samples)
        :return: ({'dw', 'db'} gradients, scalar cross-entropy cost)
        """
        m = X.shape[1]
        A = self.sigmoid(np.add(np.dot(w.T, X), b))
        # Negative log-likelihood averaged over the batch.
        cost = -(np.dot(Y, np.log(A).T) + np.dot(1 - Y, np.log(1 - A).T)) / m
        dw = np.dot(X, (A - Y).T) / m
        db = np.sum(A - Y) / m
        cost = np.squeeze(cost)
        grads = {"dw": dw,
                 "db": db}
        return grads, cost

    def optimize(self, w, b, X, Y, num_iterations, learning_rate):
        """Run gradient descent; return final params, last grads, and
        the cost sampled every 100 iterations."""
        import logging  # local import: this module configures no logger
        costs = []
        for i in range(int(num_iterations)):
            grads, cost = self.propagate(w, b, X, Y)
            dw = grads["dw"]
            db = grads["db"]
            w = w - learning_rate * dw
            b = b - learning_rate * db
            if i % 100 == 0:
                costs.append(cost)
                # BUG FIX: self.logger was never defined anywhere, so
                # this branch raised AttributeError; use the stdlib
                # logging module directly instead.
                logging.info("Cost after iteration %i: %f", i, cost)
        params = {"w": w,
                  "b": b}
        grads = {"dw": dw,
                 "db": db}
        return params, grads, costs
class Access_model():
    """Pickle-based model persistence helpers."""

    def dump_model(self, model_path, obj):
        """Serialize obj to model_path using pickle protocol 2.

        BUG FIX: pickle writes bytes, so the file must be opened in
        binary mode ('wb'); the original text mode ('w') raises
        TypeError on Python 3.
        """
        with open(model_path, 'wb') as f:
            pickle.dump(obj, f, protocol=2)

    def load_model(self, model_path):
        """Deserialize and return the object stored at model_path.

        BUG FIX: opened in binary mode ('rb') for the same reason.
        """
        with open(model_path, 'rb') as f:
            clf = pickle.load(f)
        return clf
# Implementation of softmax regression
class Softmax():
    """Multinomial softmax regression trained with gradient descent.

    parms: X: np.array(sample_nums, vector_dim)
           y: np.array(label_nums, sample_nums)
           w: np.array(label_nums, vector_dim)
    loss:  sum(-mul(y_true, log(A)))
    grad:  (y - y_hat)
    """

    def softmax(self, X):
        """Softmax of X.

        NOTE(review): normalises over *all* entries of X; for 2-D batch
        input a per-column (axis=0, keepdims) normalisation is usually
        intended -- confirm before relying on matrix inputs.
        """
        exps = np.exp(X)
        return exps / np.sum(exps)

    def stable_softmax(self, X):
        """Overflow-safe softmax: shift by the max before exponentiating."""
        exps = np.exp(X - np.max(X))
        return exps / np.sum(exps)

    def init_w(self, X):
        """Random uniform weights of shape (10, vector_dim)."""
        m = X.shape[1]
        w = np.random.uniform(0, 1, (10, m))
        return w

    def propagate(self, w, X, y):
        """Return the average gradient of the cross-entropy loss."""
        m = X.shape[1]
        A = self.stable_softmax(np.dot(w, X.T))
        grad = -1 / m * np.dot(y - A, X)
        return grad

    def optimize(self, w, X, y, num_iterations, learning_rate):
        """Gradient-descent loop.

        BUG FIX: the original had no return statement, so the updated
        weights were silently discarded and callers always got None.
        (The unused `costs` list and the empty `i % 100` branch were
        dead code and have been removed.)
        """
        for i in range(int(num_iterations)):
            grad = self.propagate(w, X, y)
            w = w - (learning_rate * grad)
        return w
def str2onehot(str, vocab):
    """Encode a string as a (len(str), len(vocab)) one-hot matrix.

    NOTE: characters missing from vocab hit str.find's -1 and therefore
    set the *last* vocab column.
    """
    onehot = np.zeros((len(str), len(vocab)))
    for row, character in enumerate(str):
        onehot[row, vocab.find(character)] = 1
    return onehot
def onehot2str(onehot, vocab):
    """Decode a (n, len(vocab)) one-hot matrix back to characters.

    Inverse of str2onehot: each row's argmax indexes into vocab.

    :return: list with one single-character string per row
    """
    max_index = np.argmax(np.array(onehot), axis=1)
    chars = []
    for i in range(max_index.shape[0]):
        # BUG FIX: argmax(axis=1) yields a 1-D array, so max_index[i]
        # is a scalar -- the original tried to iterate over it
        # ("for x in max_index[i]") and raised TypeError.
        chars.append(vocab[int(max_index[i])])
    return chars
|
#!/usr/bin/env python3
import argparse
import csv
import sys
import os
import xopen
import fcntl
# Linux pipe-resize fcntl commands; fall back to the raw numeric values
# on platforms whose fcntl module does not export them.
F_SETPIPE_SZ = getattr(fcntl, "F_SETPIPE_SZ", 1031)
F_GETPIPE_SZ = getattr(fcntl, "F_GETPIPE_SZ", 1032)
def isFloat(val):
    """Return True when val can be converted to a float.

    Handles None and other non-convertible values: the original only
    guarded None explicitly and let float() escape with an uncaught
    TypeError for e.g. lists.
    """
    try:
        float(val)
        return True
    except (ValueError, TypeError):
        return False
# The word 'all' in a slice argument maps to the full-range slice.
defaultSliceTypeTranslator = {'all': slice(None)}

def SliceType(translator=defaultSliceTypeTranslator):
    """Build an argparse-compatible converter from strings to ints or
    slices; words present in translator map to preset slices."""
    def str2slice(value):
        if value in translator:
            return translator[value]
        try:
            return int(value)
        except ValueError:
            # Parse colon notation; empty sections become None.
            sections = [int(piece) if piece else None for piece in value.split(':')]
            if len(sections) > 3:
                raise ValueError(f'{value} is not a valid slice notation')
            return slice(*sections)
    return str2slice
def isSliceType(value, translator=defaultSliceTypeTranslator):
    """Return True when value parses as an int/slice via SliceType."""
    if value is None:
        return False
    try:
        SliceType(translator)(value)
    except Exception:
        return False
    return True
# ---- command-line interface -------------------------------------------------
parser = argparse.ArgumentParser(description="Expand compressed histogramm notation")
parser.add_argument("input", nargs="?", help="compressed histogramm csv")
parser.add_argument("--filter-columns", default=[], type=SliceType(), nargs='*', help='filter based on these columns')
parser.add_argument("--filter-mode", choices=['any', 'all'], default='any', help='either a value must match in any of the columns of all columns must contain a filter value')
parser.add_argument("--filter-data", default=[], type=str, nargs='*', help='filter based on this data')
parser.add_argument("--slice", type=str, default='1:', help="slice histogram (default '1:')",)
parser.add_argument("--delimiter", help="csv delimiter (default '%(default)s')", default=';')
parser.add_argument("--flatten", choices=['buckets', 'counts', 'items'], help="output flat histogramm", default=[], nargs="*")
parser.add_argument("--item-columns", nargs="*", help="set item column (default %(default)s)", default=None)
parser.add_argument("--select-buckets", nargs="*", help="select buckets", default=False)
parser.add_argument("--sorted-input", action="store_true", help="optimize for a sorted input", default=False)
parser.add_argument("-o", "--output", help="output file (default stdout)", default=None)
args = parser.parse_args()
# ---- argument validation ----------------------------------------------------
if args.input and not os.path.exists(args.input):
    print("ERROR: csv input file not found!")
    parser.print_help()
    sys.exit(1)
# --filter-columns and --filter-data only make sense together.
if (len(args.filter_columns) > 0 and len(args.filter_data) == 0) or (len(args.filter_columns) == 0 and len(args.filter_data) > 0):
    raise Exception('Filtering requires --filter-columns and --filter-data!')
# Normalise --item-columns entries into slice objects (a bare number n
# becomes the single-column slice n:n+1).
if args.item_columns is not None:
    itemColumns = []
    for x in args.item_columns:
        if x.isnumeric():
            itemColumns.append(slice(int(x), int(x) + 1))
        else:
            x = [int(y) if y.isnumeric() else None for y in x.split(':')]
            if len(x) > 3 or all(y is None for y in x):
                raise Exception('invalid item columns parameter')
            itemColumns.append(slice(*x))
    args.item_columns = itemColumns
# Normalise --slice into a two-element [start, stop] list.
if args.slice.isnumeric():
    args.slice = [int(args.slice), int(args.slice) + 1]
else:
    args.slice = [int(x) if x.isnumeric() else None for x in args.slice.split(':')]
if len(args.slice) != 2:
    raise Exception('Invalid histogram slice')
args.slice[0] = args.slice[0] if args.slice[0] is not None else 0
# ---- input source -----------------------------------------------------------
if not args.input:
    # Reading from a pipe: try to enlarge the pipe buffer for throughput
    # (best effort; /proc path implies Linux only).
    try:
        fcntl.fcntl(sys.stdin.fileno(), F_SETPIPE_SZ, int(open("/proc/sys/fs/pipe-max-size", 'r').read()))
    except Exception:
        pass
    fInput = sys.stdin
else:
    fInput = xopen.xopen(args.input, 'r')
csvFile = csv.reader(fInput, delimiter=args.delimiter)
# Skip leading '#' comment rows; the first non-comment row is the header.
header = None
for header in csvFile:
    if header[0].startswith('#'):
        continue
    break
if header is None:
    raise Exception('Could not find a histogram header!')
# An open-ended slice stop defaults to the full header width.
args.slice[1] = args.slice[1] if args.slice[1] is not None else len(header)
if args.slice[0] == args.slice[1]:
    raise Exception('Invalid histogram slice range')
# Header cells of the item columns (flattened in column order), if any.
itemsHeader = None if args.item_columns is None else [x for sx in [header[s] for s in args.item_columns] for x in sx]
if 'buckets' in args.flatten and not all(isFloat(x) for x in header[slice(*args.slice)]):
    raise Exception('Flatten buckets only works with numeric header')
# Optional bucket sub-selection by header name.
selector = None
if args.select_buckets:
    selector = [i for i, x in enumerate(header[slice(*args.slice)]) if x in args.select_buckets]
# ---- output header ----------------------------------------------------------
outputFile = sys.stdout if not args.output else xopen.xopen(args.output, 'w')
if len(args.flatten) == 0:
    outputFile.write(args.delimiter.join(header) + '\n')
elif 'items' not in args.flatten and any(x in args.flatten for x in ['counts', 'buckets']):
    outputFile.write(args.delimiter.join(header[:args.slice[0]] + [x for x in ['counts', 'buckets'] if x in args.flatten]) + '\n')
elif 'items' in args.flatten and any(x in args.flatten for x in ['counts', 'buckets']):
    outputFile.write(args.delimiter.join(((itemsHeader if itemsHeader is not None else ['all']) + [x for x in ['counts', 'buckets'] if x in args.flatten])) + '\n')
else:
    outputFile.write(args.delimiter.join((itemsHeader if itemsHeader is not None else ['all']) + header[slice(*args.slice)]) + '\n')
# Accumulators used by the flatten-by-item parsers below.
flatHist = {}
flat = {}
def parseNormal(line, itemIndex, itemHeaders, itemValues):
    """Emit the row unchanged (bucket columns possibly sub-selected)."""
    row = line[:args.slice[0]] + itemValues + line[args.slice[1]:]
    outputFile.write(args.delimiter.join(row) + '\n')
def parseCounts(line, itemIndex, itemHeaders, itemValues):
    """Replace the bucket columns with the total event count."""
    total = sum(float(v) for v in itemValues if len(v) > 0)
    row = line[:args.slice[0]] + [str(total)] + line[args.slice[1]:]
    outputFile.write(args.delimiter.join(row) + '\n')
def parseBuckets(line, itemIndex, itemHeaders, itemValues):
    """Replace the bucket columns with the count-weighted bucket sum."""
    weighted = sum(float(h) * float(v) for h, v in zip(itemHeaders, itemValues) if len(v) > 0)
    row = line[:args.slice[0]] + [str(weighted)] + line[args.slice[1]:]
    outputFile.write(args.delimiter.join(row) + '\n')
def parseCountsBuckets(line, itemIndex, itemHeaders, itemValues):
    """Replace the bucket columns with total count and weighted sum."""
    total = sum(float(v) for v in itemValues if len(v) > 0)
    weighted = sum(float(h) * float(v) for h, v in zip(itemHeaders, itemValues) if len(v) > 0)
    row = line[:args.slice[0]] + [str(total), str(weighted)] + line[args.slice[1]:]
    outputFile.write(args.delimiter.join(row) + '\n')
def parseItems(line, itemIndex, itemHeaders, itemValues):
    """Accumulate this row's bucket vector into flatHist[itemIndex]."""
    global flatHist
    if itemIndex not in flatHist:
        flatHist[itemIndex] = [0] * len(itemValues)
    acc = flatHist[itemIndex]
    flatHist[itemIndex] = [prev + float(v) if len(v) > 0 else prev
                           for prev, v in zip(acc, itemValues)]
def parseItemsBuckets(line, itemIndex, itemHeaders, itemValues):
    """Accumulate the count-weighted bucket sum for this item."""
    global flat
    if itemIndex not in flat:
        flat[itemIndex] = {'counts': 0, 'buckets': 0}
    weighted = sum(float(h) * float(v) for h, v in zip(itemHeaders, itemValues) if len(v) > 0)
    flat[itemIndex]['buckets'] += weighted
def parseItemsCounts(line, itemIndex, itemHeaders, itemValues):
    """Accumulate the total event count for this item."""
    global flat
    if itemIndex not in flat:
        flat[itemIndex] = {'counts': 0, 'buckets': 0}
    total = sum(float(v) for v in itemValues if len(v) > 0)
    flat[itemIndex]['counts'] += total
def parseItemsCountsBuckets(line, itemIndex, itemHeaders, itemValues):
    """Accumulate both total count and weighted bucket sum per item."""
    global flat
    if itemIndex not in flat:
        flat[itemIndex] = {'counts': 0, 'buckets': 0}
    total = sum(float(v) for v in itemValues if len(v) > 0)
    weighted = sum(float(h) * float(v) for h, v in zip(itemHeaders, itemValues) if len(v) > 0)
    flat[itemIndex]['counts'] += total
    flat[itemIndex]['buckets'] += weighted
def outputFlatHist(flush = True):
    """Write accumulated per-item histograms; optionally reset state."""
    global flatHist
    for key, vec in flatHist.items():
        outputFile.write(args.delimiter.join([key] + [str(v) for v in vec]) + '\n')
    if flush:
        flatHist = {}
def outputFlat(flush = True):
    """Write accumulated per-item counts/buckets totals; optionally reset."""
    global flat
    for key, totals in flat.items():
        selected = [str(totals[col]) for col in ['counts', 'buckets'] if col in args.flatten]
        outputFile.write(args.delimiter.join([key] + selected) + '\n')
    if flush:
        flat = {}
# Pick the row parser once, based on the flatten-mode combination.
if 'items' in args.flatten:
    if all(x in args.flatten for x in ['counts', 'buckets']):
        parser = parseItemsCountsBuckets
    elif 'counts' in args.flatten:
        parser = parseItemsCounts
    elif 'buckets' in args.flatten:
        parser = parseItemsBuckets
    else:
        parser = parseItems
else:
    if all(x in args.flatten for x in ['counts', 'buckets']):
        parser = parseCountsBuckets
    elif 'counts' in args.flatten:
        parser = parseCounts
    elif 'buckets' in args.flatten:
        parser = parseBuckets
    else:
        parser = parseNormal
applyFilter = len(args.filter_columns) > 0
# Each filter column becomes a slice so that single indices and ranges
# are handled uniformly below.
filterSlices = [s if isinstance(s, slice) else slice(s, s + 1) for s in args.filter_columns]
filterFunc = any if args.filter_mode == 'any' else all
itemHeaders = header[slice(*args.slice)]
if selector is not None:
    itemHeaders = [itemHeaders[i] for i in selector]
lastItemIndex = None
# Sorted-input optimisation: flush per-item accumulators as soon as the
# item key changes (only valid when the item columns lead the row).
optFlatItems = 'items' in args.flatten and args.sorted_input and args.item_columns is not None and all(x == y for (x, y) in zip(itemsHeader, header[:len(itemsHeader)]))
flatOutputFunc = outputFlatHist if not any(x in args.flatten for x in ['counts', 'buckets']) else outputFlat
for i, line in enumerate(csvFile):
    # Skip comment rows and degenerate lines.
    if line[0].startswith('#'):
        continue
    if len(line) < 2:
        continue
    if applyFilter and not filterFunc(v in args.filter_data for lv in [line[slc] for slc in filterSlices] for v in lv):
        continue
    itemValues = line[slice(*args.slice)]
    if selector is not None:
        itemValues = [itemValues[i] for i in selector]
    # Item key: either the constant 'all' or the joined item columns.
    itemsIndex = 'all' if args.item_columns is None else args.delimiter.join([x for sx in [line[s] for s in args.item_columns] for x in sx])
    if optFlatItems and itemsIndex != lastItemIndex:
        flatOutputFunc()
        lastItemIndex = itemsIndex
    parser(line, itemsIndex, itemHeaders, itemValues)
# Emit whatever is still accumulated.
if 'items' in args.flatten:
    flatOutputFunc()
if (args.output):
    outputFile.close()
|
from tests.modules.FlaskModule.API.user.BaseUserAPITest import BaseUserAPITest
class UserQueryStatsTest(BaseUserAPITest):
    """Tests for /api/user/stats: authentication handling, allowed HTTP
    verbs, and per-entity access control."""
    test_endpoint = '/api/user/stats'

    def setUp(self):
        super().setUp()

    def tearDown(self):
        super().tearDown()

    def _assert_stats_query(self, params):
        """Shared check (deduplicates eight identical test bodies):
        admin gets 200 with a non-empty JSON body for these params,
        while a non-privileged user (user4) gets 403."""
        response = self._get_with_user_http_auth(self.test_client, username='admin', password='admin',
                                                 params=params)
        self.assertEqual(200, response.status_code)
        self.assertTrue(response.is_json)
        self.assertGreater(len(response.json), 0)
        response = self._get_with_user_http_auth(self.test_client, username='user4', password='user4',
                                                 params=params)
        self.assertEqual(403, response.status_code)

    def test_no_auth(self):
        with self._flask_app.app_context():
            response = self.test_client.get(self.test_endpoint)
            self.assertEqual(401, response.status_code)

    def test_post_no_auth(self):
        with self._flask_app.app_context():
            response = self.test_client.post(self.test_endpoint)
            self.assertEqual(405, response.status_code)

    def test_delete_no_auth(self):
        with self._flask_app.app_context():
            response = self.test_client.delete(self.test_endpoint)
            self.assertEqual(405, response.status_code)

    def test_get_endpoint_invalid_http_auth(self):
        with self._flask_app.app_context():
            response = self._get_with_user_http_auth(self.test_client, username='invalid', password='invalid')
            self.assertEqual(401, response.status_code)

    def test_get_endpoint_invalid_token_auth(self):
        with self._flask_app.app_context():
            response = self._get_with_user_token_auth(self.test_client, token='invalid')
            self.assertEqual(401, response.status_code)

    def test_post_endpoint_invalid_token_auth(self):
        with self._flask_app.app_context():
            response = self._post_with_user_token_auth(self.test_client, token='invalid')
            self.assertEqual(405, response.status_code)

    def test_post_endpoint_invalid_http_auth(self):
        with self._flask_app.app_context():
            response = self._post_with_user_http_auth(self.test_client, username='invalid', password='invalid')
            self.assertEqual(405, response.status_code)

    def test_delete_endpoint_invalid_http_auth(self):
        with self._flask_app.app_context():
            response = self._delete_with_user_http_auth(self.test_client, username='invalid', password='invalid')
            self.assertEqual(405, response.status_code)

    def test_delete_endpoint_invalid_token_auth(self):
        with self._flask_app.app_context():
            response = self._delete_with_user_token_auth(self.test_client, token='invalid')
            self.assertEqual(405, response.status_code)

    def test_query_no_params_as_admin(self):
        with self._flask_app.app_context():
            response = self._get_with_user_http_auth(self.test_client, username='admin', password='admin')
            self.assertEqual(400, response.status_code)

    def test_query_user_group_stats(self):
        with self._flask_app.app_context():
            self._assert_stats_query({'id_user_group': 1})

    def test_query_user_stats(self):
        with self._flask_app.app_context():
            self._assert_stats_query({'id_user': 1})

    def test_query_site_stats(self):
        with self._flask_app.app_context():
            self._assert_stats_query({'id_site': 1})

    def test_query_project_stats(self):
        with self._flask_app.app_context():
            self._assert_stats_query({'id_project': 1})

    def test_query_participant_group_stats(self):
        with self._flask_app.app_context():
            self._assert_stats_query({'id_group': 1})

    def test_query_session_stats(self):
        with self._flask_app.app_context():
            self._assert_stats_query({'id_session': 1})

    def test_query_participant_stats(self):
        with self._flask_app.app_context():
            self._assert_stats_query({'id_participant': 1})

    def test_query_device_stats(self):
        with self._flask_app.app_context():
            self._assert_stats_query({'id_device': 1})
|
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from django.shortcuts import get_object_or_404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import permissions, status, generics
from quizzz.communities.permissions import IsCommunityMember, IsCommunityAdmin
from quizzz.common.permissions import IsSafeMethod, IsAuthenticated
from .models import Tournament, Round
from .serializers import (
TournamentSerializer,
ListedRoundSerializer,
ListedQuizSerializer,
EditableRoundSerializer,
)
from quizzz.quizzes.models import Quiz
class TournamentListOrCreate(APIView):
    """
    Create a new tournament or list group's tournaments.
    """
    permission_classes = [
        IsAuthenticated,
        (IsSafeMethod & IsCommunityMember) | IsCommunityAdmin,
    ]

    def get(self, request, community_id):
        queryset = Tournament.objects.filter(community_id=community_id).all()
        return Response(TournamentSerializer(queryset, many=True).data)

    def post(self, request, community_id):
        serializer = TournamentSerializer(data=request.data)
        # raise_exception=True turns validation failure into a 400.
        serializer.is_valid(raise_exception=True)
        serializer.save(community_id=community_id)
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class TournamentDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a single existing tournament."""
    serializer_class = TournamentSerializer
    queryset = Tournament.objects.all()
    lookup_url_kwarg = "tournament_id"
    permission_classes = [
        IsAuthenticated,
        (IsSafeMethod & IsCommunityMember) | IsCommunityAdmin,
    ]
class RoundListOrCreate(APIView):
    """
    Create a new round or list tournament rounds.
    """
    permission_classes = [
        IsAuthenticated,
        (IsSafeMethod & IsCommunityMember) | IsCommunityAdmin,
    ]

    def get(self, request, community_id, tournament_id):
        # Join quiz + quiz author in the same query and prefetch the
        # requesting user's plays for each round.
        queryset = (Round.objects
                    .filter(tournament_id=tournament_id)
                    .select_related('quiz', 'quiz__user')
                    .prefetch_related(Round.get_user_plays_prefetch_object(request.user.id)))
        serializer = ListedRoundSerializer(queryset.all(), many=True, context={'request': request})
        return Response(serializer.data)

    def post(self, request, community_id, tournament_id):
        serializer = EditableRoundSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        new_round = serializer.save(tournament_id=tournament_id)
        new_round.load_user_plays(request.user.id)
        detailed = ListedRoundSerializer(new_round, context={'request': request})
        return Response(detailed.data, status=status.HTTP_201_CREATED)
class RoundDetail(generics.RetrieveDestroyAPIView):
    """
    Retrieve/update/delete existing round.
    """
    permission_classes = [
        IsAuthenticated,
        (IsSafeMethod & IsCommunityMember) | IsCommunityAdmin,
    ]
    queryset = Round.objects.all()
    lookup_url_kwarg = "round_id"

    def get_serializer_class(self):
        # NOTE(review): implicitly returns None for every method other
        # than DELETE. GET and PUT below build their serializers
        # explicitly, but any generic DRF code path relying on this
        # hook would break -- confirm this is intended.
        if self.request.method == 'DELETE':
            return EditableRoundSerializer

    def get(self, request, community_id, round_id):
        # Fetch + object-level permission check, then attach the current
        # user's plays before serializing.
        obj = get_object_or_404(Round.objects.filter(pk=round_id))
        self.check_object_permissions(self.request, obj)
        obj.load_user_plays(request.user.id)
        serializer = ListedRoundSerializer(obj, context={'request': request})
        return Response({
            "round": serializer.data,
            "standings": obj.get_standings(),
        })

    def put(self, request, community_id, round_id):
        # Validate with the editable serializer, then echo back the
        # full listed representation (with the user's plays loaded).
        obj = get_object_or_404(Round.objects.filter(pk=round_id))
        self.check_object_permissions(self.request, obj)
        serializer = EditableRoundSerializer(obj, data=request.data)
        if serializer.is_valid(raise_exception=True):
            round_obj = serializer.save(tournament_id=obj.tournament_id)
            round_obj.load_user_plays(request.user.id)
            detailed_serializer = ListedRoundSerializer(round_obj, context={'request': request})
            return Response(detailed_serializer.data)
class QuizPool(APIView):
    """
    List group's available quizzes.
    """
    permission_classes = [ IsAuthenticated, IsCommunityAdmin ]

    def get(self, request, community_id):
        # Finalized quizzes in this community not yet attached to a
        # round, newest first.
        pool = (Quiz.objects
                .filter(community_id=community_id, is_finalized=True)
                .filter(round__id=None)
                .order_by('-time_created'))
        serializer = ListedQuizSerializer(pool.all(), many=True)
        return Response(serializer.data)
class TournamentStandings(APIView):
    """Return tournament standings, cached for 30 seconds."""
    permission_classes = [ IsAuthenticated, IsCommunityMember ]

    @method_decorator(cache_page(30))
    def get(self, request, community_id, tournament_id):
        tournament = get_object_or_404(Tournament.objects.filter(pk=tournament_id))
        return Response(tournament.get_standings())
from flask import render_template, redirect, url_for, flash, request
from werkzeug.urls import url_parse
from flask_login import login_user, logout_user, current_user
from flask_babel import _
from app import db
from app.auth import bp
from app.auth.forms import LoginForm, RegistrationForm, \
ResetPasswordRequestForm, ResetPasswordForm
from app.models import User
from app.auth.email import send_password_reset_email
@bp.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate with username, password and TOTP token."""
    if current_user.is_authenticated:
        return redirect(url_for('main.index'))
    form = LoginForm()
    if not form.validate_on_submit():
        return render_template('auth/login.html', title=_('Sign In'), form=form)
    user = User.query.filter_by(username=form.username.data).first()
    credentials_ok = (user is not None
                      and user.check_password(form.password.data)
                      and user.verify_totp(form.token.data))
    if not credentials_ok:
        flash(_('Invalid username, token or password'))
        return redirect(url_for('auth.login'))
    login_user(user, remember=form.remember_me.data)
    # Only honour same-site redirect targets (open-redirect guard).
    next_page = request.args.get('next')
    if not next_page or url_parse(next_page).netloc != '':
        next_page = url_for('main.index')
    return redirect(next_page)
@bp.route('/logout')
def logout():
    """Log the current user out and return to the main page."""
    logout_user()
    return redirect(url_for('main.index'))
@bp.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new account, then send the user to 2FA setup."""
    # BUG FIX: 'session' is used below but never imported at module
    # level; import it locally so this route is self-contained.
    from flask import session
    if current_user.is_authenticated:
        return redirect(url_for('main.index'))
    form = RegistrationForm()
    if form.validate_on_submit():
        user = User(username=form.username.data, email=form.email.data)
        user.set_password(form.password.data)
        db.session.add(user)
        db.session.commit()
        flash(_('Congratulations, you are now a registered user!'))
        # Stash the username so /twofactor and /qrcode can find the user.
        session['username'] = user.username
        # BUG FIX: the endpoint lives on this blueprint, so it must be
        # qualified (this file uses 'auth.login' etc. elsewhere); a bare
        # 'two_factor_setup' raises BuildError at runtime.
        return redirect(url_for('auth.two_factor_setup'))
    return render_template('auth/register.html', title=_('Register'),
                           form=form)
@bp.route('/twofactor')
def two_factor_setup():
    """Show the TOTP QR-code setup page for a freshly registered user."""
    # BUG FIX: 'session' is not in the module-level flask imports.
    from flask import session
    if 'username' not in session:
        # BUG FIX: every other redirect in this module targets
        # 'main.index'; a bare 'index' endpoint would raise BuildError.
        return redirect(url_for('main.index'))
    user = User.query.filter_by(username=session['username']).first()
    if user is None:
        return redirect(url_for('main.index'))
    # since this page contains the sensitive QR code, make sure the browser
    # does not cache it
    return render_template('two-factor-setup.html'), 200, {
        'Cache-Control': 'no-cache, no-store, must-revalidate',
        'Pragma': 'no-cache',
        'Expires': '0'}
@bp.route('/qrcode')
def qrcode():
    """Serve the (uncacheable) TOTP provisioning QR code as SVG."""
    # BUG FIX: none of these names are imported at module level, so the
    # route raised NameError; bring them in locally. pyqrcode is already
    # a runtime dependency of this route (used below).
    from io import BytesIO
    from flask import abort, session
    import pyqrcode
    if 'username' not in session:
        abort(404)
    user = User.query.filter_by(username=session['username']).first()
    if user is None:
        abort(404)
    # for added security, remove username from session
    del session['username']
    # render QR code for OTP auth
    url = pyqrcode.create(user.get_totp_uri())
    stream = BytesIO()
    url.svg(stream, scale=5)
    return stream.getvalue(), 200, {
        'Content-Type': 'image/svg+xml',
        'Cache-Control': 'no-cache, no-store, must-revalidate',
        'Pragma': 'no-cache',
        'Expires': '0'}
@bp.route('/reset_password_request', methods=['GET', 'POST'])
def reset_password_request():
    """Email a password-reset link to the given address, if known."""
    if current_user.is_authenticated:
        return redirect(url_for('main.index'))
    form = ResetPasswordRequestForm()
    if not form.validate_on_submit():
        return render_template('auth/reset_password_request.html',
                               title=_('Reset Password'), form=form)
    user = User.query.filter_by(email=form.email.data).first()
    if user:
        send_password_reset_email(user)
    # Same message whether or not the address exists (no enumeration).
    flash(
        _('Check your email for the instructions to reset your password'))
    return redirect(url_for('auth.login'))
@bp.route('/reset_password/<token>', methods=['GET', 'POST'])
def reset_password(token):
    """Set a new password from a valid reset token."""
    if current_user.is_authenticated:
        return redirect(url_for('main.index'))
    user = User.verify_reset_password_token(token)
    if not user:
        return redirect(url_for('main.index'))
    form = ResetPasswordForm()
    if not form.validate_on_submit():
        return render_template('auth/reset_password.html', form=form)
    user.set_password(form.password.data)
    db.session.commit()
    flash(_('Your password has been reset.'))
    return redirect(url_for('auth.login'))
|
from abc import abstractmethod
from datetime import datetime
from decimal import Decimal as D
from decimal import InvalidOperation
from typing import Any, Optional, TypeVar
from exchange.exceptions import DateTimeParseException
from exchange.operation_type import OperationType as OT
from flask_restplus.fields import Raw
class CustomField(Raw):
    """Base class for request-payload validators built on restplus Raw."""

    def __init__(self, *args: Any, **kwargs: Any):
        super(CustomField, self).__init__(*args, **kwargs)

    def validate_empty(self) -> bool:
        """An empty value is acceptable only for non-required fields."""
        return not self.required

    # Generic placeholder used by subclass validate() signatures.
    T = TypeVar('T')

    @abstractmethod
    def validate(self, value: T) -> bool:
        pass
class String(CustomField):
    """Validator for plain string payload fields."""
    __schema_example__ = 'string'

    def validate(self, value: CustomField.T) -> bool:
        # Empty/falsy values defer to the required flag.
        if not value:
            return self.validate_empty()
        return isinstance(value, str)
class DateTime(CustomField):
    """Validator/parser for 'YYYY-MM-DD HH:MM:SS' date-time strings."""
    __schema_format__ = 'date-time'
    __schema_example__ = '2016-06-06 11:22:33'
    dt_format = '%Y-%m-%d %H:%M:%S'

    def from_str(self, value: str) -> Optional[datetime]:
        """Parse value (falsy input yields None).

        :raises DateTimeParseException: on malformed input
        """
        try:
            return None if not value else datetime.strptime(value, self.dt_format)
        # BUG FIX: catch only parse-related errors; the original
        # 'except BaseException' also swallowed KeyboardInterrupt and
        # SystemExit and re-raised them as a parse error.
        except (ValueError, TypeError):
            raise DateTimeParseException()

    def validate(self, value: CustomField.T) -> bool:
        if not value or not isinstance(value, str):
            return self.validate_empty()
        try:
            self.from_str(value)
        except DateTimeParseException:
            return False
        return True
class Decimal(CustomField):
    """Validator for decimal numbers transmitted as strings."""
    __schema_type__ = 'number'
    __schema_format__ = 'decimal'
    __schema_example__ = '0.0'

    def validate(self, value: CustomField.T) -> bool:
        if value is None:
            return self.validate_empty()
        if not isinstance(value, str):
            return False
        # Valid iff the decimal module can parse the string.
        try:
            D(value)
        except InvalidOperation:
            return False
        return True
class OperationType(CustomField):
    """Validator for operation-type names (members of the OT enum)."""
    __schema_type__ = 'string'
    __schema_example__ = 'BUY'

    def validate(self, value: CustomField.T) -> bool:
        if not isinstance(value, str):
            return False
        # Valid iff the name exists in the OT enum.
        try:
            return bool(OT[value])
        except KeyError:
            return False
class Integer(CustomField):
    """Validator for integer payload fields."""
    __schema_type__ = 'integer'
    __schema_format__ = 'int'
    __schema_example__ = 0
    T = TypeVar('T')

    def validate(self, value: T) -> bool:
        if value is None:
            return self.validate_empty()
        # BUG FIX: bool is a subclass of int, so True/False previously
        # validated as integers; reject booleans explicitly.
        if isinstance(value, bool) or not isinstance(value, int):
            return False
        return True
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Prompt (Latvian): "Dear user, please enter a number".
a = input("Cien. liet., ludzu, ievadi skaitli: ")
a = int(a)
print("Liet., Tu esi ievadijis skaitli: %d"%(a))
aa = a * a  # square of the input (computed but never printed)
print("Liet., Tu Esi Ievadijis skaitli: %d"%(a))
# NOTE(review): the second print repeats the input instead of showing
# the square; the duplicate recomputation of aa that followed was dead
# code and has been removed.
|
################################################################################
### Init
################################################################################
import logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',
datefmt="%Y-%m-%d %H:%M:%S")
logger = logging.getLogger(__name__)
import os
import numpy as np
import tensorflow as tf
import argparse
import compression_model
import pc_io
import multiprocessing
import gzip
from tqdm import tqdm
# Fix both NumPy and TF graph-level seeds so decompression runs are reproducible.
np.random.seed(42)
tf.set_random_seed(42)
# Use CPU
# For unknown reasons, this is 3 times faster than GPU
os.environ['CUDA_VISIBLE_DEVICES'] = ''
################################################################################
### Script
################################################################################
# Each compressed file starts with two raw shape vectors of three uint16 values.
TYPE = np.uint16
DTYPE = np.dtype(TYPE)
SHAPE_LEN = 3

def load_compressed_file(file):
    """Read one compressed sample: returns (x_shape, y_shape, payload bytes)."""
    shape_bytes = DTYPE.itemsize * SHAPE_LEN
    with gzip.open(file, "rb") as stream:
        shapes = [np.frombuffer(stream.read(shape_bytes), dtype=TYPE)
                  for _ in range(2)]
        payload = stream.read()
    return shapes[0], shapes[1], payload
def load_compressed_files(files, batch_size=32):
    """Read every compressed file into memory with a process pool; returns an ndarray."""
    total = len(files)
    with multiprocessing.Pool() as pool:
        logger.info('Loading data into memory (parallel reading)')
        jobs = pool.imap(load_compressed_file, files, batch_size)
        data = np.array(list(tqdm(jobs, total=total)))
    return data
def input_fn(features, batch_size):
    """Wrap an iterable of serialized strings as a batched (string, 0) dataset on CPU."""
    with tf.device('/cpu:0'):
        dummy_label = tf.constant(0)
        dataset = tf.data.Dataset.from_generator(lambda: features, (tf.string))
        dataset = dataset.map(lambda s: (s, dummy_label)).batch(batch_size)
        return dataset.make_one_shot_iterator().get_next()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        prog='decompress.py',
        description='Decompress a file.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        'input_dir',
        help='Input directory.')
    parser.add_argument(
        'input_pattern',
        help='Mesh detection pattern.')
    parser.add_argument(
        'output_dir',
        help='Output directory.')
    parser.add_argument(
        'checkpoint_dir',
        help='Directory where to save/load model checkpoints.')
    parser.add_argument(
        '--batch_size', type=int, default=1,
        help='Batch size.')
    parser.add_argument(
        '--read_batch_size', type=int, default=1,
        help='Batch size for parallel reading.')
    parser.add_argument(
        '--num_filters', type=int, default=32,
        help='Number of filters per layer.')
    parser.add_argument(
        '--preprocess_threads', type=int, default=16,
        help='Number of CPU threads to use for parallel decoding.')
    parser.add_argument(
        '--output_extension', default='.ply',
        help='Output extension.')
    args = parser.parse_args()
    assert args.batch_size > 0, 'batch_size must be positive'
    DATA_FORMAT = 'channels_first'
    # Mirror the input directory layout under output_dir, one .ply per input file.
    args.input_dir = os.path.normpath(args.input_dir)
    len_input_dir = len(args.input_dir)
    assert os.path.exists(args.input_dir), "Input directory not found"
    input_glob = os.path.join(args.input_dir, args.input_pattern)
    files = pc_io.get_files(input_glob)
    assert len(files) > 0, "No input files found"
    filenames = [x[len_input_dir+1:] for x in files]
    # NOTE(review): '.ply' is hard-coded here even though --output_extension
    # exists — confirm which one is intended.
    output_files = [os.path.join(args.output_dir, x + '.ply') for x in filenames]
    compressed_data = load_compressed_files(files, args.read_batch_size)
    # The graph is built once for a single (x_shape, y_shape) pair, so every
    # input file must share the same shapes.
    x_shape = compressed_data[0][0]
    y_shape = compressed_data[0][1]
    assert np.all([np.all(x[0] == x_shape) for x in compressed_data]), 'All x_shape must be equal'
    assert np.all([np.all(x[1] == y_shape) for x in compressed_data]), 'All y_shape must be equal'
    compressed_strings = (x[2] for x in compressed_data)
    estimator = tf.estimator.Estimator(
        model_fn=compression_model.model_fn,
        model_dir=args.checkpoint_dir,
        params={
            'num_filters': args.num_filters,
            'checkpoint_dir': args.checkpoint_dir,
            'data_format': DATA_FORMAT,
            'decompress': True,
            'x_shape': x_shape,
            'y_shape': y_shape
        })
    # hook = tf.train.ProfilerHook(save_steps=1, output_dir='./decompress_profiler')
    result = estimator.predict(
        input_fn=lambda: input_fn(compressed_strings, args.batch_size),
        predict_keys=['x_hat_quant'])
    # hooks=[hook])
    len_files = len(files)
    i = 0
    for ret, ori_file, output_file in zip(result, files, output_files):
        logger.info(f'{i}/{len_files} - Writing {ori_file} to {output_file}')
        output_dir, _ = os.path.split(output_file)
        os.makedirs(output_dir, exist_ok=True)
        # Remove the geometry channel
        # Voxel occupancy -> point list: indices of nonzero voxels become coordinates.
        pa = np.argwhere(ret['x_hat_quant'][0]).astype('float32')
        pc_io.write_df(output_file, pc_io.pa_to_df(pa))
        i += 1
|
#!/usr/bin/python
#
# git log --pretty="%H %P" | this program
# See option descriptions at bottom
#
# This little program cranks through a series of patches, trying to determine
# which trees each flowed through on its way to the mainline. It does a
# 'git describe' on each, so don't expect it to be fast for large numbers
# of patches.
#
# One warning: it is easily confused by local branches, tags, etc. For
# best results, run it on a mainline tree with no added frobs. Using
# "git clone --reference" is a relatively easy way to come up with such
# a tree without redownloading the whole mess.
#
import sys, subprocess, argparse, pickle
import graphviz
import patterns
# Regexes (from the companion `patterns` module) that classify merge-commit
# messages: external-tree pulls vs. two flavours of internal branch merges.
Mergepat = patterns.patterns['ExtMerge']
IntMerge = patterns.patterns['IntMerge']
IntMerge2 = patterns.patterns['IntMerge2']
# Global id -> Merge registry, populated by Merge.__init__.
Mergelist = { }
class Merge:
    """One merge commit plus the commits and sub-merges that flowed in through it."""

    def __init__(self, id, tree = None):
        self.id = id
        self.commits = [ ]
        self.merges = [ ]
        self.internal = False
        self.signed = False
        self.tree = tree or '?'
        # Only dig through "git log" when the caller did not name the tree.
        if tree is None:
            self.getdesc()
        Mergelist[id] = self

    def normalize_tree(self, tree):
        """Strip the URL scheme and shorten kernel.org paths to $KORG/<dir>/<repo>."""
        scheme_end = tree.find('://')
        if scheme_end > 0:
            tree = tree[scheme_end + 3:]
        if tree.find('git.kernel.org') < 0:
            return tree
        parts = tree.split('/')
        return '$KORG/%s/%s' % (parts[-2], parts[-1])

    def getdesc(self):
        """Classify this merge (external tree vs internal) from its log message."""
        cmd = ['git', 'log', '-1', '--show-signature', self.id]
        proc = subprocess.Popen(cmd, cwd = Repo, stdout = subprocess.PIPE,
                                bufsize = 1)
        #
        # A merge whose message matches no pattern was heavily hand-edited by
        # the committer; treating it as internal is the least-bad default.
        #
        self.internal = True
        for line in proc.stdout.readlines():
            # A gpg: line means the pull was from a signed tag.
            if line.startswith('gpg:'):
                self.signed = True
                continue
            # Maybe it's a merge of an external tree.
            hit = Mergepat.search(line)
            if hit:
                self.tree = self.normalize_tree(hit.group(3))
                self.internal = False
                break
            # Or maybe it's an internal merge.
            if IntMerge.search(line) or IntMerge2.search(line):
                self.internal = True
                break
        proc.wait()

    def add_commit(self, id):
        self.commits.append(id)

    def add_merge(self, merge):
        self.merges.append(merge)
#
# Read the list of commits from the input stream and find which
# merge brought in each.
#
def ingest_commits(src):
    """Read "commit parent [parent2...]" lines (git log --pretty='%H %P') from src.

    Attaches every commit to the Merge that pulled it in, creating Merge
    objects for merge commits as they are encountered.
    """
    count = 0
    expected = 'nothing yet'
    for line in src.readlines():
        sline = line[:-1].split()
        commit = sline[0]
        # More than one parent hash => this commit is itself a merge.
        is_merge = (len(sline) > 2)
        # Fast path: a non-merge commit that is the first parent of the
        # previously seen commit belongs to the same merge; avoids a git call.
        if (commit == expected) and not is_merge:
            mc = last_merge
        else:
            mc = Mergelist[find_merge(sline[0])] # Needs try
        if is_merge:
            mc.add_merge(Merge(commit))
        else:
            mc.add_commit(commit)
        # Progress counter on stderr every 50 commits.
        count += 1
        if (count % 50) == 0:
            sys.stderr.write('\r%5d ' % (count))
            sys.stderr.flush()
        # NOTE(review): a parentless root commit would make sline[1] raise
        # IndexError — confirm the input never includes the initial commit.
        expected = sline[1]
        last_merge = mc
    print  # Python 2: terminate the progress line with a newline
#
# Figure out which merge brought in a commit.
#
# Cache mapping a "git describe" prefix (tag~N) to its merge commit ID; many
# commits resolve through the same merge, so this saves a lot of git calls.
MergeIDs = { }
def find_merge(commit):
    """Return the ID of the merge that pulled *commit* into the mainline,
    or the string 'mainline' for commits that landed directly."""
    command = ['git', 'describe', '--contains', commit]
    p = subprocess.Popen(command, cwd = Repo, stdout = subprocess.PIPE,
                         bufsize = 1)
    desc = p.stdout.readline().decode('utf8')
    p.wait()
    #
    # The description line has the form:
    #
    #    tag~N^M~n...
    #
    # the portion up to the last ^ describes the merge we are after;
    # in the absence of an ^, assume it's on the main branch.
    #
    uparrow = desc.rfind('^')
    if uparrow < 0:
        return 'mainline'
    #
    # OK, now get the real commit ID of the merge.  Maybe we have
    # it stashed?
    #
    try:
        return MergeIDs[desc[:uparrow]]
    except KeyError:
        pass
    #
    # Nope, we have to dig it out the hard way.
    #
    command = ['git', 'log', '--pretty=%H', '-1', desc[:uparrow]]
    p = subprocess.Popen(command, cwd = Repo, stdout = subprocess.PIPE,
                         bufsize = 1)
    merge = p.stdout.readline().decode('utf8').strip()
    #
    # If we get back the same commit, we're looking at one of Linus's
    # version number tags.
    #
    if merge == commit:
        merge = 'mainline'
    MergeIDs[desc[:uparrow]] = merge
    p.wait()
    return merge
#
# Internal merges aren't interesting from our point of view. So go through,
# find them all, and move any commits from such into the parent.
#
def zorch_internals(merge):
    """Recursively dissolve internal merges, folding their commits and
    sub-merges into the parent so only external-tree merges remain."""
    survivors = [ ]
    for sub in merge.merges:
        zorch_internals(sub)
        if not sub.internal:
            survivors.append(sub)
        else:
            # Hoist the internal merge's contents up one level.
            merge.commits += sub.commits
            survivors += sub.merges
    merge.merges = survivors
#
# Figure out how many commits flowed at each stage.
#
def count_commits(merge):
    """Annotate every Merge with .ccount = total commits at or below it
    (each merge commit itself counts as one) and return the total."""
    total = 1 + len(merge.commits)  # +1 for the merge commit itself
    for sub in merge.merges:
        total += count_commits(sub)
    merge.ccount = total
    return total
#
# ...and how many flowed between each pair of trees
#
# Per-tree flow tallies: Treecounts[dst][src] = commits that moved from src
# into dst. SignedTrees collects trees whose pulls carried a GPG signature.
Treecounts = { }
SignedTrees = set()
def tree_stats(merge):
    """Walk the merge graph, accumulating per-tree commit counts."""
    if merge.tree not in Treecounts:
        Treecounts[merge.tree] = { }
    tcount = Treecounts[merge.tree]
    for sub in merge.merges:
        if sub.signed:
            SignedTrees.add(sub.tree)
        tcount[sub.tree] = tcount.get(sub.tree, 0) + sub.ccount
        tree_stats(sub)
#
# Maybe we only want so many top-level trees
#
def trim_trees(limit):
    """Collapse all but the *limit* busiest top-level trees into a single
    'N other trees' bucket under mainline."""
    srcs = Treecounts['mainline']
    srcnames = srcs.keys()
    # Python 2 cmp-style in-place sort: busiest tree first.
    srcnames.sort(lambda t1, t2: srcs[t2] - srcs[t1])
    nextra = len(srcnames) - limit
    zapped = 0
    for extra in srcnames[limit:]:
        zapped += srcs[extra]
        del srcs[extra]
    srcs['%d other trees' % (nextra)] = zapped
#
# Take our map of the commit structure and boil it down to how many commits
# moved from one tree to the next.
#
def dumptree(start, indent = ''):
    """Debug dump: one line per merge with sub-merge/commit counts and tree.
    Internal merges are prefixed 'I: '."""
    int = ''  # NOTE(review): shadows the builtin `int`; harmless here, rename if touched
    if start.internal:
        int = 'I: '
    print '%s%s%s: %d/%d %s' % (indent, int, start.id[:10],
                                len(start.merges), len(start.commits),
                                start.tree)
    for merge in start.merges:
        dumptree(merge, indent + ' ')
def dumpflow(tree, indent = '', seen = []):
    """Print the commit fan-in under *tree*, busiest source first; '**' marks
    trees whose pulls were GPG-signed.

    NOTE(review): mutable default for `seen` — harmless here because the list
    is never mutated in place (recursion passes `seen + [tree]`).
    """
    try:
        srcs = Treecounts[tree]
    except KeyError:
        return
    srctrees = srcs.keys()
    # Python 2 cmp-style sort: descending commit count.
    srctrees.sort(lambda t1, t2: srcs[t2] - srcs[t1])
    for src in srctrees:
        # Skip trees already printed on this path to avoid cycles.
        if src in seen:
            print 'Skip', src, srcs[src], seen
        else:
            if src in SignedTrees:
                print '%s%4d ** %s' % (indent, srcs[src], src)
            else:
                print '%s%4d %s' % (indent, srcs[src], src)
            dumpflow(src, indent = indent + ' ', seen = seen + [tree])
def SigStats(tree):
    """Print signed vs. unsigned pull statistics for the direct sources of *tree*."""
    srcs = Treecounts[tree]
    spulls = upulls = scommits = ucommits = 0
    for src in srcs.keys():
        if src in SignedTrees:
            spulls += 1
            scommits += srcs[src]
        else:
            upulls += 1
            ucommits += srcs[src]
    print '%d repos total, %d signed, %d unsigned' % (spulls + upulls,
                                                      spulls, upulls)
    print ' %d commits from signed, %d from unsigned' % (scommits, ucommits)
#
# Graphviz.
#
def GV_out(file):
    """Render the flow graph to SVG via Graphviz and open the system viewer."""
    graph = graphviz.Digraph('mainline', filename = file, format = 'svg')
    graph.body.extend(['label="Patch flow into the mainline"',
                       'concentrate=true',
                       'rankdir=LR' ])
    # Mainline is a distinctive blue ellipse; every other tree is a box.
    graph.attr('node', fontsize="20", color="blue", penwidth='4',
               shape='ellipse')
    graph.node('mainline')
    graph.attr('node', fontsize="14", color="black", shape='polygon',
               sides='4')
    # Color either by traffic volume or by signature status (-s flag).
    if DoSigned:
        GV_out_node_signed(graph, 'mainline')
    else:
        GV_out_node(graph, 'mainline')
    graph.view()
def GV_fixname(name):
    """Graphviz node names may not contain ':'; replace each with '/'."""
    return '/'.join(name.split(':'))
def GV_color(count):
    """Edge color by traffic volume: red >= RedThresh, orange >= YellowThresh,
    black otherwise."""
    if count >= RedThresh:
        return 'red'
    return 'orange' if count >= YellowThresh else 'black'
#
# Output nodes with traffic coloring
#
def GV_out_node(graph, node, seen = []):
    """Recursively add src -> node edges, colored by commit volume.

    NOTE(review): mutable default `seen` is never mutated in place, so the
    anti-pattern is harmless here.
    """
    try:
        srcs = Treecounts[node]
    except KeyError: # "applied by linus"
        return
    srctrees = srcs.keys()
    # Python 2 cmp-style sort: descending commit count.
    srctrees.sort(lambda t1, t2: srcs[t2] - srcs[t1])
    for src in srctrees:
        if src not in seen:
            graph.edge(GV_fixname(src), GV_fixname(node),
                       taillabel='%d' % srcs[src], labelfontsize="14",
                       color = GV_color(srcs[src]), penwidth='2')
            GV_out_node(graph, src, seen + [node])
#
# Output nodes showing signature status
#
def GV_out_node_signed(graph, node, seen = []):
    """Like GV_out_node, but colors nodes and edges by signature status:
    black for trees pulled from signed tags, red otherwise."""
    try:
        srcs = Treecounts[node]
    except KeyError: # "applied by linus"
        return
    srctrees = srcs.keys()
    # Python 2 cmp-style sort: descending commit count.
    srctrees.sort(lambda t1, t2: srcs[t2] - srcs[t1])
    for src in srctrees:
        color = 'red'
        if src in SignedTrees:
            color = 'black'
        if src not in seen:
            graph.attr('node', color=color)
            graph.edge(GV_fixname(src), GV_fixname(node),
                       taillabel='%d' % srcs[src], labelfontsize="14",
                       color = color, penwidth='2')
            GV_out_node_signed(graph, src, seen + [node])
#
# argument parsing stuff.
#
def setup_args():
    """Build and return the command-line parser (no parsing happens here)."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add('-d', '--dump', help = 'Dump merge list to file',
        required = False, default = '')
    add('-g', '--gvoutput', help = 'Graphviz output',
        required = False, default = '')
    add('-l', '--load', help = 'Load merge list from file',
        required = False, default = '')
    add('-o', '--output', help = 'Output file',
        required = False, default = '-')
    add('-r', '--repo', help = 'Repository location',
        required = False, default = '/home/corbet/kernel')
    add('-t', '--trim', help = 'Trim top level to this many trees',
        required = False, default = 0, type = int)
    add('-R', '--red', help = 'Red color threshold',
        required = False, default = 800, type = int)
    add('-Y', '--yellow', help = 'Yellow color threshold',
        required = False, default = 200, type = int)
    add('-s', '--signed', help = 'Display signed trees',
        action='store_true', default = False)
    return parser
p = setup_args()
args = p.parse_args()
Repo = args.repo
RedThresh = args.red
YellowThresh = args.yellow
DoSigned = args.signed
#
# Find our commits.
#
if args.load:
dumpfile = open(args.load, 'r')
Mergelist = pickle.loads(dumpfile.read())
dumpfile.close
Mainline = Mergelist['mainline']
else:
Mainline = Merge('mainline', tree = 'mainline')
ingest_commits(sys.stdin)
if args.dump:
dumpfile = open(args.dump, 'w')
dumpfile.write(pickle.dumps(Mergelist))
dumpfile.close()
#
# Now generate the flow graph.
#
#dumptree(Mainline)
zorch_internals(Mainline)
#dumptree(Mainline)
Treecounts['mainline'] = { 'Applied by Linus': len(Mainline.commits) }
print 'total commits', count_commits(Mainline)
tree_stats(Mainline)
if args.trim:
trim_trees(args.trim)
print 'Tree flow'
dumpflow('mainline')
if args.gvoutput:
GV_out(args.gvoutput)
if DoSigned:
SigStats('mainline')
|
from random import random
from . import Agent
from util.collections import CircularList
from util.listops import sublists, listhash
from util.interpolation import linear_latch
class ActionChainAgent(Agent):
    """Q-learning agent whose state is the chain of its own recent actions."""

    def __init__(self, chain_length):
        super(ActionChainAgent, self).__init__(
            name='ActionChainAgent', version='1.2')
        self.q = dict()  # state-action values: q[state][action]
        self.chain = CircularList(chain_length)  # rolling window of recent actions
        # e=1 until frame 5k, then interpolate down to e=0.05 in frame 10k,
        # and keep it there for the remaining time
        self.e_params = (5000, 10000, 1.0, 0.05)
        self.e = 0.5
        self.nframes = 0
        self.learning_rate = 0.1
        self.discount = 0.9
        self.last_action = None

    def update_e(self):
        # Anneal the exploration rate along the (start, end, hi, lo) schedule.
        self.e = linear_latch(self.nframes, *self.e_params)

    def select_action(self):
        # Always take random action first
        action = self.get_random_action()
        # Greedy action (only once the chain window is full)
        if random() > self.e and self.chain.full:
            res = self.get_greedy_action(self.available_actions)
            if res is not None:
                action = res
        self.chain.append(action)
        return action

    def receive_reward(self, reward):
        # Credit the reward to every suffix of the recent action chain.
        for chain in sublists(self.chain):
            # Consider the previous moves to be the current state
            state = chain[1:]
            action = chain[0]
            self.update_chain(state, action, reward)
        self.on_frame_end()

    def on_frame_end(self):
        self.nframes += 1
        self.update_e()

    def on_episode_start(self):
        pass

    def on_episode_end(self):
        pass

    def update_chain(self, state, action, reward):
        """TD-style update of q[state][action] toward the observed reward."""
        lhstate = listhash(state)
        if lhstate not in self.q:
            self.q[lhstate] = dict()
        if action not in self.q[lhstate]:
            self.q[lhstate][action] = reward
        else:
            val = self.q[lhstate][action]
            self.q[lhstate][action] = val + self.learning_rate * \
                (reward - self.discount * val)

    def get_greedy_action(self, available_actions):
        """Return the best-valued known action over every suffix of the
        current chain, or None when nothing is known yet."""
        best_action = None
        best_value = None
        for state in sublists(self.chain):
            lhstate = listhash(state)
            if lhstate in self.q:
                s = self.q[lhstate]
                for a in available_actions:
                    if a in s:
                        val = s[a]
                        # BUG FIX: `val > best_value` raises TypeError on the
                        # first candidate under Python 3 (best_value is None).
                        if best_value is None or val > best_value:
                            best_action = a
                            best_value = val
        return best_action

    def reset(self):
        self.e = 0.5
        self.nframes = 0
        self.last_action = None
        self.q = dict()
        self.chain.clear()

    def get_settings(self):
        settings = {'chain_length': self.chain.capacity(),
                    'e_params': self.e_params,
                    'learning_rate': self.learning_rate,
                    'discount': self.discount
                    }
        settings.update(super(ActionChainAgent, self).get_settings())
        return settings
|
"""
Authors: Cristhian Castillo and Kevin Zarama
Icesi University, 2019
This script represent a client in the model Client-Server for a Socket Chatroom
"""
import socket
import sys
import errno
from random import randrange
"""
HEADER INFO
"""
HEADER_LENGTH = 10
"""
HOST INFO
"""
HOST = "127.0.0.1"
PORT = 8080
nickname = input("Username: ")
"""
CIPHER DATA
"""
# Key for cesar cipher
key = randrange(20) + 1
hex_key = hex(key)
# Alphabet for Cesar Cipher
abc = "ABCDEFGHIJKLMNรOPQRSTUVWXYZabcdefghijklmnรฑopqrstuwxyz"
def encoded_message(msg):
    """
    Encode a message using Cesar Cipher
    :param msg: Is the message to Cipher
    :return: The encrypted Message
    """
    ciphered = []
    size = len(abc)
    for ch in msg:
        if ch in abc:
            # Shift by `key` with wraparound; the modulo is the same arithmetic
            # as the original's explicit two-branch index computation.
            ciphered.append(abc[(abc.index(ch) + key) % size])
        else:
            # Characters outside the alphabet pass through unchanged.
            ciphered.append(ch)
    return ''.join(ciphered)
def decoded_message(msg, cesar_key):
    """
    Decoded the encrypted message
    :param msg: encrypted message for decoded
    :param cesar_key: number of movements
    :return: Messaged decoded
    """
    size = len(abc)
    # Negative indices wrap exactly like the original's explicit len()+aux branch.
    return ''.join(abc[(abc.index(ch) - cesar_key) % size] for ch in msg)
# Create a socket
# socket.SOCK_STREAM - TCP, conection-based.
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect to a given ip and port of the Host
client_socket.connect((HOST, PORT))
# Connection to non-blocking state
# Non-blocking so the receive loop below can poll without stalling user input.
client_socket.setblocking(False)
"""
User Information. And Encode header in bytes using utf-8
"""
username = nickname.encode('utf-8')
# Length header, left-aligned and padded to HEADER_LENGTH characters.
username_header = f"{len(username):<{HEADER_LENGTH}}".encode('utf-8')
client_socket.send(username_header + username)
# Main chat loop: send one user message, then drain any pending incoming
# messages from the (non-blocking) socket.
while True:
    # Wait for user to input a message
    message = input(f'{nickname} > ')
    # Cipher locally before it goes on the wire.
    encrypted_message = encoded_message(message)
    # If message is not empty - send it
    if encrypted_message:
        # Encode to bytes, append the cipher key (hex text), prepend the
        # fixed-width length header, then send.
        encrypted_message = encrypted_message.encode('utf-8') + hex_key.encode('utf-8')
        message_header = f"{len(encrypted_message):<{HEADER_LENGTH}}".encode('utf-8')
        client_socket.send(message_header + encrypted_message)
    try:
        # Loop over received messages and print them
        while True:
            # Receive header containing username length, it's size is defined and constant
            username_header = client_socket.recv(HEADER_LENGTH)
            # If we received no data, server gracefully closed a connection.
            if not len(username_header):
                print('Conexiรณn cerrada por el servidor :(')
                sys.exit()
            # Convert header to int value
            username_length = int(username_header.decode('utf-8').strip())
            # Receive and decode username
            username = client_socket.recv(username_length).decode('utf-8')
            # Same framing for the message body (header then payload).
            message_header = client_socket.recv(HEADER_LENGTH)
            message_length = int(message_header.decode('utf-8').strip())
            encrypted_message = client_socket.recv(message_length).decode('utf-8')
            # Split the payload: ciphertext followed by its "0x..." cesar key.
            key_index = encrypted_message.index("0x")
            msg = encrypted_message[0:key_index]
            cesar_key = int(encrypted_message[key_index:], 0)
            # decode the message
            msg = decoded_message(msg, cesar_key)
            # Print message
            print(f'{username} > {msg}')
    except IOError as e:
        # Expected on a non-blocking socket when no data is pending:
        # EAGAIN/EWOULDBLOCK mean "try again later"; anything else is fatal.
        if e.errno != errno.EAGAIN and e.errno != errno.EWOULDBLOCK:
            print('Error de lectura: {}'.format(str(e)))
            sys.exit()
        # We just did not receive anything
        continue
    except Exception as e:
        # Any other exception - something happened, exit.
        # BUG FIX: the '{}' placeholder was missing here, so the actual error
        # text was silently dropped from the printed message.
        print('Error de lectura: {}'.format(str(e)))
        sys.exit()
|
class node:
    """Singly-linked list cell holding one value."""

    def __init__(self, x):
        self.value = x
        self.next = None  # wired up by linkedList.insert when chained
class linkedList:
    """Minimal singly-linked list; nodes expose .value and .next."""

    def __init__(self, n=None):
        self.head = n

    def insert(self, n):
        """Append node *n* at the tail.

        (The original re-checked ``node == None`` after the head test above,
        which was unreachable dead code — removed.)
        """
        if self.head is None:
            self.head = n
            return
        node = self.head
        while node.next is not None:
            node = node.next
        node.next = n

    def printL(self):
        """Print every value, one per line, then a blank separator line."""
        node = self.head
        while node is not None:
            print(node.value)
            node = node.next
        print(' ')

    def delete(self, val):
        """Remove the first node whose value equals *val*; no-op when absent."""
        node = self.head
        if node is None:
            # BUG FIX: the original dereferenced head unconditionally and
            # raised AttributeError when deleting from an empty list.
            return
        if node.value == val:
            self.head = self.head.next
            return
        while node.next is not None:
            if node.next.value == val:
                node.next = node.next.next
                return
            node = node.next
class lrucache:
    """Fixed-capacity page cache over a linked list.

    NOTE: despite the name, eviction is FIFO (head = oldest insertion), not
    true LRU — hits do not move a page to the back.
    """

    def __init__(self, size=3):
        self.ll = linkedList()   # insertion-ordered pages; head is the oldest
        self.pageSize = size     # capacity in pages
        self.n = 0               # number of pages currently cached
        self.hash = {}           # fast membership check: value -> True

    def request(self, val):
        """Access page *val*: report a hit, or cache it (evicting the oldest)."""
        if val in self.hash:
            print("page hit")
        elif self.n + 1 > self.pageSize:
            # Evict the oldest page to make room.
            # BUG FIX: the original never removed the evicted value from
            # self.hash, so a later request for it was wrongly reported as a
            # "page hit" (and the hash grew without bound).
            evicted = self.ll.head.value
            self.ll.delete(evicted)
            del self.hash[evicted]
            self.ll.insert(node(val))
            self.hash[val] = True
        else:
            self.ll.insert(node(val))
            self.hash[val] = True
            self.n += 1

    def printlru(self):
        # printL prints the values itself and returns None (which is printed too).
        print(self.ll.printL())
# Demo: capacity-2 cache; 1 and 2 fill it, 3 and 4 each evict the oldest,
# and the second request for 4 should be a page hit.
l=lrucache(2)
l.request(1)
l.printlru()
l.request(2)
l.printlru()
l.request(3)
l.printlru()
l.request(4)
l.printlru()
l.request(4)
|
import numpy as np
from numpy import linalg as LA
from scipy.spatial.distance import cdist
# rejection sampling algorithm comes from LSE lecture notes
# alternatively see WOLFRAM: http://mathworld.wolfram.com/CirclePointPicking.html
# # http://mathworld.wolfram.com/HyperspherePointPicking.html
def unit_circumference_coordinates(r, n, coordinates):
    """Sample points on the circle of radius *r* centred at *coordinates*.

    Rejection sampling (LSE notes / Wolfram "circle point picking"): draw
    (u, v) uniformly in the square, keep pairs inside the unit disc, and map
    each accepted pair onto the circumference. Fewer than n points may be
    returned because rejected pairs are simply dropped.
    """
    u = np.random.uniform(-1, 1, n)
    v = np.random.uniform(-1, 1, n)
    accepted = np.where((u ** 2 + v ** 2) < 1)  # accepted samples
    u, v = u[accepted], v[accepted]
    norm = u ** 2 + v ** 2
    dx = (u ** 2 - v ** 2) / norm * r
    dy = (2 * u * v) / norm * r
    cx, cy = coordinates[0], coordinates[1]
    return cx + dx, cy + dy
def hyper_sphere_coordindates(n_search_samples, x, h, l, p):
    """Sample points in the l_p shell of radii [l, h) around each row of x.

    Directions come from a standard normal (Wolfram "hypersphere point
    picking"); each is rescaled to a radius drawn uniformly from [l, h).
    Returns (x_tilde, d): the perturbed points and the sampled radii.
    """
    directions = np.random.randn(n_search_samples, x.shape[1])
    radii = np.random.rand(n_search_samples) * (h - l) + l  # length range [l, h)
    scale = (radii / np.linalg.norm(directions, ord=p, axis=1)).reshape(-1, 1)
    return x + directions * scale, radii
def Laugel_Search(ncounterfactuals, out, search_samples, clf):
    """Growing-spheres counterfactual search (Laugel et al. style).

    For each of the first *ncounterfactuals* test rows, repeatedly samples
    candidates on an expanding l2 shell around the point until the classifier
    predicts a label other than 1, then keeps the l1-closest such candidate.
    Rows for which no candidate is found within the iteration budget get None.

    NOTE(review): assumes out['test_counter'][1] holds the perturbable
    features and [2] the fixed (conditioning) features — confirm with caller.
    """
    # this function IS NOT GENERAL: works for "give me credit"
    x_tilde_star_list = []
    # Set parameters
    p = 2             # shells are sampled under the l2 norm
    threshold = 200   # give up on a row after this many shell expansions
    for i in range(ncounterfactuals):
        # Test data, replicated once per shell sample.
        test_data_replicated = np.repeat(out['test_counter'][1][i, :].reshape(1, -1), search_samples, axis=0)
        test_data_c_replicated = np.repeat(out['test_counter'][2][i, :].reshape(1, -1), search_samples, axis=0)
        l = 0
        step = 0.5
        h = l + step
        # counter to stop
        count = 0
        counter_step = 1
        while True:
            count = count + counter_step
            if (count > threshold) is True:
                x_tilde_star = None
                break
            # STEP 1 of Algorithm
            # sample points on hyper sphere around test point
            x_tilde, _ = hyper_sphere_coordindates(search_samples, test_data_replicated, h, l, p)
            # one way: #x_tilde = np.ceil(x_tilde); another x_tilde = np.around(x_tilde,1)
            x_tilde = np.c_[test_data_c_replicated, x_tilde]
            # STEP 2 of Algorithm
            # compute l_1 distance
            distances = np.abs((x_tilde - np.c_[test_data_c_replicated, test_data_replicated])).sum(axis=1)
            # counterfactual labels
            y_tilde = clf.predict(x_tilde)
            cla_index = np.where(y_tilde != 1)
            x_tilde_candidates = x_tilde[cla_index]
            candidates_dist = distances[cla_index]
            if len(candidates_dist) == 0: # no candidate generated
                # Grow the shell outward and try again.
                l = h
                h = l + step
            else: # certain candidates generated
                min_index = np.argmin(candidates_dist)
                x_tilde_star = x_tilde_candidates[min_index]
                break
        x_tilde_star_list.append(x_tilde_star)
    X_test_counterfactual = np.array(x_tilde_star_list)
    return X_test_counterfactual
|
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for visualization library.
IF ANY OF THESE TESTS BREAK PLEASE UPDATE THE CODE IN THE VIZ NOTEBOOK
******************************************************************************
Any fixes you have to make to this test or visualization.py to fix this test
might have to be reflected in the visualization notebook, for example if the
name of the hparams_set changes.
If you need help testing the changes please contact llion@.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensor2tensor.utils import trainer_lib
from tensor2tensor.visualization import visualization
import tensorflow as tf
def get_data_dir():
    """Return <package-root>/test_data, two directory levels above this file."""
    package_dir = os.path.dirname(os.path.dirname(__file__))
    return os.path.join(package_dir, 'test_data')
# Fixed model configuration shared by all visualization tests below.
problem_name = 'translate_ende_wmt32k'
model_name = 'transformer'
hparams_set = 'transformer_tiny'
class VisualizationTest(tf.test.TestCase):
    """Checks that visualization.build_model / AttentionVisualizer keep the
    tensor shapes and attention-matrix layout the viz notebook relies on."""

    def setUp(self):
        super(VisualizationTest, self).setUp()
        self.data_dir = get_data_dir()

    def test_build_model_greedy(self):
        # beam_size=1 is greedy decoding; inputs/targets are rank-4
        # (batch, length, 1, 1) placeholders, outputs is (batch, length).
        inputs, targets, outputs, _ = visualization.build_model(
            hparams_set, model_name, self.data_dir, problem_name, beam_size=1)
        self.assertAllEqual((1, None, 1, 1), inputs.shape.as_list())
        self.assertAllEqual((1, None, 1, 1), targets.shape.as_list())
        self.assertAllEqual((None, None), outputs.shape.as_list())

    def test_build_model_beam(self):
        # Same shape contract must hold with beam search enabled.
        inputs, targets, outputs, _ = visualization.build_model(
            hparams_set, model_name, self.data_dir, problem_name, beam_size=8)
        self.assertAllEqual((1, None, 1, 1), inputs.shape.as_list())
        self.assertAllEqual((1, None, 1, 1), targets.shape.as_list())
        self.assertAllEqual((None, None), outputs.shape.as_list())

    def test_get_vis_data_from_string(self):
        visualizer = visualization.AttentionVisualizer(
            hparams_set, model_name, self.data_dir, problem_name, beam_size=8)
        input_sentence = 'I have two dogs.'
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            _, inp_text, out_text, att_mats = (
                visualizer.get_vis_data_from_string(sess, input_sentence))
            self.assertAllEqual(
                [u'I_', u'have_', u'two_', u'dogs_', u'._', u'<EOS>'], inp_text)
            hparams = trainer_lib.create_hparams(
                hparams_set, data_dir=self.data_dir, problem_name=problem_name)
            # One attention matrix per hidden layer; inspect the first layer.
            enc_atts, dec_atts, encdec_atts = att_mats
            self.assertAllEqual(hparams.num_hidden_layers, len(enc_atts))
            enc_atts = enc_atts[0]
            dec_atts = dec_atts[0]
            encdec_atts = encdec_atts[0]
            batch_size = 1
            num_heads = hparams.num_heads
            inp_len = len(inp_text)
            out_len = len(out_text)
            # Each attention tensor is (batch, heads, query_len, key_len).
            self.assertAllEqual(
                (batch_size, num_heads, inp_len, inp_len), enc_atts.shape)
            self.assertAllEqual(
                (batch_size, num_heads, out_len, out_len), dec_atts.shape)
            self.assertAllEqual(
                (batch_size, num_heads, out_len, inp_len), encdec_atts.shape)
if __name__ == '__main__':
    tf.test.main()
|
'''
Created on Mar 13, 2016
Codejam template
@author: Ozge
'''
from itertools import product
# Code Jam I/O boilerplate: read all lines of <prefix>.in, write <prefix>.out.
filepath = ''
fileprefix = 'C-small-attempt1' #Change
filepathname = filepath + fileprefix
infilename = filepathname + '.in'
outfilename = filepathname + '.out'
# NOTE(review): 'rU' (universal newlines) is deprecated on Python 3, and the
# input file handle is never closed explicitly.
lines = open(infilename, 'rU').read().split("\n")
outfile = open(outfilename, 'w+')
tcases = int(lines[0]) #this never changes
linestart = 1 # this might change if there are parameters N, M, L etc
def converttoint(binary, base):
    """Interpret the digit sequence *binary* as a number in the given base."""
    return sum(int(digit) * base ** place
               for place, digit in enumerate(reversed(binary)))
def isprime(n):
    """Deterministic trial-division primality test using the 6k±1 wheel."""
    if n in (2, 3):
        return True
    if n < 2 or n % 2 == 0:
        return False
    if n < 9:
        return True
    if n % 3 == 0:
        return False
    # Only candidates of the form 6k-1 / 6k+1 up to sqrt(n) can divide n now.
    limit = int(n ** 0.5)
    candidate = 5
    while candidate <= limit:
        if n % candidate == 0 or n % (candidate + 2) == 0:
            return False
        candidate += 6
    return True
def generateBinary(N):
    """Return all N-digit binary strings that start and end with '1'.

    BUG FIX: the original called ``itertools.product``, but only
    ``from itertools import product`` is imported, so it raised NameError.
    (A leftover debug print of the intermediate list was also removed.)
    """
    middles = ["".join(seq) for seq in product("01", repeat=N-2)]
    return ['1' + middle + '1' for middle in middles]
def solve(N, J):
    """Collect up to J N-digit candidates that solveinner accepts
    (strings starting/ending with '1', composite in every base 2-10)."""
    found = []  # accepted binary strings
    for middle in product((0, 1), repeat=N-2):
        if len(found) >= J:
            break
        candidate = ''.join(str(bit) for bit in ((1,) + middle + (1,)))
        if solveinner(candidate):
            found.append(candidate)
    return found
def solveinner(listelement):
    """Return True when *listelement* is composite (non-prime) when read in
    every base 2..10 — i.e. it is a valid "jamcoin".

    The original threaded a flag across loop iterations and implicitly
    returned None when the final base produced a prime; return explicit
    booleans instead (callers only test truthiness, so this is compatible).
    """
    for base in range(2, 11):
        if isprime(converttoint(listelement, base)):
            return False  # prime in some base => rejected
    return True  # not prime in any base
def createoutput(binarylist):
    """Map each accepted candidate string to its list of witness divisors.

    (Uses a comprehension and avoids shadowing the ``dict`` builtin.)
    """
    return {candidate: finddivisors(candidate) for candidate in binarylist}
def finddivisors(binarynumber):
    """For each base 2..10, record one nontrivial divisor of the candidate's
    value in that base (nothing is recorded for a base where it is prime)."""
    witnesses = []
    for base in range(2, 11):
        value = converttoint(binarynumber, base)
        # First proper divisor in [2, value); stop as soon as one is found,
        # exactly like the original's counter-and-break bookkeeping.
        for cand in range(2, value):
            if value % cand == 0:
                witnesses.append(cand)
                break
    return witnesses
#print(createoutput(solve(16,50)))
#print(createoutput(solve(6, 3)))
# One test case per line after the header; each case line is "N J".
for testcase in range(1, tcases+1): #change the value to the line number where the first case starts
    N, J = [int(x) for x in lines[testcase].split()]
    out = createoutput(solve(N,J)) #Assign solved value
#    casestr = 'Case #'+str(testcase)+': '+str(out)
    outstr= 'Case #'+str(testcase)+': '+'\n'
    # Emit each jamcoin followed by its per-base divisor witnesses.
    for key, value in out.items():
        #print(key, " ".join(map(str, value)))
        outstr=outstr+key+' '+" ".join(map(str, value))+'\n'
#    print (outstr)
    outfile.write(outstr+"\n")
# Read two numbers and print the results of the basic arithmetic operators.
def _read_number(prompt):
    """Parse user input as int (falling back to float).

    SECURITY FIX: the original used eval(input(...)), which executes arbitrary
    Python expressions typed by the user; plain numeric parsing preserves the
    intended behavior for numbers without that risk.
    """
    text = input(prompt)
    try:
        return int(text)
    except ValueError:
        return float(text)

inp1 = _read_number("Enter the first number:")
inp2 = _read_number("Enter the second number:")
print("The arithametic operation are as follows:")
print(inp1, "+", inp2, "=", inp1 + inp2)
print(inp1, "-", inp2, "=", inp1 - inp2)
print(inp1, "*", inp2, "=", inp1 * inp2)
# NOTE: the three division operators raise ZeroDivisionError when inp2 == 0.
print(inp1, "/", inp2, "=", inp1 / inp2)
print(inp1, "//", inp2, "=", inp1 // inp2)
print(inp1, "%", inp2, "=", inp1 % inp2)
"""
The MIT License (MIT)
Copyright (c) 2016 Intel Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import pytest
import unittest
import os
import sys
import json
from utils import file2tile
# Path to the shared example ICGC config; sys.argv[-1] is assumed to be the
# repository root supplied as the last CLI argument -- TODO confirm against
# how the test suite is invoked.
config_path = os.path.join(os.path.realpath(
    sys.argv[-1]), "utils/example_configs/icgc_config.json")
# Column names of the ICGC tab-separated input format, in file order.
test_header = [
    "icgc_mutation_id",
    "project_code",
    "icgc_donor_id",
    "icgc_sample_id",
    "matched_icgc_sample_id",
    "variation_calling_algorithm",
    "assembly_version",
    "chromosome",
    "chromosome_start",
    "chromosome_end",
    "reference_genome_allele",
    "mutated_to_allele",
    "quality_score",
    "probability",
    "total_read_count",
    "mutant_allele_read_count",
    "chromosome_strand"]
# One well-formed data row, column for column matching test_header.
test_data = [
    "pytest",
    "ALL-US",
    "test_person",
    "target_id",
    "source_id",
    "caller",
    "GRCh37",
    "1",
    "100",
    "150",
    "T",
    "A",
    "0.35",
    "0.9",
    "100",
    "90",
    "0|1"]
class TestFile2Tile(unittest.TestCase):
    """Tests for utils.file2tile.File2Tile.

    Covers the file-pointer lifecycle, header validation, and per-line
    parsing under several config overrides (GT mapping, static variant
    set / call set / assembly, derived individual id).
    """

    @classmethod
    def setUpClass(cls):
        """Rewrite the shared example config so its auxiliary config paths
        resolve relative to sys.argv[-1] (assumed to be the repo root
        passed as the last CLI argument -- TODO confirm)."""
        # BUG FIX: the classmethod's first parameter was named ``self``;
        # it receives the class, so it is named ``cls`` per convention.
        with open(config_path, 'r') as readFP:
            config_json = json.load(readFP)
        config_json["TileDBConfig"] = os.path.join(os.path.realpath(
            sys.argv[-1]), "utils/example_configs/tiledb_config.json")
        config_json["TileDBAssembly"] = os.path.join(
            os.path.realpath(sys.argv[-1]), "utils/example_configs/hg19.json")
        config_json["VariantSetMap"]["VariantConfig"] = os.path.join(
            os.path.realpath(sys.argv[-1]), "utils/example_configs/icgc_variants.json")
        with open(config_path, 'w') as writeFP:
            writeFP.write(json.dumps(config_json))

    @pytest.fixture(autouse=True)
    def set_tmpdir(self, tmpdir):
        # Bridge pytest's tmpdir fixture into this unittest-style class.
        self.tmpdir = tmpdir

    # ------------------------------------------------------------------
    # Helpers (factored out of the original copy-pasted test bodies).
    # ------------------------------------------------------------------

    def _write_input(self, rows, header=None, name="in.txt"):
        """Write a tab-separated input file: comment line, header, rows."""
        input_file = self.tmpdir.join(name)
        with open(str(input_file), 'w') as inFP:
            inFP.write("# Comment line\n")
            inFP.write("\t".join(test_header if header is None else header))
            inFP.write("\n")
            for row in rows:
                inFP.write("\t".join(row))
                inFP.write("\n")
        return input_file

    def _empty_output(self, name="out.txt"):
        """Create an empty output file and return its py.path handle."""
        output_file = self.tmpdir.join(name)
        output_file.write("")
        return output_file

    def _modified_config(self, mutate):
        """Copy the shared config, apply *mutate* to the parsed dict, and
        write the result into tmpdir; returns the new config's path."""
        with open(config_path, 'r') as fp:
            config_json = json.load(fp)
        mutate(config_json)
        test_config = self.tmpdir.join("test_config.json")
        with open(str(test_config), 'w') as fp:
            fp.write(json.dumps(config_json))
        return test_config

    @staticmethod
    def _expected_values(**overrides):
        """TileDBValues expected for test_data, with optional overrides."""
        values = {"REF": "T", "ALT": ["A"], "QUAL": "0.35", "AF": ["0.9"],
                  "AN": "100", "AC": ["90"], "GT": ["0|1"]}
        values.update(overrides)
        return values

    def _assert_parsed(self, f2t, values, call_set=None,
                       variant_set="Blood", individual=None):
        """Assert the per-line attributes produced by parseNextLine()."""
        assert f2t.IndividualId == (test_data[2] if individual is None else individual)
        assert f2t.TargetSampleId == test_data[3]
        assert f2t.SourceSampleId == test_data[4]
        assert f2t.CallSetName == (test_data[5] if call_set is None else call_set)
        assert f2t.VariantSetName == variant_set
        assert f2t.TileDBPosition == [
            int(test_data[8]) - 1, int(test_data[9]) - 1]
        assert f2t.TileDBValues == values

    # ------------------------------------------------------------------
    # Tests
    # ------------------------------------------------------------------

    def test_initFilePointers(self):
        """initFilePointers() adopts the handles; closeFilePointers() closes both."""
        input_file = self.tmpdir.join("in.file")
        input_file.write("# testing\n")
        output_file = self._empty_output("out.file")
        with open(str(input_file), 'r') as inFP, open(str(output_file), 'w') as outFP:
            f2t = file2tile.File2Tile(config_path)
            f2t.initFilePointers(inFP, outFP)
            assert f2t.inFile == inFP
            assert f2t.outFile == outFP
            assert f2t.inFile.closed == False
            assert f2t.outFile.closed == False
            f2t.closeFilePointers()
            assert f2t.inFile.closed
            assert f2t.outFile.closed
            assert inFP.closed
            assert outFP.closed

    def test_getHeader(self):
        """getHeader() parses the header line into an ordered list."""
        input_file = self._write_input([])
        output_file = self._empty_output()
        with open(str(input_file), 'r') as inFP, open(str(output_file), 'w') as outFP:
            f2t = file2tile.File2Tile(config_path)
            f2t.initFilePointers(inFP, outFP)
            assert f2t.header is None
            f2t.getHeader()
            assert isinstance(f2t.header, list)
            assert f2t.header[0] == "icgc_mutation_id"
            assert f2t.header[-1] == "chromosome_strand"

    def test_getHeader_negative_testing(self):
        """Every required column must be present; drop each one in turn."""
        fields_to_remove = [
            "icgc_sample_id",
            "matched_icgc_sample_id",
            "variation_calling_algorithm",
            "assembly_version",
            "chromosome",
            "chromosome_start",
            "chromosome_end",
            "reference_genome_allele",
            "mutated_to_allele",
            "quality_score",
            "probability",
            "total_read_count",
            "mutant_allele_read_count",
            "chromosome_strand"]
        for field_to_remove in fields_to_remove:
            self._assert_missing_field_raises(field_to_remove)

    def _assert_missing_field_raises(self, field_to_remove):
        """getHeader() raises ValueError when *field_to_remove* is absent."""
        incorrect_header = test_header[:]
        incorrect_header.remove(field_to_remove)
        input_file = self._write_input([], header=incorrect_header)
        output_file = self._empty_output()
        with open(str(input_file), 'r') as inFP, open(str(output_file), 'w') as outFP:
            f2t = file2tile.File2Tile(config_path)
            f2t.initFilePointers(inFP, outFP)
            assert f2t.header is None
            with pytest.raises(ValueError) as exec_info:
                f2t.getHeader()
            assert "{0} is not a valid field in input file's header".format(
                field_to_remove) in str(exec_info.value)

    def test_parseNextLine(self):
        """parseNextLine() maps one data row onto the TileDB attributes."""
        input_file = self._write_input([test_data])
        output_file = self._empty_output()
        with open(str(input_file), 'r') as inFP, open(str(output_file), 'w') as outFP:
            f2t = file2tile.File2Tile(config_path)
            f2t.initFilePointers(inFP, outFP)
            assert f2t.header is None
            f2t.getHeader()
            f2t.parseNextLine()
            self._assert_parsed(f2t, self._expected_values())
            # No further data lines remain.
            assert f2t.parseNextLine() == False

    def test_parseNextLine_empty_value(self):
        """An empty mutant_allele_read_count is emitted as AC == ["*"]."""
        empty_qual = test_data[:]
        empty_qual[-2] = ""  # blank out mutant_allele_read_count
        input_file = self._write_input([empty_qual])
        output_file = self._empty_output()
        with open(str(input_file), 'r') as inFP, open(str(output_file), 'w') as outFP:
            f2t = file2tile.File2Tile(config_path)
            f2t.initFilePointers(inFP, outFP)
            assert f2t.header is None
            # BUG FIX: the original never called getHeader() here, unlike
            # every other parse test; the header line must be consumed
            # before data lines are parsed.
            f2t.getHeader()
            f2t.parseNextLine()
            self._assert_parsed(f2t, self._expected_values(AC=["*"]))

    def test_parseNextLine_GT(self):
        """GT values are split on the configured separator and remapped."""
        input_file = self._write_input([test_data])
        output_file = self._empty_output()

        def mutate(cfg):
            cfg["Seperators"]["GT"] = "|"
            cfg["GTMapping"]["0"] = "y"
            cfg["GTMapping"]["1"] = "x"
        test_config = self._modified_config(mutate)
        with open(str(input_file), 'r') as inFP, open(str(output_file), 'w') as outFP:
            f2t = file2tile.File2Tile(str(test_config))
            f2t.initFilePointers(inFP, outFP)
            assert f2t.header is None
            f2t.getHeader()
            f2t.parseNextLine()
            self._assert_parsed(f2t, self._expected_values(GT=["y", "x"]))

    def test_parseNextLine_variantname_static(self):
        """With Dynamic=False the configured VariantSet name is used."""
        input_file = self._write_input([test_data, test_data])
        output_file = self._empty_output()

        def mutate(cfg):
            cfg["VariantSetMap"]["Dynamic"] = False
            cfg["VariantSetMap"]["VariantSet"] = "my_test_variant"
        test_config = self._modified_config(mutate)
        with open(str(input_file), 'r') as inFP, open(str(output_file), 'w') as outFP:
            f2t = file2tile.File2Tile(str(test_config))
            f2t.initFilePointers(inFP, outFP)
            assert f2t.header is None
            f2t.getHeader()
            assert f2t.VariantSetName is None
            f2t.parseNextLine()
            f2t.parseNextLine()
            self._assert_parsed(f2t, self._expected_values(),
                                variant_set="my_test_variant")

    def test_parseNextLine_variantname_dynamic_name_static_lookup(self):
        """With VariantLookup=False the raw project code is the set name."""
        input_file = self._write_input([test_data, test_data])
        output_file = self._empty_output()

        def mutate(cfg):
            cfg["VariantSetMap"]["VariantLookup"] = False
        test_config = self._modified_config(mutate)
        with open(str(input_file), 'r') as inFP, open(str(output_file), 'w') as outFP:
            f2t = file2tile.File2Tile(str(test_config))
            f2t.initFilePointers(inFP, outFP)
            assert f2t.header is None
            f2t.getHeader()
            f2t.parseNextLine()
            self._assert_parsed(f2t, self._expected_values(),
                                variant_set="ALL-US")

    def test_parseNextLine_callset_static(self):
        """With CallSetId Dynamic=False the configured CallSetName is used."""
        input_file = self._write_input([test_data, test_data])
        output_file = self._empty_output()

        def mutate(cfg):
            cfg["CallSetId"]["Dynamic"] = False
            cfg["CallSetId"]["CallSetName"] = "my_test_callset"
        test_config = self._modified_config(mutate)
        with open(str(input_file), 'r') as inFP, open(str(output_file), 'w') as outFP:
            f2t = file2tile.File2Tile(str(test_config))
            f2t.initFilePointers(inFP, outFP)
            assert f2t.header is None
            f2t.getHeader()
            assert f2t.VariantSetName is None
            f2t.parseNextLine()
            f2t.parseNextLine()
            self._assert_parsed(f2t, self._expected_values(),
                                call_set="my_test_callset")

    def test_parseNextLine_assembly_static(self):
        """With assembly Dynamic=False the configured assembly name is used."""
        input_file = self._write_input([test_data, test_data])
        output_file = self._empty_output()

        def mutate(cfg):
            cfg["Position"]["assembly"]["Dynamic"] = False
            cfg["Position"]["assembly"]["assemblyName"] = "test_assembly"
        test_config = self._modified_config(mutate)
        with open(str(input_file), 'r') as inFP, open(str(output_file), 'w') as outFP:
            f2t = file2tile.File2Tile(str(test_config))
            f2t.initFilePointers(inFP, outFP)
            assert f2t.header is None
            f2t.getHeader()
            assert f2t.VariantSetName is None
            f2t.parseNextLine()
            f2t.parseNextLine()
            assert f2t.ChromosomePosition[0] == "test_assembly"
            self._assert_parsed(f2t, self._expected_values())

    def test_parseNextLine_individual(self):
        """Without an IndividualId config the id derives from the source sample."""
        input_file = self._write_input([test_data, test_data])
        output_file = self._empty_output()

        def mutate(cfg):
            del cfg["IndividualId"]
        test_config = self._modified_config(mutate)
        with open(str(input_file), 'r') as inFP, open(str(output_file), 'w') as outFP:
            f2t = file2tile.File2Tile(str(test_config))
            f2t.initFilePointers(inFP, outFP)
            assert f2t.header is None
            f2t.getHeader()
            assert f2t.VariantSetName is None
            f2t.parseNextLine()
            f2t.parseNextLine()
            self._assert_parsed(
                f2t, self._expected_values(),
                individual="Individual_{0}".format(test_data[4]))
|
from mock import Mock, MagicMock, patch, call, mock_open
# To run unittests on python 2.6 please use unittest2 library
try:
import unittest2 as unittest
except ImportError:
import unittest
import re, jenkinsapi
from jenkinsapi.artifact import Artifact
from jenkinsapi.build import Build
from jenkinsapi.custom_exceptions import ArtifactBroken
class ArtifactTest(unittest.TestCase):
    """Tests for jenkinsapi.artifact.Artifact save/verify behaviour.

    The owning Build, the Jenkins connection and all filesystem access
    are mocked, so the tests run without network or disk I/O.
    """
    def setUp(self):
        # Minimal mocked build exposing only the attributes Artifact reads.
        self._build = build = Mock()
        build.buildno = 9999
        job = self._build.job
        job.jenkins.baseurl = 'http://localhost'
        job.name = 'TestJob'
        self._artifact = Artifact('artifact.zip', 'http://localhost/job/TestJob/9999/artifact/artifact.zip', build)
    # NOTE: stacked @patch decorators are applied bottom-up, so the mock
    # argument order is (mock_isdir, mock_exists) in the tests below.
    @patch('jenkinsapi.artifact.os.path.exists', spec=True, return_value=True)
    @patch('jenkinsapi.artifact.os.path.isdir', spec=True, return_value=True)
    def test_save_to_dir(self, mock_isdir, mock_exists):
        """save_to_dir() appends the filename and delegates to save()."""
        artifact = self._artifact
        artifact.save = Mock(spec=Artifact.save, return_value='/tmp/artifact.zip')
        self.assertEqual(artifact.save_to_dir('/tmp'), '/tmp/artifact.zip')
        mock_exists.assert_called_once_with('/tmp')
        mock_isdir.assert_called_once_with('/tmp')
        artifact.save.assert_called_once_with('/tmp/artifact.zip', False)
    @patch('jenkinsapi.artifact.os.path.exists', spec=True, return_value=True)
    @patch('jenkinsapi.artifact.os.path.isdir', spec=True, return_value=True)
    def test_save_to_dir_strict(self, mock_isdir, mock_exists):
        """strict_validation=True is forwarded to save()."""
        artifact = self._artifact
        artifact.save = Mock(return_value='/tmp/artifact.zip')
        self.assertEqual(artifact.save_to_dir('/tmp', strict_validation=True), '/tmp/artifact.zip')
        mock_exists.assert_called_once_with('/tmp')
        mock_isdir.assert_called_once_with('/tmp')
        artifact.save.assert_called_once_with('/tmp/artifact.zip', True)
    @patch('jenkinsapi.artifact.open', mock_open(), create=True)
    @patch('jenkinsapi.artifact.Fingerprint', spec=True)
    def test_verify_download_valid_positive(self, MockFingerprint):
        """A known, validated fingerprint makes _verify_download() succeed."""
        # mock_open() only mocks out f.read(), which reads all content at a time.
        # However, _verify_download() reads the file in chunks.
        f = jenkinsapi.artifact.open.return_value
        f.read.side_effect = [b'chunk1', b'chunk2', b''] # empty string indicates EOF
        fp = MockFingerprint.return_value
        fp.validate_for_build.return_value = True
        fp.unknown = False
        self.assertTrue(self._artifact._verify_download('/tmp/artifact.zip', False))
        MockFingerprint.assert_called_once_with(
            'http://localhost',
            '097c42989a9e5d9dcced7b35ec4b0486', # MD5 of 'chunk1chunk2'
            self._build.job.jenkins)
        fp.validate_for_build.assert_called_once_with('artifact.zip', 'TestJob', 9999)
    @patch('jenkinsapi.artifact.Fingerprint', spec=True)
    def test_verify_download_valid_negative(self, MockFingerprint):
        """An unknown fingerprint is tolerated when validation is not strict."""
        artifact = self._artifact
        artifact._md5sum = Mock(return_value='097c42989a9e5d9dcced7b35ec4b0486')
        fp = MockFingerprint.return_value
        fp.validate_for_build.return_value = True
        fp.unknown = True # negative
        self.assertTrue(self._artifact._verify_download('/tmp/artifact.zip', False)) # not strict
    @patch('jenkinsapi.artifact.Fingerprint', spec=True)
    def test_verify_download_valid_negative_strict(self, MockFingerprint):
        """An unknown fingerprint raises ArtifactBroken under strict mode."""
        artifact = self._artifact
        artifact._md5sum = Mock(return_value='097c42989a9e5d9dcced7b35ec4b0486')
        fp = MockFingerprint.return_value
        fp.validate_for_build.return_value = True
        fp.unknown = True # negative
        with self.assertRaisesRegexp(ArtifactBroken, re.escape(
                'Artifact 097c42989a9e5d9dcced7b35ec4b0486 seems to be broken, check http://localhost')):
            self._artifact._verify_download('/tmp/artifact.zip', True) # strict
    @patch('jenkinsapi.artifact.open', mock_open(), create=True)
    @patch('jenkinsapi.artifact.Fingerprint', spec=True)
    def test_verify_download_invalid(self, MockFingerprint):
        """A fingerprint that fails build validation raises ArtifactBroken."""
        f = jenkinsapi.artifact.open.return_value
        f.read.side_effect = [b'chunk1', b'chunk2', b''] # empty string indicates EOF
        fp = MockFingerprint.return_value
        fp.validate_for_build.return_value = False
        fp.unknown = False
        with self.assertRaisesRegexp(ArtifactBroken, re.escape(
                'Artifact 097c42989a9e5d9dcced7b35ec4b0486 seems to be broken, check http://localhost')):
            self._artifact._verify_download('/tmp/artifact.zip', False)
        MockFingerprint.assert_called_once_with(
            'http://localhost',
            '097c42989a9e5d9dcced7b35ec4b0486', # MD5 of 'chunk1chunk2'
            self._build.job.jenkins)
        fp.validate_for_build.assert_called_once_with('artifact.zip', 'TestJob', 9999)
    @patch('jenkinsapi.artifact.os.path.exists', spec=True, return_value=True)
    def test_save_has_valid_local_copy(self, mock_exists):
        """save() keeps an existing local file that verifies successfully."""
        artifact = self._artifact
        artifact._verify_download = Mock(return_value=True)
        self.assertEqual(artifact.save('/tmp/artifact.zip'), '/tmp/artifact.zip')
        mock_exists.assert_called_once_with('/tmp/artifact.zip')
        artifact._verify_download.assert_called_once_with('/tmp/artifact.zip', False)
    @patch('jenkinsapi.artifact.os.path.exists', spec=True, return_value=True)
    def test_save_has_invalid_local_copy_download_again(self, mock_exists):
        """A broken local copy triggers a re-download that then verifies."""
        artifact = self._artifact
        artifact._verify_download = Mock(side_effect=[ArtifactBroken, True])
        artifact._do_download = Mock(return_value='/tmp/artifact.zip')
        self.assertEqual(artifact.save('/tmp/artifact.zip', True), '/tmp/artifact.zip')
        mock_exists.assert_called_once_with('/tmp/artifact.zip')
        artifact._do_download.assert_called_once_with('/tmp/artifact.zip')
        self.assertEqual(artifact._verify_download.mock_calls, [call('/tmp/artifact.zip', True)] * 2)
    @patch('jenkinsapi.artifact.os.path.exists', spec=True, return_value=True)
    def test_save_has_invalid_local_copy_download_but_invalid(self, mock_exists):
        """If the re-downloaded file is also broken, ArtifactBroken propagates."""
        artifact = self._artifact
        artifact._verify_download = Mock(side_effect=[ArtifactBroken, ArtifactBroken])
        artifact._do_download = Mock(return_value='/tmp/artifact.zip')
        with self.assertRaises(ArtifactBroken):
            artifact.save('/tmp/artifact.zip', True)
        mock_exists.assert_called_once_with('/tmp/artifact.zip')
        artifact._do_download.assert_called_once_with('/tmp/artifact.zip')
        self.assertEqual(artifact._verify_download.mock_calls, [call('/tmp/artifact.zip', True)] * 2)
    @patch('jenkinsapi.artifact.os.path.exists', spec=True, return_value=False)
    def test_save_has_no_local_copy(self, mock_exists):
        """Without a local copy, save() downloads and then verifies."""
        artifact = self._artifact
        artifact._do_download = Mock(return_value='/tmp/artifact.zip')
        artifact._verify_download = Mock(return_value=True)
        self.assertEqual(artifact.save('/tmp/artifact.zip'), '/tmp/artifact.zip')
        mock_exists.assert_called_once_with('/tmp/artifact.zip')
        artifact._do_download.assert_called_once_with('/tmp/artifact.zip')
        artifact._verify_download.assert_called_once_with('/tmp/artifact.zip', False)
|
import glob
import time
import os
from primitives.track import Track
from primitives.grid import Grid
from cv_toolkit.cams import FisheyeCamera
from cv_toolkit.transform.camera import UndistortionTransform
from cv_toolkit.transform.common import PixelCoordinateTransform
from ..filtering.measurements import MeasurementDB
import field_toolkit.approx as field_approx
class ApproximationPipeline(object):
    """Approximates a velocity field from tracked image points.

    Usage: load(config) -> initialize() -> run(), then optionally
    saveApproximation() / saveMeasurements().
    """

    def __init__(self, config=None):
        """Optionally load *config* immediately; otherwise call load() later."""
        if config is not None:
            self.load(config)
        else:
            self._config = None

    def load(self, config):
        """Configure the pipeline from *config*.

        Sets up the camera model, input/track directories, the measurement
        filtering grid and database, the image-space transforms, and the GP
        approximator named by config.approximationMethod.

        Raises:
            ValueError: if config.approximationMethod is not recognised.
        """
        self._config = config
        # Load camera from file
        self._camera = FisheyeCamera.from_file(config.camFile)
        # Set input and track directories
        self._inputDir = config.inputDir
        self._trackDir = f"{self._inputDir}/tracks"
        # Initialize grid for measurement filtering
        self._measurementGrid = Grid(*self._camera.imgSize, *config.measurementGridDim)
        # Initialize measurement filtering database
        self._mDB = MeasurementDB(self._measurementGrid, **config.getFilteringParams())
        # Initialize transformations
        self._unTrans = UndistortionTransform(self._camera)
        self._pxTrans = PixelCoordinateTransform(self._camera.imgSize)
        # Select the approximator via a dispatch table.
        approximators = {
            'simple': field_approx.gp.GPApproximator,
            'coregionalized': field_approx.gp.CoregionalizedGPApproximator,
            'sparse': field_approx.gp.SparseGPApproximator,
            'integral': field_approx.gp.IntegralGPApproximator,
        }
        try:
            approximatorClass = approximators[config.approximationMethod]
        except KeyError:
            # BUG FIX: previously printed an error and called bare exit(),
            # which kills the interpreter from library code and omitted the
            # offending value; raise so callers can handle it.
            raise ValueError(
                f"Unknown approximation method: {config.approximationMethod!r}")
        self._gp = approximatorClass()

    def initialize(self):
        """Create the timestamped run/measurement output folders, save the
        config there, and clear any previously accumulated measurements."""
        self._runDir = f"{self._inputDir}/approx_{time.strftime('%Y_%m_%d_%H_%M_%S')}"
        if not os.path.exists(self._runDir):
            os.makedirs(self._runDir)
        self._measurementDir = f"{self._runDir}/measurements"
        if not os.path.exists(self._measurementDir):
            os.makedirs(self._measurementDir)
        # Save config file alongside the results for reproducibility.
        self._config.save(f"{self._runDir}/approx_config.yaml")
        # Clear any previous measurements from database or gp approx
        self._mDB.clearMeasurements()
        self._gp.clearMeasurements()

    def run(self):
        """Load training tracks, extract filtered velocity measurements,
        and fit the GP field approximation."""
        startTime = time.time()
        # Gather track files from every configured training subset.
        trackFiles = []
        for subset in self._config.trainingSets:
            trackFiles.extend(glob.glob(f"{self._trackDir}/{subset}/track_*.json"))
        print(f"Loading {len(trackFiles)} tracks")
        tracks = Track.from_file_list(trackFiles)
        # Undistort, then convert to pixel coordinates, before measuring.
        transformedTracks = self._pxTrans.transformTracks(self._unTrans.transformTracks(tracks))
        for t in transformedTracks:
            self._mDB.addMeasurements(t.measureVelocity(**self._config.getMeasurementParams(), **self._config.measurementMethodParams))
        # Spatially subsample measurements (per grid cell) before fitting.
        self._trainingMeasurements = self._mDB.getMeasurements(self._config.measurementsPerCell)
        if len(self._trainingMeasurements) > 0:
            self._gp.clearMeasurements()
            self._gp.addMeasurements(self._trainingMeasurements)
        # NOTE(review): approximate() is invoked even when no measurements
        # were gathered -- confirm that is intended.
        self._fieldApprox = self._gp.approximate()
        totalTime = time.time() - startTime
        print(f"Approximation complete in {totalTime} seconds")

    def saveApproximation(self):
        """Persist the fitted field approximation into the run directory."""
        self._fieldApprox.save(f"{self._runDir}/approx.field")

    def saveMeasurements(self):
        """Persist the measurements that were used for the approximation."""
        for i, m in enumerate(self._trainingMeasurements):
            m.save(f"{self._measurementDir}/measurement_{i}.json")
        # Todo save measurement database images/data

    @property
    def runDir(self):
        """Output directory of the current run (set by initialize())."""
        return self._runDir
# -*- encoding: utf-8 -*-
import logging
import arrow
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
logger = logging.getLogger('common')
# Allowed values for the corresponding ChoiceFields on ListAPIForm below.
FORMAT_CHOICES = (('csv', 'csv'),)
PARTY_TYPE_CHOICES = (('id', 'id'), ('ip', 'ip'))
VIEW_CHOICES = (('simple', 'simple'), ('detail', 'detail'))
def validate_date_format(value):
    """Validate that *value* is a date-format string arrow can render.

    Lowercase 'yyyy'/'dd' tokens are normalised to arrow's 'YYYY'/'DD'
    before the trial render (mirrors ListAPIForm.clean_dateFormat).

    Raises:
        ValidationError: if arrow cannot format with the given pattern.
    """
    try:
        value = value.replace('yyyy', 'YYYY').\
            replace('dd', 'DD')
        arrow.now().format(value)
    except Exception:
        # BUG FIX: narrowed from a bare ``except:`` so KeyboardInterrupt /
        # SystemExit are no longer swallowed; arrow may raise several
        # exception types here, hence Exception rather than one class.
        raise ValidationError(u'ไธๆญฃใชๆฅๆใใฉใผใใใใงใใ')
class ListAPIForm(forms.Form):
    """Query-parameter validation form for the list API endpoint."""
    # API key must match the single configured key exactly (anchored regex).
    apiKey = forms.CharField(
        max_length=40, required=True,
        validators=[
            RegexValidator(regex='^{}$'.format(settings.API_KEY),
                message=u'ไธๆญฃใชใญใผใงใใ')],
        widget=forms.TextInput(attrs={'size': 40}))
    # Optional date range, both ends in compact YYYYMMDD form.
    dateFrom = forms.DateField(
        input_formats=['%Y%m%d'], required=False,
        widget=forms.DateInput(attrs={'size': 8}, format='%Y%m%d'))
    dateTo = forms.DateField(
        input_formats=['%Y%m%d'], required=False,
        widget=forms.DateInput(attrs={'size': 8}, format='%Y%m%d'))
    partyType = forms.ChoiceField(PARTY_TYPE_CHOICES, required=False)
    view = forms.ChoiceField(VIEW_CHOICES, required=False)
    # Free-form output date pattern, validated by validate_date_format above.
    dateFormat = forms.CharField(
        max_length=128, required=False, validators=[validate_date_format],
        widget=forms.TextInput(attrs={'size': 128}))
    format = forms.ChoiceField(FORMAT_CHOICES, required=False)
    def clean_dateFormat(self):
        # Normalise lowercase 'yyyy'/'dd' tokens to arrow's 'YYYY'/'DD',
        # matching the normalisation done during validation.
        value = self.cleaned_data['dateFormat']
        value = value.replace('yyyy', 'YYYY').\
            replace('dd', 'DD')
        return value
|
from flask import Flask, Response, send_from_directory, render_template
app = Flask('app', static_url_path='')
# Static-file routes.
# NOTE(review): the Charles* routes below serve a bare filename from '.'
# even though their URLs say /images/... -- confirm those files really live
# in the app root rather than in images/.
@app.route('/style.css')
def stylecss():
    print("hi")
    return send_from_directory('.', path='style.css')
@app.route('/style2.css')
def style2css():
    print("hi")
    return send_from_directory('.', path='style2.css')
@app.route('/images/CharlesOnlyFans1.jpg')
def Charles1():
    return send_from_directory('.', path='CharlesOnlyFans1.jpg')
@app.route('/images/CharlesOnlyFans2.jpg')
def Charles2():
    return send_from_directory('.', path='CharlesOnlyFans2.jpg')
@app.route('/images/CharlesOnlyFans3.jpg')
def Charles3():
    return send_from_directory('.', path='CharlesOnlyFans3.jpg')
@app.route('/images/default-logo.png')
def logo():
    return send_from_directory('.', path='images/default-logo.png')
@app.route('/')
def hello_world():
    # Plain response with a Link header pointing at the stylesheet.
    response = Response("<!Perhaps this should be in Firefox?>")
    response.headers['link'] = '<style.css>; rel=stylesheet;'
    return response
@app.route('/macktubbies.png')
def macktub():
    # NOTE(review): URL says "macktubbies" but the file path says
    # "macketubbies" -- confirm the spelling mismatch is intentional.
    return send_from_directory('.', path='images/macketubbies.png')
@app.route('/rickroll')
def astley():
    response = Response(render_template('index.html'))
    response.headers['link'] = '<style2.css>; rel=stylesheet;'
    return response
# if __name__ == "__main__":
#     app.run(debug=True)
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 1 13:50:59 2019
@author: Luke
"""
import pystan
import pandas as pd
# BUG FIX: was ``import matplotlib as plt`` -- the bare matplotlib package
# has no figure()/scatter()/plot(); pyplot must be imported explicitly.
import matplotlib.pyplot as plt
import scipy

# Synthetic golf data: one row per (player, hole) observation.
df = pd.read_csv('synthetic_data.csv')
player_names = df.Player.unique()
# Unpooled model: an independent effect per observation.
# NOTE(review): parameter `h` is declared but never used -- d_hat adds the
# *data* vector holes[i]; likewise the `players` data vector is unused.
# Confirm whether h[i] was intended in place of holes[i].
unpooled_model = """data {
  int<lower=0> nholes;
  vector[nholes] players;
  vector[nholes] holes;
  vector[nholes] dist;
}
parameters {
  vector[nholes] p;
  vector[nholes] h;
  real avg;
  real<lower=0,upper=100> sigma;
}
transformed parameters {
  vector[nholes] d_hat;
  for (i in 1:nholes)
    d_hat[i] <- avg + p[i] + holes[i];
}
model {
  dist ~ normal(d_hat, sigma);
}"""
unpooled_data = {'nholes': 2000,
                 'players': df['Player'],  # Stan counts starting at 1
                 'holes': df['Hole'],
                 'dist': df['Distance']}
sm = pystan.StanModel(model_code=unpooled_model)
unpooled_fit = sm.sampling(data=unpooled_data, iter=1000, chains=2)
# BUG FIX: the model declares no parameter 'a', so extracting 'a' always
# fails; 'p' (the per-observation player effect) is the closest intended
# quantity. TODO(review): 'p' has length nholes, which may not match
# len(player_names) -- verify the intended aggregation.
unpooled_estimates = pd.Series(unpooled_fit['p'].mean(0), index=player_names)
unpooled_se = pd.Series(unpooled_fit['p'].std(0), index=player_names)
# Plot point estimates with +/- 1 SE bars, ordered by estimate.
# NOTE(review): axis labels/title mention "price estimates" -- they look
# copied from a tutorial; kept verbatim, confirm before publishing plots.
order = unpooled_estimates.sort_values().index
plt.figure(figsize=(18, 6))
plt.scatter(range(len(unpooled_estimates)), unpooled_estimates[order])
for i, m, se in zip(range(len(unpooled_estimates)), unpooled_estimates[order], unpooled_se[order]):
    plt.plot([i,i], [m-se, m+se], 'b-')
plt.xlim(-1,690);
plt.ylabel('Price estimate (log scale)');plt.xlabel('Ordered category');plt.title('Variation in category price estimates');
# Generated by Django 2.2.10 on 2021-11-11 12:47
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration: drop the ``value`` column from
    ``poles_app.Answer``."""
    dependencies = [
        ('poles_app', '0002_auto_20211111_1243'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='answer',
            name='value',
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.