blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ed0cf72b39a6f14b5f816f8ed59a8ae0a23a3e5e | d921253b98a922975709693c411218746af2f017 | /bgx/telebot/bgt_bot_api/bot_handlers.py | 2f583a0ac5a05ac6fa1d64bbed7bc09e7f3ffb96 | [
"Zlib",
"MIT",
"Apache-2.0"
] | permissive | bestonly125/DGT-Kawartha | 223f88e224c1464fa22a4512e4567ac7ce1bc78f | edfbc18f2c70e813805ec23c28fbc35bf7866ffc | refs/heads/master | 2022-11-22T13:18:21.204906 | 2020-07-24T09:03:57 | 2020-07-24T09:03:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,606 | py | # Copyright 2020 NTRLab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
"""
sudo pip3 install apiai
sudo pip3 install pytelegrambotapi
sudo pip3 install dialogflow
sudo pip3 install pysocks
"""
import asyncio
import queue
import re
import logging
import json
import base64
from collections import namedtuple
from concurrent.futures import ThreadPoolExecutor,ProcessPoolExecutor
import bgt_bot_api.exceptions as errors
from bgt_bot_api import error_handlers
from google.protobuf.json_format import MessageToDict
from google.protobuf.message import DecodeError
from bgt_bot_api.messaging import DisconnectError
from bgt_bot_api.messaging import SendBackoffTimeoutError
from requests.exceptions import ConnectTimeout,ReadTimeout
from sawtooth_sdk.protobuf.validator_pb2 import Message
from sawtooth_sdk.protobuf import client_heads_pb2,client_topology_pb2
from sawtooth_sdk.protobuf import client_peers_pb2
import telebot
from telebot import apihelper
from bgt_bot_api.dflow import Dflow
LOGGER = logging.getLogger(__name__)
# One record per incoming Telegram message after Dialogflow classification;
# `result` carries the raw Dialogflow query_result (or None), `batch_id` is
# filled in later by transaction-submitting handlers.
BotMessage = namedtuple('BotMessage', "message_id chat_id user_id user_first_name user_last_name intent confidence result batch_id")
FLUSH_TIMEOUT=3600        # seconds between idle wake-ups of the (unused) main_task loop
DEFAULT_TIMEOUT = 300     # validator request timeout, seconds
PROJECT_ID = 'small-talk-wfkygw'   # default Dialogflow project
SESSION_ID = '123456789'           # default Dialogflow session
language_code = 'ru'               # language passed to Dialogflow detect_intent_text
# SECURITY: hardcoded Telegram bot API token committed to source control --
# it should be revoked and loaded from the environment or a config file.
TOKEN='1205652427:AAFr0eynwihWGyvObUA0QSjOfKMwiH3HkZs'
# Round-robin pool of SOCKS proxies for reaching the Telegram API.
PROXIES = ['82.223.120.213:1080','138.201.6.102:1080','85.10.235.14:1080','217.69.10.129:32401','217.182.230.15:4485','96.96.33.133:1080','93.157.248.106:1080','81.17.20.50:1177','217.69.10.129:32401','1.179.185.253:8080']
class Tbot(object):
    """Telegram chat bot that classifies messages with Dialogflow and
    dispatches recognised intents, some of which query a BGT validator
    over the supplied ZMQ connection.
    """
    def __init__(self,loop, connection,tdb,token=TOKEN,project_id=PROJECT_ID,session_id=SESSION_ID,proxy=PROXIES,connects=None):
        """Store collaborators and set up Dialogflow.

        loop       -- asyncio event loop driving _polling()
        connection -- validator messaging connection (send/reopen)
        tdb        -- key/value store for users and stickers
        connects   -- optional list of alternative validator endpoints
        """
        self._connects = connects
        self._conn_n = 0                 # index of the currently used endpoint in _connects
        self._tdb = tdb
        self._proxies = proxy if proxy else PROXIES
        self._project_id = project_id if project_id else PROJECT_ID
        self._session_id = session_id if session_id else SESSION_ID
        self._proxy_pos = 1
        self.set_proxy()
        self._connection = connection
        self._loop = loop
        self._token = token
        self._intent_handlers = {}       # intent name -> coroutine handler
        self._keyboard1 = telebot.types.ReplyKeyboardMarkup(True, True,True)
        self._keyboard1.row('Привет', 'Пока','Sticker')
        self._timeout = DEFAULT_TIMEOUT
        self._bgt_queue = queue.Queue()  # BotMessage instances awaiting processing
        self.is_pause = False            # when True the bot stays silent (see can_talk)
        LOGGER.info('USE proxy=%d from %d',self._proxy_pos,len(self._proxies))
        try:
            self._dflow = Dflow(self._project_id,self._session_id)
            LOGGER.info('DFLOW OK')
        except Exception as e:
            # Dialogflow is optional: without it messages are not classified.
            LOGGER.info('DFLOW error %s',e)
            self._dflow = None
def set_proxy(self):
proxy = self._proxies[self._proxy_pos]
#apihelper.proxy = {'https': 'socks5://{}'.format(proxy)}
self._proxy_pos += 1
self._proxy_pos = self._proxy_pos % len(self._proxies)
LOGGER.info("NEXT proxy %d",self._proxy_pos)
def send_message(self,chat_id,repl):
n = 3
while n > 0:
try:
if repl != '':
self._bot.send_message(chat_id,repl)
return
except ReadTimeout:
LOGGER.info('Cant send message err=Timeout')
except Exception as ex:
LOGGER.info('Cant send message err=%s',ex)
n += 1
    def send_sticker(self,chat_id,sticker):
        """Send a sticker (by file id) to *chat_id*; timeouts are only logged."""
        try:
            self._bot.send_sticker(chat_id,sticker)
        except ReadTimeout:
            LOGGER.info('Cant send send_sticker err=Timeout')
def _start_bot(self):
bot = telebot.TeleBot(self._token)
#blog = logging.getLogger('TeleBot')
#blog.setLevel(logging.INFO)
self._bot = bot
keyboard1 = telebot.types.ReplyKeyboardMarkup(True, True,True,True)
keyboard1.row('Привет', 'Admins','Sticker','Wallet')
def send_message(chat_id,repl):
try:
if repl != '':
bot.send_message(chat_id,repl)
except ReadTimeout:
LOGGER.info('Cant send message err=Timeout')
@bot.message_handler(commands=['start'])
def start_message(message):
self.send_message(message.chat.id, 'Привет {}, ты написал мне /start'.format(message.from_user.first_name),reply_markup=keyboard1)
@bot.message_handler(commands=['info'])
def info_message(message):
chat = bot.get_chat(message.chat.id)
self.send_message(message.chat.id, 'Смотри {}, {}'.format(message.from_user.first_name,str(chat)))
@bot.message_handler(content_types=['sticker'])
def sticker_message(message):
LOGGER.info("sticker_message %s",message)
#bot.send_message(message.chat.id, message)
#bot.send_sticker(message.chat.id,message.sticker.file_id)
s_key = message.json['sticker']['file_unique_id'] #message.sticker.file_id
if s_key not in self._tdb :
LOGGER.info("NEW STICKER %s un=%s", s_key, message.sticker.file_id) #.file_unique_id)
self._tdb.put(s_key,{'type':'sticker','file_id':message.sticker.file_id,'name': message.sticker.set_name, 'is_animated':message.sticker.is_animated})
self.send_message(message.chat.id, 'Отличный стикер из {} сохраню'.format(message.sticker.set_name))
else:
self.send_message(message.chat.id, 'Спасибо за стикер из {},но такой есть'.format(message.sticker.set_name))
@bot.message_handler(content_types=['text'])
def send_text(message):
self.check_user(message.from_user)
if message.text == 'Привет1' or message.text == 'привет1':
self.send_message(message.chat.id, 'Привет, мой {} {}'.format('создатель' if message.from_user.first_name == 'Stan' else 'господин',message.from_user.first_name),reply_to_message_id=0)
photo = bot.get_user_profile_photos(message.chat.id,0,1)
p1 = photo.photos[0][0]
LOGGER.info('photo=%s',photo.photos[0][0])
file = bot.get_file(p1.file_id)
fnm = 'https://api.telegram.org/file/bot'+TOKEN+'/'+file.file_path
bot.send_photo(message.chat.id,p1.file_id)
LOGGER.info("Пришел {}".format(message.from_user.first_name))
try:
bot.pin_chat_message(message.chat.id,message.message_id)
except Exception as e:
LOGGER.info("cant pin message %s",e)
#f = open('p1.jpg', 'w')
#f.write(str(file))
#f.close
#
elif message.text == 'Sticker':
try:
self.send_message("@sticker", '@sticker :)')
except Exception as ex:
LOGGER.info("cant send message %s",ex)
elif message.text == 'Пока1' or message.text == 'пока1':
self.send_message(message.chat.id, 'Прощай, {}'.format('создатель' if message.from_user.first_name == 'Stan' else 'господин'))
try:
bot.set_chat_title("@Shiva64_bot","Хозяин покинул меня")
except :
LOGGER.info("cant set title")
elif message.text[0] == '@':
try:
LOGGER.info("GET CHAT %s",message.text[1:])
chat = bot.get_chat(message.text[1:])
self.send_message(message.chat.id, 'Смотри {}, {}'.format(message.from_user.first_name,str(chat)))
except Exception as e:
self.send_message(message.chat.id, 'Смотри {}, {}'.format(message.from_user.first_name,e))
# get chat info
else : #elif message.text[0] == '?':
if message.text == 'Привет':
LOGGER.info("message=%s",message)
resp = self._dflow.detect_intent_text(message.text,language_code) if self._dflow else None
if resp:
response = resp.query_result.fulfillment_text
confidence = round(resp.query_result.intent_detection_confidence,2)
intent = resp.query_result.intent.display_name
if intent != '':
repl = "{}({})".format(response,confidence) if response != '' else ''
else:
repl = 'Так, погоди, не врубаюсь!'
if self.can_talk(intent):
self.send_message(message.chat.id,repl)
LOGGER.info("DFLOW QUERY %s param=%s RESULT=%s",type(resp.query_result),type(resp.query_result.parameters),resp.query_result)
for param,val in resp.query_result.parameters.items():
LOGGER.info("PARAM %s='%s'(%s) ",param,val,type(val))
if intent != '' and intent in self._intent_handlers:
minfo = BotMessage(message.message_id,message.chat.id,message.from_user.id,message.from_user.first_name,message.from_user.last_name,intent,confidence,resp.query_result,None)
self.intent_handler(minfo)
else:
if not self.is_pause:
self.send_message(message.chat.id,'Я Вас не совсем понял {}!'.format(message.from_user.first_name))
# start polling
LOGGER.info('START BOT via=%s',apihelper.proxy)
self._stop = False
#self._bot.polling()
try:
LOGGER.info('ME=%s',bot.get_me())
except Exception as ex:
LOGGER.info('Cant get ME(%s)',ex)
    async def _polling(self):
        """Main loop: drain the intent queue, then fetch Telegram updates.

        On connection problems the proxy is rotated (shift_proxy); after a
        full cycle through the proxy list without success the loop stops.
        The read timeout starts small and grows up to 6s on ConnectTimeout.
        """
        self._attemp = 0
        self._timeout = 1
        def shift_proxy():
            # Rotate to the next proxy; give up after trying them all.
            self.set_proxy()
            if self._attemp > len(self._proxies):
                self._stop = True
            self._attemp += 1
        while not self._stop:
            await self.process_queue()
            try:
                # Long-poll for new updates past the last one we processed.
                updates = self._bot.get_updates(offset=(self._bot.last_update_id+1),timeout=self._timeout)
                self._attemp = 0
            except ConnectTimeout:
                LOGGER.info('Get updates ConnectTimeout')
                if self._timeout < 6:
                    self._timeout += 1
                shift_proxy()
                updates = None
            except Exception as ex :
                LOGGER.info('Get updates except=%s',ex)
                shift_proxy()
                updates = None
            if updates:
                LOGGER.info('UPDATE={}'.format(len(updates)))
                # Handle member join/leave bookkeeping before the normal handlers.
                self.check_member_add_left(updates)
                try:
                    self._bot.process_new_updates(updates)
                except Exception as ex :
                    LOGGER.info('Process updates except=%s',ex)
                LOGGER.info('last update=%s qsize=%s',self._bot.last_update_id,self._bgt_queue.qsize())
            else:
                pass
def check_user(self,from_user):
u_key = '{}'.format(from_user.id)
if u_key not in self._tdb :
LOGGER.info("NEW USER %s un=%s", u_key, from_user.first_name)
self._tdb.put(u_key,{'type':'user','name':from_user.first_name,'last_name': from_user.last_name})
return True
return False
def is_user_with_name(self,name):
try:
return self._tdb.contains_key(name,index='name')
except Exception as ex:
return False
    def check_member_add_left(self,updates):
        """Track member churn: register newcomers and drop leavers from the DB.

        A genuinely new member gets a 'create_wallet' intent queued so a
        wallet is created for them.
        NOTE(review): assumes every update carries a .message -- confirm for
        non-message update types (callback queries etc.).
        """
        for update in updates:
            if update.message.new_chat_member is not None:
                if self.check_user(update.message.new_chat_member):
                    # First time we see this user -> make a new wallet.
                    new_chat_member = update.message.new_chat_member
                    LOGGER.info('new_chat_member7 %s',new_chat_member)
                    minfo = BotMessage(update.message.message_id,update.message.chat.id,new_chat_member.id,new_chat_member.first_name,new_chat_member.last_name,'smalltalk.agent.create_wallet',1.0,None,None)
                    self.intent_handler(minfo)
            if update.message.new_chat_members is not None:
                # Bulk joins are currently only logged.
                LOGGER.info('new_chat_members %s',update.message.new_chat_members)
            if update.message.left_chat_member is not None:
                left_chat_member = update.message.left_chat_member
                LOGGER.info('del left_chat_member %s from DB',left_chat_member)
                self._tdb.delete(str(left_chat_member.id))
def add_intent_handler(self,intent_name,intent_handler):
"""
add handler for intention
"""
self._intent_handlers[intent_name] = intent_handler
def intent_handler(self,minfo):
# put intention into queue
self._bgt_queue.put(minfo)
LOGGER.info('RUN HANDLER FOR=%s size=%s',minfo.intent,self._bgt_queue.qsize())
    async def intent_hello(self,minfo):
        """
        Reply on hello: greet the user, try to show their profile photo
        and pin the greeting message.
        """
        # NOTE(review): reply_to_message_id=0 is not a valid message id --
        # confirm whether a real reply target was intended here.
        self._bot.send_message(minfo.chat_id, 'Чем могу помочь, мой {} {}?'.format('создатель' if minfo.user_first_name == 'Stan' else 'господин',minfo.user_first_name),reply_to_message_id=0)
        try:
            photo = self._bot.get_user_profile_photos(minfo.user_id,0,1)
            p1 = photo.photos[0][0]
            LOGGER.info('photo=%s',photo.photos[0][0])
            self._bot.send_photo(minfo.chat_id,p1.file_id)
        except Exception as ex:
            # Missing photo / privacy restrictions are non-fatal.
            LOGGER.info("Cant get user photo mess (%s)",ex)
        LOGGER.info("Пришел {}".format(minfo.user_first_name))
        try:
            self._bot.pin_chat_message(minfo.chat_id,minfo.message_id)
        except Exception as ex:
            LOGGER.info("Cant pin message %s",ex)
async def intent_bye(self,minfo):
self.send_message(minfo.chat_id, 'Заходи еще {}'.format('создатель' if minfo.user_first_name == 'Stan' else 'господин'))
    async def intent_help(self,minfo):
        """Answer the 'help' intent with the validator's current chain heads."""
        LOGGER.info('INTENT HELP chat_id=%s confidence=%s\n',minfo.chat_id,minfo.confidence)
        response = await self._query_validator(
            Message.CLIENT_HEADS_GET_REQUEST,
            client_heads_pb2.ClientHeadsGetResponse,
            client_heads_pb2.ClientHeadsGetRequest(head_id=''))
        self.send_message(minfo.chat_id, 'Посмотри: {}'.format(response))
        LOGGER.info('response HELP=%s\n',response)
    async def intent_chat_admins(self,minfo):
        """Fetch the chat's administrator list.

        NOTE(review): the result is only logged, never sent back to the
        chat -- confirm whether a reply was intended.
        """
        try:
            repl = self._bot.get_chat_administrators(minfo.chat_id)
            LOGGER.info('admins :%s\n',repl)
        except Exception as ex:
            # e.g. {"ok":false,"error_code":400,"description":"Bad Request: ..."}
            LOGGER.info("cant get admins %s",ex)
async def intent_get_users(self,minfo):
users = ''
with self._tdb.cursor(index='name') as curs:
#values = list(curs.iter())
for val in curs.iter():
if val['type'] == 'user':
users += val['name']+','
self.send_message(minfo.chat_id, 'Я знаю вот кого : {}'.format(users))
    async def intent_hold_on(self,minfo):
        """'hold on' intent: currently only logged."""
        LOGGER.info('INTENT HOLD ON chat_id=%s confidence=%s\n',minfo.chat_id,minfo.confidence)
    async def intent_needs_advice(self,minfo):
        """Answer the 'needs advice' intent with the validator's peer list."""
        LOGGER.info('INTENT NEEDS_ADVICE chat_id=%s confidence=%s\n',minfo.chat_id,minfo.confidence)
        response = await self._query_validator(
            Message.CLIENT_PEERS_GET_REQUEST,
            client_peers_pb2.ClientPeersGetResponse,
            client_peers_pb2.ClientPeersGetRequest())
        self.send_message(minfo.chat_id, 'Посмотри: {}'.format(response))
    async def intent_pause(self,minfo):
        """Mute the bot; while muted only 'unpause' is answered (see can_talk)."""
        LOGGER.info('INTENT PAUSE chat_id=%s confidence=%s\n',minfo.chat_id,minfo.confidence)
        self.is_pause = True
    async def intent_unpause(self,minfo):
        """Unmute the bot."""
        LOGGER.info('INTENT UNPAUSE chat_id=%s confidence=%s\n',minfo.chat_id,minfo.confidence)
        self.is_pause = False
@staticmethod
def _parse_response(proto, response):
"""Parses the content from a validator response Message.
"""
try:
content = proto()
content.ParseFromString(response.content)
return content
except (DecodeError, AttributeError):
LOGGER.error('Validator response was not parsable: %s', response)
return None
#raise errors.ValidatorResponseInvalid()
async def _query_validator(self, request_type, response_proto,payload, error_traps=None):
"""
Sends a request to the validator and parses the response.
"""
LOGGER.debug('Sending %s request to validator',self._get_type_name(request_type))
payload_bytes = payload.SerializeToString()
response = await self._send_request(request_type, payload_bytes)
"""
#response = self._loop.run_until_complete(self._send_request(request_type, payload_bytes))
resp = []
async def send_request():
return await self._send_request(request_type, payload_bytes)
async def send_task(resp):
task = self._loop.create_task(send_request())
response = await task
resp.append(response)
LOGGER.debug('Sending request finished %s',response)
return None
#self._loop.run_until_complete(send_task(resp))
response = resp.pop()
"""
LOGGER.debug('response %s',type(response))
#task = asyncio.ensure_future(self._send_request(request_type, payload_bytes))
#response = asyncio.wait(task)
#response = await self._send_request(request_type, payload_bytes)
#response = self._send_request(request_type, payload_bytes)
content = self._parse_response(response_proto, response)
if content is not None:
LOGGER.debug(
'Received %s response from validator with status %s',
self._get_type_name(response.message_type),
self._get_status_name(response_proto, content.status))
self._check_status_errors(response_proto, content, error_traps)
return self._message_to_dict(content)
    async def _send_request(self, request_type, payload):
        """Uses an executor to send an asynchronous ZMQ request to the
        validator with the handler's Connection.

        NOTE(review): on DisconnectError / TimeoutError the exception is
        swallowed after a reconnect attempt and the coroutine returns
        None -- confirm that callers tolerate a None response.
        """
        try:
            return await self._connection.send( # await
                message_type=request_type,
                message_content=payload,
                timeout=self._timeout)
        except DisconnectError:
            LOGGER.warning('Validator disconnected while waiting for response')
            # reconnect to the currently selected gateway
            self.change_gateway(self._conn_n)
            #raise errors.ValidatorDisconnected()
        except asyncio.TimeoutError:
            LOGGER.warning('Timed out while waiting for validator response')
            self.change_gateway(self._conn_n)
            #raise errors.ValidatorTimedOut()
        except SendBackoffTimeoutError:
            LOGGER.warning('Failed sending message - Backoff timed out')
            raise errors.SendBackoffTimeout()
def change_gateway(self,num):
url = self._connects[num]
try:
self._connection.reopen(url)
self._conn_n = num
except:
pass
return self._conn_n == num
@staticmethod
def _check_status_errors(proto, content, error_traps=None):
"""Raises HTTPErrors based on error statuses sent from validator.
Checks for common statuses and runs route specific error traps.
"""
if content.status == proto.OK:
return
try:
if content.status == proto.INTERNAL_ERROR:
raise errors.UnknownValidatorError()
except AttributeError:
# Not every protobuf has every status enum, so pass AttributeErrors
pass
try:
if content.status == proto.NOT_READY:
raise errors.ValidatorNotReady()
except AttributeError:
pass
try:
if content.status == proto.NO_ROOT:
raise errors.HeadNotFound()
except AttributeError:
pass
try:
if content.status == proto.INVALID_PAGING:
raise errors.PagingInvalid()
except AttributeError:
pass
try:
if content.status == proto.INVALID_SORT:
raise errors.SortInvalid()
except AttributeError:
pass
# Check custom error traps from the particular route message
if error_traps is not None:
for trap in error_traps:
trap.check(content.status)
    @staticmethod
    def _message_to_dict(message):
        """Converts a Protobuf object to a python dict with desired settings.

        Default-valued fields are kept and original proto field names are
        preserved (no camelCase conversion).
        """
        return MessageToDict(
            message,
            including_default_value_fields=True,
            preserving_proto_field_name=True)
    @staticmethod
    def _get_type_name(type_enum):
        """Human-readable name for a Message.MessageType enum value."""
        return Message.MessageType.Name(type_enum)
    @staticmethod
    def _get_status_name(proto, status_enum):
        """Human-readable name for a response status, tolerant of unknown values."""
        try:
            return proto.Status.Name(status_enum)
        except ValueError:
            return 'Unknown ({})'.format(status_enum)
def _drop_empty_props(self, item):
"""Remove properties with empty strings from nested dicts.
"""
if isinstance(item, list):
return [self._drop_empty_props(i) for i in item]
if isinstance(item, dict):
return {
k: self._drop_empty_props(v)
for k, v in item.items() if v != ''
}
return item
def _drop_id_prefixes(self, item):
"""Rename keys ending in 'id', to just be 'id' for nested dicts.
"""
if isinstance(item, list):
return [self._drop_id_prefixes(i) for i in item]
if isinstance(item, dict):
return {
'id' if k.endswith('id') else k: self._drop_id_prefixes(v)
for k, v in item.items()
}
return item
def can_talk(self,intent):
return not self.is_pause or (intent == "smalltalk.agent.unpause")
    async def validator_task(self):
        """Endless consumer loop over the intent queue.

        Alternative driver to the inline process_queue() call in _polling();
        any escaping exception is logged and terminates the task.
        """
        try:
            LOGGER.debug("validator_task:queue...")
            while True:
                await self.process_queue()
        # pylint: disable=broad-except
        except Exception as exc:
            LOGGER.exception(exc)
            LOGGER.critical("validator_task thread exited with error.")
    async def process_queue(self):
        """Pop one queued BotMessage (if any) and await its intent handler.

        An empty queue is the normal fast path.  NOTE(review): the queue
        get() blocks the event loop for up to 10ms per call -- acceptable
        here, but confirm if more handlers become async-heavy.
        """
        try:
            request = self._bgt_queue.get(timeout=0.01)
            LOGGER.debug("VALIDATOR_TASK: intent=%s qsize=%s pause=%s",request.intent,self._bgt_queue.qsize(),self.is_pause)
            if self.can_talk(request.intent):
                await self._intent_handlers[request.intent](request)
        except queue.Empty:
            pass
        except errors.ValidatorDisconnected:
            # Raised from inside a handler, so `request` is bound here.
            LOGGER.debug("VALIDATOR Disconnected")
            self.send_message(request.chat_id, 'Похоже BGT временно не доступен (:')
        except KeyError as key:
            # No handler registered for this intent -- ignore it.
            LOGGER.debug("VALIDATOR_TASK: ignore=%s (no handler %s)",request.intent,key)
    def start(self):
        """Build the bot, then run the async polling loop until it stops."""
        async def main_task():
            # Currently unused alternative main loop (kept for reference).
            LOGGER.info('START MAIN...')
            while True:
                await asyncio.sleep(FLUSH_TIMEOUT)
        def bot_poll():
            # Currently unused blocking telebot polling variant.
            LOGGER.info('START BOT via=%s',PROXIES[0])
            self._bot.polling()
            LOGGER.info('STOP BOT')
        # Executor is created for the (disabled) threaded variants above.
        self._pool = ThreadPoolExecutor(max_workers=2) #ProcessPoolExecutor(max_workers=2)
        self._start_bot()
        LOGGER.info('START ...')
        # Drive the async polling loop to completion on the supplied loop.
        self._loop.run_until_complete(self._polling()) #main_task())
        LOGGER.info('STOP')
        self._loop.close()
        LOGGER.info('STOP DONE')
#
"""
{'content_type': 'sticker', 'message_id': 17, 'from_user': {'id': 456125525, 'is_bot': False, 'first_name': 'Stan', 'username': 'Thou_shalt', 'last_name': 'P', 'language_code': 'ru'}, 'date': 1587126688, 'chat': {'type': 'private', 'last_name': 'P', 'first_name': 'Stan', 'username': 'Thou_shalt', 'id': 456125525, 'title': None, 'all_members_are_administrators': None, 'photo': None, 'description': None, 'invite_link': None, 'pinned_message': None, 'sticker_set_name': None, 'can_set_sticker_set': None}, 'forward_from_chat': None, 'forward_from_message_id': None, 'forward_from': None, 'forward_date': None, 'reply_to_message': None, 'edit_date': None, 'media_group_id': None, 'author_signature': None, 'text': None, 'entities': None, 'caption_entities': None, 'audio': None, 'document': None, 'photo': None, 'sticker': {'file_id': 'CAACAgIAAxkBAAMRXpmhoAlC4ghzi1DpcbrNLuIJbaMAAgMAA8A2TxOkKe7mffPAeBgE', 'width': 512, 'height': 512, 'thumb': <telebot.types.PhotoSize object at 0x7f7e51f3f2b0>, 'emoji': '😨', 'set_name': 'HotCherry', 'mask_position': None, 'file_size': 12727, 'is_animated': True}, 'video': None, 'video_note': None, 'voice': None, 'caption': None, 'contact': None, 'location': None, 'venue': None, 'animation': None, 'new_chat_member': None, 'new_chat_members': None, 'left_chat_member': None, 'new_chat_title': None, 'new_chat_photo': None, 'delete_chat_photo': None, 'group_chat_created': None, 'supergroup_chat_created': None, 'channel_chat_created': None, 'migrate_to_chat_id': None, 'migrate_from_chat_id': None, 'pinned_message': None, 'invoice': None, 'successful_payment': None, 'connected_website': None, 'json': {'message_id': 17, 'from': {'id': 456125525, 'is_bot': False, 'first_name': 'Stan', 'last_name': 'P', 'username': 'Thou_shalt', 'language_code': 'ru'}, 'chat': {'id': 456125525, 'first_name': 'Stan', 'last_name': 'P', 'username': 'Thou_shalt', 'type': 'private'}, 'date': 1587126688, 'sticker': {'width': 512, 'height': 512, 'emoji': '😨', 'set_name': 
'HotCherry', 'is_animated': True, 'thumb': {'file_id': 'AAMCAgADGQEAAxFemaGgCULiCHOLUOlxus0u4gltowACAwADwDZPE6Qp7uZ988B4AAHthQ8ABAEAB20AA8eUAAIYBA', 'file_unique_id': 'AQAE7YUPAATHlAAC', 'file_size': 4448, 'width': 128, 'height': 128}, 'file_id': 'CAACAgIAAxkBAAMRXpmhoAlC4ghzi1DpcbrNLuIJbaMAAgMAA8A2TxOkKe7mffPAeBgE', 'file_unique_id': 'AgADAwADwDZPEw', 'file_size': 12727}}}
################################
{'update_id': 674365978, 'message': {'content_type': 'text', 'message_id': 1723, 'from_user': <telebot.types.User object at 0x7fc7d2dcb240>, 'date': 1587888825, 'chat': <telebot.types.Chat object at 0x7fc7d2dcb128>, 'forward_from_chat': None, 'forward_from_message_id': None, 'forward_from': None, 'forward_date': None, 'reply_to_message': None, 'edit_date': None, 'media_group_id': None, 'author_signature': None, 'text': 'Как успехи', 'entities': None, 'caption_entities': None, 'audio': None, 'document': None, 'photo': None, 'sticker': None, 'video': None, 'video_note': None, 'voice': None, 'caption': None, 'contact': None, 'location': None, 'venue': None, 'animation': None, 'new_chat_member': None, 'new_chat_members': None, 'left_chat_member': None, 'new_chat_title': None, 'new_chat_photo': None, 'delete_chat_photo': None, 'group_chat_created': None, 'supergroup_chat_created': None, 'channel_chat_created': None, 'migrate_to_chat_id': None, 'migrate_from_chat_id': None, 'pinned_message': None, 'invoice': None, 'successful_payment': None, 'connected_website': None, 'json': {'message_id': 1723, 'from': {'id': 456125525, 'is_bot': False, 'first_name': 'Stan', 'last_name': 'P', 'username': 'Thou_shalt', 'language_code': 'ru'}, 'chat': {'id': 456125525, 'first_name': 'Stan', 'last_name': 'P', 'username': 'Thou_shalt', 'type': 'private'}, 'date': 1587888825, 'text': 'Как успехи'}}, 'edited_message': None, 'channel_post': None, 'edited_channel_post': None, 'inline_query': None, 'chosen_inline_result': None, 'callback_query': None, 'shipping_query': None, 'pre_checkout_query': None}
################################
sticker_message {'content_type': 'sticker', 'message_id': 1725, 'from_user': {'id': 456125525, 'is_bot': False, 'first_name': 'Stan', 'username': 'Thou_shalt', 'last_name': 'P', 'language_code': 'ru'}, 'date': 1587888938, 'chat': {'type': 'private', 'last_name': 'P', 'first_name': 'Stan', 'username': 'Thou_shalt', 'id': 456125525, 'title': None, 'all_members_are_administrators': None, 'photo': None, 'description': None, 'invite_link': None, 'pinned_message': None, 'sticker_set_name': None, 'can_set_sticker_set': None}, 'forward_from_chat': None, 'forward_from_message_id': None, 'forward_from': None, 'forward_date': None, 'reply_to_message': None, 'edit_date': None, 'media_group_id': None, 'author_signature': None, 'text': None, 'entities': None, 'caption_entities': None, 'audio': None, 'document': None, 'photo': None, 'sticker': {'file_id': 'CAACAgIAAxkBAAIGvV6lQyo1b6Yvtzi3uKcGj47RiUdcAALCAQACVp29Cpl4SIBCOG2QGQQ', 'width': 512, 'height': 512, 'thumb': <telebot.types.PhotoSize object at 0x7fc7d005ef60>, 'emoji': '👍', 'set_name': 'TheVirus', 'mask_position': None, 'file_size': 7420, 'is_animated': True}, 'video': None, 'video_note': None, 'voice': None, 'caption': None, 'contact': None, 'location': None, 'venue': None, 'animation': None, 'new_chat_member': None, 'new_chat_members': None, 'left_chat_member': None, 'new_chat_title': None, 'new_chat_photo': None, 'delete_chat_photo': None, 'group_chat_created': None, 'supergroup_chat_created': None, 'channel_chat_created': None, 'migrate_to_chat_id': None, 'migrate_from_chat_id': None, 'pinned_message': None, 'invoice': None, 'successful_payment': None, 'connected_website': None, 'json': {'message_id': 1725, 'from': {'id': 456125525, 'is_bot': False, 'first_name': 'Stan', 'last_name': 'P', 'username': 'Thou_shalt', 'language_code': 'ru'}, 'chat': {'id': 456125525, 'first_name': 'Stan', 'last_name': 'P', 'username': 'Thou_shalt', 'type': 'private'}, 'date': 1587888938, 'sticker': {'width': 512, 'height': 512, 'emoji': 
'👍', 'set_name': 'TheVirus', 'is_animated': True, 'thumb': {'file_id': 'AAMCAgADGQEAAga9XqVDKjVvpi-3OLe4pwaPjtGJR1wAAsIBAAJWnb0KmXhIgEI4bZB6wdWRLgADAQAHbQADeiIAAhkE', 'file_unique_id': 'AQADesHVkS4AA3oiAAI', 'file_size': 6186, 'width': 128, 'height': 128}, 'file_id': 'CAACAgIAAxkBAAIGvV6lQyo1b6Yvtzi3uKcGj47RiUdcAALCAQACVp29Cpl4SIBCOG2QGQQ', 'file_unique_id': 'AgADwgEAAladvQo', 'file_size': 7420}}}
+++++++++++++++++++++++++++++++++=
{'message_id': 1798, 'from': {'id': 456125525, 'is_bot': False, 'first_name': 'Stan', 'last_name': 'P', 'username': 'Thou_shalt', 'language_code': 'ru'}, 'chat': {'id': 456125525, 'first_name': 'Stan', 'last_name': 'P', 'username': 'Thou_shalt', 'type': 'private'}, 'date': 1587914988, 'sticker': {'width': 512, 'height': 512, 'emoji': '👍', 'set_name': 'TheVirus', 'is_animated': True, 'thumb': {'file_id': 'AAMCAgADGQEAAgcGXqWo7KbDR7NPdeq-Ish0T_k2e2wAAsIBAAJWnb0KmXhIgEI4bZB6wdWRLgADAQAHbQADeiIAAhkE', 'file_unique_id': 'AQADesHVkS4AA3oiAAI', 'file_size': 6186, 'width': 128, 'height': 128}, 'file_id': 'CAACAgIAAxkBAAIHBl6lqOymw0ezT3XqviLIdE_5NntsAALCAQACVp29Cpl4SIBCOG2QGQQ', 'file_unique_id': 'AgADwgEAAladvQo', 'file_size': 7420}}
"""
| [
"sparsov@sinergo.ru"
] | sparsov@sinergo.ru |
c3ae2bfc05c00cd24db90c05ec43f7f437e5c692 | bcd8edab7d3ee62094493fa69ecb40b69f847f21 | /osmchadjango/supervise/serializers.py | 1e35141c0dd28c01eab2fdda6218c3dfe321fad7 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | juusokor/osmcha-django | 83238c1f79da36a524ae237d913fc7f14501a295 | 5daafa015d5e341aa8ad6f847be2b7cc1a204e2b | refs/heads/master | 2020-03-30T12:55:11.729196 | 2018-06-30T01:00:33 | 2018-06-30T01:00:33 | 151,248,061 | 0 | 0 | NOASSERTION | 2018-10-02T12:07:00 | 2018-10-02T12:06:59 | null | UTF-8 | Python | false | false | 1,696 | py | from django.urls import reverse
from rest_framework_gis.serializers import GeoFeatureModelSerializer
from rest_framework_gis.fields import GeometryField
from rest_framework.fields import (
SerializerMethodField, HiddenField, CurrentUserDefault, DateTimeField,
ReadOnlyField
)
from rest_framework.validators import ValidationError, UniqueTogetherValidator
from rest_framework.serializers import ModelSerializer
from .models import AreaOfInterest, BlacklistedUser
class AreaOfInterestSerializer(GeoFeatureModelSerializer):
    """GeoJSON serializer for AreaOfInterest.

    The owning user is injected from the request (hidden field), the
    geometry is read-only, and (name, user) pairs must be unique.
    """
    # URL of the changeset listing for this AoI, computed per object.
    changesets_url = SerializerMethodField()
    # Always the requesting user; never supplied by the client.
    user = HiddenField(default=CurrentUserDefault())
    geometry = GeometryField(read_only=True)

    class Meta:
        model = AreaOfInterest
        geo_field = 'geometry'
        fields = [
            'id', 'name', 'filters', 'geometry', 'date', 'changesets_url', 'user'
        ]
        validators = [
            UniqueTogetherValidator(
                queryset=AreaOfInterest.objects.all(),
                fields=('name', 'user')
            )
        ]

    def get_changesets_url(self, obj):
        """Reverse the per-AoI changeset list endpoint."""
        return reverse('supervise:aoi-list-changesets', args=[obj.id])

    def validate(self, data):
        """Require at least one of `filters` or `geometry` to be set."""
        if data.get('filters') is None and data.get('geometry') is None:
            raise ValidationError(
                'Set a value to the filters field or to the geometry to be able to save the AoI'
            )
        return data
class BlacklistSerializer(ModelSerializer):
    """Serializer for BlacklistedUser.

    `date` and `added_by` are server-controlled: the date is set on
    creation and `added_by` exposes the username of the staff user who
    created the entry.
    """
    date = DateTimeField(read_only=True)
    added_by = ReadOnlyField(source='added_by.username')

    class Meta:
        model = BlacklistedUser
        fields = ('uid', 'username', 'date', 'added_by')
| [
"wille@wille.blog.br"
] | wille@wille.blog.br |
d8bcd182c09698fa6afd891cf4a54a6978ca1923 | 3def50d84e20710cf817b394c8d6a4fde0957fdf | /cherrypy/classtest/test3.py | 2abd2a5ca159bfd8748c86e0b7c1ad39ec981447 | [] | no_license | RimantasSmitas/BananaPi | a50f4f1a2b0a2a0e9a2322ccddd9e1e0d39887f1 | bc7f00394a6981beae99a37212b671da21247d5d | refs/heads/master | 2021-08-22T22:12:50.893610 | 2017-12-01T12:27:23 | 2017-12-01T12:27:23 | 109,330,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,252 | py | import signal, os
import RPi.GPIO as GPIO
import time
AIN1 = 17
AIN2 = 27
PWMA = 2
BIN1 = 10
BIN2 = 22
PWMB = 3
# Pin for linesensor:
A = 6 #left
B = 13 #middle
C = 5 #right
# Pin Setup:
GPIO.setmode(GPIO.BCM) # Broadcom pin-numbering scheme
GPIO.setup(A, GPIO.IN) # sensor set as input
GPIO.setup(B, GPIO.IN)
GPIO.setup(C, GPIO.IN)
GPIO.setwarnings(False)
GPIO.setup(AIN1, GPIO.OUT)
GPIO.setup(AIN2, GPIO.OUT)
GPIO.setup(PWMA, GPIO.OUT)
GPIO.setup(BIN1, GPIO.OUT)
GPIO.setup(BIN2, GPIO.OUT)
GPIO.setup(PWMB, GPIO.OUT)
rightmotor = GPIO.PWM(PWMA, 50) # channel=12 frequency=50Hz
rightmotor.start(0)
def handler(signum, frame): #stop when ctrl-c is received
    """SIGINT handler: stop the right motor PWM pin, release all GPIO pins
    and terminate the program cleanly."""
    print ('Signal handler called with signal', signum)
    print ('exiting')
    # Drive the PWM enable pin low so the motor stops before cleanup.
    GPIO.output(PWMA, GPIO.LOW)
    GPIO.cleanup()
    exit(0)
# When recieving ctrl-C
signal.signal(signal.SIGINT, handler)
rightmotor = GPIO.PWM(PWMA, 50)
leftmotor = GPIO.PWM(PWMB, 50)
def backward():
    """Drive both motors in reverse (AIN/BIN low-high) with PWM enabled."""
    GPIO.output(AIN1, GPIO.LOW)
    GPIO.output(AIN2, GPIO.HIGH)
    GPIO.output(PWMA, GPIO.HIGH)
    GPIO.output(BIN1, GPIO.LOW)
    GPIO.output(BIN2, GPIO.HIGH)
    GPIO.output(PWMB, GPIO.HIGH)
def forward():
    """Drive both motors forward (AIN/BIN high-low) with PWM enabled."""
    GPIO.output(AIN1, GPIO.HIGH)
    GPIO.output(AIN2, GPIO.LOW)
    GPIO.output(PWMA, GPIO.HIGH)
    GPIO.output(BIN1, GPIO.HIGH)
    GPIO.output(BIN2, GPIO.LOW)
    GPIO.output(PWMB, GPIO.HIGH)
def right():
    """Turn right: A motor forward (high-low), B motor reversed (low-high)."""
    GPIO.output(AIN1, GPIO.HIGH)
    GPIO.output(AIN2, GPIO.LOW)
    GPIO.output(PWMA, GPIO.HIGH)
    GPIO.output(BIN1, GPIO.LOW)
    GPIO.output(BIN2, GPIO.HIGH)
    GPIO.output(PWMB, GPIO.HIGH)
def left():
    """Turn left: A motor reversed (low-high), B motor forward (high-low).

    BUG FIX: the original body was a byte-for-byte copy of right(), so the
    robot could never turn left.  The fix mirrors right() by swapping which
    motor is reversed.
    NOTE(review): assumes AIN* drives the left motor and BIN* the right one,
    matching the direction pattern used by forward()/right() — confirm
    against the actual wiring.
    """
    GPIO.output(AIN1, GPIO.LOW)
    GPIO.output(AIN2, GPIO.HIGH)
    GPIO.output(PWMA, GPIO.HIGH)
    GPIO.output(BIN1, GPIO.HIGH)
    GPIO.output(BIN2, GPIO.LOW)
    GPIO.output(PWMB, GPIO.HIGH)
def stop():
    """Stop both motors: disable both PWM enable pins and zero the software
    PWM duty cycles."""
    GPIO.output(AIN1, GPIO.LOW)
    GPIO.output(AIN2, GPIO.HIGH)
    GPIO.output(PWMA, GPIO.LOW)
    GPIO.output(BIN1, GPIO.LOW)
    GPIO.output(BIN2, GPIO.HIGH)
    # BUG FIX: was `GO.output(PWMB, GPIO.LOW)` — `GO` is undefined and would
    # raise NameError the first time stop() ran.
    GPIO.output(PWMB, GPIO.LOW)
    rightmotor.ChangeDutyCycle(0)
    leftmotor.ChangeDutyCycle(0)
def check(A,B,C):
    """Line-follower dispatch loop based on three sensor readings.

    A/B/C are the left/middle/right sensor values (0 = line detected).

    BUG FIX: the original tested `A==0 and B==0 and C==0` *after*
    `elif A==0`, so the all-sensors-on-line branch (stop) was unreachable.
    That test is now first.
    NOTE(review): the loop never re-reads the sensors, so with constant
    arguments it spins on one action forever — the caller presumably needs
    to re-sample GPIO inputs each iteration; confirm intended design.
    """
    while True:
        if A==0 and B==0 and C==0:
            stop()
        elif B==0 and A==1 and C==1:
            forward()
        elif A==0:
            left()
        elif C==0:
            right()
forward()
| [
"rimantas.smitas@gmail.com"
] | rimantas.smitas@gmail.com |
deddb67ccdc66d674d6db8ca868bdc5dd8bf4d97 | b3a34ddd1cfaf709d7635af83d4e1e489b4cca5d | /gui/menus/skinmenu.py | a7add4ec33732fc7fb74fbfcac93b04b561dbce0 | [] | no_license | o2mation/PyBlock-pyside2 | b97672dae977f8930966fa81c18d975d3bc74d44 | 16883fd499dbe04e2de7b7b166a0e37b8c51ab42 | refs/heads/master | 2020-04-09T05:10:52.976343 | 2018-12-09T12:16:25 | 2018-12-09T12:16:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,149 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from PySide2.QtCore import *
from PySide2.QtGui import *
from qframer.resources import *
from qframer import FMenu
class SkinMenu(FMenu):
skinIDSin = Signal(str)
def __init__(self, parent=None):
super(SkinMenu, self).__init__(parent)
self.parent = parent
self.menuItems = [
{
'name': self.tr('BW'),
'icon': u'',
'shortcut': u'',
'trigger': 'BW',
"checkable": True
},
{
'name': self.tr('BB'),
'icon': u'',
'shortcut': u'',
'trigger': 'BB',
"checkable": True
},
{
'name': self.tr('GB'),
'icon': u'',
'shortcut': u'',
'trigger': 'GB',
"checkable": True
},
{
'name': self.tr('GG'),
'icon': u'',
'shortcut': u'',
'trigger': 'GG',
"checkable": True
},
{
'name': self.tr('GBG'),
'icon': u'',
'shortcut': u'',
'trigger': 'GBG',
"checkable": True
},
{
'name': self.tr('GGG'),
'icon': u'',
'shortcut': u'',
'trigger': 'GGG',
"checkable": True
},
{
'name': self.tr('Bg1'),
'icon': u'',
'shortcut': u'',
'trigger': 'Bg1',
"checkable": True
},
{
'name': self.tr('Bg2'),
'icon': u'',
'shortcut': u'',
'trigger': 'Bg2',
"checkable": True
},
{
'name': self.tr('Bg3'),
'icon': u'',
'shortcut': u'',
'trigger': 'Bg3',
"checkable": True
},
{
'name': self.tr('Bg4'),
'icon': u'',
'shortcut': u'',
'trigger': 'Bg4',
"checkable": True
},
{
'name': self.tr('Blank'),
'icon': u'',
'shortcut': u'',
'trigger': 'Blank',
"checkable": True
},
]
self.creatMenus(self.menuItems)
self.skinNames = [item['name'] for item in self.menuItems]
self.initConnect()
getattr(self, '%sAction' % self.skinNames[1]).setChecked(True)
def initConnect(self):
for key, action in self.qactions.items():
action.triggered.connect(self.updateChecked)
def updateChecked(self):
for key, action in self.qactions.items():
if self.sender() is action:
action.setChecked(True)
self.skinIDSin.emit(key)
else:
action.setChecked(False)
| [
"ding465398889.com"
] | ding465398889.com |
ddd1834a9a3c5f54f03e78756765e853829e09fb | 4995398f861cd76fb24a653b19356a235d040efb | /Question3.py | 54b9feb1d5b0bf3d20ff664786f1769ba57389a2 | [] | no_license | adiXcodr/CryptoAlgos | 474496e1b8776686b6d1fd695d320062b2e547fe | 687e2fa44b7d13b8f212ad24e737f34c84606f02 | refs/heads/master | 2022-12-25T12:28:14.574901 | 2020-09-30T17:34:23 | 2020-09-30T17:34:23 | 299,993,862 | 0 | 0 | null | 2020-09-30T17:10:22 | 2020-09-30T17:10:21 | null | UTF-8 | Python | false | false | 241 | py | def exponent(a,b,n):
answer= 1
x=a
while(b>0):
if(b%2!=0):
answer= (answer*x)%n
b= b-1
x= (x*x)%n
b=b/2
print('Answer=', answer)
a,b, n= [int(i) for i in input("Enter a, b and n \n").split()]
exponent(a,b,n)
| [
"subhasishgoswami00@gmail.com"
] | subhasishgoswami00@gmail.com |
c47c76430fce530bce285bc74114852f6d5a7805 | 2e9a218381689ea51a74b168fbf5fbc0bb3528bc | /templates/src2/static/application.py | 723b17daa0e2233cea040d6eafd31b048c9ed56d | [] | no_license | wanghaoze/emotion_site | 53c6ae1e8b7582f768cc20ccbf676a40e404da3f | f5c55ca511939730554dfe86f4de0f4c57d94aa4 | refs/heads/master | 2023-02-15T02:45:59.135139 | 2021-01-06T11:18:39 | 2021-01-06T11:18:39 | 274,715,256 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | from flask import Flask, render_template
app = Flask(__name__)
@app.route("/")
def index():
return render_template("index.html")
@app.route("/more")
def more():
return render_template("more.html")
app.run(port=23222) | [
"1070390210@qq.com"
] | 1070390210@qq.com |
dd50f84b06ac961202384fb9c1485f29dc7f0cc4 | df1f8da187fcfd1a4128051bfd5a3be2f567f62f | /0x01-python-if_else_loops_functions/example2.py | dab1b828477926f42dfdb654937aed1fcc898974 | [] | no_license | stostat/holbertonschool-higher_level_programming | d784f24408d69f68dfdefc46fe9e406a98e927e1 | 658fee8bd3b323de2fbc09bf02d8fc8439ed5a3f | refs/heads/master | 2023-03-02T02:59:42.902612 | 2021-02-10T04:33:43 | 2021-02-10T04:33:43 | 259,482,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | #!/usr/bin/python3
def suma(a, b):
return(a + b)
a = 3
b = 5
print(suma(45, b))
| [
"santmendieta@icloud.com"
] | santmendieta@icloud.com |
6fb26cc59bc2266d5eb08648c217a21c5a0b1243 | 327963ae013b5a94cad2ac18813e839923ccf89e | /publisher/__init__.py | 5d36d1e19a67553d5ffbce5ea332bad8e706721a | [] | no_license | encresearch/publisher | 3948849f99081752f9c6096216e64ecf4d102ed4 | 575283846a76ec9723e8165fbe13566a9a7ae561 | refs/heads/master | 2022-07-12T06:20:34.270817 | 2020-07-05T18:54:36 | 2020-07-05T18:54:36 | 136,961,324 | 0 | 0 | null | 2022-06-22T03:29:27 | 2018-06-11T17:50:43 | Python | UTF-8 | Python | false | false | 24 | py | from publisher import *
| [
"noreply@github.com"
] | noreply@github.com |
1a2cdc108a8596ada3e9babc8b331eb452075700 | 36297b58bedf27422e149e9eb10a39e3f29c0014 | /src/day_6_part_2.py | 5d4336f002f223ff2a1d3dda809ea964525490d9 | [] | no_license | marflodin/adventofcode-2020 | fa4711c9ab7010bdc10be58a6c8a4407371b589f | e825a41398ea2f0a527e673c004bcc0b09486aaf | refs/heads/main | 2023-01-24T17:30:48.340870 | 2020-12-09T10:12:55 | 2020-12-09T10:12:55 | 318,784,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | with open('../resources/input_day_6.txt', 'r') as input_file:
entries = []
entry = {"persons": 0}
for line in input_file.readlines():
if line in ['\n', '\r\n']:
entries.append(entry)
entry = {"persons": 0}
else:
entry["persons"] = entry["persons"] + 1
for element in range(0, len(line.strip())):
old_value = entry.get(line[element], 0)
entry[line[element]] = old_value + 1
entries.append(entry)
if __name__ == "__main__":
result = 0
for item in entries:
for key in item:
if key == "persons":
pass
elif item[key] == item["persons"]:
result += 1
else:
pass
print(f"result: {result}") | [
"martin.flodin@klarna.com"
] | martin.flodin@klarna.com |
4b3ce1a2e5181d7331f3835d137f5487c45ecfc0 | 86221678ec413034bb4e34af9a0cbbf6ac76f21a | /2017CS50407/question2/main.py | 64ee7b790c40802f1d78a3a8e0d3fceacaf19044 | [] | no_license | Dwijesh522/ml_assignments | 7f198b2e44e9194b517018cbe6d5b5dd02913f0a | d474771340ff0fb78e48213a70e854e372ed75ab | refs/heads/master | 2022-04-02T20:21:16.621225 | 2020-02-10T12:42:26 | 2020-02-10T12:42:26 | 236,195,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,512 | py | import pandas as pd
import stochastic_gradient_descent as sgd
import plotting
import numpy as np
import math
def sample():
    """Draw one million synthetic linear-regression examples.

    Features: x1 ~ N(3, 4), x2 ~ N(-1, 4), plus an intercept column of ones.
    Targets:  y  ~ N(theta_not . x, 2) with theta_not = [3, 1, 2]^T.

    Returns:
        (X, theta, Y, theta_not) where X is (m, 3), theta is the (3, 1)
        zero-initialised parameter vector, Y is (m, 1) and theta_not is the
        ground-truth parameter vector used to generate Y.
    """
    num_examples = 1000000
    true_theta = np.array([[3], [1], [2]])
    # Draw the two feature columns (same RNG call order as before).
    feat1 = np.random.normal(3, math.sqrt(4), (num_examples, 1))
    feat2 = np.random.normal(-1, math.sqrt(4), (num_examples, 1))
    # Prepend the intercept column to form the (m x 3) design matrix.
    intercept = np.ones((num_examples, 1))
    design = np.concatenate((intercept, feat1, feat2), axis=1)
    # Sample targets around the true regression line with variance 2.
    noise_variance = 2
    targets = np.random.normal(np.dot(design, true_theta),
                               math.sqrt(noise_variance))
    # Parameters start at zero for gradient descent.
    init_theta = np.zeros((design.shape[1], 1))
    return design, init_theta, targets, true_theta
if __name__ == "__main__":
X, theta, Y, theta_not = sample()
# shape of x: 1000000 x 3
# shape of y: 1000000 x 1
# shape of theta: 3 x 1
# default learning rate: 0.009
theta, theta0_space, theta1_space, theta2_space = sgd.stochastic_gradient_descent(X, theta, Y, 0.001, 0.004, 1000000)
print("optimal theta:\n {}".format(theta))
# plotting.plot_training_data_hypothesis(X, theta, Y)
plotting.plot_theta_movement(theta0_space, theta1_space, theta2_space)
# plotting.plot_cost_function(cost_theta_samples[:, 0], cost_theta_samples[:, 1], cost_theta_samples[:, 2])
# plotting.draw_contours(X, theta, Y, cost_theta_samples[:, 0], cost_theta_samples[:, 1])
| [
"iddwijesh@gmail.com"
] | iddwijesh@gmail.com |
36fd0e06456b26396cb9e10b5c02751e6cb688b7 | f1c7f2597b10f8487b8fa5f20ba7019d876f3c71 | /accounts/forms.py | bb406314b11b7f5a1ebb3f11148b6458b2120277 | [] | no_license | inancd/sitorbis | fbb6a7f3f3978f26af95a9e45eeae67c0e84c82b | 762be77179739966dbaf978299b5d53846cc83d6 | refs/heads/master | 2020-09-10T18:31:19.523974 | 2020-07-14T19:33:16 | 2020-07-14T19:33:16 | 208,810,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,127 | py | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import authenticate
from django.db.models.signals import post_save
from django.core.exceptions import ValidationError
from PIL import Image
from .models import MediaModel
from accounts.models import Account, Profile
class RegistrationForm(UserCreationForm):
    """Sign-up form for Account: email, username, passwords and a mandatory
    terms-of-service checkbox."""
    email = forms.EmailField(max_length=60, help_text='Required. Add a valid email address')
    username = forms.CharField(max_length=80)
    password1 = forms.CharField(widget=forms.PasswordInput(), help_text='', label='Password')
    password2 = forms.CharField(widget=forms.PasswordInput(), help_text='', label='Password Confirm')
    is_terms = forms.BooleanField(label='', widget=forms.CheckboxInput(attrs={'class': 'form-check-input'}))
    class Meta:
        model = Account
        fields = ('email', 'username', 'fullname', 'password1', 'password2', 'is_terms')
    def clean_username(self):
        # Normalize the username: drop all spaces and lowercase it.
        data = self.cleaned_data.get('username').replace(" ", "").lower()
        # BUG FIX: the original printed a leftover (profane) debug message to
        # stdout whenever len(data) > 5 and performed no actual validation;
        # the debug print has been removed.
        # TODO(review): confirm whether a minimum-length rule was intended
        # here and raise forms.ValidationError accordingly.
        return data
class AccountauthenticationForm(forms.ModelForm):
    """Login form: validates the email/password pair against the auth
    backend and rejects the form when authentication fails."""
    password = forms.CharField(label='Password', widget=forms.PasswordInput)
    class Meta:
        model = Account
        fields = ('email', 'password')
    def clean(self):
        # Only attempt authentication once the field-level validation passed.
        if self.is_valid():
            email = self.cleaned_data['email']
            password = self.cleaned_data['password']
            if not authenticate(email=email, password=password):
                raise forms.ValidationError("Invalid login")
# Choices for the Profile.sex field; first entry is the empty placeholder.
Private = (
    ('', 'Private...'),
    ('ML', 'Male'),
    ('WM', 'Woman'),
    ('PR', 'Custom')
)
class ProfileForm(forms.ModelForm):
    """Profile edit form: gender choice (optional) plus avatar upload."""
    profile_picture = forms.ImageField()
    sex = forms.ChoiceField(choices=Private, label='Private', required=False)
    class Meta:
        model = Profile
        fields = ('sex', 'profile_picture')
class SocialMediaUpdateForm(forms.ModelForm):
    """Form for editing the user's social-media links on MediaModel."""
    class Meta:
        model = MediaModel
        fields = ('instagram', 'facebook', 'twitter', 'linkedin')
| [
"inanc@sitorbis.com"
] | inanc@sitorbis.com |
95a4c662c519fb403e510e71fc25a5e18b70a8a4 | ef10ed497ef6f41a976cc30526ee2d4a7c8241c1 | /Memoria/figures/elliptic_inv_doubl.py | 824106fd450fc6edac48dc2f2fa85003fb97bdf3 | [] | no_license | lglaria/TFG | 3a1020336efb3982b96e3bad844aab9748ead6a2 | d415b4a916793ea157788ef06d15e8648fb77704 | refs/heads/master | 2020-03-20T06:18:53.501013 | 2019-09-15T17:14:00 | 2019-09-15T17:14:00 | 137,244,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,627 | py | import matplotlib.pyplot as plt
import numpy as np
import warnings
warnings.filterwarnings("ignore",category=RuntimeWarning) #tendremos algunas raices negativas
# Plot the elliptic curve y^2 = x^3 - 3x + 5 and illustrate point doubling:
# the tangent at P = (-1, sqrt(7)) is horizontal, so 2P is the reflection
# of the second intersection S across the x-axis.
X = np.linspace(-2.5, 5, 5000)
Y_0 = np.linspace(-10, 10, 5000)
Z = (X**3 - 3*X + 5)**(0.5)
Y = 2*X
plt.plot(X, Z, color = 'black')
plt.plot(X,-Z, color = 'black')
plt.plot(X,[0]*5000, color = 'red')
# implicit form of the curve: Y**2 - X**3 + 3*X - 5 = 0
# slope of the tangent line = -partial_x / partial_y
# = -(-3X**2 + 3)/2Y
# at (-1, 7**0.5) --> m = 0
# intercept = 7**0.5  (the point lies on the curve: (-1)^3 + 3 + 5 = 7)
pendiente = 0
intercept = float(7**0.5)
Y_line = intercept + pendiente*X
dif = Z-Y_line
flag = False
k = 0
# flag and k avoid marking more than one point in the same neighbourhood:
# because of rounding, dif is ~0 in a whole neighbourhood of each
# intersection point, not at a single sample.
# NOTE(review): `len(dif-1)` equals `len(dif)` (elementwise subtraction);
# `len(dif)-1` was probably intended — harmless as written.
for i in range(len(dif-1)):
    if round(dif[i],2) == 0 and not flag and k == 0:
        plt.plot(X[i], intercept, 'ro')
        plt.annotate('P',(X[i], intercept),xytext=(-10,10), textcoords = 'offset points')
        plt.plot([X[i]]*5000, Y_0,'--')
        plt.plot(X[i], -intercept, 'ro')
        plt.annotate('-P',(X[i], -intercept),xytext=(-15,-10), textcoords = 'offset points')
        flag = True
    elif round(dif[i],2) == 0 and flag and k >= 150:
        plt.plot(X[i], intercept, 'ro')
        plt.annotate('S',(X[i], intercept),xytext=(-10,10), textcoords = 'offset points')
        plt.plot([X[i]]*5000, Y_0,'--')
        plt.plot(X[i], -intercept, 'ro')
        plt.annotate('2P',(X[i], -intercept),xytext=(-15,-10), textcoords = 'offset points')
        flag = False
    elif flag== True:
        k += 1
plt.plot(X,Y_line)
plt.axis('off')
plt.savefig('elliptic1.eps', format = 'eps')
plt.show()
| [
"lglaria@ucm.es"
] | lglaria@ucm.es |
05300260d3005417fd9466a8faef01221aa10d7c | 4435e08efbc07ecf9295c2714570eae12033b19b | /application.py | c876a1d13776347a0c39166632ae91fbb37e8943 | [] | no_license | shovanch/twitter-sentiment-analysis | 54c7cdc19540824cb88517b86104c9b7147bad63 | c901dc4b2eecd9a66e3ad262b915957891c52d43 | refs/heads/master | 2021-06-23T10:55:07.361101 | 2017-09-04T19:50:44 | 2017-09-04T19:50:44 | 82,047,013 | 14 | 9 | null | null | null | null | UTF-8 | Python | false | false | 1,346 | py | from flask import Flask, redirect, render_template, request, url_for
import helpers
import os, sys
from analyzer import Analyzer
app = Flask(__name__)
@app.route("/")
def index():
return render_template("index.html")
@app.route("/search")
def search():
# validate screen_name
screen_name = request.args.get("screen_name", "").lstrip("@")
if not screen_name:
return redirect(url_for("index"))
# get screen_name's tweets
tweets = helpers.get_user_timeline(screen_name.lstrip("@"), count = 100)
# if invalid or protected screen name
if tweets == None:
return redirect(url_for("index"))
# load absolute path of word lists
positives = os.path.join(sys.path[0], "positive-words.txt")
negatives = os.path.join(sys.path[0], "negative-words.txt")
# instantiate analyzer
analyzer = Analyzer(positives, negatives)
positive, negative, neutral = 0.0, 0.0, 0.0
for tweet in tweets:
score = analyzer.analyze(tweet)
if score > 0.0:
positive += 1.0
elif score < 0.0:
negative += 1.0
else:
neutral += 1.0
# generate chart
chart = helpers.chart(positive, negative, neutral)
# render results
return render_template("search.html", chart=chart, screen_name=screen_name)
| [
"shovanc.web@gmail.com"
] | shovanc.web@gmail.com |
8cca05cab03643862b4a0e964a10db0b3ad575a8 | f9c52c4001e1d6af8b07c24cab041c86fa68dcfa | /info/stocks.py | 35f822cf6d3bd2b157b0f0d88cb27a6297533bc6 | [] | no_license | jesse-lin/rembot | 5c8eee2c19c0eb65760baeb374264acad100475d | 0dd97331a2279a6338b5127d3712d0ac9f9f73fc | refs/heads/master | 2023-01-12T10:57:11.719600 | 2020-11-16T23:42:46 | 2020-11-16T23:42:46 | 261,308,238 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,725 | py | import discord
import os
import sys
from discord.ext import commands
import json
import datetime
import yfinance as yf
import lxml
from variables import FILEPATH
async def load_stocks(ctx, data, tickers):
    """Send one Discord embed per ticker with its 5-day price history.

    ctx     -- discord.py command context used to send messages
    data    -- bot config dict; data['thumbnail'] is the embed thumbnail URL
    tickers -- iterable of stock symbols (case-insensitive)
    """
    for s in tickers:
        ss = s.upper()
        # stock = requests.get(f'https://query1.finance.yahoo.com/v7/finance/chart/{s.upper()}').json()
        stock = yf.Ticker(ss)
        # Round-trip through JSON to turn the DataFrame into plain dicts
        # keyed by column name, then by millisecond timestamp.
        info_json = f'{stock.history(period="5d").to_json()}'
        info = json.loads(info_json)
        embed = discord.Embed(title=ss, description = f':clock1: A 5-day history of {ss}')
        embed.set_thumbnail(url=data['thumbnail'])
        for key in info:
            # Skip the non-price columns returned by yfinance.
            if key != 'Dividends' and key != 'Stock Splits':
                str1 = ''
                for val in info[key]:
                    # Keys are epoch milliseconds; convert to an ISO date.
                    newtime = datetime.date.fromtimestamp(int(val)/1e3).isoformat()
                    str1 += f'**{newtime}:** '
                    # Prefix dollar sign only for price columns (not Volume).
                    if key=='Open' or key=='High' or key=='Low' or key=='Close':
                        str1 += '$'
                    str1 += f'{info[key][val]}\n'
                embed.add_field(name=key, value=str1, inline=False)
        try:
            await ctx.send(embed=embed)
        except discord.errors.HTTPException:
            # Empty history yields empty fields, which Discord rejects.
            await ctx.send(f':no_entry_sign: **Data not found for {ss}, try again.**')
async def add_stocks(ctx, user, tickers):
    """Add ticker symbols to a user's saved watchlist in mem.json.

    Each symbol is validated against yfinance (must return 5-day history)
    before being appended; the updated list is persisted and echoed back.
    """
    # (removed an unused `counter = False` leftover)
    with open(f'{FILEPATH}/data/mem.json', 'r') as fm:
        mdata = json.load(fm)
    guild_id = f'{ctx.guild.id}'
    for elem in tickers:
        sym = elem.upper()
        ticker = yf.Ticker(sym)
        info_json = f'{ticker.history(period="5d").to_json()}'
        info_check = json.loads(info_json)
        # ROBUSTNESS FIX: an unknown symbol yields an empty history dict, so
        # indexing ["Open"] raised KeyError; .get() treats it as "no data".
        if info_check.get("Open"):
            if sym not in mdata[guild_id][user]['stocks']:
                mdata[guild_id][user]['stocks'].append(sym)
                await ctx.send(f':white_check_mark: **{sym} added.**')
            else:
                await ctx.send(f":warning: **{sym} already in list.**")
        else:
            await ctx.send(f":no_entry_sign: **Data not found for {sym}, "
                           "sorry.**")
    with open(f'{FILEPATH}/data/mem.json', 'w') as fm:
        json.dump(mdata, fm, indent=4)
    await list_stocks(ctx, user, mdata[guild_id][user]['stocks'])
async def del_stocks(ctx, user, tickers):
    """Remove ticker symbols from a user's saved watchlist in mem.json,
    persist the result and echo the updated list."""
    with open(f'{FILEPATH}/data/mem.json', 'r') as fm:
        mdata = json.load(fm)
    guild_id = f'{ctx.guild.id}'
    for elem in tickers:
        sym = elem.upper()
        if sym in mdata[guild_id][user]['stocks']:
            mdata[guild_id][user]['stocks'].remove(sym)
            await ctx.send(f':white_check_mark: **{sym} removed.**')
        else:
            # BUG FIX: ctx.send() is a coroutine and was never awaited here,
            # so the warning message was silently dropped.
            await ctx.send(f':warning: **{sym} not in list.**')
    with open(f'{FILEPATH}/data/mem.json', 'w') as fm:
        json.dump(mdata, fm, indent=4)
    await list_stocks(ctx, user, mdata[guild_id][user]['stocks'])
async def clear_stocks(ctx, user):
    """Empty the user's saved watchlist in mem.json and confirm in chat."""
    mem_path = f'{FILEPATH}/data/mem.json'
    with open(mem_path, 'r') as fh:
        store = json.load(fh)
    gid = f'{ctx.guild.id}'
    store[gid][user]['stocks'].clear()
    with open(mem_path, 'w') as fh:
        json.dump(store, fh, indent=4)
    await ctx.send(':white_check_mark: **OK, done.**')
async def list_stocks(ctx, user, tickers):
    """Send an embed listing the author's saved ticker symbols.

    (`user` is unused but kept for signature compatibility with callers.)
    """
    embed = discord.Embed(
        title=f"{ctx.author}'s symbol list",
        description=(f":chart_with_upwards_trend: **A quick list of stock symbols "
                     f"for {ctx.author}**"))
    embed.set_thumbnail(url=ctx.author.avatar_url)
    # One symbol per line, trailing newline included (same as before).
    symbol_lines = ''.join(f'{sym}\n' for sym in tickers)
    embed.add_field(name='Symbols', value=symbol_lines, inline=False)
    await ctx.send(embed=embed)
class Stocks(commands.Cog):
    """Discord cog exposing the `!stock` command for per-user watchlists."""
    def __init__(self, bot):
        self.bot = bot
        self._last_member = None
    @commands.command(name='stock')
    async def stock_market(self, ctx, *, kwargs: str=None):
        """Dispatch `!stock [list|add ...|delete ...|clear|SYMBOLS...]`.

        With no argument: show 5-day history for the caller's saved symbols.
        `list` shows the saved symbols; `add`/`delete` edit the list;
        `clear` empties it; anything else is treated as ad-hoc symbols.
        """
        # Register this command's help entry in bot.json on first use.
        with open(f'{FILEPATH}/data/bot.json', 'r') as f:
            data = json.load(f)
        if 'stock' not in data['commands']:
            info = dict()
            info['Name'] = 'stock'
            info['Description'] = 'Shows 5-day history for stock symbols.'
            usage = "!stock\n!stock list\n" \
                "!stock <list of symbols separated by spaces>\n" \
                "!stock add <list of symbols separated by spaces>\n" \
                "!stock delete <list of symbols separated by spaces>"
            info['Usage'] = usage
            data['commands']['stock'] = info
            with open(f'{FILEPATH}/data/bot.json', 'w') as f:
                json.dump(data, f, indent=4)
        # Ensure mem.json has a guild -> user -> stocks structure for the
        # caller, creating any missing levels.
        with open(f'{FILEPATH}/data/mem.json', 'r') as fm:
            mdata = json.load(fm)
        user = f'{ctx.author.id}'
        guild_id = f'{ctx.guild.id}'
        if guild_id not in mdata:
            mdata[guild_id] = dict()
        if user not in mdata[guild_id]:
            mdata[guild_id][user] = dict()
        if 'stocks' not in mdata[guild_id][user]:
            mdata[guild_id][user]['stocks'] = list()
        with open(f'{FILEPATH}/data/mem.json', 'w') as fm:
            json.dump(mdata, fm, indent=4)
        if kwargs==None:
            # Bare `!stock`: show history of the saved symbols.
            if mdata[guild_id][user]['stocks']:
                await load_stocks(ctx, data, mdata[guild_id][user]['stocks'])
            else:
                await ctx.send(f':no_entry_sign: **You have no stock symbols saved.**')
        elif kwargs=="list":
            if mdata[guild_id][user]['stocks']:
                tickers = mdata[guild_id][user]['stocks']
                await list_stocks(ctx, user, tickers)
            else:
                with open(f'{FILEPATH}/data/mem.json', 'w') as fm:
                    json.dump(mdata, fm, indent=4)
                await ctx.send(f':no_entry_sign: **You have no stock symbols saved.**')
        else:
            # Remaining forms: `add ...`, `delete ...`, `clear`, or a plain
            # space-separated list of symbols to look up ad hoc.
            tickers = kwargs.split(' ')
            if kwargs.startswith("add"):
                tickers.pop(0)
                if not(tickers):
                    await ctx.send(":no_entry_sign: **Please enter at least one symbol "
                        "to be added.**")
                else:
                    await add_stocks(ctx, user, tickers)
            elif kwargs.startswith("delete"):
                tickers.pop(0)
                if not(tickers):
                    await ctx.send(":no_entry_sign: **Please enter at least one symbol "
                        "to be deleted.**")
                else:
                    await del_stocks(ctx, user, tickers)
            elif kwargs.startswith("clear"):
                await clear_stocks(ctx, user)
            else:
                await load_stocks(ctx, data, tickers)
"jesse.lin67@gmail.com"
] | jesse.lin67@gmail.com |
bfec5c587e199e8661352e09e76eb119ef9d4709 | 7d1d30be1995f2780cbf8999f1891e967936c090 | /pttweaks/activity/tests/test_models.py | f91ed5f80aebe191788ab9c3ad566ab2ce0f26ee | [] | no_license | EastAgile/PT-tweaks | 118274f70c198fb8885f4a42136a5a1bdefc4e51 | 7d5742862e42672eb77441ef7a7250d7a3a9359e | refs/heads/master | 2022-12-10T20:07:04.859288 | 2019-08-08T05:29:41 | 2019-08-08T05:29:41 | 164,597,129 | 0 | 1 | null | 2022-12-08T05:49:40 | 2019-01-08T08:06:59 | Python | UTF-8 | Python | false | false | 319 | py | from django.test import SimpleTestCase
from robber import expect
from activity.factories import ActivityChangeLogFactory
class ActivityChangeLogTestCase(SimpleTestCase):
    """Unit tests for the ActivityChangeLog model (no DB access needed)."""
    def test_model_str(self):
        # str() of the model should be its story_id.
        activity = ActivityChangeLogFactory.build(story_id='123123')
        expect(str(activity)).to.eq('123123')
| [
"open-source@eastagile.com"
] | open-source@eastagile.com |
9b67b1df199c4ca9a43fea2600a53521d9cf8882 | 0167bc775b59c5c5161ba02da4eb06a90393dbaf | /inheritance.py | 5d98af1eee3f73443180e2a7a19252875ae3af99 | [] | no_license | furqann/learn-py | a0ee95c48617a6a2891ff410e6d40699e8749761 | 4c4649dcb70e944a809d7be816d509261c0d4e97 | refs/heads/master | 2020-05-05T07:16:34.050416 | 2019-08-18T18:51:35 | 2019-08-18T18:51:35 | 179,819,569 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | # Inheritance
class Mammal:
def walk(self):
print("Walking started")
class Dog(Mammal):
# Python does not allow empty classes we need to use a keyword pass
def bark(self):
print("Woof Woof")
def walk(self):
print("Dog walking")
class Cat(Mammal):
def meow(self):
print("Meow Meow")
dog = Dog()
dog.walk() | [
"furqanahmad21@hotmail.com"
] | furqanahmad21@hotmail.com |
701bbcc1ebabd3addf948ffbf0e6693a5ce886d3 | f313176349f441bb8f72759d0f84f320d274d5d7 | /xjerab13/blogs_download2/web_rss/webrss/shared_func.py | a6cc6f24de1469de10940c0698e5cf7f427253d2 | [] | no_license | iotrusina/M-Eco-WP3-package | 641876974268cda458bf096e702b0bfe18596d67 | 83d76dfd2afdae93856e094879464327abf1faa0 | refs/heads/master | 2021-01-01T05:35:57.200860 | 2012-10-03T19:57:32 | 2012-10-03T19:57:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,296 | py | import os, sys
import time
import logging
#from bdllib.pubmod.BeautifulSoup import name2codepoint
import re
log = logging.getLogger("blog_download")
'''Some shared universal funcions'''
def makePath(path):
    """Resolve *path* relative to the program directory (sys.path[0]);
    absolute paths are returned unchanged."""
    if not os.path.isabs(path):
        path = os.path.join(sys.path[0], path)
    return path
def createFolderStruc(folder):
    """Create *folder* (including intermediate directories) if missing."""
    if os.path.exists(folder):
        return
    os.makedirs(folder)
def writeToFile(content, filename, folder , extension = "", timestamp = True, overwrite = False):
'''Write content data to file.
Keyword arguments:
timestamp - append filestam to filename (default = Enabled)
'''
rx = re.compile('[^\d\w]')
filename = rx.sub("_",filename)
if timestamp:
filename += time.strftime("%Y%m%dT%H%M%S")
if not extension.startswith("."):
extension = "." + extension
createFolderStruc(folder)
path = os.path.join(folder, filename)
if overwrite:
path = path + extension
g = open(path, "wt")
g.write(content)
g.close()
if overwrite:
return
cnt = 1
newname = ""
if not overwrite and os.path.exists(path + extension):
while os.path.exists(path + "." + str(cnt) + extension):
cnt += 1
newname = os.path.splitext(path)[0] + "." + str(cnt) + extension
else:
newname = os.path.splitext(path)[0] + extension
os.rename(path, newname)
msg = "Data saved to: %s", newname
log.info(msg)
def toUnicode(text,decoding="utf-8"):
'''Convert text to unicode'''
if isinstance(text, str):
return unicode(text, decoding)
elif isinstance(text, unicode):
return text
else:
return text
def fromUnicode(text,encoding = "utf-8"):
'''Convert text from unicode to str'''
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
return text
def unescape(text):
    '''Convert HTML escape chars to regular chars.

    Python 2 only (`unichr` builtin, `except X, e` syntax).  Numeric
    character references (&#nn; and &#xhh;) are decoded; the named-entity
    branch is disabled (its lookup is commented out), so named entities are
    returned unchanged.
    '''
    def fixup(m):
        # m matches one "&...;" token; return its replacement (or itself).
        text = m.group(0)
        if text[:2] == "&#":
            # character reference
            try:
                if text[:3] == "&#x":
                    return unichr(int(text[3:-1], 16))
                else:
                    return unichr(int(text[2:-1]))
            except ValueError, e:
                #print e
                log.error("html escape to unicode error %s", e)
                pass
        else:
            # named entity (lookup disabled — name2codepoint import is
            # commented out at module top)
            try:
                #text = unichr(name2codepoint[text[1:-1]])
                pass
            except KeyError, e:
                #print e
                log.error("name to unicode error %s", e)
                pass
        return text # leave as is
    return re.sub("&#?\w+;", fixup, text)
def pbar(string):
    """Write *string* as an in-place status line: pad to 79 columns (to
    blank out any previous, longer message) and end with a carriage return
    so the next call overwrites it."""
    sys.stdout.write(string.ljust(79) + '\r')
| [
"otrusina@gmail.com"
] | otrusina@gmail.com |
4b62a941576cc59defc792dd09df58d2eb7e386b | ef11a06c906f37fa98c3de38aa1307110269b2f4 | /Notes/Fall2019/Ch4C.py | 6d229ca716a445032f72e9b2a630d709bd76b422 | [] | no_license | fwparkercode/IntroProgrammingNotes | 0d389d2d281122303da48ab2c1648750e594c04f | ad64777208d2f84f87e4ab45695adbfe073eae18 | refs/heads/master | 2021-07-16T07:13:55.665243 | 2020-06-09T12:45:38 | 2020-06-09T12:45:38 | 170,581,913 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,842 | py | # Chapter 4 - Loops and Random Numbers
# Random numbers
import random
# Randrange function - random.randrange(start, end, count_by)
print(random.randrange(10)) # generates a random int from 0 to 9
print(random.randrange(5, 10)) # random int from 5 to 9
print(random.randrange(50, 100, 5)) # random int from 50 to 99 counting by 5s
# make a random number between -10 and - 5
print(random.randrange(-10, -4))
# random even number from 28 to 36
print(random.randrange(28, 37, 2))
# random function - random.random()
# generates a random float from 0 to 1
print(random.random())
# to generate any other float use random.random() * spread + offset
# random float from 0 to 10
print(random.random() * 10)
# random float from 10 to 15
print(random.random() * 5 + 10)
# random float from -5 to 0
print(random.random() * 5 - 5)
# FOR LOOPS
# for the example below: i is the index, range is from 0 to 9
for i in range(10):
print("Taco Tuesday")
print("and Quesadillas")
print("Pulled Pork Wednesday")
# print twenty random integers from 0 to 100
for i in range(20):
print(random.randrange(101))
# Range function - range(start, end, count_by)
# works like random.randrange()
for i in range(10):
print(i)
for i in range(1, 11):
print(i)
for i in range(0, 101, 2):
print(i)
for i in range(50, 10, -5):
print(i)
# Nested loops
for i in range(3):
print("a")
for j in range(3):
print("b")
print("\n\n")
for i in range(3):
print("a")
for j in range(3):
print("b")
'''
for hours in range(24):
for minutes in range(60):
for seconds in range(60):
print(hours, minutes, seconds)
'''
for row in range(1, 21):
for seat in range(1, 21):
print("row", row, "seat", seat)
# Add all the numbers from 1 to 100
total = 0
for i in range(1, 101):
total += i
print(total)
# WHILE Loops
# use a FOR loop if you can.
# use a WHILE loop when you want to keep going until a condition exists
# count from 1 to 10
for i in range(1, 11):
print(i)
i = 1
while i <= 10:
print(i)
i += 1
# print multiples of 7 from 21 to 42
for i in range(21, 43, 7):
print(i)
i = 21
while i <= 42:
print(i)
i += 7
# what are all of the squared numbers under 100000
n = 1
while n ** 2 < 100000:
print(n, "squared is", n ** 2)
n += 1
# Beware the infinite loop
'''
n = 10
while n == 10:
print("TEN")
'''
'''
n = 10
while n > 0:
print(n)
n *= 2
'''
'''
while 4:
print("AHHHH")
'''
# GAME LOOP — keep prompting until the player wakes the dragon;
# any answer other than "yes"/"y" (case-insensitive) repeats the prompt.
done = False
print("Welcome to Dragon Quest 2!")
while not done:
    answer = input("A dragon is blocking the exit. Do you want to wake it? ")
    if answer.lower() == "yes" or answer.lower() == "y":
        print("The dragon eats you!")
        done = True
print("Thank you for playing")
| [
"alee@fwparker.org"
] | alee@fwparker.org |
f2cbb0fbc5d3f2af9927d29398c47bdbb8021629 | 4a7cd56690166ae9bf6f03bce8c5894858671147 | /app.py | baa64a02a74ce21b9c3d20a6e749aad8d56e05ad | [] | no_license | jkap86/DynastyDashboardFlask | a982e3a7e1a3a9b3ca7260cd1349c0346a948f3e | 9a3d0e1626691deeea656aeb9cf93041442fd11e | refs/heads/main | 2023-06-02T01:29:19.999049 | 2021-06-12T15:00:17 | 2021-06-12T15:00:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,479 | py | from flask import Flask, render_template, request, redirect, url_for, jsonify, session
from flask_session import Session
from flask_bootstrap import Bootstrap
import requests
import json
import datetime
from operator import itemgetter
from flask_wtf import FlaskForm
from wtforms import StringField, TextField, SubmitField
from wtforms.validators import DataRequired, AnyOf
import functools
app = Flask(__name__)
SESSION_TYPE = 'filesystem'
app.config.from_object(__name__)
Session(app)
Bootstrap(app)
app.config['SECRET_KEY'] = "1111"
def getUsername(userID):
    """Look up the Sleeper display username for a numeric user id."""
    response = requests.get('https://api.sleeper.app/v1/user/' + str(userID))
    return response.json()['username']
def getUserIDfromUsername(username):
    """Resolve a Sleeper username to its numeric user_id string."""
    profile = requests.get('https://api.sleeper.app/v1/user/' + username).json()
    return profile['user_id']
def getUserIDfromRosterID(rosterID, rosters):
    """Return the owner_id of the roster whose roster_id matches, or None
    when no roster in the list matches."""
    matches = (r['owner_id'] for r in rosters if r['roster_id'] == rosterID)
    return next(matches, None)
def getLeagues(username):
    """Fetch all 2021 NFL leagues for the given Sleeper username."""
    owner_id = getUserIDfromUsername(username)
    url = 'https://api.sleeper.app/v1/user/' + owner_id + '/leagues/nfl/2021'
    return requests.get(url).json()
def getRosters(league):
    """Fetch the roster list for a league dict (keyed by 'league_id')."""
    endpoint = 'https://api.sleeper.app/v1/league/' + league['league_id'] + '/rosters'
    return requests.get(endpoint).json()
def getTransactions(league):
    """Fetch week-1 transactions for a league dict (keyed by 'league_id')."""
    endpoint = 'https://api.sleeper.app/v1/league/' + league['league_id'] + '/transactions/1'
    return requests.get(endpoint).json()
def getAvatar(username):
    """Build the full-size avatar CDN URL for a Sleeper username."""
    profile = requests.get('https://api.sleeper.app/v1/user/' + username).json()
    return 'https://sleepercdn.com/avatars/' + profile['avatar']
def getAvatarThumb(username):
    """Build the thumbnail avatar URL for a Sleeper username."""
    response = requests.get('https://api.sleeper.app/v1/user/' + username)
    avatar_id = response.json()['avatar']
    return 'https://sleepercdn.com/avatars/thumbs/' + avatar_id
class ViewLeaguesForm(FlaskForm):
    """Form asking for a Sleeper username whose leagues to list."""
    username = StringField('username', [DataRequired()], render_kw={'placeholder': 'username', 'class': 'form'})
class ViewCommonForm(FlaskForm):
    """Form asking for two usernames whose common leagues to compare."""
    leaguemateName = StringField('username', [DataRequired()], render_kw={'placeholder': 'username', 'class': 'form'})
    leaguemateName2 = StringField('leaguemateName', [DataRequired()], render_kw={'placeholder': 'leaguemate name', 'class': 'form'})
class ViewTransactions(FlaskForm):
    """Form asking for a username whose transactions to display."""
    username3 = StringField('username3', [DataRequired()], render_kw={'placeholder': 'username', 'class': 'form'})
class ViewAllLeaguemates(FlaskForm):
    """Form asking for a username whose full leaguemate list to display."""
    username2 = StringField('username2', [DataRequired()], render_kw={'placeholder': 'username', 'class': 'form'})
class PlayerSearch(FlaskForm):
    """Form asking for a username and a player string to search across leagues."""
    username4 = StringField('username4', [DataRequired()], render_kw={'placeholder': 'username', 'class': 'form'})
    playerSearch = StringField('playerSearch', [DataRequired()], render_kw={'placeholder': 'player', 'class': 'form'})
@app.route('/', methods=['POST', 'GET'])
def index():
    """Home page: dispatch on the submit button pressed, else render the forms.

    Loads the full player database from allplayers.txt into the session so
    later views can resolve player ids to names.
    """
    username = request.form.get('username')
    username2 = request.form.get('username2')
    username3 = request.form.get('username3')
    username4 = request.form.get('username4')
    playerSearch = request.form.get('playerSearch')
    leaguemateName = request.form.get('leaguemateName')
    leaguemateName2 = request.form.get('leaguemateName2')
    # NOTE(review): the file handle is rebound before close() and is never
    # closed; a `with open(...)` would be safer.
    allPlayers = open('allplayers.txt', 'r')
    allPlayers = allPlayers.read()
    allPlayers = json.loads(allPlayers)
    players = []
    allPlayersKeys = list(allPlayers.keys())
    # Cache the player map and username in the server-side session.
    session['allPlayers'] = allPlayers
    session['username'] = username
    favTeam = request.form.get("team")
    # Each submit button value routes to a different view.
    if request.form.get('submitButton') == 'view-transactions':
        return redirect(url_for('transactions', username=username3))
    elif request.form.get('submitButton') == 'submit':
        return redirect(url_for('info', username=username))
    elif request.form.get('submitButton') == 'view-common-leagues':
        return redirect(url_for('leaguemates', leaguemateName=leaguemateName, leaguemateName2=leaguemateName2))
    elif request.form.get('submitButton') == 'view-all-leaguemates':
        return redirect(url_for('leaguematesAll', username=username2))
    elif request.form.get('submitButton') == 'view-player-search':
        return redirect(url_for('playerSearchResults', username=username4, playerSearch=playerSearch))
    else:
        # No button pressed: render the landing page with all five forms.
        form = ViewLeaguesForm(request.form)
        form2 = ViewCommonForm(request.form)
        form3 = ViewTransactions(request.form)
        form4 = ViewAllLeaguemates(request.form)
        form5 = PlayerSearch(request.form)
        favTeam = request.form.get("team")
        for key in allPlayersKeys:
            players.append(allPlayers[key])
        return render_template('index.html', allPlayers=players, favTeam=favTeam or 'was', form=form, form2=form2, form3=form3, form4=form4, form5=form5)
@app.route('/info/<username>', methods=["POST", "GET"])
# NOTE(review): lru_cache on a Flask view caches Response objects keyed only
# on the URL argument and ignores request method/form state -- confirm intended.
@functools.lru_cache(maxsize=128)
def info(username):
    """Show a user's leagues plus aggregate playoff win/loss record.

    POST with a submitted league id redirects to that league's roster view.
    """
    username = username.strip()
    leagueID = request.form.get('submitLeagueID')
    if request.method == 'POST':
        username = session.get('username')
        if leagueID != None:
            return redirect(url_for('roster', leagueID=leagueID, username=username))
    else:
        leagues = getLeagues(username)
        userID = getUserIDfromUsername(username)
        x = requests.get("https://api.sleeper.app/v1/user/" + userID)
        avatarID = x.json()['avatar']
        pwins = 0
        plosses = 0
        # Tally W/L characters from each roster's prior-season record string.
        for league in leagues:
            rosters = getRosters(league)
            for roster in rosters:
                if roster['owner_id'] == userID:
                    if roster['metadata']:
                        if 'record' in roster['metadata'].keys():
                            record = roster['metadata']['record']
                            pwins = pwins + record.count('W')
                            plosses = plosses + record.count('L')
    # NOTE(review): on a POST without submitLeagueID, `leagues`/`pwins`/
    # `avatarID` are never assigned and this line raises -- verify that path.
    return render_template('info.html', username=username, leagues=leagues, leaguesCount=len(leagues), pwins=pwins, plosses=plosses, avatar="https://sleepercdn.com/avatars/" + avatarID, favTeam=session.get('fav-team'))
@app.route('/transactions/<username>')
# NOTE(review): lru_cache on a Flask view caches Response objects -- see index.
@functools.lru_cache(maxsize=128)
def transactions(username):
    """List week-1 transactions involving the user's roster in each league."""
    username = username.strip()
    allPlayers = session.get('allPlayers')
    transactionsAll = []
    userID = getUserIDfromUsername(username)
    for league in getLeagues(username):
        rosters = getRosters(league)
        transactions = getTransactions(league)
        rosterIDList = list(filter(lambda x:x['owner_id'] == userID, rosters)) or ""
        # Binds rosterID to the user's (last matching) roster in this league;
        # when the user has no roster here, rosterID stays unbound and the
        # NameError below skips the league.
        for ID in rosterIDList:
            rosterID = ID
        try:
            # Keep only transactions that reference the user's roster id.
            for transaction in list(filter(lambda x:int(rosterID['roster_id']) in x['roster_ids'], transactions)):
                # Sleeper timestamps are epoch milliseconds.
                transaction['status_updated'] = datetime.datetime.fromtimestamp(transaction['status_updated']/1000).strftime('%Y-%m-%d %H:%M:%S')
                userKey = {}
                draftPicks = list(filter(lambda x:x['roster_id'], transaction['draft_picks']))
                for draftPick in draftPicks:
                    userKey[draftPick['roster_id']] = getUsername(getUserIDfromRosterID(draftPick['roster_id'], rosters))
                transactionsAll.append({'transaction':transaction, 'league':league['name'], 'status':transaction['status'], 'date':transaction['status_updated'], 'roster-id':rosterID['roster_id'], 'draft-pick-count':len(transaction['draft_picks']), 'userKey':userKey })
        except NameError:
            # rosterID unbound => user has no roster in this league; skip it.
            pass
    return render_template('transactions.html', username=username, allPlayers=allPlayers, transactionsAll=transactionsAll)
@app.route('/leaguemates/<leaguemateName>/<leaguemateName2>', methods=["POST", "GET"])
# NOTE(review): lru_cache on a Flask view caches Response objects -- see index.
@functools.lru_cache(maxsize=128)
def leaguemates(leaguemateName, leaguemateName2):
    """Show the leagues two Sleeper users have in common.

    POST: redirect to the roster view for the submitted league/user.
    GET: fetch both users' league lists and intersect them on league_id.
    """
    leaguemateName = leaguemateName.strip()
    leaguemateName2 = leaguemateName2.strip()
    favTeam = session.get('fav-team')
    if request.method == 'POST':
        return redirect(url_for('roster', username=request.form.get('submitLeaguemateName'), leagueID=request.form.get('submitLeagueID')))
    else:
        leaguemateLeagues = getLeagues(leaguemateName)
        leaguemateLeagues2 = getLeagues(leaguemateName2)
        # Intersect on league_id via a set lookup instead of the original
        # O(n*m) nested loop; order follows the first user's league list.
        otherLeagueIDs = {league['league_id'] for league in leaguemateLeagues2}
        commonLeagues = [league for league in leaguemateLeagues
                         if league['league_id'] in otherLeagueIDs]
        return render_template('leaguemates.html', leaguemateName=leaguemateName, leaguemateName2=leaguemateName2, commonLeagues=commonLeagues, commonLeaguesCount=len(commonLeagues), favTeam=favTeam)
@app.route('/leaguemates/<username>/all', methods=["POST", "GET"] )
# NOTE(review): lru_cache on a Flask view caches Response objects -- see index.
@functools.lru_cache(maxsize=128)
def leaguematesAll(username):
    """List every leaguemate of the user with how many leagues they share."""
    username = username.strip()
    leaguemates = []
    avatarThumbs = {}
    if request.method == 'POST':
        # Drill down: re-run this view for the clicked leaguemate.
        leaguemateName = request.form.get("submitUsername")
        if request.form['submitUsername'] != None:
            return redirect(url_for('leaguematesAll', username=leaguemateName))
    else:
        for league in getLeagues(username):
            usersLeague = requests.get('https://api.sleeper.app/v1/league/' + league['league_id'] + '/users')
            for user in usersLeague.json():
                leaguemates.append({'user-id': user['display_name'], 'league': league['name']})
                if user['avatar']:
                    avatarThumbs[user['display_name']] = 'https://sleepercdn.com/avatars/thumbs/' + user['avatar']
        # Count shared leagues per display name.
        myDict = {}
        for d in leaguemates:
            c = d['user-id']
            myDict[c] = myDict.get(c,0)+1
        avatar = getAvatar(username)
        return render_template('leaguematesAll.html', username=username, leaguematesDict=myDict, avatar=avatar, avatarThumbs=avatarThumbs)
@app.route('/<username>/<playerSearch>')
# NOTE(review): lru_cache on a Flask view caches Response objects -- see index.
@functools.lru_cache(maxsize=128)
def playerSearchResults(username ,playerSearch):
    """Show, per league, who owns the searched player (or 'Available').

    `playerSearch` must match "first last position team" exactly as built
    from the session's allPlayers map.
    """
    username = username.lower().strip()
    allPlayers = session.get('allPlayers')
    leaguesPlayers = []
    leaguesWith = {}
    leaguesOwned = []
    leagues = getLeagues(username)
    pwins = 0
    plosses = 0
    for league in leagues:
        rosters = getRosters(league)
        ownerName = 'Available'
        for roster in rosters:
            players = roster['players']
            if players != None:
                leaguesPlayers.append(players)
                for lp in players:
                    try:
                        # Rebuild the display string and compare against the query.
                        if playerSearch == " ".join([allPlayers[lp]['first_name'], allPlayers[lp]['last_name'], allPlayers[lp]['position'], str(allPlayers[lp]['team'])]):
                            ownerID = roster['owner_id']
                            info = requests.get('https://api.sleeper.app/v1/user/' + str(ownerID))
                            ownerName = info.json()['username']
                            if lp in roster['starters']:
                                leaguesWith[league['name']] = [ownerName, 'starter']
                            else:
                                leaguesWith[league['name']] = [ownerName, 'bench']
                            if ownerName == username:
                                leaguesOwned.append(league['name'])
                                if roster['metadata']:
                                    if 'record' in roster['metadata'].keys():
                                        record = roster['metadata']['record']
                                        pwins = pwins + record.count('W')
                                        plosses = plosses + record.count('L')
                    except KeyError:
                        # Player id missing from allPlayers map; skip it.
                        pass
        if ownerName == 'Available':
            leaguesWith[league['name']] = ownerName
    return render_template('playerSearchResults.html', playerSearch=playerSearch, username=username, leaguesCount=len(leaguesOwned), leaguesOwned=leaguesOwned, leaguesList=leaguesWith, pwins=pwins, plosses=plosses)
@app.route('/roster/<username>/<leagueID>', methods=["POST", "GET"])
# NOTE(review): lru_cache on a Flask view caches Response objects -- see index.
@functools.lru_cache(maxsize=128)
def roster(leagueID, username):
    """Show one user's roster in a league, plus the other owners."""
    allPlayers = session.get('allPlayers')
    userID = getUserIDfromUsername(username)
    league = requests.get("https://api.sleeper.app/v1/league/" + leagueID)
    league = league.json()
    leagueName = league['name']
    rosters = requests.get('https://api.sleeper.app/v1/league/' + leagueID + '/rosters')
    rosters = rosters.json()
    leaguemates = []
    for roster in rosters:
        x = requests.get("https://api.sleeper.app/v1/user/" + roster['owner_id'])
        avatarID = x.json()['avatar']
        if roster['owner_id'] == userID:
            players = roster['players'] or []
            wins = roster['settings']['wins']
            losses = roster['settings']['losses']
            ties = roster['settings']['ties']
            if roster['metadata']:
                record = roster['metadata']['record']
                pwins = record.count('W')
                plosses = record.count('L')
            else:
                pwins = "n/a"
                plosses = "Inaugural Season"
        else:
            leaguemates.append([getUsername(roster['owner_id']), avatarID])
    # NOTE(review): if the user owns no roster in this league, `players`,
    # `wins`, etc. are never assigned and the code below raises -- verify.
    playersNames = []
    for player in players:
        try:
            playersNames.append(allPlayers[player]['position'] + " " + allPlayers[player]['first_name'] + " " + allPlayers[player]['last_name'] + " " + allPlayers[player]['team'])
        except KeyError:
            pass
        except TypeError:
            pass
    playersNames.sort()
    return render_template('roster.html', leagueName=leagueName, teamName=username, players=playersNames, playerCount=len(playersNames), leaguemates=leaguemates, wins=wins, losses=losses, ties=ties, pwins=pwins, plosses=plosses, leagueID=leagueID)
| [
"jkap86@gmail.com"
] | jkap86@gmail.com |
4076f7ee484c31f9c059c5913b8e1d33c17c6449 | 9398c7d6de89aabca4de4403e14c7e0457f8d462 | /moocOnline/apps/courses/migrations/0002_auto_20170327_1503.py | 67217e50217dab6fb85ba8a5744812e438782650 | [] | no_license | xuyafei9303/moocOnline_Django | 789636451aa5ba7252d420ae3488472df77c4f10 | b8d76162133bf3bd9b3fc62e632e1ac81928219c | refs/heads/master | 2021-01-20T07:13:33.033584 | 2017-05-02T02:10:34 | 2017-05-02T02:10:34 | 89,980,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-03-27 15:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: narrows Course.degree to three choices."""

    dependencies = [
        ('courses', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='course',
            name='degree',
            # Choice labels are escaped Chinese strings (elementary/intermediate/advanced).
            field=models.CharField(choices=[('cj', '\u521d\u7ea7'), ('zj', '\u4e2d\u7ea7'), ('gj', '\u9ad8\u7ea7')], max_length=2, verbose_name='\u96be\u5ea6'),
        ),
    ]
| [
"1007386723@qq.com"
] | 1007386723@qq.com |
16cbf16dc6b7fe17bb9032ee14ac7d326eeaced8 | 3a570384a3fa9c4c7979d33b182556e1c637e9eb | /anwmisc/anw-pyui/Packages/anwp/gui/configinfo.py | 5c7bf27ee02e7b8c9821e1b50aacc8cfcb5713f6 | [] | no_license | colshag/ANW | 56a028af5042db92b5ead641dc542fcb4533344e | 46948d8d18a0639185dd4ffcffde126914991553 | refs/heads/master | 2020-03-27T00:22:49.409109 | 2018-10-27T06:37:04 | 2018-10-27T06:37:04 | 145,618,125 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,603 | py | # ---------------------------------------------------------------------------
# Armada Net Wars (ANW)
# configinfo.py
# Written by Chris Lewis
# ---------------------------------------------------------------------------
# This panel Displays User Config information
# ---------------------------------------------------------------------------
import pyui
import guibase
import anwp.func.globals
class ConfigInfoFrame(guibase.BaseFrame):
    """Displays User Config Information"""
    def __init__(self, mode, app, title='User Configuration'):
        self.app = app
        self.width = 1024
        # Size/position against the mode's chrome when available; fall back
        # to fixed margins when the mode has no menus (e.g. test harness).
        try:
            self.height = (app.height - mode.mainMenu.height - mode.mainFooter.height - 40)
        except:
            self.height = (app.height - 120)
        try:
            y = (mode.mainMenu.height)
        except:
            y = 40
        x = 0
        guibase.BaseFrame.__init__(self, mode, x, y, self.width, self.height, title)
        self.setPanel(ConfigInfoPanel(self))
class ConfigInfoPanel(guibase.BasePanel):
    """Panel for User Config information.

    Lets the player change their empire email/password, rename a starship
    captain, or surrender the game.  All server calls go through the mode's
    XML-RPC proxy and fall back to a "login again" message on failure.
    """
    def __init__(self, frame):
        guibase.BasePanel.__init__(self, frame)
        numExtend = 1
        # Extra layout rows scale with window height (768 is the base height).
        x = (self.frame.app.height - 768) / (22 * numExtend)
        cells = 28 + (numExtend * x)
        self.setLayout(pyui.layouts.TableLayoutManager(8, cells))
        # subject title
        self.pctEmpire = pyui.widgets.Picture('')
        self.addChild(self.pctEmpire, (0, 0, 1, 3))
        self.lblTitle = pyui.widgets.Label(text='', type=1)
        self.addChild(self.lblTitle, (1, 1, 3, 1))
        self.btnSurrender = pyui.widgets.Button('Surrender Game', self.onSurrender)
        self.addChild(self.btnSurrender, (6, 1, 2, 1))
        n = 4
        self.lbl = pyui.widgets.Label(text='CHANGE EMPIRE INFO:', type=1)
        self.addChild(self.lbl, (0, n, 4, 1))
        self.lbl = pyui.widgets.Label(text='Email Address:', type=2)
        self.addChild(self.lbl, (0, n+1, 2, 1))
        self.txtEmail = pyui.widgets.Edit('',50)
        self.addChild(self.txtEmail, (2, n+1, 4, 1))
        self.btnEmail = pyui.widgets.Button('Change Email', self.onChangeEmail)
        self.addChild(self.btnEmail, (6, n+1, 2, 1))
        self.lbl = pyui.widgets.Label(text='Login Password:', type=2)
        self.addChild(self.lbl, (0, n+2, 2, 1))
        self.txtPassword = pyui.widgets.Edit('',20)
        self.addChild(self.txtPassword, (2, n+2, 2, 1))
        self.btnEmail = pyui.widgets.Button('Change Password', self.onChangePassword)
        self.addChild(self.btnEmail, (6, n+2, 2, 1))
        # starship captains
        n = n+4
        self.lbl = pyui.widgets.Label(text='SELECT STARSHIP CAPTAIN:', type=1)
        self.addChild(self.lbl, (0, n, 4, 1))
        self.lstCaptains = pyui.widgets.ListBox(self.onCaptainSelected,None,100,100,0)
        self.addChild(self.lstCaptains, (0, n+1, 8, 16+x))
        n = n+18+x
        self.lbl = pyui.widgets.Label(text='Selected Captain Name:', type=2)
        self.addChild(self.lbl, (0, n, 2, 1))
        self.txtName = pyui.widgets.Edit('',20)
        self.addChild(self.txtName, (2, n, 2, 1))
        self.btnName = pyui.widgets.Button('Change Captain Name', self.onChangeName)
        self.addChild(self.btnName, (4, n, 2, 1))
        # FIX: was the bare statement `self.pack` which never invoked the
        # method; call it so the layout is actually packed.
        # NOTE(review): confirm pack() takes no required args in this pyui fork.
        self.pack()
        self.populate()
    def buildCaptainsData(self):
        """Return {captainID: display string} for every captain in the empire."""
        d = {}
        # sort captains by experience level
        ##captains = anwp.func.funcs.sortDictByChildObjValue(self.frame.mode.game.myCaptains, 'experience', True, {})
        for captainID, myCaptainDict in self.frame.mode.game.myCaptains.iteritems():
            d[captainID] = '%s - RANK:%s' % (myCaptainDict['name'], myCaptainDict['rank'])
        return d
    def onCaptainSelected(self, item):
        """List selection callback: enable renaming when a captain is chosen."""
        if not item:
            self.btnName.disable()
        else:
            # FIX: `<>` is Python-2-only syntax; `!=` is equivalent and portable.
            if self.lstCaptains.selected != -1:
                self.btnName.enable()
                self.txtName.setText(self.frame.mode.game.myCaptains[item.data]['name'])
    def onChangeEmail(self, item):
        """Send the new email address to the server and update local state."""
        try:
            d = {}
            d['emailAddress'] = self.txtEmail.text
            serverResult = self.frame.mode.game.server.setEmpire(self.frame.mode.game.authKey, d)
            if serverResult == 1:
                self.frame.mode.game.myEmpire['emailAddress'] = self.txtEmail.text
                self.frame.mode.modeMsgBox('Empire Email Address Changed')
            else:
                self.frame.mode.modeMsgBox(serverResult)
        except:
            self.frame.mode.modeMsgBox('onChangeEmail->Connection to Server Lost, Login Again')
    def onSurrender(self, item):
        """Ask for confirmation before surrendering the game."""
        self.frame.mode.modeYesNoBox('Do you really want to surrender the game?', 'surrenderYes', 'surrenderNo')
    def onChangeName(self, item):
        """Rename the selected captain on the server and refresh the list."""
        try:
            id = self.lstCaptains.getSelectedItem().data
            serverResult = self.frame.mode.game.server.setCaptainName(self.frame.mode.game.authKey, id, self.txtName.text)
            if serverResult == 1:
                self.frame.mode.game.myCaptains[id]['name'] = self.txtName.text
                self.frame.mode.modeMsgBox('Captain name Changed')
                self.populate()
            else:
                self.frame.mode.modeMsgBox(serverResult)
        except:
            self.frame.mode.modeMsgBox('onChangeName->Connection to Server Lost, Login Again')
    def onChangePassword(self, item):
        """Send the new login password to the server and update local state."""
        try:
            d = {}
            d['password'] = self.txtPassword.text
            serverResult = self.frame.mode.game.server.setEmpire(self.frame.mode.game.authKey, d)
            if serverResult == 1:
                self.frame.mode.game.empirePass = self.txtPassword.text
                self.frame.mode.modeMsgBox('Empire Password Changed')
            else:
                self.frame.mode.modeMsgBox(serverResult)
        except:
            self.frame.mode.modeMsgBox('onChangePassword->Connection to Server Lost, Login Again')
    def populate(self):
        """Populate frame with new data (falls back to test data outside the game)."""
        self.btnName.disable()
        try:
            myEmpireDict = self.frame.mode.game.myEmpire
            myEmpirePict = '%s%s.png' % (self.frame.app.simImagePath, myEmpireDict['imageFile'])
            self.lblTitle.setText('CONFIGURATION FOR: %s' % myEmpireDict['name'])
            self.lblTitle.setColor(anwp.func.globals.colors[myEmpireDict['color1']])
            myCaptains = self.buildCaptainsData()
            self.txtEmail.setText(myEmpireDict['emailAddress'])
            self.txtPassword.setText(self.frame.mode.game.empirePass)
        except:
            # this allows for testing panel outside game
            myEmpirePict = self.testImagePath + 'empire1.png'
            self.lblTitle.setText('CONFIGURATION FOR: Test')
            myCaptains = self.testDict
        self.pctEmpire.setFilename(myEmpirePict)
        self.populateListbox(self.lstCaptains, myCaptains)
def main():
    """Run gui for testing"""
    # Imported lazily: `run` exists only in the test environment.
    import run
    width = 1024
    height = 768
    pyui.init(width, height, 'p3d', 0, 'Testing Config Info Panel')
    app = run.TestApplication(width, height)
    frame = ConfigInfoFrame(None, app)
    app.addGui(frame)
    app.run()
    pyui.quit()

if __name__ == '__main__':
    main()
| [
"colshag@gmail.com"
] | colshag@gmail.com |
3bacda1269a3f3130f182ec8f974b29c16819d8f | b7eed26cf8a0042a61f555eed1e9bf0a3227d490 | /students/arkadiusz_kasprzyk/lesson_01_basics/first_digit_after_decimal_point.py | e5e0c5af3647e665f9c37e9233b389e50eaacf34 | [] | no_license | jedzej/tietopythontraining-basic | e8f1ac5bee5094c608a2584ab19ba14060c36dbe | a68fa29ce11942cd7de9c6bbea08fef5541afa0f | refs/heads/master | 2021-05-11T11:10:05.110242 | 2018-08-20T12:34:55 | 2018-08-20T12:34:55 | 118,122,178 | 14 | 84 | null | 2018-08-24T15:53:04 | 2018-01-19T12:23:02 | Python | UTF-8 | Python | false | false | 502 | py | '''
title: first_digit_after_decimal_point
author: arkadiusz.kasprzyk@tieto.com
date: 2018-03-05
description:
Given a positive real number, print its first digit to the right of the decimal point.
'''
import math as m
from decimal import Decimal


def first_decimal_digit(number):
    """Return the first digit to the right of the decimal point of `number`.

    Builds a Decimal from the shortest repr of the float so inputs like 0.7
    yield 7.  The original `m.floor((x - m.floor(x)) * 10)` returned 6 for
    0.7 because 0.7 is stored as 0.6999...9 in binary floating point.
    """
    return int((Decimal(str(number)) * 10) % 10)


if __name__ == "__main__":
    # Original interactive behavior, now guarded so the module is importable.
    print("Given a positive real number, prints its first digit to the right of the decimal point.")
    x = float(input("Give a number: "))
    r = first_decimal_digit(x)
    print("The first digit after decimal point is {}".format(r))
    input("Press Enter to quit the program.")
"akasp@interia.pl"
] | akasp@interia.pl |
d9c7132ecc0054858916e28ca1197a865068eec4 | e93c0664a4ed160410266e0c337f229cced3e588 | /DIM/func/misc.py | b8f50458eb1e42b7fb780fad1a869fd14e363519 | [] | no_license | jbonato1/AML_exam | 56d63bf5d00660106774e24440307070dd2e5e54 | 3c5e15f4c6b239f2a09a7cafc440a3c013f1f789 | refs/heads/master | 2022-07-14T22:18:29.861863 | 2020-05-14T10:31:10 | 2020-05-14T10:31:10 | 259,679,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,411 | py | """
miscilaneous functions.
"""
import math
import torch
def log_sum_exp(x, axis=None):
    """Numerically stable log-sum-exp reduction.

    Args:
        x: Input tensor.
        axis: Axis over which to perform the sum (required; the default
            None is kept for interface compatibility but raises inside
            torch.max, as it did originally).

    Returns:
        torch.Tensor: log(sum(exp(x))) along `axis`, with that axis reduced.

    The max is taken with keepdim=True so `x - x_max` broadcasts correctly
    for any axis (the original reduced shape only broadcast for axis=0 on
    suitably shaped inputs and raised otherwise).
    """
    x_max = torch.max(x, axis, keepdim=True)[0]
    y = torch.log(torch.exp(x - x_max).sum(axis)) + x_max.squeeze(axis)
    return y
def random_permute(X):
    """Randomly permute X across the batch dimension.

    Args:
        X: Input tensor -- indexed as 3-D here; presumably
            (batch, channels, locations).  TODO confirm shape with callers.

    Returns:
        torch.Tensor: same shape as X; for each position along the middle
        dimension of the transposed tensor, the batch entries are
        independently shuffled (idx[i, j] picks which batch element lands
        at row i for column j).

    Note: indices are created with .cuda(), so this requires a CUDA device,
    as in the original.
    """
    X = X.transpose(1, 2)
    b = torch.rand((X.size(0), X.size(1))).cuda()
    idx = b.sort(0)[1]
    # torch.range is deprecated (inclusive upper bound, float dtype);
    # torch.arange(n) yields the same 0..n-1 long indices directly.
    adx = torch.arange(X.size(1))
    X = X[idx, adx[None, :]].transpose(1, 2)
    return X
def ms_ssim(X_a, X_b, window_size=11, size_average=True, C1=0.01**2, C2=0.03**2):
    """
    Taken from Po-Hsun-Su/pytorch-ssim

    Computes SSIM between two image batches using a Gaussian window.
    C1/C2 are the standard SSIM stabilization constants.  Note: the window
    is built with .cuda(), so inputs must be CUDA tensors.
    """
    channel = X_a.size(1)

    def gaussian(sigma=1.5):
        # 1-D Gaussian of length window_size centered on the window.
        gauss = torch.Tensor(
            [math.exp(-(x - window_size // 2) **
                      2 / float(2 * sigma ** 2)) for x in range(window_size)])
        return gauss / gauss.sum()

    def create_window():
        # NOTE(review): gaussian() is called with window_size as *sigma*;
        # the upstream implementation uses sigma=1.5 -- verify intended.
        _1D_window = gaussian(window_size).unsqueeze(1)
        _2D_window = _1D_window.mm(
            _1D_window.t()).float().unsqueeze(0).unsqueeze(0)
        window = torch.Tensor(
            _2D_window.expand(channel, 1, window_size,
                              window_size).contiguous())
        return window.cuda()

    window = create_window()

    # Per-channel local means via depthwise (grouped) convolution.
    mu1 = torch.nn.functional.conv2d(X_a, window,
                                     padding=window_size // 2, groups=channel)
    mu2 = torch.nn.functional.conv2d(X_b, window,
                                     padding=window_size // 2, groups=channel)

    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1 * mu2

    # Local variances and covariance (E[x^2] - E[x]^2 form).
    sigma1_sq = torch.nn.functional.conv2d(
        X_a * X_a, window, padding=window_size // 2, groups=channel) - mu1_sq
    sigma2_sq = torch.nn.functional.conv2d(
        X_b * X_b, window, padding=window_size // 2, groups=channel) - mu2_sq
    sigma12 = torch.nn.functional.conv2d(
        X_a * X_b, window, padding=window_size // 2, groups=channel) - mu1_mu2

    ssim_map = (((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) /
                ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)))

    if size_average:
        return ssim_map.mean()
    else:
        return ssim_map.mean(1).mean(1).mean(1)
| [
"jacopo.bonato@iit.it"
] | jacopo.bonato@iit.it |
2d317cdf5e42c823bdc120acb47c51b94c18b40b | fbca8558e1d733dcf9b3894e344cdfda8aa34e3f | /ponteRolante/fatorDistribuicaoCarga.py | 6db700ccbf94be9f10d3e452092e2ffa945ef8dd | [
"MIT"
] | permissive | ZaqueuCavalcante/Gears | 5d0def80e30fedb1bf1002a3ad8271004dde5355 | 8a4cf7fb4c5d145c8ae9424f9eb9608596edf15e | refs/heads/master | 2023-01-11T22:41:45.511293 | 2020-11-16T16:07:17 | 2020-11-16T16:07:17 | 291,861,711 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,199 | py | from enum import Enum
class C_mc_TIPO(Enum):
    """Lead-correction factor C_mc: 1.0 for uncrowned teeth, 0.8 for crowned."""
    dentesNaoCoroados = 1.00
    dentesCoroados = 0.80
def C_pf(larguraDeFace, diametroPrimitivo):
    """Pinion proportion factor C_pf (AGMA load-distribution term).

    Piecewise in the face width F: one correction for F <= 1, a linear one
    up to F = 17, and a quadratic term beyond that.  All branches share the
    F / (10 * d) base term.
    """
    face_width = larguraDeFace
    base_term = face_width / (10 * diametroPrimitivo)
    if face_width <= 1:
        correction = -0.025
    elif face_width <= 17:
        correction = -0.0375 + 0.0125 * face_width
    else:
        correction = -0.1109 + 0.0207 * face_width - 0.000228 * face_width ** 2
    return base_term + correction
class C_pm_TIPO(Enum):
    """Pinion proportion modifier C_pm: 1.0 adjacent bearings, 1.1 otherwise."""
    mancaisImediatamenteAdjacentes = 1.00
    mancaisNaoAdjacentes = 1.10
class C_ma_TIPO(Enum):
    """Gearing quality class used to pick mesh-alignment coefficients in C_ma()."""
    engrenamentoAberto = 0
    fechadasComerciais = 1
    fechadasDePrecisao = 2
    fechadasExtraprecisas = 3
def C_ma(C_ma_TIPO, larguraDeFace):
    """Mesh alignment factor: A + B*F + C*F^2 with empirical coefficients.

    The coefficient set (A, B, C) is chosen by the gearing quality class
    passed as the first argument (an enum member; its .value selects the
    row of the AGMA table).
    """
    face_width = larguraDeFace
    quality = C_ma_TIPO.value
    if quality == 0:
        coeffs = (0.247, 0.0167, -0.765E-4)
    elif quality == 1:
        coeffs = (0.127, 0.0158, -0.930E-4)
    elif quality == 2:
        coeffs = (0.0675, 0.0128, -0.926E-4)
    elif quality == 3:
        coeffs = (0.00360, 0.0102, -0.822E-4)
    a, b, c = coeffs
    return a + b * face_width + c * face_width ** 2
class C_e_TIPO(Enum):
    """Mesh alignment correction C_e: 0.8 for adjusted gearing, 1.0 otherwise."""
    engrenamentoAjustado = 0.80
    outrasCondicoes = 1.00
def fatorDistribuicaoCarga(C_mc, C_pf, C_pm, C_ma, C_e):
    """Load-distribution factor: 1 + C_mc * (C_pf * C_pm + C_ma * C_e)."""
    alignment_term = C_pf * C_pm + C_ma * C_e
    return 1 + C_mc * alignment_term
| [
"zaqueudovale@gmail.com"
] | zaqueudovale@gmail.com |
f85a8541f0cd6aa396af64f52e86d47c7bff5e5e | 6d2fa5bc2e8798bf4c85d618b32282e830850c7d | /contactbook.py | 7008d2972b7386ba9f5ac63e9fc13e350cd86865 | [] | no_license | ChandraPrakashBanda/Contactbook | f09f00c24b8865cb94880bc2258200e631a6a8ff | 9cfed8c522eded2e04a74dd2d77be49189d07291 | refs/heads/master | 2022-11-30T21:08:27.747110 | 2020-08-01T10:26:36 | 2020-08-01T10:26:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,824 | py | from tkinter import *
from tkinter import Tk
#creation of main dialogue box
# Main window: a search field plus buttons for adding and finding contacts.
root=Tk()
root.title("Contacts book")
root.geometry("400x400+50+50")
readablecontacts=StringVar()
def Addcontact():
    """Open a secondary window to capture and save a new contact."""
    #new contacts dialogue box entries and buttons
    root1=Tk()
    root1.title("new contacts")
    root1.geometry("200x200+20+20")
    def save():
        """Append the entered name and number to contactlist.txt."""
        #saving in a file
        file1=open("contactlist.txt","a")
        file1.write("\n")
        file1.writelines(new_name.get())
        file1.write("\n")
        file1.writelines(new_number.get())
        file1.write("\n")
        file1.close()
        Label2=Label(root1,text="Contact saved")
        Label2.pack()
    #taking entries for new contact
    new_name=Entry(root1)
    new_name.pack()
    new_number=Entry(root1)
    new_number.pack()
    btn3=Button(root1,text="save contact",command=save)
    btn3.pack()
    root1.mainloop()
#check contacts dialogue box buttons and entries
def checkcontact():
    """Search contactlist.txt for the queried name and show name + number."""
    file1=open("contactlist.txt","r")
    readablecontacts=file1.readlines()
    file1.close()
    for i in range(0,len(readablecontacts)):
        if(str(query.get()+"\n")==readablecontacts[i]):
            # NOTE(review): i+1 raises IndexError when the match is the
            # file's last line (name with no following number) -- verify.
            label4=Label(root,text=str(readablecontacts[i])+str(readablecontacts[i+1]))
            label4.pack()
#main dialogue box buttons
btn1=Button(root,text="Add contact",command=Addcontact)
btn1.pack()
query=Entry(root)
query.pack()
btn4=Button(root,text="Show",command=checkcontact)
btn4.pack()
root.mainloop()
root.mainloop() | [
"noreply@github.com"
] | noreply@github.com |
d2d30ed9ef98512ff7d30f5c9754fa535c698414 | c838c53ec5de94af57696f11db08f332ff2a65d8 | /mission/migrations/0121_picklist_pick_order.py | 042a0e8203c7fedb1905237711b5f99b55874583 | [] | no_license | memobijou/erpghost | 4a9af80b3c948a4d7bb20d26e5afb01b40efbab5 | c0ee90718778bc2b771b8078d9c08e038ae59284 | refs/heads/master | 2022-12-11T14:47:59.048889 | 2019-01-28T02:30:40 | 2019-01-28T02:30:40 | 113,774,918 | 1 | 1 | null | 2022-11-22T02:02:41 | 2017-12-10T18:53:41 | Python | UTF-8 | Python | false | false | 557 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-08-18 14:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds an optional pick_order FK to PickList."""

    dependencies = [
        ('mission', '0120_pickorder'),
    ]

    operations = [
        migrations.AddField(
            model_name='picklist',
            name='pick_order',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='mission.PickOrder'),
        ),
    ]
| [
"mbijou@live.de"
] | mbijou@live.de |
4b0e4be71217535a3b023b8cd57fa3de00fa5b98 | a4440a990b86a239a30b4295661ca588db3f5928 | /src/knn/digital_recognition.py | 5411be618eb5bc4fae3f1b70b129b3cbdb7ead0f | [] | no_license | YangXinNewlife/MachineLearning | fdaa1f75b90c143165d457b645d3c13fee7ea9a1 | 196ebdc881b74c746f63768b7ba31fec65e462d5 | refs/heads/master | 2020-04-05T00:10:25.050507 | 2019-06-10T03:44:33 | 2019-06-10T03:44:33 | 156,386,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,377 | py | # -*- coding:utf-8 -*-
__author__ = 'yangxin_ryan'
from numpy import *
from os import listdir
from collections import Counter
import operator
"""
图片的输入为 32 * 32的转换为 1 * 1024的向量
"""
class DigitalRecognition(object):
    """kNN classifier for handwritten digits stored as 32x32 text bitmaps."""

    def __init__(self):
        print("Welcome, 手写数字识别算法!")

    def classify_1(self, in_x, data_set, labels, k):
        """Classify `in_x` by majority vote among its k nearest neighbors.

        Distances: tile the query against every training row, take the
        Euclidean norm of the differences, then count labels of the k
        closest rows.

        FIX: the original returned the whole sorted (label, count) list,
        so run()'s comparison with the true label never matched; now the
        winning label itself is returned.
        """
        data_set_size = data_set.shape[0]
        diff_mat = tile(in_x, (data_set_size, 1)) - data_set
        sq_diff_mat = diff_mat ** 2
        sq_distances = sq_diff_mat.sum(axis=1)
        distances = sq_distances ** 0.5
        sorted_dist_indicies = distances.argsort()
        class_count = {}
        for i in range(k):
            vote_i_label = labels[sorted_dist_indicies[i]]
            class_count[vote_i_label] = class_count.get(vote_i_label, 0) + 1
        sorted_class_count = sorted(class_count.items(), key=operator.itemgetter(1), reverse=True)
        # Most common label among the k nearest neighbors.
        return sorted_class_count[0][0]

    def classify_2(self, in_x, data_set, labels, k):
        """Compact kNN variant using Counter for the majority vote.

        FIX: the original called np.sum, but this module does
        `from numpy import *` and never binds `np`, so the method raised
        NameError; the ndarray .sum method is used instead.
        """
        dist = ((in_x - data_set) ** 2).sum(axis=1) ** 0.5
        k_labels = [labels[index] for index in dist.argsort()[0:k]]
        label = Counter(k_labels).most_common(1)[0][0]
        return label

    def file_to_matrix(self, file_name):
        """Parse a tab-separated file into a (n, 3) feature matrix and labels.

        Each line holds three features and a trailing integer class label.
        Reads the file once with a context manager (the original opened it
        twice and never closed either handle).
        """
        with open(file_name) as fr:
            lines = fr.readlines()
        return_mat = zeros((len(lines), 3))
        class_label_vector = []
        for index, line in enumerate(lines):
            list_from_line = line.strip().split("\t")
            return_mat[index, :] = list_from_line[0:3]
            class_label_vector.append(int(list_from_line[-1]))
        return return_mat, class_label_vector

    def img_to_vector(self, file_name):
        """Flatten a 32x32 text bitmap file into a 1x1024 numpy vector."""
        return_vector = zeros((1, 1024))
        with open(file_name, 'r') as fr:
            for i in range(32):
                line_str = fr.readline()
                for j in range(32):
                    return_vector[0, 32 * i + j] = int(line_str[j])
        return return_vector

    def run(self, train_file_path, test_file_path, k):
        """Train on every file in train_file_path, evaluate on test_file_path.

        File names are expected as "<digit>_<index>.txt"; the digit before
        the underscore is the class label.  Prints misclassified files and
        the overall error rate.
        """
        labels = []
        training_file_list = listdir(train_file_path)
        train_len = len(training_file_list)
        training_mat = zeros((train_len, 1024))
        for i in range(train_len):
            file_name_str = training_file_list[i]
            file_str = file_name_str.split(".")[0]
            class_num_str = int(file_str.split("_")[0])
            labels.append(class_num_str)
            img_file = train_file_path + file_name_str
            print(img_file)
            training_mat[i] = self.img_to_vector(img_file)
        test_file_list = listdir(test_file_path)
        error_count = 0.0
        test_len = len(test_file_list)
        for i in range(test_len):
            file_name_str = test_file_list[i]
            file_str = file_name_str.split(".")[0]
            class_num_str = int(file_str.split("_")[0])
            test_file_img = test_file_path + file_name_str
            vector_under_test = self.img_to_vector(test_file_img)
            classifier_result = self.classify_1(vector_under_test, training_mat, labels, k)
            if classifier_result != class_num_str:
                print(file_name_str)
                error_count += 1.0
        print("\nthe total number of errors is: %d" % error_count)
        print("\nthe total error rate is: %f" % (error_count / float(test_len)))
if __name__ == '__main__':
    # NOTE(review): hard-coded absolute user paths and k=6 -- only runs on
    # the original author's machine; consider CLI arguments.
    digital_recognition = DigitalRecognition()
    digital_recognition.run("/Users/yangxin_ryan/PycharmProjects/MachineLearning/data/knn/trainingDigits/",
                            "/Users/yangxin_ryan/PycharmProjects/MachineLearning/data/knn/testDigits/",
                            6)
"yangxin03@youxin.com"
] | yangxin03@youxin.com |
dafd07934584a8b0a01a6f539c70656fbaa353a6 | 719f5ba07da760b300a4199d351d2699064676dc | /CaesarCipher/CaesarDecrypt/CBD.py | e1cf33a2bf616862a7d18db7fa617e60533847d8 | [
"MIT"
] | permissive | Bedrock02/General-Coding | 74a3aafee06f076830bcee7ed9f4f59f4349d362 | 803ac758bd0d23f8eefdb84045f219aba6f4936b | refs/heads/master | 2021-03-12T23:55:31.080105 | 2014-03-01T04:50:34 | 2014-03-01T04:50:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | from BinarytoCharacter import BinaryToChar
from CaesarCharShiftDecoder import CaesarCharShiftDecoder
def CBD():
    """Prompt for a space-separated binary ciphertext and a Caesar key,
    then print the decoded characters (Python 2: raw_input/print statement).
    """
    # NOTE(review): "sentance" typo lives in the user-facing prompt string.
    cipherText = raw_input("Enter the encoded binary sentance. Binary numbers should be separated with a space ")
    cipherKey = int(raw_input("What is the key?"))
    # Convert each binary token to a character, then shift-decode it.
    cipherText2 = BinaryToChar(cipherText)
    for i in cipherText2:
        print CaesarCharShiftDecoder(i,cipherKey),

if __name__ == '__main__':
    CBD()
"JimSteve91@gmal.com"
] | JimSteve91@gmal.com |
180c01d24bedbf9795f408f3f46de3e9ca5b44e8 | bd30f4127305915a6ea5cd8ef7ccca155f3c5b87 | /stock/quotes/urls.py | 2c559b3529b501a734310738a853cf8eb39368f2 | [] | no_license | rajathn8/IEX_Cloud_Django | 0cb06a1b21a3a3e80d9f897acd1c06549c01b61f | b21d23ef196d27d93cd0f82192c026b4945a6ea6 | refs/heads/master | 2021-05-19T14:09:42.310302 | 2020-03-31T21:40:55 | 2020-03-31T21:40:55 | 251,745,986 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | # create this for every app you create
from django.urls import path , include
from . import views
urlpatterns = [
path('',views.home,name='home'),
path('about.html',views.about,name='about'),
path('add_stock.html',views.add_stock,name='add_stock'),
path('delete/<stock_id>',views.delete,name='delete'),
path('delete_stock.html>',views.delete_stock,name='delete_stock'),
]
# url patters is a list | [
"rajath@nextorbite.com"
] | rajath@nextorbite.com |
89fbe9abb7d15ea2b5ba64aa8969ea42145c8590 | 4ac0c342a883ea2aecf2b70a401f724ec639977f | /userserver/setup.py | d4a3bf820f9ee31b0661d6a3c835f616f40c767d | [
"MIT"
] | permissive | nh-99/UMaine-Service-API | 3dbdfb383e6042eff811747f13d0ef5cac4cba38 | 7f231d69a9bb3242cc3f0bf839014de4b477e824 | refs/heads/master | 2020-04-06T06:59:12.018275 | 2016-08-28T15:02:28 | 2016-08-28T15:02:28 | 65,781,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | from setuptools import find_packages, setup
# Package metadata for the user/auth-key service; runtime deps are the
# Flask stack listed in install_requires.
setup(name="umaineapi_userserver",
      version = "0.1",
      description = "A service for storing authentication keys for all your services",
      author = "Noah Howard",
      platforms = ["any"],
      license = "BSD",
      packages = find_packages(),
      install_requires = ["flask", "requests", "flask-mysqldb", "flask-sqlalchemy", "flask-cors" ],
      )
| [
"ifence9@gmail.com"
] | ifence9@gmail.com |
ab81d46cb9f8931d361ef27504b8ed0ea3f38f14 | 4e2eca922ce7dfecce13887d3a949bfb379fb14f | /Router/Router_interface.py | af5314d987bb30abaaf3cd0a00a547c94d7a2ad2 | [] | no_license | ZYJ-33/Python-web | e580ac448d9ee9cbec31fef98c18f4a10dde232d | f92a7ee4dd932159b418e9614606bd9b409a7623 | refs/heads/master | 2020-05-03T16:18:22.650764 | 2019-03-31T18:14:30 | 2019-03-31T18:14:30 | 178,720,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | from Router.User import login
from Router.User import register
from Router.Router_basic_func import get_index
from Router.TW import TW_index
from Router import check_login2
from Router.TW import TW_add
from Router.TW import TW_all
from Router.TW import TW_del
from Router.TW import TW_edit
from Router.Comment import TW_comment
from Router.Comment import TW_comment_all
from Router.Comment import TW_comment_add
from Router.Comment import TW_comment_del
# Maps request paths to handler functions. Handlers wrapped in check_login2
# require an authenticated session before they run; the first three entries
# are public.
Router_dict = {
    '/login': login,
    '/register': register,
    '/': get_index,
    '/TW': check_login2(TW_index),
    '/TW/add': check_login2(TW_add),
    '/TW/all': check_login2(TW_all),
    '/TW/del': check_login2(TW_del),
    '/TW/edit': check_login2(TW_edit),
    '/TW/comment': check_login2(TW_comment),
    # NOTE(review): '/TW/Comment/all' has a capital 'C', unlike its
    # '/TW/comment/...' siblings — looks like a typo, but the route string is
    # part of the public URL space; confirm against callers before changing.
    '/TW/Comment/all':check_login2(TW_comment_all),
    '/TW/comment/add':check_login2(TW_comment_add),
    '/TW/comment/del':check_login2(TW_comment_del),
    }
| [
"noreply@github.com"
] | noreply@github.com |
a6ee9f96f7e5f35168645f57fbe94fa762dba675 | 28ef27ad66a974fe288ea821d76514c8ea14294f | /experiments/num_rules/fancy.py | d1b0e80d4a7254e339c684d85289668397a6dd12 | [] | no_license | csmith49/godel | 3f34b7dc0b13e6d1098f42f362afe19d4053ed7a | 11bbb2873f9d85ef53918bab51e5c85e8d7afea9 | refs/heads/master | 2021-01-19T01:16:42.800605 | 2018-11-30T03:53:09 | 2018-11-30T03:53:09 | 87,236,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,566 | py | # sadly, requires python2.7 (or just not python3)
from __future__ import division
import seaborn as sns
import numpy as np
from argparse import ArgumentParser
import csv
# Command-line interface: which benchmark to plot, which traversal strategy's
# data files to load, and an optional output image path.
parser = ArgumentParser("Graph generation for CSV files.")
parser.add_argument("-b", "--benchmark")
parser.add_argument("-t", "--topdown", default=False, action="store_true")  # load the "td" files instead of "bu"
parser.add_argument("-o", "--output", default="")  # save the figure here; empty string means show interactively
args = parser.parse_args()
# load up the file
def load_data(filename):
    """Parse a tab-separated file of numbers and return its columns.

    Every row of ``filename`` is converted to floats, then the rows are
    transposed so the result is a list of column series (one list per
    column), ready to be handed to the plotting calls below.
    """
    rows = []
    with open(filename, "r") as handle:
        for record in csv.reader(handle, delimiter="\t"):
            rows.append([float(field) for field in record])
    return [list(column) for column in zip(*rows)]
# Pick which strategy's data files to load: top-down ("td") or bottom-up ("bu").
if args.topdown:
    strat = "td"
else:
    strat = "bu"
# Current implementation vs. the old one, for the same benchmark/strategy.
bu = load_data("data/{}_{}.csv".format(args.benchmark, strat))
bus = load_data("data/{}_{}_old.csv".format(args.benchmark, strat))
# Seaborn styling: enlarged fonts for publication-quality figures.
rc={'axes.labelsize': 28, 'legend.fontsize': 24, 'axes.titlesize': 36, 'xtick.labelsize': 20, 'ytick.labelsize': 20}
sns.set(style="white", rc=rc)
# Overlay both series on one axes (red = the first series).
bu_ax = sns.tsplot(bu, color="r" )
bus_ax = sns.tsplot(bus, ax=bu_ax)
# Relabel the x ticks to span 0..150 (the number-of-rules axis).
n = len(bu_ax.xaxis.get_ticklabels())
bu_ax.set_xticklabels(list(map(int, np.linspace(0, 150, n))))
# Axis labels.
bu_ax.set_xlabel("Number of rules")
bu_ax.set_ylabel("Time (seconds)")
# Legend and cleanup (despine removes the top/right axis borders).
sns.plt.legend([bu_ax, bus_ax], labels=["d-tree", "list"])
sns.despine()
# Save when -o was given, otherwise open an interactive window.
if args.output != "":
    sns.plt.savefig(args.output)
else:
    sns.plt.show()
| [
"cjsmith@cs.wisc.edu"
] | cjsmith@cs.wisc.edu |
d6695294bf174f800a81fecb2c4d9d46e5e6c956 | d94136866c45de06b8b67f10a5f989182256289f | /utils/keywords.py | 16b9b3853b218c86ccf6afc43540d67f396b5e6f | [
"MIT"
] | permissive | ShaderOX/tweet-related-articles | db3e601e317e1bc606a894d6381ed05753bf4a6e | 06579dab516d419662cc0b979e7a8108a7cef06c | refs/heads/main | 2023-01-24T15:42:07.015251 | 2020-12-09T04:47:35 | 2020-12-09T04:47:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | from rake_nltk import Rake
from typing import List
def extract(text: str, num_phrases: int = 3) -> List[str]:
    """Return the top ``num_phrases`` key phrases of ``text``.

    Phrases are scored and ranked by RAKE; the highest-ranked phrases
    come first in the returned list.
    """
    rake = Rake()
    rake.extract_keywords_from_text(text)
    return rake.get_ranked_phrases()[:num_phrases]
| [
"saad.521509@gmail.com"
] | saad.521509@gmail.com |
4783e674493d75dfdd59a6f0cfbc585001f25b5f | 693f12aec010f7080faa34f1f310039386895c59 | /all/chapter_03/Number_13.py | 55f9a9c42b49975d1f5e71e4be0febb54b507aed | [
"MIT"
] | permissive | sts-sadr/cvhub | 281dbe4979adb824392358ae5369da3f6bb9207c | 8d986c8efacc634848c83d7d4cb14e8ab7f120d6 | refs/heads/main | 2023-04-22T16:21:26.573772 | 2021-05-20T04:20:09 | 2021-05-20T04:20:09 | 371,353,277 | 1 | 0 | MIT | 2021-05-27T11:43:10 | 2021-05-27T11:43:09 | null | UTF-8 | Python | false | false | 1,425 | py | """
Noise reduction using smoothing and blurring
Bilateral blurring:
The previous three blurring techniques yield blurred images with the side effect that we
lose the edges in the image. To blur an image while preserving the edges, we use bilateral
blurring, which is an enhancement over Gaussian blurring. Bilateral blurring takes two
Gaussian distributions to perform the computation. The first Gaussian function considers
the spatial neighbors (pixels in x and y space that are close together). The second Gaussian
function considers the pixel intensity of the neighboring pixels. This makes sure that only
those pixels that are of similar intensity to the central pixel are considered for blurring,
leaving the edges intact as the edges tend to have higher intensity compared to other pixels.
"""
from cv2 import imread, imshow, waitKey, bilateralFilter
# Load the noisy input image and display it for comparison.
noisy_image = imread("images/salt_and_pepper_noise.jpg")
imshow("Noisy Image", noisy_image)
# Apply bilateral filtering with two neighbourhood diameters.
# bilateralFilter(src, d, sigmaColor, sigmaSpace): d is the pixel
# neighbourhood diameter; sigmaColor/sigmaSpace control how far apart
# intensities and positions may be while still being averaged together.
filtered_image_5 = bilateralFilter(noisy_image, 5, 150, 50)
imshow("Blurred Image with diameter 5", filtered_image_5)
# Larger diameter (and sigmas): stronger smoothing.
filtered_image_7 = bilateralFilter(noisy_image, 7, 160, 60)
imshow("Blurred Image with diameter 7", filtered_image_7)
# Block until a key is pressed so the windows stay open.
waitKey(0)
| [
"EverLookNeverSee@ProtonMail.ch"
] | EverLookNeverSee@ProtonMail.ch |
b182c99ff388cf77aaac8b8dc748cb729a7f377b | 561937ef8f04f58ff15abd86223da01a70f0ddc9 | /extra_classe/forms.py | a61b052932ef2fdafc689081cedc031906c0912d | [] | no_license | dem19/projeto_final | 854d5626cd59639070e496a9a5dad6856bfd8cad | ea681e2776d2dd9f4fb677b5d9eb7efed5a50d8c | refs/heads/master | 2023-04-30T11:53:32.572369 | 2019-07-04T00:52:05 | 2019-07-04T00:52:05 | 194,949,684 | 0 | 0 | null | 2023-04-21T20:34:43 | 2019-07-02T23:51:52 | Python | UTF-8 | Python | false | false | 610 | py | from django.forms import ModelForm
from .models import *
class Horariodisponivel(ModelForm):
    """ModelForm over Professor exposing name, subject, date and hour
    (presumably the slots a professor makes available — confirm in views)."""
    class Meta:
        model = Professor
        fields = ['nome_professor','disciplina','data', 'hora']
class Horariomarcado(ModelForm):
    """ModelForm over Atendimento with the same fields as Horariodisponivel:
    name, subject, date and hour (presumably a booked appointment)."""
    class Meta:
        model = Atendimento
        fields = ['nome_professor','disciplina','data', 'hora']
class Alunofazercomentario(ModelForm):
    """ModelForm over Comentario: student name, subject and questions."""
    class Meta:
        model = Comentario
        fields = ['nome_aluno','disciplina','duvidas']
class Alunodisponivelizar(ModelForm):
class Meta:
model = Aluno
fields = ['disciplina','nome','Confirmar']
| [
"dem19vieira@gmail.com"
] | dem19vieira@gmail.com |
ea8884ee217bc2ebacf21f538a754ef50ed61ba6 | b5d9ce70b77672f497bd1f474f3c01230931169f | /econobilidade/econobilidade/wsgi.py | 8e01bd3c1ce6d7414c64aed22433a5202ede9230 | [] | no_license | chemalle/fisconobilidade | a59ef6544f97195da637ba5ad458cf834e3e6030 | 2b9fdd000ce83e7e9927e85895b21d57c644af35 | refs/heads/master | 2021-07-02T17:19:54.864048 | 2017-09-21T20:03:08 | 2017-09-21T20:03:08 | 104,392,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | """
WSGI config for econobilidade project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before creating the application object
# (setdefault keeps any DJANGO_SETTINGS_MODULE already set in the env).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "econobilidade.settings")
# Module-level WSGI callable that WSGI servers import and call.
application = get_wsgi_application()
| [
"chemalle@econobilidade.com"
] | chemalle@econobilidade.com |
22fda8f90dfb1891959aef2b078086837855cbe6 | c1532f7e7ebb9f739144e75a92118c234008e74e | /utils/asgi.py | 50e0875055350047d50ea422999ae1e537f30808 | [] | no_license | GitJulie/ProjectSpeech | f6955b7d5eab93f8ca45b143865455748319970e | fd8ce2a0dddc5ff6d73f4bdd83a10a37aae2d15f | refs/heads/master | 2022-12-29T13:22:53.941742 | 2020-10-08T08:56:11 | 2020-10-08T08:56:11 | 273,981,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | """
ASGI config for utils project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before creating the application object
# (setdefault keeps any DJANGO_SETTINGS_MODULE already set in the env).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'utils.settings')
# Module-level ASGI callable that ASGI servers import and call.
application = get_asgi_application()
| [
"julie.montagne@esme.fr"
] | julie.montagne@esme.fr |
ae0b5e5cbc1a6dd99ee3a76b11137d30575e083a | 4ede26255cf96083499db696869aa1a9b5412bfb | /lesson4_step8.py | c429360a3c190ba5acee45d96886f35619df7faa | [] | no_license | CaerDarrow/stepik---auto-tests-course | 813caaf3f87f8424094c1a14ffbabce74912552d | 88fe13838cef57199e909555ae154f98584edd04 | refs/heads/master | 2020-07-20T00:12:34.421396 | 2019-09-05T10:41:02 | 2019-09-05T10:41:02 | 206,537,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
import time
import math
def calc(x):
    """Compute the page's math challenge: str(log(|12 * sin(x)|)).

    ``x`` is coerced to int first, because it arrives as the text of the
    page's #input_value element.
    """
    result = math.log(abs(12 * math.sin(int(x))))
    return str(result)
link = 'http://suninjuly.github.io/explicit_wait2.html'
try:
    browser = webdriver.Chrome()
    browser.get(link)
    # Explicit wait: poll for up to 13 seconds until the #price element's
    # text becomes '100'.
    WebDriverWait(browser, 13).until(
        EC.text_to_be_present_in_element((By.ID, "price"), '100')
    )
    browser.find_element_by_id('book').click()
    # Read the challenge value, compute the answer with calc(), submit it.
    x = browser.find_element_by_id('input_value').text
    browser.find_element_by_id('answer').send_keys(calc(x))
    browser.find_element_by_id('solve').click()
finally:
    # Long pause so the result stays visible before the browser is closed.
    # NOTE(review): if webdriver.Chrome() itself raises, `browser` is never
    # bound and the browser.quit() that follows will raise NameError.
    time.sleep(100)
browser.quit() | [
"ajon-hol@il-h3.21-school.ru"
] | ajon-hol@il-h3.21-school.ru |
c6beb75082885391bc95b1891a36e80e937a4666 | 642151dff23fff48310139ddc9b89c8bf6a670e3 | /app/base/routes.py | 14e706ea47230919be667a314346e7ccf0af73e5 | [] | no_license | gemicn/flask-navigation | 7ad371e0ac8220c14687f02b130707bf89c81553 | 382940492ca6cae41da44d30fb78c9535e8de955 | refs/heads/master | 2022-09-03T22:51:41.920901 | 2020-05-26T01:37:26 | 2020-05-26T01:37:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | from bcrypt import checkpw
from flask import jsonify, render_template, redirect, request, url_for
from app.base import blueprint
@blueprint.route('/')
def route_default():
    """Redirect the site root to the navigation blueprint's index page."""
    return redirect(url_for('nav_blueprint.index'))
| [
"ntuwang@126.com"
] | ntuwang@126.com |
39119f47e5635e12337d7a2f14b7994e7b2e448f | 5a6724ac8086ad79ae742f3c138adfa0e924fb24 | /rallytap/apps/auth/migrations/0049_auto_20151204_1804.py | bec0208340a424ccc85e5f526bc25682c91dfbbf | [] | no_license | mkolodny/down-server | 2295b6fdf3b6d254dadf39866703e92e0deba7c3 | 76ec18ba478b02b5abdce48e8c1c1e24b3309ae5 | refs/heads/master | 2020-04-09T14:18:08.781862 | 2015-12-15T03:22:11 | 2015-12-15T03:22:11 | 31,437,710 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Rename User.last_posted to User.last_post_notification."""

    # Must run after the migration that introduced the field being renamed.
    dependencies = [
        ('down_auth', '0048_user_last_posted'),
    ]

    operations = [
        migrations.RenameField(
            model_name='user',
            old_name='last_posted',
            new_name='last_post_notification',
        ),
    ]
| [
"michaelckolodny@gmail.com"
] | michaelckolodny@gmail.com |
d5764cbe2038ace864d954163ed2945eebe38507 | b20c902316ea121cdc8b98ff40c8dfae1d5ef3db | /models/engine/db_storage.py | d5f0797e54818127b0ef455d0ea35213a90ec948 | [] | no_license | danielchk/MyAir | f9b8b909d15bbd9075c7b9e124a814fa4022a76f | 02133f367b297c3999aee55ec9875b4609db6f7f | refs/heads/master | 2023-02-22T03:40:15.242585 | 2021-01-21T17:09:35 | 2021-01-21T17:09:35 | 321,668,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,602 | py | #!/usr/bin/python3
"""
Contains the class DBStorage
"""
import models
from models.amenity import Amenity
from models.base_model import BaseModel, Base
from models.city import City
from models.place import Place
from models.review import Review
from models.state import State
from models.user import User
from os import getenv
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
# Name -> model class lookup used by DBStorage.all() to iterate every mapped
# model; the names are also the prefixes of the "<ClassName>.<id>" dict keys.
classes = {"Amenity": Amenity, "City": City,
           "Place": Place, "Review": Review, "State": State, "User": User}
class DBStorage:
    """Interacts with the MySQL database through SQLAlchemy."""

    # Engine and session, shared by all methods; set up in __init__/reload().
    __engine = None
    __session = None

    def __init__(self):
        """Instantiate a DBStorage object.

        Connection parameters come from the HBNB_MYSQL_* environment
        variables; when HBNB_ENV is "test" all tables are dropped so each
        test run starts from a clean schema.
        """
        HBNB_MYSQL_USER = getenv('HBNB_MYSQL_USER')
        HBNB_MYSQL_PWD = getenv('HBNB_MYSQL_PWD')
        HBNB_MYSQL_HOST = getenv('HBNB_MYSQL_HOST')
        HBNB_MYSQL_DB = getenv('HBNB_MYSQL_DB')
        HBNB_ENV = getenv('HBNB_ENV')
        self.__engine = create_engine('mysql+mysqldb://{}:{}@{}/{}'.
                                      format(HBNB_MYSQL_USER,
                                             HBNB_MYSQL_PWD,
                                             HBNB_MYSQL_HOST,
                                             HBNB_MYSQL_DB))
        if HBNB_ENV == "test":
            Base.metadata.drop_all(self.__engine)

    def all(self, cls=None):
        """Query the current database session and return a dict of objects.

        cls may be None (every model), a model class, or the exact string
        key stored in ``classes``. Results are keyed "<ClassName>.<id>".
        Note: the string case uses ``is`` (identity), so it matches the
        interned literal keys rather than arbitrary equal strings.
        """
        new_dict = {}
        for clss in classes:
            if cls is None or cls is classes[clss] or cls is clss:
                objs = self.__session.query(classes[clss]).all()
                for obj in objs:
                    key = obj.__class__.__name__ + '.' + obj.id
                    new_dict[key] = obj
        return (new_dict)

    def new(self, obj):
        """Add the object to the current database session."""
        self.__session.add(obj)

    def save(self):
        """Commit all changes of the current database session."""
        self.__session.commit()

    def delete(self, obj=None):
        """Delete obj from the current database session if not None."""
        if obj is not None:
            self.__session.delete(obj)

    def reload(self):
        """(Re)create all tables and install a fresh scoped session."""
        Base.metadata.create_all(self.__engine)
        sess_factory = sessionmaker(bind=self.__engine, expire_on_commit=False)
        Session = scoped_session(sess_factory)
        self.__session = Session

    def close(self):
        """call remove() method on the private session attribute"""
self.__session.remove() | [
"hollowdaniel1@gmail.com"
] | hollowdaniel1@gmail.com |
22673e5c1820b8646b55bf630652d58b49177ef8 | bbf025a5f8596e5513bd723dc78aa36c46e2c51b | /dfs + tree/100 sameTree.py | 41ecd7912ef4878879d74277903c001435c1f6a2 | [] | no_license | AlanFermat/leetcode | 6209bb5cf2d1b19e3fe7b619e1230f75bb0152ab | cacba4abaca9c4bad8e8d12526336115067dc6a0 | refs/heads/master | 2021-07-11T04:00:00.594820 | 2020-06-22T21:31:02 | 2020-06-22T21:31:02 | 142,341,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | from binaryTree import Node
# Two identical fixture trees:   1         1
#                               / \       / \
#                              2   3     2   3
t1 = Node(1)
t1.left = Node(2)
t1.right = Node(3)
t2 = Node(1)
t2.left = Node(2)
t2.right = Node(3)
def treeToList(t):
    """Flatten a binary tree into a list of values in pre-order.

    Returns [] for an empty tree. Note that a pre-order listing alone does
    not determine a tree's shape (see isSame for a structural comparison).
    """
    if t is None:  # idiom fix: compare against None with `is`, not `==`
        return []
    return [t.val] + treeToList(t.left) + treeToList(t.right)
def isSameTree(t, s):
    """Return True iff t and s are structurally identical with equal values.

    BUG FIX: the original compared the two pre-order flattenings produced by
    treeToList, which conflates differently-shaped trees — e.g. a root whose
    only child hangs left flattens to the same list as one whose only child
    hangs right. Compare node by node instead.
    """
    if t is None and s is None:
        return True
    if t is None or s is None:
        return False
    return (t.val == s.val
            and isSameTree(t.left, s.left)
            and isSameTree(t.right, s.right))
def isSame(t, s):
    """Structural equality: same shape and same values everywhere."""
    if not t and not s:
        return True
    if not (t and s):
        return False
    if t.val != s.val:
        return False
    return isSame(t.left, s.left) and isSame(t.right, s.right)
print (isSame(t1,t2)) | [
"zy19@rice.edu"
] | zy19@rice.edu |
6f56337036667d28f57e911f1ecefc481266020d | 3730c2adf5e71eb356e7573973725e427a5ca295 | /controle_financeiro/views/permissaoView.py | 3cfbca2136d6f400ba76b301645972f8a7823dcb | [] | no_license | MatheusCustodio/controle_finnceiro_TCOI5 | 1b32824712b36b1c4269bc5261cc05d38e19a404 | 41890cc0671e2be12be4b90dce28b050f707d0d7 | refs/heads/master | 2020-05-23T09:46:13.081494 | 2019-07-03T19:21:49 | 2019-07-03T19:21:49 | 186,712,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,570 | py | from django.shortcuts import render, redirect, get_object_or_404
from ..forms import PermissaoForm
from ..models import PermissaoModel
def listar(request):
    """Show every permission group."""
    grupos = PermissaoModel.objects.all()
    return render(request, 'permissao/listar.html', {'form': grupos})
def inserir(request):
    """Create a permission group; redisplay the form until input is valid."""
    form = PermissaoForm(request.POST or None)
    if not form.is_valid():
        return render(request, 'permissao/novo.html', {'form': form})
    form.save()
    return redirect('listar_permissao')
def alterar(request, id):
    """Edit an existing permission group, looked up by grupo_id."""
    grupo = get_object_or_404(PermissaoModel, grupo_id=id)
    form = PermissaoForm(request.POST or None, instance=grupo)
    if not form.is_valid():
        return render(request, 'permissao/novo.html', {'form': form})
    form.save()
    return redirect('listar_permissao')
def remover(request, id):
    """Confirm (GET) and perform (POST) deletion of a permission group."""
    grupo = get_object_or_404(PermissaoModel, grupo_id=id)
    if request.method == 'POST':
        grupo.delete()
        return redirect('listar_permissao')
    form = PermissaoForm(request.POST or None, instance=grupo)
    return render(request, 'permissao/novo.html', {'form': form})
def exibir(request, id):
    """Display one permission group; a POST simply returns to the list."""
    grupo = get_object_or_404(PermissaoModel, grupo_id=id)
    form = PermissaoForm(request.POST or None, instance=grupo)
    if request.method == 'POST':
        return redirect('listar_permissao')
    return render(request, 'permissao/novo.html', {'form': form})
| [
"matheus.custodio@oriongestao.com.br"
] | matheus.custodio@oriongestao.com.br |
3c2812664cb6e123f13dceb9f351e3e1875a9686 | 3072ff5f3396354b271da510f0aaadbe5d23f166 | /scripts/sklearn_training_GBT.py | 4d1c6a09ad236c9e0a5fa70d3eeaae4f84e06d35 | [] | no_license | kderoove/CSVv2ScikitLearn | 71156701f6490aa02f376e1687db6d86a30acf89 | 9ac7cbdfb4905b58e496f94116a88ee5c5b7eaba | refs/heads/master | 2021-01-10T08:53:02.675095 | 2016-11-30T13:28:38 | 2016-11-30T13:28:38 | 50,844,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,826 | py | import os
import re
import rootpy.io as io
import rootpy
import rootpy.tree as tr
from ROOT import TH1D
import numpy as np
np.set_printoptions(precision=5)  # compact numpy printing for log output
import root_numpy as rootnp
from sklearn.externals import joblib
# Module-level logger provided by rootpy.
log = rootpy.log["/toy_diagnostics"]
log.setLevel(rootpy.log.INFO)
#################################
# #
# Training #
# #
#################################
# Input features for the classifier: jet kinematics plus the CSV tagging
# branches (track- and secondary-vertex-based), as stored in the input trees.
variables = [
    'Jet_pt',
    'Jet_eta',
    'TagVarCSV_jetNTracks',
    'TagVarCSV_trackSip3dSig_0',
    'TagVarCSV_trackSip3dSig_1',
    'TagVarCSV_trackSip3dSig_2',
    'TagVarCSV_trackSip3dSig_3',
    'TagVarCSV_trackSip3dSigAboveCharm',
    'TagVarCSV_trackPtRel_0',
    'TagVarCSV_trackPtRel_1',
    'TagVarCSV_trackPtRel_2',
    'TagVarCSV_trackPtRel_3',
    'TagVarCSV_trackEtaRel_0',
    'TagVarCSV_trackEtaRel_1',
    'TagVarCSV_trackEtaRel_2',
    'TagVarCSV_trackEtaRel_3',
    'TagVarCSV_trackDeltaR_0',
    'TagVarCSV_trackDeltaR_1',
    'TagVarCSV_trackDeltaR_2',
    'TagVarCSV_trackDeltaR_3',
    'TagVarCSV_trackPtRatio_0',
    'TagVarCSV_trackPtRatio_1',
    'TagVarCSV_trackPtRatio_2',
    'TagVarCSV_trackPtRatio_3',
    'TagVarCSV_trackJetDist_0',
    'TagVarCSV_trackJetDist_1',
    'TagVarCSV_trackJetDist_2',
    'TagVarCSV_trackJetDist_3',
    'TagVarCSV_trackDecayLenVal_0',
    'TagVarCSV_trackDecayLenVal_1',
    'TagVarCSV_trackDecayLenVal_2',
    'TagVarCSV_trackDecayLenVal_3',
    'TagVarCSV_trackSumJetEtRatio',
    'TagVarCSV_trackSumJetDeltaR',
    'TagVarCSV_vertexMass',
    'TagVarCSV_vertexNTracks',
    'TagVarCSV_vertexEnergyRatio',
    'TagVarCSV_vertexJetDeltaR',
    'TagVarCSV_flightDistance2dSig',
    'TagVarCSV_jetNSecondaryVertices',
    ]
# One input ROOT file per (sample, vertex category, flavour) combination.
input_files = [i.strip() for i in open('../data_trees/inputs/qcd_flat.list')] #Make sure there are no empty lines in .list
flavors = ['B', 'C', 'DUSG']
sv_categories = ["RecoVertex", "PseudoVertex", "NoVertex"]
# File names look like ..._<category>_<FLAVOR>.root; parse both fields.
fname_regex = re.compile('[a-zA-Z_0-9\/]*\/?[a-zA-Z_0-9]+_(?P<category>[a-zA-Z]+)_(?P<flavor>[A-Z]+)\.root')
print 'Merging and converting the samples'
# Accumulators, grown file by file in the loop below.
X = np.ndarray((0,len(variables)),float) # container to hold the combined trees in numpy array structure
y = np.ones(0) # truth labels: signal (1) or background (0)
weights_flavour = np.ones(0) # per-jet weight by true flavour (filled below: B=2, C=1, DUSG=3)
weights_JetPtEta = np.ones(0) # per-jet weight from the pt/eta spectrum branch
weights_cat = np.ones(0) # per-jet weight from the vertex-category branch
weights = np.ones(0) # combined per-jet weight handed to the fit
for fname in input_files:
    log.info('processing file %s for training' % fname)
    with io.root_open(fname) as tfile:
        # Category and flavour are encoded in the file name.
        match = fname_regex.match(fname)
        if not match:
            raise ValueError("Could not match the regex to the file %s" % fname)
        flavor = match.group('flavor')
        full_category = match.group('category')
        category = [i for i in sv_categories if i in full_category][0]
        # if flavor == 'C':
        # log.info('Jet_flavour %s is not considered signal or background in this training and is omitted' % flavor)
        # continue
        nfiles_per_sample = None
        skip_n_events = 2 # put this to 1 to include all the events
        # Read the feature branches and append them to the global matrix.
        tree = rootnp.root2array(fname,'ttree',variables,None,0,nfiles_per_sample,skip_n_events,False,'weight')
        tree = rootnp.rec2array(tree)
        X = np.concatenate((X, tree),0)
        # Labels and flavour weights: B jets are signal (1); C and DUSG
        # jets are background (0) with different flavour weights.
        if flavor == "B":
            y = np.concatenate((y,np.ones(tree.shape[0])))
            weight_B = np.empty(tree.shape[0])
            weight_B.fill(2)
            weights_flavour = np.concatenate((weights_flavour,weight_B))
        elif flavor == "C":
            y = np.concatenate((y,np.zeros(tree.shape[0])))
            weight_C = np.empty(tree.shape[0])
            weight_C.fill(1)
            weights_flavour = np.concatenate((weights_flavour,weight_C))
        else:
            y = np.concatenate((y,np.zeros(tree.shape[0])))
            weight_DUSG = np.empty(tree.shape[0])
            weight_DUSG.fill(3)
            weights_flavour = np.concatenate((weights_flavour,weight_DUSG))
        # Getting the per-jet weight branches out of the same tree.
        weights_tree_JetPtEta = rootnp.root2array(fname,'ttree','weight_etaPt',None,0,nfiles_per_sample,skip_n_events,False,'weight')
        weights_JetPtEta = np.concatenate((weights_JetPtEta,weights_tree_JetPtEta),0)#Weights according to the JetPtEta
        weights_tree_cat = rootnp.root2array(fname,'ttree','weight_category',None,0,nfiles_per_sample,skip_n_events,False,'weight')
        weights_cat = np.concatenate((weights_cat,weights_tree_cat),0)#Weights according to the category
# Combined per-jet training weight: element-wise product of the kinematic
# (pt/eta), vertex-category and flavour weights.
# BUG FIX: the original call was
#     np.multiply(weights_JetPtEta, weights_cat, weights_flavour)
# whose third positional argument is numpy's *out* parameter — it stored
# JetPtEta*cat into weights_flavour, clobbering the flavour weights and
# leaving them out of the product entirely.
weights = weights_JetPtEta * weights_cat * weights_flavour
print 'Starting training'
import time
#from sklearn.ensemble import RandomForestClassifier
# Minimum fraction of the sample required to split an internal tree node.
min_frac_samples_split = 0.006
#clf = RandomForestClassifier(n_estimators=1000,min_samples_split = int(min_frac_samples_split*y.shape[0]), n_jobs = 5, verbose = 3)
from sklearn.ensemble import GradientBoostingClassifier
#clf = GradientBoostingClassifier(n_estimators=500, max_depth=15, min_samples_split=0.006*len(X), learning_rate=0.05)
clf = GradientBoostingClassifier(n_estimators=500,min_samples_split = int(min_frac_samples_split*y.shape[0]), learning_rate = 0.05, max_depth=15, verbose = 3)
start = time.time()
clf.fit(X, y,weights)  # third argument is sample_weight: the combined per-jet weights
end = time.time()
print 'training completed --> Elapsed time: ' , (end-start)/60 , 'minutes'
#training_file = './trainingFiles/MVATraining.pkl'
#print 'Dumping training file in: ' + training_file
#joblib.dump(clf, training_file,protocol = HIGHEST_PROTOCOL)
#######################################
## Converting to TMVA readable xml file
#######################################
import sklearn_to_tmva as convert
trainingWeights_TMVA = 'TMVAClassification_BDTG.weights.xml'
log.info('Dumping training file in: ' + trainingWeights_TMVA)
# *** Sklearn(python)-type training file (.pkl) ***
#joblib.dump(clf, trainingWeights_TMVA, compress=True)
# *** TMVA-style training file (.xml) ***
out_ext = (trainingWeights_TMVA).split('.')[-1]  # NOTE(review): computed but unused in the rest of this script
convert.gbr_to_tmva(clf,X,trainingWeights_TMVA,mva_name = "BDTG",coef = 10, var_names = variables)
#################################
# #
# Validation #
# #
#################################
#input_files = [i.strip() for i in open('data_trees/inputs/ttjets.list')]
# Validation sample: a different physics sample from the training one.
input_files = [i.strip() for i in open('../data_trees/inputs/ttjets.list')] #Make sure there are no empty lines in .list
pt_bins = [15, 40, 60, 90, 150, 400, 600]
eta_bins = [1.2, 2.1]
#flavors = ['C', 'B', 'DUSG']
#sv_categories = ["NoVertex", "PseudoVertex", "RecoVertex"]
fname_regex = re.compile('[a-zA-Z_0-9\/]*\/?[a-zA-Z_0-9]+_(?P<category>[a-zA-Z]+)_(?P<flavor>[A-Z]+)\.root')
# you can reload the training if needed (or if you only want to do a validation on an existing training)
# but it is much faster to use the still existing classifier from the training
'''
#training_file = './training_file/MVATraining.pkl'
print 'Loading training file from: ' + training_file
clf_val = joblib.load(training_file)
'''
clf_val = clf
for fname in input_files:
    log.info('processing file %s' % fname)
    with io.root_open(fname) as tfile:
        # Category and flavour are encoded in the file name (as in training).
        match = fname_regex.match(fname)
        if not match:
            raise ValueError("Could not match the regex to the file %s" % fname)
        flavor = match.group('flavor')
        full_category = match.group('category')
        category = [i for i in sv_categories if i in full_category][0]
        nfiles_per_sample = None
        skip_n_events = 50 # put this to 1 to include all the events
        # Evaluate the trained classifier on this sample; column 1 of
        # predict_proba is the probability of the label trained as 1 (B jets).
        X_val = rootnp.root2array(fname,'ttree',variables,None,0,nfiles_per_sample,skip_n_events,False,'weight')
        X_val = rootnp.rec2array(X_val)
        BDTG = clf_val.predict_proba(X_val)[:,1]
        # Copy a few bookkeeping branches next to the new BDTG score.
        Output_variables = ['Jet_flavour','TagVarCSV_vertexCategory','Jet_pt','Jet_eta','Jet_CSVIVF']
        Output_tree = rootnp.root2array(fname,'ttree',Output_variables,None,0,nfiles_per_sample,skip_n_events,False,'weight')
        Output_tree = rootnp.rec2array(Output_tree)
        Output_tree_final = np.ndarray((Output_tree.shape[0],),dtype=[('Jet_flavour', float), ('TagVarCSV_vertexCategory', float), ('Jet_pt', float), ('Jet_eta', float),('Jet_CSVIVF', float), ('BDTG', float)])#, buffer = np.array([1,2,3,4,5]))
        for idx,val in enumerate(BDTG):
            Output_tree_final[idx][0] = Output_tree[idx][0]
            Output_tree_final[idx][1] = Output_tree[idx][1]
            Output_tree_final[idx][2] = Output_tree[idx][2]
            Output_tree_final[idx][3] = Output_tree[idx][3]
            Output_tree_final[idx][4] = Output_tree[idx][4]
            Output_tree_final[idx][5] = BDTG[idx]
        Output_tree_final = Output_tree_final.view(np.recarray)
        # One output tree per (category, flavour) input file.
        tree = rootnp.array2root(Output_tree_final, 'trainPlusBDTG_CombinedSV'+category+'_'+flavor+'.root', 'ttree','recreate')
        log.info('Output file dumped in trainPlusBDTG_CombinedSV'+category+'_'+flavor+'.root')
log.info('done')
| [
"kederoov@gmail.com"
] | kederoov@gmail.com |
336c31fca80c5b1edd2c0ae1909608af55e7d349 | 61d22eef5483a046b418a295d2ffa22857a296e1 | /swtest/1952.py | 0a3cb502106980c8bed236da21232f446e6783fd | [] | no_license | seoul-ssafy-class-2-studyclub/hyeonhwa | 7ad680a67ba253eece07a9605a3b983f98a8cca3 | e51163b3135cf529d295bc0d527c98b642f8c367 | refs/heads/master | 2021-10-06T12:57:44.046963 | 2021-10-02T09:42:55 | 2021-10-02T09:42:55 | 198,594,633 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | import sys
# Minimum cost to cover a 12-month usage plan with day/month/3-month/year
# passes. Input is read from a local test file instead of real stdin.
sys.stdin = open('input4.txt', 'r')
T = int(input())
for t in range(T):
    money = list(map(int, input().split()))  # prices: [day, month, 3-month, year]
    plan = list(map(int, input().split()))   # planned usage count per month (12 values)
    # dp[i] = cheapest cost that covers months 0..i
    dp = [0] * 12
    dp[0] = min(money[0]*plan[0], money[1])
    for i in range(1, 12):
        # Either pay per day for this month or buy a one-month pass.
        dp[i] = min(dp[i-1]+money[0]*plan[i], dp[i-1]+money[1])
        if i >= 2:
            # Three-month pass covering months i-2..i. For i == 2 this reads
            # dp[-1] (== dp[11]), which is still 0 at that point in the fill
            # order, so the pass price alone is used — correct, but only by
            # accident; dp[i-3] for i >= 3 is the intended recurrence.
            dp[i] = min(dp[i-3]+money[2], dp[i])
    # A whole-year pass may still beat the month-by-month optimum.
    res = min(dp[11], money[3])
    print('#{} {}'.format(t+1, res))
| [
"h3652k@gmail.com"
] | h3652k@gmail.com |
3e27b2ce3b5605cf170794331455238c72a87b29 | aaeb181508556473d16988f1650c3a81163907c5 | /organizer/crop.py | 8fbbd0cbbd75c124e8cfe9b090798342bade635d | [] | no_license | johnfoundations/energyplus-frontend | cd6ebb9a05461db265366fd21ba1704a31b8dd52 | 0f419bfa43fc2379a9f70a4c92e2545c8d245f7b | refs/heads/master | 2021-01-10T09:55:54.066968 | 2010-07-10T15:54:14 | 2010-07-10T15:54:14 | 47,439,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,635 | py | # -*- coding: utf-8 -*-
import Image, ImageChops
def autoCrop(image,backgroundColor=None):
    '''Intelligent automatic image cropping.

    This function removes the useless "white" space around an image.
    If the image has an alpha (transparency) channel, it will be used
    to choose what to crop.
    Otherwise, this function will try to find the most popular color
    on the edges of the image and consider this color "whitespace".
    (You can override this color with the backgroundColor parameter)

    Note: written for Python 2 and the classic PIL API (py2 raise syntax,
    Image.tostring(), ord() over byte-string pixels).

    Input:
        image (a PIL Image object): The image to crop.
        backgroundColor (3 integers tuple): eg. (0,0,255)
            The color to consider "background to crop".
            If the image is transparent, this parameter will be ignored.
            If the image is not transparent and this parameter is not
            provided, it will be automatically calculated.
    Output:
        a PIL Image object : The cropped image.
    '''
    def mostPopularEdgeColor(image):
        '''Compute the most popular color on the edges of an image
        (left, right, top, bottom).

        Input:
            image: a PIL Image object
        Output:
            The most popular color (A tuple of integers (R,G,B))
        '''
        im = image
        if im.mode != 'RGB':
            im = image.convert("RGB")
        # Get pixels from the edges of the image as 1-pixel-wide strips;
        # the corners belong only to the top/bottom strips, so each edge
        # pixel is counted exactly once:
        width,height = im.size
        left = im.crop((0,1,1,height-1))
        right = im.crop((width-1,1,width,height-1))
        top = im.crop((0,0,width,1))
        bottom = im.crop((0,height-1,width,height))
        pixels = left.tostring() + right.tostring() + top.tostring() + bottom.tostring()
        # Compute who's the most popular RGB triplet (3 bytes per pixel)
        counts = {}
        for i in range(0,len(pixels),3):
            RGB = pixels[i]+pixels[i+1]+pixels[i+2]
            if RGB in counts:
                counts[RGB] += 1
            else:
                counts[RGB] = 1
        # Get the colour which is the most popular:
        mostPopularColor = sorted([(count,rgba) for (rgba,count) in counts.items()],reverse=True)[0][1]
        return ord(mostPopularColor[0]),ord(mostPopularColor[1]),ord(mostPopularColor[2])
    bbox = None
    # If the image has an alpha (transparency) layer, we use it to crop the image.
    # Otherwise, we look at the pixels around the image (top, left, bottom and right)
    # and use the most used color as the color to crop.
    # --- For transparent images -----------------------------------------------
    if 'A' in image.getbands(): # If the image has a transparency layer, use it.
        # This works for all modes which have transparency layer
        bbox = image.split()[list(image.getbands()).index('A')].getbbox()
    # --- For non-transparent images -------------------------------------------
    elif image.mode=='RGB':
        if not backgroundColor:
            backgroundColor = mostPopularEdgeColor(image)
        # Crop a non-transparent image.
        # .getbbox() always crops the black color.
        # So we need to subtract the "background" color from our image.
        bg = Image.new("RGB", image.size, backgroundColor)
        diff = ImageChops.difference(image, bg) # Subtract background color from image
        bbox = diff.getbbox() # Try to find the real bounding box of the image.
    else:
        raise NotImplementedError, "Sorry, this function is not implemented yet for images in mode '%s'." % image.mode
    if bbox:
        image = image.crop(bbox)
    return image
| [
"derekkite@d95564f8-3603-11de-82fb-7f665ef830f7"
] | derekkite@d95564f8-3603-11de-82fb-7f665ef830f7 |
f79dd2dab6a9e5e06232020fc812c26b78740da4 | a50e906945260351f43d57e014081bcdef5b65a4 | /collections/ansible_collections/fortinet/fortios/plugins/modules/fortios_test_autod.py | ba72e9dc091a5fba49d6174bde73f232cf0ec22c | [] | no_license | alhamdubello/evpn-ipsec-dci-ansible | 210cb31f4710bb55dc6d2443a590f3eb65545cf5 | 2dcc7c915167cd3b25ef3651f2119d54a18efdff | refs/heads/main | 2023-06-08T10:42:35.939341 | 2021-06-28T09:52:45 | 2021-06-28T09:52:45 | 380,860,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,724 | py | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_test_autod
short_description: Automation daemon in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify test feature and autod category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.4.0
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Hongbin Lu (@fgtdev-hblu)
- Frank Shen (@frankshen01)
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks
requirements:
- ansible>=2.9.0
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
test_autod:
description:
- Automation daemon.
default: null
type: dict
suboptions:
<Integer>:
description:
- Test level.
type: str
'''
EXAMPLES = '''
- hosts: fortigates
collections:
- fortinet.fortios
connection: httpapi
vars:
vdom: "root"
ansible_httpapi_use_ssl: yes
ansible_httpapi_validate_certs: no
ansible_httpapi_port: 443
tasks:
- name: Automation daemon.
fortios_test_autod:
vdom: "{{ vdom }}"
test_autod:
<Integer>: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
def filter_test_autod_data(json):
    """Project *json* down to the option keys this endpoint understands.

    Keys that are absent from *json*, or explicitly set to None, are dropped
    so they are not sent to the device.
    """
    option_list = ['<Integer>']
    return {option: json[option]
            for option in option_list
            if option in json and json[option] is not None}
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys from snake_case to hyphen-case.

    Dicts are rebuilt as new objects with '_' replaced by '-' in every key;
    lists are converted element-by-element in place (and the same list object
    is returned); any other value is returned unchanged.
    """
    if isinstance(data, dict):
        return {key.replace('_', '-'): underscore_to_hyphen(value)
                for key, value in data.items()}
    if isinstance(data, list):
        data[:] = [underscore_to_hyphen(item) for item in data]
        return data
    return data
def test_autod(data, fos):
    """Push the 'test autod' configuration to the device.

    Filters the task body down to the known options, converts key
    underscores to hyphens (FortiOS API convention), and issues the set
    against the 'test'/'autod' path in the requested vdom.
    """
    payload = underscore_to_hyphen(filter_test_autod_data(data['test_autod']))
    return fos.set('test',
                   'autod',
                   data=payload,
                   vdom=data['vdom'])
def is_successful_status(status):
    """Return True when the FortiOS response counts as a success.

    A call succeeds when the device reports "success", or when a DELETE
    came back 404 (the object was already gone).
    """
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_test(data, fos):
    """Dispatch the 'test' category task and summarise the outcome.

    Returns a (is_error, changed, response) triple for the Ansible exit
    handling in main().  Fails the module when the task body is missing.
    """
    if not data['test_autod']:
        fos._module.fail_json(msg='missing task body: %s' % ('test_autod'))
    else:
        resp = test_autod(data, fos)

    changed = resp['status'] == "success" and \
        (resp['revision_changed'] if 'revision_changed' in resp else True)
    return not is_successful_status(resp), changed, resp
def main():
    """Ansible module entry point.

    Builds the argument spec, opens the httpapi connection, pushes the
    'test autod' configuration through fortios_test(), and exits the module
    with the appropriate changed/failed status (plus a version-mismatch
    warning when the FortiOS version does not match the collection).
    """
    mkeyname = None
    # Argument spec: the 'test_autod' dict currently exposes a single
    # option, the '<Integer>' test level.
    fields = {
        "access_token": {"required": False, "type": "str", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "test_autod": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "<Integer>": {"required": False, "type": "str"}
            }
        }
    }

    check_legacy_fortiosapi()
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    versions_check_result = None
    # The module only works over an httpapi connection (socket present);
    # legacy fortiosapi connections are rejected below.
    if module._socket_path:
        connection = Connection(module._socket_path)
        if 'access_token' in module.params:
            connection.set_option('access_token', module.params['access_token'])
        fos = FortiOSHandler(connection, module, mkeyname)
        is_error, has_changed, result = fortios_test(module.params, fos)
        versions_check_result = connection.get_system_version()
    else:
        module.fail_json(**FAIL_SOCKET_MSG)

    if versions_check_result and versions_check_result['matched'] is False:
        module.warn("Ansible has detected version mismatch between FortOS system and galaxy, see more details by specifying option -vvv")

    # Exit: propagate the version warning alongside the normal result.
    if not is_error:
        if versions_check_result and versions_check_result['matched'] is False:
            module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result)
        else:
            module.exit_json(changed=has_changed, meta=result)
    else:
        if versions_check_result and versions_check_result['matched'] is False:
            module.fail_json(msg="Error in repo", version_check_warning=versions_check_result, meta=result)
        else:
            module.fail_json(msg="Error in repo", meta=result)


if __name__ == '__main__':
    main()
| [
"a.u.bello@bham.ac.uk"
] | a.u.bello@bham.ac.uk |
c3a41d0fda28cfa3185ed31b58435451a9884be0 | 946a19ae575329be79e0eecbf482873ad5ea85d6 | /Documents/report/ZyohoSec/E.py | d1418f66eed297035fefc5b762d77a775f22bd0d | [] | no_license | genzai0/HIKKOSHI | c2709079a94af44067e618f88fba500a1d79c04a | b947742cc8ccbd169035cf05f6abfa10497dad20 | refs/heads/master | 2020-06-07T02:05:57.907773 | 2019-06-20T10:19:42 | 2019-06-20T10:19:42 | 192,897,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py | pc1_table = [57,49,41,33,25,17,9,
1,58,50,42,34,26,18,
10,2,59,51,43,35,27,
19,11,3,60,52,44,36,
63,55,47,39,31,23,15,
7,62,54,46,38,30,22,
14,6,61,53,45,37,29,
21,13,5,28,20,12,4]
# DES permutation tables (entries are 1-based bit positions).
pc2_table = [14, 17, 11, 24, 1, 5, 3, 28, 15, 6, 21, 10, 23, 19, 12, 4, 26, 8, 16, 7, 27, 20, 13, 2, 41, 52, 31, 37, 47, 55, 30, 40, 51, 45, 33, 48, 44, 49, 39, 56, 34, 53, 46, 42, 50, 36, 29, 32]
syoki_table = [58, 50, 42, 34, 26, 18, 10, 2, 60, 52, 44, 36, 28, 20, 12, 4, 62, 54, 46, 38, 30, 22, 14, 6, 64, 56, 48, 40, 32, 24, 16, 8, 57, 49, 41, 33, 25, 17, 9, 1, 59, 51, 43, 35, 27, 19, 11, 3, 61, 53, 45, 37, 29, 21, 13, 5, 63, 55, 47, 39, 31, 23, 15, 7]
E_table = [32, 1, 2, 3, 4, 5, 4, 5, 6, 7, 8, 9, 8, 9, 10, 11, 12, 13, 12, 13, 14, 15, 16, 17, 16, 17, 18, 19, 20, 21, 20, 21, 22, 23, 24, 25, 24, 25, 26, 27, 28, 29, 28, 29, 30, 31, 32, 1]

# The 32-bit half-block to expand.
k = "11111000101001000111000000101110"

# E-bit selection: for every table entry pick bit (position - 1) of k,
# expanding the 32-bit string to 48 bits, then print the result.
num = "".join(k[position - 1] for position in E_table)
print(num)
| [
"naminoyukusaki0311@gmail.com"
] | naminoyukusaki0311@gmail.com |
ad451519dc6cc9b114165e3ad4a8f8cdd767afb1 | a95adc6c55a821304e54e0afb83369761eccb04c | /First_Neural_Network/first_neural_network.py | 6a94b24cc4b4f66175713a5ba08a3f717d82bae8 | [] | no_license | davidsmelo/PyImageSearch-tutorials | 6c4e2d57cb1e51dbf1d796aab4d61bf0327b28d4 | e418406e95536356e775c135a3cd9866a6c780c6 | refs/heads/master | 2022-07-06T22:15:33.399583 | 2020-05-19T09:22:07 | 2020-05-19T09:22:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,975 | py | import cv2
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Activation
from keras.optimizers import SGD
from keras.layers import Dense
import keras.utils as np_utils
from imutils import paths
import numpy as np
import argparse
import os
def image_to_feature_vector(image, size=(32, 32)):
    """Resize *image* to a fixed *size* and return it flattened to 1-D.

    With the default 32x32 size and a 3-channel image this yields
    32*32*3 = 3072 values, matching the network's input_dim below.
    """
    resized = cv2.resize(image, size)
    return resized.flatten()
# --- Command-line arguments: dataset directory and output model path -------
ap=argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True, help="Path to Dataset")
ap.add_argument ("-m", "--model", help="path to output model", required=True)
args=vars(ap.parse_args())

# --- Load every image, flatten it to a 3072-d vector, and record its label.
# The label is taken from the filename prefix (e.g. "cat.123.jpg" -> "cat"),
# so exactly two distinct prefixes are assumed (see to_categorical(..., 2)).
print ("[INFO] describing images..")
imagePaths= list(paths.list_images(args["dataset"]))
data=[]
labels=[]
for (i, ImagePath) in enumerate (imagePaths):
    image=cv2.imread (ImagePath)
    label=ImagePath.split(os.path.sep)[-1].split(".")[0]
    features=image_to_feature_vector(image)
    data.append(features)
    labels.append(label)
    # Progress report every 1000 images.
    if (i>0) and i % 1000==0 :
        print("[INFO] Processed {} / {} images".format(i, len(imagePaths)))

# Encode string labels as integers, scale pixels to [0, 1], one-hot encode.
le=LabelEncoder()
labels=le.fit_transform(labels)
data=np.array(data)/255
labels=np_utils.to_categorical(labels,2)

# 75/25 train/test split with a fixed seed for reproducibility.
print("[INFO] Construction Train and Test Dataset")
(trainData,testData,trainLabels,testLabels)=train_test_split(data,labels,test_size=0.25,random_state=42)

# --- Simple fully-connected network: 3072 -> 768 -> 384 -> 2 (softmax) -----
model=Sequential()
model.add(Dense(768, input_dim=3072, init="uniform",activation="relu"))
model.add(Dense(384, activation="relu", kernel_initializer="uniform"))
model.add(Dense(2))
model.add(Activation("softmax"))

# Train with plain SGD and evaluate on the held-out split.
print("[INFO] compiling Model")
sgd= SGD(lr=0.01)
model.compile(loss="binary_crossentropy", optimizer=sgd, metrics=["accuracy"])
model.fit(trainData, trainLabels, epochs=50, batch_size=128, verbose=1)
(loss,accuracy)= model.evaluate(testData,testLabels, batch_size=128, verbose=1)
print("[INFO] Loss= {:.4f}, Accuracy={:.4f}".format(loss,accuracy))
model.save(args["model"]) | [
"david_melo94@hotmail.com"
] | david_melo94@hotmail.com |
0f7a1a0e9d685fa142ca125e4791c4f714250374 | edb788fe62a4115b278193f932ec52fc69308399 | /tamuhackers19/user.py | 6123e13f497858a4f9fa16eee98c29d2a56479d2 | [] | no_license | tamuhackers/TAMUhack-2019 | 5e23a5edefad257a22f3eb0c3d4f5fb27593f107 | 645a0529ff1a7a314003c30c9eb141c242e2bc8f | refs/heads/master | 2022-12-11T07:11:49.886896 | 2019-01-27T17:36:39 | 2019-01-27T17:36:39 | 167,290,015 | 1 | 0 | null | 2022-09-23T22:21:38 | 2019-01-24T02:37:47 | HTML | UTF-8 | Python | false | false | 213 | py | from flask_login import UserMixin
class User(UserMixin):
    """Minimal Flask-Login user record keyed by phone number."""

    def __init__(self, phone_number, code):
        # The phone number doubles as the unique login identifier.
        self.user_id = phone_number
        # Verification code associated with this user.
        self.code = code

    def get_id(self):
        """Return the identifier Flask-Login stores in the session."""
        return self.user_id
"b30bd351371c686298d32281b337e8@gmail.com"
] | b30bd351371c686298d32281b337e8@gmail.com |
ac50fa692d845d1eeee00f4cca7f93fa4cfa9589 | c0caed81b5b3e1498cbca4c1627513c456908e38 | /src/python/bindings/app/membrane/predict_ddG.py | f9857bead41598d64c25060485f3cf5045c0b739 | [
"LicenseRef-scancode-other-permissive"
] | permissive | malaifa/source | 5b34ac0a4e7777265b291fc824da8837ecc3ee84 | fc0af245885de0fb82e0a1144422796a6674aeae | refs/heads/master | 2021-01-19T22:10:22.942155 | 2017-04-19T14:13:07 | 2017-04-19T14:13:07 | 88,761,668 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 12,380 | py | #!/usr/bin/env python
# :noTabs=true:
# (c) Copyright Rosetta Commons Member Institutions.
# (c) This file is part of the Rosetta software suite and is made available under license.
# (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.
# (c) For more information, see http://www.rosettacommons.org. Questions about this can be
# (c) addressed to University of Washington UW TechTransfer, email: license@u.washington.edu.
## @file: compute_ddG.py
##
## @brief: Compute ddGs of mutation
## @details: Use the Rosetta membrane framework to compute the ddG of unfolding of
## a membrane protein in Rosetta (uses packer, mutate.py from Evan Baugh)
##
## @author: Rebecca F. Alford (rfalford12@gmail.com)
## @author: JKLeman (julia.koehler1982@gmail.com)
# Tools
import sys, os
import commands
import random
from optparse import OptionParser, IndentedHelpFormatter
_script_path_ = os.path.dirname( os.path.realpath(__file__) )
# Rosetta-specific imports
import rosetta.protocols.membrane
from rosetta import Pose
from rosetta import create_score_function
from rosetta import TaskFactory
from rosetta.utility import vector1_bool
from rosetta import aa_from_oneletter_code
from rosetta import PackRotamersMover
from rosetta.core.pose import PDBInfo
from rosetta.core.chemical import VariantType
from toolbox import mutate_residue
###############################################################################
## @brief Main - Add Membrane to Pose, Compute ddG
def main( args ):
    """Parse options, set up the Rosetta membrane framework, and compute the
    ddG of mutation at the requested pose residue.

    If --mut is given, only that single mutation is scored; otherwise all 20
    canonical amino acids are scanned.  Results are appended to --out, and a
    per-energy-term breakdown is appended to --output_breakdown.
    """
    parser = OptionParser(usage="usage: %prog [OPTIONS] [TESTS]")
    parser.set_description(main.__doc__)
    #input options
    parser.add_option('--in_pdb', '-p',
        action="store",
        help="Input PDB file.", )
    parser.add_option('--in_span', '-s',
        action="store",
        help="Input spanfile.", )
    parser.add_option('--out', '-o',
        action="store", default='ddG.out',
        help="Output filename with pose residue numbering. Default: 'ddG.out'", )
    parser.add_option('--res', '-r',
        action="store",
        help="Pose residue number to mutate.", )
    parser.add_option('--mut', '-m',
        action="store",
        help="One-letter code of residue identity of the mutant. Example: A181F would be 'F'", )
    parser.add_option('--repack_radius', '-a',
        action="store", default=0,
        help="Repack the residues within this radius",)
    parser.add_option('--output_breakdown', '-b',
        action="store", default="scores.sc",
        help="Output mutant and native score breakdown by weighted energy term into a scorefile", )
    parser.add_option('--include_pH', '-t',
        action="store", default=0,
        help="Include pH energy terms: pH_energy and fa_elec. Default false.", )
    parser.add_option('--pH_value', '-q',
        action="store", default=7,
        help="Predict ddG and specified pH value. Default 7. Will not work if include pH is not passed", )

    #parse options
    (options, args) = parser.parse_args(args=args[1:])
    # Options is read as a module-level global by the breakdown helpers below.
    global Options
    Options = options

    # Check the required inputs (PDB file, spanfile, residue) are present
    if ( not Options.in_pdb or not Options.in_span or not Options.res ):
        sys.exit( "Must provide flags '-in_pdb', '-in_span', and '-res'! Exiting..." )

    # Initialize Rosetta options from user options. Enable pH mode if applicable
    rosetta_options = ""
    standard_options = "-membrane_new:setup:spanfiles " + Options.in_span + " -run:constant_seed -in:ignore_unrecognized_res"
    if ( Options.include_pH ):
        print Options.pH_value
        # Reject physically meaningless pH values before starting Rosetta.
        if ( float( Options.pH_value ) < 0 or float(Options.pH_value) > 14 ):
            sys.exit( "Specified pH value must be between 0-14: Exiting..." )
        else:
            pH_options = " -pH_mode -value_pH " + str(Options.pH_value)
            rosetta_options = standard_options + pH_options
    else:
        rosetta_options = standard_options

    # Initialize Rosetta based on user inputs
    rosetta.init( extra_options=rosetta_options )

    # Load Pose, & turn on the membrane
    pose = pose_from_file( Options.in_pdb )

    # Add Membrane to Pose
    add_memb = rosetta.protocols.membrane.AddMembraneMover()
    add_memb.apply( pose )

    # Setup in a topology based membrane
    init_mem_pos = rosetta.protocols.membrane.MembranePositionFromTopologyMover()
    init_mem_pos.apply( pose )

    # Pick the score function: pH-enabled (adds pH_energy and fa_elec) or the
    # standard smoothed membrane full-atom function (pH 7 calculations).
    sfxn = rosetta.core.scoring.ScoreFunction()
    if ( Options.include_pH ):
        sfxn = create_score_function( "mpframework_pHmode_fa_2015")
    else:
        sfxn = create_score_function( "mpframework_smooth_fa_2012")

    # Repack the native rotamer and residues within the repack radius, so the
    # native reference state is relaxed the same way the mutants will be.
    native_res = pose.residue( int( Options.res ) ).name1()
    repacked_native = mutate_residue( pose, int( Options.res), native_res, Options.repack_radius, sfxn )

    # to output score breakdown, start by printing the score labels in
    # the top of the file
    print_score_labels_to_file( repacked_native, sfxn, Options.output_breakdown )

    # Compute mutations: a single requested mutant, or a full 20-AA scan.
    if ( Options.mut ):
        with file( Options.out, 'a' ) as f:
            ddGs = compute_ddG( repacked_native, sfxn, int( Options.res ), Options.mut, Options.repack_radius, Options.output_breakdown )
            f.write( Options.in_pdb + " " + Options.res + " " + str(ddGs[0]) + " " + str(ddGs[1]) + " " + str(ddGs[2]) + " " + str(ddGs[3]) + "\n" )
            f.close
    else:
        AAs = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
        for aa in AAs:
            with file( Options.out, 'a' ) as f:
                ddGs = compute_ddG( repacked_native, sfxn, int( Options.res ), aa, Options.repack_radius, Options.output_breakdown )
                f.write( str(ddGs[0]) + " " + str(ddGs[1]) + " " + str(ddGs[2]) + " " + str(ddGs[3]) + "\n" )
                f.close
###############################################################################
## @brief Compute ddG of mutation in a protein at specified residue and AA position
def compute_ddG( pose, sfxn, resnum, aa, repack_radius, sc_file ):
    """Score the native pose, apply the point mutation, rescore, and return
    (mutant_aa, mutant_score, native_score, ddG) with scores rounded to 3 dp.

    Also appends the per-energy-term ddG breakdown to *sc_file*.
    """
    wild_type_score = sfxn( pose )
    mutant_pose = mutate_residue( pose, resnum, aa, repack_radius, sfxn )
    mutant_total = sfxn( mutant_pose )

    # Record the component-wise breakdown for this mutation.
    print_ddG_breakdown( pose, mutant_pose, sfxn, resnum, aa, sc_file )

    ddG = round( mutant_total - wild_type_score, 3 )
    return aa, round( mutant_total, 3 ), round( wild_type_score, 3 ), ddG
###############################################################################
# @brief Replace the residue at <resid> in <pose> with <new_res> and allows
# repacking within a given <pack_radius>
def mutate_residue( pose, mutant_position, mutant_aa, pack_radius, pack_scorefxn ):
    """Return a copy of *pose* with the residue at *mutant_position* mutated
    to *mutant_aa* (one-letter code), repacking side chains within
    *pack_radius* Angstroms of the mutation site using *pack_scorefxn*.

    Raises:
        IOError: if *pose* is not a full-atom pose.
    """
    # BUG FIX: the original constructed IOError(...) without raising it, so
    # non-fullatom poses silently fell through into the packing code below.
    if not pose.is_fullatom():
        raise IOError( 'mutate_residue only works with fullatom poses' )

    # Work on a copy so the caller's pose is left untouched.
    test_pose = Pose()
    test_pose.assign( pose )

    # Create a packer task (standard)
    task = TaskFactory.create_packer_task( test_pose )

    # Restrict design at the mutation site to the single target amino acid:
    # build a Vector1 of 20 booleans, True only at the index of mutant_aa
    # (Rosetta numbers the 20 canonical amino acids 1-20).
    aa_bool = vector1_bool()
    mutant_aa = aa_from_oneletter_code( mutant_aa )
    for i in range( 1 , 21 ):
        aa_bool.append( i == mutant_aa )
    task.nonconst_residue_task( mutant_position
        ).restrict_absent_canonical_aas( aa_bool )

    # Only repack the mutating residue and anything whose neighbor atom lies
    # within pack_radius; squared distances avoid a sqrt per residue.
    center = pose.residue( mutant_position ).nbr_atom_xyz()
    for i in range( 1, pose.total_residue() + 1 ):
        dist = center.distance_squared( test_pose.residue( i ).nbr_atom_xyz() )
        if i != mutant_position and dist > pow( float( pack_radius ), 2 ) :
            task.nonconst_residue_task( i ).prevent_repacking()

    # apply the mutation and pack nearby residues
    packer = PackRotamersMover( pack_scorefxn , task )
    packer.apply( test_pose )
    return test_pose
###############################################################################
#@brief Print ddG breakdown from the pose
# Extract weighted energies from the native and mutated pose. Calculate the ddG
# of each and print the component-wise ddG vlaues
def print_ddG_breakdown( native_pose, mutated_pose, sfxn, resnum, aa, fn ):
    """Append the component-wise ddG of this mutation to file *fn*.

    Extracts the weighted per-term energies of the native and mutated poses,
    subtracts them term by term, and writes one space-delimited line:
    a mutation label (e.g. "A181F") followed by each term's ddG.

    NOTE(review): Python 2 code — relies on the `file` builtin and on
    `filter` returning a list; reads the module-level `Options` global.
    """
    # Extract weighted per-term score strings ("term value term value ...").
    tmp_native = native_pose.energies().total_energies().weighted_string_of( sfxn.weights() )
    tmp_mutant = mutated_pose.energies().total_energies().weighted_string_of( sfxn.weights() )

    # Split on spaces, dropping the empty strings produced by repeated spaces.
    array_native = filter( None, tmp_native.split(' ') )
    array_mutant = filter( None, tmp_mutant.split(' ') )

    # Odd indices hold the numeric values (even indices are the term names).
    native_scores = []
    for i in range( len(array_native) ):
        if ( i % 2 != 0 ):
            native_scores.append( float( array_native[i] ) )

    mutant_scores = []
    for i in range( len(array_mutant) ):
        if ( i % 2 != 0 ):
            mutant_scores.append( float( array_mutant[i] ) )

    # Make a label for the mutation, e.g. native residue + position + mutant.
    native_res = native_pose.residue( int( Options.res ) ).name1()
    mut_label = native_res + str(resnum) + aa

    # Calculate ddG of individual components (mutant - native, 3 dp).
    ddGs = []
    ddGs.append( mut_label )
    for i in range( len( mutant_scores ) ):
        ddG_component = mutant_scores[i] - native_scores[i]
        ddGs.append( round( ddG_component, 3 ) )

    ddGs_str = convert_array_to_str( ddGs )
    with file( fn, 'a' ) as f:
        f.write( ddGs_str + "\n" )
        f.close()
###############################################################################
#@brief Get header for ddG breakdown output
# Save the score labels, to be printed at the top of the output breakdown file
def print_score_labels_to_file( native_pose, sfxn, fn ):
    """Append a header line of score-term labels to file *fn*.

    The labels are parsed from the native pose's weighted score string
    (even indices are term names, with a trailing ':' stripped), prefixed
    with a 'mutation' column to match print_ddG_breakdown's rows.

    NOTE(review): Python 2 code — `file` builtin, list-returning `filter`,
    and `str.translate(None, ...)` for character deletion.
    """
    tmp_native = native_pose.energies().total_energies().weighted_string_of( sfxn.weights() )
    array_native = filter( None, tmp_native.split(' ') )
    labels = []
    labels.append( 'mutation ' ) # Append field for mutation label
    for i in range( len(array_native) ):
        # Even indices hold "term:" names; drop the colon.
        if ( i % 2 == 0 ):
            labels.append( array_native[i].translate(None, ':') )

    labels_str = convert_array_to_str( labels )
    with file( fn, 'a' ) as f:
        f.write( labels_str + "\n" )
        f.close()
###############################################################################
#@brief Convert an array to a space deliminted string
# Save the score labels, to be printed at the top of the output breakdown file
def convert_array_to_str( array ):
    """Join the elements of *array* into a single space-delimited string.

    Each element is converted with str(); an empty array yields "".
    (Replaces the original manual accumulation loop with str.join —
    identical output, idiomatic and linear-time.)
    """
    return " ".join( str( elem ) for elem in array )
if __name__ == "__main__" : main(sys.argv)
| [
"malaifa@yahoo.com"
] | malaifa@yahoo.com |
6ec54bc427e38fff5352b9d4a525b5c7b1bbc069 | 214ea3873f451940c73c4fb02981b08c8161b23c | /Array/range-addition.py | 8617f93a5c2256d727dd8742f68e760606d4d616 | [] | no_license | Tiierr/LeetCode-Python | 4a086a76a6d3780140e47246304d11c520548396 | e8532b63fc5bb6ceebe30a9c53ab3a2b4b2a75a3 | refs/heads/master | 2021-06-14T04:36:57.394115 | 2017-03-07T06:46:39 | 2017-03-07T06:46:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,589 | py | # Time: O(k + n)
# Space: O(1)
#
# Assume you have an array of length n initialized with
# all 0's and are given k update operations.
#
# Each operation is represented as a triplet:
# [startIndex, endIndex, inc] which increments each element of subarray
# A[startIndex ... endIndex] (startIndex and endIndex inclusive) with inc.
#
# Return the modified array after all k operations were executed.
#
# Example:
#
# Given:
#
# length = 5,
# updates = [
# [1, 3, 2],
# [2, 4, 3],
# [0, 2, -2]
# ]
#
# Output:
#
# [-2, 0, 3, 5, 3]
#
# Explanation:
#
# Initial state:
# [ 0, 0, 0, 0, 0 ]
#
# After applying operation [1, 3, 2]:
# [ 0, 2, 2, 2, 0 ]
#
# After applying operation [2, 4, 3]:
# [ 0, 2, 5, 5, 3 ]
#
# After applying operation [0, 2, -2]:
# [-2, 0, 3, 5, 3 ]
#
# Hint:
#
# Thinking of using advanced data structures? You are thinking it too complicated.
# For each update operation, do you really need to update all elements between i and j?
# Update only the first and end element is sufficient.
# The optimal time complexity is O(k + n) and uses O(1) extra space.
class Solution(object):
    def getModifiedArray(self, length, updates):
        """
        :type length: int
        :type updates: List[List[int]]
        :rtype: List[int]

        Difference-array technique: record each update only at its two
        boundaries, then a single prefix-sum pass produces the final array.
        O(k + n) time, O(1) extra space.
        """
        result = [0] * length
        # Mark +inc at the start and -inc just past the end of each range.
        for start, end, inc in updates:
            result[start] += inc
            if end + 1 < length:
                result[end + 1] -= inc
        # Prefix-sum pass turns the boundary markers into final values.
        # (range, not xrange, so the code also runs under Python 3.)
        for i in range(1, length):
            result[i] += result[i - 1]
        return result
| [
"rayyu03@163.com"
] | rayyu03@163.com |
6168a3b2331db5b8eeef80583560970fab0652a2 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/popish.py | fc482e43309e27670d3e1bb14bc0bb4c6a354519 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 409 | py | ii = [('CookGHP3.py', 3), ('CookGHP.py', 50), ('BailJD2.py', 1), ('ClarGE2.py', 1), ('SeniNSP.py', 1), ('CookGHP2.py', 5), ('ClarGE.py', 5), ('DaltJMA.py', 33), ('WadeJEB.py', 5), ('NewmJLP.py', 1), ('SoutRD2.py', 1), ('SoutRD.py', 1), ('MereHHB3.py', 1), ('HowiWRL2.py', 1), ('HogaGMM.py', 2), ('FerrSDO.py', 1), ('WilbRLW3.py', 3), ('MereHHB2.py', 1), ('ClarGE3.py', 3), ('EvarJSP.py', 1), ('TaylIF.py', 3)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
a0641d0c62f8c6e39c81e1f9266c4710026b35aa | f3a7b2b71af1ca16e87fcc2c6063670d056f59c6 | /libs/configs_old/MLT/gwd/cfgs_res101_mlt_v1.py | d7bff74ce9ce7235a49aac2edc2032e4000f6281 | [
"Apache-2.0"
] | permissive | DLPerf/RotationDetection | 3af165ab00ea6d034774a7289a375b90e4079df4 | c5d3e604ace76d7996bc461920854b2c79d8c023 | refs/heads/main | 2023-07-16T06:01:42.496723 | 2021-08-28T03:17:39 | 2021-08-28T03:17:39 | 400,690,285 | 0 | 0 | Apache-2.0 | 2021-08-28T03:16:55 | 2021-08-28T03:16:55 | null | UTF-8 | Python | false | false | 3,449 | py | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import os
import tensorflow as tf
import math
from dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo
"""
FLOPs: 737874381; Trainable params: 51265150
trainval/test + sqrt tau=2
"""

# ------------------------------------------------
VERSION = 'RetinaNet_MLT_GWD_1x_20201222'
NET_NAME = 'resnet101_v1d' # 'MobilenetV2'

# ---------------------------------------- System
ROOT_PATH = os.path.abspath('../../')
print(20*"++--")
print(ROOT_PATH)
GPU_GROUP = "0"
NUM_GPU = len(GPU_GROUP.strip().split(','))
SHOW_TRAIN_INFO_INTE = 20
SMRY_ITER = 200
SAVE_WEIGHTS_INTE = 10000 * 2

SUMMARY_PATH = os.path.join(ROOT_PATH, 'output/summary')
TEST_SAVE_PATH = os.path.join(ROOT_PATH, 'tools/test_result')

pretrain_zoo = PretrainModelZoo()
PRETRAINED_CKPT = pretrain_zoo.pretrain_weight_path(NET_NAME, ROOT_PATH)
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
EVALUATE_R_DIR = os.path.join(ROOT_PATH, 'output/evaluate_result_pickle/')

# ------------------------------------------ Train and test
RESTORE_FROM_RPN = False
FIXED_BLOCKS = 1 # allow 0~3
FREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone
USE_07_METRIC = True
ADD_BOX_IN_TENSORBOARD = True

MUTILPY_BIAS_GRADIENT = 2.0 # if None, will not multipy
GRADIENT_CLIPPING_BY_NORM = 10.0 # if None, will not clip

# Loss-term weights for classification / box regression / angle.
CLS_WEIGHT = 1.0
REG_WEIGHT = 1.0
ANGLE_WEIGHT = 0.5
REG_LOSS_MODE = 2
ALPHA = 1.0
BETA = 1.0

BATCH_SIZE = 1
EPSILON = 1e-5
MOMENTUM = 0.9
LR = 1e-3
# LR schedule and total iterations, expressed in checkpoint intervals.
DECAY_STEP = [SAVE_WEIGHTS_INTE*12, SAVE_WEIGHTS_INTE*16, SAVE_WEIGHTS_INTE*20]
MAX_ITERATION = SAVE_WEIGHTS_INTE*20
WARM_SETP = int(1.0 / 4.0 * SAVE_WEIGHTS_INTE)

# -------------------------------------------- Dataset
DATASET_NAME = 'MLT' # 'pascal', 'coco'
PIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
PIXEL_MEAN_ = [0.485, 0.456, 0.406]
PIXEL_STD = [0.229, 0.224, 0.225] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
IMG_SHORT_SIDE_LEN = [800, 600, 1000, 1200]
IMG_MAX_LENGTH = 1500
CLASS_NUM = 1

# Data augmentation switches.
IMG_ROTATE = True
RGB2GRAY = True
VERTICAL_FLIP = True
HORIZONTAL_FLIP = True
IMAGE_PYRAMID = True

# --------------------------------------------- Network
SUBNETS_WEIGHTS_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=None)
SUBNETS_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)
PROBABILITY = 0.01
# Bias init so the initial sigmoid activation equals PROBABILITY:
# sigmoid(-log((1 - p) / p)) == p.
FINAL_CONV_BIAS_INITIALIZER = tf.constant_initializer(value=-math.log((1.0 - PROBABILITY) / PROBABILITY))
WEIGHT_DECAY = 1e-4
USE_GN = False
FPN_CHANNEL = 256
NUM_SUBNET_CONV = 4
FPN_MODE = 'fpn'

# --------------------------------------------- Anchor
LEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_STRIDE = [8, 16, 32, 64, 128]
ANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]
ANCHOR_RATIOS = [1, 1 / 2, 2., 1 / 3., 3., 5., 1 / 5.]
ANCHOR_ANGLES = [-90, -75, -60, -45, -30, -15]
ANCHOR_SCALE_FACTORS = None
USE_CENTER_OFFSET = True
METHOD = 'H'
USE_ANGLE_COND = False
ANGLE_RANGE = 90 # or 180

# -------------------------------------------- Head
SHARE_NET = True
USE_P5 = True
IOU_POSITIVE_THRESHOLD = 0.5
IOU_NEGATIVE_THRESHOLD = 0.4

NMS = True
NMS_IOU_THRESHOLD = 0.1
MAXIMUM_DETECTIONS = 100
FILTERED_SCORE = 0.05
VIS_SCORE = 0.2

# -------------------------------------------- GWD
# Gaussian Wasserstein Distance loss settings (cf. the "sqrt tau=2" note in
# the module docstring above).
GWD_TAU = 2.0
GWD_FUNC = tf.sqrt
| [
"yangxue0827@126.com"
] | yangxue0827@126.com |
cad9a13325be66e03f2c55ef2db7d5b6f3300796 | f9e3f8901b755841473ef7780ed69e458e5d6293 | /jnuke/gizmos/unsorted/fixPaths.py | ed5e8aebc6250ccd632820eb6d875b1eeb6e0408 | [] | no_license | tws0002/jeeves | 2170ec0866fd9cfe0e41d480f9679009b741dc14 | db198f0f02d4fd24af29dfdc0c22047c6dfd6e4f | refs/heads/master | 2021-01-11T19:11:32.540190 | 2015-01-18T19:11:15 | 2015-01-18T19:11:15 | 79,332,609 | 1 | 0 | null | 2017-01-18T11:13:34 | 2017-01-18T11:13:34 | null | UTF-8 | Python | false | false | 2,089 | py | # Copyright (c) 2009 The Foundry Visionmongers Ltd. All Rights Reserved.
#
# Example that attempts to fix all the missing read paths in a nuke script.
#
# Works on Read, ReadGeo, or Axis nodes. Doesn't work on proxy paths.
import os, nuke
def getFrame( filename, frame ):
    """Return an on-disk filename for a (possibly padded) sequence pattern.

    A printf-style pattern is expanded with frame number 1, then any
    trailing frame-range suffix is stripped via getFile().

    NOTE(review): the *frame* argument is currently unused — the pattern is
    always expanded with frame 1.  Confirm whether that is intended.
    """
    expanded = filename
    if "%" in expanded:
        expanded = expanded % 1
    return getFile(expanded)

def getFile( filename ):
    """Strip a trailing ' first-last' frame-range suffix from a clip name.

    The suffix is only removed when the name still contains a '%' padding
    token (i.e. it looks like "seq.%04d.exr 1-100").
    """
    if "%" in filename and " " in filename:
        return filename[:filename.rfind(' ')]
    return filename
def getCommonBase( str1, str2 ):
    """Return the unique leading parts of two strings.

    The longest common *suffix* of the inputs (found by taking the common
    prefix of the reversed strings) is located in each string, and
    everything from its first occurrence onward is dropped.  Returns
    [base1, base2].
    """
    shared_suffix = os.path.commonprefix([str1[::-1], str2[::-1]])[::-1]
    return [str1[:str1.find(shared_suffix)],
            str2[:str2.find(shared_suffix)]]
def fixPaths( ):
    """Walk every node of the current Nuke script and interactively repair
    file paths that no longer exist on disk (Python 2 / Nuke API).

    After the user locates one replacement, the learned old->new directory
    mapping is tried automatically on subsequent broken nodes.
    """
    oldDir = ""
    newDir = ""
    # Directory containing the .nk script — used as the browse start point.
    scriptDir = os.path.dirname ( nuke.root().knob("name").getValue() )
    for n in nuke.allNodes():
        #### check node types, supports Read, ReadGeo, or Axis nodes. You may want to add more here..
        if n.Class() == "Read" or n.Class() == "ReadGeo" or n.Class() == "ReadGeo2" or n.Class() == "Axis2":
            f = n.knob( "file" )
            filename = f.getValue()
            print 'Filename is ' + filename
            if filename and filename != '':
                basename = os.path.basename( filename )
                # Resolve the padded name to a concrete frame for the existence test.
                frame = getFrame( filename, nuke.frame() )
                print "Looking for : " + frame
                if not os.path.exists( frame ):
                    # First retry: apply the directory substitution learned earlier.
                    filename = filename.replace( oldDir, newDir )
                    frame = getFrame( filename, nuke.frame() )
                    print "Looking for new filename " + frame
                    if not os.path.exists( frame ):
                        # Still missing: ask the user to locate the file.
                        # NOTE(review): this rebinds the loop variable `n` (the
                        # node) to the chosen clip name — confusing but harmless
                        # here, since `n` is not used as a node afterwards.
                        n = nuke.getClipname( "Looking for " + filename, basename, scriptDir )
                        if n != None:
                            # Remember the old->new directory mapping for later nodes.
                            (oldDir, newDir) = getCommonBase( os.path.dirname( filename ), os.path.dirname( n ) )
                            f.setValue ( getFile(n) )
                        else:
                            pass
                    else:
                        # Substituted path exists: write it back (range suffix stripped).
                        f.setValue( getFile(filename) )
| [
"elliott.james,smith@gmail.com"
] | elliott.james,smith@gmail.com |
a07e58b282f7898881dacafa8c747816a7315947 | cd9e21d81583e90d14e78846f0cb973bb05a8b0d | /apps/__init__.py | d66b74f62cdd13c68d40a1848c8e5cbaac3d58fe | [] | no_license | yueludanfeng/djangostart | 861d9657aa20e99d4fe87131285e050f8e43f37e | 61bbecda3078b30f03efb5ecd1b02b8303cd1d7d | refs/heads/master | 2021-01-20T01:38:59.930223 | 2017-04-25T03:35:49 | 2017-04-25T03:35:49 | 89,313,176 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19 | py | __author__ = 'lxm'
| [
"yueludanfeng@gmail.com"
] | yueludanfeng@gmail.com |
9b73d63340c959b69fac7d9c9d0f12ede5f2c25c | eaf95c9ee7c7c8548c30cf0fdd9fbc3cdfd20f31 | /removedoublesfromall/__init__.py | 036860e5f3e7b7afdd9815b5bdacc187af124e1d | [] | no_license | Fl0GUI/blender-scripts | 95566a13be6b20180707e55fd1bb7b365c68d623 | 3d6b8cce91112ff95fc7bb8011f173b71807e707 | refs/heads/master | 2020-04-18T04:26:05.606063 | 2020-01-02T18:10:32 | 2020-01-02T18:10:32 | 167,238,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,522 | py | import bpy
bl_info = {
"name": "remove all the doubles",
"description": "removes doubles from all meshes",
"author": "Flor Guilini",
"version": (420, 69),
"blender": (2,80,0),
"location": "Object",
"warning": "I don't know what the heck I'm doing",
"wiki_url": "https://github.com/Fl0GUI/blender-scripts",
"tracker_url": "",
"category": "Object"
}
class RemoveDoublesFromAll(bpy.types.Operator):
    """Blender operator that merges duplicate ("double") vertices in the
    mesh of every object in the current .blend file."""
    bl_idname = "object.removedoublesfromall"
    bl_label = "remove all doubles"
    bl_options = {'REGISTER'}
    def execute(self, context):
        # Visit every object; each bpy.ops call is guarded by poll() so that
        # non-mesh objects (or otherwise invalid contexts) are skipped safely.
        for obj in bpy.data.objects:
            context.view_layer.objects.active = obj
            if bpy.ops.object.editmode_toggle.poll():
                bpy.ops.object.editmode_toggle()
            if bpy.ops.mesh.select_all.poll():
                bpy.ops.mesh.select_all(action='SELECT')
            if bpy.ops.mesh.remove_doubles.poll():
                # use_unselected=True merges unselected vertices as well.
                bpy.ops.mesh.remove_doubles(use_unselected=True)
            if bpy.ops.object.editmode_toggle.poll():
                # Leave edit mode again before moving to the next object.
                bpy.ops.object.editmode_toggle()
        return {'FINISHED'}
def addUI(self, context):
    # Menu draw callback (appended to the 3D View "Object" menu below):
    # adds a separator followed by our operator entry.
    layout = self.layout
    layout.separator()
    layout.operator("object.removedoublesfromall", text="remove doubles from all meshes")
def register():
    # Add-on entry point: register the operator class and the menu entry.
    bpy.utils.register_class(RemoveDoublesFromAll)
    bpy.types.VIEW3D_MT_object.append(addUI)
def unregister():
    # Add-on exit point: undo everything register() did.
    bpy.utils.unregister_class(RemoveDoublesFromAll)
    bpy.types.VIEW3D_MT_object.remove(addUI)
| [
"flor.guilini@hotmail.com"
] | flor.guilini@hotmail.com |
0638fe82fed7e47502334e6a6fc971322690549a | 8f73125d816f3b44b03159dba272e095f37c1f0c | /scripts/viewhdf.py | dc40b917231106ad0c60945affdc86ff17e43524 | [] | no_license | tarah28/nanopore | 356b218e5ca3dfb98e4dd7232d8f1c6303f899d1 | ec716ee15ab26d7bf33b7f7352ab8cad1c369ae8 | refs/heads/master | 2021-05-27T06:21:51.958938 | 2014-09-10T11:36:07 | 2014-09-10T11:36:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | #!/usr/bin/python
# Print a textual summary of every HDF5 file named on the command line,
# using the h5view wrapper module (one block of output per file).
import h5view
import sys
for fn in sys.argv[1:]:
    with h5view.open(fn) as f:
        print(f)
| [
"n.j.loman@bham.ac.uk"
] | n.j.loman@bham.ac.uk |
2e8a022d3ba3022ee59727e88fe7c3c7b818ad36 | d2f45145b8a8ab51c90ceb62f1a184996bf649c0 | /getfilename/__init__.py | 2b6c8843bbf02f49f2ca29b366561a1fba4a65fa | [
"MIT"
] | permissive | raguay/GetFileName | 3de65fc8494c27462978096c9ae15e2d0203b738 | 604c1d551cb69031b86f3c69abb9dc84d7867493 | refs/heads/master | 2021-05-12T00:31:49.250616 | 2018-02-14T07:31:45 | 2018-02-14T07:31:45 | 117,537,082 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 929 | py | from fman import DirectoryPaneCommand, show_alert
from fman.url import as_human_readable
from fman.clipboard import set_text
import os.path
import ntpath
class getFileName(DirectoryPaneCommand):
    """fman command: copy the basename(s) of the selected file(s) to the
    clipboard.

    With no selection, the file under the cursor is used; multiple
    selections are joined with ", ".
    """
    def __call__(self):
        selection = self.pane.get_selected_files()
        if not selection:
            file_under_cursor = self.pane.get_file_under_cursor()
            if file_under_cursor is not None:
                set_text(ntpath.basename(as_human_readable(file_under_cursor)))
        else:
            # Idiom fix: replace the manual first-element flag plus string
            # concatenation loop with a single str.join() over a generator.
            set_text(", ".join(
                ntpath.basename(as_human_readable(f)) for f in selection
            ))
| [
"raguay@customct.com"
] | raguay@customct.com |
06a50ac1e728fab6f9d52e822811aac70ca81d33 | 81c3c38b6265fd953f2a4f09011a82dbb3c97058 | /Day - 53/map.py | 699f9069fc593bcd51824ce205f3fcc01245c1b4 | [] | no_license | KannanPalani57/100DaysOfCode | ec60b7aed92fce1afa043c157b5d6beec0f527a2 | e1947e9d80461758c2996dd49dcd84edda6ef297 | refs/heads/master | 2023-01-21T05:08:53.415395 | 2020-07-03T13:02:18 | 2020-07-03T13:02:18 | 243,007,782 | 3 | 1 | null | 2023-01-06T04:23:38 | 2020-02-25T13:35:52 | JavaScript | UTF-8 | Python | false | false | 207 | py |
def addFive(x):
    """Return *x* + 5.

    Bug fix: the original returned x + 1, contradicting the function's
    name (and the mapping demo it is used in below).
    """
    return x + 5
# Demo of map(): apply a callable to each element of a sequence.
numbers = [1,2,3,4,5]
# map() is lazy in Python 3, so list() is needed to realise the result.
mappedList = list(map(addFive, numbers))
print("The mapped list are , "+str(mappedList))
# The callable can equally be an inline lambda (here: add 2 to each element).
list2 = list(map((lambda x: x+2), numbers))
print(list2) | [
"kannangreener@gmail.com"
] | kannangreener@gmail.com |
6f27a784b92807e7f67299bbffc957c9a2bd379a | 9d484077026b7fcf26188d77281f573eaec1f1d3 | /utils/reference_genomes.py | fc7683a365dd302daa0a34bb8c60602e2eead1c7 | [] | no_license | gaberosser/qmul-bioinf | 603d0fe1ed07d7233f752e9d8fe7b02c7cf505fe | 3cb6fa0e763ddc0a375fcd99a55eab5f9df26fe3 | refs/heads/master | 2022-02-22T06:40:29.539333 | 2022-02-12T00:44:04 | 2022-02-12T00:44:04 | 202,544,760 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,530 | py | import os
import csv
import pandas as pd
from settings import GIT_LFS_DATA_DIR
def known_genes(tax_id=9606, index_field=None):
    """
    Load the NCBI gene_info table for the supplied taxonomy ID
    (9606 = human, default; 10090 = mouse).
    :param index_field: If supplied, reindex the dataframe by this field.
    Examples are 'Symbol', 'GeneID'.
    """
    gene_info_files = {
        9606: 'Homo_sapiens.gene_info.gz',
        10090: 'Mus_musculus.gene_info.gz',
    }
    if tax_id not in gene_info_files:
        # NOTE(review): the two-argument form means the message is never
        # %-formatted; preserved as-is from the original.
        raise ValueError("Unrecognised tax_id %d.", tax_id)
    res = pd.read_csv(
        os.path.join(GIT_LFS_DATA_DIR, 'ncbi_gene', gene_info_files[tax_id]),
        sep='\t'
    )
    if index_field is not None:
        res = res.set_index(index_field)
    return res
def conversion_table(type='protein_coding', tax_id=9606):
    """Load the gene-identifier conversion table for the given taxonomy.

    Human (9606): a genenames.org dump — either the protein-coding subset
    or the full table, selected by *type*.  Mouse (10090): a Biomart mm10
    export.
    """
    ## TODO: add a version option
    if tax_id == 9606:
        if type == 'protein_coding':
            in_file = os.path.join(GIT_LFS_DATA_DIR, 'genenames', 'protein_coding', 'genenames.org.2017.01.tsv')
        elif type == 'all':
            # Earlier snapshots, kept for reference:
            # in_file = os.path.join(GIT_LFS_DATA_DIR, 'genenames', 'all', 'genenames.org.2017.09.tsv')
            # in_file = os.path.join(GIT_LFS_DATA_DIR, 'genenames', 'all', 'genenames.org.2018.03.tsv')
            in_file = os.path.join(GIT_LFS_DATA_DIR, 'genenames', 'all', 'genenames.org.2018.11.tsv')
        else:
            raise ValueError("Unsupported type option '%s'" % type)
        # in_file = os.path.join(DATA_DIR, 'genenames', 'genenames.org.tsv')
        df = pd.read_csv(in_file, delimiter='\t')
    elif tax_id == 10090:
        # NOTE(review): *type* is not honoured for mouse — the same Biomart
        # table is returned regardless.
        in_file = os.path.join(GIT_LFS_DATA_DIR, 'biomart', 'mm10', 'mm10.csv')
        df = pd.read_csv(in_file, header=0, index_col=None)
    else:
        raise NotImplementedError("Unknown taxonomy ID")
    return df
def _translate(cat, x, to_field, from_field):
    # Core lookup shared by the public translate helpers: key the
    # conversion table on *from_field* and read *to_field* for *x*
    # (scalar or list-like).
    keyed = cat.set_index(from_field)
    return keyed.loc[x, to_field]
def translate(x, to_field, from_field, tax_id=9606, type='all'):
    """Translate identifier(s) *x* from one conversion-table column to another.

    Consistency fix: delegate the lookup to _translate() instead of
    duplicating its set_index/.loc logic inline, matching the other
    helpers in this module.
    """
    cat = conversion_table(type=type, tax_id=tax_id)
    return _translate(cat, x, to_field, from_field)
def gene_symbol_to_entrez(g, tax_id=9606):
    """Map approved gene symbol(s) *g* to Entrez gene ID(s)."""
    cat = conversion_table(type='all', tax_id=tax_id)
    return _translate(cat, g, 'Entrez Gene ID', 'Approved Symbol')
def entrez_to_gene_symbol(e, tax_id=9606):
    """Map Entrez gene ID(s) *e* to approved gene symbol(s)."""
    cat = conversion_table(type='all', tax_id=tax_id)
    return _translate(cat, e, 'Approved Symbol', 'Entrez Gene ID')
def entrez_to_ensembl(e, tax_id=9606):
    """Map Entrez gene ID(s) *e* to Ensembl gene ID(s)."""
    cat = conversion_table(type='all', tax_id=tax_id)
    return _translate(cat, e, 'Ensembl Gene ID', 'Entrez Gene ID')
def gene_symbol_to_ensembl(g, tax_id=9606):
    """Map approved gene symbol(s) *g* to Ensembl gene ID(s)."""
    cat = conversion_table(type='all', tax_id=tax_id)
    return _translate(cat, g, 'Ensembl Gene ID', 'Approved Symbol')
def ensembl_to_gene_symbol(e, tax_id=9606):
    """Map Ensembl gene ID(s) *e* to approved gene symbol(s)."""
    cat = conversion_table(type='all', tax_id=tax_id)
    return _translate(cat, e, 'Approved Symbol', 'Ensembl Gene ID')
def ensembl_to_name(e, tax_id=9606):
    """Map Ensembl gene ID(s) *e* to the approved gene name(s)."""
    cat = conversion_table(type='all', tax_id=tax_id)
    return _translate(cat, e, 'Approved Name', 'Ensembl Gene ID')
def translate_quantification_resolving_duplicates(dat, from_field, to_field, type='all', tax_id=9606, resolution='mean'):
    """
    Re-index quantification data from one identifier field to another,
    aggregating any rows that map to the same new identifier.

    :param dat: Pandas DataFrame of quantification data, indexed by
    *from_field* identifiers.
    :param to_field: Conversion-table column to re-index by.
    :param from_field: Conversion-table column matching the current index.
    :param type: Conversion-table flavour, passed through to translate().
    :param tax_id: NCBI taxonomy ID (9606 human, 10090 mouse).
    :param resolution: Approach used to resolve duplicates; only 'mean' is
    currently supported (TODO: add more as required).
    :return: DataFrame indexed by *to_field* identifiers.
    """
    # Bug fix: the *type* argument was previously accepted but never used.
    gs = translate(dat.index, to_field, from_field, tax_id=tax_id, type=type)
    # Drop duplicated source IDs, then anything that failed to translate.
    gs = gs.loc[~gs.index.duplicated()]
    gs.dropna(inplace=True)
    dat = dat.loc[gs.index]
    dat.index = gs
    # The *target* index may still contain duplicates (many-to-one mappings).
    dupe_idx = dat.index[dat.index.duplicated()]
    dupe_map = dat.index.isin(dupe_idx)
    if dupe_map.sum() != 0:
        dupes = dat.loc[dupe_map]
        dat = dat.loc[~dupe_map]
        if resolution == 'mean':
            dupes_aggr = dupes.groupby(dupes.index).mean()
        else:
            raise NotImplementedError("Unsupported resolution method %s." % resolution)
        # Compatibility fix: DataFrame.append() was removed in pandas 2.0;
        # pd.concat is the long-standing equivalent.
        dat = pd.concat([dat, dupes_aggr])
    return dat
def gene_symbol_to_x_robust(
    g,
    to_field,
    tax_id=9606,
    additional_search_fields=('Previous Symbols', 'Synonyms'),
    split_str=', ',
    type='all'
):
    """
    Translate gene symbol(s) to another compatible field, with fallbacks.
    Where a direct lookup fails, we also look in the fields in
    additional_search_fields.  If a unique match is found, this is used
    instead.  Priority is given to fields mentioned earlier.
    :param g: Gene symbol or list of gene symbols.
    :param to_field: Lookup column in the conversion table.
    :param tax_id: NCBI taxonomy ID (9606 human, 10090 mouse).
    :param additional_search_fields: Fallback columns, searched in order.
    :param split_str: String used to split the field (in the event that it
    contains multiple entries).  If None, no splitting is attempted.  This
    is necessary unless the entries are lists already.
    :param type: Conversion-table flavour passed to conversion_table().
    :return: pandas Series of translated values, indexed by input symbol.
    """
    # NOTE(review): under Python 3 a plain str *has* __iter__, so a single
    # symbol passed as a string is NOT wrapped here and is treated as a
    # sequence of characters — confirm callers always pass lists.
    if not hasattr(g, '__iter__'):
        g = [g]
    cat = conversion_table(type=type, tax_id=tax_id).set_index('Approved Symbol')
    # First pass: direct lookup on the approved symbol.
    res = cat.loc[g, to_field]
    # Symbols with no direct hit ("spares") go through the fallback fields.
    spares = pd.Index(g).difference(cat.index).tolist()
    for asf in additional_search_fields:
        if not len(spares):
            return res
        lookup = cat.loc[:, asf]
        if split_str is not None:
            lookup = lookup.fillna('').str.split(split_str)
        new_spares = []
        for t in spares:
            # Boolean mask of rows whose fallback field mentions this symbol.
            ix = lookup.apply(lambda x: t in x)
            if ix.sum() == 1:
                # Unique match: adopt it.  NOTE(review): this assigns a
                # 1-element ndarray into res.loc[t] — confirm downstream
                # code tolerates the resulting element type.
                res.loc[t] = cat.loc[ix, to_field].values
            else:
                # Ambiguous (>1) or absent (0): try the next fallback field.
                new_spares.append(t)
        spares = new_spares
    return res
# Convenience NCBI taxonomy IDs used by homologs_table() callers.
mouse_tid = 10090
human_tid = 9606
def homologs_table(tid1, tid2, field='gene_symbol'):
    """
    Find matching properties between the two different taxonomies, specified by the IDs. The field tells us which
    attribute column to report.
    Homologene data is used to generate this.
    :param tid1: Taxonomy ID 1.
    :param tid2: Taxonomy ID 2
    :param field: Column name in homologene data.
    :return: DataFrame with two columns, named by taxonomy ID
    """
    homolog_file = os.path.join(GIT_LFS_DATA_DIR, 'homologene', 'homologeneb68.data')
    # Column layout of the (headerless) Homologene build 68 dump.
    homolog_header = [
        'hid',
        'taxonomy_id',
        'gene_id',
        'gene_symbol',
        'protein_gi',
        'protein_accessor'
    ]
    homologs = pd.read_csv(homolog_file, header=None, names=homolog_header, sep='\t')
    # Homology-group IDs (HIDs) present in each taxonomy...
    hid1 = homologs.loc[homologs.taxonomy_id == tid1, 'hid']
    hid2 = homologs.loc[homologs.taxonomy_id == tid2, 'hid']
    # ...and their de-duplicated intersection: groups present in BOTH.
    joint_hid = pd.Index(hid1.values).intersection(hid2.values)
    joint_hid = joint_hid[~joint_hid.duplicated()]
    # Force a 1:1 mapping by keeping only the first entry per HID on each
    # side.  NOTE(review): the original comment promised to "quantify this
    # loss of m2m mappings" but no such accounting is actually done.
    map1 = homologs.loc[(homologs.taxonomy_id == tid1) & (homologs.hid.isin(joint_hid))]
    map1 = map1.loc[~map1.hid.duplicated()].set_index('hid')
    map2 = homologs.loc[(homologs.taxonomy_id == tid2) & (homologs.hid.isin(joint_hid))]
    map2 = map2.loc[~map2.hid.duplicated()].set_index('hid')
    # Assemble the result, aligning both sides on the shared HID index.
    col_names = ["%s_%d" % (field, tid1), "%s_%d" % (field, tid2)]
    res = pd.DataFrame(columns=col_names, index=joint_hid)
    res.loc[:, col_names[0]] = map1.loc[:, field]
    res.loc[:, col_names[1]] = map2.loc[:, field]
    return res
| [
"g.rosser@qmul.ac.uk"
] | g.rosser@qmul.ac.uk |
3a23a0fa6b2df3de7ac1de52c4ea86e8a827ab99 | 54c90c37d6fc0942eb5e3d28de108b6bf7b494e4 | /Homework/hw13/tests/cyber-monday-part2.py | 95aa24d510af52d110144073a911701b56a8a1e7 | [] | no_license | kanakgarg/CS61A-Fall-2017 | 82ef434ad52b160eb34e2575c54275a9f2caeca0 | d9ee467723502f3077ceaaf99a4a40a107381bc4 | refs/heads/master | 2022-01-10T20:04:28.348041 | 2019-06-03T18:45:56 | 2019-06-03T18:45:56 | 155,434,691 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | test = {
'name': 'cyber-monday-part2',
'points': 0,
'suites': [
{
'cases': [
{
'code': r"""
sqlite> SELECT * FROM lowest_prices;
Hallmart|GameStation|298.98
Targive|QBox|390.98
Targive|iBook|110.99
RestBuy|kBook|94.99
Hallmart|qPhone|85.99
Hallmart|rPhone|69.99
RestBuy|uPhone|89.99
RestBuy|wBook|114.29
""",
'hidden': False,
'locked': False
}
],
'ordered': False,
'scored': True,
'setup': r"""
sqlite> .read hw13.sql
""",
'teardown': '',
'type': 'sqlite'
}
]
}
| [
"kanakgarg@gmail.com"
] | kanakgarg@gmail.com |
2469baef7d63dc0312a7232192a2d47770aa10f8 | 1b68eb16fbc5fe7cc64c250958878cc5f7a6cc28 | /scripts/flowcells/test_barcode_masks.py | e79d9fb7def2ded39cca1bd1d644d3c114535524 | [] | no_license | StamLab/stampipes | 537ff3cfcaa7e502dfdf7a08443d912ecb20c16e | 4680d91ad7c134fd1aba4128a51a3708477fbc65 | refs/heads/main | 2023-08-20T19:14:05.517397 | 2023-07-30T21:13:47 | 2023-07-30T21:16:13 | 22,504,544 | 0 | 10 | null | 2023-08-01T22:44:42 | 2014-08-01T10:02:18 | Python | UTF-8 | Python | false | false | 1,523 | py | import random
from typing import List, Tuple
import pytest
from barcode_masks import get_barcode_masks
@pytest.mark.parametrize("_name,read_len,index_len,lib_lengths,expected", [
("basic-paired", 75, 8, [(8, 8)], ["y75,i8,i8,y75"]),
("toolong-paired", 75, 10, [(8, 8)], ["y75,i8n2,i8n2,y75"]),
("tooshort-paired", 75, 8, [(10, 10)], ["y75,i8,i8,y75"]),
("mixed-paired", 75, 8, [(8, 8), (8, 0)], ["y75,i8,i8,y75", "y75,i8,n8,y75"]),
])
def test_expected_index_masks(
_name, read_len, index_len, lib_lengths, expected
):
""" Run some table-driven tests to make sure we get the right output """
data = make_processing_json(read_len, index_len, lib_lengths)
actual = get_barcode_masks(data)
assert set(actual) == set(expected)
def gen_barcode(length: int) -> str:
""" Generates a random string of letters of length 'length' """
return "".join(
[random.choice(['A', 'C', 'T', 'G']) for _ in range(length)]
)
def make_processing_json(read_len: int,
index_len: int,
lib_index_lengths: List[Tuple[int, int]],
) -> dict:
""" Creates a minimal "processing" data structure """
return {
"flowcell": {"read_length": read_len, "index_length": index_len, },
"libraries": [{
"barcode1": {"sequence": gen_barcode(bc1)},
"barcode2": {"sequence": gen_barcode(bc2)},
} for (bc1, bc2) in lib_index_lengths]
}
| [
"nelsonjs@altiusinstitute.org"
] | nelsonjs@altiusinstitute.org |
6904036ce2502f7663f22285429d28ef88053170 | 771b50f241c6bd40c348b8bc6bd969e65aabbe52 | /vendors/JacMLDash/mldash/web/ui_methods/ui_methods.py | c1f7cdf6e9e8a9ddd72d0c1177eb7d43d8c969dd | [
"MIT"
] | permissive | sakshamjindal/NSCL-PyTorch-Release | 0abd26520b9e3f64a8506b1f684f5a830feaa167 | 830842d10da68f82916c131e1f1cbf31b5f8b918 | refs/heads/master | 2022-12-30T05:50:05.924479 | 2020-10-25T13:56:32 | 2020-10-25T13:56:32 | 306,860,381 | 1 | 0 | MIT | 2020-10-25T13:56:33 | 2020-10-24T10:36:11 | Python | UTF-8 | Python | false | false | 2,913 | py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : ui_methods.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 09/06/2019
#
# This file is part of JacMLDash.
# Distributed under terms of the MIT license.
import json
from jacinle.utils.printing import kvformat, stformat
from jacinle.utils.meta import dict_deep_kv
from .config import allow_custom_ui_method
@allow_custom_ui_method
def format_kv(handler, kvs):
if kvs is None or kvs == '':
return '<pre>N/A</pre>'
if not isinstance(kvs, dict):
kvs = json.loads(kvs)
return '<pre>' + kvformat(kvs) + '</pre>'
@allow_custom_ui_method
def format_kv_recursive_flat(handler, kvs):
if kvs is None or kvs == '':
return '<pre>N/A</pre>'
if not isinstance(kvs, dict):
kvs = json.loads(kvs)
return '<pre>' + kvformat({k: v for k, v in dict_deep_kv(kvs) if not '__' in k}) + '</pre>'
@allow_custom_ui_method
def format_kv_recursive(handler, kvs):
if kvs is None or kvs == '':
return '<pre>N/A</pre>'
if not isinstance(kvs, dict):
kvs = json.loads(kvs)
return '<div class="inline-pre">' + stformat(kvs, indent_format=' ', end_format='<br />') + '</div>'
@allow_custom_ui_method
def format_kv_inline(handler, kvs, html=True):
if kvs is None or kvs == '':
if html:
return '<code>N/A</code>'
else:
return ''
if not isinstance(kvs, dict):
kvs = json.loads(kvs)
kvs = {k: kvs[k] for k in sorted(kvs.keys())}
ret = '; '.join(['{}={}'.format(k, '{:.6f}'.format(v) if isinstance(v, (float)) else v) for k, v in kvs.items()])
if html:
return '<code>' + ret + '</code>'
return ret
def format_kv_inline_tb(handler, kvs):
if kvs is None or kvs == '':
return ''
if not isinstance(kvs, dict):
kvs = json.loads(kvs)
kvs = {k: kvs[k] for k in sorted(kvs.keys())}
ret = '_'.join(['{} = {}'.format(k, '{:.6f}'.format(v) if isinstance(v, (float)) else v) for k, v in kvs.items()])
return ret
@allow_custom_ui_method
def format_fpath(handler, path):
return '<code>' + path + '</code>'
@allow_custom_ui_method
def format_log_fpath(handler, path):
return '<code>' + path + '</code>'
@allow_custom_ui_method
def format_tb_link(handler, port):
host = handler.request.host
if ':' in host:
host = host[:host.find(':')]
link = 'http://' + host + ':' + str(port)
return '<a href="{link}" target="_blank">{link}</a>'.format(link=link)
@allow_custom_ui_method
def format_extra_items(handler, run):
return dict()
@allow_custom_ui_method
def format_extra_summary_items(handler, run):
return dict()
def is_deleted(handler, run):
from mldash.plugins.trashbin import is_trash
return is_trash(run)
def is_stared(handler, run):
from mldash.plugins.star import is_stared
return is_stared(run)
| [
"saksham.jindal@fractal.ai"
] | saksham.jindal@fractal.ai |
f85eef8c8bccf9e9831e0c08277a03289a82d45b | 287941090e5c97c9d64731ba33ddaa11a78263be | /bin/track_template.py | 28e9783616e5b9e620ef0e955a2f99365fceb585 | [] | no_license | iiiir/ucsc_track_hub | d7719c2acd8d584a0413514cfd20f9ba81c4da08 | 1e8bff35771a1fbc4ddf64d6d0d32aa18f3bceca | refs/heads/master | 2021-01-10T10:17:55.779667 | 2016-01-12T22:35:22 | 2016-01-12T22:35:22 | 49,533,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,292 | py | #!/bin/env python
def big_track():
    """Return the trackDb stanza template for a single big* data track.

    The %s placeholders are, in order: track name, bigDataUrl, parent
    composite, subGroups, shortLabel, longLabel, and the type keyword plus
    its argument — presumably filled in by the caller via %-formatting.
    """
    return '''
track %s
bigDataUrl %s
parent %s on
subGroups %s
shortLabel %s
longLabel %s
type %s %s
visibility pack
'''
def group_track():
return '''
track SJGROUP
compositeTrack on
subGroup1 subgroups SubGroups GROUP3=GROUP3 GROUP4=GROUP4 SHH=SHH UNKNOWN=UNKNOWN WNT=WNT
subGroup2 assay Assay H3K27Ac=H3K27Ac LHX2=LHX2 LMX1A=LMX1A HLX=HLX
dimensions dimX=subgroups dimY=assay
sortOrder subgroups=+ assay=+
type bigWig
shortLabel Read count
longLabel H3K27Ac and TF ChIP-Seq read count for each medulloblastoma sample
visibility dense
'''
def assay_track():
return '''
track SJSE
compositeTrack on
subGroup1 subgroups SubGroups GROUP3_SE=GROUP3 GROUP4_SE=GROUP4 GROUP3_GROUP4_SE=GROUP3_GROUP4 SHH_SE=SHH WNT_SE=WNT SHH_WNT_SE=SHH_WNT CONSERVED_SE=CONSERVED
dimensions dimX=subgroups
sortOrder subgroups=+
type bigBed
shortLabel Super enhancer
longLabel Super enhancer called from each medulloblastoma subgroup
visibility dense
'''
def ucsc_txt():
return '''\
hub SJEPD
shortLabel MB super enhancer
longLabel Subgroup-specific regulatory landscape of medulloblastoma
genomesFile genomes.txt
email shuoguo.wang@stjude.org
descriptionUrl https://www.stjude.org
'''
def genomes_txt():
return '''\
genome hg19
trackDb hg19/trackDb.txt
''' | [
"shuoguo@gmail.com"
] | shuoguo@gmail.com |
f942e1aeb559fdac152b1e65d28e59acc2f85863 | 7d172bc83bc61768a09cc97746715b8ec0e13ced | /catalog/migrations/0003_saleorder.py | 65f1ca2bf38a92aa7ef0c747114f6b61e4a61de3 | [] | no_license | shivam1111/jjuice | a3bcd7ee0ae6647056bdc62ff000ce6e6af27594 | 6a2669795ed4bb4495fda7869eeb221ed6535582 | refs/heads/master | 2020-04-12T05:01:27.981792 | 2018-11-08T13:00:49 | 2018-11-08T13:00:49 | 81,114,622 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-03-30 07:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('catalog', '0002_s3object'),
]
operations = [
migrations.CreateModel(
name='SaleOrder',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=100, verbose_name='Name')),
],
options={
'db_table': 'sale_order',
'managed': False,
},
),
]
| [
"shivam1111@gmail.com"
] | shivam1111@gmail.com |
42cab140d610bbd29733bcfc0cd8cb984adb6dc4 | aae74f5d7ae382251c733b65b9e71eebf33b37ef | /1.var.py | 5f388f00c51a929adcf873e4e4420f1563214eb6 | [] | no_license | vladmer27/My-python-lessons- | fc8aab26136e5b7a6b8757dbfa9e3fbff4421003 | 33b1460d60111e80ac9d3f5684cd4fbd05259002 | refs/heads/main | 2023-06-05T09:51:33.912069 | 2021-06-24T11:19:13 | 2021-06-24T11:19:13 | 370,652,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py | # переменные
# Variables
my_var_1 = 100
my_var_2 = 200
c = my_var_1 + my_var_2
# Toggle comments with Ctrl + /
# print(c)
PI = 3.14
# *** Data types ***
# int — whole numbers; float — floating-point (real) numbers
_int = 100000
_float = 500.001
my_str = "hello 'there'"
# print(my_str)
# print(_float)
# Boolean type (bool):
# True / False
my_bool = True  # True acts as logical one, False as logical zero
# *** String formatting: the "f-string" ***
# First, formatting by concatenation:
z = "Hello"
r = " "
q = ", world!"
s = z + r + q
# print(s)
res = f"the first word: {z} the second word {r} the third word{q}"
# print(res)
# *** Arithmetic operations ***
var_1 = 10
var_2 = 2
# Addition
print(var_1 + var_2)
# Subtraction
print(var_1 - var_2)
# Multiplication
print(var_1 * var_2)
# Floor division: // discards the fractional part, and for two ints the
# result is an int.  (The original comment wrongly claimed an unrounded
# float — that describes true division, var_1 / var_2.)
print(var_1 // var_2)
| [
"vlad_2421996@mail.ru"
] | vlad_2421996@mail.ru |
ce6262c9cb7a4e54943ad60e11cbc087e6f1d08e | 3bad5405291b0caa97b2efb0a422f2bfe5a338ef | /webapp/pastbin_project/pastbin_project/urls.py | 802d31f3f93d4a197d25fac90d567d51bc65ad85 | [] | no_license | cyril7/mygit | 3aca60c204425522c0e01f897a0df90fc90f29ba | b323493bcd9d38aa40f1740bfc6be67dc8fd6eb3 | refs/heads/master | 2023-07-06T17:59:17.711961 | 2023-07-05T10:10:54 | 2023-07-05T10:10:54 | 33,746,822 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,291 | py | from django.conf.urls import patterns, include, url
# Legacy Django (<1.8) URLconf: uses the now-removed patterns() helper and
# function-based generic views.  Comments only added in this pass.
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'pastbin_project.views.home', name='home'),
    # url(r'^pastbin_project/', include('pastbin_project.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    # url(r'^admin/', include(admin.site.urls)),
)
# Admin site — enabled unconditionally here, superseding the stub above.
from django.contrib import admin
admin.autodiscover()
urlpatterns += patterns('',
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    url(r'^admin/', include(admin.site.urls)),
)
# Pastebin views: list on /, detail on /paste/<id>/, creation on /paste/add/.
from django.conf.urls.defaults import *
from django.views.generic.list_detail import object_list, object_detail
from django.views.generic.create_update import create_object
from pastebin.models import Paste
display_info = {'queryset': Paste.objects.all()}
create_info = {'model':Paste}
urlpatterns += patterns('',
    url(r'^$', object_list, dict(display_info, allow_empty=True)),
    url(r'^paste/(?P<object_id>\d+)/$', object_detail, display_info),
    url(r'^paste/add/$', create_object, create_info),
)
| [
"ericpenguins@gmail.com"
] | ericpenguins@gmail.com |
6a052f31fa2f030475ba2debea5e5ae8109e2018 | b51395ad5eff9f17ba6f984669e98f2485203788 | /awards/migrations/0001_initial.py | 9986d8edb970e79b743575ee39edaaf342002250 | [] | no_license | Balvine/Awwards | 683642ebc6d930fa1a9f9b87376b82927d777c9e | 02ebe7b330bc8782771f2f617cdbfc39a79d4fde | refs/heads/master | 2023-01-06T19:08:05.438648 | 2020-10-28T09:05:30 | 2020-10-28T09:05:30 | 306,539,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2020-10-25 15:48
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('profile_pic', models.ImageField(blank=True, upload_to='images/')),
('bio', models.TextField()),
('contact', models.CharField(blank=True, max_length=30)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"balvy075@gmail.com"
] | balvy075@gmail.com |
6ad010e433f3847a6d478e0b22994fe1007bc71e | 2774cd543667e48ad2f44539ff77bac2e84265ef | /ecell/ui/model_editor/ComplexShape.py | a9cbaddd22851fb517e0a9223184238b80f9ee93 | [] | no_license | ecell/ecell3-model-editor | 279fbe6797224ad319caa66731309923aed4cee5 | 02fcbb7085bd1410dcafcfb980c5ad138795a724 | refs/heads/master | 2021-01-19T06:40:17.464207 | 2008-02-27T09:25:44 | 2008-02-27T09:25:44 | 1,831,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,937 | py | #::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
# This file is part of the E-Cell System
#
# Copyright (C) 1996-2007 Keio University
# Copyright (C) 2005-2007 The Molecular Sciences Institute
#
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
#
# E-Cell System is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# E-Cell System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with E-Cell System -- see the file COPYING.
# If not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#END_HEADER
import os
import gtk
try:
import gnomecanvas
except:
import gnome.canvas as gnomecanvas
from Constants import *
from ResizeableText import *
import Utils as utils
import ecell.util
class ComplexShape:
    def __init__( self, anObject, aCanvas, x, y, width, height ):
        """Build the (not yet drawn) composite shape for *anObject*.

        No canvas items are created here; show() does that.
        """
        self.theCanvas = aCanvas
        self.parentObject = anObject
        self.thePathwayCanvas = anObject.theLayout.getCanvas()
        self.graphUtils = self.parentObject.getGraphUtils()
        # Bounding-box geometry of the composite shape.
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        # Maps descriptor name -> gnomecanvas item; filled by show().
        self.shapeMap = {}
        self.lastmousex = 0
        self.lastmousey = 0
        self.imageOrigins = {}
        self.shapeLock = {}
        # Mouse / drag interaction state flags.
        self.buttonpressed = False
        self.sumdeltax=0
        self.sumdeltay=0
        self.outlinedrag=False
        self.firstdrag=False
        self.dragbefore=False
        self.outlinedragged=False
        self.objectdragged=False
        self.shapename=None
        self.shift_press=False
    def show( self ):
        """Create the gnomecanvas items for every shape descriptor.

        Descriptors are drawn in Z order; each descriptor type is routed to
        the matching create* factory method.
        """
        canvasRoot = self.parentObject.theCanvas.getRoot()
        self.theRoot = canvasRoot.add(gnomecanvas.CanvasGroup )
        anSD = self.parentObject.getProperty( OB_PROP_SHAPE_DESCRIPTOR_LIST )
        anSD.reCalculate()
        self.shapeDescriptorList = anSD.getDescriptorList()
        # values()[:] copies the list before sorting (Python 2 dict.values()
        # returns a list here).
        aDescList = self.__sortByZOrder( self.shapeDescriptorList.values()[:] )
        for aDescriptor in aDescList:
            if aDescriptor[SD_TYPE] == CV_RECT:
                self.createRectangle( aDescriptor )
            elif aDescriptor[SD_TYPE] == CV_ELL:
                self.createEllipse( aDescriptor )
            elif aDescriptor[SD_TYPE] == CV_TEXT:
                self.createText( aDescriptor )
            elif aDescriptor[SD_TYPE] == CV_LINE:
                self.createLine( aDescriptor )
            elif aDescriptor[SD_TYPE] == CV_BPATH:
                self.createBpath( aDescriptor )
            elif aDescriptor[SD_TYPE] == CV_IMG:
                self.createImage( aDescriptor )
        self.isSelected = False
def delete( self ):
for aShapeName in self.shapeMap.keys():
self.shapeMap[ aShapeName ].destroy()
self.theRoot.destroy()
self.shapeMap = {}
self.imageOrigins = {}
self.shapeLock = {}
    def selected( self ):
        # Flag the shape as selected; the visual highlight is applied by
        # outlineColorChanged(), which brightens the outline when this is set.
        self.isSelected = True
    def unselected( self ):
        # Clear the selection flag.
        self.isSelected = False
    def outlineColorChanged( self ):
        """Re-apply the object's outline colour to outline-coloured sub-shapes
        (and, with an extra flag, to fill-coloured shapes)."""
        # find shapes with outline color
        anRGB = ecell.util.copyValue( self.parentObject.getProperty( OB_PROP_OUTLINE_COLOR ) )
        if self.isSelected:
            # Brighten each channel to highlight the selection.
            # NOTE(review): 32768 + value can exceed the 16-bit GDK maximum
            # (65535) — confirm getGdkColorByRGB clamps out-of-range values.
            for i in range(0,3):
                anRGB[i] = 32768 + anRGB[i]
        aColor = self.graphUtils.getGdkColorByRGB( anRGB )
        for aDescriptor in self.shapeDescriptorList.values():
            if aDescriptor[ SD_COLOR ] == SD_OUTLINE:
                if aDescriptor[SD_TYPE] in ( CV_RECT, CV_ELL, CV_BPATH ):
                    self.changeShapeColor( aDescriptor[ SD_NAME ] , aColor)
                elif aDescriptor[SD_TYPE] == CV_LINE:
                    self.changeLineColor( aDescriptor[ SD_NAME ] , aColor )
            elif aDescriptor[ SD_COLOR ] == SD_FILL:
                # NOTE(review): the meaning of the trailing True argument is
                # defined by changeShapeColor, which lies outside this chunk.
                if aDescriptor[SD_TYPE] in ( CV_RECT, CV_ELL, CV_BPATH ):
                    self.changeShapeColor( aDescriptor[ SD_NAME ] , aColor, True )
def fillColorChanged( self ):
# find shapes with outline color
anRGB = ecell.util.copyValue( self.parentObject.getProperty( OB_PROP_FILL_COLOR ) )
aColor = self.graphUtils.getGdkColorByRGB( anRGB )
for aDescriptor in self.shapeDescriptorList.values():
if aDescriptor[ SD_COLOR ] == SD_FILL:
if aDescriptor[SD_TYPE] == CV_RECT:
self.changeShapeColor( aDescriptor[ SD_NAME ] , aColor )
elif aDescriptor[SD_TYPE] == CV_ELL:
self.changeShapeColor( aDescriptor[ SD_NAME ] , aColor )
elif aDescriptor[SD_TYPE] == CV_LINE:
self.changeLineColor( aDescriptor[ SD_NAME ] , aColor )
elif aDescriptor[SD_TYPE] == CV_BPATH:
self.changeShapeColor( aDescriptor[ SD_NAME ] , aColor )
    def labelChanged(self, newLabel):
        """Refresh the displayed label text after a rename.

        NOTE(review): *newLabel* itself is unused here — the new text is
        read back from the recalculated shape descriptor list instead.
        """
        # self.parentObject.getProperty( OB_PROP_SHAPE_DESCRIPTOR_LIST ).renameLabel(newLabel)
        self.shapeDescriptorList = self.parentObject.getProperty( OB_PROP_SHAPE_DESCRIPTOR_LIST ).getDescriptorList()
        self.renameText(self.shapeDescriptorList['text'])
        # resize(0, 0) recalculates the descriptors and redraws each sub-shape.
        self.resize(0,0)
    def move( self, deltax , deltay ):
        # Translate the whole canvas group; child items move with it.
        self.theRoot.move(deltax,deltay)
        return
def resize( self, deltawidth, deltaheight ):
self.width += deltawidth
self.height += deltaheight
self.parentObject.getProperty(OB_PROP_SHAPE_DESCRIPTOR_LIST).reCalculate()
for aDescriptor in self.shapeDescriptorList.values():
if aDescriptor[SD_TYPE] == CV_RECT:
self.resizeRectangle( aDescriptor )
elif aDescriptor[SD_TYPE] == CV_ELL:
self.resizeEllipse( aDescriptor )
elif aDescriptor[SD_TYPE] == CV_TEXT:
self.resizeText( aDescriptor )
elif aDescriptor[SD_TYPE] == CV_LINE:
self.resizeLine( aDescriptor )
elif aDescriptor[SD_TYPE] == CV_BPATH:
self.resizeBpath( aDescriptor )
elif aDescriptor[SD_TYPE] == CV_IMG:
self.resizeImage( aDescriptor )
    def resizeBpath( self, aDescriptor ):
        """Rebuild a Bezier path from its descriptor, converting every world
        coordinate pair to item coordinates before re-setting the path."""
        pathDef = aDescriptor[SD_SPECIFIC][SPEC_POINTS]
        aBpath = self.shapeMap[ aDescriptor[ SD_NAME ] ]
        self.setOutlineWidth( aDescriptor, aBpath )
        newPathDef = []
        for anArtPath in pathDef:
            # element 0 is the path command code; the rest are x,y pairs
            newArtPath=[ anArtPath[0]]
            # NOTE: relies on Python 2 integer division for the pair count
            for i in range(0,(len(anArtPath) - 1 )/2):
                x,y = aBpath.w2i(anArtPath[i*2+1], anArtPath[i*2+2] )
                newArtPath.extend( [x,y] )
            newArtPath = tuple(newArtPath)
            newPathDef.append( newArtPath )
        aBpath.set_bpath(gnomecanvas.path_def_new( newPathDef ) )
    def createBpath( self, aDescriptor ):
        """Create a canvas Bezier-path item from a descriptor, register its
        event handlers and store it in the shape map."""
        pathDef = aDescriptor[SD_SPECIFIC][SPEC_POINTS]
        aGdkColor = self.getGdkColor( aDescriptor )
        outlineColor = self.getOutlineColor( )
        aBpath = self.theRoot.add( gnomecanvas.CanvasBpath,
                                outline_color_gdk = outlineColor,
                                fill_color_gdk = aGdkColor )
        aBpath.set_bpath(gnomecanvas.path_def_new( pathDef ) )
        self.setOutlineWidth( aDescriptor, aBpath )
        self.addHandlers( aBpath, aDescriptor[ SD_NAME ] )
        self.shapeMap[ aDescriptor[ SD_NAME ] ] = aBpath
def setOutlineWidth( self, aDescriptor, aShape ):
outlineRatio = aDescriptor[SD_SPECIFIC][SPEC_WIDTH_RATIO]
outlineWidth = self.parentObject.getProperty( OB_PROP_OUTLINE_WIDTH )
outlineWidth *= outlineRatio
aShape.set_property( "width-units", outlineWidth )
    def createRectangle( self, aDescriptor ):
        """Create a canvas rectangle from a descriptor, wire its event
        handlers and store it in the shape map."""
        ( X1, Y1, X2, Y2 ) = aDescriptor[SD_SPECIFIC][SPEC_POINTS]
        aGdkColor = self.getGdkColor( aDescriptor )
        outlineColor = self.getOutlineColor( )
        aRect = self.theRoot.add(
            gnomecanvas.CanvasRect,
            x1 = X1, y1 = Y1, x2 = X2, y2 = Y2,
            outline_color_gdk = outlineColor,
            fill_color_gdk = aGdkColor )
        self.setOutlineWidth( aDescriptor, aRect )
        self.addHandlers( aRect, aDescriptor[ SD_NAME ] )
        self.shapeMap[ aDescriptor[ SD_NAME ] ] = aRect
def resizeRectangle( self, aDescriptor ):
( X1, Y1, X2, Y2 ) = aDescriptor[SD_SPECIFIC][SPEC_POINTS]
aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ]
(X1, Y1) = aShape.w2i( X1, Y1 )
(X2, Y2) = aShape.w2i( X2, Y2 )
aShape.set_property( 'x1', X1 )
aShape.set_property( 'y1', Y1 )
aShape.set_property( 'x2', X2 )
aShape.set_property( 'y2', Y2 )
def changeShapeColor( self, shapeName, aColor, flag = False ):
aShape = self.shapeMap[ shapeName ]
if flag :
aShape.set_property( 'outline_color_gdk', aColor )
else:
aShape.set_property( 'fill_color_gdk', aColor )
    def createEllipse( self, aDescriptor ):
        """Create a canvas ellipse from a descriptor, wire its event
        handlers and store it in the shape map."""
        ( X1, Y1, X2, Y2 ) = aDescriptor[SD_SPECIFIC][SPEC_POINTS]
        aGdkColor = self.getGdkColor( aDescriptor )
        outlineColor = self.getOutlineColor( )
        anEllipse = self.theRoot.add( gnomecanvas.CanvasEllipse, x1=X1, y1=Y1, x2=X2, y2=Y2, outline_color_gdk = outlineColor, fill_color_gdk = aGdkColor )
        self.setOutlineWidth( aDescriptor, anEllipse )
        self.addHandlers( anEllipse, aDescriptor[ SD_NAME ] )
        self.shapeMap[ aDescriptor[ SD_NAME ] ] = anEllipse
def resizeEllipse( self, aDescriptor ):
( X1, Y1, X2, Y2 ) = aDescriptor[SD_SPECIFIC][SPEC_POINTS]
aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ]
(X1, Y1) = aShape.w2i( X1, Y1 )
(X2, Y2) = aShape.w2i( X2, Y2 )
aShape.set_property( 'x1', X1 )
aShape.set_property( 'y1', Y1 )
aShape.set_property( 'x2', X2 )
aShape.set_property( 'y2', Y2 )
    def createLine( self, aDescriptor ):
        """Create a canvas line from a descriptor, wire its event handlers
        and store it in the shape map."""
        lineSpec = aDescriptor[SD_SPECIFIC]
        ( X1, Y1, X2, Y2 ) = aDescriptor[SD_SPECIFIC][SPEC_POINTS]
        aGdkColor = self.getGdkColor( aDescriptor )
        aLine = self.theRoot.add( gnomecanvas.CanvasLine,points=[X1,Y1,X2,Y2], width_units=lineSpec[ LINE_WIDTH ], fill_color_gdk = aGdkColor )
        self.addHandlers( aLine, aDescriptor[ SD_NAME ] )
        self.shapeMap[ aDescriptor[ SD_NAME ] ] = aLine
def resizeLine( self, aDescriptor ):
( X1, Y1, X2, Y2 ) = aDescriptor[SD_SPECIFIC][SPEC_POINTS]
aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ]
(X1, Y1) = aShape.w2i( X1, Y1 )
(X2, Y2) = aShape.w2i( X2, Y2 )
aShape.set_property( 'points', [X1,Y1,X2,Y2] )
def changeLineColor ( self, shapeName, aColor ):
aShape = self.shapeMap[ shapeName ]
aclr = aShape.get_property('fill_color_gdk')
aShape.set_property('fill_color_gdk', aColor )
aclr = aShape.get_property('fill_color_gdk')
    def createText( self, aDescriptor ):
        """Create a resizeable text item from a descriptor, wire its event
        handlers and store it in the shape map."""
        textSpec = aDescriptor[SD_SPECIFIC]
        (X1, Y1) = aDescriptor[SD_SPECIFIC][SPEC_POINTS]
        aGdkColor = self.getGdkColor( aDescriptor )
        #aText = self.theRoot.add( gnomecanvas.CanvasText,x=X1,y=Y1, fill_color_gdk = aGdkColor, text = textSpec[SPEC_LABEL], anchor #= gtk.ANCHOR_NW )
        #parentID=self.parentObject.getProperty(OB_PROP_FULLID)
        aText = ResizeableText( self.theRoot, self.theCanvas, X1, Y1, aGdkColor, textSpec[SPEC_LABEL], gtk.ANCHOR_NW )
        self.addHandlers( aText, aDescriptor[ SD_NAME ] )
        #aText.addHandlers(aDescriptor[ SD_NAME ])
        self.shapeMap[ aDescriptor[ SD_NAME ] ] = aText
def resizeText( self, aDescriptor ):
#by default text cannot be resized, it defines size
(x1, y1) = aDescriptor[SD_SPECIFIC][SPEC_POINTS]
aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ]
(x1, y1) = aShape.w2i( x1, y1 )
aShape.set_property( 'x', x1 )
aShape.set_property( 'y', y1 )
def renameText( self, aDescriptor ):
textSpec = aDescriptor[SD_SPECIFIC]
text=textSpec[SPEC_LABEL]
self.shapeMap[ aDescriptor[ SD_NAME ] ].set_property('text', text )
    def createImage( self, aDescriptor ):
        """Embed an image file as a canvas widget: load the pixbuf, wrap
        the gtk.Image in an EventBox (canvas widgets do not get item
        events directly), connect pointer signals, and register the shape."""
        imgSpec = aDescriptor[SD_SPECIFIC]
        filename = SHAPE_PLUGIN_PATH + imgSpec[IMG_FILENAME]
        anImage = gtk.Image( )
        anImage.set_from_file( filename )
        aPixbuf = anImage.get_property("pixbuf")
        aWidth = aPixbuf.get_property("width" )
        aHeight = aPixbuf.get_property( "height" )
        eventBox = gtk.EventBox()
        eventBox.add( anImage )
        eventBox.set_events( gtk.gdk.POINTER_MOTION_MASK| gtk.gdk.BUTTON_PRESS_MASK| gtk.gdk.BUTTON_RELEASE_MASK |gtk.gdk.ENTER_NOTIFY_MASK )
        aName = aDescriptor [SD_NAME]
        eventBox.show_all()
        # pointer events are translated to canvas events in img_event()
        eventBox.connect( "motion-notify-event", self.img_event, aName )
        eventBox.connect( "button-press-event", self.img_event, aName )
        eventBox.connect( "button-release-event", self.img_event, aName )
        eventBox.connect("enter-notify-event", self.enter_img )
        x1,y1 = imgSpec[SPEC_POINTS]
        imgShape = self.theRoot.add( gnomecanvas.CanvasWidget, x=x1, y=y1, width = aWidth, height = aHeight, widget = eventBox )
        self.shapeMap[ aDescriptor[ SD_NAME ] ] = imgShape
        self.imageOrigins[ aDescriptor[ SD_NAME ] ] = [ x1, y1 ]
        self.shapeLock [ aDescriptor[ SD_NAME ] ] = False
def resizeImage( self, aDescriptor ):
(x1, y1) = aDescriptor[SD_SPECIFIC][SPEC_POINTS]
aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ]
(x1, y1) = aShape.w2i( x1, y1 )
aShape.set_property( 'x', x1 )
aShape.set_property( 'y', y1 )
def img_event( self, *args ):
item = args[0]
event = ArtifitialEvent( args[1] )
# groupx, groupy = self.theRoot.get_property("x") , self.theRoot.get_property("y")
# allo = item.get_allocation()
# (a,b,c,d,offsx, offsy)= self.theRoot.i2c_affine( (0,0,0,0,0,0))
# event.x, event.y = groupx - offsx + event.x+allo.x, groupy - offsy + event.y+allo.y
shapeName = args[2]
shape = self.shapeMap[ shapeName ]
origx, origy = self.imageOrigins[ shapeName ]
zerox, zeroy = shape.w2i( 0,0 )
relx, rely = shape.w2i( event.x, event.y )
pixx, pixxy, flags =self.theCanvas.getCanvas().bin_window.get_pointer( )
worldx, worldy = self.theCanvas.getCanvas().window_to_world( pixx,pixxy)
event.x = worldx
event.y = worldy
self.rect_event( item, event, shapeName )
    def enter_img( self, *args ):
        """Pointer entered an embedded image: note that the mouse is back
        over the canvas area."""
        self.thePathwayCanvas.beyondcanvas = False
def getGdkColor( self, aDescriptor ):
aColorType = aDescriptor[ SD_COLOR ]
if aColorType == SD_FILL:
queryProp = OB_PROP_FILL_COLOR
elif aColorType == SD_OUTLINE:
queryProp = OB_PROP_OUTLINE_COLOR
elif aColorType == SD_TEXT:
queryProp = OB_PROP_TEXT_COLOR
anRGBColor = self.parentObject.getProperty( queryProp )
return self.graphUtils.getGdkColorByRGB( anRGBColor )
def getOutlineColor( self ):
anRGBColor = self.parentObject.getProperty( OB_PROP_OUTLINE_COLOR )
return self.graphUtils.getGdkColorByRGB( anRGBColor )
def __sortByZOrder ( self, desclist ):
fn = lambda x, y: ( x[SD_Z] < y[SD_Z] ) - ( y[SD_Z] < x[SD_Z] )
desclist.sort(fn)
return desclist
    def leftClick( self, shapeName, x, y):
        # usually select; on the system canvas background it adds an item
        if self.getShapeDescriptor(shapeName)[SD_FUNCTION] == SD_SYSTEM_CANVAS:
            self.parentObject.addItem( x, y )
        else:
            self.parentObject.doSelect()
        # pressing on a draggable part switches to the pressed cursor
        if self.getShapeDescriptor(shapeName)[SD_FUNCTION] in [ SD_FILL, SD_RING ]:
            self.changeCursor( shapeName, x, y, True )
    def rightClick ( self, shapeName, x, y, anEvent, shift=False ):
        # usually show menu; select the object first if it is not yet selected
        if not self.parentObject.isSelected:
            self.parentObject.doSelect( shift )
        self.parentObject.showMenu(anEvent,x, y)
    def SHIFT_leftClick ( self, shapeName, x, y):
        """Shift-click: add this object to the current selection."""
        self.parentObject.doSelect( True )
    def mouseDrag( self, shapeName, deltax, deltay, origx, origy ):
        # decide whether resize or move or draw arrow
        if self.getShapeDescriptor(shapeName)[SD_FUNCTION] == SD_OUTLINE:
            # first motion of an outline drag: latch the drag flags
            if not self.firstdrag and not self.dragbefore:
                self.outlinedragged=True
                self.firstdrag=True
            self.parentObject.outlineDragged( deltax, deltay, origx, origy )
        elif self.getShapeDescriptor(shapeName)[SD_FUNCTION] == SD_RING:
            self.parentObject.ringDragged( shapeName, deltax, deltay )
        elif self.getShapeDescriptor(shapeName)[SD_FUNCTION] == SD_FILL:
            # first motion of a body drag: remember the origin and shape
            if not self.firstdrag and not self.dragbefore:
                self.firstdrag=True
                self.objectdragged=True
                self.orgx=origx
                self.orgy=origy
                self.shapename=shapeName
            self.parentObject.objectDragged( deltax, deltay )
            self.setCursor(CU_MOVE)
    def doubleClick( self, shapeName ):
        """Double click opens the property editor for this object."""
        self.parentObject.popupEditor()
def getShapeDescriptor( self, shapeName ):
return self.parentObject.getProperty( OB_PROP_SHAPE_DESCRIPTOR_LIST ).getDescriptor( shapeName )
    def addHandlers( self, canvasObject, aName ):
        """Route every canvas-item event for this shape to rect_event(),
        passing the shape name as user data."""
        #canvasObject.connect('event', self.rect_event, aName )
        canvasObject.connect('event', self.rect_event, aName )
    def releaseButton( self, shapeName, x, y ):
        """Button released: restore the hover cursor and notify the parent."""
        self.changeCursor( shapeName, x, y, False )
        self.parentObject.buttonReleased()
    def mouseEntered( self, shapeName, x, y ):
        """Pointer entered a shape: switch to its hover cursor."""
        self.changeCursor( shapeName, x, y )
def changeCursor( self, shapeName, x, y, buttonpressed = False):
aFunction = self.getShapeDescriptor(shapeName)[SD_FUNCTION]
aCursorType = self.parentObject.getCursorType( aFunction, x, y , buttonpressed)
self.theCanvas.setCursor( aCursorType )
    def setCursor( self, aCursorType):
        """Set the canvas cursor directly."""
        self.theCanvas.setCursor( aCursorType )
    def getFirstDrag(self):
        """Return whether the current drag is in its first motion event."""
        return self.firstdrag
    def setFirstDrag(self,aValue):
        """Set the first-drag flag."""
        self.firstdrag=aValue
    def getDragBefore(self):
        """Return whether a drag has already happened for this press."""
        return self.dragbefore
    def setDragBefore(self,aValue):
        """Set the drag-before flag."""
        self.dragbefore=aValue
    def getIsButtonPressed(self):
        """Return whether mouse button 1 is currently held down."""
        return self.buttonpressed
    def getOutlineDragged(self):
        """Return whether the current drag started on the outline."""
        return self.outlinedragged
    def getObjectDragged(self):
        """Return whether the current drag is moving the whole object."""
        return self.objectdragged
def rect_event( self, *args ):
event = args[1]
item = args[0]
shapeName = args[2]
if event.type == gtk.gdk.BUTTON_PRESS:
if event.button == 1:
self.lastmousex = event.x
self.lastmousey = event.y
self.buttonpressed = True
if event.state>k.gdk.SHIFT_MASK == gtk.gdk.SHIFT_MASK:
self.shift_press = True
self.SHIFT_leftClick( shapeName, event.x, event.y)
else:
self.shift_press = False
self.leftClick( shapeName, event.x, event.y )
elif event.button == 3:
if event.state>k.gdk.SHIFT_MASK == gtk.gdk.SHIFT_MASK:
self.shift_press = True
else:
self.shift_press = False
self.rightClick(shapeName, event.x, event.y, event, self.shift_press )
elif event.type == gtk.gdk.BUTTON_RELEASE:
if event.button == 1:
self.buttonpressed = False
if self.dragbefore:
self.dragbefore=False
if self.objectdragged:
self.parentObject.objectDragged( 0,0 )
self.objectdragged=False
if self.outlinedragged:
self.parentObject.outlineDragged( 0, 0, 0, 0 )
self.outlinedragged=False
self.releaseButton(shapeName, event.x, event.y )
elif event.type == gtk.gdk.MOTION_NOTIFY:
if not self.buttonpressed:
return
if (event.state>k.gdk.BUTTON1_MASK)==0:
return
oldx = self.lastmousex
oldy = self.lastmousey
deltax = event.x - oldx
deltay = event.y - oldy
self.lastmousex = event.x
self.lastmousey = event.y
if not self.parentObject.theLayout.getCanvas().getRecentScroll():
# if there was a scroll, event.x, event.y gets a stupid value
self.mouseDrag( shapeName, deltax, deltay, oldx, oldy )
elif event.type == gtk.gdk._2BUTTON_PRESS:
if event.button == 1:
self.doubleClick( shapeName )
elif event.type == gtk.gdk.ENTER_NOTIFY:
self.mouseEntered( shapeName, event.x, event.y )
class ArtifitialEvent:
    """Lightweight mutable copy of a gdk event (name kept as-is for
    backward compatibility despite the 'Artificial' typo).

    img_event() overwrites x/y on this copy with canvas world coordinates
    before forwarding it, which a real gdk event would not allow.
    """
    def __init__( self, anEvent ):
        self.type = anEvent.type
        self.x = anEvent.x
        self.y = anEvent.y
        self.state = anEvent.state
        self.time = anEvent.time
        # only non-enter, non-motion events carry a .button attribute here
        if anEvent.type not in [ gtk.gdk.ENTER_NOTIFY, gtk.gdk.MOTION_NOTIFY ]:
            self.button = anEvent.button
| [
"moriyoshi@f1531174-cb10-0410-9fe6-89aa7ac3eedb"
] | moriyoshi@f1531174-cb10-0410-9fe6-89aa7ac3eedb |
09dec256d7e185853e651ea7cb426c2f622660c8 | e24cc8a99ae614c16da3e0486cea3af59bbe1f62 | /python/program_020.py | ac27c183bd32d41151e7d20ca210f253a1ff1714 | [] | no_license | JamesonSherman/Python_Repository | 197d9ba3b2f59eb3613029c057402c50481c000d | fad8cd7cdc3f5f4f064ef27bbc75d97befff9377 | refs/heads/master | 2020-04-06T11:57:07.653370 | 2018-11-13T19:51:10 | 2018-11-13T19:51:10 | 157,437,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | #####################################################
### Name: James Sherman
### Student ID: 900114
### Assignment: Problem Set 2
### Program Name: program_20.py
### Due Date: October 11, 2018 1:15pm
#####################################################
# Remove duplicates from a list while preserving first-seen order.
list1 = [5, 38, 9, 45, 3, 38, 66, 83, 9, 5, 7, 98, 3]
uniqueList = []
seen = set()  # O(1) membership test instead of rescanning uniqueList (O(n^2))
for value in list1:
    if value not in seen:
        seen.add(value)
        uniqueList.append(value)
print("$ python remove_dups_from_list_v0.py")
print("list =", list1)
print("unique_list =", uniqueList)
| [
"noreply@github.com"
] | noreply@github.com |
370bdedfa4af55d99c1d4c1db116c26d97c39037 | aea96aa406250c3a2a8f2799e6cbbad256c262c3 | /EG/reduce_str_float.py | f08662bdfd724cd99ca78245308d53339a3013ec | [] | no_license | xiaochuanjiejie/python_exercise | cb0ffaa4b7c961c8ca9847526c84ee6ba261620c | 710fa85fd2d7a17994081bdc5f8b5ff66b77416e | refs/heads/master | 2021-01-21T16:18:04.640093 | 2017-08-11T10:02:49 | 2017-08-11T10:02:49 | 95,403,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | #coding: utf-8
import math
from functools import reduce
# Parse the decimal string in `s` into a float without calling float():
# strip the dot, fold the digits into an integer with reduce(), then
# divide by 10**n where n is the number of fractional digits.
# (Python 2 syntax: print statements and lazy map().)
s = '1234.5678'
index = s.index('.')
n = len(s) - 1 - index
s = s.replace('.','')
print s
def chr2num(s):
    # map a single digit character to its integer value
    return {'0':0,'1':1,'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9}[s]
print map(chr2num,s)
lst = map(chr2num,s)
# lst = list(map(chr2num,s))
# print lst
def cal(x,y):
    # fold step: running value * 10 + next digit
    return x * 10 + y
number = reduce(cal,lst)
print number
floatx = number / math.pow(10,n)
print floatx
"xiaochuanjiejie@gmail.com"
] | xiaochuanjiejie@gmail.com |
4dc8b1cb224998e9deef88383369e47bde61a18b | 315cec5ad7f72f99a22a5c22b0ea459413a9ca3f | /py_intro/lesson03/task06.py | ee822bc59826cbdecd084f7989b31b40abb8e7c0 | [] | no_license | azhuma/geek | 53e7e8930c031d6da0193112bee8742493759d13 | 1b3ce4eac193421b32b3a7d88623c1fd756bd7fa | refs/heads/master | 2021-01-15T00:53:17.459018 | 2020-11-19T11:45:58 | 2020-11-19T11:45:58 | 242,819,613 | 0 | 0 | null | 2020-11-19T11:48:34 | 2020-02-24T19:07:50 | Jupyter Notebook | UTF-8 | Python | false | false | 251 | py | def int_func(str):
return str.title()
def process(str):
    """Title-case every whitespace-separated word of *str* and rejoin
    the words with single spaces."""
    capitalized = [int_func(word) for word in str.split()]
    return " ".join(capitalized)
# Demo output for the exercise (f-strings => Python 3).
print(f"result={int_func('text')}")
print(f"result={process('this is my text')}")
| [
"akylbek.zhumabayev@processing.kz"
] | akylbek.zhumabayev@processing.kz |
da7bc737ca8c812ae6b9e3231177c6e3ef5354f9 | 89d236554b6d424f1864c7dde987f7b9619ea252 | /src/online_triplet_loss.py | 4405f69f41fc60212936c3f003680108e5385ea2 | [
"MIT"
] | permissive | gvravi/FaceNet_TF-2.0 | 56bda61f4ba24be5c85e6a6196d2d13e96fc7b7a | 2fe54ed53af46c89b86ae347511b4c758c2f988c | refs/heads/master | 2022-11-16T18:14:54.563864 | 2020-07-17T10:01:09 | 2020-07-17T10:01:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,683 | py | import tensorflow as tf
class Online_Mining(tf.keras.layers.Layer):
    """Keras layer computing the batch-hard online triplet loss.

    For each sample (anchor) in the batch it picks the hardest positive
    (farthest same-label sample) and the hardest negative (closest
    different-label sample) from the pairwise squared-distance matrix and
    applies a hinge with ``margin``.
    """
    def __init__(self,margin = 0.6):
        super(Online_Mining, self).__init__()
        self.margin = margin  # triplet hinge margin
    def call(self, total_input):
        # total_input: (embeddings, labels); labels flattened to rank 1
        self.inputs,y_inputs = total_input
        self.y_inputs = tf.reshape(y_inputs,[y_inputs.shape[0],])
        # calculates |a|^2 , |b|^2 (diagonal of the Gram matrix)
        dot_product = tf.matmul(self.inputs, tf.transpose(self.inputs))
        square_norm = tf.linalg.diag_part(dot_product)
        # computes pairwise_dist where index (x,y) is the squared distance
        # between sample x and sample y in the batch:
        # |a-b|^2 = |a|^2 + |b|^2 - 2 <a,b>
        # For more info visit 'https://omoindrot.github.io/triplet-loss'
        distances = tf.expand_dims(square_norm, 0) - 2.0 * dot_product + tf.expand_dims(square_norm, 1)
        # clamp tiny negatives caused by floating-point error
        pairwise_dist = tf.maximum(distances,0.0)
        # creates a mask and zeros out the invalid anchor_positive pairs
        mask_anchor_positive = tf.cast(positive_anchor_mask(self.y_inputs),tf.float32)
        anchor_positive_dist = tf.multiply(pairwise_dist,mask_anchor_positive)
        hardest_positive_dist = tf.reduce_max(anchor_positive_dist, axis=1, keepdims=True)
        # creates a mask for anchor_negative pairs; invalid entries are
        # pushed up to the row maximum so reduce_min ignores them
        mask_anchor_negative = tf.cast(negative_anchor_mask(self.y_inputs),tf.float32)
        anchor_negative_dist_max = tf.reduce_max(pairwise_dist,axis = 1,keepdims=True)
        anchor_negative_dist = pairwise_dist + tf.multiply(anchor_negative_dist_max,(1.0 - mask_anchor_negative))
        hardest_negative_dist = tf.reduce_min(anchor_negative_dist,axis = 1, keepdims = True )
        # hinge on the hard triplets, averaged over the batch
        loss = tf.maximum(hardest_positive_dist - hardest_negative_dist + self.margin, 0.0)
        loss = tf.reduce_mean(loss)
        return loss
def positive_anchor_mask(y_inputs):
    """Boolean [N, N] mask: True where i != j and label_i == label_j
    (valid anchor/positive pairs)."""
    labels_match = tf.equal(tf.expand_dims(y_inputs, 0), tf.expand_dims(y_inputs, 1))
    distinct_indices = tf.logical_not(tf.cast(tf.eye(tf.shape(y_inputs)[0]), tf.bool))
    return tf.logical_and(labels_match, distinct_indices)
def negative_anchor_mask(y_inputs):
    """Boolean [N, N] mask: True where label_i != label_j
    (valid anchor/negative pairs)."""
    same_label = tf.equal(tf.expand_dims(y_inputs, 0), tf.expand_dims(y_inputs, 1))
    return tf.logical_not(same_label)
| [
"noreply@github.com"
] | noreply@github.com |
50fa25dc930404d0d7198078e8b171b750953f5c | ca5c5becd8b57b4d77af3aa776b3c478ca962bf0 | /src/main/ODR_MC/branches/v3/upFun.py | 28e5d2a45a215fc146e7f361050eb44ad40d186e | [] | no_license | gerritholl/Harmonisation | 42118c46d093115ddd87fca094a9ac8881aede71 | 31b8bd5a0da8c6fc4a31453cf7801fcca25d4951 | refs/heads/master | 2021-09-07T03:07:03.843117 | 2017-12-18T13:57:38 | 2017-12-18T13:57:38 | 110,711,229 | 0 | 0 | null | 2017-11-14T15:53:19 | 2017-11-14T15:53:19 | null | UTF-8 | Python | false | false | 4,966 | py | """ FIDUCEO FCDR harmonisation
Author: Arta Dilo / NPL MM
Date created: 09-01-2017
Last update: 17-01-2017
Functions for propagating uncertainty to the calibrated radiance:
- function to calculate first derivatives to measurement eq. variables,
- and first derivatives to calibration coefficients;
- function for uncertainty propagation using GUM.
"""
import numpy as np
class avhrr(object):
''' The class contains a function for the measurement equation and functions
for calculating sensitivity coefficients to variables and parameters in the
measurement equation. '''
def __init__(self, nop, nos):
self.slabel = 'avhrr' # series label
self.nopairs = nop # number of sensor pairs in the series
self.nosensors = nos # number of sensors in the series
# set manually number of meas. eq. parameters; will change if needed
self.nocoefs = 4 # number of calibration coefficients
self.novars = 5 # number of meas. eq. variables
# AVHRR measurement equation
def measEq(self, X, a):
# add checks for number of calib. coefficients and variables
a0 = a[0] # AVHRR model coefficients
a1 = a[1]
a2 = a[2]
a3 = a[3]
CE = X[:,2] # Earth counts
Cs = X[:,0] # space counts
Cict = X[:,1] # ICT counts
Lict = X[:,3] # ICT radiance
To = X[:,4] # orbit temperature
# Earth radiance from Earth counts and calibration data
LE = a0 + (0.98514+a1)*Lict*(Cs-CE)/(Cs-Cict) + a2*(Cict-CE)*(Cs-CE)
LE += a3*To
return LE # return Earth radiance
''' Partial derivatives to measurement equation variables and coefficients;
these form the Jacobian row(s) for the LS in a pair sensor-reference. '''
def sensCoeff(self, X, a):
p = self.nocoefs # number of calibration coefficients
m = self.novars # number of harmonisation variables
a1 = a[1] # AVHRR model coefficients
a2 = a[2]
a3 = a[3]
CE = X[:,2] # Earth counts
Cs = X[:,0] # space counts
Cict = X[:,1] # ICT counts
Lict = X[:,3] # ICT radiance
To = X[:,4] # orbit temperature
# initialize array of sensitivity coefficients per data row
sens = np.zeros((CE.shape[0], p+m)) # should check it is 9
# partial derivatives to calibration coefficients
sens[:,0] = 1. # dLE / da0
sens[:,1] = Lict * (Cs - CE) / (Cs - Cict) # dLE / da1
sens[:,2] = (Cict - CE) * (Cs - CE) # dLE / da2
sens[:,3] = To # dLE / da3
# partial derivatives to meas.eq. variables
sens[:,4] = (0.98514+a1)*Lict*(CE-Cict)/(Cs-Cict)**2 + a2*(Cict-CE) # dLE/dCs
sens[:,5] = (0.98514+a1)*Lict*(Cs-CE)/(Cs-Cict)**2 + a2*(Cs-CE) # dLE/dCict
sens[:,6] = (0.98514+a1)*Lict/(Cict-Cs) + a2*(2*CE-Cs-Cict) # dLE/dCE
sens[:,7] = (0.98514+a1) * (Cs-CE) / (Cs-Cict) # dLE/dLict
sens[:,8] = a3 # dLE/dTo
return sens
''' Evaluate Earth radiance uncertainty from coefficients uncertainty '''
def va2ULE(self, X, a, Va):
p = self.nocoefs # number of calibration coefficients
sens = self.sensCoeff(X, a) # sensitivity coeffs for matchup obs.
# compute uncertainty from calibration coefficients
u2La = np.dot(sens[:, 0:p]**2, np.diag(Va)) # coeffs. variance component
corU = np.zeros((X[:,0].shape[0]))
for i in range(p-1):
for j in range(i+1,p):
corU[:] += 2 * sens[:,i] * sens[:,j] * Va[i,j]
u2La += corU # add coeffs' correlation component
return np.sqrt(u2La) # return radiance uncert. from coeffs uncertainty
''' Evaluate Earth radiance uncertainty via GUM law of propagation '''
def uncLE(self, X, a, uX, Va):
# assumes no correlation between X variables
p = self.nocoefs # number of calibration coefficients
m = self.novars # number of harmonisation variables
sens = self.sensCoeff(X, a) # sensitivity coeffs for matchup obs.
u2La = self.va2ULE(X, a, Va)**2 # uncertainty from calib. coefficients
# evaluate uncertainty from harmonisation data variables
u2LX = np.einsum('ij,ij->i', sens[:, p:p+m]**2, uX**2)
u2L = u2La + u2LX # total squared uncertainty of radiance
print "Ratio of coeffs' uncertainty component to total radiance uncertainty:"
print min(np.sqrt(u2La/u2L)), '-', max(np.sqrt(u2La/u2L))
return np.sqrt(u2L) # return uncertainty of Earth radiance
| [
"seh2@eoserver.npl.co.uk"
] | seh2@eoserver.npl.co.uk |
a4a41c511ad4e482fe95c4a61ab6d49518ec4964 | 2a3606551a4d850a7b4d6a4e08089c51108ef7be | /plugin.video.fanfilm/resources/lib/libraries/cleangenre.py | 855f071415fcee23e4105c7f8e444d07c12b232c | [
"GPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | rrosajp/filmkodi | a6bb1823f4ed45453c8b8e54ffbd6a7b49f44450 | 0162cde9ae25ddbf4a69330948714833ff2f78c9 | refs/heads/master | 2021-09-18T06:03:17.561062 | 2018-06-22T23:28:53 | 2018-06-22T23:28:53 | 234,768,781 | 1 | 0 | Apache-2.0 | 2021-06-03T20:33:07 | 2020-01-18T17:11:57 | null | UTF-8 | Python | false | false | 42,826 | py | # -*- coding: utf-8 -*-
'''
FanFilm Add-on
Copyright (C) 2016 mrknow
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
def lang(i, lang):
if lang == 'bg':
i = i.replace('Action', u'\u0415\u043a\u0448\u044a\u043d')
i = i.replace('Adventure', u'\u041f\u0440\u0438\u043a\u043b\u044e\u0447\u0435\u043d\u0438\u0435')
i = i.replace('Animation', u'\u0410\u043d\u0438\u043c\u0430\u0446\u0438\u044f')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u041a\u043e\u043c\u0435\u0434\u0438\u044f')
i = i.replace('Crime', u'\u041a\u0440\u0438\u043c\u0438\u043d\u0430\u043b\u0435\u043d')
i = i.replace('Documentary', u'\u0414\u043e\u043a\u0443\u043c\u0435\u043d\u0442\u0430\u043b\u0435\u043d')
i = i.replace('Drama', u'\u0414\u0440\u0430\u043c\u0430')
i = i.replace('Family', u'\u0421\u0435\u043c\u0435\u0435\u043d')
i = i.replace('Fantasy', u'\u0424\u0435\u043d\u0442\u044a\u0437\u0438')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0418\u0441\u0442\u043e\u0440\u0438\u0447\u0435\u0441\u043a\u0438')
i = i.replace('Horror', u'\u0423\u0436\u0430\u0441')
i = i.replace('Music ', u'\u041c\u0443\u0437\u0438\u043a\u0430')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u041c\u0438\u0441\u0442\u0435\u0440\u0438\u044f')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0420\u043e\u043c\u0430\u043d\u0441')
i = i.replace('Science Fiction', u'\u041d\u0430\u0443\u0447\u043d\u0430\u002d\u0444\u0430\u043d\u0442\u0430\u0441\u0442\u0438\u043a\u0430')
i = i.replace('Sci-Fi', u'\u041d\u0430\u0443\u0447\u043d\u0430\u002d\u0444\u0430\u043d\u0442\u0430\u0441\u0442\u0438\u043a\u0430')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0422\u0440\u0438\u043b\u044a\u0440')
i = i.replace('War', u'\u0412\u043e\u0435\u043d\u0435\u043d')
i = i.replace('Western', u'\u0423\u0435\u0441\u0442\u044a\u0440\u043d')
elif lang == 'cs':
i = i.replace('Action', u'\u0041\u006b\u010d\u006e\u00ed')
i = i.replace('Adventure', u'\u0044\u006f\u0062\u0072\u006f\u0064\u0072\u0075\u017e\u006e\u00fd')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u006f\u0076\u0061\u006e\u00fd')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u004b\u006f\u006d\u0065\u0064\u0069\u0065')
i = i.replace('Crime', u'\u004b\u0072\u0069\u006d\u0069')
i = i.replace('Documentary', u'\u0044\u006f\u006b\u0075\u006d\u0065\u006e\u0074\u00e1\u0072\u006e\u00ed')
i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0061')
i = i.replace('Family', u'\u0052\u006f\u0064\u0069\u006e\u006e\u00fd')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0079')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0048\u0069\u0073\u0074\u006f\u0072\u0069\u0063\u006b\u00fd')
i = i.replace('Horror', u'\u0048\u006f\u0072\u006f\u0072')
i = i.replace('Music ', u'\u0048\u0075\u0064\u0065\u0062\u006e\u00ed')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u004d\u0079\u0073\u0074\u0065\u0072\u0069\u00f3\u007a\u006e\u00ed')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0074\u0069\u0063\u006b\u00fd')
i = i.replace('Science Fiction', u'\u0056\u011b\u0064\u0065\u0063\u006b\u006f\u0066\u0061\u006e\u0074\u0061\u0073\u0074\u0069\u0063\u006b\u00fd')
i = i.replace('Sci-Fi', u'\u0056\u011b\u0064\u0065\u0063\u006b\u006f\u0066\u0061\u006e\u0074\u0061\u0073\u0074\u0069\u0063\u006b\u00fd')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072')
i = i.replace('War', u'\u0056\u00e1\u006c\u0065\u010d\u006e\u00fd')
i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e')
elif lang == 'da':
i = i.replace('Action', u'\u0041\u0063\u0074\u0069\u006f\u006e')
i = i.replace('Adventure', u'\u0045\u0076\u0065\u006e\u0074\u0079\u0072')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u0074\u0069\u006f\u006e')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u004b\u006f\u006d\u0065\u0064\u0069\u0065')
i = i.replace('Crime', u'\u004b\u0072\u0069\u006d\u0069\u006e\u0061\u006c\u0069\u0074\u0065\u0074')
i = i.replace('Documentary', u'\u0044\u006f\u0063\u0075\u006d\u0065\u006e\u0074\u0061\u0072\u0079')
i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0061')
i = i.replace('Family', u'\u0046\u0061\u006d\u0069\u006c\u0069\u0065')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0079')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0048\u0069\u0073\u0074\u006f\u0072\u0069\u0065 ')
i = i.replace('Horror', u'\u0047\u0079\u0073\u0065\u0072')
i = i.replace('Music ', u'\u004d\u0075\u0073\u0069\u006b')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u004d\u0079\u0073\u0074\u0065\u0072\u0069\u0075\u006d')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0074\u0069\u006b')
i = i.replace('Science Fiction', u'\u0053\u0063\u0069\u002d\u0066\u0069')
i = i.replace('Sci-Fi', u'\u0053\u0063\u0069\u002d\u0066\u0069')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072')
i = i.replace('War', u'\u004b\u0072\u0069\u0067')
i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e')
elif lang == 'de':
i = i.replace('Action', u'\u0041\u0063\u0074\u0069\u006f\u006e')
i = i.replace('Adventure', u'\u0041\u0062\u0065\u006e\u0074\u0065\u0075\u0065\u0072')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u0074\u0069\u006f\u006e')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u004b\u006f\u006d\u00f6\u0064\u0069\u0065')
i = i.replace('Crime', u'\u004b\u0072\u0069\u006d\u0069')
i = i.replace('Documentary', u'\u0044\u006f\u006b\u0075\u006d\u0065\u006e\u0074\u0061\u0072\u0066\u0069\u006c\u006d')
i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0061')
i = i.replace('Family', u'\u0046\u0061\u006d\u0069\u006c\u0069\u0065')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0079')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0048\u0069\u0073\u0074\u006f\u0072\u0069\u0065')
i = i.replace('Horror', u'\u0048\u006f\u0072\u0072\u006f\u0072')
i = i.replace('Music ', u'\u004d\u0075\u0073\u0069\u006b')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u004d\u0079\u0073\u0074\u0065\u0072\u0079')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u004c\u006f\u0076\u0065\u0073\u0074\u006f\u0072\u0079')
i = i.replace('Science Fiction', u'\u0053\u0063\u0069\u0065\u006e\u0063\u0065 \u0046\u0069\u0063\u0074\u0069\u006f\u006e')
i = i.replace('Sci-Fi', u'\u0053\u0063\u0069\u0065\u006e\u0063\u0065 \u0046\u0069\u0063\u0074\u0069\u006f\u006e')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072')
i = i.replace('War', u'\u004b\u0072\u0069\u0065\u0067\u0073\u0066\u0069\u006c\u006d')
i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e')
elif lang == 'el':
i = i.replace('Action', u'\u0394\u03c1\u03ac\u03c3\u03b7')
i = i.replace('Adventure', u'\u03a0\u03b5\u03c1\u03b9\u03c0\u03ad\u03c4\u03b5\u03b9\u03b1')
i = i.replace('Animation', u'\u039a\u03b9\u03bd\u03bf\u03cd\u03bc\u03b5\u03bd\u03b1 \u03a3\u03c7\u03ad\u03b4\u03b9\u03b1')
i = i.replace('Biography', u'\u0392\u03b9\u03bf\u03b3\u03c1\u03b1\u03c6\u03b9\u03ba\u03ae')
i = i.replace('Comedy', u'\u039a\u03c9\u03bc\u03c9\u03b4\u03af\u03b1')
i = i.replace('Crime', u'\u0391\u03c3\u03c4\u03c5\u03bd\u03bf\u03bc\u03b9\u03ba\u03ae')
i = i.replace('Documentary', u'\u039d\u03c4\u03bf\u03ba\u03c5\u03bc\u03b1\u03bd\u03c4\u03ad\u03c1')
i = i.replace('Drama', u'\u0394\u03c1\u03ac\u03bc\u03b1')
i = i.replace('Family', u'\u039f\u03b9\u03ba\u03bf\u03b3\u03b5\u03bd\u03b5\u03b9\u03b1\u03ba\u03ae')
i = i.replace('Fantasy', u'\u03a6\u03b1\u03bd\u03c4\u03b1\u03c3\u03af\u03b1\u03c2')
i = i.replace('Game-Show', u'\u03a4\u03b7\u03bb\u03b5\u03c0\u03b1\u03b9\u03c7\u03bd\u03af\u03b4\u03b9')
i = i.replace('History', u'\u0399\u03c3\u03c4\u03bf\u03c1\u03b9\u03ba\u03ae')
i = i.replace('Horror', u'\u03a4\u03c1\u03cc\u03bc\u03bf\u03c5')
i = i.replace('Music ', u'\u039c\u03bf\u03c5\u03c3\u03b9\u03ba\u03ae')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u039c\u03c5\u03c3\u03c4\u03b7\u03c1\u03af\u03bf\u03c5')
i = i.replace('News', u'\u0395\u03b9\u03b4\u03ae\u03c3\u03b5\u03b9\u03c2')
i = i.replace('Reality-TV', u'\u03a1\u03b9\u03ac\u03bb\u03b9\u03c4\u03c5')
i = i.replace('Romance', u'\u03a1\u03bf\u03bc\u03b1\u03bd\u03c4\u03b9\u03ba\u03ae')
i = i.replace('Science Fiction', u'\u0395\u03c0\u002e \u03a6\u03b1\u03bd\u03c4\u03b1\u03c3\u03af\u03b1\u03c2')
i = i.replace('Sci-Fi', u'\u0395\u03c0\u002e \u03a6\u03b1\u03bd\u03c4\u03b1\u03c3\u03af\u03b1\u03c2')
i = i.replace('Sport', u'\u0391\u03b8\u03bb\u03b7\u03c4\u03b9\u03ba\u03ae')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0398\u03c1\u03af\u03bb\u03b5\u03c1')
i = i.replace('War', u'\u03a0\u03bf\u03bb\u03b5\u03bc\u03b9\u03ba\u03ae')
i = i.replace('Western', u'\u0393\u03bf\u03c5\u03ad\u03c3\u03c4\u03b5\u03c1\u03bd')
elif lang == 'es':
i = i.replace('Action', u'\u0041\u0063\u0063\u0069\u00f3\u006e')
i = i.replace('Adventure', u'\u0041\u0076\u0065\u006e\u0074\u0075\u0072\u0061')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u0063\u0069\u00f3\u006e')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u0043\u006f\u006d\u0065\u0064\u0069\u0061')
i = i.replace('Crime', u'\u0043\u0072\u0069\u006d\u0065\u006e')
i = i.replace('Documentary', u'\u0044\u006f\u0063\u0075\u006d\u0065\u006e\u0074\u0061\u006c')
i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0061')
i = i.replace('Family', u'\u0046\u0061\u006d\u0069\u006c\u0069\u0061')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u00ed\u0061')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0048\u0069\u0073\u0074\u006f\u0072\u0069\u0061')
i = i.replace('Horror', u'\u0054\u0065\u0072\u0072\u006f\u0072')
i = i.replace('Music ', u'\u004d\u00fa\u0073\u0069\u0063\u0061')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u004d\u0069\u0073\u0074\u0065\u0072\u0069\u006f')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0063\u0065')
i = i.replace('Science Fiction', u'\u0043\u0069\u0065\u006e\u0063\u0069\u0061 \u0066\u0069\u0063\u0063\u0069\u00f3\u006e')
i = i.replace('Sci-Fi', u'\u0043\u0069\u0065\u006e\u0063\u0069\u0061 \u0066\u0069\u0063\u0063\u0069\u00f3\u006e')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0053\u0075\u0073\u0070\u0065\u006e\u0073\u0065')
i = i.replace('War', u'\u0047\u0075\u0065\u0072\u0072\u0061')
i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e')
elif lang == 'fr':
i = i.replace('Action', u'\u0041\u0063\u0074\u0069\u006f\u006e')
i = i.replace('Adventure', u'\u0041\u0076\u0065\u006e\u0074\u0075\u0072\u0065')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u0074\u0069\u006f\u006e')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u0043\u006f\u006d\u00e9\u0064\u0069\u0065')
i = i.replace('Crime', u'\u0043\u0072\u0069\u006d\u0065')
i = i.replace('Documentary', u'\u0044\u006f\u0063\u0075\u006d\u0065\u006e\u0074\u0061\u0069\u0072\u0065')
i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0065')
i = i.replace('Family', u'\u0046\u0061\u006d\u0069\u006c\u0069\u0061\u006c')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0074\u0069\u0071\u0075\u0065')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0048\u0069\u0073\u0074\u006f\u0069\u0072\u0065')
i = i.replace('Horror', u'\u0048\u006f\u0072\u0072\u0065\u0075\u0072')
i = i.replace('Music ', u'\u004d\u0075\u0073\u0069\u0071\u0075\u0065')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u004d\u0079\u0073\u0074\u00e8\u0072\u0065')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0063\u0065')
i = i.replace('Science Fiction', u'\u0053\u0063\u0069\u0065\u006e\u0063\u0065\u002d\u0046\u0069\u0063\u0074\u0069\u006f\u006e')
i = i.replace('Sci-Fi', u'\u0053\u0063\u0069\u0065\u006e\u0063\u0065\u002d\u0046\u0069\u0063\u0074\u0069\u006f\u006e')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072')
i = i.replace('War', u'\u0047\u0075\u0065\u0072\u0072\u0065')
i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e')
elif lang == 'he':
i = i.replace('Action', u'\u05d0\u05e7\u05e9\u05df')
i = i.replace('Adventure', u'\u05d4\u05e8\u05e4\u05ea\u05e7\u05d0\u05d5\u05ea')
i = i.replace('Animation', u'\u05d0\u05e0\u05d9\u05de\u05e6\u05d9\u05d4')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u05e7\u05d5\u05de\u05d3\u05d9\u05d4')
i = i.replace('Crime', u'\u05e4\u05e9\u05e2')
i = i.replace('Documentary', u'\u05d3\u05d5\u05e7\u05d5\u05de\u05e0\u05d8\u05e8\u05d9')
i = i.replace('Drama', u'\u05d3\u05e8\u05de\u05d4')
i = i.replace('Family', u'\u05de\u05e9\u05e4\u05d7\u05d4')
i = i.replace('Fantasy', u'\u05e4\u05e0\u05d8\u05d6\u05d9\u05d4')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u05d4\u05e1\u05d8\u05d5\u05e8\u05d9\u05d4')
i = i.replace('Horror', u'\u05d0\u05d9\u05de\u05d4')
i = i.replace('Music ', u'\u05de\u05d5\u05e1\u05d9\u05e7\u05d4')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u05de\u05e1\u05ea\u05d5\u05e8\u05d9\u05df')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u05e8\u05d5\u05de\u05e0\u05d8\u05d9')
i = i.replace('Science Fiction', u'\u05de\u05d3\u05e2 \u05d1\u05d3\u05d9\u05d5\u05e0\u05d9')
i = i.replace('Sci-Fi', u'\u05de\u05d3\u05e2 \u05d1\u05d3\u05d9\u05d5\u05e0\u05d9')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u05de\u05d5\u05ea\u05d7\u05df')
i = i.replace('War', u'\u05de\u05dc\u05d7\u05de\u05d4')
i = i.replace('Western', u'\u05de\u05e2\u05e8\u05d1\u05d5\u05df')
elif lang == 'hu':
i = i.replace('Action', u'\u0041\u006b\u0063\u0069\u00f3')
i = i.replace('Adventure', u'\u004b\u0061\u006c\u0061\u006e\u0064')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u00e1\u0063\u0069\u00f3\u0073')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u0056\u00ed\u0067\u006a\u00e1\u0074\u00e9\u006b')
i = i.replace('Crime', u'\u0042\u0171\u006e\u00fc\u0067\u0079\u0069')
i = i.replace('Documentary', u'\u0044\u006f\u006b\u0075\u006d\u0065\u006e\u0074\u0075\u006d')
i = i.replace('Drama', u'\u0044\u0072\u00e1\u006d\u0061')
i = i.replace('Family', u'\u0043\u0073\u0061\u006c\u00e1\u0064\u0069')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0079')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0054\u00f6\u0072\u0074\u00e9\u006e\u0065\u006c\u006d\u0069')
i = i.replace('Horror', u'\u0048\u006f\u0072\u0072\u006f\u0072')
i = i.replace('Music ', u'\u005a\u0065\u006e\u0065\u0069')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u0052\u0065\u006a\u0074\u00e9\u006c\u0079')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0074\u0069\u006b\u0075\u0073')
i = i.replace('Science Fiction', u'\u0053\u0063\u0069\u002d\u0046\u0069')
i = i.replace('Sci-Fi', u'\u0053\u0063\u0069\u002d\u0046\u0069')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072')
i = i.replace('War', u'\u0048\u00e1\u0062\u006f\u0072\u00fa\u0073')
i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e')
elif lang == 'it':
i = i.replace('Action', u'\u0041\u007a\u0069\u006f\u006e\u0065')
i = i.replace('Adventure', u'\u0041\u0076\u0076\u0065\u006e\u0074\u0075\u0072\u0061')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u007a\u0069\u006f\u006e\u0065')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u0043\u006f\u006d\u006d\u0065\u0064\u0069\u0061')
i = i.replace('Crime', u'\u0043\u0072\u0069\u006d\u0065')
i = i.replace('Documentary', u'\u0044\u006f\u0063\u0075\u006d\u0065\u006e\u0074\u0061\u0072\u0069\u006f')
i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u006d\u0061')
i = i.replace('Family', u'\u0046\u0061\u006d\u0069\u0067\u006c\u0069\u0061')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0079')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0053\u0074\u006f\u0072\u0069\u0061')
i = i.replace('Horror', u'\u0048\u006f\u0072\u0072\u006f\u0072')
i = i.replace('Music ', u'\u004d\u0075\u0073\u0069\u0063\u0061')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u004d\u0069\u0073\u0074\u0065\u0072\u006f')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0063\u0065')
i = i.replace('Science Fiction', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0063\u0069\u0065\u006e\u007a\u0061')
i = i.replace('Sci-Fi', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0063\u0069\u0065\u006e\u007a\u0061')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072')
i = i.replace('War', u'\u0047\u0075\u0065\u0072\u0072\u0061')
i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e')
elif lang == 'ja':
i = i.replace('Action', u'\u30a2\u30af\u30b7\u30e7\u30f3')
i = i.replace('Adventure', u'\u30a2\u30c9\u30d9\u30f3\u30c1\u30e3\u30fc')
i = i.replace('Animation', u'\u30a2\u30cb\u30e1\u30fc\u30b7\u30e7\u30f3')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u30b3\u30e1\u30c7\u30a3')
i = i.replace('Crime', u'\u72af\u7f6a')
i = i.replace('Documentary', u'\u30c9\u30ad\u30e5\u30e1\u30f3\u30bf\u30ea\u30fc')
i = i.replace('Drama', u'\u30c9\u30e9\u30de')
i = i.replace('Family', u'\u30d5\u30a1\u30df\u30ea\u30fc')
i = i.replace('Fantasy', u'\u30d5\u30a1\u30f3\u30bf\u30b8\u30fc')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u5c65\u6b74')
i = i.replace('Horror', u'\u30db\u30e9\u30fc')
i = i.replace('Music ', u'\u97f3\u697d')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u8b0e')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u30ed\u30de\u30f3\u30b9')
i = i.replace('Science Fiction', u'\u30b5\u30a4\u30a8\u30f3\u30b9\u30d5\u30a3\u30af\u30b7\u30e7\u30f3')
i = i.replace('Sci-Fi', u'\u30b5\u30a4\u30a8\u30f3\u30b9\u30d5\u30a3\u30af\u30b7\u30e7\u30f3')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u30b9\u30ea\u30e9\u30fc')
i = i.replace('War', u'\u6226\u4e89')
i = i.replace('Western', u'\u897f\u6d0b')
elif lang == 'ko':
i = i.replace('Action', u'\uc561\uc158')
i = i.replace('Adventure', u'\ubaa8\ud5d8')
i = i.replace('Animation', u'\uc560\ub2c8\uba54\uc774\uc158')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\ucf54\ubbf8\ub514')
i = i.replace('Crime', u'\ubc94\uc8c4')
i = i.replace('Documentary', u'\ub2e4\ud050\uba58\ud130\ub9ac')
i = i.replace('Drama', u'\ub4dc\ub77c\ub9c8')
i = i.replace('Family', u'\uac00\uc871')
i = i.replace('Fantasy', u'\ud310\ud0c0\uc9c0')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\uc5ed\uc0ac')
i = i.replace('Horror', u'\uacf5\ud3ec')
i = i.replace('Music ', u'\uc74c\uc545')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\ubbf8\uc2a4\ud130\ub9ac')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\ub85c\ub9e8\uc2a4')
i = i.replace('Science Fiction', u'\u0053\u0046')
i = i.replace('Sci-Fi', u'\u0053\u0046')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\uc2a4\ub9b4\ub7ec')
i = i.replace('War', u'\uc804\uc7c1')
i = i.replace('Western', u'\uc11c\ubd80')
elif lang == 'nl':
i = i.replace('Action', u'\u0041\u0063\u0074\u0069\u0065')
i = i.replace('Adventure', u'\u0041\u0076\u006f\u006e\u0074\u0075\u0075\u0072')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u0074\u0069\u0065')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u004b\u006f\u006d\u0065\u0064\u0069\u0065')
i = i.replace('Crime', u'\u004d\u0069\u0073\u0064\u0061\u0061\u0064')
i = i.replace('Documentary', u'\u0044\u006f\u0063\u0075\u006d\u0065\u006e\u0074\u0061\u0069\u0072\u0065')
i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0061')
i = i.replace('Family', u'\u0046\u0061\u006d\u0069\u006c\u0069\u0065')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0069\u0065')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0048\u0069\u0073\u0074\u006f\u0072\u0069\u0073\u0063\u0068')
i = i.replace('Horror', u'\u0048\u006f\u0072\u0072\u006f\u0072')
i = i.replace('Music ', u'\u004d\u0075\u007a\u0069\u0065\u006b')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u004d\u0079\u0073\u0074\u0065\u0072\u0069\u0065')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0074\u0069\u0065\u006b')
i = i.replace('Science Fiction', u'\u0053\u0063\u0069\u0065\u006e\u0063\u0065\u0066\u0069\u0063\u0074\u0069\u006f\u006e')
i = i.replace('Sci-Fi', u'\u0053\u0063\u0069\u0065\u006e\u0063\u0065\u0066\u0069\u0063\u0074\u0069\u006f\u006e')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072')
i = i.replace('War', u'\u004f\u006f\u0072\u006c\u006f\u0067')
i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e')
elif lang == 'pl':
i = i.replace('Action', u'\u0041\u006b\u0063\u006a\u0061')
i = i.replace('Adventure', u'\u0050\u0072\u007a\u0079\u0067\u006f\u0064\u006f\u0077\u0079')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u0063\u006a\u0061')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u004b\u006f\u006d\u0065\u0064\u0069\u0061')
i = i.replace('Crime', u'\u004b\u0072\u0079\u006d\u0069\u006e\u0061\u0142')
i = i.replace('Documentary', u'\u0044\u006f\u006b\u0075\u006d\u0065\u006e\u0074\u0061\u006c\u006e\u0079')
i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0061\u0074')
i = i.replace('Family', u'\u0046\u0061\u006d\u0069\u006c\u0069\u006a\u006e\u0079')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0079')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0048\u0069\u0073\u0074\u006f\u0072\u0079\u0063\u007a\u006e\u0079')
i = i.replace('Horror', u'\u0048\u006f\u0072\u0072\u006f\u0072')
i = i.replace('Music ', u'\u004d\u0075\u007a\u0079\u0063\u007a\u006e\u0079')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u0054\u0061\u006a\u0065\u006d\u006e\u0069\u0063\u0061')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0073')
i = i.replace('Science Fiction', u'\u0053\u0063\u0069\u002d\u0046\u0069')
i = i.replace('Sci-Fi', u'\u0053\u0063\u0069\u002d\u0046\u0069')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072')
i = i.replace('War', u'\u0057\u006f\u006a\u0065\u006e\u006e\u0079')
i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e')
elif lang == 'pt':
i = i.replace('Action', u'\u0041\u00e7\u00e3\u006f')
i = i.replace('Adventure', u'\u0041\u0076\u0065\u006e\u0074\u0075\u0072\u0061')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u00e7\u00e3\u006f')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u0043\u006f\u006d\u00e9\u0064\u0069\u0061')
i = i.replace('Crime', u'\u0043\u0072\u0069\u006d\u0065')
i = i.replace('Documentary', u'\u0044\u006f\u0063\u0075\u006d\u0065\u006e\u0074\u00e1\u0072\u0069\u006f')
i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0061')
i = i.replace('Family', u'\u0046\u0061\u006d\u00ed\u006c\u0069\u0061')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0069\u0061')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0048\u0069\u0073\u0074\u00f3\u0072\u0069\u0061')
i = i.replace('Horror', u'\u0054\u0065\u0072\u0072\u006f\u0072')
i = i.replace('Music ', u'\u004d\u00fa\u0073\u0069\u0063\u0061')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u004d\u0069\u0073\u0074\u00e9\u0072\u0069\u006f')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0063\u0065')
i = i.replace('Science Fiction', u'\u0046\u0069\u0063\u00e7\u00e3\u006f \u0063\u0069\u0065\u006e\u0074\u00ed\u0066\u0069\u0063\u0061')
i = i.replace('Sci-Fi', u'\u0046\u0069\u0063\u00e7\u00e3\u006f \u0063\u0069\u0065\u006e\u0074\u00ed\u0066\u0069\u0063\u0061')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072')
i = i.replace('War', u'\u0047\u0075\u0065\u0072\u0072\u0061')
i = i.replace('Western', u'\u0046\u0061\u0072\u006f\u0065\u0073\u0074\u0065')
elif lang == 'ro':
i = i.replace('Action', u'\u0041\u0063\u021b\u0069\u0075\u006e\u0065')
i = i.replace('Adventure', u'\u0041\u0076\u0065\u006e\u0074\u0075\u0072\u0069')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u0163\u0069\u0065')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u0043\u006f\u006d\u0065\u0064\u0069\u0065')
i = i.replace('Crime', u'\u0043\u0072\u0069\u006d\u0103')
i = i.replace('Documentary', u'\u0044\u006f\u0063\u0075\u006d\u0065\u006e\u0074\u0061\u0072')
i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0103')
i = i.replace('Family', u'\u0046\u0061\u006d\u0069\u006c\u0069\u0065')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0079')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0049\u0073\u0074\u006f\u0072\u0069\u0063')
i = i.replace('Horror', u'\u0048\u006f\u0072\u0072\u006f\u0072')
i = i.replace('Music ', u'\u004d\u0075\u007a\u0069\u0063\u0103')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u004d\u0069\u0073\u0074\u0065\u0072')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0074\u0069\u0063')
i = i.replace('Science Fiction', u'\u0053\u0046')
i = i.replace('Sci-Fi', u'\u0053\u0046')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072')
i = i.replace('War', u'\u0052\u0103\u007a\u0062\u006f\u0069')
i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e')
elif lang == 'ru':
i = i.replace('Action', u'\u0431\u043e\u0435\u0432\u0438\u043a')
i = i.replace('Adventure', u'\u043f\u0440\u0438\u043a\u043b\u044e\u0447\u0435\u043d\u0438\u044f')
i = i.replace('Animation', u'\u043c\u0443\u043b\u044c\u0442\u0444\u0438\u043b\u044c\u043c')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u043a\u043e\u043c\u0435\u0434\u0438\u044f')
i = i.replace('Crime', u'\u043a\u0440\u0438\u043c\u0438\u043d\u0430\u043b')
i = i.replace('Documentary', u'\u0434\u043e\u043a\u0443\u043c\u0435\u043d\u0442\u0430\u043b\u044c\u043d\u044b\u0439')
i = i.replace('Drama', u'\u0434\u0440\u0430\u043c\u0430')
i = i.replace('Family', u'\u0441\u0435\u043c\u0435\u0439\u043d\u044b\u0439')
i = i.replace('Fantasy', u'\u0444\u044d\u043d\u0442\u0435\u0437\u0438')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0438\u0441\u0442\u043e\u0440\u0438\u044f')
i = i.replace('Horror', u'\u0443\u0436\u0430\u0441\u044b')
i = i.replace('Music ', u'\u043c\u0443\u0437\u044b\u043a\u0430')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u0434\u0435\u0442\u0435\u043a\u0442\u0438\u0432')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u043c\u0435\u043b\u043e\u0434\u0440\u0430\u043c\u0430')
i = i.replace('Science Fiction', u'\u0444\u0430\u043d\u0442\u0430\u0441\u0442\u0438\u043a\u0430')
i = i.replace('Sci-Fi', u'\u0444\u0430\u043d\u0442\u0430\u0441\u0442\u0438\u043a\u0430')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0442\u0440\u0438\u043b\u043b\u0435\u0440')
i = i.replace('War', u'\u0432\u043e\u0435\u043d\u043d\u044b\u0439')
i = i.replace('Western', u'\u0432\u0435\u0441\u0442\u0435\u0440\u043d')
elif lang == 'sl':
i = i.replace('Action', u'\u0041\u006b\u0063\u0069\u006a\u0061')
i = i.replace('Adventure', u'\u0041\u0076\u0061\u006e\u0074\u0075\u0072\u0061')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u0063\u0069\u006a\u0061')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u041a\u043e\u043c\u0435\u0064\u0069\u006a\u0061')
i = i.replace('Crime', u'\u041a\u0072\u0069\u006d\u0069\u006e\u0061\u006c\u006e\u0069')
i = i.replace('Documentary', u'\u0044\u006f\u006b\u0075\u006d\u0065\u006e\u0074\u0061\u0072\u006e\u0069')
i = i.replace('Drama', u'\u0044\u0072\u0430\u043c\u0430')
i = i.replace('Family', u'\u0044\u0072\u0075\u017e\u0069\u006e\u0073\u006b\u0069')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0074\u0069\u006b\u0061')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u005a\u0067\u006f\u0064\u006f\u0076\u0069\u006e\u0073\u006b\u0069')
i = i.replace('Horror', u'\u0047\u0072\u006f\u007a\u006c\u006a\u0069\u0076\u006b\u0061')
i = i.replace('Music ', u'\u0047\u006c\u0061\u007a\u0062\u0065\u006e\u0069')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u004d\u0069\u0073\u0074\u0065\u0072\u0069\u006a\u0061')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0074\u0069\u006b\u0061')
i = i.replace('Science Fiction', u'\u005a\u006e\u0061\u006e\u0073\u0074\u0076\u0065\u006e\u0061 \u0066\u0061\u006e\u0074\u0061\u0073\u0074\u0069\u006b\u0061')
i = i.replace('Sci-Fi', u'\u005a\u006e\u0061\u006e\u0073\u0074\u0076\u0065\u006e\u0061 \u0066\u0061\u006e\u0074\u0061\u0073\u0074\u0069\u006b\u0061')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0422\u0072\u0069\u006c\u0065\u0072')
i = i.replace('War', u'\u0056\u006f\u006a\u006e\u006f\u002d\u0070\u006f\u006c\u0069\u0074\u0069\u010d\u006e\u0069')
i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e')
elif lang == 'sr':
i = i.replace('Action', u'\u0410\u043a\u0446\u0438\u043e\u043d\u0438')
i = i.replace('Adventure', u'\u0410\u0432\u0430\u043d\u0442\u0443\u0440\u0438\u0441\u0442\u0438\u0447\u043a\u0438')
i = i.replace('Animation', u'\u0426\u0440\u0442\u0430\u043d\u0438')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u041a\u043e\u043c\u0435\u0434\u0438\u0458\u0430')
i = i.replace('Crime', u'\u041a\u0440\u0438\u043c\u0438')
i = i.replace('Documentary', u'\u0414\u043e\u043a\u0443\u043c\u0435\u043d\u0442\u0430\u0440\u043d\u0438')
i = i.replace('Drama', u'\u0414\u0440\u0430\u043c\u0430')
i = i.replace('Family', u'\u041f\u043e\u0440\u043e\u0434\u0438\u0447\u043d\u0438')
i = i.replace('Fantasy', u'\u0424\u0430\u043d\u0442\u0430\u0441\u0442\u0438\u043a\u0430')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0418\u0441\u0442\u043e\u0440\u0438\u0458\u0441\u043a\u0438')
i = i.replace('Horror', u'\u0425\u043e\u0440\u043e\u0440')
i = i.replace('Music ', u'\u041c\u0443\u0437\u0438\u0447\u043a\u0438')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u041c\u0438\u0441\u0442\u0435\u0440\u0438\u0458\u0430')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0409\u0443\u0431\u0430\u0432\u043d\u0438')
i = i.replace('Science Fiction', u'\u041d\u0430\u0443\u0447\u043d\u0430 \u0444\u0430\u043d\u0442\u0430\u0441\u0442\u0438\u043a\u0430')
i = i.replace('Sci-Fi', u'\u041d\u0430\u0443\u0447\u043d\u0430 \u0444\u0430\u043d\u0442\u0430\u0441\u0442\u0438\u043a\u0430')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0422\u0440\u0438\u043b\u0435\u0440')
i = i.replace('War', u'\u0420\u0430\u0442\u043d\u0438')
i = i.replace('Western', u'\u0412\u0435\u0441\u0442\u0435\u0440\u043d')
elif lang == 'sv':
i = i.replace('Action', u'\u0041\u0063\u0074\u0069\u006f\u006e')
i = i.replace('Adventure', u'\u00c4\u0076\u0065\u006e\u0074\u0079\u0072')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0065\u0072\u0061\u0074')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u004b\u006f\u006d\u0065\u0064\u0069')
i = i.replace('Crime', u'\u004b\u0072\u0069\u006d\u0069\u006e\u0061\u006c')
i = i.replace('Documentary', u'\u0044\u006f\u006b\u0075\u006d\u0065\u006e\u0074\u00e4\u0072')
i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0061')
i = i.replace('Family', u'\u0046\u0061\u006d\u0069\u006c\u006a')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0079')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0048\u0069\u0073\u0074\u006f\u0072\u0069\u0073\u006b')
i = i.replace('Horror', u'\u0053\u006b\u0072\u00e4\u0063\u006b')
i = i.replace('Music ', u'\u004d\u0075\u0073\u0069\u0063')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u004d\u0079\u0073\u0074\u0069\u006b')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0074\u0069\u006b')
i = i.replace('Science Fiction', u'\u0053\u0063\u0069\u0065\u006e\u0063\u0065 \u0046\u0069\u0063\u0074\u0069\u006f\u006e')
i = i.replace('Sci-Fi', u'\u0053\u0063\u0069\u0065\u006e\u0063\u0065 \u0046\u0069\u0063\u0074\u0069\u006f\u006e')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072')
i = i.replace('War', u'\u004b\u0072\u0069\u0067')
i = i.replace('Western', u'\u0056\u00e4\u0073\u0074\u0065\u0072\u006e')
elif lang == 'tr':
i = i.replace('Action', u'\u0041\u006b\u0073\u0069\u0079\u006f\u006e')
i = i.replace('Adventure', u'\u004d\u0061\u0063\u0065\u0072\u0061')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u0073\u0079\u006f\u006e')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u004b\u006f\u006d\u0065\u0064\u0069')
i = i.replace('Crime', u'\u0053\u0075\u00e7')
i = i.replace('Documentary', u'\u0042\u0065\u006c\u0067\u0065\u0073\u0065\u006c')
i = i.replace('Drama', u'\u0044\u0072\u0061\u006d')
i = i.replace('Family', u'\u0041\u0069\u006c\u0065')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0074\u0069\u006b')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0054\u0061\u0072\u0069\u0068')
i = i.replace('Horror', u'\u004b\u006f\u0072\u006b\u0075')
i = i.replace('Music ', u'\u004d\u00fc\u007a\u0069\u006b')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u0047\u0069\u007a\u0065\u006d')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0074\u0069\u006b')
i = i.replace('Science Fiction', u'\u0042\u0069\u006c\u0069\u006d\u002d\u004b\u0075\u0072\u0067\u0075')
i = i.replace('Sci-Fi', u'\u0042\u0069\u006c\u0069\u006d\u002d\u004b\u0075\u0072\u0067\u0075')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0047\u0065\u0072\u0069\u006c\u0069\u006d')
i = i.replace('War', u'\u0053\u0061\u0076\u0061\u015f')
i = i.replace('Western', u'\u0056\u0061\u0068\u015f\u0069 \u0042\u0061\u0074\u0131')
elif lang == 'zh':
i = i.replace('Action', u'\u52a8\u4f5c')
i = i.replace('Adventure', u'\u5192\u9669')
i = i.replace('Animation', u'\u52a8\u753b')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u559c\u5267')
i = i.replace('Crime', u'\u72af\u7f6a')
i = i.replace('Documentary', u'\u7eaa\u5f55')
i = i.replace('Drama', u'\u5267\u60c5')
i = i.replace('Family', u'\u5bb6\u5ead')
i = i.replace('Fantasy', u'\u5947\u5e7b')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u5386\u53f2')
i = i.replace('Horror', u'\u6050\u6016')
i = i.replace('Music ', u'\u97f3\u4e50')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u60ac\u7591')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u7231\u60c5')
i = i.replace('Science Fiction', u'\u79d1\u5e7b')
i = i.replace('Sci-Fi', u'\u79d1\u5e7b')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u60ca\u609a')
i = i.replace('War', u'\u6218\u4e89')
i = i.replace('Western', u'\u897f\u90e8')
return i
| [
"mrknow@interia.pl"
] | mrknow@interia.pl |
aff2907b48026f62d9a296f9ebf1f41e450063f9 | 66a08c7da73bacd98452d69a23486e70f9338b4a | /build/navigation/costmap_2d/catkin_generated/pkg.installspace.context.pc.py | a66adf2fbe72440ac5563d7b02a341a9ed4f7c36 | [] | no_license | StephenWhit/MazeMappingROS | bd21aa3b10b694c511c4613a79bebd91a5ce11b0 | 59bdf30de0df375362879e01f7ac3e08b1019b9d | refs/heads/master | 2020-04-09T10:41:15.413221 | 2018-12-07T20:52:05 | 2018-12-07T20:52:05 | 160,279,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,281 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Values below are baked in by catkin at configure time for the costmap_2d
# package.  The conditional ``.split(';')`` idiom turns each CMake-style
# semicolon list into a Python list while mapping "" to an empty list.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/stephen/finalProject/install/include;/usr/include/eigen3;/usr/include".split(';') if "/home/stephen/finalProject/install/include;/usr/include/eigen3;/usr/include" != "" else []
PROJECT_CATKIN_DEPENDS = "dynamic_reconfigure;geometry_msgs;laser_geometry;map_msgs;message_filters;message_runtime;nav_msgs;pluginlib;roscpp;sensor_msgs;std_msgs;tf2_ros;visualization_msgs;voxel_grid".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lcostmap_2d;-llayers;/usr/lib/x86_64-linux-gnu/libboost_system.so;/usr/lib/x86_64-linux-gnu/libboost_thread.so;/usr/lib/x86_64-linux-gnu/libboost_chrono.so;/usr/lib/x86_64-linux-gnu/libboost_date_time.so;/usr/lib/x86_64-linux-gnu/libboost_atomic.so;/usr/lib/x86_64-linux-gnu/libpthread.so".split(';') if "-lcostmap_2d;-llayers;/usr/lib/x86_64-linux-gnu/libboost_system.so;/usr/lib/x86_64-linux-gnu/libboost_thread.so;/usr/lib/x86_64-linux-gnu/libboost_chrono.so;/usr/lib/x86_64-linux-gnu/libboost_date_time.so;/usr/lib/x86_64-linux-gnu/libboost_atomic.so;/usr/lib/x86_64-linux-gnu/libpthread.so" != "" else []
PROJECT_NAME = "costmap_2d"
PROJECT_SPACE_DIR = "/home/stephen/finalProject/install"
PROJECT_VERSION = "1.16.2"
| [
"stephenwhit897@gmail.com"
] | stephenwhit897@gmail.com |
f6e938446c3e9eb27bff2f6b8a85c21a34381a94 | 345acf7452ae27faa8cffba6f8bd5a976c43e60f | /data/MIND.py | 687d70cc5ac47b554d3a84a4d2ea0ae7db8c3909 | [
"MIT"
] | permissive | Hi-Simon/NRMS_Pytorch_MIND | 42e2252ef921355a502c48aedf1d0e4d3ccbc10a | f3f9ca3b2069d27aa8975c833f36fababaf8f00c | refs/heads/main | 2023-07-07T20:24:17.383881 | 2021-08-19T07:19:14 | 2021-08-19T07:19:14 | 397,846,560 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,419 | py | from torch.utils.data import Dataset
import pickle
import numpy as np
import csv
import random
import re
from tqdm.auto import tqdm
class MINDataset(Dataset):
    """Torch ``Dataset`` over the MIND news-recommendation corpus.

    ``load_data_from_file`` parses the news tsv into padded rows of title
    word indices and the behaviors tsv into (history, impression, label)
    records, then materialises one sample per impression item in
    ``self.data``.
    """

    def __init__(self,
                 hparams,
                 news_file,
                 behaviors_file,
                 wordDict_file,
                 userDict_file,
                 col_spliter='\t',
                 ID_spliter='%',
                 npratio=-1,
                 modify_news_id=None,
                 target_news_id=None,
                 modify_index=None,
                 target_index=None,
                 target_news_file=None,
                 ):
        # Optional title-editing knobs consumed by init_news: replace token
        # ``modify_index`` of news ``modify_news_id`` with token
        # ``target_index`` of ``target_news_id`` read from ``target_news_file``.
        self.modify_news_id=modify_news_id
        self.target_news_id=target_news_id
        self.modify_index=modify_index
        self.target_index=target_index
        self.target_news_file=target_news_file
        self.news_file = news_file
        self.behaviors_file = behaviors_file
        self.col_spliter = col_spliter
        self.ID_spliter = ID_spliter
        self.batch_size = hparams['batch_size']
        self.title_size = hparams['data']['title_size']
        self.his_size = hparams['data']['his_size']
        self.npratio = npratio
        # Pickled lookup tables: word -> vocab index, user id -> user index.
        self.word_dict = self.load_dict(wordDict_file)
        self.uid2index = self.load_dict(userDict_file)

    # NOTE: ``self.data`` only exists after load_data_from_file() has run;
    # calling len()/indexing before that raises AttributeError.
    def __len__(self):
        return len(self.data)

    def __getitem__(self, item):
        return self.data[item]

    def load_dict(self, file_path):
        """load pickle file
        Args:
            file path (str): file path
        Returns:
            object: pickle loaded object
        """
        with open(file_path, "rb") as f:
            return pickle.load(f)

    def word_tokenize(self, sent):
        """ Split sentence into word list using regex.
        Args:
            sent (str): Input sentence
        Return:
            list: word list (empty list for non-string input)
        """
        pat = re.compile(r"[\w]+|[.,!?;|]")
        if isinstance(sent, str):
            return pat.findall(sent.lower())
        else:
            return []

    def init_news(self, news_file):
        """init news information given news file, such as news_title_index and nid2index.
        Args:
            news_file: path of news file
        """
        # When a title edit is requested, first pull the replacement token
        # list from the target news entry inside target_news_file.
        target_title_token = []
        if self.modify_news_id != None:
            with open(self.target_news_file, "r", encoding='utf-8') as f:
                rd = f.readlines()
                for line in rd:
                    nid, vert, subvert, title, ab, url, _, _ = line.strip("\n").split(self.col_spliter)
                    if nid == self.target_news_id:
                        title=self.word_tokenize(title)
                        target_title_token.extend(title)
                        break
        self.nid2index = {}
        # Slot 0 is reserved as an empty/padding title.
        news_title = [""]
        with open(news_file, "r", encoding='utf-8') as f:
            rd = f.readlines()
            for line in rd:
                nid, vert, subvert, title, ab, url, _, _ = line.strip("\n").split(self.col_spliter)
                if nid in self.nid2index:
                    continue
                self.nid2index[nid] = len(self.nid2index) + 1
                title = self.word_tokenize(title)
                if self.modify_news_id != None and nid == self.modify_news_id:
                    title[self.modify_index] = target_title_token[self.target_index]
                news_title.append(title)
        # news_title_index[i][j] is the vocab index of the j-th word of news i
        # (0 for padding and for out-of-vocabulary words).
        self.news_title_index = np.zeros(
            (len(news_title), self.title_size), dtype="int32"
        )
        for news_index in tqdm(range(len(news_title)), desc='init news'):
            title = news_title[news_index]
            for word_index in range(min(self.title_size, len(title))):
                if title[word_index] in self.word_dict:
                    self.news_title_index[news_index, word_index] = self.word_dict[
                        title[word_index].lower()
                    ]

    def init_behaviors(self, behaviors_file):
        """init behavior logs given behaviors file.
        Args:
            behaviors_file: path of behaviors file
        """
        self.histories = []
        self.imprs = []
        self.labels = []
        self.impr_indexes = []
        self.uindexes = []
        with open(behaviors_file, "r", encoding='utf-8') as f:
            rd = f.readlines()
            impr_index = 0
            for line in tqdm(rd, desc='init behaviors'):
                uid, time, history, impr = line.strip("\n").split(self.col_spliter)[-4:]
                # Click history, left-padded with 0 (the padding news id) and
                # truncated to the last his_size entries.
                history = [self.nid2index[i] for i in history.split()]
                history = [0] * (self.his_size - len(history)) + history[: self.his_size]
                impr_news = [self.nid2index[i.split("-")[0]] for i in impr.split()]
                label = [int(i.split("-")[1]) for i in impr.split()]
                # Unknown users fall back to index 0.
                uindex = self.uid2index[uid] if uid in self.uid2index else 0
                self.histories.append(history)
                self.imprs.append(impr_news)
                self.labels.append(label)
                self.impr_indexes.append(impr_index)
                self.uindexes.append(uindex)
                impr_index += 1

    def _convert_data(
        self,
        label_list,
        imp_indexes,
        user_indexes,
        candidate_title_indexes,
        click_title_indexes,
    ):
        """Convert data into numpy arrays that are good for further model operation.
        Args:
            label_list (list): a list of ground-truth labels.
            imp_indexes (list): a list of impression indexes.
            user_indexes (list): a list of user indexes.
            candidate_title_indexes (list): the candidate news titles' words indices.
            click_title_indexes (list): words indices for user's clicked news titles.
        Returns:
            tuple: numpy arrays (labels, impression idx, user idx,
                candidate titles, clicked titles) that are convenient for
                further operation.
        """
        labels = np.asarray(label_list, dtype=np.int64)
        imp_indexes = np.asarray(imp_indexes, dtype=np.int32)
        user_indexes = np.asarray(user_indexes, dtype=np.int32)
        candidate_title_index_batch = np.asarray(
            candidate_title_indexes, dtype=np.int64
        )
        click_title_index_batch = np.asarray(click_title_indexes, dtype=np.int64)
        return (
            labels,
            imp_indexes,
            user_indexes,
            candidate_title_index_batch,
            click_title_index_batch,
        )

    def newsample(self, news, ratio):
        """ Sample ratio samples from news list.
        If length of news is less than ratio, pad zeros.
        Args:
            news (list): input news list
            ratio (int): sample number
        Returns:
            list: output of sample list.
        """
        if ratio > len(news):
            return news + [0] * (ratio - len(news))
        else:
            return random.sample(news, ratio)

    def init_data(self, line):
        """Parse one behavior sample into feature values and append the
        resulting sample(s) to ``self.data``.
        if npratio is larger than 0, use negative-sampled results (one sample
        per positive click); otherwise one sample per impression item.
        Args:
            line (int): sample index.
        """
        if self.npratio > 0:
            impr_label = self.labels[line]
            impr = self.imprs[line]
            poss = []
            negs = []
            for news, click in zip(impr, impr_label):
                if click == 1:
                    poss.append(news)
                else:
                    negs.append(news)
            for p in poss:
                candidate_title_index = []
                impr_index = []
                user_index = []
                # label = [1] + [0] * self.npratio
                # NOTE(review): label is the scalar index of the positive
                # candidate (slot 0), not the one-hot list the commented-out
                # line suggests -- confirm the loss expects a class index.
                label = 0
                n = self.newsample(negs, self.npratio)
                # Candidate block: the positive news followed by npratio negatives.
                candidate_title_index = self.news_title_index[[p] + n]
                click_title_index = self.news_title_index[self.histories[line]]
                impr_index.append(self.impr_indexes[line])
                user_index.append(self.uindexes[line])
                self.data.append(
                    self._convert_data(
                        label,
                        impr_index,
                        user_index,
                        candidate_title_index,
                        click_title_index,
                    )
                )
        else:
            impr_label = self.labels[line]
            impr = self.imprs[line]
            # When a target news id is set, only keep impressions containing it.
            if self.target_news_id != None and self.nid2index[self.target_news_id] not in impr:
                return
            for news, label in zip(impr, impr_label):
                candidate_title_index = []
                impr_index = []
                user_index = []
                label = [label]
                candidate_title_index.append(self.news_title_index[news])
                click_title_index = self.news_title_index[self.histories[line]]
                impr_index.append(self.impr_indexes[line])
                user_index.append(self.uindexes[line])
                self.data.append(
                    self._convert_data(
                        label,
                        impr_index,
                        user_index,
                        candidate_title_index,
                        click_title_index,
                    )
                )

    def load_data_from_file(self):
        """Read and parse data from news file and behavior file into
        ``self.data`` (idempotent: skips steps already initialised).
        """
        # news_title_index[i][j]: which vocab entry the j-th word of news i is
        if not hasattr(self, "news_title_index"):
            self.init_news(self.news_file)
        if not hasattr(self, "impr_indexes"):
            self.init_behaviors(self.behaviors_file)
        if not hasattr(self, "data"):
            self.data = []
            indexes = np.arange(len(self.labels))
            # Shuffle impressions only in negative-sampling (training) mode.
            if self.npratio > 0:
                np.random.shuffle(indexes)
            for index in tqdm(indexes, desc='init data'):
                self.init_data(index)
| [
"34621564+Simon-Lon@users.noreply.github.com"
] | 34621564+Simon-Lon@users.noreply.github.com |
758bccdb4bbf4799379014e0d650369b5e6516f2 | edd9b317496e2561ac99e36ae833a270cba3ebd6 | /ECS 32A/ECS32A/Ass3/part1.py | 698b2aff0024fcc4d751e2bfae6be1dff1f0a374 | [] | no_license | zoubohao/UC_Davis_STA_CS_Coureses | 832fc3eb68a15f4cfa8ac0af55e026411c70532c | f09579f0e46ea976807548f8b7bdb18fa7fc0bdc | refs/heads/master | 2023-04-07T12:57:26.242818 | 2021-04-02T00:00:00 | 2021-04-02T00:00:00 | 256,613,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py |
# Read N from the user and print the sum of squares 1**2 + 2**2 + ... + N**2.
n = int(input("Enter N: "))
sumN = 0
for k in range(1, n + 1):
    sumN += k ** 2
print("The sum is: {}".format(sumN))
| [
"bhzou@ucdaivs.edu"
] | bhzou@ucdaivs.edu |
80c5356a05a9b735e32ddb0a37181e086ea187ef | 7249e0f089ef1defc229063c303fa056953d9409 | /Assignment03/ex18.py | 99139894d35c9f59b9e42c7a15c0df82fa030914 | [] | no_license | JohnHadish/hort503 | 94ad78b4e67a5c1c6a280f62046c8618563b2948 | 7463e53a349c322ce12e76a9ce20c16b6873bfab | refs/heads/master | 2018-09-22T17:23:29.857830 | 2018-06-06T18:47:47 | 2018-06-06T18:47:47 | 117,146,134 | 0 | 0 | null | 2018-01-15T07:03:09 | 2018-01-11T19:51:57 | Python | UTF-8 | Python | false | false | 481 | py | #This one is like your script with argv
def print_two(*args):
arg1, arg2 = args
print(f"arg1: {arg1}, arg2: {arg2}")
# Named-parameter version: two explicit arguments, same output format.
def print_two_again(a1, arg2):
    """Echo two named arguments in the 'arg1: ..., arg2: ...' format."""
    line = "arg1: " + str(a1) + ", arg2: " + str(arg2)
    print(line)
# Single-argument version.
def print_one(arg1):
    """Echo one argument."""
    print("arg1: {}".format(arg1))
# Zero-argument version.
def print_none():
    """Print a fixed message showing this function received nothing."""
    message = "I got nothin'."
    print(message)
# Exercise each arity variant once.
print_two("Zed","Shaw")
print_two_again("Zed","Shaw")
print_one("First!")
print_none()
| [
"john.hadish@wsu.edu"
] | john.hadish@wsu.edu |
b6ea6fe721b4f557ccddee3acac575884f7e3ba1 | 16ebd865ce7930b092536f21e083e85adf767598 | /src/FactorCategory.py | 5f805e0f9a9f916ec2c57b31132c1b222d4a0be0 | [] | no_license | dunn0052/GTD_V3 | 9b8a43d1b0dc69a34b8432380796a85fe0db3bc4 | 5fae637b0a84af8726594cb71f95e92a2ca5da99 | refs/heads/master | 2022-12-06T23:56:02.149218 | 2020-06-26T12:42:23 | 2020-06-26T12:42:23 | 262,635,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,984 | py | from queue import Queue
class Node:
    """A binary-tree node holding a boolean payload and two child links."""

    def __init__(self, value: bool):
        self.v = value  # payload (None only for the sentinel root)
        self.l = None   # left child  (follows a False bit)
        self.r = None   # right child (follows a True bit)


class BinaryTree:
    """Stores string keys as root-to-leaf paths of prime-divisibility bits.

    A key is summed to an int, scrambled via a modular inverse, and turned
    into ``len(PRIMES)`` booleans (divisibility by each prime); each boolean
    selects the left (False) or right (True) child while inserting.
    """

    # nth most common primes to condense groupings
    PRIMES = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]

    # Modular inverse prime > ord("z") * max_len so that modular inverses
    # will be greater than the largest key int ("zzzzzzzzz" -> 1342).
    INVERSE_MOD_PRIME = 1361

    def __init__(self, max_len=11):
        self.root = Node(None)          # sentinel root; carries no bit
        self.height = len(self.PRIMES)  # every key path has this many bits
        self.max_len = max_len          # maximum accepted key length

    def addKey(self, key):
        """Hash a non-empty string key (shorter than ``max_len``) and insert
        its bit path, creating nodes as needed.  Invalid keys are reported
        and ignored."""
        if isinstance(key, str):
            if 0 < len(key) < self.max_len:
                key = self.HashFactorKey(self.ModularInverse(self.stringToInt(key)))
            else:
                print("Key too long. Max length is: " + str(self.max_len))
                return
        else:
            print("Key must be a string")
            return
        n = self.root
        for value in key:
            value = bool(int(value))
            if value:
                if not n.r:
                    n.r = Node(value)
                n = n.r
            else:
                if not n.l:
                    n.l = Node(value)
                n = n.l

    @staticmethod
    def printInorder(node):
        """Print node values in pre-order (value, then left, then right),
        despite the historical name."""
        if node:
            print(node.v)
            BinaryTree.printInorder(node.l)
            BinaryTree.printInorder(node.r)

    def size(self):
        """Number of nodes, excluding the sentinel root."""
        return self.sumNode(self.root) - 1

    def sumNode(self, node):
        """Recursively count the nodes in the subtree rooted at ``node``."""
        if node:
            return self.sumNode(node.l) + self.sumNode(node.r) + 1
        else:
            return 0

    def printTree(self):
        """Print the whole tree starting at the root."""
        self.printInorder(self.root)

    @staticmethod
    def stringToInt(key):
        """Sum of the code points of ``key``."""
        return sum(ord(letter) for letter in key)

    # check the prime factor bases as unique key
    def HashFactorKey(self, key):
        """Divisibility of ``key`` by each base prime, as a list of bools."""
        return [key % p == 0 for p in self.PRIMES]

    # used to mix up numbers to allow for similar keys to be mapped
    # to wildly diverse integers within the max key size
    def ModularInverse(self, num):
        """Scramble ``num`` with Fermat's little theorem (inverse mod a
        prime) so similar keys map to very different ints."""
        return pow(num, self.INVERSE_MOD_PRIME - 2, self.INVERSE_MOD_PRIME)

    def getUnique(self):
        # NOTE(review): findUnique, BinaryToInt and IntToString are not
        # defined anywhere in this file -- calling this raises
        # AttributeError; left unchanged pending the missing helpers.
        return self.IntToString(self.BinaryToInt(self.findUnique()))

    def BFS(self):
        """Breadth-first traversal returning every node, root first.

        Fixes two bugs in the original: ``queue.Queue`` has no ``pop()``
        method (the original raised AttributeError on first use), and the
        ``break`` on the first missing child cut the walk short -- empty
        children are now simply skipped.
        """
        path = list()
        to_visit = Queue()
        to_visit.put(self.root)
        while not to_visit.empty():
            node = to_visit.get()
            if node:
                path.append(node)
                to_visit.put(node.l)
                to_visit.put(node.r)
        return path
# Smoke test: insert 500 keys ("Station0".."Station499", all shorter than
# max_len=11), then print the node count and a BFS listing of node values.
r = BinaryTree()
for i in range(500):
    r.addKey("Station" + str(i))
print(r.size())
print(list(l.v for l in r.BFS()))
| [
"dunn0407@umn.edu"
] | dunn0407@umn.edu |
6795849f26ad1104be9a8a8a594ca7719b1ba0f5 | c1b3a407abbd27cb54670a4d91b5e31010fe0d62 | /setup.py | 15d97ab0b3cad5b4c2d2ceb028291cff78c71ca4 | [] | no_license | goabout/goabout-survey | 3c4bc1ffd5d217b6a1851eb062b3d589c3fe3815 | b4737bfa17e559fcaaf36b21e19b6e9865fe2e34 | HEAD | 2016-09-05T17:26:59.579535 | 2015-03-06T13:30:02 | 2015-03-06T13:30:02 | 31,770,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 753 | py | from setuptools import setup
import survey
setup(
name = survey.__name__,
version = survey.__version__,
author = survey.__author__,
author_email = survey.__email__,
license = survey.__license__,
description = survey.__doc__.splitlines()[0],
long_description = open('README.rst').read(),
url = 'http://github.com/goabout/survey',
packages = ['survey'],
include_package_data = True,
zip_safe = False,
platforms = ['all'],
test_suite = 'tests',
entry_points = {
'console_scripts': [
'survey-send = survey.worker:main',
],
},
install_requires = [
'mandrill',
'dougrain',
'requests',
'gdata',
'requests-oauthlib',
],
)
| [
"matthias.hogerheijde@goabout.com"
] | matthias.hogerheijde@goabout.com |
3b5c6b2bef3a8ad8b85b1a0e4ff87204562525a3 | 7871ac34b07ffdc1ade7d552f032c11cf0fb38ec | /new.py | c2b5b5688ef97e4bc0f309b76ea7e880299dcca0 | [] | no_license | qxqz/ML_practice | ca81e5635c4f1fefdce974cf4d86ee11fccdb333 | 842a4f26516d440043b60e29067880bc585e4a84 | refs/heads/master | 2023-06-20T07:27:23.958658 | 2021-07-15T09:57:46 | 2021-07-15T09:57:46 | 386,237,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16 | py | print('blabla')
| [
"qxqz3333@gmail.com"
] | qxqz3333@gmail.com |
42cecea00b7391ed5181646dc7c931c9a4f52077 | b94f8b45877aa1a5f49ee2b126aeb1e97aa563a0 | /modules/bot_utils.py | 562818bd0700bb6eaa359f1064c736901121df20 | [
"MIT"
] | permissive | FFrost/CBot | 32670eab23e841dff3e3413d54ec59be800d7d81 | f7361ae6d417f42498589fa2903e2e998b8c25fc | refs/heads/master | 2023-08-04T17:11:24.971923 | 2022-07-06T20:29:50 | 2022-07-06T20:29:50 | 111,182,933 | 4 | 0 | MIT | 2023-07-20T15:09:35 | 2017-11-18T06:35:26 | Python | UTF-8 | Python | false | false | 7,320 | py | import discord
from discord.ext import commands
from modules import utils
from typing import Optional, List
# this file is for utility functions that require access to discord
class BotUtils:
def __init__(self, bot):
self.bot = bot
# writes to a file
# input: filename, filename to write to
# mode, what mode to open the file in, r, w, or a
# string, what should be written
# add_time, if should the current time be prepended
@staticmethod
def write_to_file(filename: str, mode: str, string: str, add_time: bool = False) -> None:
if (add_time):
string = f"[{utils.get_cur_time()}] {string}"
with open(filename, mode) as f:
f.write(f"{string}\n")
# logs an error to file
def log_error_to_file(self, error: str, prefix: str = "") -> None:
if (prefix):
error = f"[{prefix}] {error}"
self.write_to_file(self.bot.ERROR_FILEPATH, "a", error, add_time=True)
# prints a message
async def output_log(self, message: discord.Message) -> None:
try:
print(utils.format_log_message(message))
except Exception as e:
await self.bot.messaging.error_alert(e, extra="on_command")
# finds a user by full or partial name or id
# input: name, keyword to search usernames for
# output: found user or None if no users were found
async def find(self, name: str) -> Optional[discord.User]:
if (not name):
return None
return discord.utils.find(lambda m: (m.name.lower().startswith(name.lower()) or m.id == name), self.bot.get_all_members())
# gets a channel by name
# input: name, keyword to search channel names for
# guild, guild to search for the channel
# output: channel object matching search or None if no channels were found
def find_channel(self, name: str, guild: discord.Guild) -> Optional[discord.abc.GuildChannel]:
if (not guild):
return None
guild = str(guild)
return discord.utils.get(self.bot.get_all_channels(), guild__name=guild, name=name)
# find last embed in channel
# input: channel, channel to search for embeds
# embed_type, type of embed to search for, video or image
# output: url of the embed or None if not found
async def find_last_embed(self, channel: discord.abc.GuildChannel) -> Optional[str]:
async for message in channel.history():
embed = utils.find_image_embed(message)
if (embed):
return embed
return None
# finds last image in channel
# input: message, message from which channel will be extracted and point to search before
# output: url of image found or None if no images were found
async def find_last_image(self, message: discord.Message) -> Optional[str]:
async for message in message.channel.history(before=message):
attachments = utils.find_attachment(message)
if (attachments):
return attachments
embed = utils.find_image_embed(message)
if (embed):
return embed
return None
# finds last text message in channel
# input: message, message from which channel will be used as point to search before
# output: text of message or None if no text messages were found
async def find_last_text(self, message: discord.Message) -> Optional[str]:
async for message in message.channel.history(before=message):
if (message.content):
return message.content
# finds last youtube video embed in channel
# input: message, message from which channel will be used as point to search before
# output: url of youtube embed or None if no youtube video embeds were found
async def find_last_youtube_embed(self, message: discord.Message) -> Optional[str]:
async for message in message.channel.history(before=message, limit=50):
if (message.embeds):
for embed in message.embeds:
if (embed.type == "video" or embed.video):
if (embed.provider.name == "YouTube"):
return embed.url
elif (utils.youtube_url_validation(embed.url)):
return embed.url
# finds the last message sent before the command message
# input: message, the message to search before
# output: the message if found or None
async def find_last_message(self, message: discord.Message) -> Optional[discord.Message]:
async for message in message.channel.history(before=message, limit=1):
return message
return None
# deletes a message if the bot has permission to do so
# input: message, message to delete
# output: success of the operation
async def delete_message(self, message: discord.Message) -> bool:
channel = message.channel
if (isinstance(channel, discord.abc.PrivateChannel) and message.author != self.bot.user):
return False
elif (channel.permissions_for(message.guild.me).manage_messages):
try:
await message.delete()
return True
except Exception:
return False
return False
# deletes a number messages from a channel by user
# input: ctx, context to reference
# num_to_delete, number of messages to delete
# users, list of users to delete messages from or None to delete regardless of author
# output: number of messages successfully deleted
async def purge(self, ctx: commands.Context, num_to_delete: int, users: List[discord.User]) -> int:
num_to_delete = abs(num_to_delete)
num_deleted = 0
async for message in ctx.channel.history(before=ctx.message, limit=500):
if (num_deleted >= num_to_delete):
break
if (not users or message.author in users):
success = await self.delete_message(message)
if (success):
num_deleted += 1
return num_deleted
# find a guild from the ones we are currently in
# input: search, string to search for, either part/all of guild name or index in list of guilds
# output: the guild if found or None
def find_guild(self, search: str) -> Optional[discord.Guild]:
guilds = list(self.bot.guilds)
guild = None
try:
index = int(search)
except ValueError: # if it's not an index, search by name
guild = discord.utils.find(lambda s: search.lower() in s.name.lower(), guilds)
else:
index -= 1
if (index >= 0 and index < len(guilds)):
guild = guilds[index]
else: # search in the guild name
guild = discord.utils.find(lambda s: search.lower() in s.name.lower(), guilds)
return guild
| [
"FFrost@users.noreply.github.com"
] | FFrost@users.noreply.github.com |
a1207f8f3579ed5adb2f6c3c468f28880b3c9aee | 2b0d01dc35ca63752c9b143be26103309548ba40 | /processa_img.py | b457345f02ff5823f5beb7f166eb2b4551d06c2f | [
"MIT"
] | permissive | iamgomes/geoconding | d07dea3859ff82f9db2cbb0fca1e3c0446343d29 | 2f11931907e191e1bc8ddceef4ba9c79c3411d34 | refs/heads/master | 2021-10-23T23:42:21.177027 | 2020-03-18T01:10:14 | 2020-03-18T01:10:14 | 248,107,511 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | import cv2
import matplotlib.pyplot as plt
#%matplotlib inline
import numpy as np
# Load the sample Street View capture and convert it from BGR to grayscale.
# NOTE(review): cv2.imread returns None when the file is missing, which would
# make cvtColor raise -- confirm the relative path exists at runtime.
face_image = cv2.imread('img_streetview/teste.jpg')
gray_img = cv2.cvtColor(face_image, cv2.COLOR_BGR2GRAY)
plt.figure(figsize=(10,10))
plt.imshow(gray_img, cmap='gray') | [
"williamgomes@MacBook-Pro-de-William.local"
] | williamgomes@MacBook-Pro-de-William.local |
dd5a9976dc8993b4dfa82ebd52d122894f902c7c | 9b588d1dd05cb774d57f436c0f4daafae635b244 | /Timetable_task/Timetable_task/settings.py | e9ed31491edc1f8e34974e5d3bd6ff6db20d2737 | [] | no_license | MounikaDeveloper/Timetable | a0f91f8831d397a65281319b3a0d2bae0b80e79e | 734c6d219b6c0c13cfc9f516191ac2f54bed4598 | refs/heads/master | 2022-11-25T23:46:57.267386 | 2020-08-04T12:34:55 | 2020-08-04T12:34:55 | 282,472,326 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,316 | py | """
Django settings for Timetable_task project.
Generated by 'django-admin startproject' using Django 2.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!kr*4fkzl*1zhov&jp!e4wl*m2b$j94#_sn058jb&ao73l2u^q'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app.apps.AppConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Timetable_task.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Timetable_task.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS=[
os.path.join(BASE_DIR, 'static/')
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/') | [
"57697586+MounikaDeveloper@users.noreply.github.com"
] | 57697586+MounikaDeveloper@users.noreply.github.com |
90a99102e6ad23ec94bd9649921692bcd0e04eb7 | 76e8dddb8d12906514f0004f32af8ef305ad43e9 | /Dragon/python/dragon/vm/tensorflow/ops/nn_ops.py | ce0d7c84235221fc898b94a97501b81947b4cb8a | [
"BSD-2-Clause"
] | permissive | XJTUeducation/Dragon | bf98bd3a10449fa8948e0409a0243d666324b749 | 843204956ff7775c49d0d6193e1cd77ab512fbdd | refs/heads/master | 2020-04-10T13:02:17.446430 | 2018-12-04T07:56:55 | 2018-12-04T07:56:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,799 | py | # ------------------------------------------------------------
# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
#
# Licensed under the BSD 2-Clause License.
# You should have received a copy of the BSD 2-Clause License
# along with the software. If not, See,
#
# <https://opensource.org/licenses/BSD-2-Clause>
#
# ------------------------------------------------------------
from dragon.core.tensor import Tensor
import dragon.ops as ops
# Public names exported by this module (a tf.nn-style API surface).
__all__ = [
    'convolution',
    'relu',
    'softmax',
    'conv2d',
    'conv2d_transpose',
    'avg_pool',
    'max_pool',
    'xw_plus_b',
    'bias_add',
    'dropout',
    'sigmoid_cross_entropy_with_logits',
    'softmax_cross_entropy_with_logits',
    'sparse_softmax_cross_entropy_with_logits',
    'l2_loss'
]
def convolution(input, filter, padding, strides=None,
                dilation_rate=None, name=None, data_format=None):
    """Dispatch an N-D convolution to the rank-specific implementation.

    Infers the total rank from ``filter`` (falling back to ``input``),
    pads any partially-specified ``strides``/``dilation_rate`` out to the
    full rank, and currently supports only 2 spatial dimensions (conv2d).
    """
    num_total_dims = filter.get_shape().ndims
    if num_total_dims is None:
        num_total_dims = input.get_shape().ndims
    if num_total_dims is None:
        raise ValueError("rank of input or filter must be known.")
    # Two non-spatial axes: batch and channels.
    num_spatial_dims = num_total_dims - 2
    # make default parameters
    if data_format is None:
        data_format = 'NHWC'
    if strides is None:
        strides = [1] * num_total_dims
    else:
        if len(strides) != num_total_dims:
            # Pad the user-provided spatial strides with 1s; spatial slots
            # start at index 1 for NHWC and index 2 for NCHW.
            _strides = [1] * num_total_dims
            _n_provides = len(strides)
            if data_format == 'NHWC':
                _strides[1 : 1 + _n_provides] = strides
            else:
                _strides[2 : 2 + _n_provides] = strides
            strides = _strides
    if dilation_rate is not None:
        if len(dilation_rate) != num_total_dims:
            # Same padding rule as for strides.
            _dilation_rate = [1] * num_total_dims
            _n_provides = len(dilation_rate)
            if data_format == 'NHWC':
                _dilation_rate[1 : 1 + _n_provides] = dilation_rate
            else:
                _dilation_rate[2 : 2 + _n_provides] = dilation_rate
            dilation_rate = _dilation_rate
    if num_spatial_dims == 2:
        return conv2d(input, filter,
                      strides, padding, dilation_rate,
                      data_format, name)
    else:
        raise NotImplementedError('conv{}d is not implemented.'.format(num_spatial_dims))
def relu(features, name=None):
    """Apply the rectified-linear activation via ops.Relu."""
    activated = ops.Relu(features, name=name)
    return activated
def softmax(logits, dim=-1, name=None):
    # NOTE(review): `name` is accepted but not forwarded to ops.Softmax
    # (relu above does forward it) -- confirm whether this is intentional.
    return ops.Softmax(logits, axis=dim)
def conv2d(input, filter, strides, padding, dilation_rate=None,
           data_format='NHWC', name=None, **kwargs):
    """Run a 2D convolution of ``input`` with ``filter``.

    The filter layout depends on ``data_format``:

    * ``NHWC`` -- ``[filter_height, filter_width, in_channels, out_channels]``
    * ``NCHW`` -- ``[out_channels, in_channels, filter_height, filter_width]``

    Parameters
    ----------
    input : Tensor
        The input tensor.
    filter : Tensor
        The filter tensor; its 4D shape must be statically known.
    strides : list of int
        Per-axis strides, length 4.
    padding : str
        The padding algorithm, ``VALID`` or ``SAME``.
    dilation_rate : list of int or None
        Per-axis dilation rates, length 4 (None means no dilation).
    data_format : str
        Either ``NHWC`` or ``NCHW``.
    name : str
        Optional operator name.

    Returns
    -------
    Tensor
        The convolution output.
    """
    # Validate the statically-known shapes up front.
    if filter.shape is None:
        raise ValueError('filter must have a valid shape.')
    if len(filter.shape) != 4:
        raise ValueError('filter must be a 4D Tensor.')
    if len(strides) != 4:
        raise ValueError('strides must be a list with length 4.')
    if dilation_rate is not None and len(dilation_rate) != 4:
        raise ValueError(' dilation_rate must be a list with length 4.')
    # Select the output-channel count and the spatial slots of the
    # kernel/stride/dilation vectors according to the layout.
    if data_format == 'NHWC':
        num_output = filter.shape[3]
        kernel = filter.shape[0:2]
        spatial = slice(1, 3)
    elif data_format == 'NCHW':
        num_output = filter.shape[0]
        kernel = filter.shape[2:4]
        spatial = slice(2, 4)
    else:
        raise ValueError('Unknown data format: {}'.format(data_format))
    return ops.Conv2d([input, filter],
                      num_output=num_output,
                      kernel_size=kernel,
                      stride=strides[spatial],
                      dilation=dilation_rate[spatial] if dilation_rate is not None else 1,
                      padding=padding,
                      data_format=data_format)
def conv2d_transpose(value, filter, output_shape, strides, padding='SAME',
                     data_format='NHWC', name=None):
    """Run a 2D transposed convolution (deconvolution) of ``value``.

    The filter layout depends on ``data_format``:

    * ``NHWC`` -- ``[filter_height, filter_width, out_channels, in_channels]``
    * ``NCHW`` -- ``[in_channels, out_channels, filter_height, filter_width]``

    ``output_shape`` will be ignored if the padding algorithm is ``VALID``.

    Parameters
    ----------
    value : Tensor
        The input tensor.  (Docstring fix: this parameter was previously
        documented under the wrong name ``input``.)
    filter : Tensor
        The filter tensor; its 4D shape must be statically known.
    output_shape : list of int
        The deterministic output shape for ``SAME`` padding, length 4.
    strides : list of int
        Per-axis strides, length 4.
    padding : str
        The padding algorithm, ``VALID`` or ``SAME``.
    data_format : str
        Either ``NHWC`` or ``NCHW``.
    name : str
        Optional operator name.

    Returns
    -------
    Tensor
        The deconvolution output.
    """
    # Validate the statically-known shapes up front.
    if filter.shape is None:
        raise ValueError('filter must have a valid shape.')
    if len(filter.shape) != 4:
        raise ValueError('filter must be a 4D Tensor.')
    if len(strides) != 4:
        raise ValueError('strides must be a list with length 4.')
    if not isinstance(output_shape, list):
        raise TypeError('output_shape should be a list.')
    if len(output_shape) != 4:
        raise ValueError('output_shape should be a list with length 4.')
    # Select layout-dependent output channels and spatial slots.
    if data_format == 'NHWC':
        num_output = filter.shape[2]
        kernel = filter.shape[0:2]
        spatial = slice(1, 3)
    elif data_format == 'NCHW':
        num_output = filter.shape[1]
        kernel = filter.shape[2:4]
        spatial = slice(2, 4)
    else:
        raise ValueError('Unknown data format: {}'.format(data_format))
    return ops.Conv2dTranspose([value, filter],
                               num_output=num_output,
                               kernel_size=kernel,
                               stride=strides[spatial],
                               padding=padding,
                               data_format=data_format,
                               output_shape=output_shape)
def avg_pool(value, ksize, strides, padding, data_format='NHWC', name=None):
    """Perform average pooling on the spatial axes.

    Parameters
    ----------
    value : Tensor
        The input tensor.
    ksize : list of int
        The kernel size, length >= 4; batch and channel entries must be 1.
    strides : list of int
        The strides, length >= 4; batch and channel entries must be 1.
    padding : str
        The padding algorithm, ``VALID`` or ``SAME``.
    data_format : str
        Either ``NHWC`` or ``NCHW``.
    name : None or str
        Optional operator name.

    Returns
    -------
    Tensor
        The pooled output.

    Raises
    ------
    ValueError
        For malformed ``ksize``/``strides``, pooling over non-spatial axes,
        or an unknown ``data_format`` (previously fell through and returned
        None silently).
    NotImplementedError
        For kernels with other than 2 spatial dimensions.
    """
    if len(ksize) < 4:
        raise ValueError('ksize must be a list with length >=4.')
    if len(strides) < 4:
        raise ValueError('strides must be a list with length >=4.')
    if len(ksize) != len(strides):
        raise ValueError('ksize and strides should have the same length.')
    if len(ksize) != 4:
        raise NotImplementedError('Pool{}d has not been implemented yet.'.format(len(ksize) - 2))
    if data_format == 'NHWC':
        # Spatial axes are 1 and 2; batch (0) and channel (3) must not pool.
        if ksize[0] != 1 or ksize[3] != 1 or strides[0] != 1 or strides[3] != 1:
            raise ValueError('The pooling can only be performed on spatial axes.')
        return ops.Pool2d(value, [ksize[1], ksize[2]], [strides[1], strides[2]],
                          padding=padding, data_format=data_format, mode='AVG')
    if data_format == 'NCHW':
        # Spatial axes are 2 and 3; batch (0) and channel (1) must not pool.
        if ksize[0] != 1 or ksize[1] != 1 or strides[0] != 1 or strides[1] != 1:
            raise ValueError('The pooling can only be performed on spatial axes.')
        return ops.Pool2d(value, [ksize[2], ksize[3]], [strides[2], strides[3]],
                          padding=padding, data_format=data_format, mode='AVG')
    # Bug fix: an unrecognized data_format used to return None silently;
    # raise explicitly, consistent with conv2d/conv2d_transpose.
    raise ValueError('Unknown data format: {}'.format(data_format))
def max_pool(value, ksize, strides, padding, data_format='NHWC', name=None):
    """Perform max pooling on the spatial axes.

    Parameters
    ----------
    value : Tensor
        The input tensor.
    ksize : list of int
        The kernel size, length >= 4; batch and channel entries must be 1.
    strides : list of int
        The strides, length >= 4; batch and channel entries must be 1.
    padding : str
        The padding algorithm, ``VALID`` or ``SAME``.
    data_format : str
        Either ``NHWC`` or ``NCHW``.
    name : None or str
        Optional operator name.

    Returns
    -------
    Tensor
        The pooled output.

    Raises
    ------
    ValueError
        For malformed ``ksize``/``strides``, pooling over non-spatial axes,
        or an unknown ``data_format`` (previously fell through and returned
        None silently).
    NotImplementedError
        For kernels with other than 2 spatial dimensions.
    """
    if len(ksize) < 4:
        raise ValueError('ksize must be a list with length >=4.')
    if len(strides) < 4:
        raise ValueError('strides must be a list with length >=4.')
    if len(ksize) != len(strides):
        raise ValueError('ksize and strides should have the same length.')
    if len(ksize) != 4:
        raise NotImplementedError('Pool{}d has not been implemented yet.'.format(len(ksize) - 2))
    if data_format == 'NHWC':
        # Spatial axes are 1 and 2; batch (0) and channel (3) must not pool.
        if ksize[0] != 1 or ksize[3] != 1 or strides[0] != 1 or strides[3] != 1:
            raise ValueError('The pooling can only be performed on spatial axes.')
        return ops.Pool2d(value, [ksize[1], ksize[2]], [strides[1], strides[2]],
                          padding=padding, data_format=data_format, mode='MAX')
    if data_format == 'NCHW':
        # Spatial axes are 2 and 3; batch (0) and channel (1) must not pool.
        if ksize[0] != 1 or ksize[1] != 1 or strides[0] != 1 or strides[1] != 1:
            raise ValueError('The pooling can only be performed on spatial axes.')
        return ops.Pool2d(value, [ksize[2], ksize[3]], [strides[2], strides[3]],
                          padding=padding, data_format=data_format, mode='MAX')
    # Bug fix: an unrecognized data_format used to return None silently;
    # raise explicitly, consistent with conv2d/conv2d_transpose.
    raise ValueError('Unknown data format: {}'.format(data_format))
def xw_plus_b(x, weights, biases, name=None):
    """Compute ``x @ weights + biases`` via an inner-product op.

    ``weights`` must be 2D with shape ``[in_dim, num_output]`` and
    ``biases`` 1D with shape ``[num_output]``; both shapes must be
    statically known.
    """
    if weights.shape is None:
        raise ValueError('weights must have a valid shape.')
    if len(weights.shape) != 2:
        raise ValueError('weights must be a 2D Tensor')
    if biases.shape is None:
        # Typo fix: message previously read "must a have a valid shape".
        raise ValueError('biases must have a valid shape.')
    if len(biases.shape) != 1:
        raise ValueError('biases must be a 1D Tensor')
    if weights.shape[1] != biases.shape[0]:
        # Typo fix: "biaes" -> "biases".
        raise ValueError('the shape of weights and biases are incompatible.')
    return ops.InnerProduct([x, weights, biases], num_output=weights.shape[1], TransW=False)
def bias_add(value, bias, data_format='NHWC', name=None):
    """Add ``bias`` to ``value`` using ops.BiasAdd for the given layout."""
    result = ops.BiasAdd([value, bias], data_format=data_format)
    return result
def sigmoid_cross_entropy_with_logits(logits, targets, name=None):
    """Compute element-wise sigmoid cross entropy between logits and targets.

    Bug fix: the ``name`` argument was previously ignored (the call passed
    the literal ``name=None``); it is now forwarded to the op.
    """
    return ops.SigmoidCrossEntropy([logits, targets], normalization='UNIT', name=name)
def softmax_cross_entropy_with_logits(_sentinel=None,
                                      labels=None, logits=None,
                                      dim=-1, name=None):
    """Compute softmax cross entropy; arguments must be passed by keyword."""
    if _sentinel is not None:
        raise ValueError('Only call `softmax_cross_entropy_with_logits` '
                         'with named arguments (labels=..., logits=..., ...)')
    # dim == -1 is mapped onto axis 1, mirroring the original behavior.
    axis = 1 if dim == -1 else dim
    return ops.SoftmaxCrossEntropy([logits, labels], axis=axis, normalization='UNIT', name=name)
def sparse_softmax_cross_entropy_with_logits(logits, labels, dim=-1, name=None):
    """Compute sparse softmax cross entropy (integer class labels)."""
    # dim == -1 is mapped onto axis 1, mirroring the original behavior.
    axis = 1 if dim == -1 else dim
    return ops.SparseSoftmaxCrossEntropy([logits, labels], axis=axis, normalization='UNIT', name=name)
def l2_loss(t, name=None):
    """Return half the sum of squared entries of ``t``."""
    squared_sum = ops.Reduce(ops.Square(t), operation='SUM')
    return squared_sum * 0.5
def dropout(x, keep_prob, name=None):
    """Apply dropout; ops.Dropout receives the *drop* probability."""
    drop_prob = 1 - keep_prob
    return ops.Dropout(x, drop_prob)
| [
"ting.pan@seetatech.com"
] | ting.pan@seetatech.com |
e3715d7cbdd7977bd57b89bffe7e1c7374827eb2 | 40fc1d38f2d4b643bc99df347c4ff3a763ba65e3 | /arcade/space_shooter/setup.py | 899a87123f997e3c1122f83ff3f7a77fa541a2a7 | [
"LicenseRef-scancode-public-domain",
"MIT",
"CC-BY-4.0",
"CC-BY-3.0"
] | permissive | alecordev/pygaming | 0be4b7a1c9e7922c63ce4cc369cd893bfef7b03c | 35e479b703acf038f47c2151b3759ad852781e4c | refs/heads/master | 2023-05-14T05:03:28.484678 | 2021-06-03T10:11:08 | 2021-06-03T10:11:08 | 372,768,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | import sys
from cx_Freeze import setup, Executable
import os
# Dependencies are automatically detected, but it might need fine tuning.
# Dependencies are automatically detected, but it might need fine tuning.
build_exe_options = {"packages": ["os", "pygame"]}
# GUI applications require a different base on Windows (the default is for a
# console application).
base = None
if sys.platform == "win32":
    base = "Win32GUI"
# Entry-point script of the game, relative to this setup.py.
pygame_py_file = os.path.join("spaceshooter", "spaceShooter.py")
## The image and sound files are added manually into the zip file
## A fix for this would be released
setup(
    name="Space Shooter",
    version="0.0.2",
    description="classic retro game made using pygame",
    options={"build_exe": build_exe_options},
    executables=[Executable(pygame_py_file, base=base)],
)
| [
"alecor.dev@gmail.com"
] | alecor.dev@gmail.com |
79f3589507f3de821b18244ef90d9281c4d5bd57 | 1c726a56f75d6d57e9bf72e1ab1ae5f044e04a08 | /CLibWally/libwally-core/src/test/test_psbt.py | d81f6acca750d72aa479b00d1f6d0483c5bb7960 | [
"MIT"
] | permissive | Fonta1n3/libwally-swift | 7958d9a9b39461a763db37e5139ff0dfd0ab8257 | 72fe62b51cdc63f50b94aa044a23568d26863897 | refs/heads/master | 2020-12-09T11:34:38.754159 | 2020-08-12T02:07:40 | 2020-08-12T02:07:40 | 242,262,901 | 0 | 0 | MIT | 2020-02-22T01:52:19 | 2020-02-22T01:52:18 | null | UTF-8 | Python | false | false | 5,274 | py | import binascii
import base64
import json
import os
import unittest
from util import *
class PSBTTests(unittest.TestCase):
    """Exercises the wally_psbt_* API with the vectors in data/psbt.json."""
    def test_serialization(self):
        """Testing serialization and deserialization"""
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/psbt.json')) as f:
            d = json.load(f)
        invalids = d['invalid']
        valids = d['valid']
        creators = d['creator']
        signers = d['signer']
        inval_signers = d['inval_signer']
        combiners = d['combiner']
        finalizers = d['finalizer']
        extractors = d['extractor']
        # Invalid base64 encodings must be rejected with WALLY_EINVAL.
        for invalid in invalids:
            self.assertEqual(WALLY_EINVAL, wally_psbt_from_base64(invalid.encode('utf-8'), pointer(wally_psbt())))
        # Valid PSBTs must round-trip to the identical base64 and report
        # the expected serialized length.
        for valid in valids:
            psbt = pointer(wally_psbt())
            self.assertEqual(WALLY_OK, wally_psbt_from_base64(valid['psbt'].encode('utf-8'), psbt))
            ret, reser = wally_psbt_to_base64(psbt)
            self.assertEqual(WALLY_OK, ret)
            self.assertEqual(valid['psbt'], reser)
            ret, length = wally_psbt_get_length(psbt)
            self.assertEqual(WALLY_OK, ret)
            self.assertEqual(length, valid['len'])
        # Creator role: build a PSBT from raw inputs/outputs and compare
        # the serialization against the expected result.
        for creator in creators:
            psbt = pointer(wally_psbt())
            self.assertEqual(WALLY_OK, wally_psbt_init_alloc(2, 2, 0, psbt))
            tx = pointer(wally_tx())
            self.assertEqual(WALLY_OK, wally_tx_init_alloc(2, 0, 2, 2, tx))
            for txin in creator['inputs']:
                input = pointer(wally_tx_input())
                txid = binascii.unhexlify(txin['txid'])[::-1]  # reverse the hex txid's byte order
                self.assertEqual(WALLY_OK, wally_tx_input_init_alloc(txid, len(txid), txin['vout'], 0xffffffff, None, 0, None, input))
                self.assertEqual(WALLY_OK, wally_tx_add_input(tx, input))
            for txout in creator['outputs']:
                addr = txout['addr']
                amt = txout['amt']
                spk, spk_len = make_cbuffer('00' * (32 + 2))
                ret, written = wally_addr_segwit_to_bytes(addr.encode('utf-8'), 'bcrt'.encode('utf-8'), 0, spk, spk_len)
                self.assertEqual(WALLY_OK, ret)
                output = pointer(wally_tx_output())
                self.assertEqual(WALLY_OK, wally_tx_output_init_alloc(amt, spk, written, output))
                self.assertEqual(WALLY_OK, wally_tx_add_output(tx, output))
            self.assertEqual(WALLY_OK, wally_psbt_set_global_tx(psbt, tx))
            ret, ser = wally_psbt_to_base64(psbt)
            self.assertEqual(WALLY_OK, ret)
            self.assertEqual(creator['result'], ser)
        # Combiner role: merge several PSBTs into one.
        for combiner in combiners:
            to_combine = []
            for comb in combiner['combine']:
                psbt = pointer(wally_psbt())
                self.assertEqual(WALLY_OK, wally_psbt_from_base64(comb.encode('utf-8'), psbt))
                to_combine.append(psbt.contents)
            combined = pointer(wally_psbt())
            self.assertEqual(WALLY_OK, wally_combine_psbts((wally_psbt * len(to_combine))(*to_combine), len(to_combine), combined))
            ret, comb_ser = wally_psbt_to_base64(combined)
            self.assertEqual(combiner['result'], comb_ser)
        # Signer role: sign with each WIF-encoded private key.
        for signer in signers:
            psbt = pointer(wally_psbt())
            self.assertEqual(WALLY_OK, wally_psbt_from_base64(signer['psbt'].encode('utf-8'), psbt))
            for priv in signer['privkeys']:
                buf, buf_len = make_cbuffer('00'*32)
                self.assertEqual(WALLY_OK, wally_wif_to_bytes(priv.encode('utf-8'), 0xEF, 0, buf, buf_len))
                self.assertEqual(WALLY_OK, wally_sign_psbt(psbt, buf, buf_len))
            ret, reser = wally_psbt_to_base64(psbt)
            self.assertEqual(WALLY_OK, ret)
            self.assertEqual(signer['result'], reser)
        # Signing with keys from the invalid-signer vectors must fail.
        for inval_signer in inval_signers:
            psbt = pointer(wally_psbt())
            self.assertEqual(WALLY_OK, wally_psbt_from_base64(inval_signer['psbt'].encode('utf-8'), psbt))
            for priv in inval_signer['privkeys']:
                buf, buf_len = make_cbuffer('00'*32)
                self.assertEqual(WALLY_OK, wally_wif_to_bytes(priv.encode('utf-8'), 0xEF, 0, buf, buf_len))
                self.assertEqual(WALLY_EINVAL, wally_sign_psbt(psbt, buf, buf_len))
        # Finalizer role: finalized PSBT must match the expected base64.
        for finalizer in finalizers:
            psbt = pointer(wally_psbt())
            self.assertEqual(WALLY_OK, wally_psbt_from_base64(finalizer['finalize'].encode('utf-8'), psbt))
            self.assertEqual(WALLY_OK, wally_finalize_psbt(psbt))
            ret, reser = wally_psbt_to_base64(psbt)
            self.assertEqual(WALLY_OK, ret)
            self.assertEqual(finalizer['result'], reser)
        # Extractor role: pull the final transaction out and compare hex.
        for extractor in extractors:
            psbt = pointer(wally_psbt())
            tx = pointer(wally_tx())
            self.assertEqual(WALLY_OK, wally_psbt_from_base64(extractor['extract'].encode('utf-8'), psbt))
            self.assertEqual(WALLY_OK, wally_extract_psbt(psbt, tx))
            ret, reser = wally_tx_to_hex(tx, 1)
            self.assertEqual(WALLY_OK, ret)
            self.assertEqual(extractor['result'], reser)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| [
"fontainedenton@googlemail.com"
] | fontainedenton@googlemail.com |
ec71f7f50fdfafa85cf8e6aa083d19b2a07346e5 | 7f26cd2b4f1c20e0b7f8c246b41c3d163f3287fb | /img_inpainting/inpaint_image_real.py | cb02610a5c46cc89f5772046045d9602515bded1 | [] | no_license | GU1020/occluded-offline-HCCR | 2059455b288a180468028e20b9770363298a7915 | f49a687df1bc704d3e420062a13c87a8057f6ae5 | refs/heads/master | 2020-04-29T10:46:15.525127 | 2018-09-30T13:15:31 | 2018-09-30T13:15:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 865 | py | import numpy as np
from glob import glob
import cv2
from model import generator_model
def load_data(start, end, pattern='/home/alyssa/Desktop/the_second/D5/*'):
    """Read images ``glob(pattern)[start:end]`` and scale them to [-1, 1].

    ``pattern`` generalizes the previously hard-coded directory; the
    default preserves the original behavior.  Note that glob() returns
    files in arbitrary order.

    Returns a numpy array of images normalized from [0, 255] to [-1, 1].
    """
    batch_paths = glob(pattern)[start:end]
    # cv2.imread yields uint8 BGR arrays in [0, 255].
    images = [cv2.imread(img_path) for img_path in batch_paths]
    return np.array(images) / 127.5 - 1
def deblur_real():
    """Run the trained generator over 20 images and write the outputs.

    NOTE(review): the weight, input and output paths are hard-coded to
    specific machines -- parameterize them before reuse.
    """
    g = generator_model()
    g.load_weights('/Users/albert/con_lab/alyssa/OCCLUDED/generator_57000_243.h5')
    count = 0
    # 10 batches of 2 images each.
    for i in range(10):
        x_pre = load_data(i*2, (i+1)*2)
        generated_images = g.predict(x=x_pre, batch_size=1)
        # Undo the [-1, 1] normalization back to pixel values.
        result = (generated_images+1)*127.5
        for j in range(2):
            count += 1
            cv2.imwrite('/home/alyssa/PythonProjects/occluded/11/'+str(count)+'_1.jpg', result[j])
deblur_real()
| [
"2120171111@bit.edu.cn"
] | 2120171111@bit.edu.cn |
b058d7c4c93cbb9799fe71b9306b07e4c6a1d2ed | 78633d5fb718701dd2f0570ce9a80d54a6993ae7 | /approximation.py | f5fb0212f81089fe696189153b5ec7f9f0aef84c | [] | no_license | ALexanderSpiridonov/python | dcc75e13b0920f59e19ffbb2d9c0f8b3c437e515 | a9dbae34850845ae500f5a5d7163e604f15c54c6 | refs/heads/master | 2021-01-13T12:34:38.729967 | 2016-10-22T14:19:21 | 2016-10-22T14:19:21 | 72,544,855 | 1 | 0 | null | 2016-11-01T14:37:38 | 2016-11-01T14:37:38 | null | UTF-8 | Python | false | false | 1,044 | py | import numpy as np
import scipy
from scipy import sin,cos,exp
import scipy.linalg
from scipy import interpolate
from matplotlib import pylab as plt
# Target function to approximate on [1, 15].
fx = lambda x: sin(x / 5.0) * exp(x / 10.0) + 5 * exp(-x / 2.0)


def _interpolating_coeffs(points):
    """Solve the Vandermonde system for the polynomial through (x, fx(x)).

    Returns the coefficients [w0, w1, ...] of w0 + w1*x + w2*x^2 + ...
    interpolating fx at the given points.  Replaces three copies of the
    same solve code (which also used the Python-2-only `xrange`).
    """
    k = len(points)
    a = np.array([[x ** n for n in range(k)] for x in points], dtype=float)
    return scipy.linalg.solve(a, fx(points))


# Degree-1 polynomial through x = 1 and 15.
s1 = _interpolating_coeffs(np.array([1, 15]))

# Degree-3 polynomial through x = 1, 4, 8, 15, plus a quadratic spline.
p = np.array([1, 4, 8, 15])
b = fx(p)
s3 = _interpolating_coeffs(p)
f = interpolate.interp1d(p, fx(p), kind='quadratic')
xnew = np.arange(0, 15, 0.1)
# NOTE(review): '----' is an unusual matplotlib format string -- confirm
# the intended line style (plain dashed is '--').
plt.plot(p, b, 'x', p, b, '----')
plt.show()

# Degree-2 polynomial through x = 1, 8, 15.
p = np.array([1, 8, 15])
b = fx(p)
s2 = _interpolating_coeffs(p)
| [
"noreply@github.com"
] | noreply@github.com |
bd2b2d66a41b8b3ff7039513296b3de98f7528fd | 8f9337ab892629b1ca29ddf07421003ff2e2741a | /p15.py | 8218c73f9e39587ef22846dfefb443864eb44948 | [] | no_license | vkuzo/euler | b91a11446a9b04715d6990bb97e43de5529e209f | c0205ac60fdf6a58e0133f8e76d3a7de76ee9e4f | refs/heads/master | 2016-09-05T14:51:31.555179 | 2012-04-20T01:55:05 | 2012-04-20T01:55:05 | 3,962,899 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | """
Starting in the top left corner of a 22 grid, there are 6 routes (without backtracking) to the bottom right corner.
How many routes are there through a 2020 grid?
"""
import unittest
def getMatrix(n):
    """Build the lattice-path count table for an n x n grid.

    Returns an (n+1) x (n+1) matrix ``res`` where ``res[x][y]`` is the
    number of monotone (right/down only) routes from the origin to cell
    (x, y); the answer to the n x n grid problem is ``res[-1][-1]``.

    For n = 2 this returns::

        [[1, 1, 1],
         [1, 2, 3],
         [1, 3, 6]]

    (Docstring fix: the previous example showed the matrix in the reverse
    orientation, contradicting the unit test.)
    """
    size = n + 1
    res = [[0] * size for _ in range(size)]
    # Exactly one (empty) route reaches the origin.
    res[0][0] = 1
    # Edge cells have exactly one route (a straight line).
    for x in range(1, size):
        res[x][0] = res[x - 1][0]
    for y in range(1, size):
        res[0][y] = res[0][y - 1]
    # Interior cells: routes arrive from above or from the left.
    for x in range(1, size):
        for y in range(1, size):
            res[x][y] = res[x - 1][y] + res[x][y - 1]
    return res
class testProblem(unittest.TestCase):
    """Unit tests for getMatrix."""
    def setUp(self):
        pass

    def testGetMatrix(self):
        # Fixes: removed a stray `pass` before the assertion and replaced
        # the deprecated `assertEquals` alias with `assertEqual`.
        self.assertEqual([[1, 1, 1], [1, 2, 3], [1, 3, 6]], getMatrix(2))
if __name__ == '__main__':
    # Run the unit tests, then print the route count for the 20x20 grid.
    suite = unittest.TestLoader().loadTestsFromTestCase(testProblem)
    unittest.TextTestRunner(verbosity=2).run(suite)
    print getMatrix(20)[-1][-1]
"vasek1@gmail.com"
] | vasek1@gmail.com |
27c5a8484ca0453b96b571915907904ddc4c36ff | b8b1a47aa528b9a2cf22d5ef57b0d3c51864182c | /Data Structure/Pointer/reverseString.py | 504fa50fb319c6c69acdc73e74b7dabb3d602364 | [] | no_license | zxyzliu/algorithm | ef148aa3ffcb93e598aa3db00dc427461a918c49 | a82a16a3ad1d82cb467b3fc7f6b456128c87e5b9 | refs/heads/master | 2021-12-14T02:33:53.033593 | 2021-11-23T03:36:25 | 2021-11-23T03:36:25 | 220,148,441 | 0 | 0 | null | 2020-08-27T23:55:35 | 2019-11-07T04:02:31 | Python | UTF-8 | Python | false | false | 691 | py | """
Write a function that reverses a string. The input string is given as an array of characters char[].
Do not allocate extra space for another array, you must do this by modifying the input array in-place
with O(1) extra memory.
You may assume all the characters consist of printable ascii characters.
Example 1:
Input: ["h","e","l","l","o"]
Output: ["o","l","l","e","h"]
"""
def reverseString(s):
    """Return the characters of ``s`` in reverse order.

    The LeetCode prompt above asks for an in-place reversal of a char
    array; Python strings are immutable, so this version performs the
    two-pointer swap on a temporary list and returns a new string.
    (Fix: replaced the confusing `result = ""` / `result.join(...)` idiom
    with a direct `"".join`.)
    """
    chars = list(s)
    left, right = 0, len(chars) - 1
    while left < right:
        chars[left], chars[right] = chars[right], chars[left]
        left += 1
        right -= 1
    return "".join(chars)

print(reverseString("hello"))
| [
"Dou Bao@WOAIROUBAOBAO"
] | Dou Bao@WOAIROUBAOBAO |
bdea1dd3f8285c7006c568ea1b92484b79f3ac39 | b1ffec4807af5c05427191fbf7860ca5acf207b5 | /tests/test_static_field.py | a3dbd9f8d1324bf5371016ab023f4ea3b1f1e31e | [
"Apache-2.0"
] | permissive | futhewo/alicia | 265a29626f0ffd5b8e28f8a3c77ad34c0b8ae388 | a23f1b29ca72b66380d2f7b34dc7ebedf75b737e | refs/heads/master | 2021-04-12T07:52:02.315900 | 2017-11-01T12:07:49 | 2017-11-01T12:07:49 | 94,511,892 | 0 | 0 | null | 2017-11-01T12:07:49 | 2017-06-16T06:19:10 | Python | UTF-8 | Python | false | false | 3,357 | py | #!/usr/bin/python
# -*- encoding: iso-8859-1 -*-
###############################################################################
# Copyright 2017 @fuzztheworld
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Unit-tests of Alicia
# Imports #####################################################################
from nose.tools import *
from alicia.configuration import *
from alicia.static_field import *
from alicia.contents.string_content import *
from alicia.contents.integer_content import *
from alicia.utils import *
# Nose functions ##############################################################
def setup():
    # nose module-level setup hook -- nothing to prepare.
    pass
def teardown():
    # nose module-level teardown hook -- nothing to clean up.
    pass
# StaticField #################################################################
def test_StaticField___init__():
    """Constructor stores the content and applies default/explicit attrs."""
    string_content = StringContent("ABCDE")
    default_field = StaticField(string_content)
    assert_equals(default_field.type, "StaticField")
    assert_equals(default_field.static, True)
    assert_equals(default_field.weight, 1.0)
    assert_equals(default_field.boundElements, [])
    assert_equals(default_field.notifiable, True)
    assert_equals(default_field.parsed, False)
    assert_equals(default_field.content, string_content)

    integer_content = IntegerContent(12345)
    named_field = StaticField(integer_content, name="My StaticField", weight=2.0)
    assert_equals(named_field.type, "StaticField")
    assert_equals(named_field.name, "My StaticField")
    assert_equals(named_field.static, True)
    assert_equals(named_field.weight, 2.0)
    assert_equals(named_field.boundElements, [])
    assert_equals(named_field.notifiable, True)
    assert_equals(named_field.parsed, False)
    assert_equals(named_field.content, integer_content)
def test_StaticField_getSize():
    """getSize() reports the wrapped content's length."""
    field = StaticField(StringContent("ABCDE"))
    assert_equals(field.getSize(), 5)
def test_StaticField___str__():
    """str() renders "[name: value (ContentType)]" followed by a newline."""
    field = StaticField(StringContent("ABCDE"), name="My StaticField", weight=2.0)
    assert_equals(str(field), "[My StaticField: ABCDE (StringContent)]\n")
def test_StaticField_compose():
    """compose() returns the current content value."""
    field = StaticField(StringContent("ABCDE"))
    assert_equals(field.compose(), "ABCDE")
def test_StaticField_commit():
    """commit() promotes the pending `future` value to the current one."""
    field = StaticField(StringContent("ABCDE"))
    field.content.future = "FGHEI"
    field.commit()
    assert_equals(field.compose(), "FGHEI")
def test_StaticField_clean():
    """clean() reverts a modified current value back to the original."""
    field = StaticField(StringContent("ABCDE"))
    field.content.current = "FGHEI"
    field.clean()
    assert_equals(field.compose(), "ABCDE")
| [
"fuzztheworld@gmail.com"
] | fuzztheworld@gmail.com |
fa712d9b26ddd79b5f012c922726398cea934e54 | 3289a94f8e62ad3e997e98fe6cb8768703bd7818 | /equalheights_stacks.py | c19c559c5e382e63260f8436d50948f6a690cd1e | [] | no_license | Jyothi3659/hackerrankproblems | 0325887f633d8a627fc8ddbf17c0543c1003bdfb | 18715aadf2c0d1ed17e706d81067403b9c39f24e | refs/heads/master | 2020-08-27T07:58:30.148512 | 2019-11-10T09:56:49 | 2019-11-10T09:56:49 | 217,292,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | def equalStacks(h1,h2,h3):
max = 0
height_h1 = sum(h1)
height_h2 = suum(h2)
height_h3 = sum(h3)
| [
"yedla.jyothi95@gmail.com"
] | yedla.jyothi95@gmail.com |
91736a2d122ff86accc01b5aed1da1134664e977 | cce40debf3f6cfc7467f5cc1e2364b8a31b88137 | /ContextualSecurityDashboard/dashboard/views.py | 13c4feafa87ba34cdc7cbea821fc7889256e2f4d | [] | no_license | jgkuttic/SecurityDashboard | 555ad258b3b7daefb0fd36c10c373f5ac740f13e | c08f5aba0fdb8d7cf4242e0a4b56d2ee5ec77c13 | refs/heads/master | 2020-12-30T10:11:20.212188 | 2017-08-03T15:12:29 | 2017-08-03T15:12:29 | 99,245,592 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,479 | py | from django.shortcuts import render
from django.http import HttpResponse
from rest_framework import status
from rest_framework.response import Response
from dashboard.models import Report
from dashboard.serializers import ReportSerializer
from .models import Module
from .serializers import ModuleSerializer
import django_filters
from rest_framework import viewsets
from rest_framework.decorators import detail_route, list_route
from rest_framework.renderers import JSONRenderer
import requests
import urllib
#Viewset for reports
class ReportViewSet(viewsets.ModelViewSet):
    """CRUD API for Report objects with field filtering and ordering."""
    queryset = Report.objects.all()
    serializer_class = ReportSerializer
    filter_backends = (django_filters.rest_framework.DjangoFilterBackend,)
    filter_fields = ('id', 'title','module', 'severity', 'status', 'date')
    ordering_fields = '__all__'
    ordering = ('title',)
    def create(self, request):
        """Bulk-create reports, registering unseen modules and pushing
        severity-1 reports to a chat webhook.

        NOTE(review): assumes `request.data` is a non-empty list of dicts
        with a "module" key -- an empty payload raises IndexError; confirm
        callers always send at least one report.
        """
        #Check for new module
        moduleName = request.data[0]["module"]
        modulesWithName = Module.objects.filter(module=moduleName)
        if not modulesWithName:
            newModule = Module(module=moduleName)
            newModule.save()
        #Add report to Webhooks
        # NOTE(review): the webhook URL (including its id) is hard-coded --
        # move it to settings/secrets.  `urllib.quote` is Python 2 only;
        # Python 3 would need urllib.parse.quote.
        for report in request.data:
            if (report["severity"] == 1):
                titleEncoded = urllib.quote(str(report["title"]), safe='')
                url = 'http://localhost:5000/#reportDetail/' + titleEncoded
                message = '[SEV 1] ' + str(report["title"]) + ' ' + url
                r = requests.post('https://apps.mypurecloud.com:443/webhooks/api/v1/webhook/7d0b4897-9918-427b-b621-35f0253affb5',
                    data= { "message": message })
        #Add the reports to the database
        serializer = ReportSerializer(data=request.data, many=True)
        if serializer.is_valid():
            serializer.save()
        else:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        return Response(serializer.data, status=status.HTTP_201_CREATED)
#Viewset for modules (comment previously said "reports")
class ModuleViewSet(viewsets.ModelViewSet):
    """API for Module objects; JSON-only output, pagination disabled."""
    queryset = Module.objects.all()
    serializer_class = ModuleSerializer
    filter_backends = (django_filters.rest_framework.DjangoFilterBackend,)
    filter_fields = ('module',)
    ordering_fields = ('module',)
    ordering = ('module',)
    renderer_classes = (JSONRenderer, )
    pagination_class = None
# homepage view
def index(request):
    """Placeholder homepage view; returns a static message."""
    return HttpResponse("This will be our dashboard page.")
| [
"jgkuttic@ncsu.edu"
] | jgkuttic@ncsu.edu |
6b79c8535a340541d34f935ff6d1aebda39ffad7 | 342d850e5d87192b01e2ff8995d0a2fadf0a0e1a | /Proxy/subject.py | 06d8bcad87c85feb1f2a7b4b06d6453189597e2d | [] | no_license | PlumpMath/DesignPatternsPython-3 | 213a078d9b003fb577c79abff14e7267de24ae4d | a42ce5a471b6b17ee453b2b7f16d5d50c4cef7df | refs/heads/master | 2021-01-20T09:51:59.626139 | 2014-12-14T16:29:01 | 2014-12-14T16:29:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | from abc import ABCMeta, abstractmethod
import random
from _pyio import __metaclass__
class AbstractSubject(object):
    """A common interface for the real and proxy objects. """
    # Python 2 style metaclass declaration; this attribute is ignored by
    # Python 3, which would need `class AbstractSubject(metaclass=ABCMeta)`.
    __metaclass__ = ABCMeta
    @abstractmethod
    def sort(self, reverse=False):
        # Sort the underlying data; reverse=True requests descending order.
        pass
class RealSubject(AbstractSubject):
    """A heavy object: instantiation fills a list with ten million random
    floats, so it is slow to create and memory-hungry (motivating a proxy).
    """
    def __init__(self):
        self.digits = []
        for i in xrange(10000000):  # xrange: this module targets Python 2
            self.digits.append(random.random())
    def sort(self, reverse=False):
        # Ascending in-place sort; optionally reverse for descending order.
        self.digits.sort()
        if reverse:
            self.digits.reverse()
| [
"ron.zohan@yahoo.com"
] | ron.zohan@yahoo.com |
11bc9407b651db938ff3f6333da6b4972a5e9ef3 | 35cf6fc79b8d6c335add8e55e0f4dca6f2816d1d | /Python_Study/第七模块学习/Day01/用pycharm创建Django/venv/Lib/site-packages/django/contrib/gis/utils/layermapping.py | 972c24ce95f0b6498ee2791c87eee130c2af410d | [] | no_license | KongChan1988/51CTO-Treasure | 08b4ca412ad8a09d67c1ea79c7149f8573309ca4 | edb2e4bd11d39ac24cd240f3e815a88361867621 | refs/heads/master | 2021-07-04T15:57:56.164446 | 2019-07-24T15:28:36 | 2019-07-24T15:28:36 | 97,453,749 | 5 | 8 | null | 2019-10-30T22:05:12 | 2017-07-17T08:34:59 | Python | UTF-8 | Python | false | false | 27,129 | py | # LayerMapping -- A Django Model/OGR Layer Mapping Utility
"""
The LayerMapping class provides a way to map the contents of OGR
vector files (e.g. SHP files) to Geographic-enabled Django models.
For more information, please consult the GeoDjango documentation:
https://docs.djangoproject.com/en/dev/ref/contrib/gis/layermapping/
"""
import sys
from decimal import Decimal, InvalidOperation as DecimalInvalidOperation
from django.contrib.gis.db.models import GeometryField
from django.contrib.gis.gdal import (
CoordTransform, DataSource, GDALException, OGRGeometry, OGRGeomType,
SpatialReference,
)
from django.contrib.gis.gdal.field import (
OFTDate, OFTDateTime, OFTInteger, OFTInteger64, OFTReal, OFTString,
OFTTime,
)
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import connections, models, router, transaction
from django.utils.encoding import force_text
# LayerMapping exceptions.
class LayerMapError(Exception):
    """Base class for all LayerMapping errors."""
    pass
class InvalidString(LayerMapError):
    """LayerMapping error for invalid string values."""
    pass
class InvalidDecimal(LayerMapError):
    """LayerMapping error for invalid decimal values."""
    pass
class InvalidInteger(LayerMapError):
    """LayerMapping error for invalid integer values."""
    pass
class MissingForeignKey(LayerMapError):
    """LayerMapping error for a foreign key that could not be resolved."""
    pass
class LayerMapping:
    "A class that maps OGR Layers to GeoDjango Models."
    # Acceptable 'base' types for a multi-geometry type.
    MULTI_TYPES = {1: OGRGeomType('MultiPoint'),
                   2: OGRGeomType('MultiLineString'),
                   3: OGRGeomType('MultiPolygon'),
                   OGRGeomType('Point25D').num: OGRGeomType('MultiPoint25D'),
                   OGRGeomType('LineString25D').num: OGRGeomType('MultiLineString25D'),
                   OGRGeomType('Polygon25D').num: OGRGeomType('MultiPolygon25D'),
                   }
    # Acceptable Django field types and corresponding acceptable OGR
    # counterparts.
    FIELD_TYPES = {
        models.AutoField: OFTInteger,
        models.BigAutoField: OFTInteger64,
        models.IntegerField: (OFTInteger, OFTReal, OFTString),
        models.FloatField: (OFTInteger, OFTReal),
        models.DateField: OFTDate,
        models.DateTimeField: OFTDateTime,
        models.EmailField: OFTString,
        models.TimeField: OFTTime,
        models.DecimalField: (OFTInteger, OFTReal),
        models.CharField: OFTString,
        models.SlugField: OFTString,
        models.TextField: OFTString,
        models.URLField: OFTString,
        models.BigIntegerField: (OFTInteger, OFTReal, OFTString),
        models.SmallIntegerField: (OFTInteger, OFTReal, OFTString),
        models.PositiveSmallIntegerField: (OFTInteger, OFTReal, OFTString),
    }
    def __init__(self, model, data, mapping, layer=0,
                 source_srs=None, encoding='utf-8',
                 transaction_mode='commit_on_success',
                 transform=True, unique=None, using=None):
        """
        A LayerMapping object is initialized using the given Model (not an instance),
        a DataSource (or string path to an OGR-supported data file), and a mapping
        dictionary. See the module level docstring for more details and keyword
        argument usage.
        """
        # Getting the DataSource and the associated Layer.
        if isinstance(data, str):
            self.ds = DataSource(data, encoding=encoding)
        else:
            self.ds = data
        self.layer = self.ds[layer]
        self.using = using if using is not None else router.db_for_write(model)
        self.spatial_backend = connections[self.using].ops
        # Setting the mapping & model attributes.
        self.mapping = mapping
        self.model = model
        # Checking the layer -- initialization of the object will fail if
        # things don't check out before hand.
        self.check_layer()
        # Getting the geometry column associated with the model (an
        # exception will be raised if there is no geometry column).
        if connections[self.using].features.supports_transform:
            self.geo_field = self.geometry_field()
        else:
            # Backend cannot transform geometries, so disable transformation.
            transform = False
        # Checking the source spatial reference system, and getting
        # the coordinate transformation object (unless the `transform`
        # keyword is set to False)
        if transform:
            self.source_srs = self.check_srs(source_srs)
            self.transform = self.coord_transform()
        else:
            # `self.transform` is falsy here, so verify_geom() skips the step.
            self.transform = transform
        # Setting the encoding for OFTString fields, if specified.
        if encoding:
            # Making sure the encoding exists, if not a LookupError
            # exception will be thrown.
            from codecs import lookup
            lookup(encoding)
            self.encoding = encoding
        else:
            self.encoding = None
        if unique:
            self.check_unique(unique)
            transaction_mode = 'autocommit'  # Has to be set to autocommit.
            self.unique = unique
        else:
            self.unique = None
        # Setting the transaction decorator with the function in the
        # transaction modes dictionary.
        self.transaction_mode = transaction_mode
        if transaction_mode == 'autocommit':
            self.transaction_decorator = None
        elif transaction_mode == 'commit_on_success':
            self.transaction_decorator = transaction.atomic
        else:
            raise LayerMapError('Unrecognized transaction mode: %s' % transaction_mode)
    # #### Checking routines used during initialization ####
    def check_fid_range(self, fid_range):
        "Check the `fid_range` keyword."
        if fid_range:
            if isinstance(fid_range, (tuple, list)):
                return slice(*fid_range)
            elif isinstance(fid_range, slice):
                return fid_range
            else:
                raise TypeError
        else:
            return None
    def check_layer(self):
        """
        Check the Layer metadata and ensure that it's compatible with the
        mapping information and model. Unlike previous revisions, there is no
        need to increment through each feature in the Layer.
        """
        # The geometry field of the model is set here.
        # TODO: Support more than one geometry field / model. However, this
        # depends on the GDAL Driver in use.
        self.geom_field = False
        self.fields = {}
        # Getting lists of the field names and the field types available in
        # the OGR Layer.
        ogr_fields = self.layer.fields
        ogr_field_types = self.layer.field_types
        # Function for determining if the OGR mapping field is in the Layer.
        def check_ogr_fld(ogr_map_fld):
            try:
                idx = ogr_fields.index(ogr_map_fld)
            except ValueError:
                raise LayerMapError('Given mapping OGR field "%s" not found in OGR Layer.' % ogr_map_fld)
            return idx
        # No need to increment through each feature in the model, simply check
        # the Layer metadata against what was given in the mapping dictionary.
        for field_name, ogr_name in self.mapping.items():
            # Ensuring that a corresponding field exists in the model
            # for the given field name in the mapping.
            try:
                model_field = self.model._meta.get_field(field_name)
            except FieldDoesNotExist:
                raise LayerMapError('Given mapping field "%s" not in given Model fields.' % field_name)
            # Getting the string name for the Django field class (e.g., 'PointField').
            fld_name = model_field.__class__.__name__
            if isinstance(model_field, GeometryField):
                if self.geom_field:
                    raise LayerMapError('LayerMapping does not support more than one GeometryField per model.')
                # Getting the coordinate dimension of the geometry field.
                coord_dim = model_field.dim
                try:
                    if coord_dim == 3:
                        gtype = OGRGeomType(ogr_name + '25D')
                    else:
                        gtype = OGRGeomType(ogr_name)
                except GDALException:
                    raise LayerMapError('Invalid mapping for GeometryField "%s".' % field_name)
                # Making sure that the OGR Layer's Geometry is compatible.
                ltype = self.layer.geom_type
                if not (ltype.name.startswith(gtype.name) or self.make_multi(ltype, model_field)):
                    raise LayerMapError('Invalid mapping geometry; model has %s%s, '
                                        'layer geometry type is %s.' %
                                        (fld_name, '(dim=3)' if coord_dim == 3 else '', ltype))
                # Setting the `geom_field` attribute w/the name of the model field
                # that is a Geometry.  Also setting the coordinate dimension
                # attribute.
                self.geom_field = field_name
                self.coord_dim = coord_dim
                fields_val = model_field
            elif isinstance(model_field, models.ForeignKey):
                if isinstance(ogr_name, dict):
                    # Is every given related model mapping field in the Layer?
                    rel_model = model_field.remote_field.model
                    for rel_name, ogr_field in ogr_name.items():
                        # The call validates presence in the Layer; the index
                        # itself is not needed for ForeignKey mappings.
                        idx = check_ogr_fld(ogr_field)
                        try:
                            rel_model._meta.get_field(rel_name)
                        except FieldDoesNotExist:
                            # NOTE(review): `rel_model` is a class, so
                            # `__class__.__name__` yields the metaclass name
                            # ('ModelBase'); `rel_model.__name__` was likely
                            # intended -- confirm before changing.
                            raise LayerMapError('ForeignKey mapping field "%s" not in %s fields.' %
                                                (rel_name, rel_model.__class__.__name__))
                    fields_val = rel_model
                else:
                    raise TypeError('ForeignKey mapping must be of dictionary type.')
            else:
                # Is the model field type supported by LayerMapping?
                if model_field.__class__ not in self.FIELD_TYPES:
                    raise LayerMapError('Django field type "%s" has no OGR mapping (yet).' % fld_name)
                # Is the OGR field in the Layer?
                idx = check_ogr_fld(ogr_name)
                ogr_field = ogr_field_types[idx]
                # Can the OGR field type be mapped to the Django field type?
                if not issubclass(ogr_field, self.FIELD_TYPES[model_field.__class__]):
                    raise LayerMapError('OGR field "%s" (of type %s) cannot be mapped to Django %s.' %
                                        (ogr_field, ogr_field.__name__, fld_name))
                fields_val = model_field
            self.fields[field_name] = fields_val
    def check_srs(self, source_srs):
        "Check the compatibility of the given spatial reference object."
        if isinstance(source_srs, SpatialReference):
            sr = source_srs
        elif isinstance(source_srs, self.spatial_backend.spatial_ref_sys()):
            sr = source_srs.srs
        elif isinstance(source_srs, (int, str)):
            sr = SpatialReference(source_srs)
        else:
            # Otherwise just pulling the SpatialReference from the layer
            sr = self.layer.srs
        if not sr:
            raise LayerMapError('No source reference system defined.')
        else:
            return sr
    def check_unique(self, unique):
        "Check the `unique` keyword parameter -- may be a sequence or string."
        if isinstance(unique, (list, tuple)):
            # List of fields to determine uniqueness with
            for attr in unique:
                if attr not in self.mapping:
                    raise ValueError
        elif isinstance(unique, str):
            # Only a single field passed in.
            if unique not in self.mapping:
                raise ValueError
        else:
            raise TypeError('Unique keyword argument must be set with a tuple, list, or string.')
    # Keyword argument retrieval routines ####
    def feature_kwargs(self, feat):
        """
        Given an OGR Feature, return a dictionary of keyword arguments for
        constructing the mapped model.
        """
        # The keyword arguments for model construction.
        kwargs = {}
        # Incrementing through each model field and OGR field in the
        # dictionary mapping.
        for field_name, ogr_name in self.mapping.items():
            model_field = self.fields[field_name]
            if isinstance(model_field, GeometryField):
                # Verify OGR geometry.
                try:
                    val = self.verify_geom(feat.geom, model_field)
                except GDALException:
                    raise LayerMapError('Could not retrieve geometry from feature.')
            elif isinstance(model_field, models.base.ModelBase):
                # The related _model_, not a field was passed in -- indicating
                # another mapping for the related Model.
                val = self.verify_fk(feat, model_field, ogr_name)
            else:
                # Otherwise, verify OGR Field type.
                val = self.verify_ogr_field(feat[ogr_name], model_field)
            # Setting the keyword arguments for the field name with the
            # value obtained above.
            kwargs[field_name] = val
        return kwargs
    def unique_kwargs(self, kwargs):
        """
        Given the feature keyword arguments (from `feature_kwargs`), construct
        and return the uniqueness keyword arguments -- a subset of the feature
        kwargs.
        """
        if isinstance(self.unique, str):
            return {self.unique: kwargs[self.unique]}
        else:
            return {fld: kwargs[fld] for fld in self.unique}
    # #### Verification routines used in constructing model keyword arguments. ####
    def verify_ogr_field(self, ogr_field, model_field):
        """
        Verify if the OGR Field contents are acceptable to the model field. If
        they are, return the verified value, otherwise raise an exception.
        """
        if (isinstance(ogr_field, OFTString) and
                isinstance(model_field, (models.CharField, models.TextField))):
            if self.encoding:
                # The encoding for OGR data sources may be specified here
                # (e.g., 'cp437' for Census Bureau boundary files).
                val = force_text(ogr_field.value, self.encoding)
            else:
                val = ogr_field.value
            if model_field.max_length and len(val) > model_field.max_length:
                raise InvalidString('%s model field maximum string length is %s, given %s characters.' %
                                    (model_field.name, model_field.max_length, len(val)))
        elif isinstance(ogr_field, OFTReal) and isinstance(model_field, models.DecimalField):
            try:
                # Creating an instance of the Decimal value to use.
                d = Decimal(str(ogr_field.value))
            except DecimalInvalidOperation:
                raise InvalidDecimal('Could not construct decimal from: %s' % ogr_field.value)
            # Getting the decimal value as a tuple.
            dtup = d.as_tuple()
            digits = dtup[1]
            d_idx = dtup[2]  # index where the decimal is
            # Maximum amount of precision, or digits to the left of the decimal.
            max_prec = model_field.max_digits - model_field.decimal_places
            # Getting the digits to the left of the decimal place for the
            # given decimal.
            if d_idx < 0:
                n_prec = len(digits[:d_idx])
            else:
                n_prec = len(digits) + d_idx
            # If we have more than the maximum digits allowed, then throw an
            # InvalidDecimal exception.
            if n_prec > max_prec:
                raise InvalidDecimal(
                    'A DecimalField with max_digits %d, decimal_places %d must '
                    'round to an absolute value less than 10^%d.' %
                    (model_field.max_digits, model_field.decimal_places, max_prec)
                )
            val = d
        elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(model_field, models.IntegerField):
            # Attempt to convert any OFTReal and OFTString value to an OFTInteger.
            try:
                val = int(ogr_field.value)
            except ValueError:
                raise InvalidInteger('Could not construct integer from: %s' % ogr_field.value)
        else:
            val = ogr_field.value
        return val
    def verify_fk(self, feat, rel_model, rel_mapping):
        """
        Given an OGR Feature, the related model and its dictionary mapping,
        retrieve the related model for the ForeignKey mapping.
        """
        # TODO: It is expensive to retrieve a model for every record --
        # explore if an efficient mechanism exists for caching related
        # ForeignKey models.
        # Constructing and verifying the related model keyword arguments.
        fk_kwargs = {}
        for field_name, ogr_name in rel_mapping.items():
            fk_kwargs[field_name] = self.verify_ogr_field(feat[ogr_name], rel_model._meta.get_field(field_name))
        # Attempting to retrieve and return the related model.
        try:
            return rel_model.objects.using(self.using).get(**fk_kwargs)
        except ObjectDoesNotExist:
            raise MissingForeignKey(
                'No ForeignKey %s model found with keyword arguments: %s' %
                (rel_model.__name__, fk_kwargs)
            )
    def verify_geom(self, geom, model_field):
        """
        Verify the geometry -- construct and return a GeometryCollection
        if necessary (for example if the model field is MultiPolygonField while
        the mapped shapefile only contains Polygons).
        """
        # Downgrade a 3D geom to a 2D one, if necessary.
        if self.coord_dim != geom.coord_dim:
            geom.coord_dim = self.coord_dim
        if self.make_multi(geom.geom_type, model_field):
            # Constructing a multi-geometry type to contain the single geometry
            multi_type = self.MULTI_TYPES[geom.geom_type.num]
            g = OGRGeometry(multi_type)
            g.add(geom)
        else:
            g = geom
        # Transforming the geometry with our Coordinate Transformation object,
        # but only if the class variable `transform` is set w/a CoordTransform
        # object.
        if self.transform:
            g.transform(self.transform)
        # Returning the WKT of the geometry.
        return g.wkt
    # #### Other model methods ####
    def coord_transform(self):
        "Return the coordinate transformation object."
        SpatialRefSys = self.spatial_backend.spatial_ref_sys()
        try:
            # Getting the target spatial reference system
            target_srs = SpatialRefSys.objects.using(self.using).get(srid=self.geo_field.srid).srs
            # Creating the CoordTransform object
            return CoordTransform(self.source_srs, target_srs)
        except Exception as exc:
            raise LayerMapError(
                'Could not translate between the data source and model geometry.'
            ) from exc
    def geometry_field(self):
        "Return the GeometryField instance associated with the geographic column."
        # Use `get_field()` on the model's options so that we
        # get the correct field instance if there's model inheritance.
        opts = self.model._meta
        return opts.get_field(self.geom_field)
    def make_multi(self, geom_type, model_field):
        """
        Given the OGRGeomType for a geometry and its associated GeometryField,
        determine whether the geometry should be turned into a GeometryCollection.
        """
        return (geom_type.num in self.MULTI_TYPES and
                model_field.__class__.__name__ == 'Multi%s' % geom_type.django)
    def save(self, verbose=False, fid_range=False, step=False,
             progress=False, silent=False, stream=sys.stdout, strict=False):
        """
        Save the contents from the OGR DataSource Layer into the database
        according to the mapping dictionary given at initialization.
        Keyword Parameters:
         verbose:
           If set, information will be printed subsequent to each model save
           executed on the database.
         fid_range:
           May be set with a slice or tuple of (begin, end) feature ID's to map
           from the data source.  In other words, this keyword enables the user
           to selectively import a subset range of features in the geographic
           data source.
         step:
           If set with an integer, transactions will occur at every step
           interval. For example, if step=1000, a commit would occur after
           the 1,000th feature, the 2,000th feature etc.
         progress:
           When this keyword is set, status information will be printed giving
           the number of features processed and successfully saved.  By default,
           progress information will be printed every 1000 features processed,
           however, this default may be overridden by setting this keyword with an
           integer for the desired interval.
         stream:
           Status information will be written to this file handle.  Defaults to
           using `sys.stdout`, but any object with a `write` method is supported.
         silent:
           By default, non-fatal error notifications are printed to stdout, but
           this keyword may be set to disable these notifications.
         strict:
           Execution of the model mapping will cease upon the first error
           encountered.  The default behavior is to attempt to continue.
        """
        # Getting the default Feature ID range.
        default_range = self.check_fid_range(fid_range)
        # Setting the progress interval, if requested.
        if progress:
            if progress is True or not isinstance(progress, int):
                progress_interval = 1000
            else:
                progress_interval = progress
        def _save(feat_range=default_range, num_feat=0, num_saved=0):
            if feat_range:
                layer_iter = self.layer[feat_range]
            else:
                layer_iter = self.layer
            for feat in layer_iter:
                num_feat += 1
                # Getting the keyword arguments
                try:
                    kwargs = self.feature_kwargs(feat)
                except LayerMapError as msg:
                    # Something borked the validation
                    if strict:
                        raise
                    elif not silent:
                        stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg))
                else:
                    # Constructing the model using the keyword args
                    is_update = False
                    if self.unique:
                        # If we want unique models on a particular field, handle the
                        # geometry appropriately.
                        try:
                            # Getting the keyword arguments and retrieving
                            # the unique model.
                            u_kwargs = self.unique_kwargs(kwargs)
                            m = self.model.objects.using(self.using).get(**u_kwargs)
                            is_update = True
                            # Getting the geometry (in OGR form), creating
                            # one from the kwargs WKT, adding in additional
                            # geometries, and update the attribute with the
                            # just-updated geometry WKT.
                            geom = getattr(m, self.geom_field).ogr
                            new = OGRGeometry(kwargs[self.geom_field])
                            for g in new:
                                geom.add(g)
                            setattr(m, self.geom_field, geom.wkt)
                        except ObjectDoesNotExist:
                            # No unique model exists yet, create.
                            m = self.model(**kwargs)
                    else:
                        m = self.model(**kwargs)
                    try:
                        # Attempting to save.
                        m.save(using=self.using)
                        num_saved += 1
                        if verbose:
                            stream.write('%s: %s\n' % ('Updated' if is_update else 'Saved', m))
                    except Exception as msg:
                        if strict:
                            # Bailing out if the `strict` keyword is set.
                            if not silent:
                                stream.write(
                                    'Failed to save the feature (id: %s) into the '
                                    'model with the keyword arguments:\n' % feat.fid
                                )
                                stream.write('%s\n' % kwargs)
                            raise
                        elif not silent:
                            stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg))
                # Printing progress information, if requested.
                if progress and num_feat % progress_interval == 0:
                    stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved))
            # Only used for status output purposes -- incremental saving uses the
            # values returned here.
            return num_saved, num_feat
        if self.transaction_decorator is not None:
            _save = self.transaction_decorator(_save)
        nfeat = self.layer.num_feat
        if step and isinstance(step, int) and step < nfeat:
            # Incremental saving is requested at the given interval (step)
            if default_range:
                raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.')
            beg, num_feat, num_saved = (0, 0, 0)
            indices = range(step, nfeat, step)
            n_i = len(indices)
            for i, end in enumerate(indices):
                # Constructing the slice to use for this step; the last slice is
                # special (e.g, [100:] instead of [90:100]).
                if i + 1 == n_i:
                    step_slice = slice(beg, None)
                else:
                    step_slice = slice(beg, end)
                try:
                    num_feat, num_saved = _save(step_slice, num_feat, num_saved)
                    beg = end
                except Exception:  # Deliberately catch everything
                    stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice))
                    raise
        else:
            # Otherwise, just calling the previously defined _save() function.
            _save()
| [
"wangwei_198811@163.com"
] | wangwei_198811@163.com |
7df867c895807b675e26661a7c94fcedf8969c23 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/operationalinsights/v20200301preview/get_linked_storage_account.py | c615084e822e9a1b8d05805b45a7c7b14a8e0d07 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 4,552 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetLinkedStorageAccountResult',
'AwaitableGetLinkedStorageAccountResult',
'get_linked_storage_account',
]
@pulumi.output_type
class GetLinkedStorageAccountResult:
    """
    Linked storage accounts top level resource container.
    """
    # NOTE: generated by the Pulumi SDK generator; @pulumi.output_type relies
    # on the exact property/getter structure below -- do not hand-edit logic.
    def __init__(__self__, data_source_type=None, id=None, name=None, storage_account_ids=None, type=None):
        # Each argument is type-checked and stored via pulumi.set so the
        # output machinery can resolve the attributes later.
        if data_source_type and not isinstance(data_source_type, str):
            raise TypeError("Expected argument 'data_source_type' to be a str")
        pulumi.set(__self__, "data_source_type", data_source_type)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if storage_account_ids and not isinstance(storage_account_ids, list):
            raise TypeError("Expected argument 'storage_account_ids' to be a list")
        pulumi.set(__self__, "storage_account_ids", storage_account_ids)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="dataSourceType")
    def data_source_type(self) -> str:
        """
        Linked storage accounts type.
        """
        return pulumi.get(self, "data_source_type")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="storageAccountIds")
    def storage_account_ids(self) -> Optional[Sequence[str]]:
        """
        Linked storage accounts resources ids.
        """
        return pulumi.get(self, "storage_account_ids")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetLinkedStorageAccountResult(GetLinkedStorageAccountResult):
    """Awaitable wrapper so the result can be used in both sync and async code."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes this method a generator function, which
        # satisfies the awaitable protocol; awaiting immediately returns a copy
        # of the (already-resolved) result.
        if False:
            yield self
        return GetLinkedStorageAccountResult(
            data_source_type=self.data_source_type,
            id=self.id,
            name=self.name,
            storage_account_ids=self.storage_account_ids,
            type=self.type)
def get_linked_storage_account(data_source_type: Optional[str] = None,
                               resource_group_name: Optional[str] = None,
                               workspace_name: Optional[str] = None,
                               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetLinkedStorageAccountResult:
    """
    Linked storage accounts top level resource container.

    :param str data_source_type: Linked storage accounts type.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str workspace_name: The name of the workspace.
    :param opts: Options controlling how the invoke is executed (provider, version, ...).
    """
    # Marshal the arguments into the wire-format dict expected by the engine.
    __args__ = dict()
    __args__['dataSourceType'] = data_source_type
    __args__['resourceGroupName'] = resource_group_name
    __args__['workspaceName'] = workspace_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    # Synchronously invoke the provider function and unwrap the typed result.
    __ret__ = pulumi.runtime.invoke('azure-native:operationalinsights/v20200301preview:getLinkedStorageAccount', __args__, opts=opts, typ=GetLinkedStorageAccountResult).value
    return AwaitableGetLinkedStorageAccountResult(
        data_source_type=__ret__.data_source_type,
        id=__ret__.id,
        name=__ret__.name,
        storage_account_ids=__ret__.storage_account_ids,
        type=__ret__.type)
| [
"noreply@github.com"
] | noreply@github.com |
4a2f0f5acc5c992780889dbfe7670e51c84d8bab | 4426f894af42a7c29ddf20a976f2205834fe3123 | /006_course_documents/computer_vision/week8/project2_my_code/my_dataloader_stage3.py | 3bfddd1df2b5fcf4430d1ca7f6e9e7ce96b871a0 | [] | no_license | yuchen-he/kaikeba_report | f2e90daedfe1699ff9891d266a0d7b70dc75e526 | e0c7b21b574ab1981fe37b1c6b604499ea7a4ef5 | refs/heads/master | 2021-07-07T00:01:34.052718 | 2020-03-24T01:05:18 | 2020-03-24T01:05:18 | 226,459,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,436 | py | # This file is about how to create a dataloader in pytorch way
from __future__ import print_function
import os
import numpy as np
import cv2
from PIL import Image
import argparse
from utils.utils import my_args_parser
import pdb
import torch
from torchvision import transforms
from torch.utils.data import Dataset
input_size = 112
def parse_oneline(line):
    """Parse one annotation line into its components.

    Expected whitespace-separated layout:
        <image_path> <is_face> <x1> <y1> <x2> <y2> [<lx1> <ly1> ... <lx21> <ly21>]

    Args:
        line: one raw text line from the stage-3 annotation file.

    Returns:
        A tuple (image_path, is_face, bbox, landmarks) where
        - image_path (str): path to the image file,
        - is_face (str): '1' for a face sample, '0' for a negative sample,
        - bbox (list[int]): [x1, y1, x2, y2] crop rectangle (floats truncated),
        - landmarks: 21 (x, y) pairs; all-zero placeholders for a non-face
          sample so downstream shapes stay consistent.

    Raises:
        ValueError: if the is_face flag is neither '0' nor '1'.  (Previously
            such lines crashed with an opaque UnboundLocalError because
            `landmarks` was never assigned.)
    """
    fields = line.strip().split()
    image_path = fields[0]
    is_face = fields[1]
    # Bounding box is stored as floats in the file; truncate to ints.
    bbox = list(map(int, map(float, fields[2:6])))
    if is_face == '1':
        xs = map(float, fields[6::2])
        ys = map(float, fields[7::2])
        landmarks = list(zip(xs, ys))
    elif is_face == '0':
        landmarks = np.zeros((21, 2)).tolist()
    else:
        raise ValueError('Unexpected is_face flag %r in annotation line: %s' % (is_face, line))
    return image_path, is_face, bbox, landmarks
def channel_norm(img):
    """Standardize a float32 ndarray to zero mean and unit variance.

    A tiny epsilon in the denominator guards against division by zero when
    the input is constant.
    """
    mu = np.mean(img)
    sigma = np.std(img)
    return (img - mu) / (sigma + 0.0000001)
def convert_landmarks(landmarks, input_size, orig_face_size):
    """Rescale landmark coordinates from the original crop size to input_size.

    :param landmarks: 21 (x, y) pairs (list of pairs or ndarray of shape (21, 2)).
    :param input_size: target square side length after resizing.
    :param orig_face_size: (width, height) of the original face crop.
    :return: scaled landmarks as an ndarray of shape (21, 2); a list input
        produces a new array, an ndarray input is scaled in place.
    """
    orig_w, orig_h = orig_face_size
    scale = np.array([input_size / orig_w, input_size / orig_h]).astype(np.float32)
    # Broadcasting multiplies every (x, y) pair by (scale_x, scale_y).
    landmarks *= scale
    return landmarks
class Normalize(object):
    """Resize a PIL gray-scale face crop to input_size x input_size and apply
    channel standardization to both pixels and landmark targets.

    Sample keys in and out: 'image', 'is_face', 'landmarks'; the output
    'image' is a float32 ndarray instead of a PIL Image.
    """
    def __call__(self, sample):
        face_img = sample['image']
        face_flag = sample['is_face']
        raw_landmarks = sample['landmarks']
        resized = np.asarray(
            face_img.resize((input_size, input_size), Image.BILINEAR),
            dtype=np.float32)  # Image.ANTIALIAS)
        # Standardize the pixel values.
        pixels = channel_norm(resized)
        # Bring the landmarks into the resized coordinate frame, then
        # standardize them the same way as the pixels.
        scaled = convert_landmarks(raw_landmarks, input_size, face_img.size)
        targets = channel_norm(scaled)
        return {'image': pixels,
                'is_face': face_flag,
                'landmarks': targets
                }
class Rotation(object):
    """Randomly rotate the face crop and its landmarks by -20..+20 degrees.

    Input sample: 'image' as an H x W ndarray, 'landmarks' as 21 (x, y)
    pairs in pixel coordinates of that image.
    """
    def __call__(self, sample):
        image, is_face, landmarks = sample['image'], sample['is_face'], sample['landmarks']
        # np.random.random_integers() is deprecated and removed in modern
        # NumPy; randint's upper bound is exclusive, hence 21 keeps +20
        # reachable (same inclusive [-20, 20] range as before).
        angle = np.random.randint(-20, 21)
        # NOTE(review): cv2 expects the center as (x, y) = (W//2, H//2); this
        # passes (shape[0]//2, shape[1]//2) = (H//2, W//2).  Harmless here
        # because the crops are square (input_size x input_size) -- confirm if
        # non-square inputs are ever used.
        mat = cv2.getRotationMatrix2D((image.shape[0]//2, image.shape[1]//2), angle, 1)
        image = cv2.warpAffine(image, mat, (image.shape[0], image.shape[1]))
        # Apply the same 2x3 affine transform to every landmark point.
        landmarks_rotate = []
        for x, y in landmarks:
            landmarks_rotate.append((
                mat[0][0] * x + mat[0][1] * y + mat[0][2],
                mat[1][0] * x + mat[1][1] * y + mat[1][2]))
        return {'image': image,
                'is_face': is_face,
                'landmarks': landmarks_rotate
                }
class ToTensor(object):
    """Convert one sample's ndarrays into torch tensors.

    The gray-scale image (H x W) gains a leading channel axis -> (1, H, W);
    the batch axis is prepended later by torch.utils.data.DataLoader.
    Landmarks are flattened from 21 (x, y) pairs into a (42,) float vector.
    """
    def __call__(self, sample):
        img = sample['image']
        face_flag = sample['is_face']
        points = sample['landmarks']
        # numpy image layout: H x W (single gray channel)
        # torch image layout: C x H x W
        img = np.expand_dims(img, axis=0)
        # (21, 2) -> float32 -> flat (42,)
        points = np.array(points).astype(np.float32)
        points = points.flatten()
        return {'image': torch.from_numpy(img),
                'is_face': torch.LongTensor([int(face_flag)]),
                'landmarks': torch.from_numpy(points)}
class FaceLandmarksDataset(Dataset):
    """Dataset of gray-scale face crops with 21-point landmark annotations.

    Each element of `lines` is one annotation line; see parse_oneline() for
    the exact layout.
    """
    def __init__(self, args, lines, transformer=None):
        self.args = args                # parsed command-line options
        self.lines = lines              # raw annotation lines
        self.transformer = transformer  # transform pipeline applied per sample
    def __len__(self):
        return len(self.lines)
    def __getitem__(self, index):
        """Build and transform the sample described by annotation line *index*.

        :param index: position in the annotation list.
        :return: dict produced by the transform pipeline.
        """
        image_path, is_face, bbox, landmarks = parse_oneline(self.lines[index])
        # Gray-scale keeps mean/std computation simple; crop to the face box.
        face_crop = Image.open(image_path).convert('L').crop(tuple(bbox))
        # Normalize() performs the resize itself; in the test phase, or when
        # training with --no_normalize, we must resize and rescale here.
        phase = self.args.phase
        pre_resize = phase in ('Test', 'test') or self.args.no_normalize
        if pre_resize:
            resized = np.asarray(
                face_crop.resize((input_size, input_size), Image.BILINEAR),
                dtype=np.float32)
            landmarks = convert_landmarks(landmarks, input_size, face_crop.size)
            single_sample = {
                'image': resized,
                'is_face': is_face,
                'landmarks': landmarks
            }
        else:
            single_sample = {
                'image': face_crop,
                'is_face': is_face,
                'landmarks': landmarks
            }
        return self.transformer(single_sample)
def load_data(args, phase):
    """Build a FaceLandmarksDataset for the given phase.

    Test data only gets ToTensor(); training data additionally gets random
    rotation, plus channel normalization unless --no_normalize was passed.

    :param args: parsed command-line options holding the stage-3 annotation paths.
    :param phase: 'Test'/'test' for the test split, anything else for training.
    :return dataset: a FaceLandmarksDataset instance, which will be used by
        torch.utils.data.DataLoader.
    """
    is_test = phase in ('Test', 'test')
    annotation_path = args.test_set_path_stage3 if is_test else args.train_set_path_stage3
    with open(annotation_path, 'r') as f:
        annotation_lines = f.readlines()
    if is_test:
        steps = [ToTensor()]
    elif args.no_normalize:
        steps = [Rotation(), ToTensor()]
    else:
        steps = [Normalize(), Rotation(), ToTensor()]
    return FaceLandmarksDataset(args, annotation_lines, transforms.Compose(steps))
def dataloader(args):
    """Return the (train_set, valid_set) dataset pair for stage 3."""
    return load_data(args, 'Train'), load_data(args, 'Test')
if __name__ == '__main__':
    # Visual sanity check: iterate the test split and display each crop with
    # its landmarks drawn (any key -> next image, ESC -> quit).
    argparser = argparse.ArgumentParser(description="Face_Landmarks_Detection")
    args = my_args_parser(argparser)
    train_set = load_data(args, 'Test')
    for i in range(1, len(train_set)):
        sample = train_set[i]
        img = sample['image']
        is_face = sample['is_face']
        landmarks = sample['landmarks']  # shape = (42,)
        if is_face.numpy() == 1:
            # Drop the channel axis and draw each landmark as a filled dot.
            img = np.squeeze(np.array(img, dtype=np.uint8))
            landmarks = landmarks.reshape(21, 2)
            for landmark in landmarks:
                landmark = np.uint8(landmark).tolist()
                img = cv2.circle(img, tuple(landmark), 3, (0, 0, 255), -1)
        elif is_face.numpy() == 0:
            # Negative sample: annotate the frame instead of drawing points.
            print(i, landmarks)
            img = np.squeeze(np.array(img, dtype=np.uint8))
            msg = 'No face!'
            img_bbox = cv2.putText(img, msg, (5, 5), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 0, 255), 3)
        cv2.imshow("face_landmarks", img)
        key = cv2.waitKey()
        if key == 27:
            exit(0)
    cv2.destroyAllWindows()
| [
"Yuchen_He@n.t.rd.honda.co.jp"
] | Yuchen_He@n.t.rd.honda.co.jp |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.