text stringlengths 38 1.54M |
|---|
import os
import selenium
from selenium import webdriver
import time
from PIL import Image
import io
import requests
from webdriver_manager.chrome import ChromeDriverManager
# Script setup: pin the working directory and start a headless Chrome driver.
os.chdir('D:/Workspace/Projects/test2py')  # NOTE(review): hard-coded local Windows path — breaks on other machines
# Install driver
opts = webdriver.ChromeOptions()
opts.headless = True  # run Chrome without a visible window
driver = webdriver.Chrome(ChromeDriverManager().install(), options=opts)
# Google Images search URL template; tbs=sur%3Afc filters to free-to-use-license images
search_url = "https://www.google.com/search?q={q}&tbm=isch&tbs=sur%3Afc&hl=en&ved=0CAIQpwVqFwoTCKCa1c6s4-oCFQAAAAAdAAAAABAC&biw=1251&bih=568"
driver.get(search_url.format(q='Car'))
def scroll_to_end(driver):
    """Scroll the browser to the bottom so Google Images lazy-loads more thumbnails."""
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    time.sleep(5)  # sleep_between_interactions: give the new results time to load
#no license issues
def getImageUrls(name, totalImgs, driver):
    """Collect up to *totalImgs* full-size image URLs from a Google Images search for *name*.

    Returns a set of 'https' image source URLs.
    """
    # tbs=sur%3Afc filters to free-to-use-license images
    search_url = "https://www.google.com/search?q={q}&tbm=isch&tbs=sur%3Afc&hl=en&ved=0CAIQpwVqFwoTCKCa1c6s4-oCFQAAAAAdAAAAABAC&biw=1251&bih=568"
    driver.get(search_url.format(q=name))
    img_urls = set()
    img_count = 0
    results_start = 0  # index of the first thumbnail not yet processed
    while (img_count < totalImgs):  # Extract actual images now
        scroll_to_end(driver)
        # Thumbnail grid entries (class Q4LuWd).
        # NOTE(review): find_elements_by_* was removed in Selenium 4; this code
        # requires Selenium 3.x or needs porting to driver.find_elements(By...).
        thumbnail_results = driver.find_elements_by_xpath("//img[contains(@class,'Q4LuWd')]")
        totalResults = len(thumbnail_results)
        print(f"Found: {totalResults} search results. Extracting links from{results_start}:{totalResults}")
        for img in thumbnail_results[results_start:totalResults]:
            # Clicking a thumbnail loads the full-resolution preview (class n3VNCb).
            img.click()
            time.sleep(2)
            actual_images = driver.find_elements_by_css_selector('img.n3VNCb')
            for actual_image in actual_images:
                if actual_image.get_attribute('src') and 'https' in actual_image.get_attribute('src'):
                    img_urls.add(actual_image.get_attribute('src'))
            img_count = len(img_urls)
            if img_count >= totalImgs:
                print(f"Found: {img_count} image links")
                break
            else:
                print("Found:", img_count, "looking for more image links ...")
                # "Show more results" button at the bottom of the results page
                load_more_button = driver.find_element_by_css_selector(".mye4qd")
                driver.execute_script("document.querySelector('.mye4qd').click();")
        # Skip already-processed thumbnails on the next pass.
        results_start = len(thumbnail_results)
    return img_urls
def downloadImages(folder_path, file_name, url):
    """Download *url* and save it as a JPEG at *folder_path*/*file_name*.

    Errors are reported on stdout; the function never raises.
    """
    try:
        image_content = requests.get(url).content
    except Exception as e:
        print(f"ERROR - COULD NOT DOWNLOAD {url} - {e}")
        # Bug fix: the original fell through here with image_content unbound,
        # raising NameError in the save step below.
        return
    try:
        image = Image.open(io.BytesIO(image_content)).convert('RGB')
        file_path = os.path.join(folder_path, file_name)
        with open(file_path, 'wb') as f:
            image.save(f, "JPEG", quality=85)
        print(f"SAVED - {url} - AT: {file_path}")
    except Exception as e:
        print(f"ERROR - COULD NOT SAVE {url} - {e}")
def saveInDestFolder(searchNames, destDir, totalImgs, driver):
    """For each search term, scrape up to *totalImgs* image links and save them
    as sequentially numbered JPEGs under *destDir*/<term>/."""
    for name in searchNames:
        path = os.path.join(destDir, name)
        # Bug fix: os.mkdir failed when destDir itself did not exist;
        # makedirs with exist_ok also tolerates re-runs.
        os.makedirs(path, exist_ok=True)
        print('Current Path', path)
        totalLinks = getImageUrls(name, totalImgs, driver)
        print('totalLinks', totalLinks)
        if not totalLinks:
            print('images not found for :', name)
        else:
            for i, link in enumerate(totalLinks):
                # Bug fix: the original "{i:150}" padded the index into a
                # 150-character space-filled field, producing filenames of
                # mostly whitespace. Zero-pad to three digits instead.
                file_name = f"{i:03d}.jpg"
                downloadImages(path, file_name, link)
# Entry point: scrape 5 free-license images per term into ./Dataset2/<term>/.
searchNames = ['Car', 'horses']
destDir = f'./Dataset2/'  # NOTE(review): f-string has no placeholders; a plain string would do
totalImgs = 5
saveInDestFolder(searchNames, destDir, totalImgs, driver)
import random
import re
import time
from enum import Enum
from typing import Optional, Union
import discord
from discord.ext.commands import CheckFailure
from redbot.core.commands import Cog, Context, check
from redbot.core.i18n import Translator
from redbot.core.utils.chat_formatting import escape as _escape
from redbot.core.utils.common_filters import filter_various_mentions
from .charsheet import Character, Item
from .constants import DEV_LIST, Rarities
_ = Translator("Adventure", __file__)
async def _get_epoch(seconds: int):
epoch = time.time()
epoch += seconds
return epoch
def escape(t: str) -> str:
    """Filter mention patterns out of *t*, then escape mass mentions and markdown formatting."""
    return _escape(filter_various_mentions(t), mass_mentions=True, formatting=True)
async def smart_embed(
    ctx: Optional[Context] = None,
    message: Optional[str] = None,
    success: Optional[bool] = None,
    image: Optional[str] = None,
    ephemeral: bool = False,
    cog: Optional[Cog] = None,
    interaction: Optional[discord.Interaction] = None,
    view: Optional[discord.ui.View] = discord.utils.MISSING,
    embed_colour: Optional[str] = None,
) -> discord.Message:
    """Send *message* as an embed when enabled for the guild, else as plain text.

    Works from either a Context or a bare Interaction and returns the sent message.
    """
    # When only an interaction was supplied, derive bot/guild/channel from it.
    interaction_only = interaction is not None and ctx is None
    if interaction_only:
        bot = interaction.client
        guild = interaction.guild
        channel = interaction.channel
    else:
        bot = ctx.bot
        guild = ctx.guild
        channel = ctx.channel
    # Colour priority: explicit success flag > explicit colour string > bot default.
    if success is True:
        colour = discord.Colour.dark_green()
    elif success is False:
        colour = discord.Colour.dark_red()
    elif embed_colour is not None:
        try:
            colour = discord.Colour.from_str(embed_colour)
        except (ValueError, TypeError):
            colour = await bot.get_embed_colour(channel)
    else:
        colour = await bot.get_embed_colour(channel)
    if cog is None:
        cog = bot.get_cog("Adventure")
    if guild:
        use_embeds = await cog.config.guild(guild).embed()
    else:
        # NOTE(review): "True or ..." always yields True, so embed_requested() is
        # never awaited outside a guild — confirm this short-circuit is intentional.
        use_embeds = True or await bot.embed_requested(channel)
    if use_embeds:
        embed = discord.Embed(description=message, color=colour)
        if image:
            embed.set_thumbnail(url=image)
        if interaction_only:
            if interaction.response.is_done():
                # Initial response already sent; only the followup webhook may send now.
                msg = await interaction.followup.send(embed=embed, ephemeral=ephemeral, view=view, wait=True)
            else:
                await interaction.response.send_message(embed=embed, ephemeral=ephemeral, view=view)
                msg = await interaction.original_response()
            return msg
        else:
            return await ctx.send(embed=embed, ephemeral=ephemeral, view=view)
    # Plain-text path when embeds are disabled for the guild.
    if interaction_only:
        if interaction.response.is_done():
            msg = await interaction.followup.send(message, ephemeral=ephemeral, view=view, wait=True)
        else:
            await interaction.response.send_message(message, ephemeral=ephemeral, view=view)
            msg = await interaction.original_response()
        return msg
    else:
        return await ctx.send(message, ephemeral=ephemeral, view=view)
def check_running_adventure(ctx):
    """Return False when the author is participating in any active adventure session."""
    sessions = ctx.bot.get_cog("Adventure")._sessions
    for session in sessions.values():
        participants = {
            user.id
            for action in ("fight", "magic", "talk", "pray", "run")
            for user in getattr(session, action)
        }
        if ctx.author.id in participants:
            return False
    return True
async def _title_case(phrase: str):
exceptions = ["a", "and", "in", "of", "or", "the"]
lowercase_words = re.split(" ", phrase.lower())
final_words = [lowercase_words[0].capitalize()]
final_words += [word if word in exceptions else word.capitalize() for word in lowercase_words[1:]]
return " ".join(final_words)
async def _remaining(epoch):
remaining = epoch - time.time()
finish = remaining < 0
m, s = divmod(remaining, 60)
h, m = divmod(m, 60)
s = int(s)
m = int(m)
h = int(h)
if h == 0 and m == 0:
out = "{:02d}".format(s)
elif h == 0:
out = "{:02d}:{:02d}".format(m, s)
else:
out = "{:01d}:{:02d}:{:02d}".format(h, m, s)
return (out, finish, remaining)
def _sell(c: Character, item: Item, *, amount: int = 1):
    """Compute the sale price of *item* for character *c*.

    The base roll depends on rarity, scaled by the item's main stat, then
    adjusted by charisma, luck and rebirth count; never below the rarity floor.
    """
    base_by_rarity = {
        Rarities.ascended: (5000, 10000),
        Rarities.legendary: (1000, 2000),
        Rarities.epic: (500, 750),
        Rarities.rare: (250, 500),
    }
    low, high = base_by_rarity.get(item.rarity, (10, 100))
    price = random.randint(low, high) * abs(item.max_main_stat)
    # Charisma bonus: +1x per full 1000 CHA, floored at -1x.
    price += price * max(int(c.total_cha / 1000), -1)
    if c.luck > 0:
        price += round(price * (c.luck / 1000))
    if c.luck < 0:
        price -= round(price * (abs(c.luck) / 1000))
    if price < 0:
        price = 0
    # Rebirth bonus, capped at +40%.
    price += round(price * min(0.1 * c.rebirths / 15, 0.4))
    return max(price, low)
def is_dev(user: Union[discord.User, discord.Member]):
    """Return True when *user* is on the hard-coded developer list."""
    return user.id in DEV_LIST
def has_separated_economy():
    """Command check: passes only when the cog runs its own (separated) economy."""
    async def predicate(ctx):
        if not (ctx.cog and getattr(ctx.cog, "_separate_economy", False)):
            raise CheckFailure
        return True
    return check(predicate)
class ConfirmView(discord.ui.View):
    """Yes/No confirmation prompt; stores the answer in ``confirmed`` and stops."""

    def __init__(self, timeout: float, author: Union[discord.User, discord.Member]):
        super().__init__(timeout=timeout)
        # None until the author clicks; True for Yes, False for No.
        self.confirmed = None
        self.author = author

    @discord.ui.button(label=_("Yes"), style=discord.ButtonStyle.green)
    async def accept_button(self, interaction: discord.Interaction, button: discord.ui.Button):
        await interaction.response.defer()
        self.confirmed = True
        self.stop()

    @discord.ui.button(label=_("No"), style=discord.ButtonStyle.red)
    async def reject_button(self, interaction: discord.Interaction, button: discord.ui.Button):
        await interaction.response.defer()
        self.confirmed = False
        self.stop()

    async def interaction_check(self, interaction: discord.Interaction) -> bool:
        # Only the prompt's author may press the buttons.
        if interaction.user.id != self.author.id:
            await interaction.response.send_message(_("You are not authorized to interact with this."), ephemeral=True)
            return False
        return True
class LootSellEnum(Enum):
    # Outcome selected in LootView: what to do with a newly obtained item.
    put_away = 0
    equip = 1
    sell = 2
class LootView(discord.ui.View):
    """Three-way prompt for a looted item; stores the choice in ``result`` and stops."""

    def __init__(self, timeout: float, author: discord.User):
        super().__init__(timeout=timeout)
        # Defaults to put_away so a timeout keeps the item in the backpack.
        self.result = LootSellEnum.put_away
        self.author = author

    @discord.ui.button(label=_("Equip"), style=discord.ButtonStyle.green)
    async def equip_button(self, interaction: discord.Interaction, button: discord.ui.Button):
        await interaction.response.defer()
        self.result = LootSellEnum.equip
        self.stop()

    @discord.ui.button(label=_("Sell"), style=discord.ButtonStyle.red)
    async def sell_button(self, interaction: discord.Interaction, button: discord.ui.Button):
        await interaction.response.defer()
        self.result = LootSellEnum.sell
        self.stop()

    @discord.ui.button(label=_("Put away"), style=discord.ButtonStyle.grey)
    async def putaway_button(self, interaction: discord.Interaction, button: discord.ui.Button):
        await interaction.response.defer()
        self.result = LootSellEnum.put_away
        self.stop()

    async def interaction_check(self, interaction: discord.Interaction) -> bool:
        # Only the looting player may choose.
        if interaction.user.id != self.author.id:
            await interaction.response.send_message(_("You are not authorized to interact with this."), ephemeral=True)
            return False
        return True
|
#!/usr/bin/env python
import rospy, Map, Astar, Robot, time
from geometry_msgs.msg import Twist, PoseStamped, PointStamped
# Lab 3 entry point: start the node and run robot.doWavefront for every goal
# clicked in RViz (published on /clicked_point).
rospy.init_node('rwiesenberg_lab3')
robot = Robot.Robot()
# queue_size=1: only the most recently clicked goal matters
goal_sub = rospy.Subscriber('/clicked_point', PointStamped, robot.doWavefront, queue_size=1)
time.sleep(2)  # give publishers/subscribers time to connect before spinning
rospy.spin()
import tkinter as tk
from tkinter import ttk
from tkinter import scrolledtext
class CodePanel:
    """A notebook tab hosting a scrollable G-code text editor."""

    def __init__(self, master):
        # master must support .add — presumably a ttk.Notebook; TODO confirm
        self.master = master
        self.codePanel = ttk.Frame(master)
        master.add(self.codePanel, text="G-Code 编辑器")  # tab label: "G-Code editor"
        self.codeEntry = scrolledtext.ScrolledText(self.codePanel, width=100, height=40)
        # NEWS: stretch the editor to fill the tab in all directions
        self.codeEntry.grid(row=0, column=0, sticky="NEWS")
import importlib
import inspect
from collections import defaultdict
from functools import partial
from typing import Any, Type, TypeVar
from seedwork.application.command_handlers import CommandResult
from seedwork.application.commands import Command
from seedwork.application.events import EventResult, EventResultSet, IntegrationEvent
from seedwork.application.exceptions import ApplicationException
from seedwork.application.inbox_outbox import InMemoryInbox
from seedwork.application.queries import Query
from seedwork.application.query_handlers import QueryResult
from seedwork.domain.events import DomainEvent
from seedwork.domain.repositories import GenericRepository
from seedwork.utils.data_structures import OrderedSet
def get_function_arguments(func):
    """Split *func*'s signature into (first parameter's annotation,
    {name: annotation} for all remaining parameters)."""
    params = iter(inspect.signature(func).parameters.values())
    first = next(params)
    rest = {p.name: p.annotation for p in params}
    return first.annotation, rest
T = TypeVar("T", CommandResult, EventResult)


def collect_domain_events(result: T, handler_kwargs) -> T:
    """Drain pending domain events from every repository passed to the handler
    and append them to *result*.events; returns *result*."""
    repos = (dep for dep in handler_kwargs.values() if isinstance(dep, GenericRepository))
    collected = []
    for repo in repos:
        collected += repo.collect_events()
    result.events.extend(collected)
    return result
class DependencyProvider:
    """Basic dependency provider that uses a dictionary to store and inject dependencies"""

    def __init__(self, **kwargs):
        self.dependencies = dict(kwargs)

    def register_dependency(self, identifier, dependency_instance):
        self.dependencies[identifier] = dependency_instance

    def get_dependency(self, identifier):
        return self.dependencies[identifier]

    def _get_arguments(self, func):
        return get_function_arguments(func)

    def _resolve_arguments(self, handler_parameters) -> dict:
        """Match handler_parameters with dependencies: by type annotation first,
        then by parameter name; unmatched parameters are simply omitted."""
        resolved = {}
        for name, annotation in handler_parameters.items():
            try:
                if annotation is inspect._empty:
                    raise ValueError("No type annotation")
                resolved[name] = self.get_dependency(annotation)
            except (ValueError, KeyError):
                try:
                    resolved[name] = self.get_dependency(name)
                except (ValueError, KeyError):
                    pass
        return resolved

    def get_handler_kwargs(self, func, **overrides):
        """Build the kwargs dict for calling *func*, with explicit overrides winning."""
        _, handler_parameters = self._get_arguments(func)
        kwargs = self._resolve_arguments(handler_parameters)
        kwargs.update(overrides)
        return kwargs

    def __getitem__(self, key):
        return self.get_dependency(key)

    def __setitem__(self, key, value):
        self.register_dependency(key, value)
class TransactionContext:
    """A context spanning a single transaction for execution of commands and queries

    Typically, the following things happen in a transaction context:
    - a command handler is called, which results in aggregate changes that fire domain events
    - a domain event is raised, after
    - a domain event handler is called
    - a command is executed
    """

    def __init__(self, app, **overrides):
        self.app = app
        self.overrides = overrides  # per-transaction dependency overrides
        self.dependency_provider = app.dependency_provider
        self.task = None  # the one command/query this context is executing
        self.next_commands = []  # follow-up commands produced by event handlers
        self.integration_events = []  # integration events collected for the outbox

    def __enter__(self):
        """Should be used to start a transaction"""
        self.app._on_enter_transaction_context(self)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Should be used to commit/end a transaction"""
        self.app._on_exit_transaction_context(self, exc_type, exc_val, exc_tb)

    def _wrap_with_middlewares(
        self, handler_func, command=None, query=None, event=None
    ):
        # Innermost call is the handler; each middleware wraps the previous one,
        # so the first middleware in the list ends up outermost.
        p = handler_func
        for middleware in self.app._transaction_middlewares:
            p = partial(middleware, self, p, command, query, event)
        return p

    def execute_query(self, query) -> QueryResult:
        """Resolve and invoke the query handler (through the middleware chain)."""
        assert (
            self.task is None
        ), "Cannot execute query while another task is being executed"
        self.task = query
        handler_func = self.app.get_query_handler(query)
        handler_kwargs = self.dependency_provider.get_handler_kwargs(
            handler_func, **self.overrides
        )
        p = partial(handler_func, query, **handler_kwargs)
        wrapped_handler = self._wrap_with_middlewares(p, query=query)
        result = wrapped_handler()
        assert isinstance(
            result, QueryResult
        ), f"Got {result} instead of QueryResult from {handler_func}"
        return result

    def execute_command(self, command) -> CommandResult:
        """Invoke the command handler, then process the domain events it raised.

        Domain events are handled synchronously (breadth-first); integration
        events and follow-up commands are collected for the caller/outbox.
        """
        assert (
            self.task is None
        ), "Cannot execute command while another task is being executed"
        self.task = command
        handler_func = self.app.get_command_handler(command)
        handler_kwargs = self.dependency_provider.get_handler_kwargs(
            handler_func, **self.overrides
        )
        p = partial(handler_func, command, **handler_kwargs)
        wrapped_handler = self._wrap_with_middlewares(p, command=command)
        # execute wrapped command handler; a bare None return counts as success
        command_result = wrapped_handler() or CommandResult.success()
        assert isinstance(
            command_result, CommandResult
        ), f"Got {command_result} instead of CommandResult from {handler_func}"
        # Pull domain events out of any repositories that were injected.
        command_result = collect_domain_events(command_result, handler_kwargs)
        self.next_commands = []
        self.integration_events = []
        event_queue = command_result.events.copy()
        while len(event_queue) > 0:
            event = event_queue.pop(0)
            if isinstance(event, IntegrationEvent):
                self.collect_integration_event(event)
            elif isinstance(event, DomainEvent):
                # Handlers may raise further events/commands; keep processing
                # until the queue drains.
                event_results = self.handle_domain_event(event)
                self.next_commands.extend(event_results.commands)
                event_queue.extend(event_results.events)
        return CommandResult.success(payload=command_result.payload)

    def handle_domain_event(self, event) -> EventResultSet:
        """Run every registered handler for *event* and merge their results."""
        event_results = []
        for handler_func in self.app.get_event_handlers(event):
            handler_kwargs = self.dependency_provider.get_handler_kwargs(
                handler_func, **self.overrides
            )
            p = partial(handler_func, event, **handler_kwargs)
            wrapped_handler = self._wrap_with_middlewares(p, event=event)
            event_result = wrapped_handler() or EventResult.success()
            assert isinstance(
                event_result, EventResult
            ), f"Got {event_result} instead of EventResult from {handler_func}"
            event_result = collect_domain_events(event_result, handler_kwargs)
            event_results.append(event_result)
        return EventResultSet(event_results)

    def collect_integration_event(self, event):
        self.integration_events.append(event)

    def get_service(self, service_cls) -> Any:
        """Get a dependency from the dependency provider"""
        return self.dependency_provider.get_dependency(service_cls)

    def __getitem__(self, item) -> Any:
        return self.get_service(item)

    @property
    def current_user(self):
        return self.dependency_provider.get_dependency("current_user")
class ApplicationModule:
    """A named collection of command/query/event handlers that can be plugged
    into an Application via include_module()."""

    def __init__(self, name, version=1.0):
        self.name = name
        self.version = version
        self.command_handlers = {}  # command class -> handler function
        self.query_handlers = {}  # query class -> handler function
        self.event_handlers = defaultdict(OrderedSet)  # event class -> ordered handlers

    def query_handler(self, handler_func):
        """Query handler decorator"""
        # The handled query class is taken from the handler's first parameter annotation.
        query_cls, _ = get_function_arguments(handler_func)
        self.query_handlers[query_cls] = handler_func
        return handler_func

    def command_handler(self, handler_func):
        """Command handler decorator"""
        command_cls, _ = get_function_arguments(handler_func)
        self.command_handlers[command_cls] = handler_func
        return handler_func

    def domain_event_handler(self, handler_func):
        """Event handler decorator"""
        event_cls, _ = get_function_arguments(handler_func)
        self.event_handlers[event_cls].add(handler_func)
        return handler_func

    def import_from(self, module_name):
        # Importing the module executes its decorators, registering handlers here.
        importlib.import_module(module_name)

    def __repr__(self):
        return f"<{self.name} v{self.version} {object.__repr__(self)}>"
class Application(ApplicationModule):
    """Root module: aggregates submodules and resolves handlers across all of them."""

    def __init__(self, name=__name__, version=1.0, dependency_provider=None, **kwargs):
        super().__init__(name, version)
        self.dependency_provider = dependency_provider or DependencyProvider(**kwargs)
        self._transaction_middlewares = []
        # No-op hooks until registered via the decorators below.
        self._on_enter_transaction_context = lambda ctx: None
        self._on_exit_transaction_context = lambda ctx, exc_type, exc_val, exc_tb: None
        self._modules = {self}

    def include_module(self, a_module):
        """Register an ApplicationModule so its handlers participate in lookups."""
        assert isinstance(
            a_module, ApplicationModule
        ), "Can only include ApplicationModule instances"
        self._modules.add(a_module)

    def on_enter_transaction_context(self, func):
        """Decorator: register a hook called when a TransactionContext is entered."""
        self._on_enter_transaction_context = func
        return func

    def on_exit_transaction_context(self, func):
        """Decorator: register a hook called when a TransactionContext exits."""
        self._on_exit_transaction_context = func
        return func

    def transaction_middleware(self, middleware_func):
        """Middleware for processing transaction boundaries (i.e. running a command or query)"""
        # Inserted at the front so the most recently registered middleware runs outermost.
        self._transaction_middlewares.insert(0, middleware_func)
        return middleware_func

    def get_query_handler(self, query):
        """Find the handler registered for this query's class across all modules."""
        query_cls = type(query)
        for app_module in self._modules:
            handler_func = app_module.query_handlers.get(query_cls)
            if handler_func:
                return handler_func
        # Bug fix: the message previously said "for command" in the query lookup.
        raise Exception(f"No query handler found for query {query_cls}")

    def get_command_handler(self, command):
        """Find the handler registered for this command's class across all modules."""
        command_cls = type(command)
        for app_module in self._modules:
            handler_func = app_module.command_handlers.get(command_cls)
            if handler_func:
                return handler_func
        raise Exception(f"No command handler found for command {command_cls}")

    def get_event_handlers(self, event):
        """Collect every handler registered for this event's class across all modules."""
        event_cls = type(event)
        event_handlers = []
        for app_module in self._modules:
            event_handlers.extend(app_module.event_handlers.get(event_cls, []))
        return event_handlers

    def transaction_context(self, **dependencies):
        """Create a TransactionContext with per-transaction dependency overrides."""
        return TransactionContext(self, **dependencies)

    def execute_command(self, command, **dependencies):
        """Run a single command in its own transaction context."""
        with self.transaction_context(**dependencies) as ctx:
            return ctx.execute_command(command)

    def execute_query(self, query, **dependencies):
        """Run a single query in its own transaction context."""
        with self.transaction_context(**dependencies) as ctx:
            return ctx.execute_query(query)
|
from flask import *
import os, sys, json
import requests
from bs4 import BeautifulSoup
from urllib.parse import urlparse, parse_qs
path = os.path.dirname(__file__)
app = Flask(__name__)
def getVal(querydata, name):
    """Return the first value for *name* from a parse_qs-style dict, or '' when absent."""
    if name in querydata:
        return querydata[name][0]
    return ''
# extract trademark ids from html content
def getTrademarksfromHtml(html):
    """Return the list of trademark ids (data-mark-id) from a results page,
    or [] when the page has no results table."""
    soup = BeautifulSoup(html, "html.parser")
    # Bug fix: the original bare "except:" silently swallowed every error;
    # the only expected failure is an absent results table, so test for it.
    results_table = soup.find('table', id='resultsTable')
    if results_table is None:
        return []
    return [row['data-mark-id'] for row in results_table.find_all('tbody')]
# scrape function
def scrap(url, Count):
    """POST the advanced search described by *url* and return up to *Count*
    trademark ids, following pagination (100 results per page)."""
    if Count > 2000:
        Count = 2000  # the site serves at most 2000 results
    sess = requests.session()
    querydata = parse_qs(urlparse(url).query)
    # Initial GET to obtain the CSRF token embedded in the page.
    res = sess.get(url)
    soup = BeautifulSoup(res.text, "html.parser")
    # Every form field the doSearch endpoint expects; absent ones are sent empty.
    names = [
        'wv[0]','wt[0]','weOp[0]','wv[1]','wt[1]' ,'wrOp','wv[2]','wt[2]','weOp[1]',
        'wv[3]','wt[3]','iv[0]','it[0]','ieOp[0]','iv[1]','it[1]','irOp','iv[2]',
        'it[2]','ieOp[1]','iv[3]','it[3]','wp','_sw','classList','ct','status',
        'dateType','fromDate','toDate','ia','gsd', 'endo','nameField[0]','name[0]',
        'attorney','oAcn','idList','ir','publicationFromDate','publicationToDate',
        'i','c','originalSegment'
    ]
    _csrf = soup.find('meta', {'name' : '_csrf'})['content']
    data = {}
    data['_csrf'] = _csrf
    for name in names:
        data[name] = getVal(querydata, name)
    rest = sess.post(url='https://search.ipaustralia.gov.au/trademarks/search/doSearch', data=data)
    tradeMarkIds = getTrademarksfromHtml(rest.text)
    URL = rest.url
    # ceil(Count / 100) pages in total; page 0 was already fetched by the POST.
    pages = int(Count/100) + 1 if Count % 100 > 0 else int(Count/100)
    for page in range(1, pages):
        res = sess.get("%s&p=%s" % (URL, page))
        tradeMarkIds = tradeMarkIds + getTrademarksfromHtml(res.text)
    return tradeMarkIds
@app.route('/')
def main():
    """Serve the search UI."""
    return render_template('index.html')


@app.route('/getCountofResult', methods=['POST'])
def cntofResult():
    """Return the result count plus the scraped trademark ids for a search URL."""
    originUrl = request.form['url']
    # The /count endpoint returns the number of matches as JSON.
    url = originUrl.replace('advanced','count')
    res = requests.get(url)
    data = json.loads(res.text)
    # scrape url
    Ids = scrap(originUrl, data['count'])
    return json.dumps({"data" : data, "trademarks" : Ids})


@app.route('/mysearch')
def mysearch():
    """Serve the saved-search page."""
    return render_template("mysearch.html")


if __name__ == '__main__':
    app.run(debug=True)  # NOTE(review): debug=True must not be used in production
# Copyright 2019 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Pieces that convert audio to predictions
"""
import numpy as np
from abc import abstractmethod, ABCMeta
from importlib import import_module
from os.path import splitext
from typing import *
from typing import BinaryIO
from precise.threshold_decoder import ThresholdDecoder
from precise.model import load_precise_model
from precise.params import inject_params, pr
from precise.util import buffer_to_audio
from precise.vectorization import vectorize_raw, add_deltas
class Runner(metaclass=ABCMeta):
    """
    Classes that execute trained models on vectorized audio
    and produce prediction values
    """

    @abstractmethod
    def predict(self, inputs: np.ndarray) -> np.ndarray:
        """Run the model on a batch of inputs, returning a batch of predictions."""
        pass

    @abstractmethod
    def run(self, inp: np.ndarray) -> float:
        """Run the model on a single input, returning one scalar prediction."""
        pass
class TensorFlowRunner(Runner):
    """Executes a frozen Tensorflow model created from precise-convert"""

    def __init__(self, model_name: str):
        # .net is the Keras format; a frozen .pb graph is expected here.
        if model_name.endswith('.net'):
            print('Warning: ', model_name, 'looks like a Keras model.')
        self.tf = import_module('tensorflow')  # lazy import keeps TF optional until needed
        self.graph = self.load_graph(model_name)
        # import_graph_def prefixes tensor names with 'import/' by default.
        self.inp_var = self.graph.get_operation_by_name('import/net_input').outputs[0]
        self.out_var = self.graph.get_operation_by_name('import/net_output').outputs[0]
        self.sess = self.tf.Session(graph=self.graph)  # TF1-style session API

    def load_graph(self, model_file: str) -> 'tf.Graph':
        """Deserialize a frozen GraphDef protobuf from *model_file* into a new Graph."""
        graph = self.tf.Graph()
        graph_def = self.tf.GraphDef()
        with open(model_file, "rb") as f:
            graph_def.ParseFromString(f.read())
        with graph.as_default():
            self.tf.import_graph_def(graph_def)
        return graph

    def predict(self, inputs: np.ndarray) -> np.ndarray:
        """Run on multiple inputs"""
        return self.sess.run(self.out_var, {self.inp_var: inputs})

    def run(self, inp: np.ndarray) -> float:
        # Add a batch dimension, then unwrap the single scalar prediction.
        return self.predict(inp[np.newaxis])[0][0]
class KerasRunner(Runner):
    """ Executes a regular Keras model created from precise-train"""

    def __init__(self, model_name: str):
        import tensorflow as tf
        # ISSUE 88 - Following 3 lines added to resolve issue 88 - JM 2020-02-04 per liny90626
        from tensorflow.python.keras.backend import set_session  # ISSUE 88
        self.sess = tf.Session()  # ISSUE 88
        set_session(self.sess)  # ISSUE 88
        self.model = load_precise_model(model_name)
        # Keep a handle to the graph so predict() can re-enter it (TF1 threading).
        self.graph = tf.get_default_graph()

    def predict(self, inputs: np.ndarray):
        from tensorflow.python.keras.backend import set_session  # ISSUE 88
        with self.graph.as_default():
            set_session(self.sess)  # ISSUE 88
            return self.model.predict(inputs)

    def run(self, inp: np.ndarray) -> float:
        # Add a batch dimension, return the single scalar prediction.
        return self.predict(inp[np.newaxis])[0][0]
class Listener:
    """Listener that preprocesses audio into MFCC vectors and executes neural networks"""

    def __init__(self, model_name: str, chunk_size: int = -1, runner_cls: type = None):
        self.window_audio = np.array([])  # raw samples not yet converted to features
        self.pr = inject_params(model_name)  # model-specific audio/feature parameters
        self.mfccs = np.zeros((self.pr.n_features, self.pr.n_mfcc))  # rolling feature window
        self.chunk_size = chunk_size
        runner_cls = runner_cls or self.find_runner(model_name)
        self.runner = runner_cls(model_name)
        self.threshold_decoder = ThresholdDecoder(self.pr.threshold_config, pr.threshold_center)

    @staticmethod
    def find_runner(model_name: str) -> Type[Runner]:
        """Pick the Runner class from the model file extension (.net=Keras, .pb=frozen TF)."""
        runners = {
            '.net': KerasRunner,
            '.pb': TensorFlowRunner
        }
        ext = splitext(model_name)[-1]
        if ext not in runners:
            raise ValueError('File extension of ' + model_name + ' must be: ' + str(list(runners)))
        return runners[ext]

    def clear(self):
        """Reset the buffered audio and the MFCC window."""
        self.window_audio = np.array([])
        self.mfccs = np.zeros((self.pr.n_features, self.pr.n_mfcc))

    def update_vectors(self, stream: Union[BinaryIO, np.ndarray, bytes]) -> np.ndarray:
        """Append new audio from *stream* and return the updated MFCC window.

        *stream* may be a numpy sample array, raw bytes, or a readable binary
        stream (read in self.chunk_size chunks; EOFError on empty read).
        """
        if isinstance(stream, np.ndarray):
            buffer_audio = stream
        else:
            if isinstance(stream, (bytes, bytearray)):
                chunk = stream
            else:
                chunk = stream.read(self.chunk_size)
                if len(chunk) == 0:
                    raise EOFError
            buffer_audio = buffer_to_audio(chunk)
        self.window_audio = np.concatenate((self.window_audio, buffer_audio))
        if len(self.window_audio) >= self.pr.window_samples:
            new_features = vectorize_raw(self.window_audio)
            # Drop the samples consumed by the newly produced feature frames.
            self.window_audio = self.window_audio[len(new_features) * self.pr.hop_samples:]
            if len(new_features) > len(self.mfccs):
                new_features = new_features[-len(self.mfccs):]
            # Slide the window: discard the oldest frames, append the new ones.
            self.mfccs = np.concatenate((self.mfccs[len(new_features):], new_features))
        return self.mfccs

    def update(self, stream: Union[BinaryIO, np.ndarray, bytes]) -> float:
        """Feed new audio and return the decoded activation level."""
        mfccs = self.update_vectors(stream)
        if self.pr.use_delta:
            mfccs = add_deltas(mfccs)
        raw_output = self.runner.run(mfccs)
        return self.threshold_decoder.decode(raw_output)
|
#!/usr/bin/python
import os
import argparse
import sys
import subprocess
GTERM = "/usr/bin/gterm"
MINISH = "/usr/bin/minish"
if __name__ == '__main__':
    # One-time setup: symlink a terminal alias and install minish if missing.
    if not os.path.exists(GTERM):
        os.system('sudo ln -s /usr/bin/xfce4-terminal %s' % GTERM)
    if not os.path.exists(MINISH):
        os.system('minish --install')
    returned_output = ''
    try:
        # Grep the user's .bashrc for the marker line.
        # Bug fix: check_output returns bytes on Python 3, so the later
        # "'MININET' in returned_output" raised TypeError; decode to str.
        returned_output = subprocess.check_output(
            'cat $HOME/.bashrc | grep MININET_DIFFERENT_PROBES_PER_NODES', shell=True
        ).decode()
    except Exception:
        pass  # grep exits non-zero when the marker is absent; treat as not installed
    if 'MININET' not in returned_output:
        # Append a prompt customization that shows the mininet netns name.
        to_be_appended = """
#MININET_DIFFERENT_PROBES_PER_NODES
myline=`lsns -t net -p $$ | grep 'is mininet'`
for word in $myline; do : ; done
if [[ ! -z $word ]] ; then
IFS=':' read -ra ARRAY <<< "$word"
#echo ${ARRAY[1]}
PS1='< \\[\\033[01;32m\\]${ARRAY[1]}\\[\\033[00m\\] >:\\[\\033[01;34m\\]\\w\\[\\033[00m\\] \\$ '
fi
"""
        with open(os.getenv('HOME') + '/.bashrc', 'a') as fbashrc:
            fbashrc.write(to_be_appended)
|
#!/usr/bin/python
# Statistics for PO files
import sys, os, re, subprocess, os.path, babel, codecs, time
# Generates an HTML translation-status report for the YajHFC .po catalogs.
# Python 2 script: uses print statements and writes HTML fragments to stdout.
filelist=['yajhfc/src/yajhfc/i18n/messages',
          'yajhfc/src/yajhfc/i18n/CommandLineOpts',
          'yajhfc-console/i18n/Messages',
          'yajhfc-pdf-plugin/i18n/Messages',
          'FOPPlugin/i18n/FOPMessages',
          'yajhfc-plugin-mail/i18n/Messages']
descriptions=['Main application messages (messages.po)',
              'Main application command line option description (CommandLineOpts.po)',
              'Console add-on messages',
              'PDF plugin messages',
              'FOP plugin messages',
              'Batch printer and mailer plugin messages']
# GitHub locations of each catalog, used to link the per-language counts.
BB_base='https://github.com/jwolz/'
BB_repos=['yajhfc',
          'yajhfc',
          'yajhfc-console',
          'yajhfc-plugin-pdf',
          'yajhfc-plugin-fop',
          'yajhfc-plugin-mailer']
BB_presrc='/blob/master'
# Make stdout UTF-8 capable (locale display names may be non-ASCII).
sys.stdout = codecs.getwriter("utf-8")(sys.stdout)
# Discover available languages from the main catalog's messages_<lang>.po files.
langs=[]
langpattern=re.compile("^messages_(\w+)\.po$")
for po in os.listdir(os.path.split(filelist[0])[0]):
    m = langpattern.match(po)
    if (m):
        langs.append(m.group(1))
if (len(langs)==0):
    print >> sys.stderr, "Error: No translations found at "+filelist[0]+'*.po'
    sys.exit(1)
langs.sort()
# Extract the application version from VersionInfo.java.
version="<unknown>"
util_java=codecs.open("yajhfc/src/yajhfc/VersionInfo.java", "rU", "utf-8")
verpattern=re.compile('\s*public static final String AppVersion = "(.*?)";.*')
for line in util_java:
    vm = verpattern.match(line)
    if (vm):
        version=vm.group(1)
        break
util_java.close()
# Run msgfmt with LANG=C so its statistics output is parseable English.
msgfmtenv=os.environ.copy()
msgfmtenv['LANG'] = 'C'
msgfmtpattern=re.compile('(\d+) translated.*?(?:, (\d+) fuzzy.*?)?(?:, (\d+) untranslated.*?)?\.')
print "<p>This page shows the status of YajHFC translations for version " + version + " as of " + time.strftime("%Y-%m-%d") + ".</p>"
print "<p>If you are missing your language and would like to create a translation for it or if you would like to complete your language's translation, please <a href=\"/support/email-contact\">contact me</a><br />"
print " An overview of the translation process can be found in the <a href=\"/documentation/wiki/89-translation-guide\">Translation Guide</a>.</p>"
i=0
# One HTML table per catalog, one row per language.
for i in range(0, len(filelist)):
    basefile=filelist[i]
    basedesc=descriptions[i]
    print '<h3>' + basedesc + '</h3>'
    print '<table border="1" width="100%">'
    print '<tr><th>Language</th><th>Number of translated messages</th><th>Percent complete</th></tr>'
    for lang in langs:
        filename = basefile + '_' + lang + '.po'
        # -1 marks "no data" until msgfmt output is parsed successfully.
        numtrans=-1
        numuntrans=-1
        numfuzzy=-1
        numtotal=-1
        if (os.path.exists(filename)):
            # msgfmt prints its statistics on stderr.
            msgfmtout = subprocess.Popen(["msgfmt", "--statistics", filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=msgfmtenv).communicate()[1]
            mpm = msgfmtpattern.match(msgfmtout)
            if (mpm):
                numtrans=int(mpm.group(1))
                if (mpm.group(2)):
                    numfuzzy=int(mpm.group(2))
                else:
                    numfuzzy=0
                if (mpm.group(3)):
                    numuntrans=int(mpm.group(3))
                else:
                    numuntrans=0
                numtotal = numtrans+numuntrans+numfuzzy
        if (numtotal>0):
            # Fuzzy entries count as "complete" for the percentage bar.
            percentage=100 * (numtrans+numfuzzy) / numtotal
        else:
            percentage=0
        # Bar colour fades from red (0%) to green (100%).
        red = min(255, (100-percentage) * 255 * 2 / 100)
        green = min(255, percentage * 255 * 2 / 100)
        blue = 0
        color = ((red * 256) + green) * 256 + blue
        colorbg=0xffffff
        if (numtrans<0 or numtotal<0):
            transmsg="No translation available"
        else:
            if (numfuzzy>0):
                transmsg="%d of %d messages (%d fuzzy)" % (numtrans, numtotal, numfuzzy)
            else:
                transmsg="%d of %d messages" % (numtrans, numtotal)
            # Link the count to the .po file in the matching GitHub repo.
            pos = filename.find('/')
            GC_path = BB_base + BB_repos[i] + BB_presrc + filename[pos:]
            transmsg = '<a href="' + GC_path + '">' + transmsg + '</a>'
        locale = babel.Locale.parse(lang)
        print " <tr>"
        print " <td><b>%s</b> (%s / %s)</td>" % (lang, locale.get_display_name("en"), locale.get_display_name())
        print " <td>%s</td>" % (transmsg)
        print " <td><div style=\"background-color: #%06X; position: relative;\">" % (colorbg)
        print " <div style=\"position: relative; background-color: #%06X; width: %d%%; text-align: center;\"> %d%%</div>" % (color, percentage, percentage)
        print " </div></td>"
        print " </tr>"
    print """
</table>
<p> </p>"""
    pass
|
from pypy.interpreter.error import OperationError
from pypy.interpreter import typedef, gateway, baseobjspace
from pypy.interpreter.gateway import interp2app
from pypy.objspace.std.listobject import W_ListObject, W_TupleObject
from pypy.objspace.std.intobject import W_IntObject
from pypy.rlib.cslib import rdomain as rd
class _FiniteDomain(rd.BaseFiniteDomain):
    """
    Variable Domain with a finite set of possible values.

    Values are stored as *indices* into ``vlist``: ``_values`` maps each
    still-possible index to True, so removing a value is a dict deletion.
    (RPython-translatable code; keep constructs simple.)
    """

    def __init__(self, vlist, values):
        """vlist is a list of values in the domain.
        values is a dictionary (index -> True) used to make sure that
        there are no duplicate values; pass None to start with the full
        domain."""
        #assert isinstance(w_values, W_ListObject)
        self.vlist = vlist
        self._values = {}
        if values is None:
            # Full domain: every index of vlist is initially possible.
            for k in range(len(vlist)):
                self._values[k] = True
        else:
            # Defensive copy so callers' dicts are not aliased.
            self._values = values.copy()
        self._changed = False

    def get_wvalues_in_rlist(self):
        # Materialize the still-possible wrapped values as an RPython list.
        w_vals = self.vlist
        return [w_vals[idx] for idx in self._values]

    def copy(self):
        # __init__ copies the dict, so the clone is independent.
        return _FiniteDomain(self.vlist, self._values)

    def intersect(self, other):
        # New domain containing only values present in both domains.
        # NOTE(review): membership test is O(len(v2)) per value -- fine for
        # small domains, quadratic for large ones.
        v1 = self.get_wvalues_in_rlist()
        v2 = other.get_wvalues_in_rlist()
        inter = [v for v in v1
                 if v in v2]
        return _FiniteDomain(inter, None)
class W_FiniteDomain(baseobjspace.Wrappable):
    """Application-level wrapper around an interp-level _FiniteDomain."""

    def __init__(self, w_values, values):
        # w_values must be an applevel list; its wrapped items become the
        # domain's value list.  `values` is forwarded unchanged (None = full).
        assert isinstance(w_values, W_ListObject)
        self.domain = _FiniteDomain(w_values.wrappeditems, values)
def make_fd(space, w_values):
    """Build a W_FiniteDomain from an applevel list of values.

    Raises an app-level TypeError when w_values is neither a list nor
    a tuple.
    """
    if not isinstance(w_values, W_ListObject):
        if not isinstance(w_values, W_TupleObject):
            raise OperationError(space.w_TypeError,
                space.wrap('first argument must be a list.'))
    # NOTE(review): a W_TupleObject passes the check above, but
    # W_FiniteDomain.__init__ asserts isinstance(w_values, W_ListObject),
    # so the tuple path looks like it would fail that assert -- confirm.
    return W_FiniteDomain(w_values, None)
# Minimal app-level type declaration for W_FiniteDomain (no exposed methods).
W_FiniteDomain.typedef = typedef.TypeDef(
    "W_FiniteDomain")
|
# CH03-05 operator precedence
##################################################################
# Read three integers from the user and print their average.
first = int(input("첫 번째 수: "))
second = int(input("두 번째 수: "))
third = int(input("세 번째 수: "))
average = (first + second + third) / 3
print("평균 =", average)
|
from django.db import models
class MenuEntry(models.Model):
    """A single entry in a hierarchical navigation menu."""

    # Self-referential link to the parent entry; NULL for top-level entries.
    # on_delete made explicit: CASCADE was the implicit default here, and
    # the argument is mandatory on Django >= 2.0.
    parent = models.ForeignKey('self', blank=True, null=True,
                               on_delete=models.CASCADE)
    caption = models.CharField(max_length=200)
    link = models.CharField(max_length=200)
    # Sort key used to order siblings; a float allows inserting between two
    # existing entries without renumbering.
    position = models.FloatField()

    def __unicode__(self):
        """Return the full 'parent / child' breadcrumb for this entry."""
        return (unicode(self.parent) + ' / ' if self.parent else '') + self.caption

    def check_if_selected(self, path):
        """Mark this entry as selected when *path* equals its link exactly."""
        self.selected = path == self.link

    def children(self):
        """Return this entry's direct children ordered by position."""
        return self.menuentry_set.order_by('position')
|
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.conf.urls.static import static
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# URL routes for the brochure app, using old-style (Django < 1.10)
# `patterns()` configuration with dotted-path view references.
urlpatterns = patterns('',
    # Examples:
    # Core pages.
    url(r'^$', 'brochure.views.home'),
    # Item / watchlist AJAX endpoints.
    url(r'^query/$', 'brochure.views.query'),
    url(r'^get_items/$', 'brochure.views.get_items'),
    url(r'^add_item/$', 'brochure.views.add_item'),
    url(r'^get_watchlist/$', 'brochure.views.get_watchlist'),
    url(r'^sync/$', 'brochure.views.sync'),
    # Account management.
    url(r'^signin/$', 'brochure.views.signin'),
    url(r'^signup/$', 'brochure.views.signup'),
    url(r'^delete_items/$', 'brochure.views.delete_items'),
    url(r'^signout/$', 'brochure.views.signout'),
    url(r'^set_mark/$', 'brochure.views.set_mark'),
    url(r'^save_settings/$', 'brochure.views.save_settings'),
    url(r'^update_price/$', 'brochure.views.update_price'),
    # Server-rendered HTML fragments.
    url(r'^welcome.html$', 'brochure.views.welcome_html'),
    url(r'^product.html$', 'brochure.views.product_html'),
    url(r'^page.html$', 'brochure.views.page_html'),
    url(r'^add_product.html$', 'brochure.views.add_product_html'),
    url(r'^add_page.html$', 'brochure.views.add_page_html'),
    # Page / product management endpoints.
    url(r'^add_page/$', 'brochure.views.add_page'),
    url(r'^get_pages/$', 'brochure.views.get_pages'),
    url(r'^page_products.html$', 'brochure.views.page_products_html'),
    url(r'^get_page_products/$', 'brochure.views.get_page_products'),
    url(r'^update_description/$', 'brochure.views.update_description'),
    url(r'^delete_pages/$', 'brochure.views.delete_pages'),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)

# django-rq dashboard routes.
urlpatterns += patterns('',
    (r'^django-rq/', include('django_rq.urls')),
)
from anvil import RegionReader
from png import output_png
DIRECTORY = "/Users/jtauber/Library/Application Support/minecraft/saves/Jamax (World 2)-2/region"
reader = RegionReader(DIRECTORY)
def get_biomes(cx, cz):
    """Return the Biomes array of chunk (cx, cz), or None when absent."""
    level = reader.get_chunk(cx, cz).get("Level", {})
    return level.get("Biomes")
def make_colour(features):
    """Translate a set of biome feature tags into an (r, g, b) colour.

    Base colours are applied in a fixed precedence order (a later matching
    rule overrides an earlier one), then additive modifiers such as
    "frozen" or "hills" lighten or darken the result, clamped to 0..255.
    """
    # Base colour rules, evaluated in order -- the order is significant.
    base_rules = [
        ({"stone"}, (128, 128, 128)),
        ({"ocean"}, (0, 0, 255)),
        ({"plains"}, (128, 192, 64)),
        ({"desert"}, (192, 192, 128)),
        ## badlands: no rule yet
        ({"mountains"}, (192, 192, 192)),
        ({"forest"}, (0, 128, 32)),
        ({"forest", "mountains"}, (128, 192, 96)),
        ({"taiga"}, (192, 224, 192)),
        ({"taiga", "mountains"}, (224, 255, 224)),
        ({"swamp"}, (32, 96, 128)),
        ({"river"}, (0, 128, 255)),
        ({"beach"}, (255, 255, 192)),
        ({"savanna"}, (128, 192, 0)),
        ({"jungle"}, (32, 192, 64)),
        ## mushroom fields: no rule yet
        ({"tundra"}, (224, 240, 255)),
    ]
    r, g, b = 0, 0, 0
    for required, colour in base_rules:
        if required.issubset(features):
            r, g, b = colour

    # Additive tweaks applied on top of the base colour.
    tweaks = [
        ("frozen", (64, 64, 64)),
        ("cold", (32, 32, 32)),
        ("deep", (-64, -64, -32)),
        ("hills", (32, 32, 32)),
        ("tall", (-16, -16, -16)),
        ("dark", (-32, -32, -16)),
    ]
    for tag, (dr, dg, db) in tweaks:
        if tag in features:
            r = min(255, max(0, r + dr))
            g = min(255, max(0, g + dg))
            b = min(255, max(0, b + db))

    assert 0 <= r <= 255, features
    assert 0 <= g <= 255, features
    assert 0 <= b <= 255, features
    if (r, g, b) == (0, 0, 0):
        # Unmapped feature combinations come out black; log them for triage.
        print(features)
    return r, g, b
# Minecraft biome id -> RGB colour, built from feature-tag sets.
PALETTE = {
    0: make_colour({"ocean"}),
    1: make_colour({"plains"}),
    2: make_colour({"desert"}),
    3: make_colour({"mountains"}),
    4: make_colour({"forest"}),
    5: make_colour({"taiga"}),
    6: make_colour({"swamp"}),
    7: make_colour({"river"}),
    10: make_colour({"frozen", "ocean"}),
    16: make_colour({"beach"}),
    17: make_colour({"desert", "hills"}),
    18: make_colour({"forest", "hills"}),  # wooded hills
    19: make_colour({"taiga", "hills"}),
    21: make_colour({"jungle"}),
    22: make_colour({"jungle", "hills"}),
    23: make_colour({"jungle", "edge"}),
    24: make_colour({"deep", "ocean"}),
    25: make_colour({"stone", "shore"}),
    27: make_colour({"birch", "forest"}),
    28: make_colour({"birch", "forest", "hills"}),
    29: make_colour({"dark", "forest"}),
    32: make_colour({"tall", "taiga"}),  # giant tree taiga
    # Bug fix: "hils" -> "hills", so the hills lightening actually applies.
    33: make_colour({"tall", "taiga", "hills"}),  # giant tree taiga hills
    34: make_colour({"forest", "mountains"}),  # wooded mountains
    35: make_colour({"savanna"}),
    36: make_colour({"savanna", "hills"}),  # savanna plateau
    44: make_colour({"warm", "ocean"}),
    45: make_colour({"lukewarm", "ocean"}),
    46: make_colour({"cold", "ocean"}),
    48: make_colour({"deep", "lukewarm", "ocean"}),
    49: make_colour({"deep", "cold", "ocean"}),
    50: make_colour({"deep", "frozen", "ocean"}),
    129: make_colour({"sunflower", "plains"}),
    130: make_colour({"desert", "lakes"}),
    132: make_colour({"flower", "forest"}),
    133: make_colour({"taiga", "mountains"}),
    134: make_colour({"swamp", "hills"}),
    149: make_colour({"modified", "jungle"}),
    155: make_colour({"tall", "birch", "forest"}),
    156: make_colour({"tall", "birch", "hills"}),
    157: make_colour({"dark", "forest", "hills"}),
    160: make_colour({"tall", "spruce", "taiga"}),  # giant spruce taiga
    162: make_colour({"modified", "gravelly", "mountains"}),
    163: make_colour({"shattered", "savanna"}),
    164: make_colour({"shattered", "savanna", "hills"}),  # shattered savanna plateau
    168: make_colour({"bamboo", "jungle"}),
    169: make_colour({"bamboo", "jungle", "hills"}),
}
def map_biomes(sx, ex, sz, ez, output_filename):
    """Render the biome ids of chunks [sx, ex) x [sz, ez) to a PNG.

    Each chunk contributes a 4x4 block of pixels.  Biome ids missing from
    PALETTE are drawn dark red (mutating PALETTE) and reported at the end.
    """
    unknown_biomes = set()
    pixels = {}
    for cx in range(sx, ex):
        print(cx)  # progress indicator: one line per chunk column
        for cz in range(sz, ez):
            biomes = get_biomes(cx, cz)
            if biomes:
                # Assumes the chunk stores a coarse biome grid indexed as
                # (y * 4 + z) * 4 + x and samples only the y=1 slice --
                # TODO confirm against the anvil chunk layout.
                y = 1
                for x in range(4):
                    for z in range(4):
                        biome = biomes[(y * 4 + z) * 4 + x]
                        if biome not in PALETTE:
                            # Fall back to dark red and remember the id.
                            PALETTE[biome] = (128, 0, 0)
                            unknown_biomes.add(biome)
                        colour = PALETTE[biome]
                        pixels[(cx - sx) * 4 + x, (cz - sz) * 4 + z] = colour
    output_png(output_filename, (ex - sx) * 4, (ez - sz) * 4, pixels)
    print(f"{unknown_biomes=}")
if __name__ == "__main__":
    # Render a small local area and a larger overview map.
    map_biomes(-32, 48, -32, 48, "biomes-local.png")
    map_biomes(-256, 256, -256, 300, "biomes.png")
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 26 12:46:34 2015
@author: as3g15
"""
import math
def degree(x):
    """Return *x* radians converted into degrees.

    Delegates to math.degrees, which performs the same x * (180 / pi)
    conversion in a single C-level call.
    """
    return math.degrees(x)
def min_max(xs):
    """Return a tuple (smallest, largest) of the values in the list *xs*."""
    smallest = min(xs)
    largest = max(xs)
    return smallest, largest
def geometric_mean(xs):
    """Return the geometric mean of the numbers in list *xs*."""
    product = 1.0
    for value in xs:
        product *= value
    # nth root of the product, where n is the number of values.
    return product ** (1.0 / len(xs))
|
# Generated by Django 3.0.6 on 2020-11-09 08:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the include_trading flag and create the Trading model.

    Adds `IncomingStockEntry.include_trading` (default False) and creates
    a `Trading` table holding buy/sell prices linked to a materials.Stock
    row.  PROTECT means a Stock row cannot be deleted while Trading rows
    still reference it.
    """

    dependencies = [
        ('materials', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='incomingstockentry',
            name='include_trading',
            field=models.BooleanField(default=False),
        ),
        migrations.CreateModel(
            name='Trading',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('buying_price', models.FloatField()),
                ('selling_price', models.FloatField()),
                ('entry', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='materials.Stock')),
            ],
        ),
    ]
|
#!/usr/bin/env python
'''Script to determine guide star options in a
given 2dF field of view.
17 December 2018 - Matt Taylor - alpha development
16 January 2019 - Matt Taylor - added UCAC for guide star catalogue
'''
#-----------------------------------------------------------------------
import numpy as np
from astropy.io import fits, ascii
from astropy.stats import sigma_clip
from astropy.coordinates import Angle, SkyCoord, match_coordinates_sky
import astropy.units as u
from astropy.nddata import Cutout2D
from astropy.wcs import WCS
from astropy.visualization.wcsaxes import SphericalCircle
from astropy.visualization import (MinMaxInterval, LogStretch,
ImageNormalize)
from matplotlib.patches import Circle
from astroquery.vizier import Vizier
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.colorbar import Colorbar
import sys, os
import warnings
warnings.filterwarnings("ignore")
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
version = '17 December 2018'

# Accept the 2dF field centre as either sexagesimal (hh:mm:ss dd:mm:ss) or
# decimal degrees, distinguished by the presence of ':' in the RA argument.
# coord_2dF = SkyCoord(float(sys.argv[1]), float(sys.argv[2]), unit=(u.degree,u.degree))
if ":" in sys.argv[1]:
    coord_2dF = SkyCoord(sys.argv[1], sys.argv[2], unit=(u.hourangle,u.degree))
else:
    coord_2dF = SkyCoord(sys.argv[1], sys.argv[2], unit=(u.degree,u.degree))
def usage():
    """Print the command-line help text, then exit via SystemExit."""
    print ''
    print 'NAME'
    print ' find_my_guidestars.py - Find guide star options for a given 2dF pointing\n'
    print 'SYNOPSIS'
    print ' find_my_guidestars.py RA Dec -> RA and Dec in either hh:mm:ss dd:mm:ss or decimal degree formats\n'
    print 'DESCRIPTION'
    print ' Find a list of guide star options from 2MASS based on SCABS catalogues.'
    print ' '
    print 'VERSION'
    print ' ', version
    print ''
    raise SystemExit
#-----------------------------------------------------------------------
def main():
    """End-to-end driver: query catalogues, cross-match, save, and plot."""
    gs_coo_2mass, gs_vmag = get_gs_cat(coord_2dF) #find catalogue point sources within the 2dF field centred at the requested coordinates
    gv_gs_coords, gv_gs_mags = scabs_xmatch(gs_coo_2mass, gs_vmag) #match 2MASS sources to SCABS g-band catalogue
    save_results(gv_gs_coords, gv_gs_mags)
    plot_results(coord_2dF, gv_gs_coords, gv_gs_mags)
    make_stamps(gv_gs_coords)
    plt.show()
#----------Query 2MASS and Tycho-2 catalogues for stars in the given field-------------
def get_gs_cat(coo):
    """Query UCAC4 and 2MASS-PSC within 1 degree of *coo*.

    Returns (gv_2mass_coo, gv_ucac_mag): 2MASS positions reordered so that
    entry i is the closest 2MASS match to UCAC source i, together with the
    UCAC V magnitudes (pre-filtered to 10 <= V <= 18).
    """
    Vizier.ROW_LIMIT = -1
    # print "querying gaia..."
    # result = Vizier.query_region(coo, radius=Angle(1.,"deg"), catalog="GSC2.3")
    # data = result["I/305/out"]
    # mask = np.logical_and(data["Vmag"] >= 10, data["Vmag"] <= 15)
    # data = data[mask]
    # print data
    # exit()
    #UCAC: positions and V magnitudes in the guide-star brightness range.
    viz = Vizier(columns=["RAJ2000", "DEJ2000", "Vmag"])
    viz.ROW_LIMIT = -1
    result_ucac = viz.query_region(coo, radius=Angle(1.,"deg"), catalog="UCAC4")
    data_ucac = result_ucac['I/322A/out']
    mask = np.logical_and(data_ucac["Vmag"] >= 10., data_ucac["Vmag"] <= 18.)
    data_ucac = data_ucac[mask]
    gv_ucac_coo = SkyCoord(data_ucac["RAJ2000"], data_ucac["DEJ2000"], unit=(u.degree,u.degree))
    gv_ucac_mag = data_ucac["Vmag"]
    #2MASS point source coordinates
    viz = Vizier(columns=["RAJ2000", "DEJ2000", "Bmag", "Rmag"])
    viz.ROW_LIMIT = -1
    result_2mass = viz.query_region(coo, radius=Angle(1.,"deg"), catalog="2MASS-PSC")
    gv_2mass_coo = SkyCoord(result_2mass["II/246/out"]["RAJ2000"], result_2mass["II/246/out"]["DEJ2000"], unit=(u.degree,u.degree))
    #Tycho-2 coordinates and V-band mags
    # result_tycho = Vizier.query_region(coo, radius=Angle(1.,"deg"), catalog="Tycho-2")
    # gv_tycho_coo = SkyCoord(result_tycho["I/259/tyc2"]["RA_ICRS_"], result_tycho["I/259/tyc2"]["DE_ICRS_"], unit=(u.degree,u.degree))
    # gv_tycho_mag = result_tycho["I/259/tyc2"]["VTmag"]
    #GSC2.3 coordinates and V mags
    # result_gsc = Vizier.query_region(coo, radius=Angle(1.,"deg"), catalog="GSC2.3")
    # data_gsc = result_gsc["I/305/out"]
    # mask = np.logical_and(data_gsc["Vmag"] >= 10., data_gsc["Vmag"] <= 25.)
    # data_gsc = data_gsc[mask]
    # gv_gsc_coo = SkyCoord(data_gsc["RAJ2000"], data_gsc["DEJ2000"], unit=(u.degree,u.degree))
    # gv_gsc_mag = data_gsc["Vmag"]
    # Re-order the 2MASS coordinates to align row-by-row with the UCAC
    # sources (nearest-neighbour match on the sky).
    # gv_gs_inds, d2d, d3d = match_coordinates_sky(gv_tycho_coo,gv_2mass_coo)
    # gv_gs_inds, d2d, d3d = match_coordinates_sky(gv_gsc_coo,gv_2mass_coo)
    gv_gs_inds, d2d, d3d = match_coordinates_sky(gv_ucac_coo,gv_2mass_coo)
    gv_2mass_coo = SkyCoord([gv_2mass_coo[ii].ra.deg for ii in gv_gs_inds], [gv_2mass_coo[ii].dec.deg for ii in gv_gs_inds], unit=(u.degree,u.degree))
    return gv_2mass_coo, gv_ucac_mag# gv_gsc_mag # gv_tycho_mag
#----------Cross-match to SCABS catalogues for sources within 0.1"-------------
def scabs_xmatch(gs_star_coo,gs_star_mag):
    """Cross-match candidate guide stars against the SCABS r-band catalogue.

    Keeps stars with a SCABS counterpart within 0.3 arcsec, then restricts
    to the 12.5 <= V <= 13.5 guide-star magnitude window.  Returns the
    matched SCABS coordinates and the corresponding input magnitudes.
    """
    #Load SCABS g-band source catalogue
    prefix = "/Users/mtaylor/Projects/SCABS/manuscripts/SCABS1-data/data"
    # prefix = "/Users/mtaylor/Dropbox/Projects/CenA_2dF_GCs/SCABS_Tile1-7_rband"
    scabs_data = fits.open(prefix+"/SCABS_Tiles1-7_sources_r-band.fits")[1].data
    scabs_coo = SkyCoord(scabs_data["ALPHA_J2000"], scabs_data["DELTA_J2000"], unit=(u.degree,u.degree))
    print "Begin matching..." #find a match for each catalogue point source
    gv_scabs_ind, gv_scabs_d2d, gv_scabs_d3d = match_coordinates_sky(gs_star_coo,scabs_coo)
    gv_sep = 0.3 #separation limit in arcsec
    mask = (gv_scabs_d2d.to(u.arcsec).value <= gv_sep)
    gv_scabs_ind = gv_scabs_ind[mask] ; gv_scabs_d2d = gv_scabs_d2d[mask] #keep only good matches
    print 'Limiting to matches within %.1f"....found %i matches.' % (gv_sep, len(gv_scabs_ind))
    gv_scabs_coo = SkyCoord([scabs_data["ALPHA_J2000"][ii] for ii in gv_scabs_ind], [scabs_data["DELTA_J2000"][ii] for ii in gv_scabs_ind], unit=(u.degree,u.degree))
    # Match back to the input stars to recover the magnitude for each
    # retained SCABS source.
    gv_inds, d2d, d3d = match_coordinates_sky(gv_scabs_coo,gs_star_coo)
    gv_star_mag = np.array([gs_star_mag[ii] for ii in gv_inds])
    #Limit sources to within given magnitude range
    mag_faint_lim = 13.5 #faint limit in magnitude
    mag_bright_lim = 12.5 #bright limit
    mask = np.logical_and(gv_star_mag >= mag_bright_lim, gv_star_mag <= mag_faint_lim)
    gv_scabs_coo = gv_scabs_coo[mask]
    gv_star_mag = gv_star_mag[mask]
    print "Limiting to guide stars in %.1f <= V <= %.1f...found %i matches." % (mag_bright_lim, mag_faint_lim, len(gv_star_mag))
    # plt.figure()
    # plt.plot(gs_star_coo.ra.deg, gs_star_coo.dec.deg,'bo',alpha=0.5)
    # plt.plot(gv_scabs_coo.ra.deg, gv_scabs_coo.dec.deg,'ro',alpha=0.5)
    # plt.show()
    # exit()
    return gv_scabs_coo, gv_star_mag
#----------Save guidestar coordinates to file-------------
def save_results(gs_coo, gs_mag):
    """Write the guide-star list (index, RA, Dec, V mag) to a text file.

    The output file is named after the RA/Dec command-line arguments.
    """
    fileout = open("2dF_guidestars_%s%s.txt" % (sys.argv[1], sys.argv[2]), "w")
    for ii in range(len(gs_coo)):
        line = "%03d %f %f %.2f" % (ii + 1, gs_coo.ra.deg[ii], gs_coo.dec.deg[ii], gs_mag[ii])
        fileout.write(line + "\n")
    fileout.close()
    # exit()
#----------Plot guidestar options on 2dF field of view-------------
def plot_results(fov_coords, gs_coo, gs_mag):
    """Plot guide stars and GC candidates over a DSS image of the 2dF field.

    Also writes the in-field GC target list (17 <= V <= 21.5) to
    2dF_GCs_<RA><Dec>.txt and saves the whole figure to a PDF.
    """
    #Load the dss background image for wcs
    dss_hdu = fits.open("dss_image.fits")
    dss_im = dss_hdu[0].data
    dss_wcs = WCS(dss_hdu[0].header)
    fig = plt.figure(figsize=(10,10))
    gs = gridspec.GridSpec(2,2, height_ratios=[0.03,1], width_ratios=[1,0.03])
    gs.update(left=0.05, right=0.95, bottom=0.08, top=0.93, wspace=0.02, hspace=0.03)
    ax = fig.add_subplot(gs[1,0], projection=dss_wcs)
    # Shaded 1-degree circle marking the 2dF field of view.
    fov = SphericalCircle((fov_coords.ra.deg,fov_coords.dec.deg)*u.degree,1.*u.degree, edgecolor="None", facecolor="k",transform=ax.get_transform('fk5'), linestyle='solid', lw=2, alpha=0.1,zorder=1)
    ax.add_patch(fov)
    # ax.plot(gs_coo.ra.deg,gs_coo.dec.deg,'o',transform=ax.get_transform("fk5"))
    ax.plot([fov_coords.ra.deg], [fov_coords.dec.deg],'kX',ms=15,transform=ax.get_transform("fk5"))
    # Guide stars, colour-coded by V magnitude.
    gs_ax = ax.scatter(gs_coo.ra.deg,gs_coo.dec.deg,c=gs_mag,
                       marker='o', vmin=np.min(gs_mag), vmax=np.max(gs_mag),
                       edgecolor="None",cmap="viridis",s=75,transform=ax.get_transform("fk5"))
    ax.grid()
    ax.set_xlabel(r"RA (J2000)",fontsize=18)
    ax.set_ylabel(r"Dec (J2000)",fontsize=18)
    #Label each GS with it's corresponding number to match them with postage stamps
    for ii in range(len(gs_coo)):
        ax.text(gs_coo[ii].ra.deg,gs_coo[ii].dec.deg, ii+1, color="k", fontsize=10, fontdict={"weight": "bold"}, transform=ax.get_transform('fk5'))#horizontalalignment="center", verticalalignment="center", transform=ax.get_transform('fk5'))
    cbax = fig.add_subplot(gs[1,1])
    cb = Colorbar(ax=cbax, mappable=gs_ax, orientation='vertical', ticklocation='right')
    cb.set_label(r"Guide Star $m_V$ (mag)",fontsize=18)#,labelpad=20)
    #Load GC catalogue
    gc_data = ascii.read("gcs_Tile1-7_coords_ugriz.txt")
    gc_coo = SkyCoord(gc_data["RA"], gc_data["Dec"], unit=(u.hourangle,u.degree))
    #isolate GCs falling within 2dF fov
    seps = fov_coords.separation(gc_coo)
    mask = (seps.to(u.degree).value < 1.)
    gc_data = gc_data[mask] ; gc_coo = gc_coo[mask]
    #Calculate V-band mags and limit to 17 < V < 21.5
    gc_vmag = gc_data["g_mag"] - 0.58*(gc_data["g_mag"]-gc_data["r_mag"]) - 0.01
    mask = np.logical_and(gc_vmag >= 17., gc_vmag <= 21.5)
    gc_vmag = gc_vmag[mask] ; gc_data = gc_data[mask] ; gc_coo = gc_coo[mask]
    print "Number of GC targets available in the field = %i" % (len(gc_vmag))
    #Save the list of GC targets
    fileout = open("2dF_GCs_%s%s.txt" % (sys.argv[1], sys.argv[2]),"w")
    for ii in range(len(gc_coo)):
        print >> fileout, "%03d %f %f %.2f" % (ii+1, gc_coo.ra.deg[ii], gc_coo.dec.deg[ii], gc_vmag[ii])
    fileout.close()
    #plot 'em
    gc_ax = ax.scatter(gc_coo.ra.deg,gc_coo.dec.deg,c=gc_vmag,
                       marker='o', vmin=np.min(gc_vmag), vmax=np.max(gc_vmag),
                       edgecolor="None",cmap="copper",s=20,transform=ax.get_transform("fk5"),alpha=0.75)
    cbax = fig.add_subplot(gs[0,0])
    cb = Colorbar(ax=cbax, mappable=gc_ax, orientation='horizontal', ticklocation='top')
    cb.set_label(r"GC Candidate $m_V$ (mag)",fontsize=18)#,labelpad=20)
    # NOTE(review): "overwrite" is not a savefig kwarg (it belongs to
    # astropy's writers); older matplotlib ignored unknown kwargs -- confirm.
    plt.savefig("2dF_guidestar_search_%s%s.pdf" % (sys.argv[1], sys.argv[2]),bbox_inches="tight",overwrite=True)
    # plt.show()
#----------Create r'-band postage stamp images for visual inspection-------------
def make_stamps(gs_coo):
    """Create r'-band postage-stamp PDFs of every guide star for visual vetting.

    For each guide star, every SCABS tile whose centre lies within 1 degree
    is opened and a 120x150 arcsec cutout around the star is plotted, with a
    1 arcsec circle marking the 2dF fibre extent.  Output files go to
    cutouts_<RA><Dec>/gs_NNN_tileT.pdf.
    """
    # Directory prefix for the tile images.
    prefix = "/Users/mtaylor/Projects/SCABS/stacks"
    # prefix = "/Users/mtaylor/Dropbox/Projects/CenA_2dF_GCs/SCABS_Tile1-7_rband"
    # Central coordinates of each SCABS tile, keyed by tile number.
    tiles_coo = {"1": SkyCoord(201.365062792, -43.0191125833, unit=(u.degree,u.degree)),
                 "2": SkyCoord(203.167159865, -41.8681243611, unit=(u.degree,u.degree)),
                 "3": SkyCoord(200.944693054, -41.2104168056, unit=(u.degree,u.degree)),
                 "4": SkyCoord(199.128469057, -42.3614050278, unit=(u.degree,u.degree)),
                 "5": SkyCoord(199.556484167, -44.1701008056, unit=(u.degree,u.degree)),
                 "6": SkyCoord(201.803072602, -44.8278083611, unit=(u.degree,u.degree)),
                 "7": SkyCoord(203.588227236, -43.6768201389, unit=(u.degree,u.degree))}
    # Create the cutout output directory if it does not exist yet.
    if not os.path.exists("cutouts_%s%s" % (sys.argv[1], sys.argv[2])):
        os.makedirs("cutouts_%s%s" % (sys.argv[1], sys.argv[2]))
    size = u.Quantity((120., 150.), u.arcsec)  # angular size of each cutout
    for ii in range(len(gs_coo)):
        # Tiles containing this guide star (within 1 degree of tile centre).
        temp_tiles = []
        for jj in tiles_coo:
            temp_coo = tiles_coo[str(jj)]
            if temp_coo.separation(gs_coo[ii]).to(u.degree).value <= 1.0:
                temp_tiles.append(str(jj))
        for tile_num in temp_tiles:
            # Open the r-band image for the tile and cut out the star.
            hdu = fits.open(prefix + "/survey_tile%s_r_short_ALIGNi.fits" % tile_num)
            wcs = WCS(hdu[0].header)
            data = hdu[0].data
            cutout = Cutout2D(data, gs_coo[ii], size, wcs=wcs)
            norm = ImageNormalize(cutout.data, stretch=LogStretch())
            fig = plt.figure()
            ax1 = plt.subplot(projection=cutout.wcs)
            ax1.imshow(cutout.data, origin='lower', norm=norm, cmap='gray_r', vmin=-5, vmax=750, aspect='auto')
            ax1.grid(color='C0', ls='dashed', lw=1.5)
            # Add circle to indicate the 2dF fibre extent (1 arcsec radius).
            fibre = SphericalCircle((gs_coo[ii].ra.deg, gs_coo[ii].dec.deg)*u.degree, 1.*u.arcsec, edgecolor="green", facecolor="None", transform=ax1.get_transform('fk5'), linestyle='solid', lw=2, alpha=0.9, zorder=1)
            ax1.add_patch(fibre)
            lon = ax1.coords[0]
            lat = ax1.coords[1]
            lon.set_axislabel(r"$\alpha$ (J2000)", fontsize=22)
            lon.set_major_formatter("dd:mm")
            lon.set_ticks(size=6, width=2)
            lon.set_ticklabel(size=14)
            lon.set_ticks_position('b')
            lat.set_axislabel(r"$\delta$ (J2000)", fontsize=22)
            lat.set_major_formatter("dd:mm")
            lat.set_ticks(size=6, width=2)
            lat.set_ticklabel(size=14)
            lat.set_ticks_position('lr')
            # Bug fix: the keyword was misspelled "bbx_inches", so the tight
            # bounding box was never applied.
            # NOTE(review): "overwrite" is not a savefig kwarg; older
            # matplotlib silently ignored it -- confirm before removing.
            plt.savefig("cutouts_%s%s/gs_%03d_tile%s.pdf" % (sys.argv[1], sys.argv[2], ii+1, tile_num), bbox_inches="tight", overwrite=True)
            hdu.close()
            plt.close("all")
if __name__ == "__main__":
    # Run the full guide-star search pipeline.
    main()
from pathlib import Path
from os.path import splitext
from detectron2.structures import BoxMode
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.utils.visualizer import Visualizer
import os
import cv2
import pandas as pd
import random
# Image frames whose point annotations should get the smaller boxes:
# points in these files are expanded to +/-10 px boxes in get_dicts();
# all other frames get +/-20 px boxes.
box20List = ["IMG_170406_035932_0022_RGB4.JPG", "IMG_170406_035933_0023_RGB3.JPG", "IMG_170406_035939_0028_RGB3.JPG",
             "IMG_170406_040009_0053_RGB1.JPG", "IMG_170406_040033_0073_RGB2.JPG", "IMG_170406_040105_0099_RGB4.JPG",
             "IMG_170406_040108_0102_RGB3.JPG", "IMG_170406_040108_0102_RGB4.JPG", "IMG_170406_040156_0142_RGB1.JPG",
             "IMG_170406_040202_0147_RGB3.JPG", "IMG_170406_040202_0147_RGB4.JPG", "IMG_170406_040308_0202_RGB1.JPG"]
def get_dicts(root_dir):
    """Build detectron2-style dataset dicts for every *.JPG under *root_dir*.

    Point labels are read from ../rice_data/train_label/<name>.csv and
    expanded into square boxes (+/-10 px for frames listed in box20List,
    +/-20 px otherwise), all assigned category 0 ('plant').
    """
    records = []
    root = Path(root_dir)
    for idx, img_path in enumerate(sorted(root.glob("*.JPG"))):
        fname = str(img_path).replace(f'{root_dir}/', '')
        full_path = os.path.join(root_dir, fname)
        height, width = cv2.imread(full_path).shape[:2]
        record = {
            'file_name': full_path,
            'image_id': idx,
            'height': height,
            'width': width,
        }
        labels = pd.read_csv(os.path.join('../rice_data/train_label', fname.replace('JPG', 'csv')), names=['x', 'y'])
        # Half-width of the square box around each annotated point.
        half = 10 if fname in box20List else 20
        annotations = []
        for row in range(labels.shape[0]):
            x_min = float(labels.x[row] - half)
            y_min = float(labels.y[row] - half)
            x_max = float(labels.x[row] + half)
            y_max = float(labels.y[row] + half)
            annotations.append({
                "bbox": [x_min, y_min, x_max, y_max],
                "bbox_mode": BoxMode.XYXY_ABS,
                "category_id": 0,
                "iscrowd": 0,
            })
        record['annotations'] = annotations
        records.append(record)
    return records
if __name__ == "__main__":
    folder_name = '../rice_data'
    # Register the training split with detectron2's catalogs.
    # NOTE(review): `dir` shadows the builtin; the lambda binds it as a
    # default argument so each registration captures its own split name.
    for dir in ["train"]:
        DatasetCatalog.register(dir, lambda d = dir: get_dicts(folder_name + "/" + d))
        MetadataCatalog.get(dir).set(thing_classes=['plant'], stuff_classes=[], thing_colors=[(0,0,0)])
    tttt = MetadataCatalog.get("train")
    dataset_dicts = get_dicts(folder_name + "/train")
    # Visual spot-check of random samples (disabled):
    # print(dataset_dicts)
    # for d in random.sample(dataset_dicts, 1):
    #     print(d['file_name'])
    #     img = cv2.imread(d["file_name"])
    #     visualizer = Visualizer(img[:, :, ::-1], metadata=tttt, scale=0.5)
    #     out = visualizer.draw_dataset_dict(d)
    #     # imshow(out.get_image()[:, :, ::-1])
    #     cv2.imshow('image',out.get_image()[:, :, ::-1])
    #     cv2.waitKey(0)
    #     cv2.destroyAllWindows()
|
class Node:
    """A tree node holding an element, a parent link, and a list of children."""

    def __init__(self, e: object):
        self.element = e
        self.parent = None  # parent Node; None until attached (root stays None)
        self.child = []     # child Node objects, in insertion order

    def insert_child(self, n):
        """Append node *n* to this node's children."""
        self.child.append(n)

    def del_child(self, n):
        """Remove node *n* from this node's children, if present."""
        if n in self.child:
            self.child.remove(n)

    def set_parent(self, n):
        """Re-point this node's parent link at *n*."""
        self.parent = n


class Tree:
    """General tree addressed by node elements (elements assumed unique)."""

    def __init__(self, e: object):
        # Create the root node and the flat registry of all nodes.
        node = Node(e)
        self.root = node
        self.node_list = [node]

    def size(self) -> int:
        """Return the number of nodes in the tree."""
        return len(self.node_list)

    def _find(self, el: object):
        """Return the first registered node whose element equals *el*, else None."""
        for node in self.node_list:
            if node.element == el:
                return node
        return None

    def insert_node(self, par_el: object, el: object):
        """Create a node for *el* as a child of the node holding *par_el*."""
        parent = self._find(par_el)
        if parent is None:
            print(f'error: there is no node with {par_el}')
            return
        node = Node(el)
        parent.insert_child(node)
        self.node_list.append(node)
        node.parent = parent

    def del_node(self, el: object):
        """Delete the node holding *el*, re-attaching its children to its parent."""
        if el == self.root.element:
            print(f"error: you can't delete root")
            return
        node = self._find(el)
        if node is None:
            print(f'error: there is no node with {el}')
            return
        parent = node.parent
        parent.del_child(node)  # detach from the parent
        for orphan in node.child:
            # Promote the deleted node's children to its parent.
            orphan.parent = parent
            parent.insert_child(orphan)
        self.node_list.remove(node)

    def print_chi(self, el: object):
        """Print the elements of the children of the node holding *el*."""
        node = self._find(el)
        if node is None:
            print(f'error: there is no node with {el}')
        elif len(node.child) == 0:
            print(f'There is no child for node:{el}')
        else:
            print(f'child list: {[c.element for c in node.child]}')

    def print_sib(self, el: object):
        """Print the elements of nodes sharing a parent with *el* (incl. itself)."""
        node = self._find(el)
        if node is None:
            print(f'error: there is no node with {el}')
        elif node.parent is None:
            # Bug fix: the original dereferenced node.parent.child here and
            # crashed with AttributeError when *el* was the root's element.
            print(f'error: node with {el} is the root and has no siblings')
        else:
            print(f'sibling list: {[s.element for s in node.parent.child]}')
def pre_order(n):
    """Print the subtree rooted at *n* in preorder (node before children)."""
    stack = [n]
    while stack:
        node = stack.pop()
        print(node.element, end=' ')
        # Push children right-to-left so the leftmost is visited first.
        stack.extend(reversed(node.child))
def post_order(n):
    """Print the subtree rooted at *n* in postorder (children before node)."""
    def visit(node):
        # Yield elements of every child subtree before the node itself.
        for sub in node.child:
            for element in visit(sub):
                yield element
        yield node.element
    for element in visit(n):
        print(element, end=' ')
if __name__ == "__main__":
    tree = Tree(1)  # build a tree rooted at element 1
    # Attach two levels of children.
    tree.insert_node(1, 2)
    tree.insert_node(1, 3)
    tree.insert_node(1, 4)
    tree.insert_node(2, 5)
    tree.insert_node(2, 6)
    tree.insert_node(3, 7)
    tree.insert_node(3, 8)
    tree.insert_node(3, 9)
    print('Preorder Traversal: ')  # print the preorder traversal
    pre_order(tree.root)
    print()
    print('Postorder Traversal: ')  # print the postorder traversal
    post_order(tree.root)
    print()
import unittest
from bst import BST
class BSTTests(unittest.TestCase):
    """Unit tests for the BST binary search tree implementation."""

    def setUp(self):
        # Fresh tree for every test; values populate both subtrees of 10.
        self.bst = BST()
        for value in (10, 6, 15, 1, 20):
            self.bst.insert(value)

    def test_inorder_walk(self):
        """Smoke test: inorder walk runs without raising."""
        self.bst.inorder_walk(self.bst.root)
        self.assertEqual(0, 0)

    def test_preorder_walk(self):
        """Smoke test: preorder walk runs without raising."""
        # print(...) with a single argument behaves identically under
        # Python 2 and 3; the bare `print '*'*20` form was Python-2-only.
        print('*' * 20)
        self.bst.preorder_walk(self.bst.root)
        print('*' * 20)
        self.assertEqual(0, 0)

    def test_min(self):
        # Local renamed from `min` to avoid shadowing the builtin.
        min_node = self.bst.min(self.bst.root)
        self.assertEqual(min_node.data, 1)

    def test_search(self):
        node = self.bst.search(20)
        self.assertEqual(node.data, 20)

    def test_remove(self):
        node = self.bst.search(10)
        self.bst.inorder_walk(self.bst.root)
        self.bst.remove(node)
        self.bst.inorder_walk(self.bst.root)
        # The removed key must no longer be found.
        node = self.bst.search(10)
        self.assertEqual(node, None)
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
|
# https://leetcode.com/problems/find-original-array-from-doubled-array/
"""
An integer array original is transformed into a doubled array changed by appending twice the value of every element in original, and then randomly shuffling the resulting array.
Given an array changed, return original if changed is a doubled array. If changed is not a doubled array, return an empty array. The elements in original may be returned in any order.
Example 1:
Input: changed = [1,3,4,2,6,8]
Output: [1,3,4]
Explanation: One possible original array could be [1,3,4]:
- Twice the value of 1 is 1 * 2 = 2.
- Twice the value of 3 is 3 * 2 = 6.
- Twice the value of 4 is 4 * 2 = 8.
Other original arrays could be [4,3,1] or [3,1,4].
Example 2:
Input: changed = [6,3,0,1]
Output: []
Explanation: changed is not a doubled array.
Example 3:
Input: changed = [1]
Output: []
Explanation: changed is not a doubled array.
Constraints:
1 <= changed.length <= 10^5
0 <= changed[i] <= 10^5
"""
from collections import Counter, defaultdict, deque
def find_original_array(changed: list[int]) -> list[int]:
    """Recover the original array from a doubled array, or return [].

    Index-tracking approach: after sorting (in place), each unclaimed value
    is taken as an original and the leftmost unclaimed occurrence of its
    double is consumed; if no double remains, *changed* is not doubled.
    """
    if len(changed) % 2:
        return []
    changed.sort()
    positions = defaultdict(deque)
    for index, value in enumerate(changed):
        positions[value].append(index)
    claimed = set()
    original = []
    for index, value in enumerate(changed):
        if index in claimed:
            continue
        # Claim this occurrence as an original value.
        claimed.add(positions[value].popleft())
        original.append(value)
        doubles = positions[value * 2]
        if not doubles:
            return []
        claimed.add(doubles.popleft())
    return original
def find_original_array(changed: list[int]) -> list[int]:
    """Recover the original array from a doubled array, or return [].

    Counting approach: walk distinct values in ascending order, pairing
    each value with its double; the counts left over are the original.
    Zero pairs with itself, so only half its count is consumed.
    """
    if len(changed) % 2 == 1:
        return []
    counts = Counter(changed)
    for value in sorted(counts):
        if counts[value] > counts[value * 2]:
            return []
        pairs = counts[value] // 2 if value == 0 else counts[value]
        counts[value * 2] -= pairs
    return list(counts.elements())
if __name__ == "__main__":
    # Quick manual smoke run (result is discarded).
    find_original_array([1, 3, 4, 2, 6, 8])
|
"""User Manager used by Improved User; may be extended"""
from django.contrib.auth.models import BaseUserManager
class UserManager(BaseUserManager):
    """Manager for Users; overrides create commands for new fields.

    Meant to be interacted with via the user model:

    .. code:: python

        User.objects              # the UserManager
        User.objects.all()        # has normal Manager/UserManager methods
        User.objects.create_user  # overrides methods for Improved User

    Set to :attr:`~django.db.models.Model.objects` by
    :attr:`~improved_user.models.AbstractUser`
    """

    def _create_user(
        self, email, password, is_staff, is_superuser, **extra_fields
    ):
        """Validate, build, and save a User; shared helper."""
        # Email is the sole identifier, so it must be present...
        if not email:
            raise ValueError("An email address must be provided.")
        # ...and a username must not sneak in through extra fields.
        if "username" in extra_fields:
            raise ValueError(
                "The Improved User model does not have a username; "
                "it uses only email"
            )
        user = self.model(
            email=self.normalize_email(email),
            is_staff=is_staff,
            is_superuser=is_superuser,
            **extra_fields,
        )
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_user(self, email=None, password=None, **extra_fields):
        """Save a new regular User (staff/superuser default to False)."""
        for flag in ("is_staff", "is_superuser"):
            extra_fields.setdefault(flag, False)
        return self._create_user(email, password, **extra_fields)

    def create_superuser(self, email, password, **extra_fields):
        """Save a new User with is_staff and is_superuser forced to True."""
        for flag in ("is_staff", "is_superuser"):
            extra_fields.setdefault(flag, True)
            if extra_fields.get(flag) is not True:
                raise ValueError("Superuser must have %s=True." % flag)
        return self._create_user(email, password, **extra_fields)
|
import sys

# Print matched translation pairs line-by-line from the two files,
# stopping at the first empty line (or EOF) in either file.
# FIX: the original opened both files without ever closing them and
# called exit() mid-loop, leaking the handles; `with` guarantees cleanup.
with open("c2j.txt") as c2j_file, open("j2c.txt") as j2c_file:
    while True:
        c2j_line = c2j_file.readline().replace('\r', '').replace('\n', '')
        j2c_line = j2c_file.readline().replace('\r', '').replace('\n', '')
        if c2j_line == '' or j2c_line == '':
            break
        print("C: " + c2j_line)
        print("J: " + j2c_line)
        print('\n')
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Performs project's configuration loading."""
import os
from configparser import ConfigParser
from dotenv.main import dotenv_values
import autobot.config as config
class Config:
    """Interact with configuration variables."""

    # Paths are resolved relative to this module's own directory.
    dotenv_path = os.path.join(os.path.abspath(os.path.join(__file__, "..")), ".env")
    ini_parser = ConfigParser()
    ini_path = os.path.join(os.path.abspath(os.path.join(__file__, "..")), "config.ini")
    # Merged configuration; class-level, so it is shared by all instances.
    config = {}

    @classmethod
    def __init__(cls, **kwargs):
        """Initialize configuration."""
        # NOTE(review): a @classmethod __init__ is unusual -- constructing
        # Config() mutates class-wide state, not the instance.
        cls.load_config(**kwargs)

    @classmethod
    def load_config(cls, **kwargs):
        """Get autobot configuration values.

        Precedence (lowest to highest): config.py defaults < .env <
        config.ini < explicit kwargs; only keys declared in config.py
        survive the final filter.
        """
        cls.config = cls.env_config()
        # optionxform=str preserves key case (ConfigParser lower-cases by default).
        cls.ini_parser.optionxform = str
        cls.ini_parser.read(cls.ini_path)
        cls.config.update(cls.ini_config())
        cls.config.update(kwargs)
        py_config = cls.py_config()
        # Fill in config.py values only where nothing else provided the key.
        defaults = {
            key: py_config[key] for key in py_config if key not in cls.config.keys()
        }
        cls.config.update(defaults)
        # Drop any key that config.py does not declare.
        cls.config = {key: cls.config[key] for key in py_config}

    @classmethod
    def env_config(cls):
        """Get autobot configuration values from .env file."""
        return dotenv_values(cls.dotenv_path)

    @classmethod
    def ini_config(cls):
        """Get autobot configuration values from config.ini file."""
        # Returns the live SectionProxy for the [AUTOBOT] section.
        return cls.ini_parser["AUTOBOT"]

    @classmethod
    def py_config(cls):
        """Get autobot configuration values from config.py file."""
        # Only module attributes with the AUTOBOT_ prefix are considered config.
        res = {}
        for k in dir(config):
            if k.startswith("AUTOBOT_"):
                res.setdefault(k, getattr(config, k))
        return res
|
from geopy.distance import geodesic
import datetime
def get_distance_in_km(lprev, lnew):
    """Geodesic distance in kilometres between two (lat, lon) tuples."""
    return geodesic(lprev, lnew).km
def get_speed_in_kmph(lprev, lnew, tprev, tnew):
    """Average speed in km/h between two position fixes.

    lprev/lnew: (latitude, longitude) tuples.
    tprev/tnew: datetime.datetime objects bracketing the movement.
    Returns 0 when the timestamps coincide (zero elapsed time).
    """
    distance_in_km = get_distance_in_km(lprev, lnew)
    # BUG FIX: timedelta.seconds keeps only the within-day remainder
    # (it silently drops the days component); total_seconds() is the
    # true elapsed time.
    diff_in_sec = (tnew - tprev).total_seconds()
    diff_in_hr = diff_in_sec / 3600
    try:
        speed_in_kmph = distance_in_km / diff_in_hr
    except ZeroDivisionError:
        # Narrowed from a bare except: only a zero time span is expected here.
        speed_in_kmph = 0
    return speed_in_kmph
|
import copy
from collections import deque
from collections import defaultdict
from numpy import fft
import time
from activeNote import ActiveNote
from noteParser import Note
from noteProcessors.abstractNoteProcessor import AbstractNoteProcessor
from noteProcessors.continuousNoteProcessor.columnManager import ColumnManager
from noteProcessors.continuousNoteProcessor.constants import Constants
from noteProcessors.continuousNoteProcessor.message import Message, MessageType, NewNoteMessage
from noteProcessors.continuousNoteProcessor.rowGenerators import ContinuousGenerator, RecursiveGenerator
import utils
class ContinuousNoteProcessor(AbstractNoteProcessor):
    """Note processor that tracks notes continuously across FFT frames."""

    def __init__(self, waveform, sampleRate, noteParser=None, shapeStrategy=None):
        # shapeStrategy is accepted but unused here -- presumably kept for
        # signature compatibility with sibling processors; TODO confirm.
        super(ContinuousNoteProcessor, self).__init__(waveform, sampleRate, noteParser)

    # Estimates how much the whole song is offtune by. This will increase search capabilities.
    # The assumption here is that all notes of a song are offtune by a similar amount.
    # TODO: Change this so each section of the song has its own set of approximation.
    def getOutOfTune(self):
        """Estimate the song-wide detune as an average percent deviation
        of the loudest spectral peaks from their nearest true notes."""
        def getLocalMax(arr):
            # Indices of strict local maxima (peaks) in arr.
            localMax = []
            for i in range(1, len(arr) - 1):
                if arr[i] > arr[i - 1] and arr[i] > arr[i + 1]:
                    localMax.append(i)
            return localMax
        # Number of notes to take when calculating the offset.
        referenceNoteCount = 6
        # Duration of time to take as samples when calculating offset
        referenceDuration = self.sampleRate * 5
        # Arbitrary start index of offset
        referenceStartIndex = 10000
        referenceFFT = fft.fft(self.waveform[referenceStartIndex: referenceStartIndex + referenceDuration])
        # Magnitudes of the first half of the spectrum (upper half mirrors it).
        referenceFFT = [abs(f) for f in referenceFFT][:int(len(referenceFFT) / 2)]
        localMax = getLocalMax(referenceFFT)
        # Loudest peaks first.
        localMax.sort(key=lambda x: -referenceFFT[x])
        i = 0
        existingNotes = []
        percentDiffs = []
        # Get double the reference note count
        while len(existingNotes) <= referenceNoteCount * 2:
            index = localMax[i]
            i += 1
            # Convert FFT bin index back to a frequency in Hz.
            frequency = index * self.sampleRate / referenceDuration
            closestNote, percentDiff = self.getClosestNote(frequency)
            if closestNote in existingNotes:
                continue
            existingNotes.append(closestNote)
            percentDiffs.append(percentDiff)
        # Remove the notes that increase deviation within the set the most
        # NOTE(review): `iterations` is computed but never used; the loop
        # below always discards referenceNoteCount outliers instead.
        iterations = len(existingNotes) - referenceNoteCount
        for i in range(referenceNoteCount):
            avg = sum(percentDiffs) / len(percentDiffs)
            # Keep values closest to the current mean; drop the farthest one.
            percentDiffs.sort(key=lambda x: abs(x - avg))
            percentDiffs = percentDiffs[:-1]
        outOfTune = sum(percentDiffs) / len(percentDiffs)
        return outOfTune

    # The purpose of this function is to meld the instances where a row is 0x0x0x0... where x is a positive number.
    # In these cases, it is likely a case of interference from a previous note in the same frame.
    # Temporarily deprecated
    def meldRow(self, row, notesFoundInFrame):
        """Interpolate over alternating near-zero dips around found notes
        (interference artifacts). Mutates `row` in place; may return early."""
        def isLowValue(value, referenceValue):
            # A dip: negative, or far below the reference (percent diff > 4).
            if value < 0:
                return True
            if value < referenceValue and utils.percentDiff(value, referenceValue) > 4:
                return True
            return False
        def isHighValue(value, referenceValue):
            # A spike relative to referenceValue (clamped at zero).
            if referenceValue < 0:
                referenceValue = 0
            if utils.percentDiff(value, value - referenceValue) < 0.1:
                return True
            return False
        interpolateIndices = []
        for index, loudness in notesFoundInFrame.items():
            # Local neighborhood of +/-3 bins around the note's bin.
            elements3 = [row[i] for i in range(index - 3, index + 4)]
            referenceValue = max(elements3)
            value = loudness / Constants.COLUMN_PROCESSOR_DATA_WIDTH
            if utils.percentDiff(referenceValue, value) > 1 and value < referenceValue:
                return
            # NOTE(review): elements3 starts at index - 3, so this offset
            # looks like it should be `index - 3 + ...` -- confirm intent.
            referenceIndex = index - 1 + elements3.index(referenceValue)
            # Scan up to 8 bins away on both sides for low/high/low combs.
            for direction in [-1, 1]:
                searchIndex = referenceIndex
                while abs(searchIndex - referenceIndex) < 8:
                    searchIndex += direction
                    if isLowValue(row[searchIndex], referenceValue):
                        if isHighValue(row[searchIndex + 1], row[searchIndex]) and isHighValue(row[searchIndex - 1], row[searchIndex]):
                            if isLowValue(row[searchIndex - 2], referenceValue) or isLowValue(row[searchIndex + 2], referenceValue):
                                interpolateIndices.append(searchIndex)
                                print(loudness)  # NOTE(review): leftover debug print?
        for index in interpolateIndices:
            # Replace each dip with the mean of its immediate neighbors.
            row[index] = (row[index - 1] + row[index + 1]) / 2

    def run(self):
        """Process the waveform frame-by-frame and return the active notes."""
        samplesPerInterval = 512
        intervalsPerFrame = 128
        samplesPerFrame = samplesPerInterval * intervalsPerFrame
        rowGenerator = ContinuousGenerator(samplesPerInterval, intervalsPerFrame, self.waveform)
        rows = rowGenerator.generate()
        outOfTune = self.getOutOfTune()
        columnManager = ColumnManager(outOfTune, self.noteParser, self.sampleRate, samplesPerFrame, samplesPerInterval)
        # NOTE(review): the visualise branches reference d2Array/d2Row,
        # which are never defined in this scope -- flipping visualise to
        # True would raise NameError; presumably leftover debug plumbing.
        visualise = False
        for row in rows:
            if visualise:
                d2Array.append(d2Row[:2000])
            columnManager.processNewDataRow(row)
        if visualise:
            for columnIndex in range(len(d2Array[0])):
                dataIndex = len(d2Array)
                negativeCollector = 0
                while dataIndex > 0:
                    dataIndex -= 1
                    if d2Array[dataIndex][columnIndex] < 0:  # Propagate negative number up
                        negativeCollector += d2Array[dataIndex][columnIndex]
                        d2Array[dataIndex][columnIndex] = 0
                    else:
                        if negativeCollector < 0:
                            if d2Array[dataIndex][columnIndex] > -negativeCollector:
                                d2Array[dataIndex][columnIndex] += negativeCollector
                                negativeCollector = 0
                            else:
                                negativeCollector += d2Array[dataIndex][columnIndex]
                                d2Array[dataIndex][columnIndex] = 0
            utils.d2Plot(d2Array, "out/continous2.png", widthCompression=100, heightCompression=10)
        return columnManager.getActiveNotes()
|
import platform
import sys
from pathlib import Path
# "major.minor" of the running interpreter, e.g. "3.11".
PYTHON_VERSION_STR = f"{sys.version_info[0]}.{sys.version_info[1]}"
# Platform logic: collapse sys.platform into the tag used in package-list
# file names ("macos<major release>", "win", or "unix" for everything else).
if sys.platform == "darwin":
    FULL_PLATFORM = "macos" + platform.release().split(".")[0]
elif sys.platform == "win32":
    FULL_PLATFORM = "win"
else:
    FULL_PLATFORM = "unix"
def get_platform_list_path(package_list_dir_path: Path) -> Path:
    """Return the package-list file path for this platform and Python
    version, e.g. ``<dir>/unix-python3.11.txt``."""
    file_name = f"{FULL_PLATFORM}-python{PYTHON_VERSION_STR}.txt"
    return package_list_dir_path / file_name
def get_platform_packages_dir_path(pipx_package_cache_path: Path) -> Path:
    """Return the per-Python-version subdirectory of the pipx package cache."""
    return pipx_package_cache_path / f"{PYTHON_VERSION_STR}"
|
""" A global dictionary of text variables
"""
#import logging
import pickle
class Dictionary(object):
    """A per-language store of text variables loaded from a pickle file."""

    ENGLISH = "en"
    LANGUAGES = [ENGLISH]
    # One shared Dictionary per loaded language.
    __singletons = dict()

    def __init__(self, file_path):
        """Load the variable map and precompute the site root URL."""
        with open(file_path, "rb") as handle:
            self.__map = pickle.load(handle)
        # Omit the port from the root URL when it is the HTTP default.
        port_txt = (":{0}".format(self.__map["_port_"])
                    if int(self.__map["_port_"]) != 80 else "")
        self.__map["_root_"] = "{0}://{1}{2}{3}".format(
            self.__map["_protocol_"],
            self.__map["_hostname_"],
            port_txt,
            self.__map["_base_path_"])

    @staticmethod
    def load(language, file_path):
        """Load a language dictionary once; repeat calls are no-ops."""
        if language in Dictionary.__singletons:
            return
        if language not in Dictionary.LANGUAGES:
            raise Exception("No such language: " + language)
        Dictionary.__singletons[language] = Dictionary(file_path)

    @staticmethod
    def get_default():
        """Return the English dictionary."""
        return Dictionary.__singletons[Dictionary.ENGLISH]

    @staticmethod
    def get_language(language):
        """Return the dictionary for *language*; raise if not loaded."""
        if language not in Dictionary.__singletons:
            raise Exception("Language not loaded: " + language)
        return Dictionary.__singletons[language]

    def get(self, key):
        """Return the text for *key*, or None when absent."""
        return self.__map.get(key)

    def get_map(self):
        """Return the underlying variable map."""
        return self.__map

    def format(self, value, variables=None):
        """Format *value* against the map, overlaid with *variables* if given."""
        mapping = {**self.__map, **variables} if variables else self.__map
        return value.format(**mapping)

    def format_date(self, date_fmt, date):
        """Format *date* using the format string stored under *date_fmt*."""
        return self.__map[date_fmt].format(
            date.year, date.month, date.day,
            date.hour, date.minute, date.isoweekday())
|
class Pin:
    """A connector pin that may be attached to one end of a wire."""

    def __init__(self):
        # BUG FIX: the original assigned a local variable `wire`, so
        # instances never got the attribute; bind it to self instead.
        self.wire = None  # attached wire, identification of one of wire's end

    def connect_node(self, node):
        """Attach this pin to *node* (not yet implemented)."""
        return

    def disconnect_node(self):
        """Detach this pin from its node (not yet implemented)."""
        return
class Instance:
    """A placed component with a width and a height."""

    def __init__(self, w, h):
        self.w, self.h = w, h
|
class Hospital(object):
    """Tracks admitted patients up to a fixed bed capacity.

    Ported to Python 3: the original used Python 2 ``print`` statements,
    which are a SyntaxError on Python 3.
    """

    def __init__(self, name, capacity):
        self.patients = []        # currently admitted patient objects
        self.name = name
        self.capacity = capacity  # maximum simultaneous admissions

    def admit(self, var1, var2):
        """Admit patient *var1* into bed number *var2* if a bed is free.

        Returns self for call chaining.
        """
        if len(self.patients) < self.capacity:
            self.patients.append(var1)
            var1.bed_number = var2
        else:
            print("admission complete, hospital full")
        return self

    def discharge(self, arg1):
        """Release patient *arg1* and reset their bed. Returns self.

        BUG FIX: the original removed from self.patients while iterating
        over it, which can skip elements; a membership test is safe.
        """
        if arg1 in self.patients:
            self.patients.remove(arg1)
            arg1.bed_number = 0
        return self

    def displayHospital(self):
        """Print a summary of this hospital. Returns self."""
        print("hospital_name: ", self.name)
        print("hospital_capacity: ", self.capacity)
        print("hospital_patients: ", self.patients)
        return self
class Patient(object):
    """A patient identified by id and name, with allergy and bed tracking.

    Ported to Python 3: the original used Python 2 ``print`` statements.
    """

    def __init__(self, id, patient_name):
        self.allergies = []
        self.bed_number = 0  # 0 means "not admitted"
        self.id = id
        self.patient_name = patient_name

    def displayPatient(self):
        """Print this patient's details. Returns self for call chaining."""
        print("patient_id: ", self.id)
        print("patient_name: ", self.patient_name)
        print("patient_allergies: ", self.allergies)
        print("patient_bed: ", self.bed_number)
        return self
# Demo run: admit a patient, inspect both objects, then discharge.
hospital = Hospital("Hospital_Name", 3)
patient1 = Patient(23, "Patient_Name_1")
hospital.admit(patient1, 5).displayHospital()
patient1.displayPatient()
hospital.discharge(patient1)
patient1.displayPatient()
hospital.displayHospital()
import gym
import numpy as np
import math
import matplotlib.pyplot as plt
from collections import deque
env = gym.make('CartPole-v0')
# Prepare the Q Table
# create one bucket for each of the 4 features
# features are : [cart_position, cart_velocity, pole_position, pole_velocity]
# the first two features are less important and coded as 1 (i.e. ignored)
n_buckets = (1, 1, 6, 3)
# define the number of actions = 2; [left_move, right_move]
n_actions = env.action_space.n
# define the limits of the state space as lower and upper bound
l_bound = env.observation_space.low
u_bound = env.observation_space.high
state_bounds = np.column_stack((l_bound, u_bound))
# Replace the env's velocity limits with practical ranges for bucketing
# (the reported bounds are far too wide to discretize usefully).
state_bounds[1] = [-0.5, 0.5]
state_bounds[3] = [-np.radians(50), np.radians(50)]
# define Q table to store each [state,action] pair's value
q_table = np.zeros(n_buckets + (n_actions,))
# define a learning schedule for random action parameter epsilon and learning rate
# values decrease over time
# the values have been capped: max is 1 and 0.5 respectively
# and floored at 0.01 for both
def learning_schedule(i, use_schedule):
    """Return (epsilon, learning_rate, use_schedule) for episode *i*.

    *use_schedule* selects the exploration policy:
      0       -- epsilon decays logarithmically with the episode index;
      1/2/3   -- fixed epsilon of 1, 0.5 and 0.1 respectively.
    The learning rate always follows the same logarithmic decay, clipped
    to [0.01, 0.5]; epsilon for schedule 0 is clipped to [0.01, 1].

    Raises ValueError for an unknown schedule id (the original raised an
    accidental UnboundLocalError instead).
    """
    # Log-decay shared by every schedule's learning rate.
    learning_rate = max(0.01, min(0.5, 1.0 - np.log10((i + 1) / 25)))
    if use_schedule == 0:
        epsilon = max(0.01, min(1, 1.0 - np.log10((i + 1) / 25)))
    elif use_schedule == 1:
        epsilon = 1
    elif use_schedule == 2:
        epsilon = 0.5
    elif use_schedule == 3:
        epsilon = 0.1
    else:
        raise ValueError("unknown schedule: {}".format(use_schedule))
    return (epsilon, learning_rate, use_schedule)
# reward discount factor gamma
gamma = 0.99
# per-episode total rewards, one list per exploration schedule
learning_schedule_on = []     # schedule 0: decaying epsilon
learning_schedule_off_1 = []  # schedule 1: epsilon = 1
learning_schedule_off_2 = []  # schedule 2: epsilon = 0.5
learning_schedule_off_3 = []  # schedule 3: epsilon = 0.1
# rolling window for the 100-episode moving average (schedule 0 only)
last_100_rewards = deque(maxlen=100)
avg_100_reward = []
avg_reward = []
def pick_action(state, q_table, action_space, epsilon):
    """Epsilon-greedy action selection: with probability *epsilon* sample
    a random action, otherwise take the greedy action for *state*."""
    explore = np.random.random_sample() < epsilon
    return action_space.sample() if explore else np.argmax(q_table[state])
def get_discrete_state(observation, n_buckets, state_bounds):
    """Discretize a continuous observation into per-feature bucket indices.

    Values at or below the lower bound map to bucket 0, values at or
    above the upper bound map to the last bucket, and in-between values
    are assigned proportionally. Returns a tuple usable as a Q-table index.
    """
    buckets = []
    for i, value in enumerate(observation):
        low = state_bounds[i][0]
        high = state_bounds[i][1]
        if value <= low:
            bucket = 0
        elif value >= high:
            bucket = n_buckets[i] - 1
        else:
            bucket = int(((value - low) / (high - low)) * n_buckets[i])
        buckets.append(bucket)
    return tuple(buckets)
# Q-learning: train once per exploration schedule and record rewards.
n_episodes = 250
n_time_steps = 200
# schedule ids to compare (see learning_schedule)
schedule_switch = [0, 1, 2, 3]
for on_off in range(len(schedule_switch)):
    # Reseed per schedule so every schedule sees the same random stream.
    # NOTE(review): q_table is NOT reset between schedules, so later
    # schedules start from the previous schedule's learned values.
    np.random.seed(1486438)
    use_schedule = schedule_switch[on_off]
    for i_episode in range(n_episodes):
        epsilon = learning_schedule(i_episode, use_schedule)[0]
        learning_rate = learning_schedule(i_episode, use_schedule)[1]
        use_schedule = learning_schedule(i_episode, use_schedule)[2]
        observation = env.reset()
        rewards = 0
        # convert continuous values to discrete bucket indices
        state = get_discrete_state(observation, n_buckets, state_bounds)
        for t in range(n_time_steps):
            #env.render()
            action = pick_action(state, q_table, env.action_space, epsilon)
            observation, reward, done, info = env.step(action)
            rewards += reward
            next_state = get_discrete_state(observation, n_buckets, state_bounds)
            # update Q table
            # compute the best attainable value from the next state
            q_next_max = np.amax(q_table[next_state])
            # temporal-difference update rule
            q_table[state + (action,)] += learning_rate * (reward + gamma * q_next_max - q_table[state + (action,)])
            # move to next state
            state = next_state
            # On episode end, log the total reward into the list that
            # belongs to the active schedule (running averages only for 0).
            if done and use_schedule == 0:
                learning_schedule_on.append(rewards)
                avg_reward.append(np.mean(learning_schedule_on))
                last_100_rewards.append(rewards)
                avg_100_reward.append(np.mean(last_100_rewards))
                #print('Decreasing epsil.: Episode finished after {} timesteps, total rewards {}'.format(t+1, rewards))
                break
            elif done and use_schedule == 1:
                learning_schedule_off_1.append(rewards)
                #print('Large epsil.: Episode finished after {} timesteps, total rewards {}'.format(t+1, rewards))
                break
            elif done and use_schedule == 2:
                learning_schedule_off_2.append(rewards)
                #print('Medium epsil.: Episode finished after {} timesteps, total rewards {}'.format(t+1, rewards))
                break
            elif done and use_schedule == 3:
                learning_schedule_off_3.append(rewards)
                #print('Small epsil.: Episode finished after {} timesteps, total rewards {}'.format(t+1, rewards))
                break
env.close()
# Plot per-episode total reward for all four schedules
plt.figure(figsize=(8, 6))
plt.plot(learning_schedule_on, linewidth=1.5)
plt.plot(learning_schedule_off_1, linewidth=1.5)
plt.plot(learning_schedule_off_2, linewidth=1.5)
plt.plot(learning_schedule_off_3, linewidth=1.5)
plt.title('Q-Table: Total Reward after each episode', fontsize = 20)
plt.xlabel('Number of episodes', fontsize = 18)
plt.ylabel('Total reward', fontsize = 18)
plt.legend([r'decrease $\epsilon$', r'large $\epsilon$', r'medium $\epsilon$', r'small $\epsilon$'],
           prop={'size': 18}, frameon=False)
plt.show()
def running_mean(x, N=100):
    """Trailing moving average of *x* over windows of length *N*.

    Generalized: the window size N, previously hard-coded to 100, is now
    a parameter with the same default. Returns len(x) - N averages (the
    original output length), computed with one vectorized convolution
    instead of a Python loop. For len(x) <= N an empty array is returned
    (the original crashed on np.zeros of a negative size).
    """
    x = np.asarray(x, dtype=float)
    window = np.ones(N) / N
    # 'valid' mode yields len(x) - N + 1 means; trim to the original length.
    return np.convolve(x, window, mode="valid")[: max(x.shape[0] - N, 0)]
# Plot the 100-episode running mean of each schedule's rewards
plt.figure(figsize=(8, 6))
plt.plot(running_mean(learning_schedule_on), linewidth=1.5)
plt.plot(running_mean(learning_schedule_off_1), linewidth=1.5)
plt.plot(running_mean(learning_schedule_off_2), linewidth=1.5)
plt.plot(running_mean(learning_schedule_off_3), linewidth=1.5)
plt.title('Q-Table: Mean last 100 rewards per episode', fontsize = 20)
plt.xlabel('Number of episodes', fontsize = 18)
plt.ylabel('Mean Reward', fontsize = 18)
plt.legend([r'decrease $\epsilon$', r'large $\epsilon$', r'medium $\epsilon$', r'small $\epsilon$'],
           prop={'size': 18}, frameon=False)
plt.show()
import numpy as np
from scipy.stats import sem
from uncertainties import ufloat
import uncertainties.unumpy as unp
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
def Mean_Std(Werte):
    """Mean of *Werte* with the standard error of the mean as uncertainty."""
    scale = 1 / np.sqrt(len(Werte))
    return ufloat(np.mean(Werte), scale * np.std(Werte, ddof=1))
# Wheatstone bridge: columns are presumably [R2, R3, R4] -- TODO confirm
# against the lab protocol; unknown R is recovered as R2 * R3 / R4.
Messung_Wert_10 = np.array([[1000, 196, 804],
                            [664 , 268, 732],
                            [332 , 422, 578]])
Messung_Wert_12 = np.array([[332 , 543, 457],
                            [664 , 373, 627],
                            [1000, 284, 716]])
Wheatstone10= np.array([row[0] * row[1] / row[2] for row in Messung_Wert_10])
Wheatstone12 = np.array([row[0] * row[1] / row[2] for row in Messung_Wert_12])
print("Ergebnisse für Wheatstone: ", '\n', "Wert 10: ", Wheatstone10, Mean_Std(Wheatstone10), '\n', "Wert 12: ", Wheatstone12, Mean_Std(Wheatstone12), '\n')
# Kapazität (capacitance bridge): note the inverted ratio row[2] / row[1].
Messung_Wert_3 = np.array([[450, 519, 481],
                           [399, 490, 510],
                           [597, 590, 410]])
Messung_Wert_1 = np.array([[450, 407, 593],
                           [399, 380, 620],
                           [597, 478, 522]])
Kapazitäten3 = np.array([row[0] * row[2] / row[1] for row in Messung_Wert_3])
Kapazitäten1 = np.array([row[0] * row[2] / row[1] for row in Messung_Wert_1])
print("Ergebnisse für Kapazitäten: ", '\n', "Wert 3: ", Kapazitäten3, Mean_Std(Kapazitäten3), '\n', "Wert 1: ", Kapazitäten1, Mean_Std(Kapazitäten1), '\n')
# RC - Glied: four columns; capacitance and resistance are derived from
# different column ratios of the same measurement.
Messung_Wert_8 = np.array([[450, 371, 606, 394],
                           [399, 418, 578, 422],
                           [597, 278, 673, 327]])
Messung_Wert_9 = np.array([[450, 466, 511, 489],
                           [399, 524, 482, 518],
                           [597, 352, 581, 419]])
Kapazitäten8 = np.array([row[0] * row[3] / row[2] for row in Messung_Wert_8])
Kapazitäten9 = np.array([row[0] * row[3] / row[2] for row in Messung_Wert_9])
Wiederstand8 = np.array([row[1] * row[2] / row[3] for row in Messung_Wert_8])
Wiederstand9 = np.array([row[1] * row[2] / row[3] for row in Messung_Wert_9])
print("Ergebnisse für RC-Glied: ", '\n')
print("Ergebnisse Kapazitäten: ", '\n', "Wert 8: ", Kapazitäten8, Mean_Std(Kapazitäten8), '\n', "Wert 9: ", Kapazitäten9, Mean_Std(Kapazitäten9))
print("Ergebnisse Wiederstände: ", '\n', "Wert 8: ", Wiederstand8, Mean_Std(Wiederstand8), '\n', "Wert 9: ", Wiederstand9, Mean_Std(Wiederstand9), '\n')
# RL - Glied klassisch (classical inductance bridge)
Klassisch_Wert_16 = np.array([[14.6, 45, 907, 83],
                              [20.1, 57, 875, 125],
                              [27.5, 85, 837, 163]])
Klassisch_Wert_18 = np.array([[14.6, 108, 775, 225],
                              [20.1, 143, 715, 285],
                              [27.5, 197, 648, 352]])
Induktivität16 = np.array([row[0] * row[2] / row[3] for row in Klassisch_Wert_16])
Induktivität18 = np.array([row[0] * row[2] / row[3] for row in Klassisch_Wert_18])
Wiederstand16 = np.array([row[1] * row[2] / row[3] for row in Klassisch_Wert_16])
Wiederstand18 = np.array([row[1] * row[2] / row[3] for row in Klassisch_Wert_18])
print("Ergebnisse für RL-Glied klassisch: ", '\n')
print("Ergebnisse Induktivität: ", '\n', "Wert 16: ", Induktivität16, Mean_Std(Induktivität16), '\n', "Wert 18: ", Induktivität18, Mean_Std(Induktivität18))
print("Ergebnisse Wiederstände: ", '\n', "Wert 16: ", Wiederstand16, Mean_Std(Wiederstand16), '\n', "Wert 18: ", Wiederstand18, Mean_Std(Wiederstand18), '\n')
# RL - Glied Maxwell (Maxwell bridge); C4 is the known reference
# capacitance in farads, so L = R2 * R3 * C4.
C4 = 399 * 10**(-6)
Maxwell_Wert_18 = np.array([[1000, 128, 347],
                            [664, 193, 349],
                            [332, 382, 348]])
Maxwell_Wert_16 = np.array([[1000, 347, 829],
                            [664, 523, 829],
                            [332, 1036, 829]])
mInduktivität16 = np.array([row[0] * row[1] * C4 for row in Maxwell_Wert_16])
mInduktivität18 = np.array([row[0] * row[1] * C4 for row in Maxwell_Wert_18])
mWiederstand16 = np.array([row[1] * row[0] / row[2] for row in Maxwell_Wert_16])
mWiederstand18 = np.array([row[1] * row[0] / row[2] for row in Maxwell_Wert_18])
print("Ergebnisse für RL-Glied Maxwell: ", '\n')
print("Ergebnisse Induktivität: ", '\n', "Wert 16: ", mInduktivität16, Mean_Std(mInduktivität16), '\n', "Wert 18: ", mInduktivität18, Mean_Std(mInduktivität18))
print("Ergebnisse Wiederstände: ", '\n', "Wert 16: ", mWiederstand16, Mean_Std(mWiederstand16), '\n', "Wert 18: ", mWiederstand18, Mean_Std(mWiederstand18), '\n')
|
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Field, Fieldset, HTML, Div, Layout, Submit
from crispy_forms.bootstrap import FormActions
from django import forms
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from core.forms import RequestUserValidationMixin
from .models import Review
class ReviewUpdateForm(RequestUserValidationMixin, forms.ModelForm):
    """Model form for creating/updating a proposal Review.

    reviewer and proposal are excluded from the form and injected in
    save() from the request user and the constructor argument.
    """

    class Meta:
        model = Review
        exclude = ('reviewer', 'proposal', )

    def __init__(self, proposal, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._proposal = proposal
        # Make sure user submits the overall impact score; the individual
        # criteria scores (commented out) remain optional.
        for score_field in [
                # 'significance', 'innovation',
                # 'approach', 'investigator', 'environment',
                'overall_impact'
        ]:
            self.fields[score_field].required = True

    def save(self, commit=True):
        """Attach reviewer (request user) and proposal before saving."""
        review = super().save(commit=False)
        # self._request is provided by RequestUserValidationMixin.
        review.reviewer = self._request.user
        review.proposal = self._proposal
        if commit:
            review.save()
        return review

    @cached_property
    def helper(self):
        """django-crispy-forms layout: criteria scores, comments, submit."""
        helper = FormHelper()
        helper.form_class = 'form-horizontal'
        helper.label_class = 'col-lg-2'
        helper.field_class = 'col-lg-8'
        helper.layout = Layout(
            Fieldset(
                'Criteria',
                'significance',
                'innovation',
                'approach',
                'investigator',
                'environment',
                'overall_impact',
                HTML("""
                <p>
                Scores: 1-3 from unbelievable to excellent;
                4-6 from good to so so;
                7-9 from "will not be discussed" to "will not be discussed. <br>
                Ref: <a href="https://grants.nih.gov/grants/peer/guidelines_general/scoring_system_and_procedure.pdf" target="_blank">
                NIH scoring system and procedure</a>
                </p>
                """),
            ),
            Fieldset(
                'Comment',
                'comment',
                'comment_disclosure',
            ),
            FormActions(
                Submit(
                    'save', _('Submit'), css_class='btn-lg btn-block',
                )
            )
        )
        return helper
|
#
# PyBullet gym env for Franka Emika robot.
#
# @contactrika
#
import os
import numpy as np
# Human-friendly printing of long state/qpos vectors during debugging.
np.set_printoptions(precision=4, linewidth=150, threshold=np.inf, suppress=True)
from gym_bullet_extensions.bullet_manipulator import BulletManipulator
from gym_bullet_extensions.envs.manipulator_env import ManipulatorEnv
class FrankaEnv(ManipulatorEnv):
    """PyBullet gym environment for the Franka Emika Panda arm."""

    def __init__(self, num_objects, max_episode_steps,
                 control_mode='velocity', visualize=False, debug_level=0):
        # State contains joint positions only (velocities excluded).
        self.pos_only_state = True  # don't include velocities in state
        robot = BulletManipulator(
            os.path.join('franka_robot', 'franka_small_fingers.urdf'),
            control_mode=control_mode,
            ee_joint_name='panda_joint7', ee_link_name='panda_hand',
            base_pos=[0,0,0], dt=1.0/500.0,
            # 7 arm joints + 2 finger joints: separate PD gains for each group.
            kp=([200.0]*7 + [1.0]*2),
            kd=([2.0]*7 + [0.1]*2),
            min_z=0.00,
            visualize=visualize)
        # Workspace table extents: [min_x, max_x, min_y, max_y] in meters.
        table_minmax_x_minmax_y = np.array([0.0, 1.50, -0.45, 0.45])
        super(FrankaEnv, self).__init__(
            robot, num_objects, table_minmax_x_minmax_y, 'cylinder_block.urdf',
            max_episode_steps, visualize, debug_level)
        self.set_join_limits_for_forward_workspace()

    def set_join_limits_for_forward_workspace(self):
        # Set reasonable joint limits for operating the space mainly in front
        # of the robot. Our main workspace is the table in front of the robot,
        # so we are not interested in exploratory motions outside of the main
        # workspace.
        minpos = np.copy(self.robot.get_minpos())
        maxpos = np.copy(self.robot.get_maxpos())
        # operate in the workspace in front of the robot
        minpos[0] = -0.5; maxpos[0] = 0.5
        minpos[1] = 0.0
        minpos[2] = -0.5; maxpos[2] = 0.5
        #minpos[3] = -3.0; maxpos[3] = -1.0 # don't stretch out the elbo
        self.robot.set_joint_limits(minpos, maxpos)

    def get_init_pos(self):
        """Return (init_qpos, ee_pos, ee_quat, fing_dist) for episode reset."""
        ee_pos = np.array([0.25,0.30,0.30])
        # End effector pointing straight down (pi rotation about x).
        ee_quat = np.array(self.robot.sim.getQuaternionFromEuler([np.pi,0,0]))
        fing_dist = 0.0
        #init_qpos = self.robot.ee_pos_to_qpos(
        #    ee_pos, ee_quat, fing_dist=fing_dist)
        #assert(init_qpos is not None)
        # Use a manual qpos (to avoid instabilities of IK solutions).
        # NOTE(review): assumed consistent with ee_pos/ee_quat above -- confirm.
        init_qpos = np.array([ 0.4239, 0., 0.4799, -2.727,
                               0.2047, 2.4689, 1.5125, 0., 0.])
        return init_qpos, ee_pos, ee_quat, fing_dist

    def get_all_init_object_poses(self, num_objects):
        """Return initial positions and orientations for up to two objects."""
        all_init_object_poses = np.array([
            [0.32,0.15,0.11], [0.50,0.15,0.11]])
        # Identity quaternions: objects start unrotated.
        init_object_quats = [[0,0,0,1]]*num_objects
        return all_init_object_poses[0:num_objects], init_object_quats

    def get_is_bad(self, debug=False):
        """Delegate 'bad state' detection entirely to the parent class."""
        bad = super(FrankaEnv, self).get_is_bad(debug)
        return bad
|
import pandas as pd
import matplotlib.pyplot as plt
# Default figure size for all plots (width, height in inches).
plt.rcParams['figure.figsize'] = (15, 5)
# Expected columns: 'Tiempo (segundos)' plus one throughput series.
df = pd.read_csv('graf.csv')
print(df)
df.set_index('Tiempo (segundos)', inplace=True)
# Single blue line: throughput against elapsed time.
df.plot(ylabel='Throughput (Request/sec)',grid=True, figsize=(15, 5), c='b')
plt.show()
from datetime import datetime
from flask import render_template, session, redirect, url_for
from . import main
from .. import db
from ..models import User
@main.route('/', methods=['GET', 'POST'])
def index():
    """Render the site index (placeholder response while the template/
    session flow is under construction).

    NOTE(review): removed dead commented-out render_template code; restore
    via render_template('index.html', form=..., name=session.get('name'),
    known=session.get('known', False), current_time=datetime.utcnow())
    once the form handling is ready.
    """
    return "Hello Flask!2"
|
from flask import Flask
# from flask.ext.sqlalchemy import SQLAlchemy

# Empty static_url_path serves static files from the application root.
app = Flask(__name__, static_url_path='')
app.config.from_object('config')
# db = SQLAlchemy(app)

# Imported last (not at the top) so app.routes can import `app` without
# creating a circular import at module load time.
from app.routes import index
|
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import psutil
import signal
import socket
import subprocess
import sys
import threading
import time
import traceback
from oslo_config import cfg
from oslo_log import log
from oslo_service import loopingcall
from oslo_service import service as os_service
from tooz import coordination
from monasca_transform.config.config_initializer import ConfigInitializer
from monasca_transform.log_utils import LogUtils
# Global oslo.config entry point; option groups are registered elsewhere.
CONF = cfg.CONF
# Token searched for in process command lines to locate the Spark driver.
SPARK_SUBMIT_PROC_NAME = "spark-submit"
def main():
    """Create the transform service worker thread and start it."""
    TransformService().start()
def shutdown_all_threads_and_die():
    """Shut down all threads and exit process.

    Hit it with a hammer to kill all threads and die.
    """
    log.getLogger(__name__).info('Monasca Transform service stopping...')
    # os._exit skips cleanup handlers on purpose: nothing may block exit.
    os._exit(1)
def get_process(proc_name):
    """Return the first psutil.Process whose command line contains
    *proc_name* as a whitespace-separated token, or None if not found
    (or if process inspection fails)."""
    LOG = log.getLogger(__name__)
    try:
        for pr in psutil.process_iter():
            for args in pr.cmdline():
                if proc_name in args.split(" "):
                    return pr
    except psutil.Error:
        # FIX: narrowed from `except BaseException`, which also swallowed
        # KeyboardInterrupt/SystemExit; only psutil failures (e.g. a
        # process exiting mid-scan) are expected and reported here.
        LOG.error("Error fetching {%s} process..." % proc_name)
    return None
def stop_spark_submit_process():
    """Stop spark submit program."""
    LOG = log.getLogger(__name__)
    try:
        # Locate the running driver, if any.
        driver = get_process(SPARK_SUBMIT_PROC_NAME)
        if driver:
            # SIGTERM the driver's direct children first, then the driver.
            for child in driver.children(recursive=False):
                LOG.info("Terminate child pid {%s} ..." % str(child.pid))
                child.terminate()
            LOG.info("Terminate pid {%s} ..." % str(driver.pid))
            driver.terminate()
    except Exception as e:
        LOG.error("Error killing spark submit "
                  "process: got exception: {%s}" % str(e))
class Transform(os_service.Service):
    """Class used with Openstack service."""

    LOG = log.getLogger(__name__)

    def __init__(self, threads=1):
        super(Transform, self).__init__(threads)

    def signal_handler(self, signal_number, stack_frame):
        """Catch stop requests and appropriately shut down."""
        shutdown_all_threads_and_die()

    def start(self):
        """Run the service; any fatal error tears the process down."""
        try:
            # Register to catch stop requests
            signal.signal(signal.SIGTERM, self.signal_handler)
            main()
        except BaseException:
            self.LOG.exception("Monasca Transform service "
                               "encountered fatal error. "
                               "Shutting down all threads and exiting")
            shutdown_all_threads_and_die()

    def stop(self):
        """Stop the Spark driver, then the oslo service machinery."""
        stop_spark_submit_process()
        # BUG FIX: super(os_service.Service, self) started the MRO walk
        # ABOVE os_service.Service, skipping Service.stop() entirely;
        # starting at Transform runs the parent's stop logic as intended.
        super(Transform, self).stop()
class TransformService(threading.Thread):
previously_running = False
LOG = log.getLogger(__name__)
def __init__(self):
super(TransformService, self).__init__()
self.coordinator = None
self.group = CONF.service.coordinator_group
# A unique name used for establishing election candidacy
self.my_host_name = socket.getfqdn()
# periodic check
leader_check = loopingcall.FixedIntervalLoopingCall(
self.periodic_leader_check)
leader_check.start(interval=float(
CONF.service.election_polling_frequency))
def check_if_still_leader(self):
"""Return true if the this host is the leader"""
leader = None
try:
leader = self.coordinator.get_leader(self.group).get()
except BaseException:
self.LOG.info('No leader elected yet for group %s' %
(self.group))
if leader and self.my_host_name == leader:
return True
# default
return False
def periodic_leader_check(self):
self.LOG.debug("Called periodic_leader_check...")
try:
if self.previously_running:
if not self.check_if_still_leader():
# stop spark submit process
stop_spark_submit_process()
# stand down as a leader
try:
self.coordinator.stand_down_group_leader(
self.group)
except BaseException as e:
self.LOG.info("Host %s cannot stand down as "
"leader for group %s: "
"got exception {%s}" %
(self.my_host_name, self.group,
str(e)))
# reset state
self.previously_running = False
except BaseException as e:
self.LOG.info("periodic_leader_check: "
"caught unhandled exception: {%s}" % str(e))
def when_i_am_elected_leader(self, event):
"""Callback when this host gets elected leader."""
# set running state
self.previously_running = True
self.LOG.info("Monasca Transform service running on %s "
"has been elected leader" % str(self.my_host_name))
if CONF.service.spark_python_files:
pyfiles = (" --py-files %s"
% CONF.service.spark_python_files)
else:
pyfiles = ''
event_logging_dest = ''
if (CONF.service.spark_event_logging_enabled and
CONF.service.spark_event_logging_dest):
event_logging_dest = (
"--conf spark.eventLog.dir="
"file://%s" %
CONF.service.spark_event_logging_dest)
# Build the command to start the Spark driver
spark_cmd = "".join((
"export SPARK_HOME=",
CONF.service.spark_home,
" && ",
"spark-submit --master ",
CONF.service.spark_master_list,
" --conf spark.eventLog.enabled=",
CONF.service.spark_event_logging_enabled,
event_logging_dest,
" --jars " + CONF.service.spark_jars_list,
pyfiles,
" " + CONF.service.spark_driver))
# Start the Spark driver
# (specify shell=True in order to
# correctly handle wildcards in the spark_cmd)
subprocess.call(spark_cmd, shell=True)
    def run(self):
        """Main service loop: join the election group and poll until elected.

        Restarts the coordinator from scratch on any unhandled exception.
        """
        self.LOG.info('The host of this Monasca Transform service is ' +
                      self.my_host_name)
        # Loop until the service is stopped
        while True:
            try:
                self.previously_running = False
                # Start an election coordinator
                self.coordinator = coordination.get_coordinator(
                    CONF.service.coordinator_address, self.my_host_name)
                self.coordinator.start()
                # Create a coordination/election group
                try:
                    request = self.coordinator.create_group(self.group)
                    request.get()
                except coordination.GroupAlreadyExist:
                    self.LOG.info('Group %s already exists' % self.group)
                # Join the coordination/election group
                try:
                    request = self.coordinator.join_group(self.group)
                    request.get()
                except coordination.MemberAlreadyExist:
                    self.LOG.info('Host already joined to group %s as %s' %
                                  (self.group, self.my_host_name))
                # Announce the candidacy and wait to be elected
                # (when_i_am_elected_leader sets previously_running = True).
                self.coordinator.watch_elected_as_leader(
                    self.group,
                    self.when_i_am_elected_leader)
                while self.previously_running is False:
                    self.LOG.debug('Monasca Transform service on %s is '
                                   'checking election results...'
                                   % self.my_host_name)
                    # heartbeat keeps membership alive; run_watchers fires
                    # the election callback when this host wins.
                    self.coordinator.heartbeat()
                    self.coordinator.run_watchers()
                    if self.previously_running is True:
                        try:
                            # Leave/exit the coordination/election group
                            request = self.coordinator.leave_group(self.group)
                            request.get()
                        except coordination.MemberNotJoined:
                            self.LOG.info("Host has not yet "
                                          "joined group %s as %s" %
                                          (self.group, self.my_host_name))
                    time.sleep(float(CONF.service.election_polling_frequency))
                self.coordinator.stop()
            except BaseException as e:
                # catch any unhandled exception and continue
                self.LOG.info("Ran into unhandled exception: {%s}" % str(e))
                self.LOG.info("Going to restart coordinator again...")
                traceback.print_exc()
def main_service():
    """Entry point used when run as an Openstack service."""
    # Configuration and logging must be set up before launching anything.
    ConfigInitializer.basic_config()
    LogUtils.init_logger(__name__)
    # Launch the transform service and block until it exits.
    service_launcher = os_service.ServiceLauncher(
        cfg.CONF, restart_method='mutate')
    service_launcher.launch_service(Transform())
    service_launcher.wait()
# Used if run without Openstack service.
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this chunk — only
    # main_service() is visible here. Confirm `main` exists earlier
    # in the file, otherwise this raises NameError when run directly.
    sys.exit(main())
|
def backward_string_by_word(text: str) -> str:
    """Reverse each space-separated word of *text*, preserving word order."""
    reversed_words = []
    for word in text.split(" "):
        reversed_words.append(word[::-1])
    return " ".join(reversed_words)
# este o programa nao aceita
from numpy import*
from numpy.linalg import*
# NOTE(review): eval() on raw input is unsafe — acceptable only for a
# judge-style exercise, never for untrusted input.
horas = array(eval(input("horas: ")))
# BUG FIXES vs. the rejected version:
#  - do not shadow numpy's `zeros` with the result array;
#  - iterate over columns (shape[1]) rather than rows, so non-square
#    matrices are summed correctly (matches the accepted version below).
n_cols = shape(horas)[1]
col_totals = zeros(n_cols, dtype=int)
for j in range(n_cols):
    col_totals[j] = sum(horas[:, j])
# Print the 1-based index of every column with the maximum total.
for p in range(n_cols):
    if col_totals[p] == max(col_totals):
        print(p + 1)
# esse o programa aceita
from numpy import *
from numpy.linalg import *
# NOTE(review): eval() on raw input is unsafe — acceptable only for a
# judge-style exercise, never for untrusted input.
n = array(eval(input("numero: ")))
c = shape(n)[1]  # number of columns
l = shape(n)[0]  # number of rows (kept for clarity; unused below)
# Generalization: size the totals array by the actual column count
# instead of the hard-coded 7, so any matrix width works.
z = zeros(c, dtype=int)
for j in range(c):
    z[j] = sum(n[:, j])
# Print the 1-based index of every column with the maximum total.
for p in range(c):
    if z[p] == max(z):
        print(p + 1)
|
'''
def line():
print("---------------------------")
'''
'''
def line(size):
c = '-'
line = c*size
print(line)
'''
def line(size, c='-'):
    """Print a horizontal rule made of *size* copies of character *c*."""
    print(c * size)
def rectangle(width, height, c='-'):
    """Print a solid rectangle of *width* x *height* characters *c*.

    BUG FIX: the original `while h <= height` loop printed height + 1
    rows; a rectangle of the requested height has exactly `height` rows.
    """
    row = c * width  # e.g. '-------'
    for _ in range(height):
        print(row)
# Demo calls: draw a few horizontal rules, then a filled rectangle.
line(10)
line(100,'o')
line(100,'*')
rectangle(40,10,'*')
'''
print()
rectangle(40,20)
rectangle(100,25)
'''
|
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import find_peaks_cwt
from DrawPlots import DrawPlots
from Fft import Fft
import scipy.io.wavfile as wav
# fs, audio_data = wav.read('../wav/pick/normal_pick_4.wav')
# fft = Fft.get_fft(audio_data, 2.75)
#DrawPlots.fft_plot((fft, ), 'log', 'Amplitude', 'Frequency')
# d = int(round(len(abs_fft)/2))
#
# fig = plt.figure(figsize=(8, 6))
# fft_plot = fig.add_subplot(111, axisbg='#FFFFCC')
#
# fft_plot.semilogy(abs_fft[:(d-1)], 'r')
# # plt.plot(audio)
# fft_plot.set_ylabel('Audio time plot')
# # plt.show()
# # set useblit = True on gtkagg for enhanced performance
# cursor = Cursor(fft_plot, useblit=True, color='red', linewidth=2)
# plt.show()
# peaks = find_peaks_cwt(abs_fft[0:2000], np.arange(1, 50))
# max_a = max(abs_fft)
# th = max_a/100
# main_fqs = {}
# for peak in peaks:
# if abs_fft[peak] >= th:
# main_fqs[peak] = abs_fft[peak]
#
# print(main_fqs)
|
import turtle
import winsound
# --- Window setup ---
win = turtle.Screen()
win.title("Pong by Sean Moore")
win.bgcolor("black")
win.setup(width = 800, height = 600)
win.tracer(0) # stops window from updating automatically
# Score
score_a = 0
score_b = 0
# Paddle A (left player)
paddle_a = turtle.Turtle()
paddle_a.speed(0) # Speed of animation (max speed)
paddle_a.shape("square")
paddle_a.color("white")
paddle_a.shapesize(stretch_wid = 5, stretch_len = 1)
paddle_a.penup()
paddle_a.goto(-350, 0) # Starts paddle a on left side in the middle
# Paddle B (right player)
paddle_b = turtle.Turtle()
paddle_b.speed(0) # Speed of animation (max speed)
paddle_b.shape("square")
paddle_b.color("white")
paddle_b.shapesize(stretch_wid = 5, stretch_len = 1)
paddle_b.penup() # So the object does not draw straight up
paddle_b.goto(350, 0) # Starts paddle a right side in the middle
# Ball
ball = turtle.Turtle()
ball.speed(0) # Speed of animation (max speed)
ball.shape("square")
ball.color("white")
ball.penup() # So the object does not draw straight up
ball.goto(0, 0) # centers ball
# dx/dy are the per-frame velocity components (pixels per update)
ball.dx = 0.09 # ball movement speed
ball.dy = 0.09
# Pen: invisible turtle used only to draw the scoreboard text
pen = turtle.Turtle()
pen.speed(0)
pen.color("white")
pen.penup()
pen.hideturtle() # Hides pen, only need the text
pen.goto(0, 260)
pen.write("Player A: 0 Player B: 0", align = "center", font = ("Courier", 20, "normal"))
# Function for keyboard inputs
def paddle_a_up():
    """Move paddle A up by 40 px."""
    paddle_a.sety(paddle_a.ycor() + 40)
def paddle_a_down():
    """Move paddle A down by 40 px."""
    paddle_a.sety(paddle_a.ycor() - 40)
def paddle_b_up():
    """Move paddle B up by 40 px."""
    paddle_b.sety(paddle_b.ycor() + 40)
def paddle_b_down():
    """Move paddle B down by 40 px."""
    paddle_b.sety(paddle_b.ycor() - 40)
# Keyboard binding
# A single listen() call registers the window for key events; the
# original redundantly repeated it before every binding.
win.listen()
win.onkeypress(paddle_a_up, "w") # When "w" is pressed, move up
win.onkeypress(paddle_a_down, "s") # When "s" is pressed, move down
win.onkeypress(paddle_b_up, "Up") # When "up arrow" is pressed, move up
win.onkeypress(paddle_b_down, "Down") # When "down arrow" is pressed, move down
#main game loop
while True:
    win.update() # every time loop runs it updates screen
    # Move the ball
    ball.setx(ball.xcor() + ball.dx)
    ball.sety(ball.ycor() + ball.dy)
    # Border checking: bounce off the top and bottom walls
    if ball.ycor() > 290:
        ball.sety(290)
        ball.dy *= -1 #reversed direction once boarder is found
        winsound.PlaySound("sounds/bounce.wav", winsound.SND_ASYNC)
    if ball.ycor() < -290:
        ball.sety(-290)
        ball.dy *= -1 #reversed direction once boarder is found
        winsound.PlaySound("sounds/bounce.wav", winsound.SND_ASYNC)
    # Ball exits right wall: score, re-centre, send it back the other way.
    # NOTE(review): the point is credited to score_a when the ball exits on
    # B's side — confirm which wall belongs to which player.
    if ball.xcor() > 390:
        ball.goto(0, 0) # Centers ball
        ball.dx *= -1
        score_a += 1
        pen.clear()
        pen.write("Player A: {} Player B: {}".format(score_a, score_b), align = "center", font = ("Courier", 20, "normal"))
    # Ball exits left wall.
    if ball.xcor() < -390:
        ball.goto(0, 0) # Centers ball
        ball.dx *= -1
        score_b += 1
        pen.clear()
        pen.write("Player A: {} Player B: {}".format(score_a, score_b), align = "center", font = ("Courier", 20, "normal"))
    # Paddle and ball collisions (paddle is 100 px tall -> +/-50 around ycor)
    if (ball.xcor() > 340 and ball.xcor() < 350) and (ball.ycor() < paddle_b.ycor() + 50 and ball.ycor() > paddle_b.ycor() - 50):
        ball.setx(340)
        ball.dx *= -1
        winsound.PlaySound("sounds/bounce.wav", winsound.SND_ASYNC)
    if (ball.xcor() < -340 and ball.xcor() > -350) and (ball.ycor() < paddle_a.ycor() + 50 and ball.ycor() > paddle_a.ycor() - 50):
        ball.setx(-340)
        ball.dx *= -1
        winsound.PlaySound("sounds/bounce.wav", winsound.SND_ASYNC)
|
from UIAutomation.Page.Mobile.LongCardPage import LongCardPage
from UIAutomation.TestCase.BaseTestCase import BaseTestCase
from UIAutomation.Utils import get_user_id
from UIAutomation.Utils.HttpWrapper import eject_logged_user
from .FunSmoke004SQL import reduction_transport_contract, get_new_transport_contract, delete_transport_contract
from UIAutomation.Page.Mobile.Android.ContractCarriage.ContractCarriagePage import ContractCarriage
from UIAutomation.Page.Mobile.LoginPage import LoginPage
__author__ = 'zhoujin'
class FunSmoke014(BaseTestCase):
    """Sign a transport-supply contract.

    First generate a transport-supply contract, then sign it.
    """
    def setUp(self):
        # Reset DB fixtures: delete any leftover contract, then restore
        # the baseline transport-contract data.
        delete_transport_contract()
        reduction_transport_contract()
        mobile = 15711041212
        password = 123456
        self.user_id = get_user_id(mobile)
        BaseTestCase.setUp(self)
        # eject_logged_user(mobile, password)
        LoginPage(self.driver).login(username=mobile, password=password)  # log in
        pass
    def test_TransportContract(self):
        LongCardPage(self.driver).click_expected_card(self.user_id, '找运输供应')
        signed_carriage_supply_contract_instant = ContractCarriage(self.driver)
        signed_carriage_supply_contract_instant.page_factory()
        # Sign the transport contract via the UI.
        signed_carriage_supply_contract_instant.click_the_first_carriage_card()  # tap the first card
        signed_carriage_supply_contract_instant.choice_amount_and_submit()  # select all and submit
        # Verify the DB state after signing.
        new_transport_contract = get_new_transport_contract(self.user_id, contract_ukid='51817000000009223')
        new_transport_contract_no = int(new_transport_contract['new_transport_contract_no'])
        operation_record_no = int(new_transport_contract['operation_record_no'])
        rs_repository_no = int(new_transport_contract['rs_repository_no'])
        print(new_transport_contract_no, rs_repository_no, operation_record_no)
        # Each new contract/operation record should produce 2 repository rows.
        assert new_transport_contract_no * 2 == rs_repository_no == operation_record_no * 2
        print('签订运输供应契约成功')
    def tearDown(self):
        BaseTestCase.tearDown(self)
        pass
|
# encoding=utf8
import urllib2
import re
from bs4 import BeautifulSoup
class Tie_Ba_Spider(object):
"""docstring for Tie_Ba_Spider"""
def __init__(self, baseUrl , seeLZ):
super(Tie_Ba_Spider, self).__init__()
self.baseUrl = baseUrl
self.seeLZ = '?see_lz=' + str(seeLZ)
def saveCont(self , mytxt ,filename=None):
file = './tieba/'+filename
f = open(file,'a')
for coni in mytxt:
f.write(coni.encode('utf-8'))
f.close()
def getPage_Content(self , pageNum):
try:
url = self.baseUrl + self.seeLZ + '&pn=' + str(pageNum)
print url
requset = urllib2.Request(url)
response = urllib2.urlopen(requset)
return response.read()
except urllib2.URLError , e:
if hasattr(e, 'reason'):
print 'error' , e.reason
return None
def parse(self , html_cont):
contents = [ ]
next_line = u'\n-------------------------------------------------------------------------------------------------------------------------------------------\n'
soup =BeautifulSoup(cc,'html.parser' )
title_node = soup.find("h3", class_="core_title_txt pull-left text-overflow ")
contents.append(title_node.get_text())
contents.append(next_line)
con = soup.find_all("div", class_="d_post_content j_d_post_content ")
for i in con:
contents.append(i.get_text())
contents.append(next_line)
return contents
def get_Maxpage(self , html_cont):
patternPage = re.compile(r'<span class="red">(.*?)</span>')
result = re.search(patternPage,cc)
return int( result.group(1) )
if __name__=='__main__':
    baseURL = 'http://tieba.baidu.com/p/3138733512'
    # Fetch page 1 first, only to learn the total page count.
    tbs = Tie_Ba_Spider(baseURL , 1)
    cc = tbs.getPage_Content(1)
    print tbs.get_Maxpage(cc)
    # Fetch, parse and append every page of the thread to r.txt.
    # NOTE(review): a fresh spider is constructed for every page; a single
    # instance would suffice — confirm before changing.
    for i in range(1 , tbs.get_Maxpage(cc)+1 ):
        tbs = Tie_Ba_Spider(baseURL , 1)
        cc = tbs.getPage_Content( i )
        ttt = tbs.parse(cc)
        tbs.saveCont(ttt , 'r.txt')
# soup =BeautifulSoup(cc,'html.parser' )
# title_node = soup.find("h3", class_="core_title_txt pull-left text-overflow ")
# print title_node.get_text() , type(title_node.get_text())
# file = open('./tieba/r.txt' , 'w')
# con = soup.find_all("div", class_="d_post_content j_d_post_content ")
# for i in con:
# print '----------------------------------------------------------------------\n'
# print i.get_text()
# file.write( i.get_text().encode('utf-8') )
# file.write('\n--------------------------------------------------------------------------------------\n')
# patternPage = re.compile(r'<span class="red">(.*?)</span>')
# all_page = soup.find_all(patternPage)
# print all_page
# # pattern = re.compile('<li class="l_reply_num.*?</span>.*?<span.*?>(.*?)</span>',re.S)
# result = re.search(patternPage,cc)
# print result.group()
# print result.group(1)
|
"""added pipeline_catalog logic
Revision ID: f7208a6fdec4
Revises: d809ee2de92e
Create Date: 2016-08-01 19:41:29.335590
"""
# revision identifiers, used by Alembic.
revision = 'f7208a6fdec4'
down_revision = 'd809ee2de92e'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    """Create pipeline_catalog and collapse the per-stage task columns.

    The per-stage *_tasktype_id / *_task_id columns on pipeline_types and
    pipelines are replaced by rows in pipeline_catalog plus a single
    pipelines.current_task_id pointer.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('pipeline_catalog',
    sa.Column('pipeline_type_id', sa.Integer(), nullable=False),
    sa.Column('current_task_type_id', sa.Integer(), nullable=True),
    sa.Column('next_task_type_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['current_task_type_id'], ['task_types.id'], ),
    sa.ForeignKeyConstraint(['next_task_type_id'], ['task_types.id'], ),
    sa.ForeignKeyConstraint(['pipeline_type_id'], ['pipeline_types.id'], ),
    sa.PrimaryKeyConstraint('pipeline_type_id')
    )
    # Drop the FKs before the columns they reference can be removed.
    op.drop_constraint(u'pipeline_types_ibfk_4', 'pipeline_types', type_='foreignkey')
    op.drop_constraint(u'pipeline_types_ibfk_7', 'pipeline_types', type_='foreignkey')
    op.drop_constraint(u'pipeline_types_ibfk_6', 'pipeline_types', type_='foreignkey')
    op.drop_constraint(u'pipeline_types_ibfk_5', 'pipeline_types', type_='foreignkey')
    op.drop_constraint(u'pipeline_types_ibfk_2', 'pipeline_types', type_='foreignkey')
    op.drop_constraint(u'pipeline_types_ibfk_3', 'pipeline_types', type_='foreignkey')
    op.drop_constraint(u'pipeline_types_ibfk_1', 'pipeline_types', type_='foreignkey')
    op.drop_column(u'pipeline_types', 'post_tasktype_id')
    op.drop_column(u'pipeline_types', 'prun_tasktype_id')
    op.drop_column(u'pipeline_types', 'split_tasktype_id')
    op.drop_column(u'pipeline_types', 'merge_tasktype_id')
    op.drop_column(u'pipeline_types', 'init_tasktype_id')
    op.drop_column(u'pipeline_types', 'finish_tasktype_id')
    op.drop_column(u'pipeline_types', 'pre_tasktype_id')
    # pipelines now track only the currently-active task.
    op.add_column(u'pipelines', sa.Column('current_task_id', sa.Integer(), nullable=True))
    op.drop_constraint(u'pipelines_ibfk_14', 'pipelines', type_='foreignkey')
    op.drop_constraint(u'pipelines_ibfk_15', 'pipelines', type_='foreignkey')
    op.drop_constraint(u'pipelines_ibfk_12', 'pipelines', type_='foreignkey')
    op.drop_constraint(u'pipelines_ibfk_17', 'pipelines', type_='foreignkey')
    op.drop_constraint(u'pipelines_ibfk_11', 'pipelines', type_='foreignkey')
    op.drop_constraint(u'pipelines_ibfk_16', 'pipelines', type_='foreignkey')
    op.drop_constraint(u'pipelines_ibfk_13', 'pipelines', type_='foreignkey')
    # NOTE(review): an unnamed FK relies on the naming convention; on MySQL
    # the generated name is needed for the matching drop in downgrade().
    op.create_foreign_key(None, 'pipelines', 'tasks', ['current_task_id'], ['id'])
    op.drop_column(u'pipelines', 'split_task_id')
    op.drop_column(u'pipelines', 'pre_task_id')
    op.drop_column(u'pipelines', 'name')
    op.drop_column(u'pipelines', 'current_state')
    op.drop_column(u'pipelines', 'merge_task_id')
    op.drop_column(u'pipelines', 'post_task_id')
    op.drop_column(u'pipelines', 'prun_task_id')
    op.drop_column(u'pipelines', 'init_task_id')
    op.drop_column(u'pipelines', 'finish_task_id')
    ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): restore the per-stage columns and FKs, drop
    pipeline_catalog and pipelines.current_task_id."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column(u'pipelines', sa.Column('finish_task_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
    op.add_column(u'pipelines', sa.Column('init_task_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
    op.add_column(u'pipelines', sa.Column('prun_task_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
    op.add_column(u'pipelines', sa.Column('post_task_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
    op.add_column(u'pipelines', sa.Column('merge_task_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
    op.add_column(u'pipelines', sa.Column('current_state', mysql.VARCHAR(length=256), nullable=True))
    op.add_column(u'pipelines', sa.Column('name', mysql.VARCHAR(length=256), nullable=True))
    op.add_column(u'pipelines', sa.Column('pre_task_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
    op.add_column(u'pipelines', sa.Column('split_task_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
    # NOTE(review): dropping a constraint named None depends on a naming
    # convention being configured; on MySQL this typically needs the real
    # auto-generated constraint name — confirm before running.
    op.drop_constraint(None, 'pipelines', type_='foreignkey')
    op.create_foreign_key(u'pipelines_ibfk_13', 'pipelines', 'tasks', ['prun_task_id'], ['id'])
    op.create_foreign_key(u'pipelines_ibfk_16', 'pipelines', 'tasks', ['split_task_id'], ['id'])
    op.create_foreign_key(u'pipelines_ibfk_11', 'pipelines', 'tasks', ['init_task_id'], ['id'])
    op.create_foreign_key(u'pipelines_ibfk_17', 'pipelines', 'tasks', ['finish_task_id'], ['id'])
    op.create_foreign_key(u'pipelines_ibfk_12', 'pipelines', 'tasks', ['merge_task_id'], ['id'])
    op.create_foreign_key(u'pipelines_ibfk_15', 'pipelines', 'tasks', ['post_task_id'], ['id'])
    op.create_foreign_key(u'pipelines_ibfk_14', 'pipelines', 'tasks', ['pre_task_id'], ['id'])
    op.drop_column(u'pipelines', 'current_task_id')
    op.add_column(u'pipeline_types', sa.Column('pre_tasktype_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
    op.add_column(u'pipeline_types', sa.Column('finish_tasktype_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
    op.add_column(u'pipeline_types', sa.Column('init_tasktype_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
    op.add_column(u'pipeline_types', sa.Column('merge_tasktype_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
    op.add_column(u'pipeline_types', sa.Column('split_tasktype_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
    op.add_column(u'pipeline_types', sa.Column('prun_tasktype_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
    op.add_column(u'pipeline_types', sa.Column('post_tasktype_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
    op.create_foreign_key(u'pipeline_types_ibfk_1', 'pipeline_types', 'task_types', ['finish_tasktype_id'], ['id'])
    op.create_foreign_key(u'pipeline_types_ibfk_3', 'pipeline_types', 'task_types', ['merge_tasktype_id'], ['id'])
    op.create_foreign_key(u'pipeline_types_ibfk_2', 'pipeline_types', 'task_types', ['init_tasktype_id'], ['id'])
    op.create_foreign_key(u'pipeline_types_ibfk_5', 'pipeline_types', 'task_types', ['pre_tasktype_id'], ['id'])
    op.create_foreign_key(u'pipeline_types_ibfk_6', 'pipeline_types', 'task_types', ['prun_tasktype_id'], ['id'])
    op.create_foreign_key(u'pipeline_types_ibfk_7', 'pipeline_types', 'task_types', ['split_tasktype_id'], ['id'])
    op.create_foreign_key(u'pipeline_types_ibfk_4', 'pipeline_types', 'task_types', ['post_tasktype_id'], ['id'])
    op.drop_table('pipeline_catalog')
    ### end Alembic commands ###
double_count = 0
tripple_count = 0
for index, word in enumerate(open('input.txt', 'r', 1)):
found_double = False
found_tripple = False
for char in word:
count = word.count(char)
if (found_double and found_tripple):
break
elif (count == 2 and not found_double):
found_double = True
double_count += 1
elif (count == 3 and not found_tripple):
found_tripple = True
tripple_count += 1
print('Checksum:', double_count * tripple_count) |
import argparse
import json
parser = argparse.ArgumentParser(description='Write version of package to env-file', prog='version')
parser.add_argument('--version-file', dest='version_file', help='File storing version number', type=str, required=True)
parser.add_argument('--env-file', dest='env_file', help='File storing environment variables', type=str, required=True)


def main(argv=None):
    """Read the version from the JSON version file and append it to the
    env file as PKG_VERSION=<version>.

    argv: optional argument list (defaults to sys.argv[1:]); extracted
    into a function so the behavior is testable.
    """
    args = parser.parse_args(argv)
    with open(args.version_file, 'r', encoding='utf8') as file:
        version_dict = json.load(file)
    with open(args.env_file, 'a', encoding='utf8') as file:
        # BUG FIX: append a trailing newline so a later append to the
        # env file does not fuse two variables onto one line.
        file.write(f'PKG_VERSION={version_dict["version"]}\n')


if __name__ == '__main__':
    main()
|
import face_recognition
from PIL import Image, ImageDraw
import cv2
import numpy as np
import ffmpeg
import math
import os
# --- Tunable settings for the face-replacement pipeline ---
INPUT_FILE = 'example/horns.mp4' # input video file
SEARCH_FILE = 'example/search.jpg' # image of the face to recognise and replace
REPLACE_FILE = 'example/replace.png' # transparent png to use as the face replacement
OUTPUT_FILE = 'output.mp4' # output video file
DETECTION_THRESH = 0.68 # threshold for face detection - 0.6 is the library default
UPSAMPLES = 2 # how many times to upscale the source image by - larger number finds smaller faces
SCALE_FACTOR = 1.5 # how much to scale the replacement face by
def draw_image(src, dest, src_landmarks, dest_landmarks, factor=1.0):
    """Paste *src* onto *dest* so the two faces line up.

    The scale is the ratio of eye-to-eye spans (times *factor*), and the
    nose-tip landmarks are used as anchor points. *src* is used as its
    own alpha mask, so transparent PNGs composite cleanly.
    """
    def eye_span(landmarks):
        # Distance from outer left-eye corner to outer right-eye corner.
        dx = landmarks['right_eye'][1][0] - landmarks['left_eye'][0][0]
        dy = landmarks['right_eye'][1][1] - landmarks['left_eye'][0][1]
        return math.hypot(dx, dy)

    src_nose = src_landmarks['nose_tip'][0]
    dest_nose = dest_landmarks['nose_tip'][0]
    scale = factor * float(eye_span(dest_landmarks)) / float(eye_span(src_landmarks))
    resized = src.copy().resize(
        (int(src.width * scale), int(src.height * scale)), Image.LANCZOS)
    # Anchor the scaled nose tip on the destination nose tip.
    offset_x = dest_nose[0] - int(src_nose[0] * scale)
    offset_y = dest_nose[1] - int(src_nose[1] * scale)
    box = (offset_x, offset_y, resized.width + offset_x, resized.height + offset_y)
    dest.paste(resized, box, resized)
# Encode the reference face we are searching for (must be exactly one face).
search_picture = face_recognition.load_image_file(SEARCH_FILE)
search_encodings = face_recognition.face_encodings(search_picture)
if len(search_encodings) != 1:
    print("Search image should contain 1 face. Found {}.".format(len(search_encodings)))
    exit(2)
search_encoding = search_encodings[0]
# Locate the single face in the replacement image and keep its landmarks.
replace_picture = face_recognition.load_image_file(REPLACE_FILE, mode='RGB')
replace_locations = face_recognition.face_locations(replace_picture, model="cnn")
if len(replace_locations) != 1:
    print("Replace image should contain 1 face. Found {}.".format(len(replace_locations)))
    exit(3)
replace_image = Image.open(REPLACE_FILE).convert('RGBA')
replace_landmarks = face_recognition.face_landmarks(replace_picture, [replace_locations[0]], model='small')[0]
# Open the input video and a writer with matching fps and frame size.
input_movie = cv2.VideoCapture(INPUT_FILE)
frame_count = int(input_movie.get(cv2.CAP_PROP_FRAME_COUNT))
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
output_movie = cv2.VideoWriter('temp.mp4', fourcc, input_movie.get(cv2.CAP_PROP_FPS), (int(input_movie.get(cv2.CAP_PROP_FRAME_WIDTH)), int(input_movie.get(cv2.CAP_PROP_FRAME_HEIGHT))))
for index in range(frame_count):
    success, frame = input_movie.read()
    if not success:
        print("DEATH CAME TOO SOON! :'(")
        exit(1)
    # OpenCV delivers BGR; face_recognition/PIL expect RGB.
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    image = Image.fromarray(frame)
    d = ImageDraw.Draw(image)
    face_locations = face_recognition.face_locations(frame, number_of_times_to_upsample=UPSAMPLES, model="cnn")
    face_landmarks = face_recognition.face_landmarks(frame, face_locations, model='small')
    if len(face_locations) > 0:
        encodings = face_recognition.face_encodings(frame, known_face_locations=face_locations, num_jitters=10)
        if len(encodings) != len(face_locations):
            print("That was unexpected.")
            exit(4)
        # Pick the detected face closest to the search encoding; replace it
        # only if the distance beats the detection threshold.
        distances = []
        for face_index in range(len(face_locations)):
            distances.append(face_recognition.face_distance([search_encoding], encodings[face_index])[0])
        best_index = np.argmin(distances)
        if distances[best_index] < DETECTION_THRESH:
            draw_image(replace_image, image, replace_landmarks, face_landmarks[best_index], SCALE_FACTOR)
        print("I found {}/{} face(s) in frame {}/{}.".format((1 if distances[best_index] < DETECTION_THRESH else 0), len(face_locations), index, frame_count))
    else:
        print("I found 0 faces in frame {}/{}".format(index, frame_count))
    # Convert back to BGR for the OpenCV writer.
    frame = np.array(image)
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    output_movie.write(frame)
input_movie.release()
output_movie.release()
# Mux the original audio track back onto the processed video.
# NOTE(review): OUTPUT_FILE is defined above but 'output.mp4' is
# hard-coded here — confirm which should win.
audio_input = ffmpeg.input(INPUT_FILE)
video_input = ffmpeg.input('temp.mp4')
(
    ffmpeg
    .output(video_input.video, audio_input.audio, 'output.mp4', codec='copy')
    .overwrite_output()
    .run()
)
os.remove('temp.mp4')
|
#
# @lc app=leetcode id=389 lang=python3
#
# [389] Find the Difference
#
# @lc code=start
class Solution1:
    '''using sort'''
    def findTheDifference(self, s: str, t: str) -> str:
        """t is s plus one extra letter (shuffled); return that letter."""
        sorted_s, sorted_t = sorted(s), sorted(t)
        # After sorting, the first position where the strings disagree
        # holds the extra letter.
        for a, b in zip(sorted_s, sorted_t):
            if a != b:
                return b
        # All shared positions matched, so the extra letter is t's last.
        return sorted_t[-1]
from collections import Counter
class Solution:
    def findTheDifference(self, s: str, t: str) -> str:
        """t is s with exactly one letter added; return the added letter.

        Compares per-letter frequencies instead of sorting.
        """
        expected = Counter(s)
        actual = Counter(t)
        for letter, count in actual.items():
            # The added letter is the one whose count in t exceeds s.
            if expected[letter] != count:
                return letter
# @lc code=end
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import requests
from bs4 import BeautifulSoup
from translate import Translator
#gender=f 또는 gender=m 를 url에 포함시킨다 --> 각각 따로 분석하려함
#sellitem = 1을 url에 포함시킨다 --> 현재 판매중인 옷을 입은 사진만 보여줌
#.area=001,002,004,007,003를 url에 포함시킨다 --> 서울지역만 보여줌
#.&age=10,20,30를 ,url에 포함시킨다 --> 10~30대까지만 보여줌
#마지막 페이지 번호는 .totalPagingNum에서 가져온다
# Page range to crawl; lastPage is overwritten below with the real
# total read from the site.
firstPage = 1
lastPage = 1
# 로그인 세션 만들어서, 이 안에서 코드 실행
# Crawl musinsa street snaps inside a logged-in session.
with requests.session() as s:
    musinsa_data_list = []
    url = "https://store.musinsa.com/app/api/login/make_login"
    # NOTE(review): credentials are hard-coded; move them to config or
    # environment variables before sharing this script.
    data = {
        "referer" : "https://www.musinsa.com/index.php",
        "id" : "wkdalsgur85",
        "pw" : "cnj140535",
    }
    response = s.post(url, data=data)
    # BUG FIX: raise_for_status is a method — without the call parentheses
    # the line was a no-op and failed logins went unnoticed.
    response.raise_for_status()
    # print(response.text)
r = requests.get('https://www.musinsa.com/index.php?m=street&_y=2018&gender=f&area=001,002,004,007,003')
html = r.text
# print(html)
soup = BeautifulSoup(html, 'html.parser')
# 마지막 페이지 번호를 구함
lastPage = soup.find('span',class_='totalPagingNum').get_text()
# print(lastPage) #lastPage는 str타입
# 바깥 페이지에서 모든 페이지에 하나하나 접근함(성별, 지역, 연령, 시기는 밑의 url에 포함)
    for i in range(firstPage, int(lastPage) + 1):
        r = requests.get('https://www.musinsa.com/index.php?m=street&_y=2018&gender=f&area=001,002,004,007,003&p='+ str(i))
        html = r.text
        soup = BeautifulSoup(html, 'html.parser')
        # Visit each item on the page.
        tmp_list = soup.findAll('a', class_='creplyCnt')
        for item in tmp_list:
            if 'href' in item.attrs:
                uid = item.attrs['href']
                uid = uid[-17:-8]
                # print(uid)
                # Open the item's detail page.
                new_url = "https://www.musinsa.com/index.php?m=street&" + uid
                r = s.post(new_url, data=data)
                # print(r.text)
                beautifulSoup = BeautifulSoup(r.text, 'html.parser')
                # print(beautifulSoup)
                # Within one item, keep only outerwear/top/bottom photos and captions.
                # NOTE(review): the loops below rebind `i` and use `s` as a
                # generator variable (shadowing the session name) — confirm
                # no clash is intended.
                photo1 = []
                photo2 = []
                photo3 = []
                num = 1
                for tag in beautifulSoup.select('ul > li > div.itemImg > a > img'):
                    # print(num)
                    if num == 1:
                        raw_explanations = beautifulSoup.select('ul > li:nth-of-type(1) > div.itemImg > div > ul > li > a > span')
                        if any("아우터" in s or "상의" in s or '하의' in s for s in raw_explanations):
                            raw_photos = beautifulSoup.select('ul > li:nth-of-type(1) > div.itemImg > a > img')
                            photo1.append(raw_photos[0].get("src"))
                            for i in raw_explanations:
                                photo1.append(i.get_text())
                        # print(photo1)
                    elif num == 2:
                        raw_explanations = beautifulSoup.select('ul > li:nth-of-type(2) > div.itemImg > div > ul > li > a > span')
                        if any("아우터" in s or "상의" in s or '하의' in s for s in raw_explanations):
                            raw_photos = beautifulSoup.select('ul > li:nth-of-type(2) > div.itemImg > a > img')
                            photo2.append(raw_photos[0].get("src"))
                            for i in raw_explanations:
                                photo2.append(i.get_text())
                        # print(photo2)
elif num == 3:
raw_explanations = beautifulSoup.select('ul > li:nth-of-type(3) > div.itemImg > div > ul > li > a > span')
if any("아우터" in s or "상의" in s or '하의' in s for s in raw_explanations):
raw_photos = beautifulSoup.select('ul > li:nth-of-type(3) > div.itemImg > a > img')
photo2.append(raw_photos[0].get("src"))
for i in raw_explanations:
photo3.append(i.get_text())
# print(photo3)
num += 1
                # Skip items that have none of outerwear/top/bottom.
                if not photo1 and not photo2 and not photo3:
                    continue
# # 구글번역기로 영어로 번역
# # https://pypi.org/project/translate/
# # googletrans issue.......... 다른 라이브러리로 대체
# translator= Translator(to_lang="en", from_lang = "ko")
# # translation = translator.translate("펜")
# # print(translation)
# # photo1 영어로 번역
# num = 0
# tmp = []
# for name in photo1:
# if num > 0:
# tmp.append(translator.translate(name))
# else:
# num = 1
# del photo1[1:]
# photo1.extend(tmp)
# # print(photo1)
# #photo2 영어로 번역
# num = 0
# tmp = []
# for name in photo2:
# if num > 0:
# tmp.append(translator.translate(name))
# else:
# num = 1
# del photo2[1:]
# photo2.extend(tmp)
# # print(photo2)
# # photo3 영어로 번역
# num = 0
# tmp = []
# for name in photo3:
# if num > 0:
# tmp.append(translator.translate(name))
# else:
# num = 1
# del photo3[1:]
# photo3.extend(tmp)
# # print(photo3)
                # Item metadata (date, style, views/likes).
                raw_list = beautifulSoup.select('table > tbody > tr > td > span')
                info_list = []
                for i in raw_list:
                    info_list.append(i.get_text())
                # Drop the extra campaign row some items carry, so the
                # positional indexes below stay aligned.
                # NOTE(review): '2018 서머 뮤직 페스티벌' appears twice in this
                # chain — the second branch is unreachable; confirm intent.
                if '2018 F/W 헤라 서울패션위크' in info_list:
                    info_list.pop(3)
                elif '2018 서머 뮤직 페스티벌' in info_list:
                    info_list.pop(3)
                elif "2018 MUSINSA MD'S PICK" in info_list:
                    info_list.pop(3)
                elif '2018 F/W 하우스 오브 반스' in info_list:
                    info_list.pop(3)
                elif '2018 신학기 스타일 가이드 북' in info_list:
                    info_list.pop(3)
                elif '2018 스웨트 페스티벌' in info_list:
                    info_list.pop(3)
                elif '2018 S/S 일본 트렌드 리포트' in info_list:
                    info_list.pop(3)
                elif '2018 F/W 버쉬카 도쿄 리포트' in info_list:
                    info_list.pop(3)
                elif '2018 아우터 페스티벌' in info_list:
                    info_list.pop(3)
                elif '2019 S/S 헤라 서울패션위크' in info_list:
                    info_list.pop(3)
                elif '2018 서머 뮤직 페스티벌' in info_list:
                    info_list.pop(3)
                # print(info_list)
                date = info_list[1]
                style = info_list[5]
                views_like = info_list[6]
                # Attach the shared metadata to each collected outfit part.
                if photo1:
                    photo1.append(date)
                    photo1.append(style)
                    photo1.append(views_like)
                    musinsa_data_list.append(photo1)
                    # print(photo1)
                if photo2:
                    photo2.append(date)
                    photo2.append(style)
                    photo2.append(views_like)
                    musinsa_data_list.append(photo2)
                    # print(photo2)
                if photo3:
                    photo3.append(date)
                    photo3.append(style)
                    photo3.append(views_like)
                    musinsa_data_list.append(photo3)
                    # print(photo3)
# print(musinsa_data_list)
# print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
# print('a')
for s in musinsa_data_list:
if "//image.musinsa.com" not in s[0]:
musinsa_data_list.remove(s)
for s in musinsa_data_list:
if "2018-" in s[1]:
musinsa_data_list.remove(s)
for s in musinsa_data_list:
for k in s[1:]:
if "//image.musinsa.com" in k:
musinsa_data_list.remove(s)
print(musinsa_data_list)
# In[37]:
"""import pandas as pd
data=pd.read_csv('0513data.csv',encoding='cp949')
data.head()"""
# In[162]:
# Store the scraped rows in a DataFrame.
# BUG FIX: pandas was only ever imported inside the string literal above,
# so `pd` was undefined here; import it for real.
import pandas as pd
data = pd.DataFrame(musinsa_data_list)
# In[163]:
data.head()
# In[5]:
data.shape
# In[38]:
# Rename the columns: the photo url first, then up to seven tags.
data.columns = ['url','tag1','tag2','tag3','tag4','tag5','tag6','tag7']
# In[39]:
del data['url']
# ## Variable preprocessing
# - drop dates and body measurements
# - normalize words for the English translation step
# - remove brand-specific tags
# In[40]:
# Replace null tag values with 0 in every tag column.
col = ['tag1','tag2','tag3','tag4','tag5','tag6','tag7']
for i in col:
    data.loc[data[i].isnull(),i] = 0
# In[41]:
# Work on a copy so the raw scrape is preserved.
data_copy=data.copy()
# - "/"으로 나뉘어진 패션 카테고리 분리
# In[45]:
import numpy as np
data_copy['tag5_1']= 0
data_copy['tag5_2']= 0
data_copy['tag5_1']=data_copy.loc[data_copy['tag5'].isin(['유니크/키치','섹시/페미닌','스트리트/힙합','심플/캐주얼','워크/밀리터리']),'tag5'].apply(lambda x: x.split('/')[0])
data_copy['tag5_2']=data_copy.loc[data_copy['tag5'].isin(['유니크/키치','섹시/페미닌','스트리트/힙합','심플/캐주얼','워크/밀리터리']),'tag5'].apply(lambda x: x.split('/')[1])
# In[46]:
import numpy as np
data_copy['tag6_1']= 0
data_copy['tag6_2']= 0
data_copy['tag6_1']=data_copy.loc[data_copy['tag6'].isin(['유니크/키치','섹시/페미닌','스트리트/힙합','심플/캐주얼','워크/밀리터리','포멀/클래식','아티스트/뮤지션','전문직/프리랜서']),'tag6'].apply(lambda x: x.split('/')[0])
data_copy['tag6_2']=data_copy.loc[data_copy['tag6'].isin(['유니크/키치','섹시/페미닌','스트리트/힙합','심플/캐주얼','워크/밀리터리','포멀/클래식','아티스트/뮤지션','전문직/프리랜서']),'tag6'].apply(lambda x: x.split('/')[1])
# In[47]:
import numpy as np
data_copy['tag7_1']= 0
data_copy['tag7_2']= 0
data_copy['tag7_1']=data_copy.loc[data_copy['tag7'].isin(['유니크/키치','섹시/페미닌','스트리트/힙합','심플/캐주얼','포멀/클래식']),'tag7'].apply(lambda x: x.split('/')[0])
data_copy['tag7_2']=data_copy.loc[data_copy['tag7'].isin(['유니크/키치','섹시/페미닌','스트리트/힙합','심플/캐주얼','포멀/클래식']),'tag7'].apply(lambda x: x.split('/')[1])
# ## Replace remaining nulls with a sentinel value
# In[62]:
data_copy=data_copy.fillna('null')
# In[67]:
data_copy=data_copy.replace(0,'null')
# In[164]:
# Persist the raw scraped table before any further processing.
data.to_csv('0513_data_original.csv',mode='w',index=False,encoding='cp949')
# ## 한글을 영어로 변환하는 과정
# In[64]:
#!pip install googletrans
from googletrans import Translator
#translator = Translator()
# In[138]:
col2 = ['tag1','tag2','tag3','tag4','tag5_1','tag5_2','tag6_1','tag6_2','tag7_1','tag7_2']
for i in col2:
    translator = Translator()
    # Translate every tag column into a matching 'en_<col>' column.
    # The original ignored the loop variable and re-translated 'tag1' into
    # 'en_tag1' on each of the ten iterations.
    data_copy['en_' + i] = data_copy[i].apply(translator.translate, src='ko', dest='en').apply(getattr, args=('text',))
# In[70]:
# Collect the tags from every column into one list-valued column
data_copy['total_tags']=data_copy.apply(lambda x: [x['tag1'],x['tag2'],x['tag3'],x['tag4'],x['tag5_1'],x['tag5_2'],x['tag6_1'],x['tag6_2'],x['tag7_1'],x['tag7_2']],axis=1)
# In[58]:
# Remove numeric values from the collected tags
import re
def cleannum(readData):
    """Return *readData* with every ASCII digit stripped out."""
    return re.sub('[0-9]', '', readData)
# NOTE(review): total_tags becomes the *string* form of the tag list here, so
# the comprehension below iterates over single characters — confirm intent.
data_copy['total_tags']=data_copy['total_tags'].apply(lambda x: cleannum(str(x)))
# In[73]:
data_copy['total_tags']=data_copy['total_tags'].apply(lambda x: [i for i in x if "null" not in i] )
# ## Remove whitespace
# In[55]:
def cleannone(readData):
    """Return *readData* with every occurrence of the substring 'nan' removed."""
    return re.sub('nan', '', readData)
def cleanspace(readData):
    """Return *readData* with all space characters removed."""
    return re.sub(' ', '', readData)
data_copy['total_tags']=data_copy['total_tags'].apply(lambda x: cleannone(str(x)))
# ## Vectorizing the tag words
# In[74]:
from gensim.models.word2vec import Word2Vec
import ast
import pandas as pd
# In[151]:
sentence = data_copy['total_tags'].tolist()
# NOTE: uses the pre-4.0 gensim API (`iter`/`size`); gensim>=4 renamed these
# to `epochs`/`vector_size`.
model = Word2Vec(sentence, min_count=1, iter=20, size=300, sg=1)
model.init_sims(replace=True)
print("섹시과 관련된 키워드 : ", model.most_similar("섹시"))
print("스트리트와 관련된 키워드 : ", model.most_similar("스트리트"))
print("심플와 관련된 키워드 : ", model.most_similar("심플"))
# ## When using the trained model, use the mean of the word vectors
# Summary of the function below:
#
# : if a photo has no tags there is nothing to vectorize, so it scores 0
#
# : if there are tags, each tag is vectorized into 300 components
#
# -- e.g. the score (vector) for "crop" has 300 components
#
# Conclusion: every word is represented by 300 numeric scores.
#
# In[152]:
import numpy as np
def get_average_word2vec(tokens_list, vector, generate_missing=False, k=300):
    """Return the mean word vector (length *k*) over *tokens_list*.

    Tokens absent from *vector* contribute a random vector when
    generate_missing is True, a zero vector otherwise.  An empty token
    list yields the zero vector.
    """
    if not tokens_list:
        return np.zeros(k)
    if generate_missing:
        fallback = lambda: np.random.rand(k)
    else:
        fallback = lambda: np.zeros(k)
    rows = [vector[tok] if tok in vector else fallback() for tok in tokens_list]
    return np.sum(rows, axis=0) / len(rows)
def get_word2vec_embeddings(vectors, clean_comments, generate_missing=False):
    """Return a list with one averaged word vector per row of *clean_comments*.

    *clean_comments* is a DataFrame with a 'total_tags' column of token
    sequences; *vectors* maps a token to its word vector.  The original read
    the module-level ``data_copy`` and silently ignored this parameter; it
    now uses the argument it is given (all existing call sites pass
    ``data_copy``, so behavior is unchanged for them).
    """
    embeddings = clean_comments['total_tags'].apply(
        lambda tokens: get_average_word2vec(tokens, vectors,
                                            generate_missing=generate_missing))
    return list(embeddings)
# In[153]:
training_embeddings = get_word2vec_embeddings(model, data_copy, generate_missing=True)
# In[90]:
# Keep the vocabulary behind all the word vectors
# NOTE(review): `word_vectors` is never defined in this notebook —
# presumably `model.wv` was intended; confirm before running.
vocabs = word_vectors.vocab.keys()
# The vectors for our own tags
word_vectors_list= [word_vectors[v] for v in vocabs]
# In[157]:
from sklearn.decomposition import PCA
from matplotlib import pyplot
# Store the vectorized tags in X,
# reduced to two principal components
pca = PCA(n_components=2)
X = pca.fit_transform(training_embeddings)
# Cluster the reduced vectors with k-means
from sklearn import cluster
from sklearn import metrics
NUM_CLUSTERS=5
kmeans = cluster.KMeans(n_clusters=NUM_CLUSTERS)
kmeans.fit(X)
labels = kmeans.labels_
centroids = kmeans.cluster_centers_
final=pd.DataFrame(X)
final.columns=['x1','x2']
final['label']=labels
final.head()
# In[158]:
final.label.value_counts()
# In[159]:
final['tags']=data_copy['total_tags']
final.head()
final.shape
# In[160]:
final.head()
# In[166]:
final.to_csv('clustering_수정.csv',mode='w',index=False,encoding='cp949')
# In[167]:
final.shape
# In[ ]:
|
import os
def check_pid(pid):
    """ Check for the existence of a unix pid.

    Sends signal 0, which performs error checking only.  EPERM means the
    process exists but belongs to another user — the original reported that
    case as "not running", which is wrong; it is mapped to True here.
    """
    try:
        os.kill(pid, 0)
    except ProcessLookupError:
        # No such process.
        return False
    except PermissionError:
        # The pid exists; we just are not allowed to signal it.
        return True
    except OSError:
        # Any other failure (e.g. invalid pid value).
        return False
    else:
        return True
|
__author__ = 'Administrator'
# coding = utf-8
import copy
import datetime
import matplotlib.pyplot as plt
import numpy as np
import os
import socket
import sys
import threading
import time
from ctypes import*
from datetime import datetime
from math import e
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from norlib_python.Thread import *
from Api.DATASPAN import *
class MdThread(threading.Thread):
    """UDP market-data receiver thread (CTP depth quotes).

    Binds to ip:port, casts every received datagram into a
    ThostFtdcDepthMarketDataField and fans it out to registered tick
    callbacks.  (Python 2 exception syntax below.)
    """
    def __init__(self,ip,port):
        threading.Thread.__init__(self)
        self.__ip = ip
        self.__port = port
        self.dtData = {}          # instrument id -> last tick (currently unused)
        self.__ontickcbs = []     # registered tick callbacks
    def run(self):
        # Receive loop: one UDP datagram per market tick.
        sock = socket.socket(socket.AF_INET,
                             socket.SOCK_DGRAM)
        sock.bind((self.__ip,self.__port))
        print("Starting Receiving......")
        while True:
            try:
                (data,addr) = sock.recvfrom(1024)
                # Deep-copy the ctypes struct so the receive buffer can be reused.
                t = copy.deepcopy( cast( data, POINTER(ThostFtdcDepthMarketDataField)).contents)
                #self.dtData.__setitem__(t.InstrumentID, t)
                for cb in self.__ontickcbs:
                    # Best-effort dispatch: a failing callback must not stop the loop.
                    try:cb(t)
                    finally:None
            except Exception,ex:
                print ex
                break
    def RegTick(self,callback):
        # Register a callable invoked with every received tick.
        self.__ontickcbs.append(callback)
    def GetKLineDatas(self, id, dataspan, start, next):
        # Not implemented yet.
        pass
class View:
    """Live last-price plot for a single instrument.

    Subscribes to MdThread ticks, accumulates (time, price) pairs and
    redraws every 2 seconds via the SetInterval decorator.
    """
    def __init__(self, md, id):
        self.__md = md
        # NOTE(review): the id is stored but __OnTick filters on the
        # hard-coded literal 'IF1403' — confirm which is intended.
        self.__id = id
        self.__datas = []   # last prices
        self.__times = []   # matching timestamps
        self.__ShowData()
    def Open(self):
        # Start receiving ticks from the market-data thread.
        self.__md.RegTick(self.__OnTick)
    def __OnTick(self, tick):
        if tick.InstrumentID == 'IF1403':
            self.__datas.append(tick.LastPrice)
            strDate = tick.TradingDay
            strTime = tick.UpdateTime
            ms = tick.UpdateMillisec
            # Combine trading day, update time and milliseconds into one timestamp.
            dtime = datetime.strptime(" ".join([strDate, strTime, str(ms)]), "%Y%m%d %H:%M:%S %f")
            self.__times.append(dtime)
        pass
    @SetInterval(2)
    def __ShowData(self):
        # Periodic redraw (every 2 s).
        if not self.__datas:
            return
        if len(self.__datas)>0:
            try:
                #p1 = plt.subplot(110)
                plt.plot(self.__times, self.__datas)
                plt.draw()
                plt.show()
            # NOTE(review): `a` is undefined, so this Python-2 except clause
            # itself raises NameError if plotting ever fails.
            except a,b:
                print(a)
                print(b)
            #print self.__datas[-1].LastPrice
#CTP Depth Market Data
class ThostFtdcDepthMarketDataField(Structure):
    """ctypes mirror of the CTP depth market data struct.

    Field order and sizes must match the wire format exactly — do not
    reorder or resize entries.
    """
    _fields_ = [
        ("TradingDay",c_char*9),
        ("InstrumentID",c_char*31),
        ("ExchangeID",c_char*9),
        ("ExchangeInstID",c_char*31),
        ("LastPrice",c_double),
        ("PreSettlementPrice",c_double),
        ("PreClosePrice",c_double),
        ("PreOpenInterest",c_double),
        ("OpenPrice",c_double),
        ("HighestPrice",c_double),
        ("LowestPrice",c_double),
        ("Volume",c_int),
        ("Turnover",c_double),
        ("OpenInterest",c_double),
        ("ClosePrice",c_double),
        ("SettlementPrice",c_double),
        ("UpperLimitPrice",c_double),
        ("LowerLimitPrice",c_double),
        ("PreDelta",c_double),
        ("CurrDelta",c_double),
        ("UpdateTime",c_char*9),
        ("UpdateMillisec",c_int),
        # Five levels of bid/ask depth follow.
        ("BidPrice1",c_double),
        ("BidVolume1",c_int),
        ("AskPrice1",c_double),
        ("AskVolume1",c_int),
        ("BidPrice2",c_double),
        ("BidVolume2",c_int),
        ("AskPrice2",c_double),
        ("AskVolume2",c_int),
        ("BidPrice3",c_double),
        ("BidVolume3",c_int),
        ("AskPrice3",c_double),
        ("AskVolume3",c_int),
        ("BidPrice4",c_double),
        ("BidVolume4",c_int),
        ("AskPrice4",c_double),
        ("AskVolume4",c_int),
        ("BidPrice5",c_double),
        ("BidVolume5",c_int),
        ("AskPrice5",c_double),
        ("AskVolume5",c_int),
        ("AveragePrice",c_double),
        ("ActionDay",c_char*9),
    ]
if __name__ == "__main__":
    # Start the UDP receiver, attach a live view, then begin plotting.
    md = MdThread("127.0.0.1",12345)
    md.start()
    #s = datetime.datetime.now()
    #n = s.AddDays(1)
    #md.GetKLineDatas("IF1403" ,DataSpan.min1, s , n)
    view = View(md,"IF1403")
    #time.sleep(3)
    view.Open()
    print("opened")
    #plt.show()
|
from gym.envs.registration import register
# Register every custom Gym environment shipped with this project so they can
# be created with gym.make(id).
#---------------------------------------------#
#Dumb Loop - Learning a sequence
register(
    id='DumbLoop-v0',
    entry_point='Games.Dumb_Loop.loop_perimeter:LoopEnv',
    max_episode_steps = 200,
)
register(
    id='DumbLoop-v1',
    entry_point='Games.Dumb_Loop.loop_off_perimeter:LoopEnv',
    max_episode_steps = 200,
)
register(
    id='DumbLoop-v2',
    entry_point='Games.Dumb_Loop.loop_area:LoopEnv',
    max_episode_steps = 200,
)
#---------------------------------------------#
#ODE Reactions
register(
    id='Reaction-v0',
    entry_point='Games.ODE_Reactions.Reaction_1:Reaction_Env',
    max_episode_steps=100,
)
register(
    id='Reaction-v1',
    entry_point='Games.ODE_Reactions.Reaction_2:Reaction_Env',
    max_episode_steps=100,
)
register(
    id='Reaction-v2',
    entry_point='Games.ODE_Reactions.Reaction_3:Reaction_Env',
    max_episode_steps=100,
)
register(
    id='Reaction-v3',
    entry_point='Games.ODE_Reactions.Reaction_4:Reaction_Env',
    max_episode_steps=100,
)
#---------------------------------------------#
#Water Heating
#up or down increase
register(
    id='WaterHeater-v0',
    entry_point='Games.WaterHeater.heat_game_v0:HeatEnv',
    max_episode_steps = 100,
)
#discrete 'clicks' on a dial
register(
    id='WaterHeater-v1',
    entry_point='Games.WaterHeater.heat_game_v1:HeatEnv',
    max_episode_steps = 100,
)
#continuous dial
register(
    id='WaterHeater-v2',
    entry_point='Games.WaterHeater.heat_game_v2:HeatEnv',
    max_episode_steps = 100,
)
#---------------------------------------------#
#Function Learning - Unable to achieve anything with these
#test and train as one action
register(
    id = 'DropOut_Sin-v0',
    entry_point = 'Games.Dropout_FunctionLearning.SinGame_v0:SinEnv',
    max_episode_steps = 100,
)
#separated actions
register(
    id = 'DropOut_Sin-v1',
    entry_point = 'Games.Dropout_FunctionLearning.SinGame_v1:SinEnv2',
    max_episode_steps = 100,
)
#different example - remains unfinished, moved on to Gaussian Processes
register(
    id = 'DropOut_Water-v0',
    entry_point = 'Games.Dropout_FunctionLearning.WaterGame:BoilEnv',
    max_episode_steps = 100,
)
|
#!/usr/bin/env python
# Smoke test for psana AreaDetector on a pnCCD camera: fetch one event by
# timestamp, dump detector metadata, save the calibrated frame, display it.
from __future__ import print_function
import sys
import psana
from time import time
from Detector.AreaDetector import AreaDetector
from Detector.GlobalUtils import print_ndarr
import numpy as np
##-----------------------------
ntest = int(sys.argv[1]) if len(sys.argv)>1 else 1
print('Test # %d' % ntest)
##-----------------------------
#dsname, src = 'exp=amob5114:run=403:idx', psana.Source('DetInfo(Camp.0:pnCCD.0)')
dsname, src = 'exp=amo86615:run=159:idx', 'Camp.0:pnCCD.1'
print('Example for\n dataset: %s\n source : %s' % (dsname, src))
# Non-standard calib directory
#psana.setOption('psana.calib-dir', './calib')
#psana.setOption('psana.calib-dir', './empty/calib')
#tsec, tnsec, fid = 1434301977, 514786085, 44835
#et = psana.EventTime(int((tsec<<32)|tnsec),fid)
ds = psana.DataSource(dsname)
# Packed event time: (seconds << 32) | nanoseconds, plus fiducial count.
tsecnsec, fid = 6178962762198708138, 0xf762
et = psana.EventTime(int(tsecnsec),fid)
run = next(ds.runs())
evt = run.event(et)
#evt = ds.events().next()
env = ds.env()
for key in evt.keys() : print(key)
##-----------------------------
det = AreaDetector(src, env, pbits=0, iface='C')
ins = det.instrument()
print(80*'_', '\nInstrument: ', ins)
#det.set_print_bits(511);
#det.set_def_value(-5.);
#det.set_mode(1);
#det.set_do_offset(True); # works for ex. Opal1000
det.print_attributes()
shape_nda = det.shape(evt)
print_ndarr(shape_nda, 'shape')
print('size of ndarray: %d' % det.size(evt))
print('ndim of ndarray: %d' % det.ndim(evt))
peds = det.pedestals(evt)
print_ndarr(peds, 'pedestals')
t0_sec = time()
nda_raw = det.raw(evt)
print('%s\n **** consumed time to get raw data = %f sec' % (80*'_', time()-t0_sec))
print_ndarr(nda_raw, 'raw data')
nda_cdata = det.calib(evt)
print_ndarr(nda_cdata, 'calibrated data')
fname = 'nda-%s-%s-Camp.0:pnCCD.1.txt' % (env.experiment(), evt.run())
print('Save ndarray in file %s' % fname)
# Flatten the 4-panel pnCCD stack into a 2-D array so savetxt accepts it.
nda_cdata.shape = (512*4,512)
np.savetxt(fname, nda_cdata)
img = det.image(evt)
print_ndarr(img, 'img')
##-----------------------------
if img is None :
    print('Image is not available')
    sys.exit('FURTHER TEST IS TERMINATED')
import pyimgalgos.GlobalGraphics as gg
ave, rms = img.mean(), img.std()
gg.plotImageLarge(img, amp_range=(ave-1*rms, ave+2*rms))
gg.show()
##-----------------------------
sys.exit(0)
##-----------------------------
|
from pyrogram import (
Client,
Filters,
Message,
ReplyKeyboardRemove,
InlineKeyboardMarkup,
InlineKeyboardButton
)
@Client.on_message(Filters.create(lambda _, m: m.text == 'My bots / channels / groups') | Filters.command('my'))
async def my_tg_objects_handler(_client: Client, message: Message):
    """Handle the 'My bots / channels / groups' menu (or /my command):
    clear the reply keyboard, then present an inline category chooser."""
    # Sending and immediately deleting a throwaway message is the only way
    # to remove a reply keyboard without leaving visible text behind.
    msg = await message.reply("Editing the buttons...", reply_markup=ReplyKeyboardRemove())
    await msg.delete()
    await message.reply("What do you want to see?",
                        reply_markup=InlineKeyboardMarkup([
                            [InlineKeyboardButton('Bots', 's|bot')],
                            [InlineKeyboardButton('Channels', 's|channel')],
                            [InlineKeyboardButton('Groups', 's|group')],
                            [InlineKeyboardButton('All', 's|all')]
                        ])
                        )
|
import unittest2 as unittest
import sys
import os
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import shorturl.parts
import shorturl.filestore
import shorturl.dirstore
dir_path = os.path.dirname(os.path.realpath(__file__))
class UrlPartTest(unittest.TestCase):
    """generate_url_part should emit a 6-character token."""
    def test(self):
        token = shorturl.parts.generate_url_part()
        self.assertEqual(6, len(token))
class UrlSaveTest(unittest.TestCase):
    """save_url should append exactly one line to the CSV store."""
    def test(self):
        part = shorturl.parts.generate_url_part()
        url = 'http://google.com'
        path = '{0}/urls.csv'.format(dir_path)
        shorturl.filestore.save_url(path, part, url)
        # Count lines with the handle properly closed (the original's
        # sum(1 for line in open(path)) leaked the file object).
        with open(path) as f:
            num_lines = sum(1 for line in f)
        self.assertEqual(num_lines, 1)
        os.remove(path)
class UrlLoadTest(unittest.TestCase):
    """Round-trip: a saved short-url entry can be loaded back."""
    def test(self):
        part = shorturl.parts.generate_url_part()
        url = 'http://google.com'
        path = '{0}/urls.csv'.format(dir_path)
        shorturl.filestore.save_url(path, part, url)
        # Count lines with the handle properly closed (the original leaked it).
        with open(path) as f:
            num_lines = sum(1 for line in f)
        self.assertEqual(num_lines, 1)
        url = shorturl.filestore.load_url(path, part)
        self.assertEqual(url, 'http://google.com')
        os.remove(path)
class DirUrlSaveTest(unittest.TestCase):
    """dirstore.save_url should create one file named after the url part."""
    def test(self):
        token = shorturl.parts.generate_url_part()
        shorturl.dirstore.save_url(dir_path, token, 'http://google.com')
        target = os.path.join(dir_path, token)
        exists = os.path.exists(target)
        os.remove(target)
        self.assertEqual(True, exists)
class DirUrlLoadTest(unittest.TestCase):
    """Round-trip: a dirstore entry exists on disk and loads back intact."""
    def test(self):
        part = shorturl.parts.generate_url_part()
        url = 'http://google.com'
        path = dir_path
        shorturl.dirstore.save_url(path, part, url)
        # The original computed os.path.exists() and discarded the result;
        # actually assert that the entry was written.
        self.assertTrue(os.path.exists(os.path.join(path, part)))
        url = shorturl.dirstore.load_url(path, part)
        os.remove(os.path.join(path, part))
        self.assertEqual(url, 'http://google.com', part)
import db
def normalize(hashtag):
    """Normalize a hashtag for aggregation (currently just lowercases it)."""
    return hashtag.lower()
def map():
    """Build a mapping from normalized hashtag to a list of (user_id, tweet_id).

    Frequency of a hashtag is len(result[hashtag]); hashtags can be ranked
    by sorting the keys on that length, descending.
    """
    rows = db.execute(db.mk_connection(), 'select user_id, tweet_id, hashtag from tweets_hashtags')
    print(len(rows), 'Table Entries')
    hashtag_map = {}
    for user_id, tweet_id, tag in rows:
        hashtag_map.setdefault(normalize(tag), []).append((user_id, tweet_id))
    return hashtag_map
|
# Build the string form of a list of characters (combination printing helper).
def toString(List):
    """Join a list of characters back into a single string."""
    return ''.join(List)
# Recursive routine that prints the permutations of the given word.
# Its three parameters are:
#   1. a list of characters
#   2. the start index of the segment being permuted
#   3. the end index of the segment being permuted
def permute_fun(a, s, e):
    """Print every permutation of a[s..e] (inclusive), one per line,
    restoring *a* to its original order before returning."""
    if s == e:
        print(''.join(a))
        return
    for k in range(s, e + 1):
        a[s], a[k] = a[k], a[s]
        permute_fun(a, s + 1, e)
        a[s], a[k] = a[k], a[s]  # backtrack: undo the swap
# The program calls the permutation routine on the word
# entered from the console.
my_start_word = input('Δώσε μία λέξη: ')
n = len(my_start_word)
a = list(my_start_word)
permute_fun(a, 0, n-1)
# This code is contributed by Bhavya Jain
"""
通过输入线上的获取第三方验证码图片地址,将其保存到本地image/origin
"""
import json
import requests
import os
import time
import base64
def main():
    """Download captcha images, label them via a recognition service and
    save each one as <label>_<timestamp>.jpg under the configured directory."""
    with open("conf/app_config.json", "r") as f:
        app_conf = json.load(f)
    # Directory the labelled captcha images are written to.
    origin_dir = app_conf["origin_image_dir"]
    headers = {
        'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/65.0.3325.146 Safari/537.36",
    }
    for index in range(1):
        # Fetch one captcha image; retry until a non-empty body arrives.
        while True:
            try:
                response = requests.request("GET", "http://zxgk.court.gov.cn/shixin/captchaNew.do?captchaId"
                                                   "=U3qUb6wgg93Hcs9TDXm6CFPTc8uL9BM0&random=0.6914391412872873",
                                            headers=headers, timeout=6)
                if response.text:
                    break
                else:
                    print("retry, response.text is empty")
            except Exception as ee:
                print(ee)
        # Ask the recognition service to label the captcha.
        headers = {
            'Content-Type': "application/json",
        }
        base64_data = base64.b64encode(response.content).decode()
        params_json = json.dumps({'channelId': '1', 'imageBase64': base64_data})
        # NOTE(review): the recognition endpoint URL is empty — fill in before use.
        res = requests.post("", data=params_json, headers=headers)
        print(res.text)
        # Save the image, named after the recognized text plus a timestamp.
        data = json.loads(res.text)
        img_name = "{}_{}.{}".format(data['result'], str(time.time()).replace(".", ""), "jpg")
        path = os.path.join(origin_dir, img_name)
        with open(path, "wb") as f:
            f.write(response.content)
    print("============== end ==============")
if __name__ == '__main__':
    main()
|
# This script is used for implement thrshlhold probing of matched filter response
# generated by mfr.py
# The input image is matched filter reponse with Gaussian filter.
# The output file is the binary image after thresholded by probes.
import numpy as np
import cv2
import sys
import timeit
import copy as cp
import os
def inbounds(shape, indices):
    '''
    Test if the given coordinates lie inside an image of the given shape.
    *shape* is (height, width) and *indices* is (y, x); both must have the
    same number of dimensions.  Returns True when every index is within
    [0, dim) for its axis.
    '''
    assert len(shape) == len(indices)
    return all(0 <= ind < dim for ind, dim in zip(indices, shape))
def setlable(img, labimg, x, y, label):
    '''
    Flood-fill labelling of an 8-connected foreground region.

    Writes *label* into *labimg* at (x, y) when img[y][x] is foreground and
    not yet labelled, then recurses into every in-bounds neighbour; already
    labelled or background pixels stop the recursion.
    '''
    if not img[y][x] or labimg[y][x]:
        return
    labimg[y][x] = label
    rows, cols = img.shape
    for dy in (-1, 0, 1):
        for dx in (-1, 0, 1):
            if dx or dy:
                ny, nx = y + dy, x + dx
                if 0 <= ny < rows and 0 <= nx < cols:
                    setlable(img, labimg, nx, ny, label)
def labelvessel(img, labimg, point, thresh, size, listcd):
    '''
    Region growing ("paint fill") from *point* over 8-connected pixels whose
    value is at least *thresh*.

    Every accepted pixel is marked 1 in *labimg* and its [x, y] coordinate
    appended to *listcd*.  A falsy *thresh* disables growing, as before.

    Fixes over the original:
    - Python-3 compatible exception handling (the original used the
      Python-2-only ``except Exception, e`` / ``print`` statement).
    - The 500-pixel cap is applied to len(listcd), the true region size;
      the original incremented a *local* ``size`` so the cap only bounded
      recursion depth.  *size* is kept for interface compatibility.
    '''
    x, y = point
    if img[y][x] >= thresh and not labimg[y][x] and thresh:
        labimg[y][x] = 1
        listcd.append([x, y])
        if len(listcd) > 500:
            # Region grew past the allowed maximum; stop descending.
            return False
        rows, cols = img.shape[:2]
        try:
            for dy in (-1, 0, 1):
                for dx in (-1, 0, 1):
                    if (dx or dy) and 0 <= y + dy < rows and 0 <= x + dx < cols:
                        labelvessel(img, labimg, (x + dx, y + dy), thresh, size, listcd)
        except Exception as exc:
            # Match the original's best-effort behaviour: report and swallow
            # (e.g. recursion-limit errors on very large regions).
            print("error:", exc, "in paint_fill...")
class Probe:
    '''
    The class Probe is to implement probes in the region of interest.
    To inicialize the probe, we need histogram threshold value, the minimum
    and maximum size of the generated region, the maximum value of fringing
    and maximum value of branch (Ttree).
    The init_queue function is to generate the inicial queue of probes with
    given image.
    The paint_fill function is to implement region growing with given threshld
    value.
    The test function is to test the given region with 5 different tests.
    The label fucntion is to mark the given piece into vessel.
    The addpoints funciton is to add new probes to the end of queue.
    The deletepoint funciton is to delete the probes that locate the previous
    veesel-classified pixel.
    '''
    def __init__(self, thresh, smin, smax, fringe, tree):
        # th: histogram-count threshold used by init_queue;
        # smin/smax: accepted region-size bounds; fg: max fringe ratio;
        # tree: minimum pixels-per-branch ratio.
        self.th = thresh
        self.smin = smin
        self.smax = smax
        self.fg = fringe
        self.tree = tree
    def init_queue(self, mfr0):
        # generate the histogram of MFR
        originalimg = cp.copy(mfr0)
        mfr = cp.copy(mfr0)
        h, w = mfr.shape
        hist,bins = np.histogram(mfr.ravel(),256,[0,256])
        # Zero out over-populated grey levels, then binarize the image by
        # keeping only pixels whose grey level survived.
        for i in range(len(hist)):
            if hist[i] > self.th:
                hist[i] = 0
        h, w = mfr.shape
        for y in range(h):
            for x in range(w):
                if not hist[mfr[y][x]]:
                    mfr[y][x] = 0
                else:
                    mfr[y][x] = 1
        threshimg = cp.copy(mfr)
        # optimal option
        # ret,mfr = cv2.threshold(mfr, 10, 255, cv2.THRESH_BINARY)
        # thinning the threshold image
        thmfr = thinning(mfr)
        thinningimage = cp.copy(thmfr)
        # erase branchpoints
        for y in range(1,h-1,1):
            for x in range(1,w-1,1):
                if x == 0 or y == 0 or x == w-1 or y == h-1:
                    continue
                # p2..p9: clockwise 8-neighbourhood starting at north.
                p2 = int(thmfr[y-1, x])
                p3 = int(thmfr[y-1, x+1])
                p4 = int(thmfr[y, x+1])
                p5 = int(thmfr[y+1, x+1])
                p6 = int(thmfr[y+1, x])
                p7 = int(thmfr[y+1, x-1])
                p8 = int(thmfr[y, x-1])
                p9 = int(thmfr[y-1,x-1])
                num = p2 + p3 + p4 + p5 + p6 + p7 + p8 + p9
                if num >= 3:
                    thmfr[y, x] = 0
        nonbranch = cp.copy(thmfr)
        # discard segments < 10 pixels
        lab = 1
        label = np.zeros(thmfr.shape)
        for y in range(h):
            for x in range(w):
                if not label[y][x] and thmfr[y][x]:
                    setlable(thmfr, label, x, y, lab)
                    lab += 1
        num = np.zeros(lab)
        for y in range(h):
            for x in range(w):
                num[label[y][x]-1] += 1
        for y in range(h):
            for x in range(w):
                if num[label[y][x]-1] <= 10:
                    thmfr[y][x] = 0
        remove = cp.copy(thmfr)
        # return initialized probe queue
        # find endpoints for queue
        queue = []
        for y in range(1,h-1,1):
            for x in range(1,w-1,1):
                if x == 0 or y == 0 or x == w-1 or y == h-1:
                    continue
                p2 = int(thmfr[y-1, x])
                p3 = int(thmfr[y-1, x+1])
                p4 = int(thmfr[y, x+1])
                p5 = int(thmfr[y+1, x+1])
                p6 = int(thmfr[y+1, x])
                p7 = int(thmfr[y+1, x-1])
                p8 = int(thmfr[y, x-1])
                p9 = int(thmfr[y-1,x-1])
                num = p2 + p3 + p4 + p5 + p6 + p7 + p8 + p9
                # exactly one foreground neighbour => skeleton endpoint
                if num == 1:
                    queue.append([x, y])
        pointimg = cp.copy(thmfr)
        return queue
    def paint_fill(self, img, labelimg, p, T):
        # Grow a candidate piece from p at threshold T; returns
        # (piece size, labelled image, list of piece coordinates).
        size = 0
        listcd = []
        labelvessel(img, labelimg, p, T, size, listcd)
        return (np.count_nonzero(labelimg), labelimg, listcd)
    def tests(self, size, piece, T, vessel, listcd):
        # Returns True while the piece is still acceptable at threshold T.
        if size > 30:
            print "--test 0 pass--"
            # first, the size must less than smax
            if size > self.smax:
                print "--test 1 false--"
                return False
            # second, the threshold must be positive
            if T <= 1:
                print "--test 2 false--"
                return False
            # third, the piece cannot touch the vessel-classied pixel
            logpiece = piece > 0
            logvessel = vessel > 0
            result = logpiece & logvessel
            if result.sum() > 0:
                print "--test 3 false--"
                return False
            # fourth, border-pixels-touching-another-piece / total-pixel-in-piece
            h, w = piece.shape[:2]
            border = 0
            for x, y in listcd:
                if x == 0 or y == 0 or x == w-1 or y == h-1:
                    continue
                p2 = int(piece[y-1, x])
                p3 = int(piece[y-1, x+1])
                p4 = int(piece[y, x+1])
                p5 = int(piece[y+1, x+1])
                p6 = int(piece[y+1, x])
                p7 = int(piece[y+1, x-1])
                p8 = int(piece[y, x-1])
                p9 = int(piece[y-1,x-1])
                num = p2 + p3 + p4 + p5 + p6 + p7 + p8 + p9
                vp2 = int(vessel[y-1, x])
                vp3 = int(vessel[y-1, x+1])
                vp4 = int(vessel[y, x+1])
                vp5 = int(vessel[y+1, x+1])
                vp6 = int(vessel[y+1, x])
                vp7 = int(vessel[y+1, x-1])
                vp8 = int(vessel[y, x-1])
                vp9 = int(vessel[y-1,x-1])
                touch = vp2 + vp3 + vp4 + vp5 + vp6 + vp7 + vp8 + vp9
                if num != 8 and touch:
                    border += 1
            # NOTE(review): under Python 2 this is integer (floor) division,
            # so the ratio is 0 unless border >= sum — confirm intent.
            if (border / logpiece.sum()) > self.fg:
                print "--test 4 false--"
                return False
            # fifth, total-pixel-in-piece / branches-in-piece
            listcd.sort()
            temppiece, indexskeleton = indirectindexing(listcd, piece)
            branch = 0
            for x, y in indexskeleton:
                if x == 0 or y == 0 or x == w-1 or y == h-1:
                    continue
                p2 = int(temppiece[y-1, x])
                p3 = int(temppiece[y-1, x+1])
                p4 = int(temppiece[y, x+1])
                p5 = int(temppiece[y+1, x+1])
                p6 = int(temppiece[y+1, x])
                p7 = int(temppiece[y+1, x-1])
                p8 = int(temppiece[y, x-1])
                p9 = int(temppiece[y-1,x-1])
                num = p2 + p3 + p4 + p5 + p6 + p7 + p8 + p9
                if num >= 3:
                    branch += 1
            # NOTE(review): raises ZeroDivisionError when branch == 0, and is
            # also integer division under Python 2 — confirm intent.
            if (logpiece.sum() / branch) < self.tree:
                print "--test 5 false--"
                return False
            print "--tests pass!--"
            return True
        else:
            print "--test 0 false--"
            # second, the threshold must be positive
            if T <= 1:
                print "--test 2 false--"
                return False
            # third, the piece cannot touch the vessel-classied pixel
            logpiece = piece > 0
            logvessel = vessel > 0
            result = logpiece & logvessel
            if result.sum() > 0:
                print "--test 3 false--"
                return False
            return True
    def label(self, vessel, tempvessel):
        # Merge the accepted piece into the vessel mask (elementwise OR).
        return (vessel | tempvessel)
    def addpoints(self, queue, vesselpiece, vessel, listcd):
        # Thin the accepted piece and push its skeleton endpoints as new probes.
        tempvessel, indexskeleton = indirectindexing(listcd, vesselpiece)
        # NOTE(review): 'piece' is not defined in this scope — this raises
        # NameError at runtime; 'vesselpiece' was probably intended.
        h, w = piece.shape[:2]
        for x, y in indexskeleton:
            if x == 0 or y == 0 or x == w-1 or y == h-1:
                continue
            p2 = int(tempvessel[y-1, x])
            p3 = int(tempvessel[y-1, x+1])
            p4 = int(tempvessel[y, x+1])
            p5 = int(tempvessel[y+1, x+1])
            p6 = int(tempvessel[y+1, x])
            p7 = int(tempvessel[y+1, x-1])
            p8 = int(tempvessel[y, x-1])
            p9 = int(tempvessel[y-1,x-1])
            num = p2 + p3 + p4 + p5 + p6 + p7 + p8 + p9
            if num == 1:
                point = [x, y]
                if not checkidentical(queue, point, vessel):
                    queue.append(point)
        return queue
    def deletepoint(self, queue, vessel, num):
        # Drop queued probes (from index num on) that sit on/next to pixels
        # already classified as vessel.
        que = cp.copy(queue)
        count = 0
        for j in range(num, len(queue), 1):
            p = [0, 0]
            p[1], p[0] = queue[j][1], queue[j][0]
            num = vessel[p[1]][p[0]] + vessel[p[1]+1][p[0]] + vessel[p[1]][p[0]+1] +\
                  vessel[p[1]][p[0]-1] + vessel[p[1]-1][p[0]]
            if num > 2:
                # count compensates for the indices shifted by earlier pops.
                que.pop(j-count)
                count += 1
        return que
def checkidentical(l, point, vessel):
    '''
    Return True when *point* already occurs in the list *l*.
    (*vessel* is unused; the parameter is kept for interface compatibility.)
    '''
    return point in l
def indirectindexing(listcd, img):
    '''
    This function used indrect index approach to thin the given piece.

    Iterates the two thinning sub-passes over only the pixels listed in
    *listcd* until fewer than 15 pixels change between rounds; returns the
    thinned image and the list of surviving skeleton coordinates.
    '''
    prev = np.zeros_like(img)
    diff = np.ones_like(img)
    indexskeleton = []
    while cv2.countNonZero(diff) > 15:
        print " find skeleton using indirect image indexing..."
        img, indexskeleton = indirectIteration(listcd, img, indexskeleton, 0)
        img, indexskeleton = indirectIteration(listcd, img, indexskeleton, 1)
        diff = cv2.absdiff(img, prev)
        prev = cp.copy(img)
    return img, indexskeleton
def indirectIteration(listcd, im, indexskeleton, iter):
    '''
    One Zhang-Suen style thinning pass restricted to the pixels in *listcd*.

    Pixels that survive the pass are set to 1 in *im* and appended (as
    [x, y]) to *indexskeleton*; deleted pixels are zeroed.  *iter* selects
    which of the two sub-iteration deletion masks is applied.
    '''
    rows, cols = im.shape[:2]
    keep = np.ones(im.shape)
    # Clockwise 8-neighbourhood starting at north: p2..p9 in the paper.
    ring = ((-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1))
    for x, y in listcd:
        if x == 0 or y == 0 or x == cols - 1 or y == rows - 1:
            continue
        nb = [int(im[y + dy, x + dx]) for dy, dx in ring]
        # A: number of 0->1 transitions around the ring; B: foreground count.
        A = sum(1 for j in range(8) if nb[j] == 0 and nb[(j + 1) % 8] == 1)
        B = sum(nb)
        if iter == 0:
            m1 = nb[0] * nb[2] * nb[4]
            m2 = nb[2] * nb[4] * nb[6]
        else:
            m1 = nb[0] * nb[2] * nb[6]
            m2 = nb[0] * nb[4] * nb[6]
        if A == 1 and 2 <= B <= 6 and m1 == 0 and m2 == 0:
            keep[y, x] = 0
    for x, y in listcd:
        if im[y, x] and keep[y, x]:
            im[y, x] = 1
            indexskeleton.append([x, y])
        else:
            im[y, x] = 0
    return im, indexskeleton
def thinning(img):
    '''
    This function is to thin the image to generate initical queue of probes.

    Repeats the two thinning sub-passes until fewer than 15 pixels change
    between rounds, then returns the skeletonized image.
    '''
    prev = np.zeros_like(img)
    diff = np.ones_like(img)
    while cv2.countNonZero(diff) > 15:
        print " thinning..."
        img = thinningIteration(img, 0)
        img = thinningIteration(img, 1)
        diff = cv2.absdiff(img, prev)
        prev = cp.copy(img)
    return img
def thinningIteration(im, iter):
    '''
    One sub-iteration of Zhang-Suen thinning applied to the whole image.

    Deletable contour pixels are removed in place and the result is also
    binarized (every surviving pixel becomes 1).  *iter* selects which of
    the two deletion masks is used.
    '''
    rows, cols = im.shape[:2]
    keep = np.ones(im.shape)
    # Clockwise 8-neighbourhood starting at north: p2..p9 in the paper.
    ring = ((-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1))
    for y in range(1, rows - 1):
        for x in range(1, cols - 1):
            nb = [int(im[y + dy, x + dx]) for dy, dx in ring]
            # A: number of 0->1 transitions around the ring; B: foreground count.
            A = sum(1 for j in range(8) if nb[j] == 0 and nb[(j + 1) % 8] == 1)
            B = sum(nb)
            if iter == 0:
                m1 = nb[0] * nb[2] * nb[4]
                m2 = nb[2] * nb[4] * nb[6]
            else:
                m1 = nb[0] * nb[2] * nb[6]
                m2 = nb[0] * nb[4] * nb[6]
            if A == 1 and 2 <= B <= 6 and m1 == 0 and m2 == 0:
                keep[y, x] = 0
    for y in range(rows):
        for x in range(cols):
            im[y, x] = 1 if im[y, x] and keep[y, x] else 0
    return im
def touchpieces(vessel, temp):
    '''
    Return True when exactly two of the previously accepted pieces in *temp*
    overlap the current vessel mask — i.e. the new piece bridges two pieces.
    '''
    mask = vessel > 0
    overlapping = 0
    for candidate in temp:
        if np.count_nonzero(mask & (candidate > 0)):
            overlapping += 1
    return overlapping == 2 and bool(len(temp))
# img is the input image of matched filter response.
img = cv2.imread(sys.argv[1])
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# define a maximum number of loop in the iteration of threshold probing.
loop_limit = 5000
# to avoid the chaging of original image, we make a copy of the image.
tempimg = cp.copy(img)
# set the parameters
thresh = 5000
smin = 140
smax = 3000
fringe = 0.18
tree = 252
# initialize the variable of probe.
probe = Probe(thresh, smin, smax, fringe, tree)
# temp is to store the previous piece [piece0, piece1, ...]
temp = []
# start to initialized queue
que = probe.init_queue(tempimg)
num = 0 # number of element in queue
# vessel is to store the final vessel piexl
vessel = np.zeros_like(img)
while num < len(que) and num < loop_limit: # avoide too large # of probes
    print " queue loop: ", num, "lenth of queue: ", len(que)
    # tempvessel is to store the temperary piece to be tested
    tempvessel = np.zeros_like(img)
    # define the initial threshold value
    T = img[que[num][1]][que[num][0]]
    if T <= 0:
        num += 1
        continue
    size = 0 # the size of piece
    # start paint_fill pocessing
    (size, piece, listcd) = probe.paint_fill(img, tempvessel, que[num], T) # piece equals tempvessel
    # start testing: lower the threshold until a test fails or T runs out
    while probe.tests(size, piece, T, vessel, listcd):
        T -= 1
        if T <= 0:
            print "--Threshold to low--"
            break
        # est pass! T = T - 1
        # start paint_fill pocessing
        (size, piece, listcd) = probe.paint_fill(img, tempvessel, que[num], T)
    print " piece size: ", size
    if size < smax and size > smin or touchpieces(vessel, temp):
        # test failed! start label vessel
        temp.append(tempvessel)
        vessel = probe.label(vessel, tempvessel)
        # finish labeling, start to add endpoints to queue
        que = probe.addpoints(que, tempvessel, vessel, listcd)
        # delete the point within the vessel
        que = probe.deletepoint(que, vessel, num)
    else:
        # test failed, but piece size is out of bound or contact more than two piece
        pass
    # to next point in queue
    num += 1
# write image
cv2.imwrite("vessel"+".png", vessel*255)
print " finish all pocessing! Start showing imagesimage"
|
from reading import *
from database import *
# Below, write:
# *The cartesian_product function
# *All other functions and helper functions
# *Main code that obtains queries from the keyboard,
# processes them, and uses the below function to output csv results
# NOTE: the tables used in most examples if not all examples are as follows:
# {'book.year': ['1979', '2014', '2015', '2014'],
# 'book.title': ['Godel Escher Bach', 'What if?', 'Thing Explainer', 'Alan Turing: The Enigma'],
# 'book.author': ['Douglas Hofstadter', 'Randall Munroe', 'Randall Munroe', 'Andrew Hodges']}
# {'alpha.#': ['1', '2', '3'],
# 'alpha.span': ['uno', 'dos', 'tres'],
# 'alpha.a': ['a', 'b', 'c']}
# {'o.year': ['2010', '2003', '1997', '1997'],
# 'o.category': ['Animated Feature Film', 'Directing', 'Directing', 'Best Picture'],
# 'o.title': ['Toy Story 3', 'The Lord of the Rings: The Return of the King', 'Titanic', 'Titanic']}
def cartesian_product(table1, table2):
    '''(Table, Table) -> Table
    Return the cartesian (cross) product of the two tables: the result has
    the columns of both inputs and one row for every pairing of a row from
    table1 with a row from table2, in left-major order.
    REQ: this function gets two tables
    REQ: none of the given tables are empty.
    '''
    # Concatenate the column headers of both inputs.
    combined_columns = table1.get_column_names() + table2.get_column_names()
    left_rows = table1.get_rows()
    right_rows = table2.get_rows()
    # Pair every left row with every right row; list concatenation builds a
    # fresh row each time, so the source tables are never mutated.
    combined_rows = [left + right
                     for left in left_rows
                     for right in right_rows]
    return Table(combined_columns, combined_rows)
def run_query(base, user_input):
    '''(Database, str) -> Table
    Runs the query.
    query will be in the form:
    [select, col_names, from, table_names, where, where clause]
    REQ: user input is in proper SQuEaL format.
    '''
    # split user input into whitespace-separated tokens
    query = user_input.split()
    # get all of the where clauses (token 5, comma separated), if present
    if (len(query) > 4):
        where_clause = query[5].split(',')
    # the FROM token (index 3) lists the table names, comma separated
    table_names = query[3].split(',')
    # collect every column name of every table mentioned in FROM
    col_names = []
    for e in table_names:
        col_names += base.get_table(e).get_column_names()
    # get the table needed if its only from one table
    if (len(table_names) == 1):
        needed_rows = create_row_list(table_names, col_names, base)
        # initialize the variable t2 just in case where is called on one
        t2 = Table(col_names,needed_rows)
    # getting from two different tables
    else:
        # get the first two tables
        t1 = base.get_table(table_names[0])
        t2 = base.get_table(table_names[1])
        # multiply the first two tables in a cartesian product
        t2 = cartesian_product(t1, t2)
        # get the rows needed from the cartesian product
        needed_rows = cart_create_row_list(t2, base)
        # if more than two tables are being multiplied
        if (len(table_names) > 2):
            # counter starts at 1 because the first two tables have
            # already been multiplied
            i = 1
            while (i <= (len(table_names) - 2)):
                # fold each remaining table into the running product
                t2 = cartesian_product(t2,base.get_table(table_names[i+1]))
                # get the rows from the new cartesian table
                needed_rows = cart_create_row_list(t2, base)
                i+=1
    # where: only runs if a where clause was inputted
    if (len(query) > 4):
        # save the where clauses to a list
        where_clause = query[5].split(',')
        # do the first where clause against the full (possibly product) table
        needed_rows = do_where_clause(where_clause[0], t2)
        # apply any remaining clauses, each against the previous result
        if (len(where_clause) >= 2):
            next_t_for_clause = Table(col_names, needed_rows)
            i = 1
            while (i < (len(where_clause))):
                # do the next where clause
                needed_rows = do_where_clause(where_clause[i], next_t_for_clause)
                # make a new table with the previous where clause executed
                next_t_for_clause = Table(col_names, needed_rows)
                i += 1
    # after the where clauses have been handled, make the next table
    final_table = Table(col_names, needed_rows)
    # select: keep only the requested columns unless the user asked for *
    if (query[1] != '*'):
        final_table = select_wanted_columns(query, final_table)
    # after from, where, and select have been handled, return the final table
    return final_table
#function to select each column
def select_wanted_columns(query, result_query):
    '''(list, Table) -> Table
    Build a new Table containing only the columns named in the SELECT part
    of the query (query[1], a comma-separated list of column names).
    REQ: a column has been selected.
    '''
    # the wanted column names come from the SELECT token of the query
    col_names = query[1].split(',')
    # fetch the selected columns in the order they were requested
    # (the original kept a second counter `a` that always equaled the
    # loop index; it is removed)
    selected_columns = [result_query.get_column(name) for name in col_names]
    # convert from column form into row form
    rows = set_proper_list(selected_columns)
    # make and return a new table with only the selected columns
    return Table(col_names, rows)
def create_row_list(table_names, column_names, base):
    '''(list, list, Database) -> list of list of str
    Build the row data for a single-table query: fetch every named column
    from the (single) table and convert the columns into row form.
    Note: this function works for 1 and only 1 table.
    REQ: only one table is given
    REQ: the database given isnt empty
    REQ: a table name is actually called.'''
    # the query names exactly one table, so look it up once.
    # (the original wrapped its while-loop in an outer for-loop that did
    # nothing after the first pass, and re-fetched the table per column.)
    table = base.get_table(table_names[0])
    # one list per column, each holding a copy of that column's values
    columns = [list(table.get_column(name)) for name in column_names]
    # rearrange from column form into row form and return it
    return set_proper_list(columns)
def cart_create_row_list(table, base):
    '''(Table, list) -> list of list of str
    Convert every column of an already-built (cartesian product) table into
    row form. Duplicate of create_row_list, except the Table is passed in
    directly instead of being looked up in the database.
    REQ: the table given has columns.'''
    # NOTE: base is unused but kept for interface compatibility with callers.
    # (The original's redundant outer for-loop — dead after the first pass —
    # is removed.)
    columns = [list(table.get_column(name))
               for name in table.get_column_names()]
    # return the columns rearranged into row form
    return set_proper_list(columns)
def set_proper_list(total_list):
    '''(list of list of str) -> list
    Transpose a list of lists: rearranges from column form to row form and
    vice versa.
    REQ: a list of a list is given
    '''
    # the length of the first inner list fixes the number of output rows
    width = len(total_list[0])
    # element c of every inner list becomes output row c
    return [[inner[c] for inner in total_list] for c in range(width)]
def do_where_clause(where_clause, table):
    '''(str, Table) -> list of list
    Parse a single where clause ("col=val", "col=col", "col>val" or
    "col>col") and return the rows of table that satisfy it.
    REQ: A where clause is given
    REQ: A table is given.'''
    # clause == True means '=' comparison, False means '>'
    clause = True
    # find the equal sign in the where clause
    index = where_clause.find('=')
    # if it wasnt there (.find returns -1) this must be a '>' clause;
    # .index raises ValueError if neither operator is present
    if index == (-1):
        index = where_clause.index('>')
        clause = False
    # text left of the operator names the column being compared
    col_1 = where_clause[:index]
    # text right of the operator: either a column name or a literal value
    col_2 = where_clause[index+1:]
    # decide whether the right-hand side is a column or a literal by
    # scanning the table's column names
    col_names = table.get_column_names()
    is_column = False
    for i in range(len(col_names)):
        # if col_2 matches a column name, two columns are being compared
        if (col_2 == col_names[i]):
            is_column = True
    # resolve the left side into actual column data
    col_1 = table.get_column(col_1)
    # and the right side too, but only when it names a column
    if (is_column is True):
        col_2 = table.get_column(col_2)
    # delegate the row filtering to handle_clause
    hold_list = handle_clause(table, col_1, col_2, is_column, clause)
    return hold_list
def handle_clause(table, col_1, col_2, is_column, clause):
    '''(Table, list, list/str, bool, bool) -> list of list of str
    Select the rows of table that satisfy the where-clause comparison.
    clause=True means equality ('='), clause=False means greater-than ('>').
    is_column=True means col_2 is a column (compared row-by-row); otherwise
    col_2 is a literal value compared against every entry of col_1.
    REQ: All parameters are given
    REQ: table given isnt empty
    REQ: coloumns arent empty
    '''
    matching_rows = []
    for idx, left in enumerate(col_1):
        # right-hand side: the paired cell of col_2, or the literal value
        right = col_2[idx] if is_column is True else col_2
        # equality test for '=', strict greater-than for '>'
        keep = (left == right) if clause is True else (left > right)
        if keep:
            matching_rows.append(table.get_spec_row(idx))
    return matching_rows
# interactive entry point: keep running queries until a blank line is entered
if(__name__ == "__main__"):
    # read_database() comes from the reading/database star-imports above;
    # it builds the Database the queries run against
    database = read_database()
    query = input("Enter a SQuEaL query, or a blank line to exit:")
    while (query != ''):
        # run the query and print the resulting table in csv form
        d = run_query(database, query)
        d.print_csv()
        query = input("Enter a SQuEaL query, or a blank line to exit:")
|
# cook your dish here
def diet(n, k, a):
    """Simulate n days of food stock.

    Day d adds a[d-1] items to the running stock and consumes k. Returns
    'YES' if the stock never goes negative, otherwise 'NO d' where d is the
    1-based first day the stock runs out.
    """
    stock = 0
    for day in range(1, n + 1):
        # net change for the day: delivery minus consumption
        stock += a[day - 1] - k
        if stock < 0:
            return 'NO ' + str(day)
    return 'YES'
# read the number of test cases, then for each case: n (days) and k (daily
# consumption) on one line, the n deliveries on the next; print the verdict
t=int(input())
for i in range(t):
    n,k=map(int,input().split())
    a=list(map(int,input().split()))
    print(diet(n,k,a))
# -*- coding: utf-8 -*-
from flask_oauth import OAuth
# single OAuth registry; the gFit remote app used below is presumably
# provided by the star import from credentials -- TODO confirm
oauth = OAuth()
from credentials import *
from flask import session
from flask import Flask
from flask.ext.pymongo import PyMongo
# static_url_path='' serves static files from the app root URL
app = Flask(__name__, static_url_path='')
mongo = PyMongo(app)
@gFit.tokengetter
def get_gFit_token(token=None):
print 'here\n'
print session.get('gFit_token')
@app.route('/')
def hello():
    """Root route: always responds with the literal body 'success'."""
    response_body = 'success'
    return response_body
@app.route('/login')
def login():
    # start the OAuth authorization flow; the provider redirects the user
    # back to /callback when done
    return gFit.authorize(callback='http://localhost:5000/callback')
@app.route('/callback')
def callback():
    """OAuth redirect target; currently a stub returning a placeholder body."""
    placeholder = '!!!!'
    return placeholder
@app.route('/index')
def home_page():
    # serve the prebuilt index page from the static folder
    #online_users = mongo.db.users.find({'online': True})
    # return 'the index?'
    return app.send_static_file('index.html')
    #return render_template(('index.html'),online_users=online_users)
if __name__ == "__main__":
    # a secret key is required for Flask session support;
    # NOTE(review): hard-coded placeholder -- replace for production use
    app.secret_key = 'super secret key'
    app.run()
|
# This class takes care of coding and decoding files from and to base64,
# and of converting base64 back to a string.
class B64Ops:
    """Utility operations for encoding/decoding data to and from base64.

    Implements the behavior the original placeholder class documented but
    left empty (``pass``); adding methods is backward compatible.
    """

    @staticmethod
    def encode_bytes(data):
        """Return the base64 encoding of *data* (bytes) as an ASCII str."""
        import base64
        return base64.b64encode(data).decode('ascii')

    @staticmethod
    def decode_bytes(text):
        """Return the raw bytes decoded from the base64 string *text*."""
        import base64
        return base64.b64decode(text)

    @staticmethod
    def decode_to_string(text, encoding='utf-8'):
        """Decode base64 *text* and interpret the payload using *encoding*."""
        return B64Ops.decode_bytes(text).decode(encoding)

    @staticmethod
    def encode_file(path):
        """Read the file at *path* and return its base64 encoding as str."""
        with open(path, 'rb') as fh:
            return B64Ops.encode_bytes(fh.read())

    @staticmethod
    def decode_file(text, path):
        """Decode base64 *text* and write the raw bytes to *path*."""
        with open(path, 'wb') as fh:
            fh.write(B64Ops.decode_bytes(text))
import sys
sys.path.insert(0, './constraint')
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
import time
from holder import *
from util import *
from n1 import *
from n2 import *
class WithinLayer(torch.nn.Module):
    """Applies within-sentence constraint layers to attention scores.

    Holds a list of constraint layers (N1/N2, chosen by ``opt.within_constr``)
    and, in ``forward``, adds their rho_w-scaled outputs to the raw scores.
    """
    def __init__(self, opt, shared):
        super(WithinLayer, self).__init__()
        self.opt = opt
        self.shared = shared
        #self.num_att_labels = opt.num_att_labels
        # instantiate the requested constraint layers from a csv name list
        self.within_constr = self.get_within_constr(opt.within_constr)
        # which attention directions the constraints apply to ('1', '2')
        self.constr_on_att1 = False
        self.constr_on_att2 = False
        for t in self.opt.constr_on.split(','):
            if t == '1':
                self.constr_on_att1 = True
            elif t == '2':
                self.constr_on_att2 = True
            else:
                pass
        # constant zero used when a direction has no constraint output
        self.zero = Variable(torch.zeros(1), requires_grad=False)
        # penalty weight rho_w is a fixed (non-trainable) parameter
        rho_w = torch.ones(1) * opt.rho_w
        if opt.gpuid != -1:
            rho_w = rho_w.cuda()
            self.zero = self.zero.cuda()
        self.rho_w = nn.Parameter(rho_w, requires_grad=False)
        if len(self.within_constr) != 0:
            print('within-layer constraint enabled')

    # DEPRECATED
    def grow_rho(self, x):
        """Schedule for growing rho as a function of x (epoch); unused."""
        rs = None
        if self.opt.grow_rho == 'log':
            # the log_10(epoch)
            rs = torch.log(torch.ones(1) * float(x)) / torch.log(torch.ones(1) * 10.0)
        elif self.opt.grow_rho == '1log':
            # the log_10(epoch) + 1
            rs = torch.log(torch.ones(1) * float(x)) / torch.log(torch.ones(1) * 10.0) + 1.0
        elif self.opt.grow_rho == 'inv':
            # 1 - 1/epoch
            rs = torch.ones(1) - torch.ones(1) / (torch.ones(1) * float(x))
        # NOTE(review): rs stays None (and .cuda() would fail) for any other
        # grow_rho value -- callers must pass a recognized mode
        if self.opt.gpuid != -1:
            rs = rs.cuda()
        return rs

    # the function that grabs constraints
    def get_within_constr(self, names):
        """Build the constraint layers named in the csv string *names*."""
        layers = []
        if names == '':
            return layers
        for n in names.split(','):
            if n == 'n1':
                layers.append(N1(self.opt, self.shared))
            elif n == 'n2':
                layers.append(N2(self.opt, self.shared))
            else:
                print('unrecognized constraint layer name: {0}'.format(n))
                assert(False)
        return layers

    def forward(self, score1, score2, att1, att2):
        """Return [score1, score2] with rho_w-weighted constraint terms added.

        Assumes att1/att2 are attention tensors whose last two dims are the
        two sentence lengths (att1 is transposed before the constraint
        layer) -- TODO confirm shapes against the caller.
        """
        batch_l = self.shared.batch_l
        sent_l1 = self.shared.sent_l1
        sent_l2 = self.shared.sent_l2
        # logic pass
        batch_l = self.shared.batch_l
        datt1_ls = []
        datt2_ls = []
        for layer in self.within_constr:
            if self.constr_on_att1:
                datt1_ls.append(layer(att1.transpose(1,2)).transpose(1,2).contiguous().view(1, batch_l, sent_l1, sent_l2))
            if self.constr_on_att2:
                datt2_ls.append(layer(att2).view(1, batch_l, sent_l2, sent_l1))
        # sum the per-layer contributions (stays zero if no layer fired)
        datt1 = self.zero
        datt2 = self.zero
        if len(datt1_ls) != 0:
            datt1 = torch.cat(datt1_ls, 0).sum(0)
        if len(datt2_ls) != 0:
            datt2 = torch.cat(datt2_ls, 0).sum(0)
        # stats: count of examples with any nonzero constraint signal on att2
        self.shared.w_hit_cnt = (datt2.data.sum(-1).sum(-1) > 0.0).sum()
        rho_w = self.rho_w
        constrained_score1 = score1 + rho_w * datt1
        constrained_score2 = score2 + rho_w * datt2
        # stats
        self.shared.rho_w = rho_w
        return [constrained_score1, constrained_score2]
|
import sys
import random
import logging
import numpy as np
import networkx as nx
from pgmpy.models.BayesianModel import BayesianModel
from pgmpy.factors.discrete import TabularCPD
from pgmpy.sampling import BayesianModelSampling
from asciinet import graph_to_ascii
import pcalg
from gsq import ci_tests
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(levelname)s %(message)s")
# def graph_complete(nodes, undirected=True):
# if (undirected):
# g = nx.Graph()
# else:
# g = nx.DiGraph()
# for node in nodes:
# g.add_node(node)
# for s in range(0, len(nodes)):
# for t in range(s+1, len(nodes)):
# g.add_edge(nodes[s], nodes[t])
# if not (undirected):
# g.add_edge(nodes[t], nodes[s])
# return g
def get_node_name(n):
    """Map node index n (0-25) to a single capital letter 'A'..'Z'."""
    letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    return letters[n]
def create_random_skeleton(node_count, edge_count):
    """Build a random DAG with node_count lettered nodes and edge_count edges.

    Candidate directed edges are shuffled, then added one at a time; any
    edge that introduces a cycle is backed out again.
    """
    nodes = [ get_node_name(i) for i in range(node_count) ]
    g = nx.DiGraph()
    g.add_nodes_from(nodes)
    # all ordered pairs of distinct nodes, visited in random order
    edge_pool = [ (s, t) for s in nodes for t in nodes if (s != t) ]
    random.shuffle(edge_pool)
    while g.number_of_edges() < edge_count:
        s, t = edge_pool.pop(0)
        g.add_edge(s, t)
        if not nx.algorithms.dag.is_directed_acyclic_graph(g):
            # adding this edge created a cycle; remove it again
            logging.debug("rejected edge {} -> {}".format(s, t))
            g.remove_edge(s, t)
        else:
            logging.debug("added edge {} -> {}".format(s, t))
    return g
def create_random_cpds(g, card):
    """Create a random conditional probability distribution for every node.

    A node is processed only after all of its parents have cpds; otherwise
    it is pushed back onto the pool and retried later.
    NOTE(review): relies on g.nodes() being a shuffleable list and zip()
    returning a list -- i.e. Python 2 / networkx 1.x semantics.
    """
    node_pool = g.nodes()
    node_count = len(node_pool)
    random.shuffle(node_pool)
    cpds = {}
    dones = set()
    while len(dones) < node_count:
        node = node_pool.pop(0)
        parents = set(g.predecessors(node))
        logging.debug("creating cpd for {} (parents: {})".format(node, parents))
        if len(parents) == 0:
            # root node: one distribution over its card values
            cpds[node] = [ [p] for p in random_distrib(card) ]
            dones.add(node)
        elif parents.issubset(dones):
            # construct a cpd whose size depends on the number of parents and the cardinality
            distribs = [random_distrib(card) for i in range(card ** len(parents))]
            cpds[node] = zip(*distribs)
            dones.add(node)
        else:
            # some parent not processed yet; retry this node later
            node_pool.append(node)
            continue
        if node in dones:
            logging.debug("cpd for {}: {})".format(node, cpds[node]))
    return cpds
def random_distrib(sz):
    """Return a random discrete distribution: sz probabilities summing to 1."""
    probs = []
    remaining = 1.0
    # carve sz-1 random slices off the remaining probability mass ...
    for _ in range(sz - 1):
        portion = random.uniform(0.0, remaining)
        remaining -= portion
        probs.append(portion)
    # ... and the leftover mass becomes the final entry
    probs.append(remaining)
    random.shuffle(probs)
    return probs
# >>> student = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
# >>> cpd_d = TabularCPD('diff', 2, [[0.6], [0.4]])
# >>> cpd_i = TabularCPD('intel', 2, [[0.7], [0.3]])
# >>> cpd_g = TabularCPD('grade', 3, [[0.3, 0.05, 0.9, 0.5], [0.4, 0.25,
# ... 0.08, 0.3], [0.3, 0.7, 0.02, 0.2]],
# ... ['intel', 'diff'], [2, 2])
# >>> student.add_cpds(cpd_d, cpd_i, cpd_g)
# >>> inference = BayesianModelSampling(student)
# >>> inference.forward_sample(size=2, return_type='recarray')
# rec.array([(0, 0, 1), (1, 0, 2)], dtype=
# [('diff', '<i8'), ('intel', '<i8'), ('grade', '<i8')])
def create_random_dag(node_count, edge_count, card):
    """Create a random DAG and attach a random cpd to every node.

    NOTE(review): uses dag.node[...] attribute access (networkx 1.x API).
    """
    logging.debug("creating skeleton")
    dag = create_random_skeleton(node_count, edge_count)
    logging.debug("creating cpds")
    cpds = create_random_cpds(dag, card)
    # stash each node's cpd as a node attribute for sample_dag to read
    for node, cpd in cpds.items():
        dag.node[node]['cpd'] = cpd
    return dag
def sample_dag(dag, num):
    """Draw num forward samples from the Bayesian network encoded by dag.

    Each node's 'cpd' attribute is wrapped in a pgmpy TabularCPD, the model
    is assembled, and forward sampling returns a recarray of samples.
    """
    #zzz this loses disconnected nodes!!!
    # bayesmod = BayesianModel(dag.edges())
    # bayesmod = BayesianModel(dag)
    bayesmod = BayesianModel()
    # add nodes explicitly so isolated nodes are kept too
    bayesmod.add_nodes_from(dag.nodes())
    bayesmod.add_edges_from(dag.edges())
    tab_cpds = []
    # cardinality of each variable = number of rows in its cpd table
    cards = { node: len(dag.node[node]['cpd']) for node in dag.nodes() }
    for node in dag.nodes():
        parents = dag.predecessors(node)
        cpd = dag.node[node]['cpd']
        if parents:
            # conditional node: evidence variables and their cardinalities
            parent_cards = [ cards[par] for par in parents ]
            logging.debug("TablularCPD({}, {}, {}, {}, {})".format(node, cards[node], cpd,
                          parents, parent_cards))
            tab_cpds.append(TabularCPD(node, cards[node], cpd, parents, parent_cards))
        else:
            logging.debug("TablularCPD({}, {}, {})".format(node, cards[node], cpd))
            tab_cpds.append(TabularCPD(node, cards[node], cpd))
    logging.debug("cpds add: {}".format(tab_cpds))
    print "model variables:", bayesmod.nodes()
    for tab_cpd in tab_cpds:
        print "cpd variables:", tab_cpd.variables
    bayesmod.add_cpds(*tab_cpds)
    logging.debug("cpds get: {}".format(bayesmod.get_cpds()))
    inference = BayesianModelSampling(bayesmod)
    logging.debug("generating data")
    recs = inference.forward_sample(size=num, return_type='recarray')
    return recs
def run_pc(data_orig, col_names=None):
    """Run the PC algorithm on sampled records; return the estimated CPDAG.

    data_orig is a recarray of samples. When col_names is given, the
    learned graph's integer node ids are relabeled to those names in place.
    """
    data = np.array([ list(r) for r in data_orig ])
    # estimate the skeleton with a discrete conditional-independence test
    (skel_graph, sep_set) = pcalg.estimate_skeleton(indep_test_func=ci_tests.ci_test_dis,
                                                    data_matrix=data,
                                                    alpha=0.01)
    # gdir = nx.DiGraph()
    # gdir.add_nodes_from(g.nodes())
    # gdir.add_edges_from(g.edges())
    # orient edges to get the completed partially directed acyclic graph
    dag = pcalg.estimate_cpdag(skel_graph, sep_set)
    if col_names:
        name_map = { i: col_names[i] for i in range(len(dag.nodes())) }
        nx.relabel.relabel_nodes(dag, name_map, copy=False)
    return dag
#####################################
if __name__ == '__main__':
    # usage: script <num_nodes> <num_edges> <attr_cardinality>
    num_nodes = int(sys.argv[1])
    num_edges = int(sys.argv[2])
    attr_card = int(sys.argv[3])
    dag = create_random_dag(num_nodes, num_edges, attr_card)
    print graph_to_ascii(dag)
    # sample data from the ground-truth dag, then try to recover the
    # structure with the PC algorithm and compare
    recs = sample_dag(dag, 10000)
    print dag.nodes()
    print recs[:10]
    for node in dag.nodes():
        print "col", node, recs[node][:10]
    gdir = run_pc(recs)
    print graph_to_ascii(gdir)
    print "graphs are isomorphic: ", nx.algorithms.isomorphism.is_isomorphic(dag, gdir)
|
import numpy as np
#from Laser import Laser, Map
from laser import Laser, Map
import math
import matplotlib.pyplot as plt
from copy import copy
# map geometry: grid size, world offset and cell resolution
height = 467
width = 617
offset_x =0.0
offset_y =0.0
resolution = 0.1
# Create map and laser scans
occ_map = Map.readFromTXT('../map.txt', width, height, offset_x, offset_y, resolution)
max_range = 50.0
no_of_beams = 181
min_angle = -math.pi/2.0
# angular spacing between adjacent beams across the 180-degree sweep
resolution_angle = math.pi/(no_of_beams-1)
noise_variance = 0.0
# laser pose offsets passed to the Laser constructor
laser_pos_x = 1.2
laser_pos_y = 0.0
laser_angle = 0.0 * (math.pi/180.0)
laser = Laser(max_range, min_angle, resolution_angle, no_of_beams, noise_variance, occ_map, laser_pos_x, laser_pos_y, laser_angle)
# Read positions
positions = np.loadtxt('../data_pose.txt')
# Read corresponding laser scans
#laser_scans = np.loadtxt('../map_scans.txt') #, delimiter=','
# Go through each position and generate a new scan
# Validate each generated scan against recorded ones
counter = 0
all_ranges = []
n = 2000
for i in range(n):
    # simulate a scan from the recorded pose (x, y, theta)
    ranges = laser.scan(positions[i,0], positions[i,1], positions[i,2])
    all_ranges.append(copy(ranges))
    # progress log every 1000 iterations
    if not counter%1000: print('iter', counter)
    counter+=1
all_ranges=np.array(all_ranges)
np.savetxt('../map_scans_py.txt', all_ranges)
#print("MSE", np.sum(np.abs(all_ranges-laser_scans))/all_ranges.shape[0])
|
# 1. Create a dictionary called zodiac with the following information.
# Each key is the name of the zodiac
# Aries - The Warrior
# Taurus - The Builder
# Gemini - The Messenger
# Cancer - The Mother
# Leo - The King
# Virgo -The Analyst
# Libra - The Judge
# Scorpio - The Magician
# Sagittarius - the Gypsy
# Capricorn - the Father
# Aquarius - The Thinker
# Pisces - TheMystic
# zodiac = {
# "Aries" : "The Warrior",
# "Taurus" : "The Builder",
# "Gemini" : "The Messenger",
# "Cancer" : "The Mother",
# "Leo" : "The King",
# "Virgo" : "The Analyst",
# "Libra" : "The Judge",
# "Scorpio" : "The Magician",
# "Sagittarius" : "the Gypsy",
# "Capricorn" : "the Father",
# "Aquarius" : "The Thinker",
# "Pisces" : "TheMystic"
# }
# myZodiac = zodiac["Aries"]
# print(myZodiac)
# 1a. Retrieve information about your zodiac from the zodiac dictionary
# 2. Given the following dictionary
# phonebook_dict = {
# 'Alice': '703-493-1834',
# 'Bob': '857-384-1234',
# 'Elizabeth': '484-584-2923'
# }
# # phonebook_dict["Kareem"] = "938-489-1234"
# # 2a. Print Elizabeth's phone number
# print(phonebook_dict["Elizabeth"])
# # 2b. Add a entry to the dictionary: Kareem's number is 938-489-1234.
# phonebook_dict["Kareem"] = "938-489-1234"
# print(phonebook_dict)
# # 2c. Delete Alice's phone entry.
# del phonebook_dict["Alice"]
# print(phonebook_dict)
# # 2d. Change Bob's phone number to '968-345-2345'.
# phonebook_dict["Bob"] = "968-345-2345"
# print(phonebook_dict)
# # 2e. Print all the phone entries.
# for key, value in phonebook_dict.items():
# print (f"{key} : {value}")
# 3. Nested dictionaries
# ramit = {
# 'name': 'Ramit',
# 'email': 'ramit@gmail.com',
# 'interests': ['movies', 'tennis'],
# 'friends': [
# {
# 'name': 'Jasmine',
# 'email': 'jasmine@yahoo.com',
# 'interests': ['photography', 'tennis']
# },
# {
# 'name': 'Jan',
# 'email': 'jan@hotmail.com',
# 'interests': ['movies', 'tv']
# }
# ]
# }
# # 3a. Write a python expression that gets the email address of Ramit.
# ramit_email = ramit['email']
# print(ramit_email)
# # 3b. Write a python expression that gets the first of Ramit's interests.
# ramit_interest1 = ramit['interests'][0]
# print(ramit_interest1)
# # 3c. Write a python expression that gets the email address of Jasmine.
# jasmine_email = ramit['friends'][0]['email']
# print(jasmine_email)
# # 3d. Write a python expression that gets the second of Jan's two interests.
# jan_interest2 = ramit['friends'][1]['interests'][1]
# print(jan_interest2)
# 4. Letter Summary
# Write a letter_histogram function that takes a word as its input,
# and returns a dictionary containing the tally of how many times
# each letter in the alphabet was used in the word. For example:
# >>>letter_histogram('banana')
# {'a': 3, 'b': 1, 'n': 2}
def letter_histogram(word):
    """Return a dict tallying how often each character occurs in word.

    Counting is case-insensitive (the word is lowercased first), e.g.
    letter_histogram('banana') -> {'b': 1, 'a': 3, 'n': 2}.
    """
    counts = {}
    # single O(n) pass instead of calling str.count once per character,
    # which made the original quadratic in the length of the word
    for letter in word.lower():
        counts[letter] = counts.get(letter, 0) + 1
    return counts
# input_word = input("Please enter a word: ")
# print(letter_histogram(input_word))
# Word Summary
# Write a word_histogram function that takes a paragraph of text as its input, and returns a dictionary containing the tally of how many times each word in the alphabet was used in the text. For example:
# >>> word_histogram('To be or not to be')
# def word_histogram(paragraph):
# count_of_word = {}
# lower_paragraph = paragraph.lower()
# words = lower_paragraph.split()
# for word in words:
# count_of_word[word] = int(words.count(word))
# # if word in count_of_word:
# # count_of_word[word] += 1
# # else:
# # count_of_word[word] = 1
# return count_of_word
# input_paragraph = input('Please enter a paragraph: ')
# print(word_histogram(input_paragraph))
# Sorting a histogram
# Given a histogram tally (one returned from either letter_histogram
# or word_histogram), print the top 3 words or letters.
# def sorting_histogram(word):
# dictionary_histogram = letter_histogram(word)
# sorted_string = ""
# for key in sorted(dictionary_histogram):
# sorted_string += (f"{key}: {dictionary_histogram[key]}\n")
# return sorted_string.strip()
# input_word = input("Please enter a word: ")
# print(sorting_histogram(input_word))
|
import sys
# expect exactly one command-line argument: the number to digit-sum
count = int(len(sys.argv))
if(count != 2):
    print("Usage: {0} <num>".format(sys.argv[0]))
    exit(-1)
num = int(sys.argv[1])
result = 0
# peel off the last decimal digit until nothing is left
# NOTE(review): int(num/10) truncates toward zero while num%10 stays
# non-negative, so a negative input would give a nonsense sum -- assumes
# non-negative input
while num != 0:
    result = result +int(num%10)
    num=int(num/10)
print("Sum of digits of {0} is {1}".format(int(sys.argv[1]), result))
|
""":mod:`padak.html5` --- HTML5 template engine
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
|
class Solution(object):
    def rainbowSort(self, array):
        """
        input: int[] array
        return: int[]

        Sort an array containing only -1, 0 and 1 by counting each value
        and rebuilding the list (-1s first, then 0s, then 1s).
        Fix: an empty/None input now returns an empty list as the contract
        documents, instead of implicitly returning None.
        """
        if not array:
            return []
        minus_ones, zeros, ones = self.count(array)
        # rebuild the sorted list directly from the three tallies
        return [-1] * minus_ones + [0] * zeros + [1] * ones

    def count(self, array):
        """Return the tallies (#-1s, #0s, #1s) of the values in array."""
        sumZero, sumOne, sumMinusOne = 0, 0, 0
        for value in array:
            if value == -1:
                sumMinusOne += 1
            elif value == 0:
                sumZero += 1
            else:
                # anything that is neither -1 nor 0 counts as a 1
                sumOne += 1
        return sumMinusOne, sumZero, sumOne
# quick manual check of rainbowSort on a small sample array
array = [0, 0, -1]
a = Solution()
b = a.rainbowSort(array)
from csv_loader import CsvLoader
from clustering_analyzer import ClusteringAnalyzer
import df_handler
class MainWrapper:
    """Drives the touch-pressure clustering analyses over a loaded dataframe."""

    def __init__(self, df_object):
        # df_object: the dataframe all df_handler operations run against
        self.df_object = df_object

    def proportion_pressure_of_question(self):
        """Cluster the wrong/correct mean-pressure ratio, per question.

        For every unique question index triple: split answers into correct
        and wrong, compute per-person mean touch pressure for each side,
        join on person_id to form the wrong/correct ratio, k-means cluster
        it (k=2) and write one png + csv per question.
        """
        sorted_unique_index = df_handler.get_unique_index(self.df_object)
        for index in sorted_unique_index:
            index_list = index.tolist()
            # all rows for this (content, question, derivedQuestion) triple
            selected_df_by_index = df_handler.get_rows_by_index(self.df_object,index_list)
            true_answer_df = df_handler.filter_rows_by_correct(selected_df_by_index,True)
            false_answer_df = df_handler.filter_rows_by_correct(selected_df_by_index,False)
            true_mean_pressure_df = df_handler.group_mean_touch_pressure(true_answer_df)
            false_mean_pressure_df = df_handler.group_mean_touch_pressure(false_answer_df)
            # join per-person means and compute wrong/correct pressure ratios
            proportion_person_array, mean_pressure_true_array,mean_pressure_false_array,proportion_data_array = df_handler.join_df_and_divide_pressure(true_mean_pressure_df,false_mean_pressure_df,'person_id')
            if len(proportion_person_array) == 0:
                # nobody has both correct and wrong answers for this question
                continue
            else:
                png_path = '../../../../files/clustering/pressureProportion/png/'+str(index_list[0])+'_'+str(index_list[1])+'_'+str(index_list[2])+'.png'
                csv_path = '../../../../files/clustering/pressureProportion/csv/'+str(index_list[0])+'_'+str(index_list[1])+'_'+str(index_list[2])+'.csv'
                print(
                    'Clustering data from contentIndex[%d]:questionIndex[%d]:derivedQuestionIndex[%d]\t%d users'
                    %(index_list[0],index_list[1],index_list[2],len(proportion_person_array) ))
                proportion_pressure_analyzer = ClusteringAnalyzer(proportion_person_array, mean_pressure_true_array,mean_pressure_false_array,proportion_data_array)
                proportion_pressure_analyzer.do_kmeans(n_clusters=2)
                proportion_pressure_analyzer.draw_png(
                    suptitle='',subtitle_1='Kernel Density Estimation',subtitle_2='KMeans Clustering'
                    ,xlabel='Mean Wrong Pressure / Mean Correct Pressure',file_path =png_path
                )
                proportion_pressure_analyzer.write_result_for_proportion(file_path=csv_path)
        return

    def proportion_pressure_of_all(self):
        """Same ratio clustering as above, aggregated over every question."""
        true_answer_df = df_handler.filter_rows_by_correct(self.df_object,True)
        false_answer_df = df_handler.filter_rows_by_correct(self.df_object,False)
        true_mean_pressure_df = df_handler.group_mean_touch_pressure(true_answer_df)
        false_mean_pressure_df = df_handler.group_mean_touch_pressure(false_answer_df)
        proportion_person_array, mean_pressure_true_array,mean_pressure_false_array,proportion_data_array = df_handler.join_df_and_divide_pressure(true_mean_pressure_df,false_mean_pressure_df,'person_id')
        if len(proportion_person_array) == 0:
            # nothing to cluster: no person with both outcomes present
            pass
        else:
            png_path = '../../../../files/clustering/pressureProportion/png/total_result.png'
            csv_path = '../../../../files/clustering/pressureProportion/csv/total_result.csv'
            proportion_pressure_analyzer = ClusteringAnalyzer(proportion_person_array, mean_pressure_true_array,mean_pressure_false_array,proportion_data_array)
            proportion_pressure_analyzer.do_kmeans(n_clusters=2)
            proportion_pressure_analyzer.draw_png(
                suptitle='',subtitle_1='Kernel Density Estimation',subtitle_2='KMeans Clustering'
                ,xlabel='Mean Wrong Pressure / Mean Correct Pressure',file_path =png_path)
            proportion_pressure_analyzer.write_result_for_proportion(file_path=csv_path)
        return
|
# read N pairs (Ai, Bi); count how many satisfy Ai >= X, Bi >= Y and
# Ai + Bi >= Z, using the fact that booleans sum as 0/1
N, X, Y, Z = map(int, input().split())
S = [tuple(map(int, input().split())) for i in range(N)]
print(sum([Ai >= X and Bi >= Y and Ai + Bi >= Z for Ai, Bi in S]))
|
import time
# selection sort demo: for each position i, swap in the minimum of the
# unsorted tail (NOTE(review): the time import is unused)
arr = [22,56,1,34,2,98,5,65,9]
print("unsorted list is ")
print(arr)
for i in range(len(arr)):
    min_index = i
    # find the index of the smallest element in arr[i+1:]
    for j in range(i+1,len(arr)):
        if arr[min_index]>arr[j]:
            min_index = j;
    arr[i],arr[min_index] = arr[min_index],arr[i]
print("sorted list using selection sort")
print(arr)
|
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
import pandas as pd
from random import randint
from time import sleep
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# url = 'https://www.daraz.pk/laptops/?page=1&spm=a2a0e.home.cate_1.5.6a274937VstcSP'
url = 'https://www.daraz.pk/smartphones/?spm=a2a0e.home.cate_1.1.35e34937tX3Jyv'
# url = 'https://www.cdn.geeksforgeeks.org/'
'''ua = UserAgent()
print(ua)
print()
# a = ua.chrome
# print(a)
user_agent = ua.chrome
print(user_agent)
'''
options = Options()
# options.add_argument("window-size=1400,600")
# options.headless = True
options.add_argument("Accept-Language=en-US,en;q=0.5")
# options.add_argument(f'user-agent = {user_agent}')
# hd = {
# 'user-agent': user_agent,
# "Accept-Language": "en-US, en;q=0.5"
# }
print("opening the browser")
driverPath = "..\\chromedriver_win32\\chromedriver.exe"
driver = webdriver.Chrome(options=options, executable_path=driverPath)
print('opening the url')
# driver.execute_script('alert("Hello 1")')
driver.get(url)
print("URL has opened successfully")
#
# print('alert is popping up')
# driver.execute_script('alert("Before clicking next button")')
# time.sleep(1)
#
# driver.switch_to.alert.accept
# driver.switch_to.alert.accept
# print('alert has been canceled')
print(driver.current_url)
# print("Finding the nextBtn")
# nextbtn = driver.find_element_by_xpath('//*[@id="root"]/div/div[3]/div[1]/div/div[1]/div[3]/div/ul/li[9]/a')
nextbtn = driver.find_element_by_xpath('//li[@title="Next Page"]')
if 'ant-pagination-disabled' in nextbtn.get_attribute('class'):
print('disabled button')
nextbtn = None
'''
# checking whether the program gives exception on unfound elements
khuzaima = driver.find_elements_by_css_selector('.khuzaima')
print(type(khuzaima))
print(khuzaima)
print(not khuzaima)'''
# nextbtn = driver.find_element_by_class_name("nextpostslink")
# driver.implicitly_wait(100)
# print('executing the script')
# S = lambda X: driver.execute_script('return document.body.parentNode.scroll' + X)
# print(S)
# height = S('Height')
# print(height)
# print(type(S('Height')))
#
# width = S('Width')
# print(width)
# print(type(S('Width')))
#
# print("height is " + str(height))
# driver.set_window_size(width, height)
def pause():
    """Sleep a random 2-10 seconds to look less bot-like between page loads."""
    delay = randint(2, 10)
    print(f'sleeping for {delay} second(s)')
    sleep(delay)
    print('sleeping time elapsed')


pause()
def scrollTo(element):
    """Scroll the window so *element* ends up vertically centered in the viewport."""
    target_y = element.location['y'] + (element.size['height'] / 2)
    viewport_center = (driver.execute_script('return window.innerHeight') / 2
                       + driver.execute_script('return window.pageYOffset'))
    driver.execute_script("window.scrollBy(0, arguments[0]);", target_y - viewport_center)
names = []
prices = []
n = 0  # number of pages scraped so far

# Walk the paginated listing until the "Next Page" button is disabled.
while nextbtn is not None:
    print('button is not none')
    # --- scrape the current page ---
    print('---------------------------------------------------------')
    print(f'scraping "{driver.current_url}" .')
    print("parsing...")
    soup = BeautifulSoup(driver.page_source, "html.parser")
    soup = soup.find("div", "c1_t2i")             # product-grid container
    print("finding...")
    itemDetails = soup.find_all("div", "c3KeDq")  # one div per product card
    print("\n\n\n\n")
    print(len(itemDetails))
    for item in itemDetails:
        price = item.find("span", class_="c13VH6").get_text(strip=True, separator=' ')
        prices.append(price)
        name = item.find("div", class_="c16H9d").text.strip()
        names.append(name)
    n = n + 1
    print(n)

    # --- advance to the next page ---
    print('waiting for presence of button')
    WebDriverWait(driver, 20).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, 'li[title = "Next Page"]'))
    )
    print('button is present')
    nextbtn.click()
    print('clicked the next button')
    pause()
    nextbtn = driver.find_element_by_xpath('//li[@title="Next Page"]')
    # BUG FIX: the original read type(nextbtn.get_attribute('aria-disabled'))
    # and compared that type object against 'true', which can never match;
    # compare the attribute value itself.
    aria_disabled = nextbtn.get_attribute('aria-disabled')
    if (aria_disabled == 'true') or ('ant-pagination-disabled' in nextbtn.get_attribute('class')):
        nextbtn = None

print('\n----------------------------------------------\n')
print('button is none. Ended...')
print('saving data')
df = pd.DataFrame({"Prices": prices, "Names": names})
df.to_csv("CompleteDarazLaptopData.csv", index=False)
print('saved')
print('quitting the browser in 3 seconds')
sleep(3)
driver.quit()
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
    """Placeholder polls index view; returns a static greeting page."""
    return HttpResponse("Hello, world. You're at the polls index.")
from django.http import HttpResponse
from django.views import generic
from .models import Artist, Song
class IndexView(generic.ListView):
    """List all artists using the history/index.html template."""

    model = Artist  # ListView derives the queryset from this model
    template_name = 'history/index.html'
    context_object_name = 'artist_list'  # variable name exposed to the template
class SongsView(generic.DetailView):
    """Detail page for one artist (their songs) via history/songs.html."""

    model = Artist
    template_name = 'history/songs.html'

    def get_queryset(self):
        """Return the queryset DetailView uses to look up the artist.

        NOTE(review): the original docstring ("Excludes any questions that
        aren't published yet") was copied from the Django polls tutorial and
        does not describe this code — no filtering happens here. Returning
        the bare manager appears to work because DetailView filters it, but
        `Artist.objects.all()` would be the conventional form — confirm.
        """
        return Artist.objects
#!/usr/bin/env python
"""
Copyright (c) 2018 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import axis_ep
import eth_ep
import xgmii_ep
import baser_serdes_ep
# Module under test and the matching 64-bit testbench name.
module = 'eth_phy_10g'
testbench = 'test_%s_64' % module

# All Verilog sources needed to elaborate the DUT, plus the testbench itself.
srcs = [
    "../rtl/%s.v" % module,
    "../rtl/eth_phy_10g_rx.v",
    "../rtl/eth_phy_10g_rx_if.v",
    "../rtl/eth_phy_10g_rx_ber_mon.v",
    "../rtl/eth_phy_10g_rx_frame_sync.v",
    "../rtl/eth_phy_10g_tx.v",
    "../rtl/eth_phy_10g_tx_if.v",
    "../rtl/xgmii_baser_dec_64.v",
    "../rtl/xgmii_baser_enc_64.v",
    "../rtl/lfsr.v",
    "%s.v" % testbench,
]

src = ' '.join(srcs)

# Icarus Verilog compile command run by bench() before cosimulation.
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
    """Cosimulation testbench for eth_phy_10g with a 64-bit datapath.

    Compiles the Verilog sources with Icarus (build_cmd), wires the DUT to
    XGMII and BASE-R serdes endpoint models, and runs two directed packet
    tests (labelled "RX" and "TX") checking the data path in each direction.
    """
    # Parameters (mirroring the Verilog testbench; not all are referenced
    # on the Python side)
    DATA_WIDTH = 64
    # NOTE(review): under Python 3 this true division yields the float 8.0,
    # which is then used as an intbv slice bound below — confirm the target
    # interpreter, or prefer DATA_WIDTH // 8.
    CTRL_WIDTH = (DATA_WIDTH/8)
    HDR_WIDTH = 2
    BIT_REVERSE = 0
    SCRAMBLER_DISABLE = 0
    PRBS31_ENABLE = 1
    TX_SERDES_PIPELINE = 2
    RX_SERDES_PIPELINE = 2
    BITSLIP_HIGH_CYCLES = 1
    BITSLIP_LOW_CYCLES = 8
    # BER monitor interval in clock cycles (presumably scaled down from a
    # real 125 us for simulation speed — TODO confirm).
    COUNT_125US = 1250/6.4

    # Inputs
    clk = Signal(bool(0))
    rst = Signal(bool(0))
    current_test = Signal(intbv(0)[8:])

    rx_clk = Signal(bool(0))
    rx_rst = Signal(bool(0))
    tx_clk = Signal(bool(0))
    tx_rst = Signal(bool(0))
    xgmii_txd = Signal(intbv(0)[DATA_WIDTH:])
    xgmii_txc = Signal(intbv(0)[CTRL_WIDTH:])
    serdes_rx_data = Signal(intbv(0)[DATA_WIDTH:])
    serdes_rx_hdr = Signal(intbv(1)[HDR_WIDTH:])
    tx_prbs31_enable = Signal(bool(0))
    rx_prbs31_enable = Signal(bool(0))

    # Pre-bit-shift serdes RX stream; shift_bits below rotates it onto
    # serdes_rx_data / serdes_rx_hdr.
    serdes_rx_data_int = Signal(intbv(0)[DATA_WIDTH:])
    serdes_rx_hdr_int = Signal(intbv(1)[HDR_WIDTH:])

    # Outputs
    xgmii_rxd = Signal(intbv(0)[DATA_WIDTH:])
    xgmii_rxc = Signal(intbv(0)[CTRL_WIDTH:])
    serdes_tx_data = Signal(intbv(0)[DATA_WIDTH:])
    serdes_tx_hdr = Signal(intbv(0)[HDR_WIDTH:])
    serdes_rx_bitslip = Signal(bool(0))
    rx_error_count = Signal(intbv(0)[7:])
    rx_bad_block = Signal(bool(0))
    rx_block_lock = Signal(bool(0))
    rx_high_ber = Signal(bool(0))

    # sources and sinks
    xgmii_source = xgmii_ep.XGMIISource()
    xgmii_source_logic = xgmii_source.create_logic(
        tx_clk,
        tx_rst,
        txd=xgmii_txd,
        txc=xgmii_txc,
        name='xgmii_source'
    )
    xgmii_sink = xgmii_ep.XGMIISink()
    xgmii_sink_logic = xgmii_sink.create_logic(
        rx_clk,
        rx_rst,
        rxd=xgmii_rxd,
        rxc=xgmii_rxc,
        name='xgmii_sink'
    )
    serdes_source = baser_serdes_ep.BaseRSerdesSource()
    serdes_source_logic = serdes_source.create_logic(
        rx_clk,
        tx_data=serdes_rx_data_int,
        tx_header=serdes_rx_hdr_int,
        name='serdes_source'
    )
    serdes_sink = baser_serdes_ep.BaseRSerdesSink()
    serdes_sink_logic = serdes_sink.create_logic(
        tx_clk,
        rx_data=serdes_tx_data,
        rx_header=serdes_tx_hdr,
        name='serdes_sink'
    )

    # DUT: build the simulation binary, then cosimulate it via vvp.
    if os.system(build_cmd):
        raise Exception("Error running build command")
    dut = Cosimulation(
        "vvp -m myhdl %s.vvp -lxt2" % testbench,
        clk=clk,
        rst=rst,
        current_test=current_test,
        rx_clk=rx_clk,
        rx_rst=rx_rst,
        tx_clk=tx_clk,
        tx_rst=tx_rst,
        xgmii_txd=xgmii_txd,
        xgmii_txc=xgmii_txc,
        xgmii_rxd=xgmii_rxd,
        xgmii_rxc=xgmii_rxc,
        serdes_tx_data=serdes_tx_data,
        serdes_tx_hdr=serdes_tx_hdr,
        serdes_rx_data=serdes_rx_data,
        serdes_rx_hdr=serdes_rx_hdr,
        serdes_rx_bitslip=serdes_rx_bitslip,
        rx_error_count=rx_error_count,
        rx_bad_block=rx_bad_block,
        rx_block_lock=rx_block_lock,
        rx_high_ber=rx_high_ber,
        tx_prbs31_enable=tx_prbs31_enable,
        rx_prbs31_enable=rx_prbs31_enable
    )

    # All three clocks share the same period and phase.
    @always(delay(4))
    def clkgen():
        clk.next = not clk
        rx_clk.next = not rx_clk
        tx_clk.next = not tx_clk

    # Push a value here to force a specific serdes bit offset.
    load_bit_offset = []

    @instance
    def shift_bits():
        # Rotates the 66-bit serdes stream by bit_offset, honoring the DUT's
        # bitslip requests, so the RX frame-sync logic has to find alignment.
        bit_offset = 0
        last_data = 0
        while True:
            yield clk.posedge
            if load_bit_offset:
                bit_offset = load_bit_offset.pop(0)
            if serdes_rx_bitslip:
                bit_offset += 1
            bit_offset = bit_offset % 66
            data = int(serdes_rx_data_int) << 2 | int(serdes_rx_hdr_int)
            # Concatenate with the previous word and extract a 66-bit window
            # at the current offset.
            out_data = ((last_data | data << 66) >> 66-bit_offset) & 0x3ffffffffffffffff
            last_data = data
            serdes_rx_data.next = out_data >> 2
            serdes_rx_hdr.next = out_data & 3

    @instance
    def check():
        # Reset pulse, then settle before stimulus.
        yield delay(100)
        yield clk.posedge
        rst.next = 1
        tx_rst.next = 1
        rx_rst.next = 1
        yield clk.posedge
        rst.next = 0
        tx_rst.next = 0
        rx_rst.next = 0
        yield clk.posedge
        yield delay(100)
        yield clk.posedge

        # testbench stimulus

        # wait for block lock
        while not rx_block_lock:
            yield clk.posedge

        # dump garbage received before lock was achieved
        while not xgmii_sink.empty():
            xgmii_sink.recv()

        yield clk.posedge

        print("test 1: test RX packet")
        current_test.next = 1

        test_frame = bytearray(range(128))
        # 7-byte preamble + SFD (0xD5) in front of the payload
        xgmii_frame = xgmii_ep.XGMIIFrame(b'\x55\x55\x55\x55\x55\x55\x55\xD5'+test_frame)
        xgmii_source.send(xgmii_frame)
        yield serdes_sink.wait()
        rx_frame = serdes_sink.recv()

        assert rx_frame.data == xgmii_frame.data

        assert xgmii_sink.empty()
        assert serdes_sink.empty()

        yield delay(100)
        yield clk.posedge

        print("test 2: test TX packet")
        current_test.next = 2

        test_frame = bytearray(range(128))
        xgmii_frame = xgmii_ep.XGMIIFrame(b'\x55\x55\x55\x55\x55\x55\x55\xD5'+test_frame)
        serdes_source.send(xgmii_frame)
        yield xgmii_sink.wait()
        rx_frame = xgmii_sink.recv()

        assert rx_frame.data == xgmii_frame.data

        assert xgmii_sink.empty()
        assert serdes_sink.empty()

        yield delay(100)

        raise StopSimulation

    return instances()
def test_bench():
    """Build and run the cosimulation once; assertion failures abort the run."""
    Simulation(bench()).run()
if __name__ == '__main__':
    # Allow running this testbench directly as a script.
    print("Running test...")
    test_bench()
|
from enum import Enum
class SeriesStatus(Enum):
    """Publication/airing state of a series itself."""

    Started = 1
    Finished = 2
    Planned = 3
class UserStatus(Enum):
    """A user's personal watch status for a series."""

    Watching = 1
    Completed = 2
    Onhold = 3
    Dropped = 4
    # NOTE(review): value 5 is skipped and Planned is 6 — presumably to match
    # an external API's status codes; confirm before renumbering.
    Planned = 6
|
# Global list holding one card dict ({"name", "age", "phone", "addr"}) per contact.
card_list = []
def show_menu():
    """Print the main menu between two decorative borders."""
    border = "*" * 50
    print(border)
    print("1.新建名片")
    print("2.显示全部")
    print("3.查询名片")
    print("0.退出系统")
    print(border)
def new_card():
    """Prompt for a new contact's details and append the card to card_list."""
    print("您正在使用功能[1]--新建名片")
    # Collect the four fields from the user.
    name = input("请输入要添加的用户姓名:")
    age = input("请输入要添加的用户年龄:")
    phone = input("请输入要添加的用户电话:")
    addr = input("请输入要添加的用户住址:")
    # Store them as one card dict and confirm to the user.
    card_list.append({"name": name, "age": age, "phone": phone, "addr": addr})
    print("添加 %s 的名片成功!" % name)
def display_all():
    """Print every saved card as a tab-separated table, or a hint if empty."""
    print("您正在使用功能[2]--显示全部")
    # Bail out early when there is nothing to show.
    if not card_list:
        print("现在无任何用户名片,请先添加数据!")
        return
    # Header row.
    for headers in ["姓名", "年龄", "电话", "地址"]:
        print(headers, end=" \t\t")
    print("")
    # One row per card.
    for card_dict in card_list:
        print("%s \t\t%s \t\t%s \t\t%s \t\t" % (card_dict["name"],
                                                card_dict["age"],
                                                card_dict["phone"],
                                                card_dict["addr"]))
def search_card():
    """Look up a card by name; on a hit, offer edit/delete via handle_card.

    Re-prompts recursively when the name is not found.
    """
    print("您正在使用功能[3]--查询名片")
    search_name = input("请输入要查找的用户名:")
    # Scan the list; the for-else fires only when no card matched.
    for card_dict in card_list:
        if search_name != card_dict["name"]:
            continue
        print("用户信息已找到")
        print("姓名 \t\t年龄 \t\t电话 \t\t地址 \t\t")
        print("-" * 50)
        print("%s \t\t%s \t\t%s \t\t%s \t\t" % (card_dict["name"],
                                                card_dict["age"],
                                                card_dict["phone"],
                                                card_dict["addr"]))
        # Let the user modify or delete the matched card.
        handle_card(card_dict)
        break
    else:
        print("未找到%s相关信息,请重新输入" % search_name)
        search_card()
def handle_card(search_dict):
    """Apply a user-chosen action (edit / delete / return) to a found card.

    :param search_dict: the card dict located by search_card
    """
    action_str = input("请输入要执行的操作:"
                       "[1]修改[2]删除[0]返回主菜单")
    if action_str == "1":
        # Re-prompt each field; blank input keeps the old value (input_info).
        for key, tip in (("name", "姓名:"), ("age", "年龄:"),
                         ("phone", "电话:"), ("addr", "地址:")):
            search_dict[key] = input_info(search_dict[key], tip)
    elif action_str == "2":
        card_list.remove(search_dict)
        print("已删除名片信息!")
def input_info(dict_value, tip_message):
    """Prompt with tip_message; return the typed text, or dict_value if blank.

    :param dict_value: the field's current value
    :param tip_message: prompt shown to the user
    :return: the user's input when non-empty, otherwise the original value
    """
    result_str = input(tip_message)
    return result_str if result_str else dict_value
|
from batch_simulator import run_agents_in_environment
#######################################
# Two options about what to do with the agent's log messages in absence
# of a GUI pane
def log_to_console(msg):
    """Echo an agent log message to stdout (used when there is no GUI pane)."""
    print(msg)
def log_null(msg):
    """Silently discard agent log messages."""
    return None
########################################
# Simulation configuration.
dirt_density = 0.1
wall_density = 0.3
num_samples = 100
output_file_name = 'simulation_results.csv'

# (agent source file, agent class name) pairs to evaluate.
agents = [
    ("agents/reactiveagent.py", "NoSenseAgent"),
    ("agents/reactiveagent.py", "SenseDirtAgent"),
]

write_results_to_console = True

# Run all agents with logging discarded (batch mode, no GUI).
run_agents_in_environment(dirt_density, wall_density, agents, log_null,
                          num_samples, output_file_name,
                          write_results_to_console)
|
#!/usr/bin/env python3
"""
Alexander Hay
ME449
Assignment 3
"""
import numpy as np
import modern_robotics as mr
# Given: Mlist, Glist, Slist
# Link home configurations M_{i-1,i} (presumably the UR5 example from the
# Modern Robotics text, judging by the 0.089159/0.425/0.0823 offsets — confirm).
M01 = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0.089159], [0, 0, 0, 1]]
M12 = [[0, 0, 1, 0.28], [0, 1, 0, 0.13585], [-1, 0, 0, 0], [0, 0, 0, 1]]
M23 = [[1, 0, 0, 0], [0, 1, 0, -0.1197], [0, 0, 1, 0.395], [0, 0, 0, 1]]
M34 = [[0, 0, 1, 0], [0, 1, 0, 0], [-1, 0, 0, 0.14225], [0, 0, 0, 1]]
M45 = [[1, 0, 0, 0], [0, 1, 0, 0.093], [0, 0, 1, 0], [0, 0, 0, 1]]
M56 = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0.09465], [0, 0, 0, 1]]
M67 = [[1, 0, 0, 0], [0, 0, 1, 0.0823], [0, -1, 0, 0], [0, 0, 0, 1]]
Mlist = [M01, M12, M23, M34, M45, M56, M67]
# Spatial inertia matrices G_i (rotational inertias then link masses on the diagonal).
G1 = np.diag([0.010267495893, 0.010267495893, 0.00666, 3.7, 3.7, 3.7])
G2 = np.diag([0.22689067591, 0.22689067591, 0.0151074, 8.393, 8.393, 8.393])
G3 = np.diag([0.049443313556, 0.049443313556, 0.004095, 2.275, 2.275, 2.275])
G4 = np.diag([0.111172755531, 0.111172755531, 0.21942, 1.219, 1.219, 1.219])
G5 = np.diag([0.111172755531, 0.111172755531, 0.21942, 1.219, 1.219, 1.219])
G6 = np.diag([0.0171364731454, 0.0171364731454, 0.033822, 0.1879, 0.1879, 0.1879])
Glist = [G1, G2, G3, G4, G5, G6]
# Screw axes of the six joints, one per column, in the space frame.
Slist = [[0, 0, 0, 0, 0, 0],
         [0, 1, 1, 1, 0, 1],
         [1, 0, 0, 0, -1, 0],
         [0, -0.089159, -0.089159, -0.089159, -0.10915, 0.005491],
         [0, 0, 0, 0, 0.81725, 0],
         [0, 0, 0.425, 0.81725, 0, 0.81725]]
# Gravity
g = [0, 0, -9.81]
# Time stuff
# t_end = 3 # seconds (variable not used)
dt = 1/100 # 100 steps per second
# No forces, no torques (free fall)
Ftip = [0,0,0,0,0,0]
taulist = [0,0,0,0,0,0]
# Home configuration at rest
thetalist = [0, 0, 0, 0, 0, 0]
dthetalist = [0, 0, 0, 0, 0, 0]
# Forward dynamics equation: M(th)ddth = tau - c(th,dth) - g(th) - J.T(th)*Ftip
ddthetalist = mr.ForwardDynamics(thetalist,dthetalist,taulist,g,Ftip,Mlist,Glist,Slist)
path = np.zeros([300, 6])  # 3 s at 100 steps/s, one row of 6 joint angles per step
print("Part 1 Running...")
for i in range(path.shape[0]):
    # Euler-integrate one step, recompute accelerations at the new state,
    # and record the joint angles.
    thetalist, dthetalist = mr.EulerStep(thetalist, dthetalist, ddthetalist, dt)
    ddthetalist = mr.ForwardDynamics(thetalist, dthetalist, taulist, g, Ftip, Mlist, Glist, Slist)
    path[i] = thetalist
    if i % 10 == 0:
        print("Iter: " + str(i) + "/" + str(path.shape[0]))
np.savetxt("path.csv", path, delimiter=",")
print("Done")
print(".csv file saved as path.csv")
print("************************")
# Second run: different initial pose, simulated for 5 seconds.
thetalist = [-1, -1, 0, 0, 0, 0]
dthetalist = [0, 0, 0, 0, 0, 0]
ddthetalist = mr.ForwardDynamics(thetalist, dthetalist, taulist, g, Ftip, Mlist, Glist, Slist)
path_2 = np.zeros([500, 6])  # 5 s x 100 steps/s, 6 joint angles
print()
print("Part 2 Running...")
for i in range(path_2.shape[0]):
    thetalist, dthetalist = mr.EulerStep(thetalist, dthetalist, ddthetalist, dt)
    ddthetalist = mr.ForwardDynamics(thetalist, dthetalist, taulist, g, Ftip, Mlist, Glist, Slist)
    path_2[i] = thetalist
    if i % 10 == 0:
        print("Iter: " + str(i) + "/" + str(path_2.shape[0]))
np.savetxt("path_2.csv", path_2, delimiter=",")
print("Done")
print(".csv file saved as path_2.csv")
import abc
import os
from abc import ABC
from typing import List, NamedTuple
import cpath
from cpath import output_path
from misc_lib import path_join
def normalize255(v, max):
    """Scale v from [0, max] onto [0, 255]; a zero max maps to 0."""
    return 0 if max == 0 else v / max * 255
def normalize100(v, max):
    """Scale v from [0, max] onto [0, 100]; a zero max maps to 0."""
    return 0 if max == 0 else v / max * 100
def get_color(r):
    """Map a 0-255 score to a white-to-blue hex color (higher = bluer)."""
    level = "%02x" % (255 - int(r))
    return level + level + "ff"
class HtmlCellStr(NamedTuple):
    """A pre-rendered HTML table-cell fragment, written verbatim."""
    # s: raw HTML, e.g. "<th>...</th>"
    s: str
class Cell:
    """One table cell: display text plus highlight/formatting hints."""

    def __init__(self, s, highlight_score=0, space_left=True, space_right=True, target_color="B", is_head=False):
        # Floats are rendered with two decimals; anything else via str().
        self.s = "{:02.2f}".format(s) if type(s) is float else str(s)
        # Clamp the highlight score into the 0-255 color range.
        if highlight_score > 255:
            highlight_score = 255
        elif highlight_score < 0:
            highlight_score = 0
        self.highlight_score = highlight_score
        self.space_left = space_left      # pad with a space on the left
        self.space_right = space_right    # pad with a space on the right
        self.target_color = target_color  # highlight hue code ("B", "R", ...)
        self.is_head = is_head            # render as <th> instead of <td>
def get_tooltip_cell(s, tooltip):
    """Build a Cell whose text is a hoverable tooltip span."""
    return Cell(get_tooltip_span(s, tooltip))
def set_cells_color(cells, color):
    """Set every cell's highlight color in place."""
    for cell in cells:
        cell.target_color = color
def get_tooltip_span(span_text, tooltip_text):
    """Wrap span_text in a tooltip <span> whose hover text is tooltip_text."""
    # Backslash continuations keep the emitted HTML on one line.
    tag = "<span class=\"tooltip\">{}\
<span class=\"tooltiptext\">{}</span>\
</span>".format(span_text, tooltip_text)
    return tag
class VisualizerCommon(ABC):
    """Base for visualizers; subclasses provide write_table()."""

    @abc.abstractmethod
    def write_table(self, rows):
        """Render one table from a list of rows (lists of cells)."""
        pass

    def multirow_print(self, cells, width=20):
        """Render `cells` as consecutive one-row tables of at most `width` cells."""
        for start in range(0, len(cells), width):
            self.write_table([cells[start:start + width]])

    def multirow_print_from_cells_list(self, cells_list, width=20):
        """Render parallel cell lists as stacked rows, `width` columns at a time.

        Chunking is driven by the length of the first list.
        """
        for start in range(0, len(cells_list[0]), width):
            self.write_table([row[start:start + width] for row in cells_list])
def get_tooltip_style_text():
    """Return the tooltip CSS snippet from data/html/tooltip.

    Uses a context manager so the file handle is closed promptly (the
    original left the handle open for the GC to collect).
    """
    with open(os.path.join(cpath.data_path, "html", "tooltip")) as f:
        return f.read()
def get_collapsible_css():
    """Return the collapsible-section CSS, closing the file promptly."""
    with open(os.path.join(cpath.src_path, "html", "collapsible.css")) as f:
        return f.read()
def get_scroll_css():
    """Return the scroll-pane CSS, closing the file promptly."""
    with open(os.path.join(cpath.src_path, "html", "scroll.css")) as f:
        return f.read()
def get_collapsible_script():
    """Return the collapsible-section JavaScript, closing the file promptly."""
    with open(os.path.join(cpath.src_path, "html", "collapsible.js")) as f:
        return f.read()
def get_link_highlight_code():
    """Return CSS that forces link colors (blue/purple/red) over page styles."""
    return """a {
color: blue !important;
text-decoration: none !important;
}
a:link, a:visited {
color: purple !important;
}
a:hover {
color: red !important;
}"""
def get_bootstrap_include_source():
    """Return the <link>/<script> tags that pull Bootstrap 3 + jQuery from CDNs."""
    s = """<!-- Latest compiled and minified CSS -->
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.4.1/css/bootstrap.min.css">
<!-- jQuery library -->
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>
<!-- Latest compiled JavaScript -->
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.4.1/js/bootstrap.min.js"></script>
"""
    return s
class HtmlVisualizer(VisualizerCommon):
    """Writes an HTML document (tables, paragraphs, spans) to a file.

    A bare filename is saved under <output_path>/visualize/; anything with a
    directory component is used as-is. Call close() to finish the document.
    """

    def __init__(self, filename, dark_mode=False, use_tooltip=False,
                 additional_styles=None,
                 script_include=None,
                 ):
        """Open the output file and write the HTML header.

        :param filename: bare name (saved under the visualize dir) or full path
        :param dark_mode: dark background with light foreground colors
        :param use_tooltip: include tooltip CSS so write_span_line renders hovers
        :param additional_styles: extra CSS strings for the <style> block
        :param script_include: raw script/markup strings written into <head>
        """
        # BUG FIX: these parameters defaulted to mutable lists ([]) that this
        # class appends to, so styles accumulated across instances. Copy the
        # caller's list (or start fresh) instead.
        additional_styles = list(additional_styles) if additional_styles is not None else []
        script_include = list(script_include) if script_include is not None else []
        if os.path.basename(filename) == filename:
            save_path = path_join(output_path, "visualize", filename)
        else:
            save_path = filename
        self.f_html = open(save_path, "w", encoding="utf-8")
        self.dark_mode = dark_mode
        self.dark_foreground = "A9B7C6"  # light-on-dark text color
        self.dark_background = "2B2B2B"
        self.use_tooltip = use_tooltip
        additional_styles.append("th { text-align: center;} \n")
        self._write_header(additional_styles, script_include)

    def _write_header(self, additional_styles, script_include):
        """Write doctype, <head> (scripts + styles) and the opening <body>."""
        self.f_html.write("<!DOCTYPE html>\n")
        self.f_html.write("<html><head>\n")
        self.f_html.write("<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\"/>")
        if self.dark_mode:
            additional_styles.append("body{color:#" + self.dark_foreground + ";}")
        if self.use_tooltip:
            additional_styles.append(get_tooltip_style_text())
        for script_text in script_include:
            self.f_html.write(script_text)
        self.f_html.write("<style>")
        for style in additional_styles:
            self.f_html.write(style)
        self.f_html.write("</style>")
        self.f_html.write("</head>\n")
        if self.dark_mode:
            # BUG FIX: the original omitted the closing '>' of the body tag.
            self.f_html.write("<body style=\"background-color:#{};\">\n".format(self.dark_background))
        else:
            self.f_html.write("<body>\n")

    def write_script(self, script):
        """Write an inline <script> element."""
        self.f_html.write("<script>")
        self.f_html.write(script)
        self.f_html.write("</script>")

    def close(self):
        """Write the closing tags and release the file handle."""
        self.f_html.write("</body>\n")
        self.f_html.write("</html>\n")
        # Close the handle so the document is flushed to disk promptly
        # (the original relied on interpreter shutdown).
        self.f_html.close()

    def write_paragraph(self, s):
        """Write s inside a <p> element."""
        self.f_html.write("<p>\n")
        self.f_html.write(s + "\n")
        self.f_html.write("</p>\n")

    def write_bar(self):
        """Write a horizontal rule."""
        self.f_html.write("<hr>\n")

    def write_div(self, s, div_class):
        """Write s inside a <div> with the given class."""
        self.write_elem("div", s, div_class)

    def write_elem(self, elem, s, elem_class, style=""):
        """Write s inside an arbitrary element with optional class and style."""
        optional_class = " class=" + elem_class if elem_class else ""
        style_text = " style=\"{}\"".format(style) if style else ""
        self.f_html.write("<{}{}{}>\n".format(elem, optional_class, style_text))
        self.f_html.write(s + "\n")
        self.f_html.write("</{}>\n".format(elem))

    def write_div_open(self, div_class=""):
        """Open a <div>; pair with write_div_close()."""
        optional_class = " class=" + div_class if div_class else ""
        self.f_html.write("<div{}>\n".format(optional_class))

    def write_div_close(self):
        """Close a <div> opened with write_div_open()."""
        self.f_html.write("</div>\n")

    def write_headline(self, s, level=4):
        """Write s as an <h{level}> headline."""
        self.f_html.write("<h{}>{}</h{}>\n".format(level, s, level))

    def write_table(self, rows, head=None):
        """Write a bordered table; rows are lists of Cell objects."""
        self.f_html.write("<table style=\"border-spacing: 0px;\" class=\"table-bordered\">\n")
        self._write_table_inner(head, rows)
        self.f_html.write("</table>\n")

    def _write_table_inner(self, head, rows):
        """Emit <thead> (when head is given) and <tbody> for the table writers."""
        if head is not None:
            self.f_html.write("<thead>")
            for column in head:
                # Head entries may be pre-rendered HTML, plain strings, or Cells.
                if type(column) == HtmlCellStr:
                    self.f_html.write(column.s)
                elif type(column) == str:
                    self.f_html.write(get_table_head_cell(column).s)
                elif type(column) == Cell:
                    self.f_html.write(self.get_cell_html(column))
                else:
                    raise TypeError
            self.f_html.write("</thead>")
        self.f_html.write("<tbody>")
        for row in rows:
            self.f_html.write("<tr>\n")
            for cell in row:
                self.f_html.write(self.get_cell_html(cell))
            self.f_html.write("</tr>\n")
        self.f_html.write("</tbody>")

    def write_table_with_class(self, rows: List[List[Cell]], class_str, head=None):
        """Like write_table, but with a caller-supplied table class attribute."""
        self.f_html.write("<table class=\"{}\">\n".format(class_str))
        self._write_table_inner(head, rows)
        self.f_html.write("</table>\n")

    def get_cell_html(self, cell: Cell):
        """Render one Cell as a <td>/<th> string, applying its highlight color."""
        left = " " if cell.space_left else ""
        right = " " if cell.space_right else ""
        td_th = "th" if cell.is_head else "td"
        no_padding = "style=\"padding-right:0px; padding-left:0px\""
        if cell.highlight_score:
            if not self.dark_mode:
                bg_color = self.get_color(cell.highlight_score, cell.target_color)
            else:
                bg_color = self.get_blue_d(cell.highlight_score)
            return f"<{td_th} bgcolor=\"#{bg_color}\" {no_padding}>{left}{cell.s}{right}</{td_th}>"
        return f"<{td_th} {no_padding}>{left}{cell.s}{right}</{td_th}>"

    def get_color(self, score, color):
        """Map a 0-255 score onto a pale-to-saturated hex color.

        :param color: one of "B", "R", "G", "Y", "Gray", or a literal hex
            color string; anything else raises ValueError.
        """
        r = 255 - int(score)
        if color == "B":
            bg_color = ("%02x" % r) + ("%02x" % r) + "ff"
        elif color == "R":
            bg_color = "ff" + ("%02x" % r) + ("%02x" % r)
        elif color == "G":
            bg_color = ("%02x" % r) + "ff" + ("%02x" % r)
        elif color == "Y":
            bg_color = "ffff" + ("%02x" % r)
        elif color == "Gray":
            bg_color = ("%02x" % r) + ("%02x" % r) + ("%02x" % r)
        else:
            try:
                bytes.fromhex(color)  # validate a raw hex color string
                bg_color = color
            except ValueError:
                print("Color {} is not expected".format(color))
                raise
        return bg_color

    def get_blue_d(self, r):
        """Dark-mode highlight: blue channel from background 0x2B up to 0xFF."""
        r = 0x2B + int((0xFF - 0x2B) * r / 255)
        return "2B2B" + ("%02x" % r)

    def write_span_line(self, span_and_tooltip_list):
        """Write one <div> row of tooltip spans; needs use_tooltip=True styles."""
        if not self.use_tooltip:
            # BUG FIX: corrected the "toolip" typo in the warning text.
            print("WARNING tooltip is not activated")
        self.f_html.write("<div>")
        for span_text, tooltip_text in span_and_tooltip_list:
            self.f_html.write(get_tooltip_span(span_text, tooltip_text))
        self.f_html.write("</div>")
        self.f_html.write("<br>")
def normalize(scores):
    """Linearly rescale scores onto [0, 100].

    Near-constant inputs (range below 0.001) are divided by 1 instead, which
    maps them all close to 0 and avoids division blow-up.
    """
    lo = min(scores)
    span = max(scores) - lo
    if span < 0.001:
        span = 1
    return [(score - lo) / span * 100 for score in scores]
def get_table_head_cell(s, width=0) -> HtmlCellStr:
    """Render a <th> fragment, optionally with a percentage-width style."""
    markup = "<th style=\"width:{}%\">{}</th>".format(width, s) if width \
        else "<th>{}</th>".format(s)
    return HtmlCellStr(markup)
def apply_html_highlight(indices, tokens) -> str:
    """Join tokens space-separated (with a leading space), wrapping tokens
    whose positions appear in `indices` in <mark>...</mark>.

    Consecutive highlighted tokens share one <mark> element.
    """
    mark_begin = "<mark>"
    mark_end = "</mark>"
    s_out = ""
    inside_mark = False
    for idx, t in enumerate(tokens):
        if idx in indices:
            if inside_mark:
                s_out += " " + t
            else:
                s_out += " " + mark_begin + t
                inside_mark = True
        else:
            if inside_mark:
                s_out += mark_end + " " + t
                inside_mark = False
            else:
                s_out += " " + t
    # BUG FIX: the original never closed a <mark> that was still open when
    # the last token was highlighted, emitting unbalanced HTML.
    if inside_mark:
        s_out += mark_end
    return s_out
|
# Read "count target" on the first line and the marks list on the second,
# then print the index of the first occurrence of target, or -1 if absent.
n, mark = input().split(" ")
target = int(mark)
marks = [int(x) for x in input().split(" ")]
for index, value in enumerate(marks):
    if value == target:
        print(index)
        break
else:
    print(-1)
|
import sys
import click
from commands.report import report
@click.group()
def cli():
    # Root command group; subcommands are registered via add_command below.
    # (Intentionally no docstring: click would surface it as --help text.)
    pass
cli.add_command(report)

if __name__ == "__main__":
    try:
        cli()
    except Exception as exc:
        # Top-level boundary: report the error briefly and exit non-zero.
        print(exc)
        sys.exit(1)
# -*- coding: utf-8 -*-
"""
Created on Tue May 11 21:52:34 2021
@author: Sarat
"""
import rioxarray as rio
import numpy as np
import datetime as dt
import xarray as xr
import pandas as pd
import geopandas as gpd
from shapely.geometry import mapping
import cartopy.crs as ccrs
from cartopy.io.shapereader import Reader
import statsmodels.api as sm
#################################
# Daily gridded rainfall, 1951-2020.
rf = xr.open_dataset('D:/IMD_data/rain_1951_2020.nc')

# Tag the grid's spatial dims and CRS once (both calls mutate rf in place);
# the original repeated these two lines before every clip.
rf.rio.set_spatial_dims(x_dim='lon', y_dim='lat', inplace=True)
rf.rio.write_crs("epsg:4326", inplace=True)


def _sub_basin_shapefile(i):
    """Return the shapefile path for sub-basin *i* (1..15)."""
    if i == 7:
        # NOTE(review): sub-basin 7 alone pointed at an absolute path in the
        # original script — kept verbatim; confirm it is intentional.
        return 'D:/Walter/Geospatial_DB/sub7.shp'
    folder = 'sub_basins_shp_1to9' if i <= 9 else 'sub_basins_shp_10to15'
    return '{}/sub{}.shp'.format(folder, i)


# For each of the 15 sub-basins: clip the rainfall grid to the basin polygon
# (keeping the full grid shape, with outside cells masked), average over the
# grid cells, and save the resulting daily series to CSV. The original
# hand-copied this block 15 times.
for i in range(1, 16):
    ind_shape = gpd.read_file(_sub_basin_shapefile(i), crs='epsg:4326')
    clipped = rf.rio.clip(ind_shape.geometry.apply(mapping), ind_shape.crs,
                          drop=False)
    basin_mean = clipped.rain.mean('lat').mean('lon')
    basin_mean.to_pandas().to_csv('Sub_Basins_Rainfall/sub{}.csv'.format(i))
#########################################################
|
class PostCode:
    """Typed wrapper around one postcode-lookup API response dict."""

    # result[...] keys copied verbatim onto the instance.
    _RESULT_FIELDS = (
        "postcode", "quality", "eastings", "northings", "country", "nhs_ha",
        "admin_county", "admin_ward", "longitude", "latitude",
        "parliamentary_constituency", "european_electoral_region",
    )

    def __init__(self, pc_json):
        """Feed in the parsed JSON dict of the API response."""
        self.status = pc_json['status']
        self.result = pc_json['result']
        for field in self._RESULT_FIELDS:
            setattr(self, field, self.result[field])
        # Nested code: NUTS statistical region identifier.
        self.codes_nuts = self.result['codes']['nuts']
|
import numpy as np
import pandas as pd
import dash
import dash_core_components as dcc
import dash_html_components as html
import folium
import plotly.graph_objs as go
import plotly.io as pio
from dash.dependencies import Output, Input
from joblib import load
###################################################################
# clean csv
df = pd.read_csv('final.csv', index_col=0)
# change int column to Int64 (pandas nullable integer) to handle missing value
for i in ['CIVIC_NUMBER', 'bathroom', 'bedroom']:
    df[i] = df[i].astype('Int64')
# drop rows with null price, area and built_year, then clean data
df = df.dropna(subset=['total_value', 'prev_value', 'area'])
# strip the '$' sign and thousands separators from the currency strings
df['total_value'] = df['total_value'].apply(lambda x: int(x.replace('$', '').replace(',', '')))
df['prev_value'] = df['prev_value'].apply(lambda x: int(x.replace('$', '').replace(',', '')))
# presence flags: 1 when the listing mentions a garage/carport, else 0
df['garage'] = df['garage'].apply(lambda x: 1 if not pd.isnull(x) else 0)
df['carport'] = df['carport'].apply(lambda x: 1 if not pd.isnull(x) else 0)
# a single space marks an unknown build year in the raw data
df['built_year'] = df['built_year'].apply(lambda x: int(x) if x != ' ' else np.nan)
df['bedroom'] = df['bedroom'].fillna(0).astype(int)
df['bathroom'] = df['bathroom'].fillna(0).astype(int)
def get_area(str_area):
    """Parse a free-text area description into square feet.

    Handles the two formats seen in the data:
      * "W x D ..."  -> returns W * D
      * "N Sq Ft"    -> returns N
    Returns None for any other format (becomes NaN after .apply), made
    explicit here instead of the original silent fall-through.
    """
    str_lst = str_area.split()
    if 'x' in str_lst:
        x_index = str_lst.index('x')
        return float(str_lst[x_index - 1]) * float(str_lst[x_index + 1])
    if 'Sq' in str_lst:
        sq_i = str_lst.index('Sq')
        return float(str_lst[sq_i - 1])
    return None  # unrecognized format
# derive the numeric area and the price metrics used throughout the app
df['area'] = df['area'].apply(get_area)
df['unit_price'] = df['total_value'] / df['area']
df['price_change'] = df['total_value'] - df['prev_value']
df['change_rate'] = df['price_change'] / df['prev_value']
df.to_csv('cleaned.csv', index=False)
# correlation between all parameters
corr_df = df.corr()
###############################################################################################
# Visualization
df = pd.read_csv('cleaned.csv')
df = df.dropna(subset=['area', 'CIVIC_NUMBER', 'actual_address'])
# select columns with a list: tuple indexing after groupby
# (df.groupby(...)['a', 'b']) was deprecated and removed in modern pandas
region_df = df.groupby(['Geo Local Area'])[['unit_price', 'change_rate', 'total_value']].mean()
region_df['district'] = region_df.index
bedroom_df = df.groupby(['bedroom']).agg({'total_value': ['mean', 'count'], 'unit_price': 'mean', 'change_rate': 'mean'})
bathroom_df = df.groupby(['bathroom']).agg({'total_value': ['mean', 'count'], 'unit_price': 'mean', 'change_rate': 'mean'})
garage_df = df[df['bedroom'] != 0].groupby(['garage']).agg({'total_value': ['mean', 'count'], 'unit_price': 'mean', 'change_rate': 'mean'})
# binning year value
bins = [1880, 1890, 1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010, 2020]
labels = [1880, 1890, 1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010]
df['binned_year'] = pd.cut(df['built_year'], bins=bins, labels=labels)
year_df = df.groupby(['binned_year'])[['total_value', 'unit_price', 'change_rate']].mean()
# binning area value
bins = [0, 500, 1000, 2000, 3000, 4000, 5000, 10000, 4000000]
labels = [0, 500, 1000, 2000, 3000, 4000, 5000, 10000]
df['binned_area'] = pd.cut(df['area'], bins=bins, labels=labels)
# BUG FIX: the per-area summary must group by 'binned_area'; the original
# grouped by 'binned_year' again (copy/paste slip), duplicating year_df
area_df = df.groupby(['binned_area'])[['total_value', 'unit_price', 'change_rate']].mean()
pio.renderers.default = 'browser'
# create graph for price change rate based on region, sorted ascending so
# the horizontal bars read smallest-to-largest
change_rate, change_rate_region = (list(t) for t in zip(*sorted(zip(region_df['change_rate'], region_df.index))))
price_change_fig = go.Figure(data=[
    go.Bar(name='avg price change rate', x=change_rate, y=change_rate_region, orientation='h')
])
price_change_fig.update_layout(title_text='Price change rate')
# create graph for unit price based on region
unit_price, unit_price_region = (list(t) for t in zip(*sorted(zip(region_df['unit_price'], region_df.index))))
unit_price_fig = go.Figure(data=[
    go.Bar(name='avg unit price', x=unit_price, y=unit_price_region, orientation='h')
])
unit_price_fig.update_layout(title_text='Value per Square Feet')
# use folium to create map graph; the log scale keeps a few very
# expensive districts from washing out the colour ramp
region_df['log_unit_price'] = np.log(region_df['unit_price'])
m = folium.Map(location=[49.258, -123.15], zoom_start=13)
folium.Choropleth(
    geo_data='./vancouver.json',
    name='choropleth',
    data=region_df,
    columns=['district', 'log_unit_price'],
    key_on='feature.id',
    fill_color='Oranges',
    fill_opacity=0.9,
    line_opacity=0.2,
    legend_name='unit price: dollar per Sq Ft (value is log)'  # fixed unbalanced '('
).add_to(m)
folium.LayerControl().add_to(m)
m.save('region_unit_price.html')
#################################################################################################
#Build dashboard
# the oil-and-gas demo stylesheet provides the 'row'/'six columns' grid classes
external_stylesheets = ['https://cdn.rawgit.com/plotly/dash-app-stylesheets/2d266c578d2a6e8850ebce48fdb52759b2aef506/stylesheet-oil-and-gas.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# exposed so a WSGI server (e.g. gunicorn) can serve the app
server = app.server
colors = {
    'background': '#F0FFFF',
    'text': '#000000'
    # 'text': '#7FDBFF'
}
# the 22 Vancouver planning districts; defined once and shared by the
# region-filter dropdown and the prediction dropdown (the original
# duplicated the whole options list twice)
_REGIONS = ['Kensington-Cedar Cottage', 'Renfrew-Collingwood', 'Sunset',
            'Hastings-Sunrise', 'Dunbar-Southlands', 'Victoria-Fraserview',
            'Riley Park', 'Marpole', 'Killarney', 'Kerrisdale', 'Kitsilano',
            'Grandview-Woodland', 'Arbutus-Ridge', 'Mount Pleasant', 'Shaughnessy',
            'Oakridge', 'West Point Grey', 'Fairview', 'Strathcona', 'South Cambie',
            'Downtown', 'West End']
_REGION_OPTIONS = [{'label': region, 'value': region} for region in _REGIONS]


def _three_graph_row(total_id, unit_id, rate_id):
    """One 80%-wide row of three empty graphs; the callbacks below fill them."""
    return html.Div(
        html.Div([
            html.Div([dcc.Graph(id=total_id, figure={})], className='four columns'),
            html.Div([dcc.Graph(id=unit_id, figure={})], className='four columns'),
            html.Div([dcc.Graph(id=rate_id, figure={})], className='four columns')
        ], className='row'),
        style={'padding': '50px, 50px', 'height': '30%', 'width': '80%', 'margin': 'auto'}
    )


app.layout = html.Div(style={'backgroundColor': colors['background']}, children=[
    html.H1(children='Vancouver house price',
            style={
                'textAlign': 'center',
                'color': colors['text'],
                'padding': '50px, 50px'
            }
            ),
    # region based graph
    html.Div(
        html.Div([
            html.Div([
                dcc.Graph(style={'height': '700px'}, id='region-price-change', figure=price_change_fig)
            ], className='six columns'),
            html.Div([
                dcc.Graph(style={'height': '700px'}, id='region-unit-price', figure=unit_price_fig)
            ], className='six columns')
        ], className='row'),
        style={'padding': '50px, 50px', 'height': '1000', 'width': '80%', 'margin': 'auto'}
    ),
    # choropleth rendered by folium above and embedded as static HTML
    html.Iframe(id='map', srcDoc=open('region_unit_price.html', 'r').read(), width='80%', height='800',
                style={'display': 'block', 'margin-left': 'auto', 'margin-right': 'auto',
                       'padding': '25px 50px', 'border-style': 'none'}),
    # drop down menu for region selection (all regions selected by default)
    html.Div([
        html.Label('Select Regions', style={'color': colors['text'], 'margin': 'auto'}),
        dcc.Dropdown(
            id='graph-dropdown',
            options=_REGION_OPTIONS,
            value=list(_REGIONS),
            multi=True,
            style={'backgroundColor': colors['background'], 'margin': 'auto'}
        )
    ], style={'backgroundColor': colors['background'], 'margin': 'auto', 'width': '80%'}
    ),
    # line charts based on rooms / built year / house area
    _three_graph_row('total value based on room', 'unit price based on room', 'change rate based on room'),
    _three_graph_row('total value based on year', 'unit price based on year', 'change rate based on year'),
    _three_graph_row('total value based on area', 'unit price based on area', 'change rate based on area'),
    # apply machine learning model
    html.Div([
        html.Header('House price prediction', style={'color': colors['text'], 'margin': 'auto'}),
        html.Div([
            html.Div([
                dcc.Dropdown(
                    id='prediction region',
                    options=_REGION_OPTIONS,
                    multi=False,
                    style={'backgroundColor': colors['background'], 'margin': 'auto'}
                )
            ], style={'backgroundColor': colors['background'], 'margin': 'auto'}, className='two columns'),
            # fixed: the placeholder text was missing its closing parenthesis
            html.Div(dcc.Input(id='area-text', value='area (1000 for 1000 Sq Ft)', type='text'), className='two columns'),
            html.Div(dcc.Input(id='bedroom-text', value='# of bedroom', type='text'), className='two columns'),
            html.Div(dcc.Input(id='bathroom-text', value='# of bathroom', type='text'), className='two columns'),
            html.Div(dcc.Input(id='garage-text', value='garage (1 for yes)', type='text'), className='two columns'),
            html.Div(dcc.Input(id='year-text', value='built year', type='text'), className='two columns'),
        ], className='row'),
        html.Div(id='price-prediction', style={'margin': 'auto'}),
    ], style={'width': '80%', 'margin': 'auto'})
])
# line chart based on rooms and regions
@app.callback(
    Output('total value based on room', 'figure'),
    [Input('graph-dropdown', 'value')])
def update_room_total_value_graph(value):
    """Redraw the rooms-vs-total-value chart for the selected regions."""
    return create_line_graph(value, 'total_value')
@app.callback(
    Output('unit price based on room', 'figure'),
    [Input('graph-dropdown', 'value')])
def update_room_unit_price_graph(value):
    """Redraw the rooms-vs-unit-price chart for the selected regions."""
    return create_line_graph(value, 'unit_price')
@app.callback(
    Output('change rate based on room', 'figure'),
    [Input('graph-dropdown', 'value')])
def update_room_change_rate_graph(value):
    """Redraw the rooms-vs-change-rate chart for the selected regions."""
    return create_line_graph(value, 'change_rate')
def create_line_graph(dist, value):
    """
    create plotly figure based on district
    :param dist: list of str, regions collected from dropdown
    :param value: str, column we want to explore in dataframe ('total_value', 'unit_price', 'change_rate')
    :return: plotly figure
    """
    # restrict to the chosen districts; fall back to the whole frame
    if dist:
        frame = pd.concat([df[df['Geo Local Area'] == region] for region in dist])
    else:
        frame = df.copy()
    fig = go.Figure()
    # one trace per room-count column, aggregated the same way
    for column, label in (('bedroom', '# of bedrooms'), ('bathroom', '# of bathrooms')):
        grouped = frame.groupby([column]).agg(
            {'total_value': ['mean', 'count'], 'unit_price': 'mean', 'change_rate': 'mean'})
        fig.add_trace(go.Scatter(x=grouped.index, y=grouped[value]['mean'],
                                 mode='lines+markers', name=label))
    titles = {
        'total_value': 'Building price based on rooms',
        'unit_price': 'Unit price based on rooms',
        'change_rate': 'Price Change rate based on rooms',
    }
    if value in titles:
        fig.update_layout(title_text=titles[value])
    fig.update_layout(legend_orientation="h")
    return fig
# year based graph
@app.callback(
    Output('total value based on year', 'figure'),
    [Input('graph-dropdown', 'value')])
def update_year_total_value_graph(value):
    """
    create plotly figure based on regions and year
    :param value: list of str, regions collected from dropdown
    :return: plotly figure
    """
    frame = pd.concat([df[df['Geo Local Area'] == region] for region in value]) if value else df.copy()
    by_year = frame.groupby(['binned_year'])[['total_value', 'unit_price', 'change_rate']].mean()
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=by_year.index, y=by_year['total_value'], mode='lines+markers'))
    fig.update_layout(title_text='Building price based on built year', xaxis_title="built year")
    return fig
@app.callback(
    Output('unit price based on year', 'figure'),
    [Input('graph-dropdown', 'value')])
def update_year_unit_price_graph(value):
    """Redraw the built-year vs unit-price chart for the selected regions."""
    frame = pd.concat([df[df['Geo Local Area'] == region] for region in value]) if value else df.copy()
    by_year = frame.groupby(['binned_year'])[['total_value', 'unit_price', 'change_rate']].mean()
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=by_year.index, y=by_year['unit_price'], mode='lines+markers'))
    fig.update_layout(title_text='Unit price based on built year', xaxis_title="built year")
    return fig
@app.callback(
    Output('change rate based on year', 'figure'),
    [Input('graph-dropdown', 'value')])
def update_year_change_rate_graph(value):
    """Redraw the built-year vs price-change-rate chart for the selected regions."""
    frame = pd.concat([df[df['Geo Local Area'] == region] for region in value]) if value else df.copy()
    by_year = frame.groupby(['binned_year'])[['total_value', 'unit_price', 'change_rate']].mean()
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=by_year.index, y=by_year['change_rate'], mode='lines+markers'))
    fig.update_layout(title_text='Price change rate based on built year', xaxis_title="built year")
    return fig
# area based graph
@app.callback(
    Output('total value based on area', 'figure'),
    [Input('graph-dropdown', 'value')])
def update_area_total_value_graph(value):
    """
    create plotly figure based on regions and area
    :param value: list of str, regions collected from dropdown
    :return: plotly figure
    """
    frame = pd.concat([df[df['Geo Local Area'] == region] for region in value]) if value else df.copy()
    by_area = frame.groupby(['binned_area'])[['total_value', 'unit_price', 'change_rate']].mean()
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=by_area.index, y=by_area['total_value'], mode='lines+markers'))
    fig.update_layout(title_text='Building price based on built area')
    return fig
@app.callback(
    Output('unit price based on area', 'figure'),
    [Input('graph-dropdown', 'value')])
def update_area_unit_price_graph(value):
    """Redraw the area vs unit-price chart for the selected regions."""
    frame = pd.concat([df[df['Geo Local Area'] == region] for region in value]) if value else df.copy()
    by_area = frame.groupby(['binned_area'])[['total_value', 'unit_price', 'change_rate']].mean()
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=by_area.index, y=by_area['unit_price'], mode='lines+markers'))
    fig.update_layout(title_text='Unit price based on built area')
    return fig
@app.callback(
    Output('change rate based on area', 'figure'),
    [Input('graph-dropdown', 'value')])
def update_area_change_rate_graph(value):
    """Redraw the area vs price-change-rate chart for the selected regions."""
    frame = pd.concat([df[df['Geo Local Area'] == region] for region in value]) if value else df.copy()
    by_area = frame.groupby(['binned_area'])[['total_value', 'unit_price', 'change_rate']].mean()
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=by_area.index, y=by_area['change_rate'], mode='lines+markers'))
    fig.update_layout(title_text='Price change rate based on built area')
    return fig
# using linear regression model to predict house price
@app.callback(
    Output('price-prediction', 'children'),
    [Input('prediction region', 'value'), Input('area-text', 'value'), Input('bedroom-text', 'value'),
     Input('bathroom-text', 'value'), Input('garage-text', 'value'), Input('year-text', 'value'),])
def lr(region, area, bedr, bathr, gar, year):
    """Predict a house price with the per-region linear model.

    Fires on every keystroke, so it returns None (blank output) until a
    region is chosen AND all five numeric fields contain digits.
    """
    # BUG FIX: the original loaded the model under `if region is not None:`
    # but ran the prediction in a separate, un-nested `if`, so typing the
    # numbers before picking a region raised NameError on `model`.
    if region is None:
        return None
    if not (area.isdigit() and bathr.isdigit() and bedr.isdigit()
            and year.isdigit() and gar.isdigit()):
        return None
    model = load(f'{region}.joblib')
    # feature order must match the order the model was trained with;
    # also renamed the original local `input`, which shadowed the builtin
    features = pd.DataFrame([[int(area), int(bathr), int(bedr), int(year), int(gar)]])
    return f'Predicted price: $ {int(model.predict(features)[0])}'
if __name__ == '__main__':
    # debug=True enables hot reload and the in-browser debugger (dev only)
    app.run_server(debug=True)
|
import tkinter as tk
# Small tkinter demo: a canvas with an image and some shapes, plus four
# buttons that nudge the rectangle around.
window = tk.Tk()
window.title('my window')
window.geometry('400x400')
canvas = tk.Canvas(window, bg = 'yellow', height=200, width=400)
# keep a module-level reference to the image so it is not garbage-collected
image_file = tk.PhotoImage(file='../ins.gif')
imgae = canvas.create_image(10,10, anchor='nw', image=image_file) # anchor sets the reference corner: NW,N,WE,W,CENTER,E,SW,S,SE
x0,y0,x1,y1 = 50, 50, 80, 80
line = canvas.create_line(x0,y0,x1+15,y1+15)
oval = canvas.create_oval(x0,y0,x1,y1, fill = 'red')
arc = canvas.create_arc(x0+30,y0+30,x1+30,y1+30,start=30,extent=300)
rect = canvas.create_rectangle(100,30,100+50,30+50)
canvas.pack()
# the four handlers below shift the rectangle by 10 px per button press
def moveit_up():
    canvas.move(rect,0,-10)
def moveit_down():
    canvas.move(rect,0,10)
def moveit_left():
    canvas.move(rect,-10,0)
def moveit_right():
    canvas.move(rect,10,0)
# NOTE(review): .place() returns None, so up/down/left/right are all None
up = tk.Button(window,text='Go up', command = moveit_up).place(bordermode='outside', anchor='center',x=200,y=230,height=50,width=100)
down = tk.Button(window,text='Go down', command = moveit_down).place(anchor='center',x=200,y=300,height=40,width=100)
left = tk.Button(window,text='Go left', command = moveit_left).place(anchor='center',x=150,y=265,height=40,width=100)
right = tk.Button(window,text='Go right', command = moveit_right).place(anchor='center',x=250,y=265,height=40,width=100)
window.mainloop()
#!/usr/bin/env python2.7
# -*- coding: utf-8; -*-
"""
This script converts Twitter data from corpus in XML format to a tab separated
value format in which the 1-st field is Tweet's id, the 2-nd field is its
creation time, and the 3-rd field is the actual text.
"""
##################################################################
# Libraries
import xml.etree.ElementTree as ET
from alt_argparse import argparser
from alt_fio import AltFileInput, AltFileOutput
##################################################################
# Constants
##################################################################
# Methods
##################################################################
# Arguments
argparser.description = """Utility for converting plain text XML corpus to a tab
separated format."""
argparser.add_argument("source_file",
                       help="source XML file containing corpus")
args = argparser.parse_args()
# parse the whole corpus up front
srcdoc = ET.parse(args.source_file)
##################################################################
# Main
foutput = AltFileOutput(encoding=args.encoding,
                        flush=args.flush)
# NOTE(review): finput is created for symmetry but never read below
finput = AltFileInput(*args.files,
                      print_func=foutput.fprint)
# one tab-separated line per tweet: id, creation time, raw text
for tweet in srcdoc.iter("tweet"):
    foutput.fprint('\t'.join([tweet.get("id", "unknown"),
                              tweet.get("time", "unknown"),
                              tweet.text]))
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#scipy.optimize plays the role of MATLAB's fminunc: it minimises a function given its cost and gradient
import scipy.optimize as opt
#classification_report gives a convenient summary of classification metrics
from sklearn.metrics import classification_report as cr
#=================visualizing data
# raw training data: two microchip test scores plus a 0/1 acceptance label
path = 'E:\lessons\ML wu.n.g\coursera-ml-py-master\coursera-ml-py-master\machine-learning-ex2\ex2\ex2data2.txt'
data2 = pd.read_csv(path, names = ['Test1', 'Test2', 'Accepter'])
#print(data2.head())
def plot_data():
    """Scatter the two test scores: accepted (blue 'o') vs rejected (red 'x')."""
    accepted = data2[data2.Accepter.isin([1])]
    rejected = data2[data2.Accepter.isin([0])]
    fig, axes = plt.subplots(figsize=(8, 8))
    axes.scatter(accepted['Test1'], accepted['Test2'], s=50, c='b', marker='o', label='good')
    axes.scatter(rejected['Test1'], rejected['Test2'], s=50, c='r', marker='x', label='bad')
    axes.legend()
    axes.set_xlabel('Test 1 Score')
    axes.set_ylabel('Test 2 Score')
    axes.set_title('Data2')
plot_data()
#plt.show()
#=============feature mapping
# Map the 2 raw features to every polynomial term up to the given power
# (28 features for power=6), enabling a non-linear decision boundary.
def feature_mapping(x1, x2, power):
    """Return a DataFrame of terms f{a}{b} = x1**a * x2**b for all a+b <= power."""
    terms = {}
    # range() excludes its upper bound, hence the +1s
    for degree in range(power + 1):
        for p in range(degree + 1):
            terms["f{}{}".format(degree - p, p)] = np.power(x1, degree - p) * np.power(x2, p)
    return pd.DataFrame(terms)
# .as_matrix() was deprecated and then removed from pandas (0.25);
# .to_numpy() is the supported replacement and returns the same ndarray.
x1 = data2['Test1'].to_numpy()
x2 = data2['Test2'].to_numpy()
ndata2 = feature_mapping(x1, x2, power = 6)
#==============Regularized CostFunction
def sigmoid(z):
    """Element-wise logistic function 1 / (1 + exp(-z))."""
    denominator = 1 + np.exp(-z)
    return 1 / denominator
def cost(theta , X, Y):
    """Mean logistic cross-entropy loss of parameters theta on (X, Y)."""
    # logistic hypothesis, written out inline
    h = 1 / (1 + np.exp(-(X @ theta)))
    return np.mean(-Y * np.log(h) - (1 - Y) * np.log(1 - h))
# X already contains the bias column of ones (f00) from feature_mapping,
# so nothing needs to be prepended.
# .as_matrix() was removed in pandas 0.25 -> use .to_numpy()
X = ndata2.to_numpy()
Y = data2['Accepter'].to_numpy()
theta = np.zeros(X.shape[1]).reshape(-1)
#print(X.shape,Y.shape,theta.shape)
def costReg(theta, X, Y, l=1):
    """L2-regularized logistic cost; the bias weight theta[0] is excluded
    from the penalty term."""
    weights = theta[1:]
    penalty = (l / (2 * len(X))) * np.dot(weights, weights)
    return cost(theta, X, Y) + penalty
print(costReg(theta, X, Y, l=1))
#==============Regularized Gradient
def gradient(theta, x, y):
    """Gradient of the unregularized logistic cost with respect to theta."""
    # logistic hypothesis, written out inline
    h = 1 / (1 + np.exp(-(x @ theta)))
    return (x.T @ (h - y)) / len(y)
def gradientReg(theta, x, y, l=1):
    """Gradient of the L2-regularized logistic cost.

    BUG FIX: the penalty term now scales with the regularization strength
    l -- the original hard-coded 1/len(y), silently ignoring l (behavior
    is unchanged for the default l=1).  The bias weight theta[0] is
    conventionally left unpenalized.
    """
    reg = (l / len(y)) * theta
    reg[0] = 0  # reg is a fresh array, so theta itself is not mutated
    return gradient(theta, x, y) + reg
#gradientReg(theta, X, Y, l=1)
#print(gradientReg(theta, X, Y))
#============Learning Parameters(theta)
'''
result2 = opt.fmin_tnc(func = costReg, x0 = theta, fprime = gradientReg, args=(X, Y, 2))
print(result2)
'''
#print(theta, theta.shape)
#print(X.shape, X, Y.shape, Y)
#逆矩阵操作
B = np.linalg.inv(X.T @ X)
final_theta = B @ X.T @ Y
print(final_theta, final_theta.shape)
'''高级库使用发放
from sklearn import linear_model#调用sklearn的线性回归包
model = linear_model.LogisticRegression(penalty='l2', C=1.0)
model.fit(X, y.ravel())
LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,
intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1,
penalty='l2', random_state=None, solver='liblinear', tol=0.0001,
verbose=0, warm_start=False)
model.score(X, y) # 0.8305084745762712
'''
#============Evaluating
def predict(theta, X):
    """Return 0/1 class labels using a 0.5 probability threshold."""
    probs = 1 / (1 + np.exp(-(X @ theta)))
    return [1 if p >= 0.5 else 0 for p in probs]
'''
final_theta = result2[0]
'''
predictions = predict(final_theta, X)
correct = [1 if a==b else 0 for (a,b) in zip(predictions, Y)]
# training-set accuracy
acc = sum(correct) / len(correct)
print(acc)
#===========Decision Boundary
x = np.linspace(-1,1.5,250)
# build the grid with meshgrid, flatten with ravel(), and push the points
# through the same polynomial feature mapping used for training
xx, yy = np.meshgrid(x, x)
z = feature_mapping(xx.ravel(), yy.ravel(), 6).to_numpy()  # .as_matrix() removed in pandas 0.25
print(z.shape)
z = z @ final_theta
# z must be reshaped back to the grid shape before contouring
print(z.shape,final_theta.shape,xx.shape)
z= z.reshape(xx.shape)
plot_data()
# passing 0 draws only the single z == 0 level: the decision boundary
contour = plt.contour(xx, yy, z, 0 )
plt.clabel(contour,fontsize=10,colors=('k'))
plt.ylim(-0.8, 1.2)
plt.show()
|
from collections import defaultdict
with open("day11.txt") as f:
    line = f.read()
# the Intcode program is one comma-separated line of numbers
initial_list = line.split(',')
# program memory: address -> value; defaultdict(int) yields 0 for any
# address the program has not written yet
x = defaultdict(int)
for count, item in enumerate(initial_list):
    x[count] = item
# instruction pointer and relative base persist across run_programme calls
i = 0
relative_base = 0
def run_programme(input_colour, i):
    """Run the Intcode machine from instruction pointer i until it has
    produced two outputs, or halts.

    Memory is the module-level defaultdict ``x`` (address -> value; values
    may be int or str).  Every instruction is zero-padded to the 5-char
    form ABCDE, where DE is the opcode and C/B/A are the modes of
    parameters 1-3 (0 = position, 1 = immediate, 2 = relative).

    Returns (colour, direction, i) after two outputs -- the paint colour
    and the turn direction for the hull robot -- or False on opcode 99.
    """
    global relative_base
    output_count = 0
    while output_count < 2:
        # normalise the current instruction to a zero-padded 5-char string
        x[i] = str(x[i])
        while len(x[i]) < 5:
            x[i] = '0' + x[i]
        if x[i][3:] == '01':
            # opcode 01: add param1 + param2, store at param3
            if x[i][2] == '0':
                first_number = int(x[int(x[i+1])])
            elif x[i][2] == '1':
                first_number = int(x[i+1])
            elif x[i][2] == '2':
                first_number = int(x[int(x[i+1]) + relative_base])
            if x[i][1] == '0':
                second_number = int(x[int(x[i+2])])
            elif x[i][1] == '1':
                second_number = int(x[i+2])
            elif x[i][1] == '2':
                second_number = int(x[int(x[i+2]) + relative_base])
            # write targets support position (0) and relative (2) modes only
            if x[i][0] == '0':
                index_to_replace = int(x[i+3])
            elif x[i][0] == '2':
                index_to_replace = int(x[i+3]) + relative_base
            x[index_to_replace] = first_number + second_number
            i += 4
        elif x[i][3:] == '02':
            # opcode 02: multiply param1 * param2, store at param3
            if x[i][2] == '0':
                first_number = int(x[int(x[i+1])])
            elif x[i][2] == '1':
                first_number = int(x[i+1])
            elif x[i][2] == '2':
                first_number = int(x[int(x[i+1]) + relative_base])
            if x[i][1] == '0':
                second_number = int(x[int(x[i+2])])
            elif x[i][1] == '1':
                second_number = int(x[i+2])
            elif x[i][1] == '2':
                second_number = int(x[int(x[i+2]) + relative_base])
            if x[i][0] == '0':
                index_to_replace = int(x[i+3])
            elif x[i][0] == '2':
                index_to_replace = int(x[i+3]) + relative_base
            x[index_to_replace] = first_number * second_number
            i += 4
        elif x[i][3:] == '03':
            # opcode 03: store the input (current tile colour) at param1
            if x[i][2] == '0':
                x[int(x[i+1])] = str(input_colour)
            elif x[i][2] == '2':
                x[int(x[i+1]) + relative_base] = str(input_colour)
            i += 2
        elif x[i][3:] == '04':
            # opcode 04: emit param1 as output
            if x[i][2] == '0':
                output = int(x[int(x[i+1])])
            elif x[i][2] == '1':
                output = int(x[i+1])
            elif x[i][2] == '2':
                output = int(x[int(x[i+1]) + relative_base])
            output_count += 1
            i += 2
        elif x[i][3:] == '05':
            # opcode 05: jump-if-true -- set i to param2 when param1 != 0
            if x[i][2] == '0':
                check = int(x[int(x[i+1])])
            elif x[i][2] == '1':
                check = int(x[i+1])
            elif x[i][2] == '2':
                check = int(x[int(x[i+1]) + relative_base])
            if check != 0:
                if x[i][1] == '0':
                    i = int(x[int(x[i+2])])
                elif x[i][1] == '1':
                    i = int(x[i+2])
                elif x[i][1] == '2':
                    i = int(x[int(x[i+2]) + relative_base])
            else:
                i += 3
        elif x[i][3:] == '06':
            # opcode 06: jump-if-false -- set i to param2 when param1 == 0
            if x[i][2] == '0':
                check = int(x[int(x[i+1])])
            elif x[i][2] == '1':
                check = int(x[i+1])
            elif x[i][2] == '2':
                check = int(x[int(x[i+1]) + relative_base])
            if check == 0:
                if x[i][1] == '0':
                    i = int(x[int(x[i+2])])
                elif x[i][1] == '1':
                    i = int(x[i+2])
                elif x[i][1] == '2':
                    i = int(x[int(x[i+2]) + relative_base])
            else:
                i += 3
        elif x[i][3:] == '07':
            # opcode 07: less-than -- store 1 at param3 if param1 < param2 else 0
            if x[i][2] == '0':
                first_number = int(x[int(x[i+1])])
            elif x[i][2] == '1':
                first_number = int(x[i+1])
            elif x[i][2] == '2':
                first_number = int(x[int(x[i+1]) + relative_base])
            if x[i][1] == '0':
                second_number = int(x[int(x[i+2])])
            elif x[i][1] == '1':
                second_number = int(x[i+2])
            elif x[i][1] == '2':
                second_number = int(x[int(x[i+2]) + relative_base])
            if first_number < second_number:
                if x[i][0] == '0':
                    x[int(x[i+3])] = '1'
                elif x[i][0] == '2':
                    x[int(x[i+3]) + relative_base] = '1'
            else:
                if x[i][0] == '0':
                    x[int(x[i+3])] = '0'
                elif x[i][0] == '2':
                    x[int(x[i+3]) + relative_base] = '0'
            i += 4
        elif x[i][3:] == '08':
            # opcode 08: equals -- store 1 at param3 if param1 == param2 else 0
            if x[i][2] == '0':
                first_number = int(x[int(x[i+1])])
            elif x[i][2] == '1':
                first_number = int(x[i+1])
            elif x[i][2] == '2':
                first_number = int(x[int(x[i+1]) + relative_base])
            if x[i][1] == '0':
                second_number = int(x[int(x[i+2])])
            elif x[i][1] == '1':
                second_number = int(x[i+2])
            elif x[i][1] == '2':
                second_number = int(x[int(x[i+2]) + relative_base])
            if first_number == second_number:
                if x[i][0] == '0':
                    x[int(x[i+3])] = '1'
                elif x[i][0] == '2':
                    x[int(x[i+3]) + relative_base] = '1'
            else:
                if x[i][0] == '0':
                    x[int(x[i+3])] = '0'
                elif x[i][0] == '2':
                    x[int(x[i+3]) + relative_base] = '0'
            i += 4
        elif x[i][3:] == '09':
            # opcode 09: adjust the relative base by param1
            if x[i][2] == '0':
                relative_base += int(x[int(x[i+1])])
            elif x[i][2] == '1':
                relative_base += int(x[i+1])
            elif x[i][2] == '2':
                relative_base += int(x[int(x[i+1]) + relative_base])
            i += 2
        elif x[i][3:] == '99':
            # opcode 99: halt
            return False
        # record outputs as they appear: first the paint colour, then the turn
        if output_count == 1:
            colour = output
        elif output_count == 2:
            direction = output
    return colour, direction, i
position = (0, 0)
direction = 0 # 0 is up, 1 is right, 2 is down, 3 is left
tiles_painted = set()
tile_colour = defaultdict(int) # initial colour of all tiles except first one are black, which is 0
tile_colour[position] = 1  # part 2: the robot starts on a white panel
state = True
# drive the hull-painting robot until the program halts
while state:
    result = run_programme(tile_colour[position], i)
    if result == False:
        state = False
    else:
        colour_painted, direction_turn, i = result
        tile_colour[position] = colour_painted
        tiles_painted.add(position)
        # 0 = turn left (equivalent to +3 mod 4), 1 = turn right
        if direction_turn == 0:
            direction += 3
        elif direction_turn == 1:
            direction += 1
        # step one tile forward along the current heading
        if direction % 4 == 0:
            position = (position[0], position[1] - 1)
        elif direction % 4 == 1:
            position = (position[0] + 1, position[1])
        elif direction % 4 == 2:
            position = (position[0], position[1] + 1)
        elif direction % 4 == 3:
            position = (position[0] - 1, position[1])
# render the painted registration identifier row by row (45 x 7 grid),
# sorted so each row (fixed y) prints left to right
coordinates_list = []
for i in range(45):
    for j in range(7):
        coordinates_list.append((i, j))
sorted_list = sorted(coordinates_list , key=lambda k: [k[1], k[0]])
for i in sorted_list:
    print(tile_colour[i], end='')
|
from BeautifulSoup import BeautifulSoup  # BeautifulSoup 3 / Python 2 era import
# output file collecting one image URL per line
f = open("main.1","w")
def parse(html):
    """Extract every <img> src from the given HTML and append it to f."""
    soup = BeautifulSoup(html)
    for link in soup.findAll('img'):
        # Python 2: encode to UTF-8 bytes before writing
        f.write(link.get('src').encode('utf-8')+"\n")
# each line of main_jpgs is treated as an independent HTML snippet
lines = tuple(open("main_jpgs", 'r'))
for i in lines:
    parse(i)
|
## normalize.py
## Yuan Wang
from pandas import DataFrame
import pandas
from sklearn.preprocessing import MinMaxScaler, StandardScaler
__DEBUG__ = True  # when set, print a small sample of the scaled rows
def normalize(df):
    """
    Normalizes all feature columns in a given dataframe. Returns a new dataframe.
    """
    scaled = scale_series(df.values)
    # print beginning
    if __DEBUG__:
        for row in scaled[:5]:
            print(row)
    # rebuild a DataFrame, retaining the original index and column names
    # todo: return scaler as well.
    return DataFrame(data=scaled, index=df.index, columns=df.columns.values)
# scaler = MinMaxScaler(feature_range=(-1, 1))
def scale_series(data, scaler=None):
    """Fit a scaler to *data* and return the transformed array.

    A fresh StandardScaler is created per call when none is supplied --
    the original used a mutable default instance shared across calls.
    """
    if scaler is None:
        scaler = StandardScaler()
    # scaler = MinMaxScaler(feature_range=feature_range)
    scaler = scaler.fit(data)
    # print stats on scaler
    # BUG FIX: the closing paren was misplaced -- len(scaler.mean_ > 0)
    # took the length of a boolean array instead of testing that any
    # features exist.
    if len(scaler.mean_) > 0:
        for i in range(len(scaler.mean_)):
            print('Mean: %f, Var: %f' % (scaler.mean_[i], scaler.var_[i]))
    normalized = scaler.transform(data)
    return normalized
def norm_series(x, scaler=None):
    """
    normalize data of df column

    Fit-transforms a single pandas Series; returns an (n, 1) array.
    The default scaler is now created per call (the original shared one
    mutable StandardScaler instance across all calls).
    """
    if scaler is None:
        scaler = StandardScaler()
    data = x.values.reshape(-1, 1) #returns a numpy array, shaped as one column
    x_scaled = scaler.fit_transform(data)
    return x_scaled
|
from PyQt4.QtGui import *
from PyQt4.QtCore import *
class TreeView(QTreeView):
    """Sortable, read-only tree view."""
    def __init__(self, parent):
        QTreeView.__init__(self, parent)
        # disable all edit triggers so items cannot be edited in place
        self.setEditTriggers(QAbstractItemView.NoEditTriggers)
        self.setSortingEnabled(True)
|
#877 - Stone Game
'''
Alex and Lee play a game with several piles of stones.  An even number of
piles is laid out in a row, each pile holding a positive number of stones
piles[i].  Whoever ends up with the most stones wins; the total number of
stones is odd, so there are no ties.
Alex and Lee alternate turns, with Alex going first.  On each turn a player
takes an entire pile from either the beginning or the end of the row.
Play continues until no piles remain; the player holding the most stones
wins.  Assuming both play optimally, return true if Alex wins and false if
Lee wins.
'''
#Solution 1
'''
1. With an even number of piles, the leftmost pile sits at an odd position
   and the last pile at an even position.
2. Whichever end the first player takes, they can keep choosing only
   odd-position or only even-position piles; e.g. after taking the leftmost
   pile, the opponent is restricted to even-position piles.
3. So the first player can commit to taking all even-position piles or all
   odd-position piles, and since the total is odd one of those two groups
   must be larger -- the first player can always win.
return True
'''
#Solution 2: dynamic programming
'''
The problem decomposes into subproblems.
Let dp[i][j] be how many more stones the player to move can collect than
the opponent on piles i..j.  Then the recurrence holds:
dp[i][j] = max(A[i]-dp[i+1][j], A[j]-dp[i][j-1])
Taking pile i leaves the opponent to play optimally on piles i+1..j (worth
dp[i+1][j]), so that choice nets A[i]-dp[i+1][j]; the player picks the
better of the two end choices.
To evaluate this with dynamic programming we use a 2-D table (an upper
triangular matrix, since i < j) recording each dp[i][j]:
1. Build an n*n table and put each pile count on the diagonal.
2. By the recurrence, dp[i][j] depends on the cell to its left dp[i][j-1]
   and the cell below it dp[i+1][j], so we sweep the columns left to right
   and fill each column from the bottom up.
3. dp[0][n-1] is the answer the problem asks for.
'''
class Solution(object):
    def stoneGame(self, piles):
        """
        :type piles: List[int]
        :rtype: bool

        dp[i][j] = best score margin the player to move can secure on
        piles[i..j]; the first player wins iff dp[0][n-1] > 0.
        """
        #return True
        if not piles:
            return False
        n = len(piles)
        dp = [[0] * n for _ in range(n)]
        for idx in range(n):
            dp[idx][idx] = piles[idx]
        # sweep columns left-to-right and, within each column, rows
        # bottom-up, since dp[i][j] needs dp[i+1][j] and dp[i][j-1]
        for right in range(1, n):
            for left in range(right - 1, -1, -1):
                take_left = dp[left][left] - dp[left + 1][right]
                take_right = dp[right][right] - dp[left][right - 1]
                dp[left][right] = max(take_left, take_right)
        return dp[0][n - 1] > 0
|
# coding=utf-8
# English number words, printed with their index.
array_num = ['one', 'two', 'three', 'four', 'five', 'six']
# Python 3 print() + enumerate replaces the Python 2 print statement and the
# hard-coded range(0, 6) (the original was a SyntaxError on Python 3).
# Output is byte-identical to the original script's.
for i, word in enumerate(array_num):
    print("array_num %d = %s" % (i, word))
|
try:
from modules.constants import *
except ModuleNotFoundError:
print('Não foi possível carregar algum módulo.')
def player_move(self):
    """
    Dispatch movement handlers for every direction key currently held.
    Each direction accepts the arrow key or the lower/upper-case WASD key.
    """
    held = self.pressing
    if any(held[k] for k in ('Up', 'w', 'W')):
        move_up(self)
    if any(held[k] for k in ('Down', 's', 'S')):
        move_down(self)
    if any(held[k] for k in ('Left', 'a', 'A')):
        move_left(self)
    if any(held[k] for k in ('Right', 'd', 'D')):
        move_right(self)
def move_up(self):
    """
    Move the player's ship upward by its vertical speed,
    stopping at the top border of the canvas.
    """
    player = self.player
    if player.y > 30:  # keep a 30px margin from the top edge
        self.canvas.delete(player.ship)
        player.ship = self.canvas.create_image(
            (player.x, player.y), image=player.image[0])
        self.canvas.move(player.ship, 0, -player.vy)
        player.y -= player.vy
def move_down(self):
    """
    Move the player's ship downward by its vertical speed,
    stopping at the bottom border of the canvas.
    """
    player = self.player
    if player.y < CANVAS_H - 30:  # keep a 30px margin from the bottom edge
        self.canvas.delete(player.ship)
        player.ship = self.canvas.create_image(
            (player.x, player.y), image=player.image[0])
        self.canvas.move(player.ship, 0, player.vy)
        player.y += player.vy
def move_left(self):
    """
    Move the player's ship to the left by its horizontal speed,
    stopping at the left border of the canvas.
    """
    player = self.player
    if player.x <= 30:  # at the left margin: nothing to do
        return
    self.canvas.delete(player.ship)
    player.ship = self.canvas.create_image(
        (player.x, player.y), image=player.image[-1])
    self.canvas.move(player.ship, -player.vx, 0)
    player.x -= player.vx
    # Bank the sprite left unless the opposite direction is held too.
    self.p_ship = 'center' if (self.pressing['d'] or self.pressing['Right']) else 'left'
def move_right(self):
    """
    Move the player's ship to the right by its horizontal speed,
    stopping at the right border of the canvas.
    """
    player = self.player
    if player.x < CANVAS_W - 30:  # keep a 30px margin from the right edge
        self.canvas.delete(player.ship)
        player.ship = self.canvas.create_image(
            (player.x, player.y), image=player.image[1])
        self.canvas.move(player.ship, player.vx, 0)
        player.x += player.vx
        # Bank the sprite right unless the opposite direction is held too.
        self.p_ship = 'center' if (self.pressing['a'] or self.pressing['Left']) else 'right'
def pressed(self, event):
    """
    Key-press handler: mark the pressed key as held in self.pressing[].
    """
    key = event.keysym
    self.pressing[key] = True
def released(self, event):
    """
    Key-release handler: mark the released key as not held in
    self.pressing[] and recentre the ship sprite.
    """
    key = event.keysym
    self.pressing[key] = False
    self.p_ship = 'center'
# Import guard: this module only defines handlers; run "main.py" to start the game.
if __name__ == "__main__":
    print('Não é possível executar esse arquivo. Execute "main.py"')
"""
76. Minimum Window Substring
Given two strings s and t of lengths m and n respectively, return the minimum window substring of s such that every character in t (including duplicates) is included in the window. If there is no such substring, return the empty string "".
The testcases will be generated such that the answer is unique.
A substring is a contiguous sequence of characters within the string.
Example 1:
Input: s = "ADOBECODEBANC", t = "ABC"
Output: "BANC"
Explanation: The minimum window substring "BANC" includes 'A', 'B', and 'C' from string t.
"""
from collections import Counter
class Solution:
    """Three implementations of LeetCode 76, Minimum Window Substring."""

    def minWindow1(self, s: str, t: str) -> str:
        """Brute force: test every substring s[i:j+1]; O(n^3).

        Returns the minimum window of ``s`` covering all characters of ``t``
        (with multiplicity), or "" if none exists.
        Leftover debug ``print`` calls from the original were removed.
        """
        if len(t) > len(s):
            return ""
        minLength = len(s)
        index = (0, 0)
        need = Counter(t)
        for i in range(len(s)):
            for j in range(i, len(s)):
                have = Counter(s[i: j + 1])
                count = 0
                for char in need:
                    if need[char] <= have[char]:
                        count += 1
                    else:
                        break
                if count == len(need) and len(t) <= (j + 1 - i) <= minLength:
                    minLength = j + 1 - i
                    index = (i, j + 1)
        return s[index[0]:index[1]]

    def minWindow2(self, s: str, t: str) -> str:
        """Sliding window, recounting the window on every step; O(n^2).

        Expand ``right`` while the window is missing characters; while it
        covers ``t``, record it and advance ``left``.
        """
        left = 0
        right = 0
        need = Counter(t)
        index = (left, right)
        minLength = len(s)
        while right < len(s):
            have = Counter(s[left: right + 1])
            count = 0
            for char in need:
                if need[char] <= have[char]:
                    count += 1
                else:
                    break
            if count == len(need):
                # Valid window s[left:right+1]; record if it is a new minimum.
                if len(t) <= (right + 1 - left) <= minLength:
                    minLength = right + 1 - left
                    index = (left, right + 1)
                left += 1
            else:
                right += 1
        return s[index[0]:index[1]]

    def minWindow3(self, s: str, t: str) -> str:
        """Sliding window: expand right, then shrink left while still valid.

        Bug fix vs. the original: after the shrink loop, the smallest valid
        window is s[left-1:right+1], whose length is ``right + 2 - left``;
        the original computed ``right + 1 - left - 1`` (off by two), which
        kept stale longer windows and could reject the true minimum.
        The unused ``helper2`` of the original was removed as dead code.
        """
        def covers(window_counts, need_counts, t):
            # True when the window holds at least t's multiplicity of each char.
            for char in t:
                if need_counts[char] > window_counts[char]:
                    return False
            return True

        left = 0
        right = 0
        need = Counter(t)
        index = (left, right)
        minLength = len(s)
        while right < len(s):
            have = Counter(s[left: right + 1])
            if covers(have, need, t):
                # Shrink from the left until the window no longer covers t.
                while True:
                    trimmed = Counter(s[left: right + 1])
                    if covers(trimmed, need, t):
                        left += 1
                    else:
                        # Smallest valid window was s[left-1:right+1].
                        length = right + 2 - left
                        if len(t) <= length <= minLength:
                            minLength = length
                            index = (left - 1, right + 1)
                        break
            else:
                right += 1
        return s[index[0]:index[1]]
if __name__ == '__main__':
    # Ad-hoc smoke run of the O(n^2) sliding-window variant.
    s = Solution()
    print(s.minWindow2("acbbaca", "aba"))
|
# -*- coding: utf-8 -*-
"""
9-6 文件比较
未完成
"""
import sys,os.path
def compareTxt(txt1=None, txt2=None):
    """Compare two text files line by line and print where they first differ.

    Prints the 0-based index of the first differing line; when one file is a
    prefix of the other, also prints the index just past the compared region
    (the original script's behavior, preserved). Paths are resolved relative
    to ``sys.path[0]``; absolute paths are used as-is by ``os.path.join``.

    Parameters (new, backward-compatible): ``txt1``/``txt2`` default to
    ``None``, in which case the file names are prompted for interactively,
    exactly as the original (Python 2) version did via ``raw_input``.

    Fixes vs. the original: Python 3 syntax (``input``, ``print()``,
    ``except ... as``), file handles closed via ``with`` (they leaked), and
    ``i`` initialised so the tail-length check no longer raises ``NameError``
    when the shorter file is empty.
    """
    if txt1 is None:
        txt1 = input("Please enter the first file: ")
    if txt2 is None:
        txt2 = input("Please enter the second file: ")
    try:
        with open(os.path.join(sys.path[0], txt1), 'r') as file1, \
                open(os.path.join(sys.path[0], txt2), 'r') as file2:
            lines1 = file1.readlines()
            lines2 = file2.readlines()
        i = -1  # stays -1 when nothing is comparable, so the tail check prints 0
        for i in range(min(len(lines1), len(lines2))):
            if lines1[i] != lines2[i]:
                print(i)
                break
        if len(lines1) != len(lines2):
            print(i + 1)
    except IOError as e:
        print(e)


if __name__ == "__main__":
    # Guarded so importing this module no longer triggers the interactive run.
    compareTxt()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.