index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
29,899,400
|
usvimal/Pi-thon
|
refs/heads/rewrite-v2.0
|
/utils/db.py
|
class Database:
    """Creates the bot's database tables at startup if they do not exist yet."""

    def __init__(self, main_loop, bot):
        self.bot = bot
        self._main_loop = main_loop
        self.task_manager()

    def task_manager(self):
        # Schedule every table-creation coroutine on the bot's event loop.
        for coro in (self.ensure_todo_table(),
                     self.ensure_guild_properties(),
                     self.ensure_user_properties(),
                     self.ensure_nword_table()):
            self._main_loop.create_task(coro)

    async def _execute(self, command):
        # Run a single statement on a connection borrowed from the pool.
        async with self.bot.dbpool.acquire() as conn:
            await conn.execute(command)

    async def ensure_todo_table(self):
        """Create the todotable if absent (one row per todo entry)."""
        await self._execute(
            'CREATE TABLE IF NOT EXISTS todotable('
            'user_id BIGINT,'
            'todo TEXT,'
            'completed BOOLEAN DEFAULT False,'
            'time_added TIMESTAMP'
            ');'
        )

    async def ensure_guild_properties(self):
        """Create the guildprop table if absent (per-guild settings)."""
        await self._execute(
            'CREATE TABLE IF NOT EXISTS guildprop('
            'guild_id BIGINT,'
            'prefix VARCHAR(4),'
            'PRIMARY KEY (guild_id)'
            ');'
        )

    async def ensure_user_properties(self):
        """Create the userprop table if absent (per-user settings)."""
        await self._execute(
            'CREATE TABLE IF NOT EXISTS userprop('
            'user_id BIGINT,'
            'lyrics_source TEXT DEFAULT \'genius\','
            'PRIMARY KEY (user_id)'
            ');'
        )

    async def ensure_nword_table(self):
        """Create the nwordtable if absent (per-user word counters)."""
        await self._execute(
            'CREATE TABLE IF NOT EXISTS nwordtable('
            'user_id BIGINT,'
            'nword1 SMALLINT DEFAULT 0,'
            'nword2 SMALLINT DEFAULT 0,'
            'PRIMARY KEY (user_id)'
            ');'
        )
|
{"/cogs/settings.py": ["/config/__init__.py", "/utils/lyricsretriever.py"], "/main.py": ["/config/__init__.py", "/utils/db.py", "/utils/discord_handler.py"], "/utils/prettydiscordprinter/concrete_printers.py": ["/utils/text_formatter.py", "/utils/prettydiscordprinter/abstract_classes.py", "/utils/prettydiscordprinter/concrete_formatters.py"], "/cogs/personal_todo.py": ["/utils/text_formatter.py"], "/cogs/helper.py": ["/utils/prettydiscordprinter/concrete_printers.py"], "/cogs/fun.py": ["/config/__init__.py", "/utils/checks.py"], "/utils/discord_handler.py": ["/utils/prettydiscordprinter/__init__.py"], "/cogs/events.py": ["/config/__init__.py"], "/cogs/to-do.py": ["/utils/text_formatter.py"], "/utils/prettydiscordprinter/__init__.py": ["/utils/prettydiscordprinter/concrete_formatters.py", "/utils/prettydiscordprinter/concrete_printers.py"], "/utils/checks.py": ["/config/__init__.py"], "/utils/prettydiscordprinter/abstract_classes.py": ["/utils/text_formatter.py"], "/cogs/lyrics.py": ["/utils/text_formatter.py", "/utils/lyricsretriever.py"]}
|
29,899,401
|
usvimal/Pi-thon
|
refs/heads/rewrite-v2.0
|
/utils/prettydiscordprinter/concrete_printers.py
|
import asyncio
from copy import deepcopy
from datetime import datetime
from discord import Embed
from discord.ext.commands import Paginator
from utils.text_formatter import chunks
from utils.prettydiscordprinter.abstract_classes import PrettyAbstractPrinter
from utils.prettydiscordprinter.concrete_formatters import *
class PrettyTextPrinter(PrettyAbstractPrinter):
    """Sends plain text to discord, split into message-sized chunks."""

    def __init__(self):
        super().__init__()
        # Discord's hard cap on a single message body.
        self._chr_limit = 2000

    def _configure_formatter(self, formatter):
        # Plain text needs no extra formatter configuration.
        return formatter

    async def pretty_print(self, ctx, text):
        """Run the formatters over *text* and send it as one or more messages."""
        formatted = self._use_formatters(text)
        for piece in chunks(formatted, self._chr_limit):
            async with ctx.typing():
                await ctx.send(piece)
class PrettyCodeBlockPrinter(PrettyAbstractPrinter):
    """Sends text to discord wrapped in a code block, one message per chunk."""

    def __init__(self):
        super().__init__()
        # 2000-character message cap minus the six backtick characters.
        self._chr_limit = 2000 - 6

    def _configure_formatter(self, formatter):
        # Code blocks need no extra formatter configuration.
        return formatter

    async def pretty_print(self, ctx, text):
        """Run the formatters over *text* and send each chunk inside ``` fences."""
        formatted = self._use_formatters(text)
        for piece in chunks(formatted, self._chr_limit):
            async with ctx.typing():
                await ctx.send("```" + piece + "```")
class PrettyEmbedPrinter(PrettyAbstractPrinter):
    """ Embed printing to discord. Will be printed in description, not field. """

    def __init__(self, embed):
        super().__init__()
        # Discord's per-embed description limit.
        self._chr_limit = 2048
        # Template embed; a copy is made for every chunk sent.
        self._embed = embed

    def _configure_formatter(self, formatter):
        # Embeds need no extra formatter configuration.
        return formatter

    async def pretty_print(self, ctx, text):
        """Send *text* as one embed per chunk, cloning the template embed each time."""
        formatted = self._use_formatters(text)
        for piece in chunks(formatted, self._chr_limit):
            page = deepcopy(self._embed)
            page.description = piece
            async with ctx.typing():
                await ctx.send(embed=page)
class PrettyPaginator(PrettyAbstractPrinter):
    """
    Shows a discord message with pages and reactions acting as the button. Users can move left, right, go to the first
    page, go to the last page, and delete the message by interacting with the buttons.
    Usage: PrettyPaginator().pretty_print(embeds) where embeds will act as the pages.
    """
    def __init__(self):
        super().__init__()
        self._chr_limit = 2048
        self._entries = None  # embeds being paginated; None while the printer is idle
        # (emoji, coroutine) pairs acting as the buttons beneath the message.
        self._reaction_emojis = [
            ('\N{BLACK LEFT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR}', self._first_page),
            ('\N{BLACK LEFT-POINTING TRIANGLE}', self._previous_page),
            ('\N{BLACK RIGHT-POINTING TRIANGLE}', self._next_page),
            ('\N{BLACK RIGHT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR}', self._last_page),
            ('\N{BLACK SQUARE FOR STOP}', self._stop_pages),
        ]
        self._ctx = None
        self._message = None
        self._current_index = -1
        self._paginating = False
        self._match = None  # handler chosen by the last accepted reaction

    async def _first_page(self):
        await self._show_page(0)

    async def _previous_page(self):
        await self._show_page(self._current_index - 1)

    async def _next_page(self):
        await self._show_page(self._current_index + 1)

    async def _last_page(self):
        await self._show_page(len(self._entries) - 1)

    async def _show_page(self, index):
        """Display the page at *index*, creating or editing the paginator message."""
        # Do nothing if the index is out of range or index is the same as the current index
        if index not in range(0, len(self._entries)) or index == self._current_index:
            return
        self._current_index = index
        # Create new message if it is not created and then add reactions
        if self._message is None:
            self._message = await self._ctx.send(embed=self._entries[self._current_index])
            for (reaction, _) in self._reaction_emojis:
                await self._message.add_reaction(reaction)
        # If a message with reactions already exist, edit it
        else:
            await self._message.edit(embed=self._entries[self._current_index])

    async def _stop_pages(self):
        self._paginating = False

    def _react_check(self, reaction, user):
        """Accept only the invoking user's button reactions on the paginator message.

        Side effect: stores the matching handler in self._match."""
        if user is None or user.id != self._ctx.author.id:
            return False
        if reaction.message.id != self._message.id:
            return False
        for (emoji, func) in self._reaction_emojis:
            if reaction.emoji == emoji:
                self._match = func
                return True
        return False

    def _configure_formatter(self, formatter):
        return formatter

    async def pretty_print(self, ctx, entries):
        """Show *entries* (a list of embeds) as a reaction-driven, paginated message."""
        if self._entries is not None:
            # BUG FIX: restored the missing space in the concatenated message.
            raise Exception("Printer is being used for an existing scrollable embed. Create another printer or wait "
                            "for the existing embed to expire after the given time")
        # Adds page numbers to the title
        for i, e in enumerate(entries, start=1):
            e.title += f"| Page {i} out of {len(entries)}"
        self._entries = entries
        self._ctx = ctx
        await self._show_page(0)
        self._paginating = True
        while self._paginating:
            try:
                reaction, user = await self._ctx.bot.wait_for('reaction_add', check=self._react_check, timeout=120.0)
            except asyncio.TimeoutError:
                # BUG FIX: previously execution fell through after a timeout and invoked
                # self._match() again — a TypeError when no reaction had ever been made,
                # or a spurious extra page action otherwise.
                self._paginating = False
                break
            try:
                await self._message.remove_reaction(reaction, user)
            except Exception:
                pass  # can't remove it so don't bother doing so
            await self._match()
        try:
            await self._message.delete()
        except Exception:
            pass
        finally:
            self._ctx = None
            self._message = None
            self._current_index = -1
            self._match = None
            # BUG FIX: reset _entries so the printer becomes reusable once the
            # embed expires, as the error message above promises.
            self._entries = None
class DelayedPrinterWrapper(PrettyTextPrinter):
    """ Wraps the printer to wait a specified amount of time before printing. If a new message is requested during this
    waiting time, it will be appended to the current message and the timer will reset. When the timer is up, begin
    printing. """
    # Polling interval (seconds) used by the background flush task.
    SLEEP_DURATION = 2
    # NOTE(review): PrettyTextPrinter.__init__ is deliberately not called; all
    # printing and formatter calls are delegated to the wrapped printer. Confirm
    # the base class holds no state this subclass still relies on.
    def __init__(self, printer, delay=5.0):
        self._printer = printer  # printer that performs the actual output
        self._delay = delay      # quiet period (seconds) before flushing
        self._queue = None       # pending texts; None means no flush task is running
        self._text = None        # lines accumulated for the next flush
    def add_formatters(self, *formatters):
        # Delegate formatter registration to the wrapped printer.
        self._printer.add_formatters(*formatters)
    def add_formatter(self, formatter):
        # Delegate formatter registration to the wrapped printer.
        self._printer.add_formatter(formatter)
    async def pretty_print(self, ctx, text):
        """Queue *text* for delayed printing, starting the flush task on first use."""
        if self._queue is None:
            # First message of a batch: stamp it with the current time and start
            # the background task that eventually flushes the whole batch.
            formatted_now = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
            self._queue = []
            self._text = [f"[ {formatted_now} ]"]
            asyncio.get_event_loop().create_task(self._check_message_updates(ctx))
        self._queue.append(text)
    async def _check_message_updates(self, ctx):
        """Poll the queue; flush once no new text arrives for roughly self._delay seconds."""
        i = 0
        # +1 so the accumulated sleep time covers at least self._delay.
        max_i = int(self._delay / DelayedPrinterWrapper.SLEEP_DURATION) + 1
        while i < max_i:
            i += 1
            await asyncio.sleep(DelayedPrinterWrapper.SLEEP_DURATION)
            if len(self._queue) > 0:
                # New text arrived during the wait: absorb it and restart the timer.
                self._text.extend(self._queue)
                self._queue.clear()
                i = 0
        await self._printer.pretty_print(ctx, "\n".join(self._text))
        # Reset so the next pretty_print call starts a fresh batch.
        self._queue = None
        self._text = None
|
{"/cogs/settings.py": ["/config/__init__.py", "/utils/lyricsretriever.py"], "/main.py": ["/config/__init__.py", "/utils/db.py", "/utils/discord_handler.py"], "/utils/prettydiscordprinter/concrete_printers.py": ["/utils/text_formatter.py", "/utils/prettydiscordprinter/abstract_classes.py", "/utils/prettydiscordprinter/concrete_formatters.py"], "/cogs/personal_todo.py": ["/utils/text_formatter.py"], "/cogs/helper.py": ["/utils/prettydiscordprinter/concrete_printers.py"], "/cogs/fun.py": ["/config/__init__.py", "/utils/checks.py"], "/utils/discord_handler.py": ["/utils/prettydiscordprinter/__init__.py"], "/cogs/events.py": ["/config/__init__.py"], "/cogs/to-do.py": ["/utils/text_formatter.py"], "/utils/prettydiscordprinter/__init__.py": ["/utils/prettydiscordprinter/concrete_formatters.py", "/utils/prettydiscordprinter/concrete_printers.py"], "/utils/checks.py": ["/config/__init__.py"], "/utils/prettydiscordprinter/abstract_classes.py": ["/utils/text_formatter.py"], "/cogs/lyrics.py": ["/utils/text_formatter.py", "/utils/lyricsretriever.py"]}
|
29,899,402
|
usvimal/Pi-thon
|
refs/heads/rewrite-v2.0
|
/cogs/personal_todo.py
|
import discord
from discord.ext import commands
from utils.text_formatter import strike
class personal_todo(commands.Cog):
    """Per-user todo lists stored in the todotable database table."""

    def __init__(self, bot):
        self.bot = bot

    async def get_user_todos(self, user_id: int):
        """Return the (todo, completed) records for *user_id*.

        fetch() yields a (possibly empty) list, so the previous explicit
        None check was redundant and has been removed."""
        async with self.bot.dbpool.acquire() as conn:
            # Parameterised query; no f-string interpolation needed.
            return await conn.fetch(
                "SELECT todo, completed FROM todotable WHERE user_id = $1;",
                user_id)

    @staticmethod
    def convert_boolean_to_emoji(dict_to_convert):
        """Replace boolean values in *dict_to_convert* with ❌/✅ emoji, in place."""
        for key, item in dict_to_convert.items():
            if item is False:
                dict_to_convert[key] = '❌'
            elif item is True:  # elif: a value cannot be both False and True
                dict_to_convert[key] = '✅'

    @commands.group()
    async def todo(self, ctx):
        """Show your available todos"""
        if ctx.invoked_subcommand is None:
            if ctx.subcommand_passed:
                # A subcommand was given but did not match any known one.
                em = discord.Embed(title='Oof! That was not a valid command 🤨 ',
                                   description='Type ;help [command] for more info on a command.',
                                   colour=0x3c1835)
                await ctx.send(embed=em, delete_after=60)
            else:
                todo_record = await self.get_user_todos(ctx.author.id)
                if not todo_record:
                    await ctx.send(f"{ctx.author.mention} has no todos yet.")
                else:
                    todo_dict = dict(todo_record)
                    em = discord.Embed(title='To-dos', colour=0xff3056)
                    em.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url)
                    self.convert_boolean_to_emoji(todo_dict)
                    for key, value in todo_dict.items():
                        em.add_field(name=key, value=value, inline=False)
                    await ctx.send(embed=em)

    @todo.command()
    async def add(self, ctx, *, new_todo: str):
        """Add a todo"""
        user_id = ctx.author.id
        async with self.bot.dbpool.acquire() as conn:
            # Parameterised insert; the f-prefix on the original string did nothing.
            await conn.execute(
                "INSERT INTO todotable VALUES ($1, $2)",
                user_id, new_todo)
        await ctx.message.add_reaction('👍')
def setup(bot):
    """Register the personal_todo cog with *bot* (extension entry point)."""
    bot.add_cog(personal_todo(bot))
|
{"/cogs/settings.py": ["/config/__init__.py", "/utils/lyricsretriever.py"], "/main.py": ["/config/__init__.py", "/utils/db.py", "/utils/discord_handler.py"], "/utils/prettydiscordprinter/concrete_printers.py": ["/utils/text_formatter.py", "/utils/prettydiscordprinter/abstract_classes.py", "/utils/prettydiscordprinter/concrete_formatters.py"], "/cogs/personal_todo.py": ["/utils/text_formatter.py"], "/cogs/helper.py": ["/utils/prettydiscordprinter/concrete_printers.py"], "/cogs/fun.py": ["/config/__init__.py", "/utils/checks.py"], "/utils/discord_handler.py": ["/utils/prettydiscordprinter/__init__.py"], "/cogs/events.py": ["/config/__init__.py"], "/cogs/to-do.py": ["/utils/text_formatter.py"], "/utils/prettydiscordprinter/__init__.py": ["/utils/prettydiscordprinter/concrete_formatters.py", "/utils/prettydiscordprinter/concrete_printers.py"], "/utils/checks.py": ["/config/__init__.py"], "/utils/prettydiscordprinter/abstract_classes.py": ["/utils/text_formatter.py"], "/cogs/lyrics.py": ["/utils/text_formatter.py", "/utils/lyricsretriever.py"]}
|
29,899,403
|
usvimal/Pi-thon
|
refs/heads/rewrite-v2.0
|
/cogs/helper.py
|
import discord
from discord.ext import commands
from utils.prettydiscordprinter.concrete_printers import PrettyPaginator
class Helper(commands.Cog):
    """ Helper cog for printing out a paginated help message to the users."""

    def __init__(self, bot):
        self._bot = bot
        self._bot.remove_command("help")  # Remove the default help command if this cog is loaded.

    @commands.command()
    async def help(self, ctx, *, entry: str = None):
        """ This help function overrides the default help function. """
        paginator = PrettyPaginator()
        if entry is None:
            await paginator.pretty_print(ctx, [self._create_main_page(ctx)])
        else:
            # BUG FIX: get_command returns None for unknown names; previously that
            # crashed inside _create_lone_command_help_text. Also removed an unused
            # leftover debug Embed("Test").
            command = ctx.bot.get_command(entry)
            if command is None:
                await ctx.send(f"No command called '{entry}' was found.")
            else:
                await ctx.send(self._create_lone_command_help_text(command)[0])

    def _create_main_page(self, ctx):
        """ Create and returns the main page which will show all the command groups and their description. """
        embed = discord.Embed(title="Help: Main Page ")
        embed.description = "Main command groups. Scroll to the next page for more info."
        for name, cog in ctx.bot.cogs.items():
            # Fall back to a placeholder when a cog has no docstring.
            cog_descript = "No Description"
            if cog.description is not None and cog.description != "":
                cog_descript = cog.description
            embed.add_field(name=name, value=cog_descript, inline=False)
        embed.set_footer(text=f"Created and only usable by {ctx.author.name}")
        return embed

    def _create_command_aliases(self, command):
        """Return "[name or alias1 or alias2]" for *command* (primary name first)."""
        return "[{}]".format(" or ".join([command.name, *command.aliases]))

    def _create_lone_command_help_text(self, command):
        """ Will not attempt to get subcommands of this command but rather return the formatted command usage and
        description.
        Example of command usage: [parent1 or parent 2 or parent 3] [child1 or child2 or child3] <parameter>"""
        self_alias = self._create_command_aliases(command)
        # Parents are stored innermost-first; reverse so the outermost group leads.
        parent_alias = " ".join(reversed([self._create_command_aliases(p) for p in command.parents]))
        if command.usage is None:
            parameter = ""
        else:
            parameter = "<{}>".format(command.usage)
        return "{} {} {}".format(parent_alias, self_alias, parameter), command.help
def setup(bot):
    """Register the Helper cog with *bot* (extension entry point)."""
    bot.add_cog(Helper(bot))
|
{"/cogs/settings.py": ["/config/__init__.py", "/utils/lyricsretriever.py"], "/main.py": ["/config/__init__.py", "/utils/db.py", "/utils/discord_handler.py"], "/utils/prettydiscordprinter/concrete_printers.py": ["/utils/text_formatter.py", "/utils/prettydiscordprinter/abstract_classes.py", "/utils/prettydiscordprinter/concrete_formatters.py"], "/cogs/personal_todo.py": ["/utils/text_formatter.py"], "/cogs/helper.py": ["/utils/prettydiscordprinter/concrete_printers.py"], "/cogs/fun.py": ["/config/__init__.py", "/utils/checks.py"], "/utils/discord_handler.py": ["/utils/prettydiscordprinter/__init__.py"], "/cogs/events.py": ["/config/__init__.py"], "/cogs/to-do.py": ["/utils/text_formatter.py"], "/utils/prettydiscordprinter/__init__.py": ["/utils/prettydiscordprinter/concrete_formatters.py", "/utils/prettydiscordprinter/concrete_printers.py"], "/utils/checks.py": ["/config/__init__.py"], "/utils/prettydiscordprinter/abstract_classes.py": ["/utils/text_formatter.py"], "/cogs/lyrics.py": ["/utils/text_formatter.py", "/utils/lyricsretriever.py"]}
|
29,899,404
|
usvimal/Pi-thon
|
refs/heads/rewrite-v2.0
|
/cogs/fun.py
|
import asyncio
import config
import discord
import owotrans
from discord.ext import commands
from utils.checks import is_approved_talker
class Fun(commands.Cog):
    """Miscellaneous fun commands: owoification, talking through the bot, a dice game and word-counter lookups."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def owo(self, ctx, *, arg):
        """owoifies your message"""
        msg = owotrans.owo(arg)
        await ctx.send(msg)

    @commands.command(aliases=["say"])
    @is_approved_talker()
    @commands.bot_has_permissions(manage_messages=True)
    async def talk(self, ctx, *, arg):
        """deletes your message and talks through the bot"""
        await ctx.message.delete()
        await ctx.send(arg)

    @commands.command()
    async def dice(self, ctx):
        """ Roll the dice until you die (roll 4 to 6). """
        from random import randint
        # Delete message which invoked this command
        await ctx.message.delete()
        game_in_progress = True
        round_number = 1
        accumulated_score = 0
        # Initialise the reactions, message
        roll_emoji = "\U0001F3B2"
        stop_emoji = "\U0000274E"
        embed = discord.Embed(title="Dice Game")
        embed.description = f"| Player : {ctx.author.name} | Round : {round_number} | Game In Progress |"
        embed.add_field(name="Score", value=str(accumulated_score), inline=False)
        embed.set_footer(text=f"{roll_emoji} to keep rolling, {stop_emoji} to end game and keep your score")
        # Show the message
        message = await ctx.send(embed=embed)
        await message.add_reaction(roll_emoji)
        await message.add_reaction(stop_emoji)

        # This check records which button the player pressed.
        def progress(reaction, user):
            nonlocal user_choice
            # If the user is not the player or the reaction is not on the correct message, ignore
            if (user is None) or (user.id != ctx.author.id) or (reaction.message.id != message.id):
                user_choice = None
                return False
            if reaction.emoji == roll_emoji:
                user_choice = roll_emoji
                return True
            if reaction.emoji == stop_emoji:
                user_choice = stop_emoji
                return True
            # BUG FIX: previously any other emoji from the player returned True,
            # leaving user_choice at its stale value from the previous round.
            return False

        # The game starts here
        user_choice = None  # Stores the user choice made in each iteration of the while loop
        while game_in_progress:
            try:
                curr_reaction, curr_user = await ctx.bot.wait_for('reaction_add', check=progress, timeout=60.0)
                await message.remove_reaction(curr_reaction, curr_user)
            except asyncio.TimeoutError:
                user_choice = None
                game_in_progress = False
                await message.delete()
                await ctx.send(f"`Dice Game: {ctx.author.name} timed out with a score of {accumulated_score} on Round {round_number}.`")
                continue  # game is over; skip the choice handling below
            except Exception:
                pass  # e.g. missing permission to remove the reaction; keep playing
            # The player chooses to roll again
            if user_choice == roll_emoji:
                round_number += 1
                roll = randint(1, 6)
                if roll <= 3:  # The player rolls successfully, the score will be added
                    accumulated_score += roll
                    embed.description = f"| Player : {ctx.author.name} | Round : {round_number} | Game In Progress |"
                    embed.set_field_at(0, name="Score", value=f"You rolled a {roll}. Your new score is {accumulated_score}.")
                    await message.edit(embed=embed)
                else:  # The player rolls more than 3 and die
                    game_in_progress = False
                    await message.delete()
                    await ctx.send(f"`Dice Game: {ctx.author.name} rolled a {roll} and lost with a score of {accumulated_score} on Round {round_number}`")
            # The player chooses to stop
            elif user_choice == stop_emoji:
                game_in_progress = False
                await message.delete()
                await ctx.send(f"`Dice Game: {ctx.author.name} kept a score of {accumulated_score} on Round {round_number}`")

    @commands.command()
    async def nword(self, ctx, member: discord.Member):
        """Report *member*'s counted-word totals from the in-memory counters."""
        nword1_counter = self.bot.nword1_counter.get(member.id, 0)
        nword2_counter = self.bot.nword2_counter.get(member.id, 0)
        nwords = nword2_counter + nword1_counter
        await ctx.send(f"Thank you for the request, comrade. I have looked through {member.mention}'s posting history and found {nwords} N-words, of which {nword1_counter} were hard-Rs since 27/10/19.")
def setup(bot):
    """Register the Fun cog with *bot* (extension entry point)."""
    bot.add_cog(Fun(bot))
|
{"/cogs/settings.py": ["/config/__init__.py", "/utils/lyricsretriever.py"], "/main.py": ["/config/__init__.py", "/utils/db.py", "/utils/discord_handler.py"], "/utils/prettydiscordprinter/concrete_printers.py": ["/utils/text_formatter.py", "/utils/prettydiscordprinter/abstract_classes.py", "/utils/prettydiscordprinter/concrete_formatters.py"], "/cogs/personal_todo.py": ["/utils/text_formatter.py"], "/cogs/helper.py": ["/utils/prettydiscordprinter/concrete_printers.py"], "/cogs/fun.py": ["/config/__init__.py", "/utils/checks.py"], "/utils/discord_handler.py": ["/utils/prettydiscordprinter/__init__.py"], "/cogs/events.py": ["/config/__init__.py"], "/cogs/to-do.py": ["/utils/text_formatter.py"], "/utils/prettydiscordprinter/__init__.py": ["/utils/prettydiscordprinter/concrete_formatters.py", "/utils/prettydiscordprinter/concrete_printers.py"], "/utils/checks.py": ["/config/__init__.py"], "/utils/prettydiscordprinter/abstract_classes.py": ["/utils/text_formatter.py"], "/cogs/lyrics.py": ["/utils/text_formatter.py", "/utils/lyricsretriever.py"]}
|
29,899,405
|
usvimal/Pi-thon
|
refs/heads/rewrite-v2.0
|
/utils/discord_handler.py
|
import asyncio
import discord
import logging
import traceback
from utils.prettydiscordprinter import *
""" Handler which is used to make loggers print to discord. """
class DiscordHandler(logging.Handler):
    """Logging handler that forwards formatted records to a discord channel as embeds."""

    def __init__(self, channel, main_loop, level=logging.NOTSET):
        super().__init__(level)
        self._channel = channel      # destination discord channel
        self._main_loop = main_loop  # event loop used to schedule the async send
        self._set_formatter()

    def _set_formatter(self):
        # One metadata field per line, with the record's message last.
        layout = ("Logging Level: %(levelname)s\n"
                  "Logger: %(name)s\n"
                  "Time created: %(asctime)s\n"
                  "Source Path: %(pathname)s\n"
                  "Function Name: %(funcName)s\n"
                  "Message: %(message)s\n")
        self.setFormatter(logging.Formatter(layout))

    def emit(self, record):
        """Format the record and schedule the asynchronous send on the event loop."""
        self._main_loop.create_task(self.send_to_channel(self.format(record)))

    async def send_to_channel(self, msg):
        """Deliver *msg* to the configured channel inside a green embed."""
        async with self._channel.typing():
            embed = discord.Embed(title='logger', description=msg, colour=0x19a934)
            await self._channel.send(embed=embed)
class DiscordWriter:
    """ Writers which will be used mainly for redirecting stdout and stderr to discord."""

    def __init__(self, original_writer, channel):
        self._original_writer = original_writer  # the real stdout/stderr being wrapped
        self._channel = channel
        # Batch rapid writes so discord is not flooded with tiny messages.
        self._printer = DelayedPrinterWrapper(PrettyCodeBlockPrinter(), delay=2.0)

    def write(self, text):
        """Mirror *text* to the original stream and queue it for discord."""
        try:
            if len(text) != 0:
                self._original_writer.write(text)
                asyncio.get_event_loop().create_task(self._printer.pretty_print(self._channel, text))
        except Exception as e:
            # Never raise from a stdout/stderr replacement; report on the real stream instead.
            self._original_writer.write(str(e))
            self._original_writer.write(traceback.format_exc())

    def flush(self):
        """BUG FIX: file-like objects replacing sys.stdout/sys.stderr must provide
        flush(); its absence raised AttributeError on any explicit flush call."""
        self._original_writer.flush()
|
{"/cogs/settings.py": ["/config/__init__.py", "/utils/lyricsretriever.py"], "/main.py": ["/config/__init__.py", "/utils/db.py", "/utils/discord_handler.py"], "/utils/prettydiscordprinter/concrete_printers.py": ["/utils/text_formatter.py", "/utils/prettydiscordprinter/abstract_classes.py", "/utils/prettydiscordprinter/concrete_formatters.py"], "/cogs/personal_todo.py": ["/utils/text_formatter.py"], "/cogs/helper.py": ["/utils/prettydiscordprinter/concrete_printers.py"], "/cogs/fun.py": ["/config/__init__.py", "/utils/checks.py"], "/utils/discord_handler.py": ["/utils/prettydiscordprinter/__init__.py"], "/cogs/events.py": ["/config/__init__.py"], "/cogs/to-do.py": ["/utils/text_formatter.py"], "/utils/prettydiscordprinter/__init__.py": ["/utils/prettydiscordprinter/concrete_formatters.py", "/utils/prettydiscordprinter/concrete_printers.py"], "/utils/checks.py": ["/config/__init__.py"], "/utils/prettydiscordprinter/abstract_classes.py": ["/utils/text_formatter.py"], "/cogs/lyrics.py": ["/utils/text_formatter.py", "/utils/lyricsretriever.py"]}
|
29,899,406
|
usvimal/Pi-thon
|
refs/heads/rewrite-v2.0
|
/utils/prettydiscordprinter/concrete_formatters.py
|
""" To be implemented later on. """
|
{"/cogs/settings.py": ["/config/__init__.py", "/utils/lyricsretriever.py"], "/main.py": ["/config/__init__.py", "/utils/db.py", "/utils/discord_handler.py"], "/utils/prettydiscordprinter/concrete_printers.py": ["/utils/text_formatter.py", "/utils/prettydiscordprinter/abstract_classes.py", "/utils/prettydiscordprinter/concrete_formatters.py"], "/cogs/personal_todo.py": ["/utils/text_formatter.py"], "/cogs/helper.py": ["/utils/prettydiscordprinter/concrete_printers.py"], "/cogs/fun.py": ["/config/__init__.py", "/utils/checks.py"], "/utils/discord_handler.py": ["/utils/prettydiscordprinter/__init__.py"], "/cogs/events.py": ["/config/__init__.py"], "/cogs/to-do.py": ["/utils/text_formatter.py"], "/utils/prettydiscordprinter/__init__.py": ["/utils/prettydiscordprinter/concrete_formatters.py", "/utils/prettydiscordprinter/concrete_printers.py"], "/utils/checks.py": ["/config/__init__.py"], "/utils/prettydiscordprinter/abstract_classes.py": ["/utils/text_formatter.py"], "/cogs/lyrics.py": ["/utils/text_formatter.py", "/utils/lyricsretriever.py"]}
|
29,899,407
|
usvimal/Pi-thon
|
refs/heads/rewrite-v2.0
|
/cogs/events.py
|
import config
import discord
from discord.ext import commands
class Events(commands.Cog):
    """Global listeners: command error handling, creator-mention relay, word counting and guild prefix bookkeeping."""

    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_command_error(self, ctx, error):
        """Central command-error handler; unexpected errors are re-raised with context."""
        # If command has its own error handler, don't do anything
        if hasattr(ctx.command, 'on_error'):
            return
        # Get error.original if it is not the original error
        error = getattr(error, "original", error)
        ignored_errors = (commands.errors.CommandNotFound,
                          commands.errors.TooManyArguments,
                          discord.errors.NotFound,
                          discord.errors.Forbidden)
        if isinstance(error, ignored_errors):
            return
        elif isinstance(error, commands.CheckFailure):
            await ctx.send(str(error))
        elif isinstance(error, commands.MissingRequiredArgument):
            return await ctx.send(f"Hol' up you forgot an argument: {error.param.name}")
        elif isinstance(error, commands.BadArgument):
            return await ctx.send(f'Uh oh there was an error: {error}')
        else:
            # Decorate the error message with the source guild and source user.
            invoker = f" | Source Channel: {ctx.guild.name} | Source User: {ctx.author.name} | Invoked command: {ctx.command}"
            # BUG FIX: guard against exceptions constructed without args.
            error.args = (error.args[0] + invoker,) if error.args else (invoker,)
            raise error

    @commands.Cog.listener()
    async def on_message(self, message):
        # we do not want the bot to reply to itself
        if message.author == self.bot.user:
            return
        content = message.content.lower()
        # sends me a message if I am mentioned
        if config.creator in content:
            msg = content.format(message)
            author = message.author
            guild = message.guild.name
            em = discord.Embed(title='@' + guild, description=msg, colour=0xFF00FF)
            em.set_author(name=author, icon_url=author.avatar_url)
            creator = self.bot.get_user(config.creatorID)
            await creator.send(embed=em)
            return
        # BUG FIX: `if config.nword1 or config.nword2 in content` tested the
        # truthiness of config.nword1, not whether it occurred in the message.
        if config.nword1 in content and 'snigger' not in content:
            await self._count_word(message.author.id, "nword1", self.bot.nword1_counter)
        if config.nword2 in content:
            # BUG FIX: the original incremented the nword2 count twice per message.
            await self._count_word(message.author.id, "nword2", self.bot.nword2_counter)

    async def _count_word(self, user_id, column, counter):
        """Increment *column* of nwordtable for *user_id* and sync the in-memory *counter*.

        *column* is an internal constant ("nword1"/"nword2"), never user input."""
        # A row exists if the user appears in either in-memory counter.
        # (BUG FIX: `user_id in a or b` previously only tested b's truthiness.)
        has_row = user_id in self.bot.nword1_counter or user_id in self.bot.nword2_counter
        new_count = counter.get(user_id, 0) + 1
        async with self.bot.dbpool.acquire() as conn:
            if has_row:
                await conn.execute(f'UPDATE nwordtable SET "{column}"=$1 WHERE "user_id"=$2;',
                                   new_count, user_id)
            else:
                await conn.execute(f'INSERT INTO nwordtable ("user_id", "{column}") VALUES ($1, $2);',
                                   user_id, new_count)
        counter[user_id] = new_count

    @commands.Cog.listener()
    async def on_guild_join(self, guild):
        """Register the default prefix for a newly joined guild."""
        default_prefix = config.default_prefix
        self.bot.all_prefixes[guild.id] = default_prefix
        async with self.bot.dbpool.acquire() as db:
            await db.execute('INSERT INTO guildprop ("guild_id", "prefix") VALUES ($1, $2);',
                             guild.id, default_prefix)

    @commands.Cog.listener()
    async def on_guild_remove(self, guild):
        """Drop the stored prefix when the bot leaves a guild."""
        # pop() avoids a KeyError if the guild was never registered.
        self.bot.all_prefixes.pop(guild.id, None)
        async with self.bot.dbpool.acquire() as db:
            await db.execute("DELETE FROM guildprop WHERE guild_id=$1", guild.id)
def setup(bot):
    """Register the Events cog with *bot* (extension entry point)."""
    bot.add_cog(Events(bot))
|
{"/cogs/settings.py": ["/config/__init__.py", "/utils/lyricsretriever.py"], "/main.py": ["/config/__init__.py", "/utils/db.py", "/utils/discord_handler.py"], "/utils/prettydiscordprinter/concrete_printers.py": ["/utils/text_formatter.py", "/utils/prettydiscordprinter/abstract_classes.py", "/utils/prettydiscordprinter/concrete_formatters.py"], "/cogs/personal_todo.py": ["/utils/text_formatter.py"], "/cogs/helper.py": ["/utils/prettydiscordprinter/concrete_printers.py"], "/cogs/fun.py": ["/config/__init__.py", "/utils/checks.py"], "/utils/discord_handler.py": ["/utils/prettydiscordprinter/__init__.py"], "/cogs/events.py": ["/config/__init__.py"], "/cogs/to-do.py": ["/utils/text_formatter.py"], "/utils/prettydiscordprinter/__init__.py": ["/utils/prettydiscordprinter/concrete_formatters.py", "/utils/prettydiscordprinter/concrete_printers.py"], "/utils/checks.py": ["/config/__init__.py"], "/utils/prettydiscordprinter/abstract_classes.py": ["/utils/text_formatter.py"], "/cogs/lyrics.py": ["/utils/text_formatter.py", "/utils/lyricsretriever.py"]}
|
29,899,408
|
usvimal/Pi-thon
|
refs/heads/rewrite-v2.0
|
/cogs/to-do.py
|
import discord
from discord.ext import commands
from utils.text_formatter import strike
class Todo(commands.Cog):
    """Shared todo channel: tasks are posted as embeds and resolved via ✅/❌ reactions."""

    def __init__(self, bot):
        self.bot = bot
        # Channel that holds the task messages.
        self.todo_channel_id = 572561960982413334

    @commands.is_owner()
    @commands.command()
    async def task(self, ctx, *, arg: commands.clean_content(fix_channel_mentions=True)):
        """Add task to todo channel"""
        channel = self.bot.get_channel(self.todo_channel_id)
        em = discord.Embed(description=arg)
        em.set_author(name=ctx.author, icon_url=ctx.author.avatar_url)
        em.timestamp = ctx.message.created_at
        task = await channel.send(embed=em)
        await task.add_reaction('✅')
        await task.add_reaction('❌')

    @commands.Cog.listener()
    async def on_raw_reaction_add(self, payload):
        """Strike through (✅) or delete (❌) a task when someone reacts to it."""
        try:
            channel = self.bot.get_channel(payload.channel_id)
            message = await channel.fetch_message(payload.message_id)
        except Exception:
            # Channel or message is gone or unreachable; nothing to do.
            # (Narrowed from a bare except, which also swallowed KeyboardInterrupt.)
            return
        else:
            emoji = payload.emoji
            user = message.guild.get_member(payload.user_id)
            # Ignore the bot's own reactions and reactions outside the todo channel.
            if user == self.bot.user or payload.channel_id != self.todo_channel_id:
                return
            try:
                embed = message.embeds[0]
            except IndexError:
                # No embed: legacy plain-text task message.
                if str(emoji) == '✅':
                    await message.delete()
                    await channel.send(strike(message.content))
                elif str(emoji) == '❌':
                    await message.delete()
            else:
                if str(emoji) == '✅':
                    # Re-post the task struck through to mark it done.
                    striked_message = strike(embed.description)
                    author_image = embed.author.icon_url
                    author = embed.author.name
                    await message.delete()
                    em = discord.Embed(title=striked_message)
                    em.set_author(name=author, icon_url=author_image)
                    em.timestamp = message.created_at
                    await channel.send(embed=em)
                elif str(emoji) == '❌':
                    await message.delete()
def setup(bot):
    """Register the Todo cog with *bot* (extension entry point)."""
    bot.add_cog(Todo(bot))
|
{"/cogs/settings.py": ["/config/__init__.py", "/utils/lyricsretriever.py"], "/main.py": ["/config/__init__.py", "/utils/db.py", "/utils/discord_handler.py"], "/utils/prettydiscordprinter/concrete_printers.py": ["/utils/text_formatter.py", "/utils/prettydiscordprinter/abstract_classes.py", "/utils/prettydiscordprinter/concrete_formatters.py"], "/cogs/personal_todo.py": ["/utils/text_formatter.py"], "/cogs/helper.py": ["/utils/prettydiscordprinter/concrete_printers.py"], "/cogs/fun.py": ["/config/__init__.py", "/utils/checks.py"], "/utils/discord_handler.py": ["/utils/prettydiscordprinter/__init__.py"], "/cogs/events.py": ["/config/__init__.py"], "/cogs/to-do.py": ["/utils/text_formatter.py"], "/utils/prettydiscordprinter/__init__.py": ["/utils/prettydiscordprinter/concrete_formatters.py", "/utils/prettydiscordprinter/concrete_printers.py"], "/utils/checks.py": ["/config/__init__.py"], "/utils/prettydiscordprinter/abstract_classes.py": ["/utils/text_formatter.py"], "/cogs/lyrics.py": ["/utils/text_formatter.py", "/utils/lyricsretriever.py"]}
|
29,899,409
|
usvimal/Pi-thon
|
refs/heads/rewrite-v2.0
|
/utils/prettydiscordprinter/__init__.py
|
from utils.prettydiscordprinter.concrete_formatters import *
from utils.prettydiscordprinter.concrete_printers import *
__all__ = ["PrettyTextPrinter",
"PrettyCodeBlockPrinter",
"PrettyEmbedPrinter",
"PrettyPaginator",
"DelayedPrinterWrapper"]
|
{"/cogs/settings.py": ["/config/__init__.py", "/utils/lyricsretriever.py"], "/main.py": ["/config/__init__.py", "/utils/db.py", "/utils/discord_handler.py"], "/utils/prettydiscordprinter/concrete_printers.py": ["/utils/text_formatter.py", "/utils/prettydiscordprinter/abstract_classes.py", "/utils/prettydiscordprinter/concrete_formatters.py"], "/cogs/personal_todo.py": ["/utils/text_formatter.py"], "/cogs/helper.py": ["/utils/prettydiscordprinter/concrete_printers.py"], "/cogs/fun.py": ["/config/__init__.py", "/utils/checks.py"], "/utils/discord_handler.py": ["/utils/prettydiscordprinter/__init__.py"], "/cogs/events.py": ["/config/__init__.py"], "/cogs/to-do.py": ["/utils/text_formatter.py"], "/utils/prettydiscordprinter/__init__.py": ["/utils/prettydiscordprinter/concrete_formatters.py", "/utils/prettydiscordprinter/concrete_printers.py"], "/utils/checks.py": ["/config/__init__.py"], "/utils/prettydiscordprinter/abstract_classes.py": ["/utils/text_formatter.py"], "/cogs/lyrics.py": ["/utils/text_formatter.py", "/utils/lyricsretriever.py"]}
|
29,899,410
|
usvimal/Pi-thon
|
refs/heads/rewrite-v2.0
|
/config/__init__.py
|
import os
from discord import ActivityType
from xml.etree import ElementTree
# Set at default values for when config.xml or parsing xml fails
# There are also other variables which are not defined here, but will be defined dynamically in _load()
cogs = []
games, gamestimer = [], 200
def _load():
    """Parse config/config.xml and populate this module's globals from its sections."""
    root = ElementTree.parse(r"config/config.xml").getroot()
    _load_environment(root.find("environment"))
    _load_settings(root.find("settings"))
    _load_cogs(root.find("cogs"))
    _load_games(root.find("games"))
def _load_environment(env_element):
    """Copy environment variables named by <environment> children into module globals.

    Each child element's text is the env-var name; its ``type`` attribute
    ("str" or "int") selects the coercion applied to the value.

    Bug fix: a missing environment variable previously became the literal
    string "None" (str(None)) for type "str" and raised TypeError (int(None))
    for type "int".  It is now stored as None so callers can detect absence.
    """
    for child in env_element:
        value = os.environ.get(child.text)
        if value is not None:
            value_type = child.get("type")
            if value_type == "str":
                value = str(value)
            elif value_type == "int":
                value = int(value)
        globals()[child.text] = value
def _load_settings(settings_element):
    """Expose every <settings> child as a module-level global (tag -> text)."""
    # More flexibility can be gained from removing the loop and adding one-by-one
    settings = {child.tag: child.text for child in settings_element}
    globals().update(settings)
def _load_cogs(cogs_element):
    """Append the module path of every cog whose activated attribute is "True"."""
    cogs.extend(child.text
                for child in cogs_element
                if child.get("activated") == "True")
def _load_games(games_element):
    """Collect (ActivityType, text) status pairs for the bot's presence rotation."""
    type_map = {
        "playing": ActivityType.playing,
        "watching": ActivityType.watching,
        "listening": ActivityType.listening,
    }
    for child in games_element:
        raw_type = child.get("activity_type")
        # Unknown labels fall through unchanged, matching the original if/elif chain.
        games.append((type_map.get(raw_type, raw_type), child.text))
# Load and then cleanup unnecessary modules
_load()
# Drop helper modules from the namespace so `config.<name>` only exposes settings.
del os, ActivityType, ElementTree
|
{"/cogs/settings.py": ["/config/__init__.py", "/utils/lyricsretriever.py"], "/main.py": ["/config/__init__.py", "/utils/db.py", "/utils/discord_handler.py"], "/utils/prettydiscordprinter/concrete_printers.py": ["/utils/text_formatter.py", "/utils/prettydiscordprinter/abstract_classes.py", "/utils/prettydiscordprinter/concrete_formatters.py"], "/cogs/personal_todo.py": ["/utils/text_formatter.py"], "/cogs/helper.py": ["/utils/prettydiscordprinter/concrete_printers.py"], "/cogs/fun.py": ["/config/__init__.py", "/utils/checks.py"], "/utils/discord_handler.py": ["/utils/prettydiscordprinter/__init__.py"], "/cogs/events.py": ["/config/__init__.py"], "/cogs/to-do.py": ["/utils/text_formatter.py"], "/utils/prettydiscordprinter/__init__.py": ["/utils/prettydiscordprinter/concrete_formatters.py", "/utils/prettydiscordprinter/concrete_printers.py"], "/utils/checks.py": ["/config/__init__.py"], "/utils/prettydiscordprinter/abstract_classes.py": ["/utils/text_formatter.py"], "/cogs/lyrics.py": ["/utils/text_formatter.py", "/utils/lyricsretriever.py"]}
|
29,899,411
|
usvimal/Pi-thon
|
refs/heads/rewrite-v2.0
|
/utils/lyricsretriever.py
|
import lyricsgenius
import os
import pylyrics3
class LyricsRetriever:
    """Fetch song lyrics from a per-user configurable source (Genius or Lyrics Wiki)
    and persist each user's source choice through the bot's database pool.

    Expects ``bot.lyrics_source`` (dict: user_id -> source name) and
    ``bot.dbpool`` (asyncpg-style pool) to be provided by the bot.
    """
    class LyricsNotFoundException(Exception):
        # Raised when no plausible lyrics could be retrieved for a song.
        pass
    class SourceChangeNotSuccess(Exception):
        # Declared for source-change failures; never raised inside this class.
        pass
    GENIUS_TOKEN = "genius_token"  # name of the env var holding the Genius API token
    GENIUS_SOURCE_NAME = "genius"
    WIKI_SOURCE_NAME = "lyrics-wiki"
    AVERAGE_SONG_WORD_SIZE = 1000 # Estimated and added 200 words more, for benefit of doubt
    @staticmethod
    def genius_get_lyrics(title, artist):
        """Search Genius for (title, artist); return the lyrics string, or None if no hit."""
        genius_api = lyricsgenius.Genius(os.environ.get(LyricsRetriever.GENIUS_TOKEN))
        song = genius_api.search_song(title, artist)
        if song is not None:
            return song.lyrics
        else:
            return None
    @staticmethod
    def lyrics_wiki_get_lyrics(title, artist):
        """Fetch lyrics via pylyrics3 (note pylyrics3 takes artist first, then title)."""
        return pylyrics3.get_song_lyrics(artist, title)
    @staticmethod
    def _create_sources():
        """ Create sources and return as a Source Name: Source Object pair. """
        return_dict = dict()
        return_dict[LyricsRetriever.GENIUS_SOURCE_NAME] = LyricsRetriever.genius_get_lyrics
        return_dict[LyricsRetriever.WIKI_SOURCE_NAME] = LyricsRetriever.lyrics_wiki_get_lyrics
        return return_dict
    @staticmethod
    def estimate_song_words(lyrics):
        """ This is an attempt to weed out false positives in song search. """
        return len(lyrics.split())
    def __init__(self, bot):
        # Source name -> retrieval callable; keys match values stored in userprop.
        self.bot = bot
        self._sources = self._create_sources()
    def get_main_source(self, user_id):
        """Return the user's preferred lyrics source name, defaulting to 'genius'."""
        return self.bot.lyrics_source.get(user_id, 'genius')
    async def change_main_source(self, user_id, new_source):
        """Persist the user's new lyrics source (UPDATE if already known, INSERT
        otherwise) and mirror the change into the in-memory cache."""
        if user_id in self.bot.lyrics_source:
            async with self.bot.dbpool.acquire() as conn:
                await conn.execute('UPDATE userprop SET "lyrics_source"=$1 WHERE "user_id"=$2;',
                                   new_source, user_id)
        else:
            async with self.bot.dbpool.acquire() as conn:
                await conn.execute('INSERT INTO userprop ("user_id", "lyrics_source") VALUES ($1, $2);',
                                   user_id, new_source)
        self.bot.lyrics_source[user_id] = new_source
    def get_lyrics(self, user_id, title, artist):
        """Return lyrics for (title, artist) using the user's preferred source.

        Raises LyricsNotFoundException when the source returns nothing or the
        result is implausibly long (treated as a false-positive match).
        """
        # Retrieve source object from the _sources dictionary and attempt to get the lyrics.
        lyrics = self._sources[self.get_main_source(user_id)](title, artist)
        if lyrics is not None and self.estimate_song_words(lyrics) <= self.AVERAGE_SONG_WORD_SIZE:
            return lyrics
        else:
            raise self.LyricsNotFoundException
# No standalone behaviour; this module is intended to be imported by the bot.
if __name__ == "__main__":
    pass
|
{"/cogs/settings.py": ["/config/__init__.py", "/utils/lyricsretriever.py"], "/main.py": ["/config/__init__.py", "/utils/db.py", "/utils/discord_handler.py"], "/utils/prettydiscordprinter/concrete_printers.py": ["/utils/text_formatter.py", "/utils/prettydiscordprinter/abstract_classes.py", "/utils/prettydiscordprinter/concrete_formatters.py"], "/cogs/personal_todo.py": ["/utils/text_formatter.py"], "/cogs/helper.py": ["/utils/prettydiscordprinter/concrete_printers.py"], "/cogs/fun.py": ["/config/__init__.py", "/utils/checks.py"], "/utils/discord_handler.py": ["/utils/prettydiscordprinter/__init__.py"], "/cogs/events.py": ["/config/__init__.py"], "/cogs/to-do.py": ["/utils/text_formatter.py"], "/utils/prettydiscordprinter/__init__.py": ["/utils/prettydiscordprinter/concrete_formatters.py", "/utils/prettydiscordprinter/concrete_printers.py"], "/utils/checks.py": ["/config/__init__.py"], "/utils/prettydiscordprinter/abstract_classes.py": ["/utils/text_formatter.py"], "/cogs/lyrics.py": ["/utils/text_formatter.py", "/utils/lyricsretriever.py"]}
|
29,899,412
|
usvimal/Pi-thon
|
refs/heads/rewrite-v2.0
|
/cogs/moderation.py
|
import asyncio
from discord.ext import commands
class Moderation(commands.Cog):
    """Message-management commands for server moderators."""

    def __init__(self, bot):
        self.bot = bot

    @commands.guild_only()
    @commands.has_permissions(manage_messages=True)
    @commands.command(aliases=["purge"])
    async def clean(self, ctx, *, limit: int):
        """ Deletes the previous n messages in the channel by the specified amount """
        # limit + 1 so the invoking command message is removed along with the others.
        await ctx.message.channel.purge(limit=limit + 1)
        notice = await ctx.send(f"`{limit}` messages were deleted")
        # Leave the confirmation visible briefly, then clean it up too.
        await asyncio.sleep(10)
        await notice.delete()
def setup(bot):
    """Entry point used by discord.py's load_extension to register the Moderation cog."""
    cog = Moderation(bot)
    bot.add_cog(cog)
|
{"/cogs/settings.py": ["/config/__init__.py", "/utils/lyricsretriever.py"], "/main.py": ["/config/__init__.py", "/utils/db.py", "/utils/discord_handler.py"], "/utils/prettydiscordprinter/concrete_printers.py": ["/utils/text_formatter.py", "/utils/prettydiscordprinter/abstract_classes.py", "/utils/prettydiscordprinter/concrete_formatters.py"], "/cogs/personal_todo.py": ["/utils/text_formatter.py"], "/cogs/helper.py": ["/utils/prettydiscordprinter/concrete_printers.py"], "/cogs/fun.py": ["/config/__init__.py", "/utils/checks.py"], "/utils/discord_handler.py": ["/utils/prettydiscordprinter/__init__.py"], "/cogs/events.py": ["/config/__init__.py"], "/cogs/to-do.py": ["/utils/text_formatter.py"], "/utils/prettydiscordprinter/__init__.py": ["/utils/prettydiscordprinter/concrete_formatters.py", "/utils/prettydiscordprinter/concrete_printers.py"], "/utils/checks.py": ["/config/__init__.py"], "/utils/prettydiscordprinter/abstract_classes.py": ["/utils/text_formatter.py"], "/cogs/lyrics.py": ["/utils/text_formatter.py", "/utils/lyricsretriever.py"]}
|
29,899,413
|
usvimal/Pi-thon
|
refs/heads/rewrite-v2.0
|
/utils/checks.py
|
import config
from discord.ext import commands
def is_approved_talker():
    """Command check passing only for the hard-coded set of approved user IDs."""
    def predicate(ctx):
        approved_ids = {config.creatorID, config.CillyID, config.WYID, config.MinID}
        return ctx.message.author.id in approved_ids
    return commands.check(predicate)
|
{"/cogs/settings.py": ["/config/__init__.py", "/utils/lyricsretriever.py"], "/main.py": ["/config/__init__.py", "/utils/db.py", "/utils/discord_handler.py"], "/utils/prettydiscordprinter/concrete_printers.py": ["/utils/text_formatter.py", "/utils/prettydiscordprinter/abstract_classes.py", "/utils/prettydiscordprinter/concrete_formatters.py"], "/cogs/personal_todo.py": ["/utils/text_formatter.py"], "/cogs/helper.py": ["/utils/prettydiscordprinter/concrete_printers.py"], "/cogs/fun.py": ["/config/__init__.py", "/utils/checks.py"], "/utils/discord_handler.py": ["/utils/prettydiscordprinter/__init__.py"], "/cogs/events.py": ["/config/__init__.py"], "/cogs/to-do.py": ["/utils/text_formatter.py"], "/utils/prettydiscordprinter/__init__.py": ["/utils/prettydiscordprinter/concrete_formatters.py", "/utils/prettydiscordprinter/concrete_printers.py"], "/utils/checks.py": ["/config/__init__.py"], "/utils/prettydiscordprinter/abstract_classes.py": ["/utils/text_formatter.py"], "/cogs/lyrics.py": ["/utils/text_formatter.py", "/utils/lyricsretriever.py"]}
|
29,899,414
|
usvimal/Pi-thon
|
refs/heads/rewrite-v2.0
|
/cogs/info.py
|
import aiohttp
import discord
import os
import platform
import psutil
import time
from bs4 import BeautifulSoup
from discord.ext import commands
class Info(commands.Cog):
    """Informational commands: repo link, latency check, invite link,
    feedback relay to the developers and a bot-status overview."""

    def __init__(self, bot):
        self.bot = bot
        # Hard-coded developer channel that receives relayed feedback embeds.
        self.feedback_channel = bot.get_channel(584041570739945502)

    @commands.command(aliases=["code"])
    async def github(self, ctx):
        """link to github"""
        em = discord.Embed(title='read my code!', url='https://github.com/usvimal/Pi-thon', colour=0xb949b5)
        em = em.set_author(name='Minininja', url='https://github.com/usvimal')
        await ctx.send(embed=em)

    @commands.command(aliases=["latency"])
    async def ping(self, ctx):
        """check ping"""
        # NOTE(review): this times how long it takes to enter ctx.typing(),
        # not the websocket latency (self.bot.latency would give that).
        pingtime = time.time()
        async with ctx.typing():
            ping = float(format(time.time() - pingtime, '.03f'))
            await ctx.send(f" time is `{ping} seconds` :ping_pong:")

    @commands.command()
    async def link(self, ctx):
        """link to add bot to other servers"""
        await ctx.send(
            'https://discordapp.com/api/oauth2/authorize?client_id=517153107604668438&permissions=0&scope=bot')

    @commands.command()
    async def feedback(self, ctx, *, content):
        """Send feedback to bot developer"""
        em = discord.Embed(title='Feedback', colour=0x37d9b9)
        em.set_author(name=str(ctx.author), icon_url=ctx.author.avatar_url)
        em.description = content
        em.timestamp = ctx.message.created_at
        if ctx.guild is not None:
            em.add_field(name='Server', value=f'{ctx.guild.name} (ID: {ctx.guild.id})', inline=False)
        em.add_field(name='Channel', value=f'{ctx.channel} (ID: {ctx.channel.id})', inline=False)
        em.set_footer(text=f'Author ID: {ctx.author.id}')
        await self.feedback_channel.send(embed=em)
        await ctx.message.add_reaction('👍')

    @commands.command()
    @commands.is_owner()
    async def dm(self, ctx, user_id: int, *, content: str):
        """Dm a user with their id"""
        user = self.bot.get_user(user_id)
        em = discord.Embed(title='Feedback reply', description=content)
        em.set_author(name=ctx.author, icon_url=ctx.author.avatar_url)
        em.set_footer(text='This is a DM sent because you had previously requested feedback or I found a bug '
                           'in a command you used, I do not monitor this DM.')
        try:
            await user.send(embed=em)
        except Exception:
            # get_user may return None (AttributeError) or the DM may be
            # forbidden / fail with an HTTP error; was a bare `except:` before.
            await ctx.send(f'Could not PM user by ID {user_id}.')
        else:
            await ctx.message.add_reaction('👍')

    @commands.command(aliases=["status"])
    async def info(self, ctx):
        """Bot status"""
        appinfo = await self.bot.application_info()
        process = psutil.Process(os.getpid())
        mem_usage = round(process.memory_info().rss / 1048576, 1)  # bytes -> MiB
        url = 'https://github.com/usvimal/Pi-thon/commits/rewrite-v2.0'
        # Default so a non-200 response or changed page markup no longer raises
        # NameError below (changelog was previously bound only on status 200).
        changelog = 'unknown'
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as response:
                if response.status == 200:
                    text = await response.read()
                    soup = BeautifulSoup(text, "html.parser")
                    last_commit_image = soup.find('a', class_="commit-author tooltipped tooltipped-s user-mention")
                    if last_commit_image is not None:
                        last_commit = last_commit_image.find_previous('a').find_previous('a')
                        changelog = last_commit.contents[0]
        em = discord.Embed(title=f"Bot Info for {appinfo.name}",
                           description=f"[Bot Invite](https://discordapp.com/oauth2/authorize?&client_id={self.bot.user.id}&scope=bot&permissions=0) | [Source Code](https://github.com/usvimal/Pi-thon)")
        em.add_field(name='Guilds', value=str(len(self.bot.guilds)))
        em.add_field(name="Users", value=str(len(self.bot.users)))
        em.add_field(name="Mem usage", value=f'{mem_usage} MiB')
        em.add_field(name="CPU usage", value=f'{psutil.cpu_percent()}%')
        em.add_field(name="Guild prefix", value=f"``{ctx.prefix}``")
        em.add_field(name='Bot owners', value=f'{appinfo.owner} \nmk43v3r#1422')
        em.add_field(name='Latest commit', value=f'[`{changelog}`](https://github.com/usvimal/Pi-thon/commits/rewrite-v2.0)')
        em.set_footer(text=f'Python version: {platform.python_version()} , discord.py version: {discord.__version__}')
        await ctx.send(embed=em)
def setup(bot):
    """Entry point used by discord.py's load_extension to register the Info cog."""
    cog = Info(bot)
    bot.add_cog(cog)
|
{"/cogs/settings.py": ["/config/__init__.py", "/utils/lyricsretriever.py"], "/main.py": ["/config/__init__.py", "/utils/db.py", "/utils/discord_handler.py"], "/utils/prettydiscordprinter/concrete_printers.py": ["/utils/text_formatter.py", "/utils/prettydiscordprinter/abstract_classes.py", "/utils/prettydiscordprinter/concrete_formatters.py"], "/cogs/personal_todo.py": ["/utils/text_formatter.py"], "/cogs/helper.py": ["/utils/prettydiscordprinter/concrete_printers.py"], "/cogs/fun.py": ["/config/__init__.py", "/utils/checks.py"], "/utils/discord_handler.py": ["/utils/prettydiscordprinter/__init__.py"], "/cogs/events.py": ["/config/__init__.py"], "/cogs/to-do.py": ["/utils/text_formatter.py"], "/utils/prettydiscordprinter/__init__.py": ["/utils/prettydiscordprinter/concrete_formatters.py", "/utils/prettydiscordprinter/concrete_printers.py"], "/utils/checks.py": ["/config/__init__.py"], "/utils/prettydiscordprinter/abstract_classes.py": ["/utils/text_formatter.py"], "/cogs/lyrics.py": ["/utils/text_formatter.py", "/utils/lyricsretriever.py"]}
|
29,899,415
|
usvimal/Pi-thon
|
refs/heads/rewrite-v2.0
|
/utils/text_formatter.py
|
"""
Used for chunking pieces of text to work around Discord's message size limit
Message limit: 2,000 characters. (note: user/channel/role mentions and emojis contain more characters than are shown)
"""
def chunks(s, n):
    """Yield successive `n`-character slices of `s`; the last slice may be shorter."""
    for offset in range(0, len(s), n):
        yield s[offset:offset + n]
def strike(text):
    """Return `text` with a combining long-stroke overlay (U+0336) after each character.

    Discord renders the combining character as a strikethrough.  Uses
    str.join instead of repeated `+=` so the build is linear, not quadratic.
    """
    return ''.join(c + '\u0336' for c in text)
|
{"/cogs/settings.py": ["/config/__init__.py", "/utils/lyricsretriever.py"], "/main.py": ["/config/__init__.py", "/utils/db.py", "/utils/discord_handler.py"], "/utils/prettydiscordprinter/concrete_printers.py": ["/utils/text_formatter.py", "/utils/prettydiscordprinter/abstract_classes.py", "/utils/prettydiscordprinter/concrete_formatters.py"], "/cogs/personal_todo.py": ["/utils/text_formatter.py"], "/cogs/helper.py": ["/utils/prettydiscordprinter/concrete_printers.py"], "/cogs/fun.py": ["/config/__init__.py", "/utils/checks.py"], "/utils/discord_handler.py": ["/utils/prettydiscordprinter/__init__.py"], "/cogs/events.py": ["/config/__init__.py"], "/cogs/to-do.py": ["/utils/text_formatter.py"], "/utils/prettydiscordprinter/__init__.py": ["/utils/prettydiscordprinter/concrete_formatters.py", "/utils/prettydiscordprinter/concrete_printers.py"], "/utils/checks.py": ["/config/__init__.py"], "/utils/prettydiscordprinter/abstract_classes.py": ["/utils/text_formatter.py"], "/cogs/lyrics.py": ["/utils/text_formatter.py", "/utils/lyricsretriever.py"]}
|
29,899,416
|
usvimal/Pi-thon
|
refs/heads/rewrite-v2.0
|
/utils/prettydiscordprinter/abstract_classes.py
|
from utils.text_formatter import chunks
class PrettyAbstractPrinter:
    """Base class for pretty printers.

    Holds an ordered list of formatters; subclasses decide how a formatter is
    configured for their output medium and how the final text is sent.
    """

    def __init__(self):
        self._formatters = []
        self._chr_limit = None  # Implement later

    def add_formatters(self, *formatters):
        """Register several formatters, preserving the given order."""
        for fmt in formatters:
            self.add_formatter(fmt)

    def add_formatter(self, formatter):
        """Configure a single formatter for this printer type and store it."""
        self._formatters.append(self._configure_formatter(formatter))

    def _configure_formatter(self, formatter):
        """ Every formatter must be configured to fit the printer type. """
        raise NotImplementedError("Implement this abstract function later.")

    async def pretty_print(self, ctx, text):
        """Send `text` to the destination; must be provided by subclasses."""
        raise NotImplementedError("Implement this abstract function later.")

    def _use_formatters(self, text):
        """Run `text` through every registered formatter, in registration order."""
        result = text
        for fmt in self._formatters:
            result = fmt.pretty_format(result)
        return result

    def get_chr_limit(self):
        return self._chr_limit
class PrettyAbstractFormatter:
    """Abstract text-formatter interface; subclasses implement all three methods."""

    def __init__(self):
        raise NotImplementedError("Implement this abstract function later.")

    def configure(self, **kwargs):
        raise NotImplementedError("Implement this abstract function later.")

    def pretty_format(self, text):
        # Typo fix: error message previously read "funtion".
        raise NotImplementedError("Implement this abstract function later.")
|
{"/cogs/settings.py": ["/config/__init__.py", "/utils/lyricsretriever.py"], "/main.py": ["/config/__init__.py", "/utils/db.py", "/utils/discord_handler.py"], "/utils/prettydiscordprinter/concrete_printers.py": ["/utils/text_formatter.py", "/utils/prettydiscordprinter/abstract_classes.py", "/utils/prettydiscordprinter/concrete_formatters.py"], "/cogs/personal_todo.py": ["/utils/text_formatter.py"], "/cogs/helper.py": ["/utils/prettydiscordprinter/concrete_printers.py"], "/cogs/fun.py": ["/config/__init__.py", "/utils/checks.py"], "/utils/discord_handler.py": ["/utils/prettydiscordprinter/__init__.py"], "/cogs/events.py": ["/config/__init__.py"], "/cogs/to-do.py": ["/utils/text_formatter.py"], "/utils/prettydiscordprinter/__init__.py": ["/utils/prettydiscordprinter/concrete_formatters.py", "/utils/prettydiscordprinter/concrete_printers.py"], "/utils/checks.py": ["/config/__init__.py"], "/utils/prettydiscordprinter/abstract_classes.py": ["/utils/text_formatter.py"], "/cogs/lyrics.py": ["/utils/text_formatter.py", "/utils/lyricsretriever.py"]}
|
29,899,417
|
usvimal/Pi-thon
|
refs/heads/rewrite-v2.0
|
/cogs/vote.py
|
import asyncio
from discord.ext import commands
class Vote(commands.Cog):
    """Reaction-based voting: ✅/❌ reactions on the command message are tallied
    after a user-chosen duration."""
    def __init__(self, bot):
        self.bot = bot
    @commands.guild_only()
    @commands.command()
    async def vote(self, ctx, *, question: str):
        """vote feature, will add reactions (thumbsup and thumbsdown) and output final result"""
        """enter your reason for the vote"""
        # Seed the command message with the two reactions voters will click.
        await ctx.message.add_reaction('✅')
        await ctx.message.add_reaction('❌')
        await ctx.send("How long do you want the vote to run? (Send an integer in seconds)")
        # Only accept the duration from the vote starter, in the same channel.
        check = lambda m: m.author == ctx.author and m.channel == ctx.channel
        try:
            msg = await self.bot.wait_for("message", timeout=60.0, check=check)
        except asyncio.TimeoutError:
            await ctx.send('The vote has been cancelled due to a lack of response')
        else:
            if msg.content.isdigit():
                await ctx.send('👍 vote is running')
                # Sleep for the requested duration while votes accumulate.
                await asyncio.sleep(int(msg.content))
            else:
                await ctx.send('Please restart the vote and send a positive integer only')
                return
            # Re-fetch the message so reaction counts reflect votes cast while sleeping.
            reactions = (await ctx.fetch_message(ctx.message.id)).reactions
            counts = {}
            for reaction in reactions:
                # Subtract 1 to exclude the bot's own seed reaction.
                counts[reaction.emoji] = reaction.count - 1
            # NOTE(review): if a seeded reaction was fully removed it will be
            # missing from `counts` and this lookup raises KeyError — confirm intended.
            if counts['✅'] > counts['❌']:
                await ctx.send('The answer to ' + question + ' is: ✅')
            elif counts['✅'] < counts['❌']:
                await ctx.send('The answer to ' + question + ' is: ❌')
            else:
                await ctx.send('Aww shucks, its a stalemate')
                return
            return
def setup(bot):
    """Entry point used by discord.py's load_extension to register the Vote cog."""
    cog = Vote(bot)
    bot.add_cog(cog)
|
{"/cogs/settings.py": ["/config/__init__.py", "/utils/lyricsretriever.py"], "/main.py": ["/config/__init__.py", "/utils/db.py", "/utils/discord_handler.py"], "/utils/prettydiscordprinter/concrete_printers.py": ["/utils/text_formatter.py", "/utils/prettydiscordprinter/abstract_classes.py", "/utils/prettydiscordprinter/concrete_formatters.py"], "/cogs/personal_todo.py": ["/utils/text_formatter.py"], "/cogs/helper.py": ["/utils/prettydiscordprinter/concrete_printers.py"], "/cogs/fun.py": ["/config/__init__.py", "/utils/checks.py"], "/utils/discord_handler.py": ["/utils/prettydiscordprinter/__init__.py"], "/cogs/events.py": ["/config/__init__.py"], "/cogs/to-do.py": ["/utils/text_formatter.py"], "/utils/prettydiscordprinter/__init__.py": ["/utils/prettydiscordprinter/concrete_formatters.py", "/utils/prettydiscordprinter/concrete_printers.py"], "/utils/checks.py": ["/config/__init__.py"], "/utils/prettydiscordprinter/abstract_classes.py": ["/utils/text_formatter.py"], "/cogs/lyrics.py": ["/utils/text_formatter.py", "/utils/lyricsretriever.py"]}
|
29,899,418
|
usvimal/Pi-thon
|
refs/heads/rewrite-v2.0
|
/cogs/lyrics.py
|
import discord
from discord.ext import commands
from utils.text_formatter import chunks
from utils.lyricsretriever import LyricsRetriever
class Lyrics(commands.Cog):
    """Commands that show lyrics for the song the user is playing on Spotify,
    with a per-user choice of lyrics source."""
    class SpotifyNotPlaying(commands.CommandError):
        # Raised when the invoking user has no active Spotify activity.
        pass
    SPOTIFY = "Spotify"  # str(activity) for a Spotify activity compares equal to this
    def __init__(self, bot):
        """ Create a lyrics retriever and add a list of registered user and their context"""
        self.bot = bot
        self.lyrics_retriever = LyricsRetriever(bot)
        # member -> ctx, for users subscribed via ";lyrics start".
        self.user_context_dict = dict()
    @commands.group()
    async def lyrics(self, ctx):
        """ Show the lyrics of the song currently playing in Spotify"""
        if ctx.invoked_subcommand is None:
            if ctx.subcommand_passed:
                # An unknown subcommand was given; show a hint instead of lyrics.
                em = discord.Embed(title='Oof! That was not a valid command 🤨 ',
                                   description='Type ;help [command] for more info on a command.',
                                   colour=0x3c1835)
                await ctx.send(embed=em, delete_after=60)
            else:
                # Bare ";lyrics": show lyrics for the track currently playing.
                song_title, song_artist = self.get_song_description(ctx.author)
                await self.show_lyrics_from_description(ctx, song_title, song_artist)
    @lyrics.command(aliases=["begin"])
    async def start(self, ctx):
        """ Show lyrics for all the songs the user plays until they stop this"""
        if ctx.author not in self.user_context_dict:
            song_title, song_artist = self.get_song_description(ctx.author)
            await self.show_lyrics_from_description(ctx, song_title, song_artist)
            # Remember the ctx so on_member_update can keep posting to this channel.
            self.user_context_dict[ctx.author] = ctx
        else:
            await ctx.send("\";lyrics start\" has already been activated.")
    @lyrics.command(aliases=["end"])
    async def stop(self, ctx):
        """ Stop showing new lyrics """
        if ctx.author in self.user_context_dict:
            del self.user_context_dict[ctx.author]
            await ctx.send("You will stop receiving lyrics now.")
        else:
            await ctx.send("You are not registered to receive lyrics. Use \";lyrics start\" to start receiving lyrics.")
    @lyrics.command()
    async def source(self, ctx):
        """ Show the current source for lyrics"""
        current_source = self.lyrics_retriever.get_main_source(ctx.author.id)
        await ctx.send(f"Current lyric source is {current_source}.")
    @lyrics.command()
    async def change_source(self, ctx):
        """ Change the lyrics source"""
        em = discord.Embed(title='Choose your source:', description='\t1. genius \n\t2. lyrics-wiki', color=0xbd6c24)
        em.set_footer(text="Send the number corresponding to the lyrics source")
        await ctx.send(embed=em)
        # Only accept the reply from the invoker, in the same channel.
        check = lambda m: m.author == ctx.author and m.channel == ctx.channel
        msg = await self.bot.wait_for("message", timeout=10, check=check)
        # NOTE(review): a non-numeric reply makes int() raise ValueError, and a
        # number other than 1/2 persists an empty source name — confirm intended.
        new_source = ''
        if int(msg.content) == 1:
            new_source = 'genius'
        elif int(msg.content) == 2:
            new_source = 'lyrics-wiki'
        await self.lyrics_retriever.change_main_source(ctx.author.id, new_source)
        await ctx.send(f"Changing of lyrics source to `{new_source}` is successful.")
    @commands.Cog.listener()
    async def on_member_update(self, before, after):
        """ If the user is registered and the next activity is still Spotify, show new lyrics. """
        if before in self.user_context_dict and str(after.activity) == Lyrics.SPOTIFY:
            # Get the context of registered user and update the dictionary
            ctx = self.user_context_dict[before]
            del self.user_context_dict[before]
            self.user_context_dict[after] = ctx
            # NOTE(review): get_song_description can raise SpotifyNotPlaying here;
            # listeners don't route through command error handling — confirm.
            before_description = self.get_song_description(before)
            after_description = self.get_song_description(after)
            if before_description != after_description:
                try:
                    await self.show_lyrics_from_description(ctx, *after_description)
                except LyricsRetriever.LyricsNotFoundException:
                    await ctx.send(f"Current lyrics source {self.lyrics_retriever.get_main_source(after.id)} could not retrieve the lyrics.")
    def get_song_description(self, user):
        """ Get the description of a song from user if the user is playing a song on Spotify. """
        if user.activities is not None:
            for activity in user.activities:
                if str(activity) == Lyrics.SPOTIFY:
                    return activity.title, activity.artist
        # No Spotify activity found among the user's activities.
        raise self.SpotifyNotPlaying
    async def show_lyrics_from_description(self, ctx, song_title, song_artist):
        """Discord bot will show lyrics of a song from its description.

        Splits lyrics into 2048-character chunks (embed description limit) and
        auto-deletes each embed after five minutes.
        """
        for chunk in chunks(self.lyrics_retriever.get_lyrics(ctx.author.id, song_title, song_artist), 2048):
            em = discord.Embed(title=song_title, description=chunk)
            em = em.set_author(name=song_artist)
            await ctx.trigger_typing()
            await ctx.send(embed=em, delete_after=5*60)
    @commands.Cog.listener()
    async def on_command_error(self, ctx, error):
        """ Error thrown by commands will be of the type discord.ext.command.CommandError. For errors not inheriting
        from CommandError, it will be difficult to error handle. """
        if isinstance(error, self.SpotifyNotPlaying):
            await ctx.send("Please play a song to get the lyrics 🙃")
        elif hasattr(error, "original") and isinstance(error.original, LyricsRetriever.LyricsNotFoundException):
            await ctx.send("Current lyrics source {} could not retrieve the lyrics.".format(
                self.lyrics_retriever.get_main_source(ctx.author.id)))
def setup(bot):
    """Entry point used by discord.py's load_extension to register the Lyrics cog."""
    cog = Lyrics(bot)
    bot.add_cog(cog)
if __name__ == "__main__":
    pass
|
{"/cogs/settings.py": ["/config/__init__.py", "/utils/lyricsretriever.py"], "/main.py": ["/config/__init__.py", "/utils/db.py", "/utils/discord_handler.py"], "/utils/prettydiscordprinter/concrete_printers.py": ["/utils/text_formatter.py", "/utils/prettydiscordprinter/abstract_classes.py", "/utils/prettydiscordprinter/concrete_formatters.py"], "/cogs/personal_todo.py": ["/utils/text_formatter.py"], "/cogs/helper.py": ["/utils/prettydiscordprinter/concrete_printers.py"], "/cogs/fun.py": ["/config/__init__.py", "/utils/checks.py"], "/utils/discord_handler.py": ["/utils/prettydiscordprinter/__init__.py"], "/cogs/events.py": ["/config/__init__.py"], "/cogs/to-do.py": ["/utils/text_formatter.py"], "/utils/prettydiscordprinter/__init__.py": ["/utils/prettydiscordprinter/concrete_formatters.py", "/utils/prettydiscordprinter/concrete_printers.py"], "/utils/checks.py": ["/config/__init__.py"], "/utils/prettydiscordprinter/abstract_classes.py": ["/utils/text_formatter.py"], "/cogs/lyrics.py": ["/utils/text_formatter.py", "/utils/lyricsretriever.py"]}
|
29,959,579
|
dschick/pyEvalData
|
refs/heads/develop
|
/pyEvalData/io/spec.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2015-2021 Daniel Schick
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
import xrayutilities as xu
from .source import Source
from .scan import Scan
__all__ = ['Spec']
__docformat__ = 'restructuredtext'
class Spec(Source):
    """Spec
    Source implementation for SPEC files.
    Args:
        file_name (str): file name including extension,
          can include regex pattern.
        file_path (str, optional): file path - defaults to ``./``.
    Keyword Args:
        start_scan_number (uint): start of scan numbers to parse.
        stop_scan_number (uint): stop of scan numbers to parse.
          This number is included.
        nexus_file_name (str): name for generated nexus file.
        nexus_file_name_postfix (str): postfix for nexus file name.
        nexus_file_path (str): path for generated nexus file.
        read_all_data (bool): read all data on parsing.
          If false, data will be read only on demand.
        read_and_forget (bool): clear data after read to save memory.
        update_before_read (bool): always update from source
          before reading scan data.
        use_nexus (bool): use nexus file to join/compress raw data.
        force_overwrite (bool): forced re-read of raw source and
          re-generated of nexus file.
    Attributes:
        log (logging.logger): logger instance from logging.
        name (str): name of the source
        scan_dict (dict(scan)): dict of scan objects with
          key being the scan number.
        start_scan_number (uint): start of scan numbers to parse.
        stop_scan_number (uint): stop of scan numbers to parse.
          This number is included.
        file_name (str): file name including extension,
          can include regex pattern.
        file_path (str, optional): file path - defaults to ``./``.
        nexus_file_name (str): name for generated nexus file.
        nexus_file_name_postfix (str): postfix for nexus file name.
        nexus_file_path (str): path for generated nexus file.
        nexus_file_exists(bool): if nexus file exists.
        read_all_data (bool): read all data on parsing.
        read_and_forget (bool): clear data after read to save memory.
        update_before_read (bool): always update from source
          before reading scan data.
        use_nexus (bool): use nexus file to join/compress raw data.
        force_overwrite (bool): forced re-read of raw source and
          re-generated of nexus file.
    """
    def __init__(self, file_name, file_path, **kwargs):
        # All configuration is handled by the Source base class.
        super().__init__(file_name, file_path, **kwargs)
    def parse_raw(self):
        """parse_raw
        Parse the raw source file/folder and populate the `scan_dict`.
        """
        self.log.info('parse_raw')
        # Create the xrayutilities SPECFile lazily; re-create on force_overwrite.
        if ('spec_file' not in dir(self)) or self.force_overwrite:
            self.log.info('Create spec_file from xrayutilities')
            self.spec_file = xu.io.SPECFile(self.file_name,
                                            path=self.file_path)
        # update the xu.spec_file
        self.spec_file.Update()
        # iterate through scan list in xu.spec_file
        for spec_scan in self.spec_file.scan_list:
            # check for scan number in given range
            # (stop_scan_number == -1 means "no upper bound")
            if (spec_scan.nr >= self.start_scan_number) and \
                    ((spec_scan.nr <= self.stop_scan_number) or
                        (self.stop_scan_number == -1)):
                last_scan_number = self.get_last_scan_number()
                # check if Scan needs to be re-created
                # if scan is not present, its the last one, or force overwrite
                # NOTE(review): the last scan is always refreshed, presumably
                # because it may still be growing in the SPEC file — confirm.
                if (spec_scan.nr not in self.scan_dict.keys()) or \
                        (spec_scan.nr >= last_scan_number) or \
                        self.force_overwrite:
                    # rename init_motor_pos keys without prefix
                    init_motor_pos = {}
                    for key, value in spec_scan.init_motor_pos.items():
                        init_motor_pos[key.replace('INIT_MOPO_', '')] = value
                    # catching PR for itime in xu SpecScan missing
                    try:
                        int_time = float(spec_scan.itime)
                    except AttributeError:
                        int_time = 0.0
                    # create scan object
                    scan = Scan(int(spec_scan.nr),
                                cmd=spec_scan.command,
                                date=spec_scan.date,
                                time=spec_scan.time,
                                int_time=int_time,
                                header=spec_scan.header,
                                init_mopo=init_motor_pos)
                    self.scan_dict[spec_scan.nr] = scan
                    # check if the data needs to be read as well
                    if self.read_all_data:
                        self.read_scan_data(self.scan_dict[spec_scan.nr])
    def read_raw_scan_data(self, scan):
        """read_raw_scan_data
        Reads the data for a given scan object from raw source.
        Args:
            scan (Scan): scan object.
        """
        self.log.info('read_raw_scan_data for scan #{:d}'.format(scan.number))
        # xu exposes each scan as an attribute named "scan<nr>".
        spec_scan = self.spec_file.__getattr__('scan{:d}'.format(scan.number))
        spec_scan.ReadData()
        scan.data = spec_scan.data
        # Release xu's copy of the data after transferring it to the scan object,
        # presumably to keep memory usage low — matches read_and_forget behaviour.
        spec_scan.ClearData()
        scan.meta['header'] = spec_scan.header
|
{"/pyEvalData/__init__.py": ["/pyEvalData/evalData.py", "/pyEvalData/evaluation.py"], "/pyEvalData/io/spec.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/test/conftest.py": ["/pyEvalData/__init__.py"], "/pyEvalData/evaluation.py": ["/pyEvalData/__init__.py", "/pyEvalData/helpers.py"], "/test/test_scan.py": ["/pyEvalData/io/__init__.py"], "/pyEvalData/io/scan.py": ["/pyEvalData/__init__.py"], "/pyEvalData/io/source.py": ["/pyEvalData/__init__.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/palxfel.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/__init__.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py", "/pyEvalData/io/spec.py", "/pyEvalData/io/sardana_nexus.py", "/pyEvalData/io/palxfel.py"], "/pyEvalData/io/sardana_nexus.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"]}
|
29,959,580
|
dschick/pyEvalData
|
refs/heads/develop
|
/test/conftest.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import pyEvalData as ped
@pytest.fixture(scope='module')
def source_pal(tmp_path_factory):
    """Module-scoped PalH5 source reading the PAL example files."""
    nexus_folder = tmp_path_factory.mktemp("pytest_data")
    return ped.io.PalH5(name='pal_file',
                        file_name='{0:07d}',
                        file_path='example_data/example_files_pal',
                        nexus_file_name='test_pal',
                        nexus_file_path=nexus_folder,
                        use_nexus=True,
                        force_overwrite=False,
                        update_before_read=False,
                        read_and_forget=True)
@pytest.fixture(scope='module')
def source_spec(tmp_path_factory):
    """Module-scoped Spec source reading the spec example file."""
    nexus_folder = tmp_path_factory.mktemp("pytest_data")
    return ped.io.Spec(file_name='example_file_spec.spec',
                       file_path='example_data/',
                       nexus_file_name='test_spec',
                       nexus_file_path=nexus_folder,
                       use_nexus=True,
                       force_overwrite=False,
                       update_before_read=False,
                       read_and_forget=True)
@pytest.fixture(scope='module')
def evaluation(source_spec):
    """Module-scoped Evaluation instance built on the spec source."""
    return ped.Evaluation(source_spec)
|
{"/pyEvalData/__init__.py": ["/pyEvalData/evalData.py", "/pyEvalData/evaluation.py"], "/pyEvalData/io/spec.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/test/conftest.py": ["/pyEvalData/__init__.py"], "/pyEvalData/evaluation.py": ["/pyEvalData/__init__.py", "/pyEvalData/helpers.py"], "/test/test_scan.py": ["/pyEvalData/io/__init__.py"], "/pyEvalData/io/scan.py": ["/pyEvalData/__init__.py"], "/pyEvalData/io/source.py": ["/pyEvalData/__init__.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/palxfel.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/__init__.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py", "/pyEvalData/io/spec.py", "/pyEvalData/io/sardana_nexus.py", "/pyEvalData/io/palxfel.py"], "/pyEvalData/io/sardana_nexus.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"]}
|
29,959,581
|
dschick/pyEvalData
|
refs/heads/develop
|
/pyEvalData/evaluation.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2015-2020 Daniel Schick
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from . import config
import logging
import numpy as np
import collections
import matplotlib.pyplot as plt
import matplotlib as mpl
import re
from uncertainties import unumpy
from .helpers import bin_data
__all__ = ['Evaluation']
__docformat__ = 'restructuredtext'
class Evaluation(object):
"""Evaluation
Main class for evaluating data.
The raw data is accessed via a ``Source`` object.
The evaluation allows to bin data, calculate errors and propagate them.
There is also an interface to ``lmfit`` for easy batch-fitting.
Args:
source (Source): raw data source.
Attributes:
log (logging.logger): logger instance from logging.
clist (list[str]): list of counter names to evaluate.
cdef (dict{str:str}): dict of predefined counter names and
definitions.
xcol (str): counter or motor for x-axis.
t0 (float): approx. time zero for delay scans to determine the
unpumped region of the data for normalization.
custom_counters (list[str]): list of custom counters - default is []
math_keys (list[str]): list of keywords which are evaluated as numpy functions
statistic_type (str): 'gauss' for normal averaging, 'poisson' for counting statistics
propagate_errors (bool): propagate errors for dpendent counters.
"""
def __init__(self, source):
self.log = logging.getLogger(__name__)
self.log.setLevel(config.LOG_LEVEL)
self.source = source
self.clist = []
self.cdef = {}
self.xcol = ''
self.t0 = 0
self.custom_counters = []
self.math_keys = ['mean', 'sum', 'diff', 'max', 'min', 'round', 'abs',
'sin', 'cos', 'tan', 'arcsin', 'arccos', 'arctan',
'pi', 'exp', 'log', 'log10', 'sqrt', 'sign']
self.statistic_type = 'gauss'
self.propagate_errors = True
self.apply_data_filter = False
self.data_filters = ['evaluatable statement']
def get_clist(self):
"""get_clist
Returns a list of counters as defined by the user.
If the counters where defined in a ``dict`` it will be converted
to a ``list`` for backwards compatibility.
Returns:
clist (list[str]): list of counter names to evaluate.
"""
if isinstance(self.clist, dict):
# the clist property is a dict, so retrun its keys as list
clist = list(self.clist.keys())
else:
clist = list(self.clist)
return clist
def traverse_counters(self, clist, source_cols=''):
"""traverse_counters
Traverse all counters and replace all predefined counter definitions.
Returns also a list of the included source counters for error propagation.
Args:
clist (list[str]): Initial counter list.
source_cols (list[str], optional): counters in the raw source data.
Returns:
(tuple):
- *resolved_counters (list[str])* - resolved counters.
- *source_counters (list[str])* - all source counters in the resolved counters.
"""
resolved_counters = []
source_counters = []
for counter_name in clist:
# resolve each counter in the clist
counter_string, res_source_counters = \
self.resolve_counter_name(counter_name, source_cols)
resolved_counters.append(counter_string)
source_counters.extend(res_source_counters)
return resolved_counters, list(set(source_counters))
def resolve_counter_name(self, col_name, source_cols=''):
"""resolve_counter_name
Replace all predefined counter definitions in a given counter name.
The function works recursively.
Args:
col_name (str): initial counter string.
source_cols (list[str], optional): columns in the source data.
Returns:
(tuple):
- *col_string (str)* - resolved counter string.
- *source_counters (list[str])* - source counters in the col_string
"""
recall = False # boolean to stop recursive calls
source_counters = []
col_string = col_name
for find_cdef in self.cdef.keys():
# check for all predefined counters
search_pattern = r'\b' + find_cdef + r'\b'
if re.search(search_pattern, col_string) is not None:
if self.cdef[find_cdef] in source_cols:
# this counter definition is a base source counter
source_counters.append(self.cdef[find_cdef])
# found a predefined counter
# recursive call if predefined counter must be resolved again
recall = True
# replace the counter definition in the string
(col_string, _) = re.subn(search_pattern,
'(' + self.cdef[find_cdef] + ')', col_string)
if recall:
# do the recursive call
col_string, rec_source_counters = self.resolve_counter_name(col_string, source_cols)
source_counters.extend(rec_source_counters)
for find_cdef in source_cols:
# check for all base source counters
search_pattern = r'\b' + find_cdef + r'\b'
if re.search(search_pattern, col_string) is not None:
source_counters.append(find_cdef)
return col_string, source_counters
def col_string_to_eval_string(self, col_string, array_name='spec_data'):
"""Use regular expressions in order to generate an evaluateable string
from the counter string in order to append the new counter to the
spec data.
Args:
col_string (str) : Definition of the counter.
mode (int) : Flag for different modes
Returns:
eval_string (str): Evaluateable string to add the new counter
to the spec data.
"""
# search for alphanumeric counter names in col_string
iterator = re.finditer(
'([0-9]*[a-zA-Z\_]+[0-9]*[a-zA-Z]*)*', col_string)
# these are keys which should not be replaced but evaluated
math_keys = list(self.math_keys)
keys = math_keys.copy()
for key in iterator:
# traverse all found counter names
if len(key.group()) > 0:
# the match is > 0
if not key.group() in keys:
# the counter name is not in the keys list
# remember this counter name in the key list in order
# not to replace it again
keys.append(key.group())
# the actual replacement
(col_string, _) = re.subn(r'\b'+key.group()+r'\b',
array_name + '[\'' + key.group() + '\']', col_string)
# add 'np.' prefix to numpy functions/math keys
for mk in math_keys:
if mk != '0x0001FFFF':
(col_string, _) = re.subn(r'\b' + mk + r'\b', 'np.' + mk, col_string)
return col_string
def add_custom_counters(self, spec_data, scan_num, source_counters):
"""Add custom counters to the spec data array.
This is a stub for child classes.
Args:
spec_data (ndarray) : Data array from the spec scan.
scan_num (int) : Scan number of the spec scan.
source_counters list(str) : List of the source counters and custom counters
from the clist and xcol.
Returns:
spec_data (ndarray): Updated data array from the spec scan.
"""
return spec_data
    def filter_data(self, data):
        """filter_data

        Apply all ``data_filters`` to a record array and return only the
        rows for which every filter evaluates to ``True``.

        Each filter is an evaluatable string; counter names are resolved
        via ``cdef`` and then evaluated against the data columns.

        Args:
            data (ndarray): record array of scan data.

        Returns:
            ndarray: record array restricted to the rows passing all
            filters.
        """
        res = []
        for data_filter in self.data_filters:
            # resolve predefined counter definitions, then turn the filter
            # string into an expression on the local `data` array
            name, _ = self.resolve_counter_name(data_filter)
            # NOTE(review): `eval` executes arbitrary code from the filter
            # strings - only trusted filter definitions must be used here
            idx = eval(self.col_string_to_eval_string(name, array_name='data'))
            if len(res) == 0:
                res = idx
            else:
                # a row must pass every filter (logical AND)
                res = np.logical_and(res, idx)
        data_list = []
        dtype_list = []
        for name in data.dtype.names:
            # collect each filtered column together with its dtype/shape
            # for rebuilding the record array below
            data_list.append(data[name][res])
            dtype_list.append((name,
                               data[name][res].dtype,
                               data[name][res].shape))
        return np.core.records.fromarrays(data_list, dtype=dtype_list)
def get_scan_data(self, scan_num):
"""
Args:
scan_num (TYPE): DESCRIPTION.
Returns:
TYPE: DESCRIPTION.
"""
data, meta = self.source.get_scan_data(scan_num)
if self.apply_data_filter:
data = self.filter_data(data)
return data
def get_scan_list_data(self, scan_list):
"""
Args:
scan_num (TYPE): DESCRIPTION.
Returns:
TYPE: DESCRIPTION.
"""
data_list, meta_list = self.source.get_scan_list_data(scan_list)
if self.apply_data_filter:
for i, data in enumerate(data_list):
data_list[i] = self.filter_data(data)
return data_list
def avg_N_bin_scans(self, scan_list, xgrid=np.array([]), binning=True):
"""Averages data defined by the counter list, clist, onto an optional
xgrid. If no xgrid is given the x-axis data of the first scan in the
list is used instead.
Args:
scan_list (List[int]) : List of scan numbers.
xgrid (Optional[ndarray]) : Grid to bin the data to -
default in empty so use the
x-axis of the first scan.
Returns:
avg_data (ndarray) : Averaged data for the scan list.
std_data (ndarray) : Standart derivation of the data for the scan list.
err_data (ndarray) : Error of the data for the scan list.
name (str) : Name of the data set.
"""
# generate the name of the data set from the spec file name and scan_list
name = self.source.name + " #{0:04d}".format(scan_list[0])
# get the counters which should be evaluated
clist = self.get_clist()
if not clist:
raise Exception('No clist is defined. Do not know what to plot!')
return
# process also the xcol as counter in order to allow for newly defined xcols
if not self.xcol:
raise Exception('No xcol is defined. Do not know what to plot!')
return
if self.xcol not in clist:
clist.append(self.xcol)
source_cols = []
concat_data = np.array([])
data_list = self.get_scan_list_data(scan_list)
for i, (spec_data, scan_num) in enumerate(zip(data_list, scan_list)):
# traverse the scan list and read data
# try:
# # try to read the motors and data of this scan
# spec_data = self.get_scan_data(scan_num)
# except Exception:
# raise
# print('Scan #' + scan_num + ' not found, skipping')
if i == 0 or len(source_cols) == 0: # we need to evaluate this only once
# these are the base spec counters which are present in the data
# file plus custom counters
source_cols = list(
set(list(spec_data.dtype.names) + self.custom_counters))
# resolve the clist and retrieve the resolves counters and the
# necessary base spec counters for error propagation
resolved_counters, source_counters = self.traverse_counters(
clist, source_cols)
# counter names and resolved strings for further calculations
if self.statistic_type == 'poisson' or self.propagate_errors:
# for error propagation we just need the base spec counters
# and the xcol
col_names = source_counters[:]
col_strings = source_counters[:]
# add the xcol to both lists
col_names.append(self.xcol)
col_strings.append(resolved_counters[clist.index(self.xcol)])
else:
# we need to average the resolved counters
col_names = clist[:]
col_strings = resolved_counters[:]
# create the dtype of the return array
dtypes = []
for col_name in clist:
dtypes.append((col_name, '<f8'))
# add custom counters if defined
spec_data = self.add_custom_counters(spec_data, scan_num, source_counters)
data = np.array([])
# read data into data array
for col_string, col_name in zip(col_strings, col_names):
# traverse the counters in the clist and append to data if not
# already present
eval_string = self.col_string_to_eval_string(
col_string, array_name='spec_data')
if len(data) == 0:
data = np.array(eval(eval_string), dtype=[(col_name, float)])
elif col_name not in data.dtype.names:
data = eval('np.lib.recfunctions.append_fields(data,\''
+ col_name + '\',data=(' + eval_string
+ '), dtypes=float, asrecarray=True, usemask=True)')
if i > 0:
# this is not the first scan in the list so append the data to
# the concatenated data array
concat_data = np.concatenate((concat_data, data), axis=0)
else:
concat_data = data
if len(xgrid) == 0:
# if no xgrid is given we use the xData of the first scan instead
xgrid = concat_data[self.xcol]
# remove xcol from clist and resolved counters for further treatment
del resolved_counters[clist.index(self.xcol)]
clist.remove(self.xcol)
try:
# bin the concatenated data to the xgrid
# if a custom counter was calculated it might have a different length
# than the spec counters which will result in an error while binning data
# from a default spec counter and a custom counter.
if binning:
xgrid_reduced, _, _, _, _, _, _, _, _ = bin_data(
concat_data[self.xcol], concat_data[self.xcol], xgrid)
else:
xgrid_reduced = xgrid
# create empty arrays for averages, std and errors
avg_data = np.recarray(np.shape(xgrid_reduced)[0], dtype=dtypes)
std_data = np.recarray(np.shape(xgrid_reduced)[0], dtype=dtypes)
err_data = np.recarray(np.shape(xgrid_reduced)[0], dtype=dtypes)
if self.statistic_type == 'poisson':
bin_stat = 'sum'
else: # gauss
bin_stat = 'mean'
if binning:
if self.statistic_type == 'poisson' or self.propagate_errors:
# propagate errors using the uncertainties package
# create empty dict for uncertainties data arrays
unc_data_err = {}
unc_data_std = {}
for col in source_counters:
# for all cols in the clist bin the data to the xgrid an calculate
# the averages, stds and errors
y, avg_data[self.xcol], yerr, err_data[self.xcol], ystd, \
std_data[self.xcol], _, _, _ = bin_data(concat_data[col],
concat_data[self.xcol],
xgrid_reduced,
statistic=bin_stat)
# add spec base counters to uncData arrays
# the uncertainty package cannot handle masked arrays
# e.g. for divisions in the clist
# --> convert all base counter results to np.array()
unc_data_std[col] = unumpy.uarray(np.array(y),
np.array(ystd))
unc_data_err[col] = unumpy.uarray(np.array(y),
np.array(yerr))
for col_name, col_string in zip(clist, resolved_counters):
eval_string = self.col_string_to_eval_string(
col_string, array_name='unc_data_err')
temp = eval(eval_string)
avg_data[col_name] = unumpy.nominal_values(temp)
err_data[col_name] = unumpy.std_devs(temp)
eval_string = self.col_string_to_eval_string(
col_string, array_name='unc_data_std')
temp = eval(eval_string)
std_data[col_name] = unumpy.std_devs(temp)
else:
# no error propagation but averaging of individual scans
for col in clist:
# for all cols in the clist bin the data to the xgrid an calculate
# the averages, stds and errors
avg_data[col], avg_data[self.xcol], err_data[col], \
err_data[self.xcol], std_data[col], std_data[self.xcol], _, _, \
_ = bin_data(concat_data[col],
concat_data[self.xcol],
xgrid_reduced,
statistic=bin_stat)
else:
for col_name, col_string in zip(clist, resolved_counters):
eval_string = self.col_string_to_eval_string(
col_string, array_name='spec_data')
temp = eval(eval_string)
avg_data[col_name] = temp
avg_data[self.xcol] = concat_data[self.xcol]
err_data[col_name] = 0
err_data[self.xcol] = 0
std_data[col_name] = 0
std_data[self.xcol] = 0
except Exception:
raise
print('xcol and ycol must have the same length --> probably you try plotting a custom'
' counter together with a spec counter')
return avg_data, std_data, err_data, name
    def plot_scans(self, scan_list, ylims=[], xlims=[], fig_size=[], xgrid=[],
                   yerr='std', xerr='std', norm2one=False, binning=True,
                   label_text='', title_text='', skip_plot=False, grid_on=True,
                   ytext='', xtext='', fmt='-o'):
        """Plot a list of scans from the spec file.
        Various plot parameters are provided.
        The plotted data are returned.

        Args:
            scan_list (List[int]) : List of scan numbers.
            ylims (Optional[ndarray]) : ylim for the plot.
            xlims (Optional[ndarray]) : xlim for the plot.
            fig_size (Optional[ndarray]) : Figure size of the figure.
            xgrid (Optional[ndarray]) : Grid to bin the data to -
                                        default in empty so use the
                                        x-axis of the first scan.
            yerr (Optional[ndarray]) : Type of the errors in y: [err, std, none]
                                       default is 'std'.
            xerr (Optional[ndarray]) : Type of the errors in x: [err, std, none]
                                       default is 'std'.
            norm2one (Optional[bool]) : Norm transient data to 1 for t < t0
                                        default is False.
            binning (Optional[bool]) : Bin the data onto the xgrid -
                                       default is True.
            label_text (Optional[str]) : Label of the plot - default is none.
            title_text (Optional[str]) : Title of the figure - default is none.
            skip_plot (Optional[bool]) : Skip plotting, just return data
                                         default is False.
            grid_on (Optional[bool]) : Add grid to plot - default is True.
            ytext (Optional[str]) : y-Label of the plot - defaults is none.
            xtext (Optional[str]) : x-Label of the plot - defaults is none.
            fmt (Optional[str]) : format string of the plot - defaults is -o.

        Returns:
            y2plot (OrderedDict) : y-data which was plotted.
            x2plot (ndarray) : x-data which was plotted.
            yerr2plot (OrderedDict) : y-error which was plotted.
            xerr2plot (ndarray) : x-error which was plotted.
            name (str) : Name of the data set.
        """
        # NOTE(review): fig_size is accepted but currently never used in
        # this method - confirm whether a plt.figure call is missing
        # initialize the y-data as ordered dict in order to allow for multiple
        # counters at the same time
        y2plot = collections.OrderedDict()
        yerr2plot = collections.OrderedDict()
        # get the averaged data, stds and errors for the scan list and the xgrid
        avg_data, std_data, err_data, name = self.avg_N_bin_scans(
            scan_list, xgrid=xgrid, binning=binning)
        # set the error data
        if xerr == 'std':
            xerr_data = std_data
        elif xerr == 'err':
            xerr_data = err_data
        else:
            xerr_data = np.zeros_like(std_data)
        if yerr == 'std':
            yerr_data = std_data
        elif yerr == 'err':
            yerr_data = err_data
        else:
            yerr_data = np.zeros_like(std_data)
        # set x-data and errors
        x2plot = avg_data[self.xcol]
        xerr2plot = xerr_data[self.xcol]
        # plot all keys in the clist
        clist = self.get_clist()
        for col in clist:
            # traverse the counter list
            # save the counter data and errors in the ordered dictionary
            y2plot[col] = avg_data[col]
            yerr2plot[col] = yerr_data[col]
            if norm2one:
                # normalize the y-data to 1 for t < t0
                # just makes sense for delay scans
                before_zero = y2plot[col][x2plot <= self.t0]
                y2plot[col] = y2plot[col]/np.mean(before_zero)
                yerr2plot[col] = yerr2plot[col]/np.mean(before_zero)
            if len(label_text) == 0:
                # if no label_text is given use the counter name
                lt = col
            else:
                if len(clist) > 1:
                    # for multiple counters add the counter name to the label
                    lt = label_text + ' | ' + col
                else:
                    # for a single counter just use the label_text
                    lt = label_text
            if not skip_plot:
                # plot the errorbar for each counter
                # (bitwise & works here because both operands are bools)
                if (xerr == 'none') & (yerr == 'none'):
                    plt.plot(x2plot, y2plot[col], fmt, label=lt)
                else:
                    plt.errorbar(
                        x2plot, y2plot[col], fmt=fmt, label=lt,
                        xerr=xerr2plot, yerr=yerr2plot[col])
        if not skip_plot:
            # add a legend, labels, title and set the limits and grid
            plt.legend(frameon=True, loc=0, numpoints=1)
            plt.xlabel(self.xcol)
            if xlims:
                plt.xlim(xlims)
            if ylims:
                plt.ylim(ylims)
            if len(title_text) > 0:
                plt.title(title_text)
            else:
                plt.title(name)
            if len(xtext) > 0:
                plt.xlabel(xtext)
            if len(ytext) > 0:
                plt.ylabel(ytext)
            if grid_on:
                plt.grid(True)
        return y2plot, x2plot, yerr2plot, xerr2plot, name
def plot_mesh_scan(self, scan_num, skip_plot=False, grid_on=False, ytext='', xtext='',
levels=20, cbar=True):
"""Plot a single mesh scan from the spec file.
Various plot parameters are provided.
The plotted data are returned.
Args:
scan_num (int) : Scan number of the spec scan.
skip_plot (Optional[bool]) : Skip plotting, just return data
default is False.
grid_on (Optional[bool]) : Add grid to plot - default is False.
ytext (Optional[str]) : y-Label of the plot - defaults is none.
xtext (Optional[str]) : x-Label of the plot - defaults is none.
levels (Optional[int]) : levels of contour plot - defaults is 20.
cbar (Optional[bool]) : Add colorbar to plot - default is True.
Returns:
xx, yy, zz : x,y,z data which was plotted
"""
from matplotlib.mlab import griddata
from matplotlib import gridspec
# read data from spec file
try:
# try to read data of this scan
spec_data = self.get_scan_data(scan_num)
except Exception:
print('Scan #' + int(scan_num) + ' not found, skipping')
dt = spec_data.dtype
dt = dt.descr
xmotor = dt[0][0]
ymotor = dt[1][0]
X = spec_data[xmotor]
Y = spec_data[ymotor]
xx = np.sort(np.unique(X))
yy = np.sort(np.unique(Y))
clist = self.get_clist()
if len(clist) > 1:
print('WARNING: Only the first counter of the clist is plotted.')
Z = spec_data[clist[0]]
zz = griddata(X, Y, Z, xx, yy, interp='linear')
if not skip_plot:
if cbar:
gs = gridspec.GridSpec(4, 2,
width_ratios=[3, 1],
height_ratios=[0.2, 0.1, 1, 3]
)
k = 4
else:
gs = gridspec.GridSpec(2, 2,
width_ratios=[3, 1],
height_ratios=[1, 3]
)
k = 0
ax1 = plt.subplot(gs[0+k])
plt.plot(xx, np.mean(zz, 0), label='mean')
plt.plot(xx, zz[np.argmax(np.mean(zz, 1)), :], label='peak')
plt.xlim([min(xx), max(xx)])
plt.legend(loc=0)
ax1.xaxis.tick_top()
if grid_on:
plt.grid(True)
plt.subplot(gs[2+k])
plt.contourf(xx, yy, zz, levels, cmap='viridis')
plt.xlabel(xmotor)
plt.ylabel(ymotor)
if len(xtext) > 0:
plt.xlabel(xtext)
if len(ytext) > 0:
plt.ylabel(ytext)
if grid_on:
plt.grid(True)
if cbar:
cb = plt.colorbar(cax=plt.subplot(
gs[0]), orientation='horizontal')
cb.ax.xaxis.set_ticks_position('top')
cb.ax.xaxis.set_label_position('top')
ax4 = plt.subplot(gs[3+k])
plt.plot(np.mean(zz, 1), yy)
plt.plot(zz[:, np.argmax(np.mean(zz, 0))], yy)
plt.ylim([np.min(yy), np.max(yy)])
ax4.yaxis.tick_right()
if grid_on:
plt.grid(True)
return xx, yy, zz
    def plot_scan_sequence(self, scan_sequence, ylims=[], xlims=[], fig_size=[],
                           xgrid=[], yerr='std', xerr='std', norm2one=False,
                           binning=True, sequence_type='', label_text='',
                           title_text='', skip_plot=False, grid_on=True, ytext='',
                           xtext='', fmt='-o'):
        """Plot a list of scans from the spec file.
        Various plot parameters are provided.
        The plotted data are returned.

        Args:
            scan_sequence (List[
                List/Tuple[List[int],
                int/str]])            : Sequence of scan lists and parameters.
            ylims (Optional[ndarray]) : ylim for the plot.
            xlims (Optional[ndarray]) : xlim for the plot.
            fig_size (Optional[ndarray]) : Figure size of the figure.
            xgrid (Optional[ndarray]) : Grid to bin the data to -
                                        default in empty so use the
                                        x-axis of the first scan.
            yerr (Optional[ndarray]) : Type of the errors in y: [err, std, none]
                                       default is 'std'.
            xerr (Optional[ndarray]) : Type of the errors in x: [err, std, none]
                                       default is 'std'.
            norm2one (Optional[bool]) : Norm transient data to 1 for t < t0
                                        default is False.
            binning (Optional[bool]) : Bin the data onto the xgrid -
                                       default is True.
            sequence_type (Optional[str]): Type of the sequence: [fluence, delay,
                                           energy, theta, position, voltage, none,
                                           text] - default is enumeration.
            label_text (Optional[str]) : Label of the plot - default is none.
                                         NOTE(review): when non-empty this is
                                         indexed per sequence entry, so a list
                                         of labels is expected - confirm.
            title_text (Optional[str]) : Title of the figure - default is none.
            skip_plot (Optional[bool]) : Skip plotting, just return data
                                         default is False.
            grid_on (Optional[bool]) : Add grid to plot - default is True.
            ytext (Optional[str]) : y-Label of the plot - defaults is none.
            xtext (Optional[str]) : x-Label of the plot - defaults is none.
            fmt (Optional[str]) : format string of the plot - defaults is -o.

        Returns:
            sequence_data (OrderedDict) : Dictionary of the averaged scan data.
            parameters (List[str, float]) : Parameters of the sequence.
            names (List[str]) : List of names of each data set.
            label_texts (List[str]) : List of labels for each data set.
        """
        # initialize the return data
        sequence_data = collections.OrderedDict()
        names = []
        label_texts = []
        parameters = []
        for i, (scan_list, parameter) in enumerate(scan_sequence):
            # traverse the scan sequence
            parameters.append(parameter)
            # format the parameter as label text of this plot if no label text
            # is given
            if len(label_text) == 0:
                if sequence_type == 'fluence':
                    lt = str.format('{:.2f} mJ/cm²', parameter)
                elif sequence_type == 'delay':
                    lt = str.format('{:.2f} ps', parameter)
                elif sequence_type == 'energy':
                    lt = str.format('{:.2f} eV', parameter)
                elif sequence_type == 'theta':
                    lt = str.format('{:.2f} deg', parameter)
                elif sequence_type == 'temperature':
                    lt = str.format('{:.2f} K', parameter)
                elif sequence_type == 'position':
                    lt = str.format('{:.2f} mm', parameter)
                elif sequence_type == 'voltage':
                    lt = str.format('{:.2f} V', parameter)
                elif sequence_type == 'current':
                    lt = str.format('{:.2f} A', parameter)
                elif sequence_type == 'scans':
                    lt = str(scan_list)
                elif sequence_type == 'none':
                    # no parameter for single scans
                    lt = ''
                elif sequence_type == 'text':
                    # parameter is a string
                    lt = parameter
                else:
                    # no sequence type is given --> enumerate
                    lt = str.format('#{}', i+1)
            else:
                lt = label_text[i]
            # get the plot data for the scan list
            y2plot, x2plot, yerr2plot, xerr2plot, name = self.plot_scans(
                scan_list,
                ylims=ylims,
                xlims=xlims,
                fig_size=fig_size,
                xgrid=xgrid,
                yerr=yerr,
                xerr=xerr,
                norm2one=norm2one,
                binning=binning,
                label_text=lt,
                title_text=title_text,
                skip_plot=skip_plot,
                grid_on=grid_on,
                ytext=ytext,
                xtext=xtext,
                fmt=fmt
            )
            if self.xcol not in sequence_data.keys():
                # if the xcol is not in the return data dict - add the key
                sequence_data[self.xcol] = []
                sequence_data[self.xcol + 'Err'] = []
            # add the x-axis data to the return data dict
            sequence_data[self.xcol].append(x2plot)
            sequence_data[self.xcol + 'Err'].append(xerr2plot)
            for counter in y2plot:
                # traverse all counters in the data set
                if counter not in sequence_data.keys():
                    # if the counter is not in the return data dict - add the key
                    sequence_data[counter] = []
                    sequence_data[counter + 'Err'] = []
                # add the counter data to the return data dict
                sequence_data[counter].append(y2plot[counter])
                sequence_data[counter + 'Err'].append(yerr2plot[counter])
            # append names and labels to their lists
            names.append(name)
            label_texts.append(lt)
        return sequence_data, parameters, names, label_texts
def export_scan_sequence(self, scan_sequence, path, fileName, yerr='std',
xerr='std', xgrid=[], norm2one=False, binning=True):
"""Exports spec data for each scan list in the sequence as individual file.
Args:
scan_sequence (List[
List/Tuple[List[int],
int/str]]) : Sequence of scan lists and parameters.
path (str) : Path of the file to export to.
fileName (str) : Name of the file to export to.
yerr (Optional[ndarray]) : Type of the errors in y: [err, std, none]
default is 'std'.
xerr (Optional[ndarray]) : Type of the errors in x: [err, std, none]
default is 'std'.
xgrid (Optional[ndarray]) : Grid to bin the data to -
default in empty so use the
x-axis of the first scan.
norm2one (Optional[bool]) : Norm transient data to 1 for t < t0
default is False.
"""
# get scan_sequence data without plotting
sequence_data, parameters, names, label_texts = self.plot_scan_sequence(
scan_sequence,
xgrid=xgrid,
yerr=yerr,
xerr=xerr,
norm2one=norm2one,
binning=binning,
skip_plot=True)
for i, label_text in enumerate(label_texts):
# travserse the sequence
header = ''
saveData = []
for counter in sequence_data:
# travserse all counters in the data
# build the file header
header = header + counter + '\t '
# build the data matrix
saveData.append(sequence_data[counter][i])
# save data with header to text file
np.savetxt('{:s}/{:s}_{:s}.dat'.format(path, fileName,
"".join(x for x in label_text if x.isalnum())),
np.r_[saveData].T, delimiter='\t', header=header)
def fit_scans(self, scans, mod, pars, ylims=[], xlims=[], fig_size=[], xgrid=[],
yerr='std', xerr='std', norm2one=False, binning=True,
sequence_type='text', label_text='', title_text='', ytext='',
xtext='', select='', fit_report=0, show_single=False,
weights=False, fit_method='leastsq', offset_t0=False,
plot_separate=False, grid_on=True, fmt='o'):
"""Fit, plot, and return the data of scans.
This is just a wrapper for the fit_scan_sequence method
"""
scan_sequence = [[scans, '']]
return self.fit_scan_sequence(scan_sequence, mod, pars, ylims, xlims, fig_size,
xgrid, yerr, xerr, norm2one, binning,
'none', label_text, title_text, ytext,
xtext, select, fit_report, show_single,
weights, fit_method, offset_t0, plot_separate,
grid_on, fmt=fmt)
def fit_scan_sequence(self, scan_sequence, mod, pars, ylims=[], xlims=[], fig_size=[],
                      xgrid=[], yerr='std', xerr='std', norm2one=False,
                      binning=True, sequence_type='', label_text='',
                      title_text='', ytext='', xtext='', select='',
                      fit_report=0, show_single=False, weights=False,
                      fit_method='leastsq', offset_t0=False,
                      plot_separate=False, grid_on=True,
                      last_res_as_par=False, sequence_data=[], fmt='o'):
    """Fit, plot, and return the data of a scan sequence.

    Args:
        scan_sequence (List[
          List/Tuple[List[int],
          int/str]]) : Sequence of scan lists and parameters.
        mod (Model[lmfit]) : lmfit model for fitting the data.
        pars (Parameters[lmfit]) : lmfit parameters for fitting the data.
        ylims (Optional[ndarray]) : ylim for the plot.
        xlims (Optional[ndarray]) : xlim for the plot.
        fig_size (Optional[ndarray]) : Figure size of the figure.
        xgrid (Optional[ndarray]) : Grid to bin the data to -
                                    default in empty so use the
                                    x-axis of the first scan.
        yerr (Optional[ndarray]) : Type of the errors in y: [err, std, none]
                                   default is 'std'.
        xerr (Optional[ndarray]) : Type of the errors in x: [err, std, none]
                                   default is 'std'.
        norm2one (Optional[bool]) : Norm transient data to 1 for t < t0
                                    default is False.
        sequence_type (Optional[str]): Type of the sequence: [fluence, delay,
                                       energy, theta] - default is fluence.
        label_text (Optional[str]) : Label of the plot - default is none.
        title_text (Optional[str]) : Title of the figure - default is none.
        ytext (Optional[str]) : y-Label of the plot - defaults is none.
        xtext (Optional[str]) : x-Label of the plot - defaults is none.
        select (Optional[str]) : String to evaluate as select statement
                                 for the fit region - default is none
        fit_report (Optional[int]) : Set the fit reporting level:
                                     [0: none, 1: basic, 2: full]
                                     default 0.
        show_single (Optional[bool]) : Plot each fit seperately - default False.
        weights (Optional[bool]) : Use weights for fitting - default False.
        fit_method (Optional[str]) : Method to use for fitting; refer to
                                     lmfit - default is 'leastsq'.
        offset_t0 (Optional[bool]) : Offset time scans by the fitted
                                     t0 parameter - default False.
        plot_separate (Optional[bool]): A single plot for each counter
                                        default False.
        grid_on (Optional[bool]) : Add grid to plot - default is True.
        last_res_as_par (Optional[bool]): Use the last fit result as start
                                          values for next fit - default is False.
        sequence_data (Optional[ndarray]): actual exp. data are externally given.
                                           default is empty
        fmt (Optional[str]) : format string of the plot - defaults is -o.

    Returns:
        res (Dict[ndarray]) : Fit results.
        parameters (ndarray) : Parameters of the sequence.
        sequence_data (OrderedDict) : Dictionary of the averaged scan data.
    """
    # get the last open figure number
    main_fig_num = self.get_last_fig_number()
    if not fig_size:
        # use default figure size if none is given
        fig_size = mpl.rcParams['figure.figsize']
    # initialization of returns
    res = {}  # initialize the results dict
    for i, counter in enumerate(self.get_clist()):
        # traverse all counters in the counter list to initialize the returns
        # results for this counter is again a Dict
        res[counter] = {}
        if isinstance(pars, (list, tuple)):
            # the fit parameters might be individual for each counter
            _pars = pars[i]
        else:
            _pars = pars
        for pname in _pars:
            # add a dict key for each fit parameter in the result dict
            res[counter][pname] = []
            res[counter][pname + 'Err'] = []
        # add some more results
        res[counter]['chisqr'] = []
        res[counter]['redchi'] = []
        res[counter]['CoM'] = []
        res[counter]['int'] = []
        res[counter]['fit'] = []
    if len(sequence_data) > 0:
        # get only the parameters
        _, parameters, names, label_texts = self.plot_scan_sequence(
            scan_sequence,
            ylims=ylims,
            xlims=xlims,
            fig_size=fig_size,
            xgrid=xgrid,
            yerr=yerr,
            xerr=xerr,
            norm2one=norm2one,
            binning=True,
            sequence_type=sequence_type,
            label_text=label_text,
            title_text=title_text,
            skip_plot=True)
    else:
        # get the sequence data and parameters
        sequence_data, parameters, names, label_texts = self.plot_scan_sequence(
            scan_sequence,
            ylims=ylims,
            xlims=xlims,
            fig_size=fig_size,
            xgrid=xgrid,
            yerr=yerr,
            xerr=xerr,
            norm2one=norm2one,
            binning=True,
            sequence_type=sequence_type,
            label_text=label_text,
            title_text=title_text,
            skip_plot=True)
    # this is the number of different counters
    num_sub_plots = len(self.get_clist())
    # fitting and plotting the data
    l_plot = 1  # counter for single plots
    for i, parameter in enumerate(parameters):
        # traverse all parameters of the sequence
        lt = label_texts[i]
        name = names[i]
        if fit_report > 0:
            # plot for basics and full fit reporting
            print('')
            print('='*10 + ' Parameter: ' + lt + ' ' + '='*15)
        j = 0  # counter for counters ;)
        k = 1  # counter for subplots
        for counter in sequence_data:
            # traverse all counters in the sequence
            # plot only y counters - next is the coresp. error
            if j >= 2 and j % 2 == 0:
                # BUGFIX: re-slice the x-axis for EVERY counter - the
                # select and NaN filters below shrink these arrays, so
                # reusing the already-filtered arrays would desync x and
                # y for all counters after the first one
                x2plot = sequence_data[self.xcol][i]
                xerr2plot = sequence_data[self.xcol + 'Err'][i]
                # add the counter name to the label for not seperate plots
                if sequence_type == 'none':
                    _lt = counter
                else:
                    if plot_separate or num_sub_plots == 1:
                        _lt = lt
                    else:
                        _lt = lt + ' | ' + counter
                # get the fit models and fit parameters if they are lists/tupels
                if isinstance(mod, (list, tuple)):
                    _mod = mod[k-1]
                else:
                    _mod = mod
                if last_res_as_par and i > 0:
                    # use last results as start values for pars
                    # NOTE(review): this deliberately aliases and mutates
                    # the caller's `pars`, so start values persist across
                    # sequence parameters - confirm before changing
                    _pars = pars
                    for pname in pars:
                        _pars[pname].value = res[counter][pname][i-1]
                else:
                    if isinstance(pars, (list, tuple)):
                        _pars = pars[k-1]
                    else:
                        _pars = pars
                # get the actual y-data and -errors for plotting and fitting
                y2plot = sequence_data[counter][i]
                yerr2plot = sequence_data[counter + 'Err'][i]
                # evaluate the select statement
                if select == '':
                    # select all
                    sel = np.ones_like(y2plot, dtype=bool)
                else:
                    # NOTE(review): eval() of a user-provided expression
                    # (may reference x2plot/y2plot) - pass trusted
                    # select strings only
                    sel = eval(select)
                # execute the select statement
                y2plot = y2plot[sel]
                x2plot = x2plot[sel]
                yerr2plot = yerr2plot[sel]
                xerr2plot = xerr2plot[sel]
                # BUGFIX: remove NaNs with a single mask so all four
                # arrays stay aligned - recomputing the mask from the
                # already-filtered y2plot applied inconsistent (and
                # wrongly sized) selections to x and the error arrays
                nan_mask = ~np.isnan(y2plot)
                y2plot = y2plot[nan_mask]
                x2plot = x2plot[nan_mask]
                yerr2plot = yerr2plot[nan_mask]
                xerr2plot = xerr2plot[nan_mask]
                # do the fitting with or without weighting the data
                if weights:
                    out = _mod.fit(y2plot, _pars, x=x2plot,
                                   weights=1/yerr2plot, method=fit_method,
                                   nan_policy='propagate')
                else:
                    out = _mod.fit(y2plot, _pars, x=x2plot,
                                   method=fit_method, nan_policy='propagate')
                if fit_report > 0:
                    # for basic and full fit reporting
                    print('')
                    print('-'*10 + ' ' + counter + ': ' + '-'*15)
                    for key in out.best_values:
                        print('{:>12}: {:>10.4e} '.format(
                            key, out.best_values[key]))
                # set the x-offset for delay scans - offset parameter in
                # the fit must be called 't0'
                if offset_t0:
                    offsetX = out.best_values['t0']
                else:
                    offsetX = 0
                plt.figure(main_fig_num)  # select the main figure
                if plot_separate:
                    # use subplot for separate plotting
                    # BUGFIX: the row count must be an integer (floor
                    # division) - matplotlib rejects a float subplot spec
                    plt.subplot((num_sub_plots+num_sub_plots % 2)//2, 2, k)
                # plot the fit and the data as errorbars
                x2plotFit = np.linspace(
                    np.min(x2plot), np.max(x2plot), 10000)
                plot = plt.plot(x2plotFit-offsetX,
                                out.eval(x=x2plotFit), '-', lw=2, alpha=1)
                plt.errorbar(x2plot-offsetX, y2plot, fmt=fmt, xerr=xerr2plot,
                             yerr=yerr2plot, label=_lt, alpha=0.25,
                             color=plot[0].get_color())
                if len(parameters) > 5:
                    # move the legend outside the plot for more than
                    # 5 sequence parameters
                    plt.legend(bbox_to_anchor=(0., 1.08, 1, .102), frameon=True,
                               loc=3, numpoints=1, ncol=3, mode="expand",
                               borderaxespad=0.)
                else:
                    plt.legend(frameon=True, loc=0, numpoints=1)
                # set the axis limits, title, labels and gird
                if xlims:
                    plt.xlim(xlims)
                if ylims:
                    plt.ylim(ylims)
                if len(title_text) > 0:
                    if isinstance(title_text, (list, tuple)):
                        plt.title(title_text[k-1])
                    else:
                        plt.title(title_text)
                else:
                    plt.title(name)
                if len(xtext) > 0:
                    plt.xlabel(xtext)
                if len(ytext) > 0:
                    if isinstance(ytext, (list, tuple)):
                        plt.ylabel(ytext[k-1])
                    else:
                        plt.ylabel(ytext)
                if grid_on:
                    plt.grid(True)
                # show the single fits and residuals
                if show_single:
                    plt.figure(main_fig_num+l_plot, figsize=fig_size)
                    gs = mpl.gridspec.GridSpec(
                        2, 1, height_ratios=[1, 3], hspace=0.1)
                    ax1 = plt.subplot(gs[0])
                    markerline, stemlines, baseline = plt.stem(
                        x2plot-offsetX, out.residual, markerfmt=' ',
                        use_line_collection=True)
                    plt.setp(stemlines, 'color',
                             plot[0].get_color(), 'linewidth', 2, alpha=0.5)
                    plt.setp(baseline, 'color', 'k', 'linewidth', 0)
                    ax1.xaxis.tick_top()
                    ax1.yaxis.set_major_locator(plt.MaxNLocator(3))
                    plt.ylabel('Residuals')
                    if xlims:
                        plt.xlim(xlims)
                    if ylims:
                        plt.ylim(ylims)
                    if len(xtext) > 0:
                        plt.xlabel(xtext)
                    if grid_on:
                        plt.grid(True)
                    if len(title_text) > 0:
                        if isinstance(title_text, (list, tuple)):
                            plt.title(title_text[k-1])
                        else:
                            plt.title(title_text)
                    else:
                        plt.title(name)
                    ax2 = plt.subplot(gs[1])
                    x2plotFit = np.linspace(
                        np.min(x2plot), np.max(x2plot), 1000)
                    ax2.plot(x2plotFit-offsetX, out.eval(x=x2plotFit),
                             '-', lw=2, alpha=1, color=plot[0].get_color())
                    ax2.errorbar(x2plot-offsetX, y2plot, fmt=fmt, xerr=xerr2plot,
                                 yerr=yerr2plot, label=_lt, alpha=0.25,
                                 color=plot[0].get_color())
                    plt.legend(frameon=True, loc=0, numpoints=1)
                    if xlims:
                        plt.xlim(xlims)
                    if ylims:
                        plt.ylim(ylims)
                    if len(xtext) > 0:
                        plt.xlabel(xtext)
                    if len(ytext) > 0:
                        if isinstance(ytext, (list, tuple)):
                            plt.ylabel(ytext[k-1])
                        else:
                            plt.ylabel(ytext)
                    if grid_on:
                        plt.grid(True)
                    l_plot += 1
                if fit_report > 1:
                    # for full fit reporting
                    print('_'*40)
                    print(out.fit_report())
                # add the fit results to the returns
                for pname in _pars:
                    res[counter][pname] = np.append(
                        res[counter][pname], out.best_values[pname])
                    res[counter][pname + 'Err'] = np.append(
                        res[counter][pname + 'Err'], out.params[pname].stderr)
                res[counter]['chisqr'] = np.append(
                    res[counter]['chisqr'], out.chisqr)
                res[counter]['redchi'] = np.append(
                    res[counter]['redchi'], out.redchi)
                res[counter]['CoM'] = np.append(
                    res[counter]['CoM'], sum(y2plot*x2plot)/sum(y2plot))
                res[counter]['int'] = np.append(
                    res[counter]['int'], sum(y2plot))
                res[counter]['fit'] = np.append(res[counter]['fit'], out)
                k += 1
            j += 1
        plt.figure(main_fig_num)  # set as active figure
    return res, parameters, sequence_data
# move to the end for plotting
def get_last_fig_number(self):
    """get_last_fig_number

    Return the last figure number of all opened figures for plotting
    data in the same figure during for-loops.

    Returns:
        fig_number (int): last figure number of all opened figures.
    """
    # NOTE: relies on matplotlib's private figure manager registry
    active_manager = None
    try:
        active_manager = mpl._pylab_helpers.Gcf.get_active()
    except Exception:
        # no figure manager available at all
        pass
    # fall back to figure number 1 when no figure is open
    return active_manager.num if active_manager is not None else 1
def get_next_fig_number(self):
    """get_next_fig_number

    Return the number of the next available figure.

    Returns:
        next_fig_number (int): next figure number of all opened figures.
    """
    # one past the last currently opened figure
    return 1 + self.get_last_fig_number()
|
{"/pyEvalData/__init__.py": ["/pyEvalData/evalData.py", "/pyEvalData/evaluation.py"], "/pyEvalData/io/spec.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/test/conftest.py": ["/pyEvalData/__init__.py"], "/pyEvalData/evaluation.py": ["/pyEvalData/__init__.py", "/pyEvalData/helpers.py"], "/test/test_scan.py": ["/pyEvalData/io/__init__.py"], "/pyEvalData/io/scan.py": ["/pyEvalData/__init__.py"], "/pyEvalData/io/source.py": ["/pyEvalData/__init__.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/palxfel.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/__init__.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py", "/pyEvalData/io/spec.py", "/pyEvalData/io/sardana_nexus.py", "/pyEvalData/io/palxfel.py"], "/pyEvalData/io/sardana_nexus.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"]}
|
29,959,582
|
dschick/pyEvalData
|
refs/heads/develop
|
/test/test_scan.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pyEvalData.io import Scan
def test_scan():
    """A freshly created Scan must store its scan number."""
    assert Scan(1).number == 1
|
{"/pyEvalData/__init__.py": ["/pyEvalData/evalData.py", "/pyEvalData/evaluation.py"], "/pyEvalData/io/spec.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/test/conftest.py": ["/pyEvalData/__init__.py"], "/pyEvalData/evaluation.py": ["/pyEvalData/__init__.py", "/pyEvalData/helpers.py"], "/test/test_scan.py": ["/pyEvalData/io/__init__.py"], "/pyEvalData/io/scan.py": ["/pyEvalData/__init__.py"], "/pyEvalData/io/source.py": ["/pyEvalData/__init__.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/palxfel.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/__init__.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py", "/pyEvalData/io/spec.py", "/pyEvalData/io/sardana_nexus.py", "/pyEvalData/io/palxfel.py"], "/pyEvalData/io/sardana_nexus.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"]}
|
29,959,583
|
dschick/pyEvalData
|
refs/heads/develop
|
/pyEvalData/io/scan.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2015-2021 Daniel Schick
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from .. import config
import logging
__all__ = ['Scan']
__docformat__ = 'restructuredtext'
import numpy as np
class Scan(object):
    """Scan

    Representation of a scan which holds the relevant data and meta information.

    Args:
        number (uint): number of the scan.

    Keyword Args:
        cmd (str): scan command.
        user (str): scan user.
        date (str): scan date.
        time (str): scan time.
        int_time (float): integration time.
        init_mopo (dict(float)): initial motor position.
        header (str): full scan header.

    Attributes:
        log (logging.logger): logger instance from logging.
        number (uint): number of the scan.
        meta (dict): meta data dictionary.
        data (ndarray[float]): data recarray.
    """

    def __init__(self, number, **kwargs):
        self.log = logging.getLogger(__name__)
        self.log.setLevel(config.LOG_LEVEL)
        self.log.debug('Creating scan #{:d}'.format(number))
        self.number = np.uint64(number)
        # initialize empty data array and circumvent
        # check for recarray here
        self._data = None
        self.scalar_data_names = []
        self.oned_data_names = []
        self.twod_data_names = []
        self.index_data()
        # collect all meta information in a single dict
        self.meta = {}
        self.meta['number'] = self.number
        self.meta['cmd'] = kwargs.get('cmd', '')
        self.meta['user'] = kwargs.get('user', '')
        self.meta['date'] = kwargs.get('date', '')
        self.meta['time'] = kwargs.get('time', '')
        self.meta['int_time'] = kwargs.get('int_time', '')
        self.meta['init_mopo'] = kwargs.get('init_mopo', {})
        self.meta['header'] = kwargs.get('header', '')

    def __getattr__(self, attr):
        """__getattr__

        Allows to access the data and meta(init_mopo) keys as scan attributes.

        Returns:
            attr (ndarray[float]|float|str): data/meta values.

        Raises:
            AttributeError: if the attribute is neither a data column
                nor a meta/init_mopo key.
        """
        # check data recarray first
        # BUGFIX: KeyError must be caught as well - since numpy 1.16
        # indexing a structured array with an unknown field name raises
        # KeyError (older versions raised ValueError), which previously
        # made the meta lookups below unreachable once data was set
        try:
            return self.data[attr]
        except (KeyError, ValueError, IndexError, TypeError):
            pass
        # check meta dict
        try:
            return self.meta[attr]
        except KeyError:
            pass
        # check meta init_mopo dict
        try:
            return self.meta['init_mopo'][attr]
        except KeyError:
            # report the class (not the module) as owner of the attribute
            raise AttributeError('\'{:s}\' has no attribute \'{:s}\''.format(
                type(self).__name__, attr))

    def index_data(self):
        """index_data

        Check the dimensions of the data recarray elements and
        remember the names for scaler, 1d, and 2d data columns.
        """
        if self.data is not None:
            for descr in self.data.dtype.descr:
                # descr is ('name', 'type') for scalar fields and
                # ('name', 'type', shape) for array fields
                try:
                    if len(descr[2]) == 1:
                        self.oned_data_names.append(descr[0])
                    elif len(descr[2]) == 2:
                        self.twod_data_names.append(descr[0])
                except IndexError:
                    # no shape entry present -> scalar column
                    self.scalar_data_names.append(descr[0])

    def get_scalar_data(self):
        """get_scalar_data

        Returns only scalar data from the data recarray.

        Returns:
            data (ndarray[float]): scalar data, or None if there is none.
        """
        if self.scalar_data_names == []:
            return None
        else:
            return self.data[self.scalar_data_names]

    def get_oned_data(self):
        """get_oned_data

        Returns only 1d data from the data recarray.

        Returns:
            data (ndarray[float]): 1d data, or None if there is none.
        """
        if self.oned_data_names == []:
            return None
        else:
            return self.data[self.oned_data_names]

    def get_twod_data(self):
        """get_twod_data

        Returns only 2d data from the data recarray.

        Returns:
            data (ndarray[float]): 2d data, or None if there is none.
        """
        if self.twod_data_names == []:
            return None
        else:
            return self.data[self.twod_data_names]

    def clear_data(self):
        """clear_data

        Clears the data to save memory.
        """
        self._data = None
        self.log.debug('Cleared data for scan #{:d}'.format(self.number))

    @property
    def data(self):
        # plain read access to the backing recarray (may be None)
        return self._data

    @data.setter
    def data(self, data):
        # only accept recarrays (or None to clear) to keep the
        # field-based attribute access in __getattr__ working
        if isinstance(data, np.recarray):
            self._data = data
        elif data is None:
            self.log.info('Scan #{:d} contains no data!'.format(self.number))
            self._data = None
        else:
            raise TypeError('Data must be a numpy.recarray!')
|
{"/pyEvalData/__init__.py": ["/pyEvalData/evalData.py", "/pyEvalData/evaluation.py"], "/pyEvalData/io/spec.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/test/conftest.py": ["/pyEvalData/__init__.py"], "/pyEvalData/evaluation.py": ["/pyEvalData/__init__.py", "/pyEvalData/helpers.py"], "/test/test_scan.py": ["/pyEvalData/io/__init__.py"], "/pyEvalData/io/scan.py": ["/pyEvalData/__init__.py"], "/pyEvalData/io/source.py": ["/pyEvalData/__init__.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/palxfel.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/__init__.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py", "/pyEvalData/io/spec.py", "/pyEvalData/io/sardana_nexus.py", "/pyEvalData/io/palxfel.py"], "/pyEvalData/io/sardana_nexus.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"]}
|
29,959,584
|
dschick/pyEvalData
|
refs/heads/develop
|
/setup.py
|
from setuptools import setup, find_packages
# package metadata and build configuration, read by pip/setuptools
setup(
    name='pyEvalData',
    version='1.5.1',
    # automatically discover all sub-packages
    packages=find_packages(),
    url='https://github.com/dschick/pyEvalData',
    # runtime dependencies
    install_requires=['numpy',
                      'matplotlib',
                      'lmfit',
                      'scipy',
                      'uncertainties',
                      'xrayutilities',
                      'h5py>=3.0',
                      'nexusformat'],
    # optional dependency groups: pip install pyEvalData[testing]
    extras_require={
        'testing': ['flake8', 'pytest'],
        'documentation': ['sphinx', 'nbsphinx', 'sphinxcontrib-napoleon'],
    },
    license='MIT',
    author='Daniel Schick',
    author_email='schick.daniel@gmail.com',
    description='Python module to evaluate experimental data',
    # use the README as the PyPI long description
    long_description=open('README.md').read(),
    long_description_content_type='text/markdown',
    # ship bundled config files with the package
    package_data={
        'pyEvalData': ['*.conf']
    },
    python_requires='>=3.5',
    keywords='data evaluation analysis SPEC h5 NeXus',
)
|
{"/pyEvalData/__init__.py": ["/pyEvalData/evalData.py", "/pyEvalData/evaluation.py"], "/pyEvalData/io/spec.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/test/conftest.py": ["/pyEvalData/__init__.py"], "/pyEvalData/evaluation.py": ["/pyEvalData/__init__.py", "/pyEvalData/helpers.py"], "/test/test_scan.py": ["/pyEvalData/io/__init__.py"], "/pyEvalData/io/scan.py": ["/pyEvalData/__init__.py"], "/pyEvalData/io/source.py": ["/pyEvalData/__init__.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/palxfel.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/__init__.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py", "/pyEvalData/io/spec.py", "/pyEvalData/io/sardana_nexus.py", "/pyEvalData/io/palxfel.py"], "/pyEvalData/io/sardana_nexus.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"]}
|
29,959,585
|
dschick/pyEvalData
|
refs/heads/develop
|
/pyEvalData/io/source.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2015-2021 Daniel Schick
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from .. import config
import logging
from .scan import Scan
import os.path as path
from numpy.core.records import fromarrays
import nexusformat.nexus as nxs
__all__ = ['Source']
__docformat__ = 'restructuredtext'
class Source(object):
"""Source
Class of default source implementation.
Args:
file_name (str): file name including extension,
can include regex pattern.
file_path (str, optional): file path - defaults to ``./``.
Keyword Args:
start_scan_number (uint): start of scan numbers to parse.
stop_scan_number (uint): stop of scan numbers to parse.
This number is included.
nexus_file_name (str): name for generated nexus file.
nexus_file_name_postfix (str): postfix for nexus file name.
nexus_file_path (str): path for generated nexus file.
read_all_data (bool): read all data on parsing.
If false, data will be read only on demand.
read_and_forget (bool): clear data after read to save memory.
update_before_read (bool): always update from source
before reading scan data.
use_nexus (bool): use nexus file to join/compress raw data.
force_overwrite (bool): forced re-read of raw source and
re-generated of nexus file.
Attributes:
log (logging.logger): logger instance from logging.
name (str): name of the source
scan_dict (dict(scan)): dict of scan objects with
key being the scan number.
start_scan_number (uint): start of scan numbers to parse.
stop_scan_number (uint): stop of scan numbers to parse.
This number is included.
file_name (str): file name including extension,
can include regex pattern.
file_path (str, optional): file path - defaults to ``./``.
nexus_file_name (str): name for generated nexus file.
nexus_file_name_postfix (str): postfix for nexus file name.
nexus_file_path (str): path for generated nexus file.
nexus_file_exists(bool): if nexus file exists.
read_all_data (bool): read all data on parsing.
read_and_forget (bool): clear data after read to save memory.
update_before_read (bool): always update from source
before reading scan data.
use_nexus (bool): use nexus file to join/compress raw data.
force_overwrite (bool): forced re-read of raw source and
re-generated of nexus file.
"""
def __init__(self, file_name, file_path='./', **kwargs):
    """Initialize the source and run a first update from it.

    See the class docstring for accepted keyword arguments.
    """
    self.log = logging.getLogger(__name__)
    # log level comes from the package-wide configuration
    self.log.setLevel(config.LOG_LEVEL)
    self.name = file_name
    self.scan_dict = {}
    # backing fields first - start/stop_scan_number below are presumably
    # properties whose setters live outside this chunk; TODO confirm
    self._start_scan_number = 0
    self._stop_scan_number = -1
    self.start_scan_number = kwargs.get('start_scan_number', 0)
    self.stop_scan_number = kwargs.get('stop_scan_number', -1)
    self.file_name = file_name
    self.file_path = file_path
    self.nexus_file_name_postfix = kwargs.get('nexus_file_name_postfix',
                                              '.pyevaldata')
    # nexus file defaults to the raw file name/path
    self.nexus_file_name = kwargs.get('nexus_file_name', self.file_name)
    self.nexus_file_path = kwargs.get('nexus_file_path', self.file_path)
    # sets self.nexus_file_exists; requires the nexus_* attributes above
    self.check_nexus_file_exists()
    self.read_all_data = kwargs.get('read_all_data', False)
    self.read_and_forget = kwargs.get('read_and_forget', False)
    self.update_before_read = kwargs.get('update_before_read', False)
    self.use_nexus = kwargs.get('use_nexus', True)
    self.force_overwrite = kwargs.get('force_overwrite', False)
    # update from the source
    self.update()
def __getattr__(self, attr):
"""__getattr__
Allows to access scans as source attributes.
Returns:
scan (Scan): scan object.
"""
if attr.startswith("scan"):
index = attr[4:]
try:
scan_number = int(index)
except ValueError:
raise ValueError('Scan number must be convertable to an integer!')
return self.get_scan(scan_number)
else:
raise AttributeError('\'{:s}\' has no attribute \'{:s}\''.format(__name__, attr))
def __len__(self):
"""Returns length of ``scan_dict``"""
return self.scan_dict.__len__()
def update(self, scan_number_list=[]):
    """update

    update the ``scan_dict`` either from the raw source file/folder
    or from the nexus file.
    The optional ``scan_number_list`` runs the update only if required
    for the included scan.

    Args:
        scan_number_list (list[int]): explicit list of scans
    """
    # accept a single scan number as well as a list
    if not isinstance(scan_number_list, list):
        scan_number_list = [int(scan_number_list)]
    last_scan_number = self.get_last_scan_number()
    # update when no explicit scans were requested, when the most recent
    # scan is among them (it might still be growing), or when any
    # requested scan is missing from the scan_dict
    if (len(scan_number_list) == 0) \
            or (last_scan_number in scan_number_list) \
            or any(list(set(scan_number_list) - set(self.scan_dict.keys()))):
        self.log.info('Update source')
        if self.use_nexus:
            self.log.debug('Updating from nexus')
            # do not combine cases for better flow control
            if not self.nexus_file_exists:
                # first run: parse raw data and generate the nexus file
                self.log.debug('nexus file does not exist')
                self.parse_raw()
                self.save_all_scans_to_nexus()
            elif self.update_before_read:
                self.log.debug('Update before read')
                self.parse_raw()
                self.save_all_scans_to_nexus()
            elif self.force_overwrite:
                self.log.debug('Force overwrite')
                self.parse_raw()
                self.save_all_scans_to_nexus()
            else:
                # nexus file is considered up-to-date - read it directly
                self.parse_nexus()
        else:
            self.log.debug('Updating from raw source')
            self.parse_raw()
    else:
        self.log.debug('Skipping update for scans {:s} '
                       'which are already present in '
                       'scan_dict.'.format(str(scan_number_list)))
def parse_raw(self):
"""parse_raw
Parse the raw source file/folder and populate the `scan_dict`.
"""
raise NotImplementedError('Needs to be implemented!')
def parse_nexus(self):
    """parse_nexus

    Parse the nexus file and populate the `scan_dict`.
    """
    self.log.info('parse_nexus')
    nxs_file_path = path.join(self.nexus_file_path, self.nexus_file_name)
    try:
        nxs_file = nxs.nxload(nxs_file_path, mode='r')
    except nxs.NeXusError:
        # re-raise with the offending path in the message
        raise nxs.NeXusError('NeXus file \'{:s}\' does not exist!'.format(nxs_file_path))
    with nxs_file.nxfile:
        for entry in nxs_file:
            # check for scan number in given range
            # stop_scan_number == -1 means "no upper bound"
            if (nxs_file[entry].number >= self.start_scan_number) and \
                    ((nxs_file[entry].number <= self.stop_scan_number) or
                     (self.stop_scan_number == -1)):
                last_scan_number = self.get_last_scan_number()
                # check if Scan needs to be re-created
                # if scan is not present, its the last one, or force overwrite
                if (nxs_file[entry].number not in self.scan_dict.keys()) or \
                        (nxs_file[entry].number >= last_scan_number) or \
                        self.force_overwrite:
                    # create scan object
                    init_mopo = {}
                    for field in nxs_file[entry].init_mopo:
                        init_mopo[field] = nxs_file[entry]['init_mopo'][field]
                    # NOTE(review): the 'user' meta field is not restored
                    # from the nexus entry here - confirm this is intended
                    scan = Scan(int(nxs_file[entry].number),
                                cmd=nxs_file[entry].cmd,
                                date=nxs_file[entry].date,
                                time=nxs_file[entry].time,
                                int_time=float(nxs_file[entry].int_time),
                                header=nxs_file[entry].header,
                                init_mopo=init_mopo)
                    self.scan_dict[nxs_file[entry].number] = scan
                    # check if the data needs to be read as well
                    if self.read_all_data:
                        self.read_scan_data(self.scan_dict[nxs_file[entry].number])
def check_nexus_file_exists(self):
"""check_nexus_file_exists
Check if the nexus file is present and set `self.nexus_file_exists`.
"""
if path.exists(path.join(self.nexus_file_path, self.nexus_file_name)):
self.nexus_file_exists = True
else:
self.nexus_file_exists = False
def get_last_scan_number(self):
"""get_last_scan_number
Return the number of the last scan in the `scan_dict`.
If the `scan_dict` is empty return 0.
"""
try:
return sorted(self.scan_dict.keys())[-1]
except IndexError:
return 0
def get_all_scan_numbers(self):
"""get_all_scan_numbers
Return the all scan number from the `scan_dict`.
"""
try:
return sorted(self.scan_dict.keys())
except IndexError:
return 0
def get_scan(self, scan_number, read_data=True, dismiss_update=False):
"""get_scan
Returns a scan object from the scan dict determined by the scan_number.
Args:
scan_number (uint): number of the scan.
read_data (bool, optional): read data from source.
Defaults to `False`.
dismiss_update (bool, optional): Dismiss update even if set as
object attribute. Defaults to `False`.
Returns:
scan (Scan): scan object.
"""
self.log.debug('get_scan')
if self.update_before_read and not dismiss_update:
self.update(scan_number)
try:
scan = self.scan_dict[scan_number]
except KeyError:
raise KeyError('Scan #{:d} not found in scan dict.'.format(scan_number))
if read_data:
self.read_scan_data(scan)
return scan
def get_scan_list(self, scan_number_list, read_data=True):
"""get_scan_list
Returns a list of scan object from the `scan_dict` determined by
the list of scan_number.
Args:
scan_number_list (list(uint)): list of numbers of the scan.
read_data (bool, optional): read data from source.
Defaults to `False`.
Returns:
scans (list(Scan)): list of scan object.
"""
self.log.debug('get_scan_list')
if self.update_before_read:
self.update(scan_number_list)
scans = []
for scan_number in scan_number_list:
scan = self.get_scan(scan_number, read_data, dismiss_update=True)
scans.append(scan)
return scans
def get_scan_data(self, scan_number):
"""get_scan_data
Returns data and meta information from a scan object from the `scan_dict`
determined by the scan_number.
Args:
scan_number (uint): number of the scan.
Returns:
data (numpy.recarray[float]): scan data.
meta (dict()): scan meta information.
"""
self.log.debug('get_scan_data')
scan = self.get_scan(scan_number)
if scan.data is not None:
data = scan.data.copy()
else:
data = None
meta = scan.meta.copy()
if self.read_and_forget:
scan.clear_data()
return data, meta
def get_scan_list_data(self, scan_number_list):
"""get_scan_list_data
Returns data and meta information for a list of scan objects from
the `scan_dict` determined by the scan_numbers.
Args:
scan_number_list (list(uint)): list of numbers of the scan.
Returns:
data (list(numpy.recarray[float])): list of scan data.
meta (list(dict())): list scan meta information.
"""
self.log.debug('get_scan_list_data')
data_list = []
meta_list = []
for scan in self.get_scan_list(scan_number_list):
data_list.append(scan.data.copy())
meta_list.append(scan.meta.copy())
if self.read_and_forget:
scan.clear_data()
return data_list, meta_list
def read_scan_data(self, scan):
"""read_scan_data
Reads the data for a given scan object.
Args:
scan (Scan): scan object.
"""
self.log.debug('read_scan_data for scan #{:d}'.format(scan.number))
last_scan_number = self.get_last_scan_number()
if (scan.data is None) or \
(scan.number >= last_scan_number) or self.force_overwrite:
if self.use_nexus:
self.read_nexus_scan_data(scan)
else:
self.read_raw_scan_data(scan)
else:
self.log.debug('data not updated for scan #{:d}'.format(scan.number))
def read_raw_scan_data(self, scan):
"""read_raw_scan_data
Reads the data for a given scan object from raw source.
Args:
scan (Scan): scan object.
"""
raise NotImplementedError('Needs to be implemented!')
def read_nexus_scan_data(self, scan):
    """read_nexus_scan_data

    Reads the data for a given scan object from the nexus file.

    Args:
        scan (Scan): scan object.
    """
    self.log.debug('read_nexus_scan_data for scan #{:d}'.format(scan.number))
    # try to open the file
    nxs_file_path = path.join(self.nexus_file_path, self.nexus_file_name)
    try:
        nxs_file = nxs.nxload(nxs_file_path, mode='r')
    except nxs.NeXusError:
        # re-raise with the offending path in the message
        raise nxs.NeXusError('NeXus file \'{:s}\' does not exist!'.format(nxs_file_path))
    entry_name = 'entry{:d}'.format(scan.number)
    # try to enter entry
    try:
        entry = nxs_file[entry_name]
    except nxs.NeXusError:
        # a missing entry is logged but not fatal - the scan keeps its state
        self.log.exception('Entry #{:d} not present in NeXus file!'.format(scan.number))
        return
    # iterate through data fields
    data_list = []
    dtype_list = []
    for field in entry.data:
        data_list.append(entry.data[field])
        # keep name, dtype, and shape so the recarray mirrors the file layout
        dtype_list.append((field, entry.data[field].dtype, entry.data[field].shape))
    if len(data_list) > 0:
        scan.data = fromarrays(data_list, dtype=dtype_list)
    else:
        # entry contains no data fields at all
        scan.data = None
def clear_scan_data(self, scan):
    """clear_scan_data

    Drop the in-memory data of *scan* to free memory.

    Args:
        scan (Scan): scan object whose data is cleared.

    """
    self.log.debug('clear_scan_data')
    # the scan object owns its data and knows how to release it
    scan.clear_data()
def read_all_scan_data(self):
    """read_all_scan_data

    Reads the data for every scan object currently held in the
    `scan_dict` from the source.

    """
    self.log.debug('read_all_scan_data')
    for scan in self.scan_dict.values():
        self.read_scan_data(scan)
def clear_all_scan_data(self):
    """clear_all_scan_data

    Drops the in-memory data of every scan object currently held in
    the `scan_dict`.

    """
    self.log.debug('clear_all_scan_data')
    for scan in self.scan_dict.values():
        self.clear_scan_data(scan)
def save_scan_to_nexus(self, scan, nxs_file=''):
    """save_scan_to_nexus

    Saves a scan to the nexus file, re-creating its entry from the raw
    source data and the scan's meta information.

    Args:
        scan (Scan): scan object to save.
        nxs_file (NXFile, optional): open NeXus file handle; if empty,
          a handle is acquired via ``get_nexus_file()``.

    """
    if nxs_file == '':
        nxs_file = self.get_nexus_file()
    entry_name = 'entry{:d}'.format(scan.number)
    # evaluate if we need to forget the data again
    if scan.data is None:
        clear_data = True
    else:
        clear_data = False
    # read the raw data
    self.read_raw_scan_data(scan)
    self.log.info('save_scan_to_nexus for scan #{:d}'.format(scan.number))
    with nxs_file.nxfile:
        # if the entry already exists, it must be deleted in advance
        try:
            del nxs_file[entry_name]
        except nxs.NeXusError:
            pass
        # (re-)create entry
        entry = nxs_file[entry_name] = nxs.NXentry()
        # iterate meta information
        for key, value in scan.meta.items():
            if key == 'init_mopo':
                # create dedicated collection for initial motor positions
                entry['init_mopo'] = nxs.NXcollection()
                # iterate through initial motor positions
                for mopo_key, mopo_value in scan.meta['init_mopo'].items():
                    entry.init_mopo[mopo_key] = nxs.NXfield(mopo_value)
            else:
                # add meta information as attribute to entry
                entry.attrs[key] = value
        # create dedicated collection for data
        entry['data'] = nxs.NXcollection()
        # check if there is any data present at all
        if scan.data is not None:
            # iterate data
            for col in scan.data.dtype.names:
                entry.data[col] = nxs.NXfield(scan.data[col])
        # clear data of the scan if it was not present before
        # or read and forget
        if clear_data or self.read_and_forget:
            scan.clear_data()
def save_all_scans_to_nexus(self):
    """save_all_scans_to_nexus

    Saves all scan objects in the `scan_dict` to the nexus file,
    skipping entries that are already present and final.

    """
    self.log.info('save_all_scans_to_nexus')
    nxs_file = self.get_nexus_file()
    # determine the newest scan number already in the NeXus file;
    # str.strip('entry') removes the characters e,n,t,r,y from both ends,
    # leaving the numeric part of names such as 'entry42'
    try:
        last_scan_in_nexus = sorted(int(num.strip('entry')) for num in nxs_file.keys())[-1]
    except IndexError:
        # empty NeXus file - every scan must be written
        last_scan_in_nexus = -1
    for scan_number, scan in self.scan_dict.items():
        entry_name = 'entry{:d}'.format(scan.number)
        try:
            _ = nxs_file[entry_name]
            scan_in_nexus = True
        except (KeyError, nxs.NeXusError):
            scan_in_nexus = False
        # (re-)write the entry if it is missing, it is the most recent
        # scan (which may still have grown), or overwriting is forced
        if (not scan_in_nexus) or (scan.number >= last_scan_in_nexus) \
                or self.force_overwrite:
            self.save_scan_to_nexus(scan, nxs_file)
def get_nexus_file(self, mode='rw'):
    """get_nexus_file

    Return the file handle to the NeXus file in a given ``mode``.
    If the file does not exist yet, an empty NeXus file is created
    first.

    Args:
        mode (str, optional): file mode. defaults to 'rw'.

    Returns:
        nxs_file (NXFile): file handle to NeXus file.

    """
    self.log.debug('get_nexus_file')
    nxs_file_path = path.join(self.nexus_file_path, self.nexus_file_name)
    try:
        # bug fix: the ``mode`` argument was previously ignored and
        # 'rw' was hard-coded, so read-only opens were impossible
        nxs_file = nxs.nxload(nxs_file_path, mode=mode)
    except nxs.NeXusError:
        # file does not exist yet - create an empty NeXus root first
        nxs.NXroot().save(nxs_file_path)
        nxs_file = nxs.nxload(nxs_file_path, mode=mode)
    return nxs_file
@property
def nexus_file_name(self):
    # full NeXus file name including postfix and '.nxs' extension
    return self._nexus_file_name

@nexus_file_name.setter
def nexus_file_name(self, nexus_file_name):
    # NOTE(review): the postfix and '.nxs' extension are appended on every
    # assignment, so setting this property twice compounds the suffix -
    # verify callers only assign it once (e.g. in the constructor)
    self._nexus_file_name = nexus_file_name + self.nexus_file_name_postfix + '.nxs'
@property
def start_scan_number(self):
    # first scan number to be parsed (inclusive)
    return self._start_scan_number

@start_scan_number.setter
def start_scan_number(self, start_scan_number):
    # invalid values are logged and ignored instead of raising
    if start_scan_number < 0:
        self.log.warning('start_scan_number must not be negative!')
        return
    elif (start_scan_number > self.stop_scan_number) and (self.stop_scan_number > -1):
        # stop_scan_number == -1 means "no upper limit", so the ordering
        # check only applies when a real stop number is set
        self.log.warning('start_scan_number must be <= stop_scan_number!')
        return
    else:
        self._start_scan_number = start_scan_number
@property
def stop_scan_number(self):
    # last scan number to be parsed (inclusive); -1 means "no upper limit"
    return self._stop_scan_number

@stop_scan_number.setter
def stop_scan_number(self, stop_scan_number):
    # invalid values are logged and ignored instead of raising
    if stop_scan_number < -1:
        self.log.warning('stop_scan_number cannot be smaller than -1!')
        return
    elif (stop_scan_number < self.start_scan_number) and (stop_scan_number > -1):
        # -1 disables the upper limit, so the ordering check only applies
        # to real stop numbers
        self.log.warning('stop_scan_number must be >= start_scan_number!')
        return
    else:
        self._stop_scan_number = stop_scan_number
|
{"/pyEvalData/__init__.py": ["/pyEvalData/evalData.py", "/pyEvalData/evaluation.py"], "/pyEvalData/io/spec.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/test/conftest.py": ["/pyEvalData/__init__.py"], "/pyEvalData/evaluation.py": ["/pyEvalData/__init__.py", "/pyEvalData/helpers.py"], "/test/test_scan.py": ["/pyEvalData/io/__init__.py"], "/pyEvalData/io/scan.py": ["/pyEvalData/__init__.py"], "/pyEvalData/io/source.py": ["/pyEvalData/__init__.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/palxfel.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/__init__.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py", "/pyEvalData/io/spec.py", "/pyEvalData/io/sardana_nexus.py", "/pyEvalData/io/palxfel.py"], "/pyEvalData/io/sardana_nexus.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"]}
|
29,959,586
|
dschick/pyEvalData
|
refs/heads/develop
|
/test/test_source.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
@pytest.mark.parametrize('mysource, sname, scan_num, scan_delay',
                         [
                            ('source_spec', 'example_file_spec.spec', 1, -0.998557475),
                            ('source_pal', 'pal_file', 40, -33)
                         ])
def test_source(mysource, sname, scan_num, scan_delay, request):
    """Smoke-test a Source fixture: name, scan meta number, and first delay value."""
    # resolve the fixture object by name so one test body covers all sources
    s = request.getfixturevalue(mysource)
    assert s.name == sname
    assert s.get_scan(scan_num).meta['number'] == scan_num
    # first point of the delay axis; approx() for float comparison
    assert s.get_scan(scan_num).data['delay'][0] == pytest.approx(scan_delay)
|
{"/pyEvalData/__init__.py": ["/pyEvalData/evalData.py", "/pyEvalData/evaluation.py"], "/pyEvalData/io/spec.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/test/conftest.py": ["/pyEvalData/__init__.py"], "/pyEvalData/evaluation.py": ["/pyEvalData/__init__.py", "/pyEvalData/helpers.py"], "/test/test_scan.py": ["/pyEvalData/io/__init__.py"], "/pyEvalData/io/scan.py": ["/pyEvalData/__init__.py"], "/pyEvalData/io/source.py": ["/pyEvalData/__init__.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/palxfel.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/__init__.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py", "/pyEvalData/io/spec.py", "/pyEvalData/io/sardana_nexus.py", "/pyEvalData/io/palxfel.py"], "/pyEvalData/io/sardana_nexus.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"]}
|
29,959,587
|
dschick/pyEvalData
|
refs/heads/develop
|
/pyEvalData/io/palxfel.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2015-2021 Daniel Schick
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from numpy.core.records import fromarrays
import os
import h5py
from .source import Source
from .scan import Scan
__all__ = ['PalH5']
__docformat__ = 'restructuredtext'
class PalH5(Source):
    """PalH5

    Source implementation for PalH5 folder/files.

    Args:
        name (str): name of the source
        file_name (str): file name including extension,
          can include regex pattern.
        file_path (str, optional): file path - defaults to ``./``.

    Keyword Args:
        start_scan_number (uint): start of scan numbers to parse.
        stop_scan_number (uint): stop of scan numbers to parse.
          This number is included.
        nexus_file_name (str): name for generated nexus file.
        nexus_file_name_postfix (str): postfix for nexus file name.
        nexus_file_path (str): path for generated nexus file.
        read_all_data (bool): read all data on parsing.
          If false, data will be read only on demand.
        read_and_forget (bool): clear data after read to save memory.
        update_before_read (bool): always update from source
          before reading scan data.
        use_nexus (bool): use nexus file to join/compress raw data.
        force_overwrite (bool): forced re-read of raw source and
          re-generated of nexus file.

    Attributes:
        log (logging.logger): logger instance from logging.
        name (str): name of the source
        scan_dict (dict(scan)): dict of scan objects with
          key being the scan number.
        start_scan_number (uint): start of scan numbers to parse.
        stop_scan_number (uint): stop of scan numbers to parse.
          This number is included.
        file_name (str): file name including extension,
          can include regex pattern.
        file_path (str, optional): file path - defaults to ``./``.
        nexus_file_name (str): name for generated nexus file.
        nexus_file_name_postfix (str): postfix for nexus file name.
        nexus_file_path (str): path for generated nexus file.
        nexus_file_exists(bool): if nexus file exists.
        read_all_data (bool): read all data on parsing.
        read_and_forget (bool): clear data after read to save memory.
        update_before_read (bool): always update from source
          before reading scan data.
        use_nexus (bool): use nexus file to join/compress raw data.
        force_overwrite (bool): forced re-read of raw source and
          re-generated of nexus file.

    """
    def __init__(self, name, file_name, file_path, **kwargs):
        super().__init__(file_name, file_path, **kwargs)
        # PAL sources carry an explicit name instead of deriving it from
        # the file name
        self.name = name

    def parse_raw(self):
        """parse_raw

        Parse the PalH5 folder and populate the `scan_dict`. Each scan is
        expected in a numerically-named sub-folder containing an .h5 file.

        """
        self.log.info('parse_raw')
        if not os.path.exists(self.file_path):
            self.log.error('File path does not exist!')
            return
        for root, subdirectories, files in os.walk(self.file_path):
            for sub_dir in sorted(subdirectories):
                # check for scan number in given range
                try:
                    scan_number = int(sub_dir)
                except ValueError:
                    # non-numeric folder names are not scans
                    self.log.exception('{:s} is no scan folder - skipping'.format(sub_dir))
                    continue
                if (scan_number >= self.start_scan_number) and \
                        ((scan_number <= self.stop_scan_number) or
                         (self.stop_scan_number == -1)):
                    last_scan_number = self.get_last_scan_number()
                    # check if Scan needs to be re-created
                    # if scan is not present, its the last one, or force overwrite
                    if (scan_number not in self.scan_dict.keys()) or \
                            (scan_number >= last_scan_number) or \
                            self.force_overwrite:
                        # create scan object
                        h5_file = os.path.join(self.file_path,
                                               self.file_name.format(scan_number),
                                               self.file_name.format(scan_number) + '.h5')
                        try:
                            with h5py.File(h5_file, 'r') as h5:
                                header = h5['R{0:04d}/header'.format(scan_number)]
                                init_motor_pos = {}
                                for key in header['motor_init_pos'].keys():
                                    init_motor_pos[key] = \
                                        header['motor_init_pos/{:s}'.format(key)][()]
                                # create scan object
                                try:
                                    # this is a fixQ fix: the integration time is
                                    # normally the last token of the scan command;
                                    # for fixQ commands it is the second-to-last
                                    int_time = float(header['scan_cmd'].asstr()[()].split(' ')[-1])
                                except ValueError:
                                    int_time = float(header['scan_cmd'].asstr()[()].split(' ')[-2])
                                scan = Scan(int(scan_number),
                                            cmd=header['scan_cmd'].asstr()[()],
                                            date=header['time'].asstr()[()].split(' ')[0],
                                            time=header['time'].asstr()[()].split(' ')[1],
                                            int_time=int_time,
                                            header='',
                                            init_mopo=init_motor_pos)
                                self.scan_dict[scan_number] = scan
                                # check if the data needs to be read as well
                                if self.read_all_data:
                                    self.read_scan_data(self.scan_dict[scan_number])
                        except OSError:
                            self.log.warning('Could not open file {:s}'.format(h5_file))
                            continue

    def read_raw_scan_data(self, scan):
        """read_raw_scan_data

        Reads the data for a given scan object from the PalH5 file.

        Args:
            scan (Scan): scan object.

        """
        self.log.info('read_raw_scan_data for scan #{:d}'.format(scan.number))
        # try to open the file
        h5_file = os.path.join(self.file_path,
                               self.file_name.format(scan.number),
                               self.file_name.format(scan.number) + '.h5')
        with h5py.File(h5_file, 'r') as h5:
            entry = h5['R{0:04d}'.format(scan.number)]
            # iterate through data fields, collecting values and dtypes
            data_list = []
            dtype_list = []
            for key in entry['scan_dat'].keys():
                # fields with '_raw' in the name are skipped
                if '_raw' not in key:
                    data_list.append(entry['scan_dat'][key])
                    dtype_list.append((key,
                                       entry['scan_dat'][key].dtype,
                                       entry['scan_dat'][key].shape))
            if len(data_list) > 0:
                # assemble all fields into a single structured recarray
                scan.data = fromarrays(data_list, dtype=dtype_list)
            else:
                scan.data = None
|
{"/pyEvalData/__init__.py": ["/pyEvalData/evalData.py", "/pyEvalData/evaluation.py"], "/pyEvalData/io/spec.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/test/conftest.py": ["/pyEvalData/__init__.py"], "/pyEvalData/evaluation.py": ["/pyEvalData/__init__.py", "/pyEvalData/helpers.py"], "/test/test_scan.py": ["/pyEvalData/io/__init__.py"], "/pyEvalData/io/scan.py": ["/pyEvalData/__init__.py"], "/pyEvalData/io/source.py": ["/pyEvalData/__init__.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/palxfel.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/__init__.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py", "/pyEvalData/io/spec.py", "/pyEvalData/io/sardana_nexus.py", "/pyEvalData/io/palxfel.py"], "/pyEvalData/io/sardana_nexus.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"]}
|
29,959,588
|
dschick/pyEvalData
|
refs/heads/develop
|
/pyEvalData/io/__init__.py
|
from .source import Source
from .scan import Scan
from .spec import Spec
from .sardana_nexus import SardanaNeXus
from .palxfel import PalH5
__all__ = ['Scan', 'Source', 'Spec', 'SardanaNeXus', 'PalH5']
|
{"/pyEvalData/__init__.py": ["/pyEvalData/evalData.py", "/pyEvalData/evaluation.py"], "/pyEvalData/io/spec.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/test/conftest.py": ["/pyEvalData/__init__.py"], "/pyEvalData/evaluation.py": ["/pyEvalData/__init__.py", "/pyEvalData/helpers.py"], "/test/test_scan.py": ["/pyEvalData/io/__init__.py"], "/pyEvalData/io/scan.py": ["/pyEvalData/__init__.py"], "/pyEvalData/io/source.py": ["/pyEvalData/__init__.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/palxfel.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/__init__.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py", "/pyEvalData/io/spec.py", "/pyEvalData/io/sardana_nexus.py", "/pyEvalData/io/palxfel.py"], "/pyEvalData/io/sardana_nexus.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"]}
|
29,959,589
|
dschick/pyEvalData
|
refs/heads/develop
|
/pyEvalData/helpers.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2015-2020 Daniel Schick
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
from scipy.stats import binned_statistic
__all__ = ['edges4grid', 'bin_data']
__docformat__ = 'restructuredtext'
def edges4grid(grid):
    """edges4grid

    Returns the edges for a given grid vector as well as the
    corresponding width of these bins.

    The ``grid`` is NOT altered - on purpose!
    So even if the ``grid`` is not *unique* there will be bins of width 0.

    Be also aware of the handling of the first and last bin, as they will
    contain values which will lay outside of the original ``grid``.

    grid     x   x   x   x   x   x   x   x
    edges   |   |   |   |   |   |   |   |   |
    binwidth <---> <---> <---> <---> <---> <---> <---> <--->

    Args:
        grid (ndarray[float]): array of grid points.

    Returns:
        (tuple):
        - *edges (ndarray[float])* - array of edges.
        - *binwidth (ndarray[float])* - array of bin widths.

    """
    spacing = np.diff(grid)
    # the outermost edges extend half a spacing beyond the grid;
    # inner edges sit at the midpoints between neighboring grid points
    first_edge = grid[0] - spacing[0] / 2
    last_edge = grid[-1] + spacing[-1] / 2
    inner_edges = grid[:-1] + spacing / 2
    edges = np.concatenate(([first_edge], inner_edges, [last_edge]))
    binwidth = np.diff(edges)
    return edges, binwidth
def bin_data(y, x, X, statistic='mean'):
    """bin_data

    This is a wrapper around `scipy's binned_statistic
    <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.binned_statistic.html>`_.

    In the first step possible masked elements from the input arrays `x` and
    `y` are removed. The same applies for the new grid array `X` which is also
    sorted and made unique.

    In a second step the edges for the new grid are calculated by
    ``edges4grid`` and used to calculate the new binned values `Y` by using
    ``scipy.stats.binned_statistic``.

    The type of `statistic` can be chosen. In case of `sum` Poisson statistics
    are applied to calculate the standard derivation of the binned values `Y`.
    Also errors due to the horizontal binning are calculated and returned.
    All return values contain only elements with according non-zero bins.

    Arguments:
        y (ndarray[float]): input y array.
        x (ndarray[float]): input x array.
        X (ndarray[float]): new grid array.
        statistic (str, optional): type of statistics used for scipy's
          ``binned_statistic`` - default is ``mean``.

    Returns:
        (tuple):
        - *Y (ndarray[float])* - binned Y data without zero-bins.
        - *X (ndarray[float])* - new X grid array.
        - *Yerr (ndarray[float])* - Error for Y, according to statistic.
        - *Xerr (ndarray[float])* - Error for X, according to statistic.
        - *Ystd (ndarray[float])* - Std for Y, according to statistic.
        - *Xstd (ndarray[float])* - Std for X, according to statistic.
        - *edges (ndarray[float])* - Edges of binned data.
        - *bins (ndarray[float])* - Indices of the bins.
        - *n (ndarray[float])* - Number of values per given bin.

    """
    # get only unmasked data
    idx = ~np.ma.getmask(x)
    idy = ~np.ma.getmask(y)
    y = y[idx & idy].flatten('F')
    x = x[idx & idy].flatten('F')
    idX = ~np.ma.getmask(X)
    # the new grid must be sorted and unique for the edge calculation below
    X = np.unique(np.sort(X[idX].flatten('F')))
    # set non-finite values to 0
    y[~np.isfinite(y)] = 0
    # create bins for the grid
    edges, _ = edges4grid(X)
    if np.array_equal(x, X):
        # no binning since the new grid is the same as the old one
        Y = y
        bins = np.ones_like(Y)
        n = np.ones_like(Y)
    else:
        # do the binning and get the Y results
        Y, _, bins = binned_statistic(x, y, statistic, edges)
        bins = bins.astype(np.int_)
        # count samples per bin; bin index 0 means "outside the edges"
        # and is excluded from the counts
        n = np.bincount(bins[bins > 0], minlength=len(X)+1)
        n = n[1:len(X)+1]
    if np.array_equal(x, X) and statistic != 'sum':
        # if no binning is applied and no Poisson statistics is applied, all
        # errors and stds are set to zero
        Ystd = np.zeros_like(Y)
        Xstd = np.zeros_like(X)
        Yerr = np.zeros_like(Y)
        Xerr = np.zeros_like(X)
    else:
        # calculate the std of X and Y
        if statistic == 'sum':
            # the std and error are calculated as 1/sqrt(N) for each bin
            Ystd = np.sqrt(Y)
            Yerr = Ystd
        else:
            Ystd, _, _ = binned_statistic(x, y, 'std', edges)
            Yerr = Ystd/np.sqrt(n)
        # calculate the std and error for the horizontal x grid
        Xstd, _, _ = binned_statistic(x, x, 'std', edges)
        Xerr = Xstd/np.sqrt(n)
    # remove zero-bins
    Y = Y[n > 0]
    X = X[n > 0]
    Yerr = Yerr[n > 0]
    Xerr = Xerr[n > 0]
    Ystd = Ystd[n > 0]
    Xstd = Xstd[n > 0]
    return Y, X, Yerr, Xerr, Ystd, Xstd, edges, bins, n
|
{"/pyEvalData/__init__.py": ["/pyEvalData/evalData.py", "/pyEvalData/evaluation.py"], "/pyEvalData/io/spec.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/test/conftest.py": ["/pyEvalData/__init__.py"], "/pyEvalData/evaluation.py": ["/pyEvalData/__init__.py", "/pyEvalData/helpers.py"], "/test/test_scan.py": ["/pyEvalData/io/__init__.py"], "/pyEvalData/io/scan.py": ["/pyEvalData/__init__.py"], "/pyEvalData/io/source.py": ["/pyEvalData/__init__.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/palxfel.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/__init__.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py", "/pyEvalData/io/spec.py", "/pyEvalData/io/sardana_nexus.py", "/pyEvalData/io/palxfel.py"], "/pyEvalData/io/sardana_nexus.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"]}
|
29,959,590
|
dschick/pyEvalData
|
refs/heads/develop
|
/test/test_evaluation.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
def test_evaluation(evaluation):
    """End-to-end check of the Evaluation fixture: counter definitions,
    scan-data access, and plotting of a derived counter."""
    data = evaluation
    # a fresh instance starts without counter definitions or a counter list
    assert data.cdef == {}
    assert data.clist == []
    # define derived counters: demodulated pumped signals and their difference
    cdef = {
        'Pumped_demod': 'Pumped-(Unpumped-mean(Unpumped))',
        'PumpedM_demod': 'PumpedM-(UnpumpedM-mean(UnpumpedM))',
        'M': 'Pumped_demod-PumpedM_demod',
    }
    data.cdef = cdef
    data.xcol = 'delay'
    data.clist = ['M']
    data1 = data.get_scan_data(1)
    # first point of the delay axis of scan #1
    assert data1['delay'][0] == pytest.approx(-0.998557475)
    y, x, yerr, xerr, name = data.plot_scans([1])
    # first value of the derived counter 'M' after evaluation
    assert y[data.clist[0]][0] == pytest.approx(0.02183873769)
|
{"/pyEvalData/__init__.py": ["/pyEvalData/evalData.py", "/pyEvalData/evaluation.py"], "/pyEvalData/io/spec.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/test/conftest.py": ["/pyEvalData/__init__.py"], "/pyEvalData/evaluation.py": ["/pyEvalData/__init__.py", "/pyEvalData/helpers.py"], "/test/test_scan.py": ["/pyEvalData/io/__init__.py"], "/pyEvalData/io/scan.py": ["/pyEvalData/__init__.py"], "/pyEvalData/io/source.py": ["/pyEvalData/__init__.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/palxfel.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/__init__.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py", "/pyEvalData/io/spec.py", "/pyEvalData/io/sardana_nexus.py", "/pyEvalData/io/palxfel.py"], "/pyEvalData/io/sardana_nexus.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"]}
|
29,959,591
|
dschick/pyEvalData
|
refs/heads/develop
|
/pyEvalData/__init__.py
|
import logging
import sys
from . import config

# configure root logging to stdout before importing the submodules,
# which create their own loggers on import
logging.basicConfig(stream=sys.stdout,
                    format=config.LOG_FORMAT)

from .evaluation import Evaluation
from . import io
from . import helpers

# public API of the package
__all__ = ['Evaluation', 'io', 'helpers']

__version__ = '1.5.1'
|
{"/pyEvalData/__init__.py": ["/pyEvalData/evalData.py", "/pyEvalData/evaluation.py"], "/pyEvalData/io/spec.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/test/conftest.py": ["/pyEvalData/__init__.py"], "/pyEvalData/evaluation.py": ["/pyEvalData/__init__.py", "/pyEvalData/helpers.py"], "/test/test_scan.py": ["/pyEvalData/io/__init__.py"], "/pyEvalData/io/scan.py": ["/pyEvalData/__init__.py"], "/pyEvalData/io/source.py": ["/pyEvalData/__init__.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/palxfel.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/__init__.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py", "/pyEvalData/io/spec.py", "/pyEvalData/io/sardana_nexus.py", "/pyEvalData/io/palxfel.py"], "/pyEvalData/io/sardana_nexus.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"]}
|
29,959,592
|
dschick/pyEvalData
|
refs/heads/develop
|
/pyEvalData/io/sardana_nexus.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2015-2021 Daniel Schick
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from numpy.core.records import fromarrays
import nexusformat.nexus as nxs
import os.path as path
from .source import Source
from .scan import Scan
__all__ = ['SardanaNeXus']
__docformat__ = 'restructuredtext'
class SardanaNeXus(Source):
    """SardanaNeXus

    Source implementation for Sardana NeXus files.

    Args:
        file_name (str): file name including extension,
          can include regex pattern.
        file_path (str, optional): file path - defaults to ``./``.

    Keyword Args:
        start_scan_number (uint): start of scan numbers to parse.
        stop_scan_number (uint): stop of scan numbers to parse.
          This number is included.
        nexus_file_name (str): name for generated nexus file.
        nexus_file_name_postfix (str): postfix for nexus file name.
        nexus_file_path (str): path for generated nexus file.
        read_all_data (bool): read all data on parsing.
          If false, data will be read only on demand.
        read_and_forget (bool): clear data after read to save memory.
        update_before_read (bool): always update from source
          before reading scan data.
        use_nexus (bool): use nexus file to join/compress raw data.
        force_overwrite (bool): forced re-read of raw source and
          re-generated of nexus file.

    Attributes:
        log (logging.logger): logger instance from logging.
        name (str): name of the source
        scan_dict (dict(scan)): dict of scan objects with
          key being the scan number.
        start_scan_number (uint): start of scan numbers to parse.
        stop_scan_number (uint): stop of scan numbers to parse.
          This number is included.
        file_name (str): file name including extension,
          can include regex pattern.
        file_path (str, optional): file path - defaults to ``./``.
        nexus_file_name (str): name for generated nexus file.
        nexus_file_name_postfix (str): postfix for nexus file name.
        nexus_file_path (str): path for generated nexus file.
        nexus_file_exists(bool): if nexus file exists.
        read_all_data (bool): read all data on parsing.
        read_and_forget (bool): clear data after read to save memory.
        update_before_read (bool): always update from source
          before reading scan data.
        use_nexus (bool): use nexus file to join/compress raw data.
        force_overwrite (bool): forced re-read of raw source and
          re-generated of nexus file.

    """
    def __init__(self, file_name, file_path, **kwargs):
        super().__init__(file_name, file_path, **kwargs)

    def parse_raw(self):
        """parse_raw

        Parse the Sardana NeXus file and populate the `scan_dict`.

        Raises:
            nxs.NeXusError: if the Sardana NeXus file does not exist.

        """
        self.log.info('parse_raw')
        nxs_file_path = path.join(self.file_path, self.file_name)
        try:
            nxs_file = nxs.nxload(nxs_file_path, mode='r')
        except nxs.NeXusError:
            raise nxs.NeXusError('Sardana NeXus file \'{:s}\' does not exist!'.format(
                nxs_file_path))
        with nxs_file.nxfile:
            for entry in nxs_file:
                # check for scan number in given range
                entry_number = int(nxs_file[entry].entry_identifier)
                if (entry_number >= self.start_scan_number) and \
                        ((entry_number <= self.stop_scan_number) or
                         (self.stop_scan_number == -1)):
                    last_scan_number = self.get_last_scan_number()
                    # check if Scan needs to be re-created
                    # if scan is not present, its the last one, or force overwrite
                    if (entry_number not in self.scan_dict.keys()) or \
                            (entry_number >= last_scan_number) or \
                            self.force_overwrite:
                        # collect initial motor positions from the snapshot
                        init_mopo = {}
                        for field in nxs_file[entry].measurement.pre_scan_snapshot:
                            init_mopo[field] = \
                                nxs_file[entry]['measurement/pre_scan_snapshot'][field]
                        # create scan object
                        scan = Scan(int(entry_number),
                                    cmd=nxs_file[entry].title,
                                    date=nxs_file[entry].start_time,
                                    time=nxs_file[entry].start_time,
                                    int_time=float(0),
                                    header='',
                                    init_mopo=init_mopo)
                        self.scan_dict[entry_number] = scan
                        # check if the data needs to be read as well
                        if self.read_all_data:
                            self.read_scan_data(self.scan_dict[entry_number])

    def read_raw_scan_data(self, scan):
        """read_raw_scan_data

        Reads the data for a given scan object from Sardana NeXus file.

        Args:
            scan (Scan): scan object.

        Raises:
            nxs.NeXusError: if the Sardana NeXus file does not exist.

        """
        self.log.info('read_raw_scan_data for scan #{:d}'.format(scan.number))
        # try to open the file
        nxs_file_path = path.join(self.file_path, self.file_name)
        try:
            nxs_file = nxs.nxload(nxs_file_path, mode='r')
        except nxs.NeXusError:
            raise nxs.NeXusError('Sardana NeXus file \'{:s}\' does not exist!'.format(
                nxs_file_path))
        entry_name = 'entry{:d}'.format(scan.number)
        # try to enter entry; a missing entry is logged but not fatal
        try:
            entry = nxs_file[entry_name]
        except nxs.NeXusError:
            self.log.exception('Entry #{:d} not present in NeXus file!'.format(scan.number))
            return
        # iterate through data fields
        data_list = []
        dtype_list = []
        for field in entry.measurement:
            # do not add data which is already in the pre-scan snapshot
            # that is tricky if it is in the snapshot and scanned ...
            if field != 'pre_scan_snapshot':
                data_list.append(entry.measurement[field])
                dtype_list.append((field,
                                   entry.measurement[field].dtype,
                                   entry.measurement[field].shape))
        if len(data_list) > 0:
            scan.data = fromarrays(data_list, dtype=dtype_list)
        else:
            # bug fix: previously assigned ``scan.date = None`` (typo),
            # which left stale data in place instead of marking the scan
            # as empty - matches the sibling sources' behavior
            scan.data = None
|
{"/pyEvalData/__init__.py": ["/pyEvalData/evalData.py", "/pyEvalData/evaluation.py"], "/pyEvalData/io/spec.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/test/conftest.py": ["/pyEvalData/__init__.py"], "/pyEvalData/evaluation.py": ["/pyEvalData/__init__.py", "/pyEvalData/helpers.py"], "/test/test_scan.py": ["/pyEvalData/io/__init__.py"], "/pyEvalData/io/scan.py": ["/pyEvalData/__init__.py"], "/pyEvalData/io/source.py": ["/pyEvalData/__init__.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/palxfel.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"], "/pyEvalData/io/__init__.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py", "/pyEvalData/io/spec.py", "/pyEvalData/io/sardana_nexus.py", "/pyEvalData/io/palxfel.py"], "/pyEvalData/io/sardana_nexus.py": ["/pyEvalData/io/source.py", "/pyEvalData/io/scan.py"]}
|
30,011,190
|
miyou995/msenergy
|
refs/heads/master
|
/main/migrations/0028_auto_20201018_0856.py
|
# Generated by Django 3.0.7 on 2020-10-18 07:56
from django.db import migrations
import django.db.models.expressions
class Migration(migrations.Migration):
    """Auto-generated: order ``Partenaire`` by ``ordre`` with NULL values last."""

    dependencies = [
        ('main', '0027_auto_20201018_0852'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='partenaire',
            # OrderBy(F('ordre'), nulls_last=True) keeps rows without an
            # explicit order at the end of the list
            options={'ordering': [django.db.models.expressions.OrderBy(django.db.models.expressions.F('ordre'), nulls_last=True)]},
        ),
    ]
{"/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"]}
|
30,011,191
|
miyou995/msenergy
|
refs/heads/master
|
/main/migrations/0022_auto_20201017_1403.py
|
# Generated by Django 3.0.7 on 2020-10-17 13:03
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: set the admin display name of ``ContactForm``."""

    dependencies = [
        ('main', '0021_auto_20200930_1131'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='contactform',
            options={'verbose_name': 'Formulaire de contact'},
        ),
    ]
|
{"/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"]}
|
30,011,192
|
miyou995/msenergy
|
refs/heads/master
|
/main/migrations/0016_auto_20200930_0947.py
|
# Generated by Django 3.0.7 on 2020-09-30 08:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0015_auto_20200927_1554'),
]
operations = [
migrations.RemoveField(
model_name='categories_solution',
name='caractéristique_1',
),
migrations.RemoveField(
model_name='categories_solution',
name='caractéristique_10',
),
migrations.RemoveField(
model_name='categories_solution',
name='caractéristique_11',
),
migrations.RemoveField(
model_name='categories_solution',
name='caractéristique_2',
),
migrations.RemoveField(
model_name='categories_solution',
name='caractéristique_3',
),
migrations.RemoveField(
model_name='categories_solution',
name='caractéristique_4',
),
migrations.RemoveField(
model_name='categories_solution',
name='caractéristique_5',
),
migrations.RemoveField(
model_name='categories_solution',
name='caractéristique_6',
),
migrations.RemoveField(
model_name='categories_solution',
name='caractéristique_7',
),
migrations.RemoveField(
model_name='categories_solution',
name='caractéristique_8',
),
migrations.RemoveField(
model_name='categories_solution',
name='caractéristique_9',
),
migrations.AddField(
model_name='categories_solution',
name='image2',
field=models.ImageField(blank=True, upload_to='slides/'),
),
migrations.AddField(
model_name='categories_solution',
name='text1',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='categories_solution',
name='text2',
field=models.TextField(blank=True),
),
]
|
{"/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"]}
|
30,011,193
|
miyou995/msenergy
|
refs/heads/master
|
/main/admin.py
|
from django.contrib import admin
from .models import Slide, Categorie_produit, Categories_Solution, SliderAPropos, Catalogue, ContactForm, Partenaire, Produit2, ProduitDetail, Post
# Register your models here.
class CategoryAdmin(admin.ModelAdmin):
list_display = ('id','name')
list_display_links = ('id','name',)
list_per_page = 40
list_filter = ('name',)
search_fields = ('id', 'name',)
class SolutionCategoryAdmin(CategoryAdmin):
prepopulated_fields = {"slug": ("name",)}
list_display = ('id','ordre','name', 'active')
list_display_links = ('id', 'name')
list_editable = ('ordre','active',)
class SliderAProposAdmin(CategoryAdmin):
pass
class CatalogueAdmin(CategoryAdmin):
list_display = ('id','name', 'categorie')
list_filter = ('name', 'categorie',)
class ContactFormAdmin(CategoryAdmin):
list_display = ('id','name', 'date_added', 'departement')
readonly_fields = ('date_added',)
class PartenairesAdmin(CategoryAdmin):
list_display = ('id', 'ordre', 'name', 'active')
list_editable = ('ordre', 'active')
class Produit2Admin(CategoryAdmin):
list_display = ('id','ordre','name')
list_display_links = ('id','name')
list_editable = ('ordre',)
prepopulated_fields = {"slug": ("name",)}
class ProduitDetailAdmin(CategoryAdmin):
list_display = ('id','name', 'gamme')
list_filter = ('gamme',)
class PostAdmin(admin.ModelAdmin):
list_display = ('id','titre')
list_display_links = ('id','titre',)
prepopulated_fields = {"slug": ("titre",)}
admin.site.register(Slide, CategoryAdmin)
admin.site.register(Categorie_produit, CategoryAdmin)
admin.site.register(Categories_Solution, SolutionCategoryAdmin)
admin.site.register(SliderAPropos, SliderAProposAdmin)
admin.site.register(Catalogue, CatalogueAdmin)
admin.site.register(ContactForm, ContactFormAdmin)
admin.site.register(Partenaire, PartenairesAdmin)
admin.site.register(Produit2, Produit2Admin)
admin.site.register(ProduitDetail, ProduitDetailAdmin)
admin.site.register(Post, PostAdmin)
|
{"/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"]}
|
30,011,194
|
miyou995/msenergy
|
refs/heads/master
|
/main/migrations/0025_auto_20201018_0839.py
|
# Generated by Django 3.0.7 on 2020-10-18 07:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0024_auto_20201017_1504'),
]
operations = [
migrations.AlterModelOptions(
name='produit2',
options={'ordering': ['ordre'], 'verbose_name': 'Produits', 'verbose_name_plural': 'Produits'},
),
migrations.AddField(
model_name='categories_solution',
name='ordre',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='produit2',
name='ordre',
field=models.IntegerField(),
),
]
|
{"/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"]}
|
30,011,195
|
miyou995/msenergy
|
refs/heads/master
|
/main/migrations/0021_auto_20200930_1131.py
|
# Generated by Django 3.0.7 on 2020-09-30 10:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0020_remove_post_published_date'),
]
operations = [
migrations.AddField(
model_name='post',
name='intro',
field=models.CharField(blank=True, max_length=200),
),
migrations.AlterField(
model_name='post',
name='slug',
field=models.SlugField(max_length=100),
),
]
|
{"/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"]}
|
30,011,196
|
miyou995/msenergy
|
refs/heads/master
|
/main/migrations/0001_initial.py
|
# Generated by Django 3.0.7 on 2020-07-13 08:36
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Categorie_produit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.TextField(blank=True)),
('image', models.FileField(blank=True, upload_to='slides/')),
],
options={
'verbose_name': 'Catégorie produit',
'verbose_name_plural': 'Catégories produits',
},
),
migrations.CreateModel(
name='Categories_Solution',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('slug', models.SlugField(blank=True, max_length=100, null=True)),
('active', models.BooleanField(default=True)),
('description', models.TextField(blank=True)),
('image', models.FileField(blank=True, upload_to='slides/')),
('caractéristique_1', models.CharField(blank=True, max_length=100)),
('détail_de_caractéristique_1', models.CharField(blank=True, max_length=100)),
('caractéristique_2', models.CharField(blank=True, max_length=100)),
('détail_de_caractéristique_2', models.CharField(blank=True, max_length=200)),
('caractéristique_3', models.CharField(blank=True, max_length=100)),
('détail_de_caractéristique_3', models.CharField(blank=True, max_length=200)),
('caractéristique_4', models.CharField(blank=True, max_length=100)),
('détail_de_caractéristique_4', models.CharField(blank=True, max_length=200)),
('caractéristique_5', models.CharField(blank=True, max_length=100)),
('détail_de_caractéristique_5', models.CharField(blank=True, max_length=200)),
('caractéristique_6', models.CharField(blank=True, max_length=100)),
('détail_de_caractéristique_6', models.CharField(blank=True, max_length=200)),
('caractéristique_7', models.CharField(blank=True, max_length=100)),
('détail_de_caractéristique_7', models.CharField(blank=True, max_length=200)),
('caractéristique_8', models.CharField(blank=True, max_length=100)),
('détail_de_caractéristique_8', models.CharField(blank=True, max_length=200)),
('caractéristique_9', models.CharField(blank=True, max_length=100)),
('détail_de_caractéristique_9', models.CharField(blank=True, max_length=200)),
('caractéristique_10', models.CharField(blank=True, max_length=100)),
('détail_de_caractéristique_10', models.CharField(blank=True, max_length=200)),
('caractéristique_11', models.CharField(blank=True, max_length=100)),
('détail_de_caractéristique_11', models.CharField(blank=True, max_length=200)),
],
options={
'verbose_name': 'solution',
'verbose_name_plural': 'solutions',
},
),
migrations.CreateModel(
name='Slide',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('title', models.CharField(blank=True, max_length=200)),
('image', models.ImageField(upload_to='slides/')),
],
options={
'verbose_name': "Photo page d'accueil",
'verbose_name_plural': "Photos page d'accueil",
},
),
migrations.CreateModel(
name='Slider_a_propos',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('image', models.ImageField(upload_to='slides/')),
],
options={
'verbose_name': 'photo page a propos',
'verbose_name_plural': 'photos page a propos',
},
),
migrations.CreateModel(
name='Catalogue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('image', models.ImageField(upload_to='slides/')),
('description', models.TextField(blank=True)),
('categorie', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Categorie_produit', verbose_name='catégorie gamme')),
],
options={
'verbose_name': 'gamme',
'verbose_name_plural': 'catalogue',
},
),
]
|
{"/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"]}
|
30,011,197
|
miyou995/msenergy
|
refs/heads/master
|
/main/migrations/0014_auto_20200924_1537.py
|
# Generated by Django 3.0.7 on 2020-09-24 14:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0013_produit_description'),
]
operations = [
migrations.AlterField(
model_name='produit',
name='description',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='produit',
name='sous_titre',
field=models.CharField(blank=True, max_length=100, verbose_name='Sous titre'),
),
]
|
{"/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"]}
|
30,011,198
|
miyou995/msenergy
|
refs/heads/master
|
/main/migrations/0019_auto_20200930_1053.py
|
# Generated by Django 3.0.7 on 2020-09-30 09:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0018_auto_20200930_1047'),
]
operations = [
migrations.AddField(
model_name='post',
name='slug',
field=models.SlugField(blank=True, max_length=100),
),
migrations.AlterField(
model_name='catalogue',
name='categorie',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Categorie_produit', verbose_name='catégorie'),
),
]
|
{"/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"]}
|
30,011,199
|
miyou995/msenergy
|
refs/heads/master
|
/main/migrations/0023_produit2.py
|
# Generated by Django 3.0.7 on 2020-10-17 13:45
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('main', '0022_auto_20201017_1403'),
]
operations = [
migrations.CreateModel(
name='Produit2',
fields=[
('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
('name', models.CharField(max_length=50)),
('slug', models.SlugField(max_length=70)),
('titre', models.CharField(blank=True, max_length=50)),
('sous_titre', models.CharField(blank=True, max_length=100, verbose_name='Sous titre')),
('description', models.TextField(blank=True)),
('image', models.FileField(blank=True, upload_to='slides/')),
('gamme', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Categorie_produit', verbose_name='catégorie gamme')),
],
options={
'verbose_name': 'Produits',
'verbose_name_plural': 'Produits',
},
),
]
|
{"/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"]}
|
30,011,200
|
miyou995/msenergy
|
refs/heads/master
|
/main/migrations/0027_auto_20201018_0852.py
|
# Generated by Django 3.0.7 on 2020-10-18 07:52
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0026_auto_20201018_0849'),
]
operations = [
migrations.AlterModelOptions(
name='partenaire',
options={'ordering': ['ordre', 'name']},
),
]
|
{"/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"]}
|
30,011,201
|
miyou995/msenergy
|
refs/heads/master
|
/main/migrations/0006_contactform_date_added.py
|
# Generated by Django 3.0.7 on 2020-07-27 07:32
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('main', '0005_auto_20200727_0818'),
]
operations = [
migrations.AddField(
model_name='contactform',
name='date_added',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
]
|
{"/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"]}
|
30,011,202
|
miyou995/msenergy
|
refs/heads/master
|
/main/migrations/0026_auto_20201018_0849.py
|
# Generated by Django 3.0.7 on 2020-10-18 07:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0025_auto_20201018_0839'),
]
operations = [
migrations.AlterModelOptions(
name='categories_solution',
options={'ordering': ['ordre'], 'verbose_name': 'solution', 'verbose_name_plural': 'solutions'},
),
migrations.AlterModelOptions(
name='partenaire',
options={'ordering': ['ordre']},
),
migrations.AddField(
model_name='partenaire',
name='ordre',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='produit2',
name='ordre',
field=models.IntegerField(blank=True, null=True),
),
]
|
{"/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"]}
|
30,011,203
|
miyou995/msenergy
|
refs/heads/master
|
/main/migrations/0024_auto_20201017_1504.py
|
# Generated by Django 3.0.7 on 2020-10-17 14:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0023_produit2'),
]
operations = [
migrations.AlterModelOptions(
name='produit',
options={'verbose_name': 'ProduitsB', 'verbose_name_plural': 'ProduitsB'},
),
migrations.RemoveField(
model_name='produit2',
name='titre',
),
migrations.AddField(
model_name='produit2',
name='ordre',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='produit2',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
|
{"/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"]}
|
30,011,204
|
miyou995/msenergy
|
refs/heads/master
|
/main/migrations/0017_auto_20200930_1007.py
|
# Generated by Django 3.0.7 on 2020-09-30 09:07
import ckeditor.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0016_auto_20200930_0947'),
]
operations = [
migrations.AlterField(
model_name='categories_solution',
name='text1',
field=models.TextField(blank=True, verbose_name='Grand titre'),
),
migrations.AlterField(
model_name='categories_solution',
name='text2',
field=ckeditor.fields.RichTextField(blank=True, null=True, verbose_name='Text en plus'),
),
]
|
{"/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"]}
|
30,011,205
|
miyou995/msenergy
|
refs/heads/master
|
/main/migrations/0003_auto_20200713_1141.py
|
# Generated by Django 3.0.7 on 2020-07-13 10:41
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0002_auto_20200713_0941'),
]
operations = [
migrations.RemoveField(
model_name='categories_solution',
name='détail_de_caractéristique_1',
),
migrations.RemoveField(
model_name='categories_solution',
name='détail_de_caractéristique_10',
),
migrations.RemoveField(
model_name='categories_solution',
name='détail_de_caractéristique_11',
),
migrations.RemoveField(
model_name='categories_solution',
name='détail_de_caractéristique_2',
),
migrations.RemoveField(
model_name='categories_solution',
name='détail_de_caractéristique_3',
),
migrations.RemoveField(
model_name='categories_solution',
name='détail_de_caractéristique_4',
),
migrations.RemoveField(
model_name='categories_solution',
name='détail_de_caractéristique_5',
),
migrations.RemoveField(
model_name='categories_solution',
name='détail_de_caractéristique_6',
),
migrations.RemoveField(
model_name='categories_solution',
name='détail_de_caractéristique_7',
),
migrations.RemoveField(
model_name='categories_solution',
name='détail_de_caractéristique_8',
),
migrations.RemoveField(
model_name='categories_solution',
name='détail_de_caractéristique_9',
),
]
|
{"/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"]}
|
30,011,206
|
miyou995/msenergy
|
refs/heads/master
|
/main/migrations/0009_produit.py
|
# Generated by Django 3.0.7 on 2020-09-24 14:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0008_auto_20200730_1053'),
]
operations = [
migrations.CreateModel(
name='Produit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.TextField(blank=True)),
('image', models.FileField(blank=True, upload_to='slides/')),
],
options={
'verbose_name': 'Produits',
'verbose_name_plural': 'Produits',
},
),
]
|
{"/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"]}
|
30,011,207
|
miyou995/msenergy
|
refs/heads/master
|
/main/migrations/0018_auto_20200930_1047.py
|
# Generated by Django 3.0.7 on 2020-09-30 09:47
import ckeditor.fields
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('main', '0017_auto_20200930_1007'),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('titre', models.CharField(max_length=200)),
('image', models.ImageField(blank=True, upload_to='slides/', verbose_name='Image')),
('text', ckeditor.fields.RichTextField(blank=True, null=True, verbose_name='Article')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
],
),
migrations.AlterField(
model_name='categories_solution',
name='image',
field=models.ImageField(blank=True, upload_to='slides/', verbose_name='Image principale'),
),
]
|
{"/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"]}
|
30,011,208
|
miyou995/msenergy
|
refs/heads/master
|
/main/migrations/0005_auto_20200727_0818.py
|
# Generated by Django 3.0.7 on 2020-07-27 07:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0004_auto_20200726_1517'),
]
operations = [
migrations.RenameField(
model_name='contactform',
old_name='département',
new_name='departement',
),
]
|
{"/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"]}
|
30,011,209
|
miyou995/msenergy
|
refs/heads/master
|
/main/migrations/0002_auto_20200713_0941.py
|
# Generated by Django 3.0.7 on 2020-07-13 08:41
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('main', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='categories_solution',
name='slug',
field=models.SlugField(blank=True, default=django.utils.timezone.now, max_length=100),
preserve_default=False,
),
]
|
{"/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"]}
|
30,011,210
|
miyou995/msenergy
|
refs/heads/master
|
/main/migrations/0004_auto_20200726_1517.py
|
# Generated by Django 3.0.7 on 2020-07-26 14:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0003_auto_20200713_1141'),
]
operations = [
migrations.CreateModel(
name='ContactForm',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('département', models.CharField(choices=[('C', 'Commercial'), ('D', 'Direction'), ('M', 'Marketing'), ('SC', 'Service client')], default='D', max_length=2)),
('email', models.EmailField(max_length=254)),
('phone', models.CharField(max_length=20)),
('subject', models.CharField(max_length=50)),
('fichier', models.FileField(blank=True, max_length=20, upload_to='fichiers/% d/% m/% Y/')),
('message', models.TextField()),
],
),
migrations.RenameModel(
old_name='Slider_a_propos',
new_name='SliderAPropos',
),
]
|
{"/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"]}
|
30,011,211
|
miyou995/msenergy
|
refs/heads/master
|
/main/migrations/0013_produit_description.py
|
# Generated by Django 3.0.7 on 2020-09-24 14:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0012_auto_20200924_1532'),
]
operations = [
migrations.AddField(
model_name='produit',
name='description',
field=models.TextField(blank=True, verbose_name='Sous titre'),
),
]
|
{"/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"]}
|
30,011,212
|
miyou995/msenergy
|
refs/heads/master
|
/main/migrations/0012_auto_20200924_1532.py
|
# Generated by Django 3.0.7 on 2020-09-24 14:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0011_produit_slug'),
]
operations = [
migrations.RemoveField(
model_name='produit',
name='description',
),
migrations.AddField(
model_name='produit',
name='sous_titre',
field=models.TextField(blank=True, verbose_name='Sous titre'),
),
migrations.AddField(
model_name='produit',
name='titre',
field=models.CharField(blank=True, max_length=50),
),
migrations.CreateModel(
name='ProduitDetail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('image', models.FileField(blank=True, upload_to='slides/')),
('gamme', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Produit', verbose_name='catégorie gamme')),
],
),
]
|
{"/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"]}
|
30,011,213
|
miyou995/msenergy
|
refs/heads/master
|
/main/models.py
|
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.db.models import F
import uuid
from ckeditor.fields import RichTextField
# Create your models here.
class Slide(models.Model):
name = models.CharField(max_length=50)
title = models.CharField(max_length= 200, blank = True)
image = models.ImageField(upload_to= 'slides/')
class Meta:
verbose_name = "Photo page d'accueil"
verbose_name_plural = "Photos page d'accueil"
class Categorie_produit(models.Model):
name = models.CharField(max_length=50)
description = models.TextField(blank= True)
image = models.FileField(upload_to='slides/', blank= True)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse("catalogue", kwargs={"pk": self.pk})
class Meta:
verbose_name = 'Catégorie produit'
verbose_name_plural = 'Catégories produits'
class Produit(models.Model):
name = models.CharField( max_length=50)
slug = models.SlugField( max_length=70)
titre = models.CharField( max_length=50, blank= True)
gamme = models.ForeignKey("Categorie_produit", verbose_name=("catégorie gamme"), on_delete=models.CASCADE)
sous_titre = models.CharField(max_length=100, verbose_name=("Sous titre"), blank= True)
description = models.TextField(blank= True)
image = models.FileField(upload_to='slides/', blank= True)
def __str__(self):
return self.name
class Meta:
verbose_name = 'ProduitsB'
verbose_name_plural = 'ProduitsB'
class Produit2(models.Model):
ordre = models.IntegerField(blank=True, null=True)
name = models.CharField( max_length=50)
slug = models.SlugField( max_length=70)
gamme = models.ForeignKey("Categorie_produit", verbose_name=("catégorie gamme"), on_delete=models.CASCADE)
sous_titre = models.CharField(max_length=100, verbose_name=("Sous titre"), blank= True)
description = models.TextField(blank= True)
image = models.FileField(upload_to='slides/', blank= True)
def __str__(self):
return self.name
class Meta:
ordering = ['ordre']
verbose_name = 'Produits'
verbose_name_plural = 'Produits'
class ProduitDetail(models.Model):
name = models.CharField(max_length=50)
gamme = models.ForeignKey("Produit2", verbose_name=("catégorie gamme"), on_delete=models.CASCADE)
image = models.FileField(upload_to='slides/', blank= True)
def __str__(self):
return self.name
def __unicode__(self):
return
class Categories_Solution(models.Model):
ordre = models.IntegerField(blank=True, null=True )
name = models.CharField(max_length=50)
slug = models.SlugField(max_length=100, blank=True)
text1 = models.TextField(verbose_name='Grand titre' ,blank= True)
image = models.ImageField(verbose_name='Image principale' ,upload_to='slides/', blank= True)
image2 = models.ImageField(upload_to='slides/', blank= True)
description = models.TextField(blank= True)
text2 = RichTextField(verbose_name='Text en plus', blank= True, null=True)
active = models.BooleanField(default=True)
class Meta:
ordering = ['ordre']
verbose_name = 'solution'
verbose_name_plural = 'solutions'
class SliderAPropos(models.Model):
name = models.CharField(max_length=50)
image = models.ImageField(upload_to='slides/')
class Meta:
verbose_name = 'photo page a propos'
verbose_name_plural = 'photos page a propos'
class Catalogue(models.Model):
categorie = models.ForeignKey("Categorie_produit", verbose_name=("catégorie"), on_delete=models.CASCADE)
name = models.CharField(max_length=50)
image = models.ImageField(upload_to='slides/')
description = models.TextField(blank= True)
# # def get_absolute_url(self):
# # # return reverse("gamme", kwargs={'id': self.id, 'categorie_id': self.categorie_id})
# # return "/catalogue/%i/" %self.id
def __str__(self):
return self.name
class Meta:
verbose_name = 'gamme'
verbose_name_plural = 'catalogue'
DEPARTEMENT_CHOICES=[
('C', 'Commercial'),
('D', 'Direction'),
('M', 'Marketing'),
('SC', 'Service client'),
]
class ContactForm(models.Model):
name = models.CharField(max_length=50)
departement = models.CharField(max_length=2, choices=DEPARTEMENT_CHOICES, default='D',)
email = models.EmailField()
phone = models.CharField(max_length=20)
subject = models.CharField(max_length=50)
fichier = models.FileField(upload_to='fichiers/% d/% m/% Y/', max_length=20, blank=True)
message = models.TextField(blank=True)
date_added = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.name
class Meta:
verbose_name = 'Formulaire de contact'
class Partenaire(models.Model):
ordre = models.IntegerField(blank=True, null=True)
name = models.CharField( max_length=50)
logo = models.ImageField(upload_to='part/')
url_marque = models.URLField(max_length=200, blank=True)
active = models.BooleanField(default=True)
def __str__(self):
return self.name
class Meta:
ordering = [F('ordre').asc(nulls_last=True)]
class Post(models.Model):
titre = models.CharField(max_length=200)
slug = models.SlugField(max_length=100)
intro = models.CharField(max_length=200, blank=True)
image = models.ImageField(verbose_name='Image' ,upload_to='slides/', blank= True)
text = RichTextField(verbose_name='Article', blank= True, null=True)
created_date = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.titre
|
{"/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"]}
|
30,129,750
|
aidotse/Team-Yenomze
|
refs/heads/main
|
/src/dataloader/TestDataset.py
|
import cv2
import math
import numpy as np
from monai.utils import NumpyPadMode
from monai.transforms import LoadImage
from torch.utils.data import IterableDataset
from src.util.DataUtils import get_mag_level, preprocess
from typing import Any, Callable, Hashable, Optional, Sequence, Tuple, Union
class OverlappyGridyDataset(IterableDataset):
def __init__(self,
data: Sequence,
patch_size: int,
overlap_ratio: float,
data_reader: Callable):
"""
Args:
data: input data to load and transform to generate dataset for model.
transform: a callable data transform on input data.
"""
self.patch_size = patch_size
self.overlap_ratio = overlap_ratio
self.data = data
# Get mag level of file
self.mag_level = get_mag_level(self.data[0])
if self.mag_level == "20x":
self.sample_patch_size = self.patch_size // 2
self.resize=True
else:
self.sample_patch_size = self.patch_size
self.resize=False
self.overlap_pix = int(overlap_ratio*self.sample_patch_size)
self.nonoverlap_pix = int((1-overlap_ratio)*self.sample_patch_size)
self.start_pos = ()
self.mode = NumpyPadMode.WRAP
self.image_reader = LoadImage(data_reader, image_only=True)
self.img = np.expand_dims(np.stack([self.image_reader(x) for x in self.data]), axis=0)
#self.img = self.img / 30000.0
#self.img = (np.log(1 + self.img) - 5.5)/5.5
# Preprocessing - 1,10,256,256
# if not is_test:
# self.img[0][7,:,:] = preprocess(self.img[0,7,:,:], self.mag_level, "C01")
# self.img[0][8,:,:] = preprocess(self.img[0,8,:,:], self.mag_level, "C02")
# self.img[0][9,:,:] = preprocess(self.img[0,9,:,:], self.mag_level, "C03")
self.img[0][:7,:,:] = preprocess(self.img[0,:7,:,:], self.mag_level, "C04")
self.img_h, self.img_w = self.img.shape[-2:]
self.num_grids_h = math.ceil(self.img_h/self.nonoverlap_pix)
self.num_grids_w = math.ceil(self.img_w/self.nonoverlap_pix)
def __len__(self) -> int:
return self.get_num_patches()
def get_num_patches(self):
return self.num_grids_h*self.num_grids_w
def merge_patches(self, patches):
num_pred_matrix = np.zeros(self.img.shape[-2:])
img_merged = np.zeros(self.img.shape[-2:])
i = 0
for h in range(self.num_grids_h):
for w in range(self.num_grids_w):
if self.resize:
patch = cv2.resize(patches[i].numpy(), (self.sample_patch_size, self.sample_patch_size), interpolation = cv2.INTER_CUBIC)
else:
patch = patches[i].numpy()
slice_h_start = 0
slice_w_start = 0
if h == (self.num_grids_h-1) and w == (self.num_grids_w-1):
slice_h_start = self.img_h
slice_w_start = self.img_w
elif h == (self.num_grids_h-1):
slice_h_end = self.img_h
slice_w_end = min(self.nonoverlap_pix*w + self.sample_patch_size, self.img_w)
elif w == (self.num_grids_w-1):
slice_h_end = min(self.nonoverlap_pix*h + self.sample_patch_size, self.img_h)
slice_w_end = self.img_w
else:
slice_h_end = min(self.nonoverlap_pix*h + self.sample_patch_size, self.img_h)
slice_w_end = min(self.nonoverlap_pix*w + self.sample_patch_size, self.img_w)
slice_h_start = slice_h_end - self.sample_patch_size
slice_w_start = slice_w_end - self.sample_patch_size
img_merged[slice_h_start: slice_h_end, slice_w_start: slice_w_end] = img_merged[slice_h_start: slice_h_end, slice_w_start: slice_w_end] + patch
num_pred_matrix[slice_h_start: slice_h_end, slice_w_start: slice_w_end] = num_pred_matrix[slice_h_start: slice_h_end, slice_w_start: slice_w_end] + 1.0
i += 1
img_merged = img_merged / num_pred_matrix
return img_merged
def __iter__(self):
    """Yield grid patches of the image in row-major order.

    Each patch has spatial size (sample_patch_size, sample_patch_size);
    border patches are anchored to the image edge, so neighbours overlap
    when the image size is not a multiple of `nonoverlap_pix`. When
    `self.resize` is set, each channel slice is resized to `patch_size`.
    """
    for h in range(self.num_grids_h):
        for w in range(self.num_grids_w):
            if h == (self.num_grids_h - 1) and w == (self.num_grids_w - 1):
                # BUGFIX: this corner branch previously set the START
                # coordinates to the image extents (then overwrote them
                # below), leaving slice_h_end/slice_w_end with stale
                # values from the previous iteration. Anchor the END
                # coordinates to the image borders instead, matching the
                # other border branches (and merge_patches).
                slice_h_end = self.img_h
                slice_w_end = self.img_w
            elif h == (self.num_grids_h - 1):
                slice_h_end = self.img_h
                slice_w_end = min(self.nonoverlap_pix * w + self.sample_patch_size, self.img_w)
            elif w == (self.num_grids_w - 1):
                slice_h_end = min(self.nonoverlap_pix * h + self.sample_patch_size, self.img_h)
                slice_w_end = self.img_w
            else:
                slice_h_end = min(self.nonoverlap_pix * h + self.sample_patch_size, self.img_h)
                slice_w_end = min(self.nonoverlap_pix * w + self.sample_patch_size, self.img_w)
            slice_h_start = slice_h_end - self.sample_patch_size
            slice_w_start = slice_w_end - self.sample_patch_size
            img_patch = self.img[:, :, slice_h_start: slice_h_end, slice_w_start: slice_w_end]
            if self.resize:
                img_resized = []
                for img_patch_slice in img_patch[0]:
                    img_resized.append(cv2.resize(img_patch_slice,
                                                  (self.patch_size, self.patch_size),
                                                  interpolation=cv2.INTER_CUBIC))
                img_patch = np.expand_dims(np.stack(img_resized, axis=0), axis=0)
            yield img_patch
|
{"/train_model_final.py": ["/data_utils.py"], "/_train.py": ["/FlowArrayDataset.py"], "/src/dataloader/TestDataset.py": ["/src/util/DataUtils.py"], "/src/dataloader/TrainDataset.py": ["/src/util/DataUtils.py"], "/src/model_handler/TestHandler.py": ["/src/util/DataUtils.py", "/src/model/Generator.py"], "/train.py": ["/src/model_handler/TrainHandler.py"], "/test.py": ["/src/model_handler/TestHandler.py", "/src/dataloader/TestDataset.py", "/src/model/Generator.py"], "/src/model_handler/TrainHandler.py": ["/src/dataloader/TrainDataset.py", "/src/dataloader/ValidationDataset.py", "/src/loss/VGGLoss.py", "/src/model/Discriminator.py", "/src/model/Generator.py", "/src/util/DataUtils.py"]}
|
30,129,751
|
aidotse/Team-Yenomze
|
refs/heads/main
|
/src/model/Generator.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
# modified from https://github.com/milesial/Pytorch-UNet
class DoubleConv(nn.Module):
    """Two successive (conv3x3 -> dropout -> batch-norm -> LeakyReLU) stages.

    LeakyReLU with negative slope 0.1 follows Rivenson et al.; adapted
    from https://github.com/milesial/Pytorch-UNet.
    """

    def __init__(self, in_channels, out_channels, mid_channels=None):
        super().__init__()
        # Default the hidden width to the output width.
        hidden = mid_channels if mid_channels else out_channels
        stages = []
        for n_in, n_out in ((in_channels, hidden), (hidden, out_channels)):
            stages.extend([
                nn.Conv2d(n_in, n_out, kernel_size=3, padding=1),
                nn.Dropout2d(p=0.2),
                nn.BatchNorm2d(n_out),
                nn.LeakyReLU(0.1),
            ])
        self.double_conv = nn.Sequential(*stages)

    def forward(self, x):
        """Apply both convolutional stages; spatial size is preserved."""
        return self.double_conv(x)
class Down(nn.Module):
    """Downscale spatially (3x3 average pool, stride 2) then DoubleConv.

    Average pooling replaces the usual U-Net max pooling, as in
    Rivenson et al.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        layers = [
            nn.AvgPool2d(kernel_size=3, stride=2),
            DoubleConv(in_channels, out_channels),
        ]
        self.pool_conv = nn.Sequential(*layers)

    def forward(self, x):
        """Pool then double-convolve."""
        return self.pool_conv(x)
class Up(nn.Module):
    """Bilinear 2x upsample, pad to the skip tensor's size, then DoubleConv."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # Bilinear upsampling keeps the channel count; the double
        # convolution that follows reduces it (hidden width in_channels // 2).
        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)

    def forward(self, x1, x2):
        """Upsample `x1`, align it with skip tensor `x2`, concat, convolve."""
        upsampled = self.up(x1)
        # Tensors are NCHW; pad so the upsampled map matches the skip map.
        height_gap = x2.size()[2] - upsampled.size()[2]
        width_gap = x2.size()[3] - upsampled.size()[3]
        # if you have padding issues, see
        # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
        # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
        upsampled = F.pad(upsampled, [width_gap // 2, width_gap - width_gap // 2,
                                      height_gap // 2, height_gap - height_gap // 2])
        merged = torch.cat([x2, upsampled], dim=1)
        return self.conv(merged)
class OutConv(nn.Module):
    """Final 1x1 convolution mapping features to the output channel(s),
    squashed into (0, 1) by a sigmoid."""

    def __init__(self, in_channels, out_channels):
        super(OutConv, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
        # tanh kept as an attribute for checkpoint/attribute compatibility;
        # it is not used in forward (earlier tanh-based variants were dropped).
        self.tanh = nn.Tanh()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Project to output channels and squash to (0, 1)."""
        #x = self.tanh(x)
        #x = (1.0 + self.tanh(x))/2.0
        features = self.conv(x)
        return self.sigmoid(features)
# Generator network
class GeneratorUnet(nn.Module):
    """Multi-input / multi-output U-Net generator.

    Seven brightfield focal planes enter through independent DoubleConv
    stems; their features are concatenated and passed through a 4-level
    U-Net encoder/decoder. Three OutConv heads emit the predicted
    fluorescence channels (C01, C02, C03), each squashed to (0, 1).

    Args:
        split: if True, use three separate final up-sampling stages (one
            per output head); otherwise a single shared final up stage
            feeds all three output convolutions.
    """

    def __init__(self, split = False):
        super(GeneratorUnet, self).__init__()
        self.split = split
        # Channel-halving factor applied at the bottleneck and up path.
        factor = 2
        # self.inc = DoubleConv(1, 32) # 1 or 3
        # Real parameters
        # self.inc1 = DoubleConv(1, 32)
        # self.inc2 = DoubleConv(1, 32)
        # self.inc3 = DoubleConv(1, 32)
        # self.inc4 = DoubleConv(1, 32)
        # self.inc5 = DoubleConv(1, 32)
        # self.inc6 = DoubleConv(1, 32)
        # self.inc7 = DoubleConv(1, 32)
        # self.down1 = Down(32*7, 64*7)
        # self.down2 = Down(64*7, 128*7)
        # self.down3 = Down(128*7, 256*7)
        # self.down4 = Down(256*7, 512*7 // factor)
        # self.up1 = Up(512*7, 256*7 // factor)
        # self.up2 = Up(256*7, 128*7 // factor)
        # self.up3 = Up(128*7, 64*7 // factor)
        # self.up4 = Up(64*7, 32*7)
        # self.outc1 = OutConv(32*7, 1)
        # self.outc2 = OutConv(32*7, 1)
        # self.outc3 = OutConv(32*7, 1)
        # Tiny parameters
        # One encoder stem per focal plane; widths below are scaled by the
        # 7 concatenated inputs.
        self.inc1 = DoubleConv(1, 16)
        self.inc2 = DoubleConv(1, 16)
        self.inc3 = DoubleConv(1, 16)
        self.inc4 = DoubleConv(1, 16)
        self.inc5 = DoubleConv(1, 16)
        self.inc6 = DoubleConv(1, 16)
        self.inc7 = DoubleConv(1, 16)
        self.down1 = Down(16*7, 32*7)
        self.down2 = Down(32*7, 64*7)
        self.down3 = Down(64*7, 128*7)
        self.down4 = Down(128*7, 256*7 // factor)
        self.up1 = Up(256*7, 128*7 // factor)
        self.up2 = Up(128*7, 64*7 // factor)
        self.up3 = Up(64*7, 32*7 // factor)
        if self.split:
            #self.up3_1 = Up(64*7, 32*7 // factor)
            #self.up3_2 = Up(64*7, 32*7 // factor)
            #self.up3_3 = Up(64*7, 32*7 // factor)
            # Separate final decoder stage per output channel.
            self.up4_1 = Up(32*7, 16*7)
            self.up4_2 = Up(32*7, 16*7)
            self.up4_3 = Up(32*7, 16*7)
        else:
            self.up4 = Up(32*7, 16*7)
        self.outc1 = OutConv(16*7, 1)
        self.outc2 = OutConv(16*7, 1)
        self.outc3 = OutConv(16*7, 1)

    def forward(self, input1, input2, input3, input4, input5, input6, input7):
        """Predict the three fluorescence channels from seven focal planes.

        Args:
            input1..input7: single-channel image tensors (N, 1, H, W),
                one per brightfield focal plane.

        Returns:
            Tuple (out1, out2, out3) of single-channel tensors in (0, 1).
        """
        # Idea of computing multi-input-multi-output from Ferdian et al (4DflowNET)
        # convolve each input respectively (6 x1s)
        # ins = [input1, input2, input3, input4, input5, input6, input7]
        # first_activations = [inconv(inp) for inp, inconv in zip(ins, self.inc_list)]
        # print('inc :' + str(inc1.size()))
        # now concat 6 inputs
        inc1 = self.inc1(input1)
        inc2 = self.inc2(input2)
        inc3 = self.inc3(input3)
        inc4 = self.inc4(input4)
        inc5 = self.inc5(input5)
        inc6 = self.inc6(input6)
        inc7 = self.inc7(input7)
        # Concatenate the per-plane features along the channel axis.
        x_concat = torch.cat((inc1, inc2, inc3, inc4, inc5, inc6, inc7), dim=1)
        # print('concat :' + str(x_concat.size()))
        # Encoder.
        x2 = self.down1(x_concat)
        x3 = self.down2(x2)
        x4 = self.down3(x3)
        x5 = self.down4(x4)
        # Decoder with skip connections.
        x = self.up1(x5, x4)
        x = self.up2(x, x3)
        x = self.up3(x, x2)
        if self.split:
            #ssemi_out1 = self.up3_1(x, x2)
            #ssemi_out2 = self.up3_2(x, x2)
            #ssemi_out3 = self.up3_3(x, x2)
            # Per-channel final decoder stages.
            semi_out1 = self.up4_1(x, x_concat)
            semi_out2 = self.up4_2(x, x_concat)
            semi_out3 = self.up4_3(x, x_concat)
            out1 = self.outc1(semi_out1)
            out2 = self.outc2(semi_out2)
            out3 = self.outc3(semi_out3)
        else:
            x = self.up4(x, x_concat)
            # now multi ouput for 3 channels respectively
            out1 = self.outc1(x)
            out2 = self.outc2(x)
            out3 = self.outc3(x)
        return out1, out2, out3
|
{"/train_model_final.py": ["/data_utils.py"], "/_train.py": ["/FlowArrayDataset.py"], "/src/dataloader/TestDataset.py": ["/src/util/DataUtils.py"], "/src/dataloader/TrainDataset.py": ["/src/util/DataUtils.py"], "/src/model_handler/TestHandler.py": ["/src/util/DataUtils.py", "/src/model/Generator.py"], "/train.py": ["/src/model_handler/TrainHandler.py"], "/test.py": ["/src/model_handler/TestHandler.py", "/src/dataloader/TestDataset.py", "/src/model/Generator.py"], "/src/model_handler/TrainHandler.py": ["/src/dataloader/TrainDataset.py", "/src/dataloader/ValidationDataset.py", "/src/loss/VGGLoss.py", "/src/model/Discriminator.py", "/src/model/Generator.py", "/src/util/DataUtils.py"]}
|
30,129,752
|
aidotse/Team-Yenomze
|
refs/heads/main
|
/src/dataloader/TrainDataset.py
|
import numpy as np
from torch.utils.data import Dataset as _TorchDataset
from monai.transforms import apply_transform, LoadImage, RandSpatialCropSamples
from typing import Any, Callable, Hashable, Optional, Sequence, Tuple, Union
from src.util.DataUtils import get_mag_level, preprocess
class OurDataset(_TorchDataset):
    """Training dataset: random 3-D crops from 10-channel image stacks.

    Each entry of `data` is a tuple of 10 file paths: channels 0-6 are
    brightfield focal planes (normalized with the "C04" statistics) and
    channels 7-9 are the fluorescence targets C01, C02, C03.
    """

    def __init__(self,
                 data: Sequence,
                 samples_per_image: int,
                 roi_size: int,
                 data_reader: Callable,
                 transform: Optional[Callable] = None) -> None:
        """
        Args:
            data: input data to load and transform to generate dataset for model.
            samples_per_image: number of random crops drawn from each stack.
            roi_size: spatial side length of each crop (the full depth of
                10 channels is always kept).
            data_reader: monai image reader used to load each file.
            transform: a callable data transform on input data.
        """
        self.samples_per_image = samples_per_image
        self.roi_size = (10, roi_size, roi_size)
        self.data = data
        self.image_reader = LoadImage(data_reader, image_only=True)
        self.transform = transform
        # Draws `samples_per_image` random crops per stack (fixed size).
        self.sampler = RandSpatialCropSamples(roi_size=self.roi_size,
                                              num_samples=self.samples_per_image,
                                              random_center=True,
                                              random_size=False)

    def __len__(self) -> int:
        # Each image stack contributes `samples_per_image` crops.
        return len(self.data) * self.samples_per_image

    def __getitem__(self, index: int):
        # Map the flat index to (image, crop-within-image).
        image_id = int(index / self.samples_per_image)
        image_paths = self.data[image_id]
        # Stack the 10 files into a (1, 10, H, W) array.
        images = np.expand_dims(np.stack([self.image_reader(x) for x in image_paths]), axis=0)
        # Get mag level of file
        mag_level = get_mag_level(image_paths[0])
        patches = self.sampler(images)
        if len(patches) != self.samples_per_image:
            # NOTE(review): this raises a Warning subclass as an exception;
            # a ValueError (or warnings.warn) would be more conventional.
            raise RuntimeWarning(
                f"`patch_func` must return a sequence of length: samples_per_image={self.samples_per_image}.")
        # Crop index within this image; the sign multiplier keeps the
        # index positive for negative `index` values.
        patch_id = (index - image_id * self.samples_per_image) * (-1 if index < 0 else 1)
        patch = patches[patch_id]
        if self.transform is not None:
            # Preprocessing - 1,10,256,256
            # Channels 7-9 are the targets C01-C03; channels 0-6 are the
            # brightfield planes, normalized with the C04 statistics.
            patch[0,7,:,:] = preprocess(patch[0,7,:,:], mag_level, "C01")
            patch[0,8,:,:] = preprocess(patch[0,8,:,:], mag_level, "C02")
            patch[0,9,:,:] = preprocess(patch[0,9,:,:], mag_level, "C03")
            patch[0,:7,:,:] = preprocess(patch[0,:7,:,:], mag_level, "C04")
            patch = apply_transform(self.transform, patch, map_items=False)
        return patch
|
{"/train_model_final.py": ["/data_utils.py"], "/_train.py": ["/FlowArrayDataset.py"], "/src/dataloader/TestDataset.py": ["/src/util/DataUtils.py"], "/src/dataloader/TrainDataset.py": ["/src/util/DataUtils.py"], "/src/model_handler/TestHandler.py": ["/src/util/DataUtils.py", "/src/model/Generator.py"], "/train.py": ["/src/model_handler/TrainHandler.py"], "/test.py": ["/src/model_handler/TestHandler.py", "/src/dataloader/TestDataset.py", "/src/model/Generator.py"], "/src/model_handler/TrainHandler.py": ["/src/dataloader/TrainDataset.py", "/src/dataloader/ValidationDataset.py", "/src/loss/VGGLoss.py", "/src/model/Discriminator.py", "/src/model/Generator.py", "/src/util/DataUtils.py"]}
|
30,129,753
|
aidotse/Team-Yenomze
|
refs/heads/main
|
/src/dataloader/ValidationDataset.py
|
import math

import torch
import numpy as np
from torch.utils.data import IterableDataset
from monai.utils import NumpyPadMode
from monai.transforms import LoadImage
from monai.data.utils import iter_patch
from typing import Any, Callable, Hashable, Optional, Sequence, Tuple, Union

from src.util.DataUtils import get_mag_level, preprocess
class OurGridyDataset(IterableDataset):
    """Validation dataset: deterministic grid patches over whole stacks.

    Each preprocessed 10-channel stack is tiled into patches of size
    (10, patch_size, patch_size) via monai's `iter_patch`, with WRAP
    padding at the image borders.
    """

    def __init__(self,
                 data: Sequence,
                 patch_size: int,
                 data_reader: Callable):
        """
        Args:
            data: list of 10-path tuples (7 brightfield planes followed by
                the 3 fluorescence targets).
            patch_size: spatial side length of each grid patch.
            data_reader: monai image reader used to load each file.
        """
        # Leading None tells iter_patch to leave the first dim un-tiled.
        self.patch_size = (None,) + (10, patch_size, patch_size)
        self.start_pos = ()
        self.mode = NumpyPadMode.WRAP
        self.data = data
        self.image_reader = LoadImage(data_reader, image_only=True)

    def __len__(self) -> int:
        # Number of image stacks (not patches).
        return len(self.data)

    def __iter__(self):
        worker_info = torch.utils.data.get_worker_info()
        iter_start = 0
        iter_end = len(self.data)
        if worker_info is not None:
            # split workload evenly across DataLoader workers
            per_worker = int(math.ceil((iter_end - iter_start) / float(worker_info.num_workers)))
            worker_id = worker_info.id
            iter_start = iter_start + worker_id * per_worker
            iter_end = min(iter_start + per_worker, iter_end)
        for index in range(iter_start, iter_end):
            img_paths = self.data[index]
            # Shape (1, 1, 10, H, W): batch + channel dims ahead of the stack.
            arrays = np.expand_dims(np.stack([self.image_reader(x) for x in img_paths]), axis=(0,1))
            #arrays = arrays / 30000.0
            #arrays = (np.log(1 + arrays) - 5.5)/5.5
            # Get mag level of file
            mag_level = get_mag_level(img_paths[0])
            # Preprocessing - 1,1,10,256,256
            # Channels 7-9 are the targets C01-C03; channels 0-6 are the
            # brightfield planes, normalized with the C04 statistics.
            arrays[0,0,7,:,:] = preprocess(arrays[0,0,7,:,:], mag_level, "C01")
            arrays[0,0,8,:,:] = preprocess(arrays[0,0,8,:,:], mag_level, "C02")
            arrays[0,0,9,:,:] = preprocess(arrays[0,0,9,:,:], mag_level, "C03")
            arrays[0,0,:7,:,:] = preprocess(arrays[0,0,:7,:,:], mag_level, "C04")
            iters = [iter_patch(a, self.patch_size, self.start_pos, False, self.mode) for a in arrays]
            yield from zip(*iters)
|
{"/train_model_final.py": ["/data_utils.py"], "/_train.py": ["/FlowArrayDataset.py"], "/src/dataloader/TestDataset.py": ["/src/util/DataUtils.py"], "/src/dataloader/TrainDataset.py": ["/src/util/DataUtils.py"], "/src/model_handler/TestHandler.py": ["/src/util/DataUtils.py", "/src/model/Generator.py"], "/train.py": ["/src/model_handler/TrainHandler.py"], "/test.py": ["/src/model_handler/TestHandler.py", "/src/dataloader/TestDataset.py", "/src/model/Generator.py"], "/src/model_handler/TrainHandler.py": ["/src/dataloader/TrainDataset.py", "/src/dataloader/ValidationDataset.py", "/src/loss/VGGLoss.py", "/src/model/Discriminator.py", "/src/model/Generator.py", "/src/util/DataUtils.py"]}
|
30,129,754
|
aidotse/Team-Yenomze
|
refs/heads/main
|
/save_preds_as_png.py
|
import os
import cv2
import glob
import argparse
import numpy as np
from tqdm import tqdm
def parse_args():
    """Parse command-line options for the TIF-to-PNG conversion tool."""
    parser = argparse.ArgumentParser(description="TIF (16bit) to PNG (8bit) Tool for Predictions")
    # (short flag, long flag, help text, required)
    option_specs = [
        ("-n", "--number", "number of png images to save.", False),
        ("-i", "--input_dir", "input directory that has predictions for specific magnification.", True),
        ("-g", "--gt_dir", "ground truth directory for specific magnification.", True),
        ("-o", "--output_dir", "output directory to save predictions as 16bit TIF files.", True),
    ]
    for short_flag, long_flag, help_text, is_required in option_specs:
        parser.add_argument(short_flag, long_flag, type=str,
                            help=help_text, required=is_required)
    args = parser.parse_args()
    print(args)
    return args
def get_gt_path(img_path, gt_dir):
    """Map a prediction path to the ground-truth path with the same filename."""
    file_name = img_path.rsplit("/", 1)[-1]
    return os.path.join(gt_dir, file_name)
def main():
    """Convert sampled 16-bit TIF prediction/GT triplets to 8-bit PNGs.

    Reads channel triplets (A01Z01C01..A03Z01C03) from the input
    directory, optionally samples N of them, rescales each 16-bit image
    to 8 bits and writes per-channel plus RGB-composite PNGs under
    <output_dir>/pred and <output_dir>/gt.
    """
    # get arguments
    args = parse_args()
    input_dir = args.input_dir
    output_dir = args.output_dir
    gt_dir = args.gt_dir
    N = args.number
    # get image paths: one sorted list per channel, zipped into triplets
    preds = [
        sorted(glob.glob(os.path.join(input_dir, f"*A0{i}Z01C0{i}*.tif"), recursive=True))
        for i in range(1,4)
    ]
    preds = list(zip(*preds))
    # sample from the list (keep everything when -n is not given)
    if N is not None:
        sample_indexes = np.random.choice(np.arange(len(preds)), size=int(N), replace=False)
        preds = [preds[i] for i in sample_indexes]
    # get the ground truths
    gts = [tuple([get_gt_path(img_path, gt_dir) for img_path in channels]) for channels in preds]
    # construct directories
    # (this block previously appeared twice verbatim — deduplicated)
    pred_dir = os.path.join(output_dir, "pred")
    gt_dir = os.path.join(output_dir, "gt")
    for dir_path in [pred_dir, gt_dir]:
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
    # save images
    # BUGFIX: total=int(N) raised TypeError when -n was omitted (N is None);
    # the sampled list length is always correct.
    for pred_paths, gt_paths in tqdm(zip(preds, gts), total=len(preds)):
        # read 16 bit images, stacked to (H, W, 3)
        pred16 = np.stack([cv2.imread(path, cv2.IMREAD_ANYDEPTH) for path in pred_paths], axis=2)
        gt16 = np.stack([cv2.imread(path, cv2.IMREAD_ANYDEPTH) for path in gt_paths], axis=2)
        # scale them to 8bit (per-image max; assumes non-black images)
        pred8 = (pred16 / (pred16.max()/256)).astype(np.uint8)
        gt8 = (gt16 / (gt16.max()/256)).astype(np.uint8)
        # save single channels
        for i in range(3):
            img_name = pred_paths[i].split("/")[-1][:-4] + ".png"
            cv2.imwrite(os.path.join(pred_dir, img_name), pred8[:,:,i])
            cv2.imwrite(os.path.join(gt_dir, img_name), gt8[:,:,i])
        # save rgb images
        rgb_name = pred_paths[0].split("/")[-1][:-16] + "_RGB.png"
        cv2.imwrite(os.path.join(pred_dir, rgb_name), pred8)
        cv2.imwrite(os.path.join(gt_dir, rgb_name), gt8)

if __name__ == "__main__":
    main()
|
{"/train_model_final.py": ["/data_utils.py"], "/_train.py": ["/FlowArrayDataset.py"], "/src/dataloader/TestDataset.py": ["/src/util/DataUtils.py"], "/src/dataloader/TrainDataset.py": ["/src/util/DataUtils.py"], "/src/model_handler/TestHandler.py": ["/src/util/DataUtils.py", "/src/model/Generator.py"], "/train.py": ["/src/model_handler/TrainHandler.py"], "/test.py": ["/src/model_handler/TestHandler.py", "/src/dataloader/TestDataset.py", "/src/model/Generator.py"], "/src/model_handler/TrainHandler.py": ["/src/dataloader/TrainDataset.py", "/src/dataloader/ValidationDataset.py", "/src/loss/VGGLoss.py", "/src/model/Discriminator.py", "/src/model/Generator.py", "/src/util/DataUtils.py"]}
|
30,129,755
|
aidotse/Team-Yenomze
|
refs/heads/main
|
/src/loss/VGGLoss.py
|
from torch import nn
from torchvision.models.vgg import vgg19
class VGGLoss(nn.Module):
    """Perceptual loss: MSE between frozen VGG19 feature maps.

    The first 31 layers of a pretrained VGG19 are used as a fixed feature
    extractor; its weights are frozen so only the compared images drive
    gradients.
    """

    def __init__(self, device):
        super(VGGLoss, self).__init__()
        backbone = vgg19(pretrained=True, progress=False)
        extractor = nn.Sequential(*list(backbone.features)[:31]).eval()
        for weight in extractor.parameters():
            weight.requires_grad = False
        self.feature_extractor = extractor.to(device)
        self.mse = nn.MSELoss()

    def forward(self, sr, hr):
        """Return the MSE between the VGG features of `sr` and `hr`."""
        sr_features = self.feature_extractor(sr)
        hr_features = self.feature_extractor(hr)
        return self.mse(sr_features, hr_features)
|
{"/train_model_final.py": ["/data_utils.py"], "/_train.py": ["/FlowArrayDataset.py"], "/src/dataloader/TestDataset.py": ["/src/util/DataUtils.py"], "/src/dataloader/TrainDataset.py": ["/src/util/DataUtils.py"], "/src/model_handler/TestHandler.py": ["/src/util/DataUtils.py", "/src/model/Generator.py"], "/train.py": ["/src/model_handler/TrainHandler.py"], "/test.py": ["/src/model_handler/TestHandler.py", "/src/dataloader/TestDataset.py", "/src/model/Generator.py"], "/src/model_handler/TrainHandler.py": ["/src/dataloader/TrainDataset.py", "/src/dataloader/ValidationDataset.py", "/src/loss/VGGLoss.py", "/src/model/Discriminator.py", "/src/model/Generator.py", "/src/util/DataUtils.py"]}
|
30,129,756
|
aidotse/Team-Yenomze
|
refs/heads/main
|
/src/util/DataUtils.py
|
import numpy as np
from monai.transforms import Compose
def find_first_occurance(tuple_list, target_str):
    """Return the index of the first tuple whose first element contains
    `target_str`, or None when nothing matches."""
    return next((idx for idx, entry in enumerate(tuple_list) if target_str in entry[0]), None)
def split_train_val(data_list, N_valid_per_magn=1, is_val_split=True):
    """Split instances into train/validation lists by magnification.

    For each active magnification marker, `N_valid_per_magn` consecutive
    instances starting at its first occurrence form the validation set.

    Args:
        data_list: list of path tuples; the first element of each tuple
            carries the magnification marker (e.g. "20x").
        N_valid_per_magn: validation instances taken per magnification.
        is_val_split: when False, the returned "train" split also
            includes the validation instances.

    Returns:
        (train_split, val_split) tuple of lists.
    """
    initials = [
        find_first_occurance(data_list, mag_lev)
        for mag_lev in ["20x"]#["20x", "40x", "60x"]
    ]
    val_indexes = [i for start in initials for i in range(start, start + N_valid_per_magn)]
    val_split = [data_list[i] for i in val_indexes]
    train_split = [data_list[i] for i in range(len(data_list)) if i not in val_indexes]
    if is_val_split:
        return train_split, val_split
    return train_split + val_split, val_split
def get_mag_level(img_file_path):
    """Infer the magnification marker from a file path.

    Checks "20x" first, then "40x"; anything else is treated as "60x".
    """
    for marker in ("20x", "40x"):
        if marker in img_file_path:
            return marker
    return "60x"
class MozartTheComposer(Compose):
    """Compose variant that pipes the whole volume through each transform.

    The input is handed to every transform as-is, one after the other
    (no per-item mapping).
    """

    def __call__(self, input_):
        volume = input_
        for transform in self.transforms:
            volume = transform(volume)
        return volume
def preprocess(img, mag_level, channel):
    """Normalize a raw microscopy channel to roughly [0, 1].

    Pipeline: divide by a per-(magnification, channel) std, clip at the
    99th-percentile threshold, apply log(1 + x), then divide by the
    channel's maximum log value.

    Args:
        img: numpy array of raw pixel intensities.
        mag_level: one of "20x", "40x", "60x".
        channel: one of "C01".."C04".

    Returns:
        Preprocessed numpy array with the same shape as `img`.
    """
    std_dict = {"20x": {"C01": 515.0, "C02": 573.0, "C03": 254.0, "C04": 974.0},
                "40x": {"C01": 474.0, "C02": 513.0, "C03": 146.0, "C04": 283.0},
                "60x": {"C01": 379.0, "C02": 1010.0, "C03": 125.0, "C04": 228.0}}
    threshold_99_dict = {"20x": {"C01": 5.47, "C02": 4.08, "C03": 5.95, "C04": 7.28},
                         "40x": {"C01": 5.81, "C02": 3.97, "C03": 6.09, "C04": 7.16},
                         "60x": {"C01": 5.75, "C02": 3.88, "C03": 6.27, "C04": 6.81}}
    max_log_value_dict = {"C01": 1.92, "C02": 1.63, "C03": 1.99, "C04": 2.12}
    scaled = img / std_dict[mag_level][channel]
    clipped = np.clip(scaled, None, threshold_99_dict[mag_level][channel])
    return np.log(1 + clipped) / max_log_value_dict[channel]
def adjust_intensity(img, mag_level, channel):
    """Apply a per-(magnification, channel) linear intensity correction.

    Empirical slope/intercept pairs align the rescaled predictions with
    the target intensity distribution; C01 is left unchanged
    (slope 1, intercept 0).
    """
    slope_dict = {"20x": {"C01": 1.0, "C02": 1.27, "C03": 1.1},
                  "40x": {"C01": 1.0, "C02": 2.39, "C03": 1.7},
                  "60x": {"C01": 1.0, "C02": 2.4, "C03": 0.8}}
    intercept_dict = {"20x": {"C01": 0.0, "C02": 14.0, "C03": 320.0},
                      "40x": {"C01": 0.0, "C02": -427.0, "C03": 74.0},
                      "60x": {"C01": 0.0, "C02": -887.0, "C03": 128.0}}
    slope = slope_dict[mag_level][channel]
    intercept = intercept_dict[mag_level][channel]
    return img * slope + intercept
def postprocess(img, mag_level, channel):
    """Invert `preprocess` and map network output back to raw intensities.

    Inverse pipeline: multiply by the channel's max log value, undo the
    log(1 + x) with exp(x) - 1, rescale by the per-(magnification,
    channel) std, then apply the empirical linear intensity adjustment.

    BUGFIX: the log inversion previously computed np.exp(x - 1) instead
    of np.exp(x) - 1, which is not the inverse of the np.log(1 + x) used
    in `preprocess` (e.g. a preprocessed value of 0 mapped back to
    exp(-1) * std instead of 0).

    Args:
        img: numpy array of model outputs (preprocessed scale).
        mag_level: one of "20x", "40x", "60x".
        channel: one of "C01".."C03".

    Returns:
        numpy array of adjusted raw-scale intensities.
    """
    std_dict = {"20x": {"C01": 515.0, "C02": 573.0, "C03": 254.0, "C04": 974.0},
                "40x": {"C01": 474.0, "C02": 513.0, "C03": 146.0, "C04": 283.0},
                "60x": {"C01": 379.0, "C02": 1010.0, "C03": 125.0, "C04": 228.0}}
    max_log_value_dict = {"C01": 1.92, "C02": 1.63, "C03": 1.99, "C04": 2.12}
    log_transform_img = img * max_log_value_dict[channel]
    normalized_img = np.exp(log_transform_img) - 1
    final_img = normalized_img * std_dict[mag_level][channel]
    final_adjusted_img = adjust_intensity(final_img, mag_level, channel)
    return final_adjusted_img
|
{"/train_model_final.py": ["/data_utils.py"], "/_train.py": ["/FlowArrayDataset.py"], "/src/dataloader/TestDataset.py": ["/src/util/DataUtils.py"], "/src/dataloader/TrainDataset.py": ["/src/util/DataUtils.py"], "/src/model_handler/TestHandler.py": ["/src/util/DataUtils.py", "/src/model/Generator.py"], "/train.py": ["/src/model_handler/TrainHandler.py"], "/test.py": ["/src/model_handler/TestHandler.py", "/src/dataloader/TestDataset.py", "/src/model/Generator.py"], "/src/model_handler/TrainHandler.py": ["/src/dataloader/TrainDataset.py", "/src/dataloader/ValidationDataset.py", "/src/loss/VGGLoss.py", "/src/model/Discriminator.py", "/src/model/Generator.py", "/src/util/DataUtils.py"]}
|
30,129,757
|
aidotse/Team-Yenomze
|
refs/heads/main
|
/src/model_handler/TestHandler.py
|
import os
import sys
import torch
import numpy as np
from PIL import Image
from tqdm import tqdm
from monai.data import PILReader, DataLoader
from src.util.DataUtils import postprocess
from src.model.Generator import GeneratorUnet
class TestHandler():
    """Runs tiled inference with a trained GeneratorUnet and writes the
    predicted fluorescence channels to 16-bit TIF files."""

    def __init__(self,
                 patch_iterator: torch.utils.data.IterableDataset,
                 model: GeneratorUnet,
                 output_dir: str="./output",
                 ) -> None:
        """
        Args:
            patch_iterator: dataset CLASS (not an instance) used to tile
                each image into overlapping patches; instantiated per image.
            model: trained generator; moved to GPU when available and put
                in eval mode.
            output_dir: directory for predicted TIFs (created if missing).
        """
        self.patch_iterator = patch_iterator
        self.output_dir = output_dir
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        self.device = torch.device('cuda') if torch.cuda.is_available() else 'cpu'
        self.model = model.to(self.device)
        self.model.eval()

    def generate_patch_iterator(self, image_list):
        """Yield (filename-prefix, DataLoader) pairs, one per image tuple.

        The prefix is the first file's name with its last 16 characters
        (the plate/channel suffix) stripped; it is reused to name outputs.
        """
        for img_tuple in image_list:
            fname = img_tuple[0].split("/")[-1]
            img_prefix = fname[:-16]
            # 256-pixel patches with 50% overlap between neighbours.
            dataset = self.patch_iterator(data=img_tuple,
                                          patch_size=256,
                                          overlap_ratio=0.5,
                                          data_reader=PILReader())
            data_loader = DataLoader(dataset,
                                     batch_size=8,
                                     shuffle=False)
            yield img_prefix, data_loader

    def run_test(self, image_list, mag_level):
        """Predict C01-C03 for every brightfield stack and save them as TIFs.

        Args:
            image_list: list of 7-tuples of brightfield image paths
                (focal planes Z01..Z07).
            mag_level: magnification key ("20x"/"40x"/"60x") used by the
                postprocessing lookup tables.
        """
        merged_images_list = []
        with torch.no_grad():
            for img_prefix, data_loader in tqdm(self.generate_patch_iterator(image_list),
                                                total=len(image_list),
                                                file=sys.stdout):
                patchesC01, patchesC02, patchesC03 = [], [], []
                for batch_index, batch in enumerate(data_loader):
                    # unpack the inputs: one tensor per focal plane
                    # (batch is indexed [N, C, plane, H, W])
                    inpZ01, inpZ02, inpZ03, inpZ04, inpZ05, inpZ06, inpZ07 = \
                        batch[:,:,0,:,:].to(self.device), \
                        batch[:,:,1,:,:].to(self.device), \
                        batch[:,:,2,:,:].to(self.device), \
                        batch[:,:,3,:,:].to(self.device), \
                        batch[:,:,4,:,:].to(self.device), \
                        batch[:,:,5,:,:].to(self.device), \
                        batch[:,:,6,:,:].to(self.device)
                    # predict with model
                    outC01, outC02, outC03 = self.model(inpZ01, inpZ02, inpZ03,
                                                        inpZ04, inpZ05, inpZ06, inpZ07)
                    # Drop the channel dim and move each patch to CPU.
                    outC01, outC02, outC03 = [p[0] for p in outC01.data.cpu()], \
                                             [p[0] for p in outC02.data.cpu()], \
                                             [p[0] for p in outC03.data.cpu()]
                    patchesC01.extend(outC01)
                    patchesC02.extend(outC02)
                    patchesC03.extend(outC03)
                # (3,256,256)
                # Stitch patches per channel, undo preprocessing, and save.
                merged_images = []
                channels = ["C01", "C02", "C03"]
                for i, patches in enumerate([patchesC01, patchesC02, patchesC03]):
                    #print(len(patches))
                    #print(patches[0].shape)
                    merged_img = data_loader.dataset.merge_patches(patches)
                    merged_img = postprocess(merged_img, mag_level, channels[i])
                    merged_images.append(merged_img)
                    out_dir = os.path.join(self.output_dir)
                    if not os.path.exists(out_dir):
                        os.makedirs(out_dir)
                    # Output name mirrors the input naming scheme,
                    # e.g. <prefix>L01A01Z01C01.tif for channel 1.
                    self.save_img(merged_img,
                                  os.path.join(out_dir,
                                               f"{img_prefix}L01A0{i+1}Z01C0{i+1}.tif"))
                # merged_images_list.append(np.stack(merged_images))
        # return merged_images_list

    @staticmethod
    def save_img(img,
                 output_path):
        # write 16-bit TIF image
        Image.fromarray(img.astype(np.uint16)).save(output_path)
|
{"/train_model_final.py": ["/data_utils.py"], "/_train.py": ["/FlowArrayDataset.py"], "/src/dataloader/TestDataset.py": ["/src/util/DataUtils.py"], "/src/dataloader/TrainDataset.py": ["/src/util/DataUtils.py"], "/src/model_handler/TestHandler.py": ["/src/util/DataUtils.py", "/src/model/Generator.py"], "/train.py": ["/src/model_handler/TrainHandler.py"], "/test.py": ["/src/model_handler/TestHandler.py", "/src/dataloader/TestDataset.py", "/src/model/Generator.py"], "/src/model_handler/TrainHandler.py": ["/src/dataloader/TrainDataset.py", "/src/dataloader/ValidationDataset.py", "/src/loss/VGGLoss.py", "/src/model/Discriminator.py", "/src/model/Generator.py", "/src/util/DataUtils.py"]}
|
30,129,758
|
aidotse/Team-Yenomze
|
refs/heads/main
|
/train.py
|
import json
import argparse
from src.model_handler.TrainHandler import start_training
def parse_args():
    """Parse the -s/--setting_file command-line option."""
    arg_parser = argparse.ArgumentParser(description="Adipocyte Fluorescence Predictor CLI Tool")
    arg_parser.add_argument("-s", "--setting_file", type=str,
                            help="JSON filepath that contains settings.")
    parsed = arg_parser.parse_args()
    print(parsed)
    return parsed
def get_settings(json_path):
    """Load, echo, and return the training settings stored as JSON."""
    with open(json_path, "r") as settings_file:
        settings = json.load(settings_file)
    print(settings)
    return settings
def main():
    """CLI entry point: read the settings JSON and launch training."""
    cli_args = parse_args()
    training_settings = get_settings(cli_args.setting_file)
    start_training(**training_settings)

if __name__ == "__main__":
    main()
|
{"/train_model_final.py": ["/data_utils.py"], "/_train.py": ["/FlowArrayDataset.py"], "/src/dataloader/TestDataset.py": ["/src/util/DataUtils.py"], "/src/dataloader/TrainDataset.py": ["/src/util/DataUtils.py"], "/src/model_handler/TestHandler.py": ["/src/util/DataUtils.py", "/src/model/Generator.py"], "/train.py": ["/src/model_handler/TrainHandler.py"], "/test.py": ["/src/model_handler/TestHandler.py", "/src/dataloader/TestDataset.py", "/src/model/Generator.py"], "/src/model_handler/TrainHandler.py": ["/src/dataloader/TrainDataset.py", "/src/dataloader/ValidationDataset.py", "/src/loss/VGGLoss.py", "/src/model/Discriminator.py", "/src/model/Generator.py", "/src/util/DataUtils.py"]}
|
30,129,759
|
aidotse/Team-Yenomze
|
refs/heads/main
|
/test.py
|
import os
import glob
import torch
import argparse
import numpy as np
from src.model_handler.TestHandler import TestHandler
from src.dataloader.TestDataset import OverlappyGridyDataset
from src.model.Generator import GeneratorUnet
# Default generator checkpoints per magnification level; used when the
# caller does not pass an explicit -c/--model_checkpoint.
BEST_MODELS = {
    "20x": "checkpoints/model20x/G_epoch_548.pth",
    "40x": "checkpoints/model40x/G_epoch_125.pth",
    "60x": "checkpoints/model60x/G_epoch_228.pth"
}
def parse_args():
    """Parse CLI options for running inference on brightfield images."""
    cli_parser = argparse.ArgumentParser(description="Adipocyte Fluorescence Predictor CLI Tool")
    cli_parser.add_argument("-c", "--model_checkpoint", type=str,
                            help="checkpoint path for UNet model.")
    cli_parser.add_argument("-m", "--magnification", type=str,
                            help="magnification level of input images.", required=True, choices=['20x', '40x', '60x'])
    cli_parser.add_argument("-i", "--input_dir", type=str,
                            help="input directory that has brightfield images.", required=True)
    cli_parser.add_argument("-o", "--output_dir", type=str,
                            help="output directory to save predictions as 16bit TIF files.", required=True)
    parsed_args = cli_parser.parse_args()
    print(parsed_args)
    return parsed_args
def main():
    """Run tiled inference over all brightfield stacks in the input dir."""
    args = parse_args()
    # Gather the seven focal planes (Z01..Z07) of brightfield channel A04
    # and group them per field of view.
    plane_lists = [
        sorted(glob.glob(os.path.join(args.input_dir, f"*A04Z0{i}*.tif"), recursive=True))
        for i in range(1, 8)
    ]
    stacks = list(zip(*plane_lists))
    # Load the generator weights (default checkpoint per magnification
    # unless one is supplied explicitly).
    model = GeneratorUnet(split=True)
    if args.model_checkpoint is None:
        model_chkp_path = BEST_MODELS[args.magnification]
    else:
        model_chkp_path = args.model_checkpoint
    print(f'Loading checkpoint: {model_chkp_path}')
    checkpoint = torch.load(model_chkp_path)
    model.load_state_dict(checkpoint["model_state_dict"])
    # The overlapping-grid dataset class tiles each image into patches.
    handler = TestHandler(OverlappyGridyDataset,
                          model=model,
                          output_dir=args.output_dir)
    handler.run_test(stacks, mag_level=args.magnification)

if __name__ == "__main__":
    main()
|
{"/train_model_final.py": ["/data_utils.py"], "/_train.py": ["/FlowArrayDataset.py"], "/src/dataloader/TestDataset.py": ["/src/util/DataUtils.py"], "/src/dataloader/TrainDataset.py": ["/src/util/DataUtils.py"], "/src/model_handler/TestHandler.py": ["/src/util/DataUtils.py", "/src/model/Generator.py"], "/train.py": ["/src/model_handler/TrainHandler.py"], "/test.py": ["/src/model_handler/TestHandler.py", "/src/dataloader/TestDataset.py", "/src/model/Generator.py"], "/src/model_handler/TrainHandler.py": ["/src/dataloader/TrainDataset.py", "/src/dataloader/ValidationDataset.py", "/src/loss/VGGLoss.py", "/src/model/Discriminator.py", "/src/model/Generator.py", "/src/util/DataUtils.py"]}
|
30,129,760
|
aidotse/Team-Yenomze
|
refs/heads/main
|
/src/model_handler/TrainHandler.py
|
import os
import sys
import pandas as pd
from tqdm import tqdm
import numpy as np
import glob
import multiprocessing
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
from torch.utils.tensorboard import SummaryWriter
from torch import autograd
from torchio.transforms import CropOrPad
from monai.data import ArrayDataset, DataLoader, PILReader
from monai.transforms import Compose, LoadImage, RandFlip, RandRotate, RandRotate90, ToTensor
from src.dataloader.TrainDataset import OurDataset
from src.dataloader.ValidationDataset import OurGridyDataset
from src.loss.VGGLoss import VGGLoss
from src.model.Discriminator import Discriminator
from src.model.Generator import GeneratorUnet
from src.util.DataUtils import (
split_train_val,
MozartTheComposer
)
def start_training(batch_size=16,
                   num_epoch=500,
                   num_epoch_pretrain_G=None,
                   lr=1e-5,
                   unet_split_mode=True,
                   data_dir = "/data/*",
                   load_weight_dir=None,
                   save_weight_dir="./checkpoints/",
                   log_dir="./logs/",
                   loss_dir="./lossinfo/",
                   augmentation_prob=50,
                   adversarial_weight=5e-2,
                   mse_loss_weight=50,
                   c01_weight=0.3,
                   c02_weight=0.3,
                   c03_weight=0.4,
                   is_val_split=False,
                   ):
    """Train the GeneratorUnet/Discriminator GAN on 7 input z-slices -> 3 target channels.

    Data layout (from the glob patterns and batch slicing below): each sample
    stacks 10 slices on dim 2 — indices 0..6 are the seven `*A04Z0{1..7}*.tif`
    inputs, indices 7..9 are the `*C01/C02/C03.tif` targets.

    Optionally pretrains the generator alone with a weighted MSE loss for
    ``num_epoch_pretrain_G`` epochs, then runs adversarial training with
    D-loss = BCE(real) + BCE(fake) and
    G-loss = mse_loss_weight * weighted-MSE + weighted-VGG
             + adversarial_weight * BCE(fake-as-real).
    Checkpoints are written to ``save_weight_dir`` every epoch; losses go to
    TensorBoard (``log_dir``) and CSV files (``loss_dir``).

    NOTE(review): ``augmentation_prob=50`` is passed to MONAI transforms whose
    ``prob`` is documented as a 0..1 probability — a value > 1 presumably means
    "always apply"; confirm intent.
    """
    # Create all output directories up front.
    if not os.path.exists(save_weight_dir):
        os.makedirs(save_weight_dir)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    if not os.path.exists(loss_dir):
        os.makedirs(loss_dir)
    device = torch.device('cuda') if torch.cuda.is_available() else 'cpu'
    print(f'using GPU? : {torch.cuda.is_available()}, GPU: {torch.cuda.get_device_name()}', )
    writer = SummaryWriter(log_dir=log_dir)  # tensorboard
    # Collect instances: one sorted file list per input z-slice (Z01..Z07)...
    inputs = [
        sorted(glob.glob(os.path.join(data_dir, f'*A04Z0{i}*.tif'), recursive=True))
        for i in range(1,8)
    ]
    # ...and one per target fluorescence channel (C01..C03).
    targets = [
        sorted(glob.glob(os.path.join(data_dir, f'*C0{i}.tif'), recursive=True))
        for i in range(1,4)
    ]
    # Merge inputs and targets into a single list of 10 parallel file lists.
    all_data = inputs + targets
    # Zip so each element holds the 10 matched files for one instance.
    data_all_ch = list(zip(*all_data))
    train_split, val_split = split_train_val(data_all_ch,
                                             N_valid_per_magn=4,
                                             is_val_split = is_val_split)
    # Data preprocessing/augmentation: geometric augments for training,
    # tensor conversion only for validation.
    trans_train = MozartTheComposer(
        [
            RandRotate(range_x=15, prob=augmentation_prob, keep_size=True, padding_mode="reflection"),
            RandRotate90(prob=augmentation_prob, spatial_axes=(1, 2)),
            RandFlip(spatial_axis=(1, 2), prob=augmentation_prob),
            ToTensor()
        ]
    )
    trans_val = MozartTheComposer(
        [
            ToTensor()
        ]
    )
    # Dataset classes: random 256px crops (8 per image) for training,
    # a deterministic patch grid for validation.
    train_dataset = OurDataset(
        data=train_split,
        data_reader=PILReader(),
        transform=trans_train,
        roi_size=256, samples_per_image=8
    )
    val_dataset = OurGridyDataset(
        data=val_split,
        data_reader=PILReader(),
        patch_size=256
    )
    # MONAI DataLoaders.
    # NOTE(review): shuffle=False on the training loader is unusual — confirm
    # whether shuffling is intentionally delegated to the random cropping.
    training_loader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=8 #multiprocessing.cpu_count(),
    )
    validation_loader = DataLoader(
        val_dataset,
        batch_size=batch_size,
        num_workers=4 #multiprocessing.cpu_count(),
    )
    # Models / criteria / optimizers.
    netG = GeneratorUnet(split=unet_split_mode).to(device)
    # print(netG)
    netD = Discriminator().to(device)
    mseloss = nn.MSELoss()
    bceloss = nn.BCELoss()
    vggloss = VGGLoss(device=device)
    optimizerG = optim.Adam(netG.parameters(), lr=lr)
    optimizerD = optim.Adam(netD.parameters(), lr=lr)
    # Resume the generator from a checkpoint if one was given.
    if load_weight_dir is not None:
        print(f'Loading checkpoint: {load_weight_dir}')
        checkpoint = torch.load(load_weight_dir)
        netG.load_state_dict(checkpoint['model_state_dict'])
        optimizerG.load_state_dict(checkpoint['optimizer_state_dict'])
        init_epoch = checkpoint['epoch']
        loss = checkpoint['loss']
    else:
        init_epoch = 0
    # ---- Optional generator-only pretraining (weighted MSE, no adversary) ----
    if num_epoch_pretrain_G is not None:
        print(f'pre-training Generator for {num_epoch_pretrain_G} epochs')
        save_gene = pd.DataFrame(columns=['TotalLoss', 'lossC01', 'lossC02', 'lossC03'])
        for epoch in range(1, num_epoch_pretrain_G + 1):
            pretrainedG_losses = []
            lossC01s = []
            lossC02s = []
            lossC03s = []
            netG.train()
            # batch shape: (B, 1, 10, H, W) — dim 2 indexes the 10 slices.
            for batch_index, batch in enumerate(tqdm(training_loader, file=sys.stdout)):
                inputZ01, inputZ02, inputZ03, inputZ04, inputZ05, inputZ06, inputZ07 = \
                    batch[:,:,0,:,:].to(device), \
                    batch[:,:,1,:,:].to(device), \
                    batch[:,:,2,:,:].to(device), \
                    batch[:,:,3,:,:].to(device), \
                    batch[:,:,4,:,:].to(device), \
                    batch[:,:,5,:,:].to(device), \
                    batch[:,:,6,:,:].to(device)
                targetC01, targetC02, targetC03 = batch[:,:,7,:,:].to(device), \
                    batch[:,:,8,:,:].to(device), \
                    batch[:,:,9,:,:].to(device)
                netG.zero_grad()
                outputC01, outputC02, outputC03 = netG(inputZ01, inputZ02, inputZ03, inputZ04, inputZ05, inputZ06, inputZ07)
                # with autograd.detect_anomaly():
                # Per-channel MSE, combined with the channel weights.
                loss01 = mseloss(outputC01, targetC01)
                loss02 = mseloss(outputC02, targetC02)
                loss03 = mseloss(outputC03, targetC03)
                loss_gene = c01_weight*loss01 + c02_weight*loss02 + c03_weight*loss03
                loss_gene.backward()
                optimizerG.step()
                pretrainedG_losses.append(loss_gene.detach().item())
                lossC01s.append(loss01.detach().item())
                lossC02s.append(loss02.detach().item())
                lossC03s.append(loss03.detach().item())
            # Epoch-mean losses, logged to stdout and CSV.
            epoch_loss = np.array(pretrainedG_losses).mean()
            epoch_lossC01 = np.array(lossC01s).mean()
            epoch_lossC02 = np.array(lossC02s).mean()
            epoch_lossC03 = np.array(lossC03s).mean()
            print(f'Pre-training Generator - Epoch {epoch}/{num_epoch_pretrain_G} loss: {epoch_loss}, loss01: {epoch_lossC01}, loss02: {epoch_lossC02}, loss03: {epoch_lossC03}')
            save_gene = save_gene.append({'TotalLoss': epoch_loss, 'lossC01': epoch_lossC01, 'lossC02': epoch_lossC02, 'lossC03': epoch_lossC03}, ignore_index=True)
            save_gene.to_csv(os.path.join(loss_dir, 'generator_loss_info.csv'))
            # Log last-batch images every epoch.
            if epoch % 1 == 0:
                writer.add_images('Groundtruth_G_only/C01', targetC01, epoch)
                writer.add_images('Groundtruth_G_only/C02', targetC02, epoch)
                writer.add_images('Groundtruth_G_only/C03', targetC03, epoch)
                writer.add_images('train_G_only/C01', outputC01, epoch)
                writer.add_images('train_G_only/C02', outputC02, epoch)
                writer.add_images('train_G_only/C03', outputC03, epoch)
            # Save model parameters.
            # NOTE(review): the same path is written twice — first with a bare
            # state_dict, then overwritten with the full checkpoint dict; the
            # first save is redundant.
            weight = f'pretrained_G_epoch_{epoch}.pth'
            torch.save(netG.state_dict(), os.path.join(save_weight_dir, weight))
            torch.save({'epoch': epoch,
                        'model_state_dict': netG.state_dict(),
                        'optimizer_state_dict': optimizerG.state_dict(),
                        'loss': epoch_loss}, os.path.join(save_weight_dir, weight))
    # Reload the final pretrained generator weights before adversarial training.
    if num_epoch_pretrain_G is not None:
        print(f'Loading weights from pretrained generator')
        finalweight = f'pretrained_G_epoch_{num_epoch_pretrain_G}.pth'
        checkpoint = torch.load(os.path.join(save_weight_dir, finalweight))
        netG.load_state_dict(checkpoint['model_state_dict'])
        optimizerG.load_state_dict(checkpoint['optimizer_state_dict'])
        init_epoch = checkpoint['epoch']
        loss = checkpoint['loss']
    else:
        pass
    # Epoch-counter bookkeeping for the combined (pretrain + GAN) schedule.
    if num_epoch_pretrain_G is not None:
        init_epoch = num_epoch_pretrain_G + 1
        num_epoch = num_epoch + num_epoch_pretrain_G
    else:
        num_epoch = num_epoch
    # NOTE(review): when load_weight_dir is None this resets init_epoch to 1
    # even after pretraining set it above — confirm this is the intended
    # numbering.
    if load_weight_dir is None:
        init_epoch = 1
    else:
        init_epoch = init_epoch
        num_epoch = num_epoch + init_epoch
    save_gan_train = pd.DataFrame(columns=['TotalLoss', 'v_lossC01', 'v_lossC02', 'v_lossC03','m_lossC01', 'm_lossC02', 'm_lossC03', 'adv' ,'valTotalLoss',
                                           'val_v_lossC01', 'val_v_lossC02', 'val_v_lossC03','val_m_lossC01', 'val_m_lossC02', 'val_m_lossC03', 'val_adv'])
    # ---- Adversarial training loop ----
    for epoch in range(init_epoch, num_epoch + 1):
        print(f'Epoch : [{epoch} / {num_epoch}]')
        netG.train()
        G_losses = []
        D_losses = []
        v_lossC01s = []
        v_lossC02s = []
        v_lossC03s = []
        m_lossC01s = []
        m_lossC02s = []
        m_lossC03s = []
        for batch_index, batch in enumerate(tqdm(training_loader, file=sys.stdout)):
            inputZ01, inputZ02, inputZ03, inputZ04, inputZ05, inputZ06, inputZ07 = \
                batch[:,:,0,:,:].to(device), \
                batch[:,:,1,:,:].to(device), \
                batch[:,:,2,:,:].to(device), \
                batch[:,:,3,:,:].to(device), \
                batch[:,:,4,:,:].to(device), \
                batch[:,:,5,:,:].to(device), \
                batch[:,:,6,:,:].to(device)
            targetC01, targetC02, targetC03 = batch[:,:,7,:,:].to(device), \
                batch[:,:,8,:,:].to(device), \
                batch[:,:,9,:,:].to(device)
            shape = inputZ01.size()
            # Per-sample real/fake labels for the BCE discriminator loss.
            real_label = torch.ones((shape[0])).to(device)
            fake_label = torch.zeros((shape[0])).to(device)
            ###############################################################
            # First train discriminator network : maximize D(x)-1-D(G(z)) #
            ###############################################################
            netD.zero_grad()
            outputC01, outputC02, outputC03 = netG(inputZ01, inputZ02, inputZ03, inputZ04, inputZ05, inputZ06, inputZ07)
            # The discriminator sees the 3 channels concatenated on dim 1.
            targetCs = torch.cat((targetC01, targetC02, targetC03), dim=1)
            realCs_prob = netD(targetCs)
            outputCs = torch.cat((outputC01, outputC02, outputC03), dim=1)
            fakeCs_prob = netD(outputCs)
            d_loss_real = bceloss(realCs_prob, real_label)
            d_loss_fake = bceloss(fakeCs_prob, fake_label)
            d_loss = d_loss_real + d_loss_fake
            d_loss.backward()
            optimizerD.step()
            D_losses.append(d_loss.item())
            #################################################################
            # Now train Generator network                                   #
            # option 1 (mode='MSE') : minimize mseloss + 10^-3 * -logD(G(z))#
            # option 2 (mode='VGG') : minimize vggloss + 10^-3 * -logD(G(z))#
            #################################################################
            netG.zero_grad()
            # Second forward pass: the first graph was consumed by D's backward.
            outputC01, outputC02, outputC03 = netG(inputZ01, inputZ02, inputZ03, inputZ04, inputZ05, inputZ06, inputZ07)
            # Weighted per-channel MSE content loss.
            m_lossC01 = mseloss(outputC01, targetC01)
            m_lossC02 = mseloss(outputC02, targetC02)
            m_lossC03 = mseloss(outputC03, targetC03)
            m_content_loss = c01_weight*m_lossC01 + c02_weight*m_lossC02 + c03_weight*m_lossC03
            # VGG perceptual loss: single-channel maps are repeated to 3
            # channels to match the VGG input format.
            targetC01_vgg = torch.cat((targetC01, targetC01, targetC01), dim=1) # concat
            targetC02_vgg = torch.cat((targetC02, targetC02, targetC02), dim=1)
            targetC03_vgg = torch.cat((targetC03, targetC03, targetC03), dim=1)
            outputC01_vgg = torch.cat((outputC01,outputC01,outputC01), dim=1)
            outputC02_vgg = torch.cat((outputC02,outputC02,outputC02), dim=1)
            outputC03_vgg = torch.cat((outputC03,outputC03,outputC03), dim=1)
            v_lossC01 = vggloss(outputC01_vgg, targetC01_vgg)
            v_lossC02 = vggloss(outputC02_vgg, targetC02_vgg)
            v_lossC03 = vggloss(outputC03_vgg, targetC03_vgg)
            v_content_loss = c01_weight*v_lossC01 + c02_weight*v_lossC02 + c03_weight*v_lossC03
            # Combined content loss + adversarial term.
            content_loss = mse_loss_weight * m_content_loss + v_content_loss
            outputCs = torch.cat((outputC01, outputC02, outputC03), dim=1)
            fakeCs_prob = netD(outputCs)
            adversarial_loss = bceloss(fakeCs_prob, real_label)
            g_loss = content_loss + adversarial_weight * adversarial_loss
            g_loss.backward()
            optimizerG.step()
            G_losses.append(g_loss.detach().item())
            v_lossC01s.append(v_lossC01.detach().item())
            v_lossC02s.append(v_lossC02.detach().item())
            v_lossC03s.append(v_lossC03.detach().item())
            m_lossC01s.append(m_lossC01.detach().item())
            m_lossC02s.append(m_lossC02.detach().item())
            m_lossC03s.append(m_lossC03.detach().item())
            # Log to tensorboard every 10 steps.
            if batch_index % 10 == 0:
                writer.add_scalar('Train/G_loss', g_loss.item(), epoch)
                writer.add_scalar('Train/D_loss', d_loss.item(), epoch)
        # Log last-batch images every epoch.
        if epoch % 1 == 0:
            writer.add_images('Groundtruth_train/C01', targetC01, epoch)
            writer.add_images('Groundtruth_train/C02', targetC02, epoch)
            writer.add_images('Groundtruth_train/C03', targetC03, epoch)
            writer.add_images('train/C01', outputC01, epoch)
            writer.add_images('train/C02', outputC02, epoch)
            writer.add_images('train/C03', outputC03, epoch)
        G_loss = np.array(G_losses).mean()
        D_loss = np.array(D_losses).mean()
        epoch_v_lossC01 = np.array(v_lossC01s).mean()
        epoch_v_lossC02 = np.array(v_lossC02s).mean()
        epoch_v_lossC03 = np.array(v_lossC03s).mean()
        epoch_m_lossC01 = np.array(m_lossC01s).mean()
        epoch_m_lossC02 = np.array(m_lossC02s).mean()
        epoch_m_lossC03 = np.array(m_lossC03s).mean()
        print(f'Epoch {epoch}/{num_epoch} Training g_loss : {G_loss}, d_loss : {D_loss}')
        ################################################################################################################
        # Validation: same losses, no gradient updates.
        netG.eval()
        val_G_losses = []
        val_v_lossC01s = []
        val_v_lossC02s = []
        val_v_lossC03s = []
        val_m_lossC01s = []
        val_m_lossC02s = []
        val_m_lossC03s = []
        with torch.no_grad():
            # Validation batches are tuples; index [0] holds the image stack.
            for batch_index, batch in enumerate(tqdm(validation_loader, file=sys.stdout)):
                inputZ01, inputZ02, inputZ03, inputZ04, inputZ05, inputZ06, inputZ07 = \
                    batch[0][:,:,0,:,:].to(device), \
                    batch[0][:,:,1,:,:].to(device), \
                    batch[0][:,:,2,:,:].to(device), \
                    batch[0][:,:,3,:,:].to(device), \
                    batch[0][:,:,4,:,:].to(device), \
                    batch[0][:,:,5,:,:].to(device), \
                    batch[0][:,:,6,:,:].to(device)
                targetC01, targetC02, targetC03 = batch[0][:,:,7,:,:].to(device), \
                    batch[0][:,:,8,:,:].to(device), \
                    batch[0][:,:,9,:,:].to(device)
                shape = inputZ01.size()
                real_label = torch.ones((shape[0])).to(device)
                fake_label = torch.zeros((shape[0])).to(device)
                outputC01, outputC02, outputC03 = netG(inputZ01, inputZ02, inputZ03, inputZ04, inputZ05, inputZ06, inputZ07)
                # Weighted per-channel MSE content loss.
                val_m_lossC01 = mseloss(outputC01, targetC01)
                val_m_lossC02 = mseloss(outputC02, targetC02)
                val_m_lossC03 = mseloss(outputC03, targetC03)
                val_m_content_loss = c01_weight*val_m_lossC01 + c02_weight*val_m_lossC02 + c03_weight*val_m_lossC03
                # VGG perceptual loss (3-channel repeats, as in training).
                targetC01_vgg = torch.cat((targetC01, targetC01, targetC01), dim=1)
                targetC02_vgg = torch.cat((targetC02, targetC02, targetC02), dim=1)
                targetC03_vgg = torch.cat((targetC03, targetC03, targetC03), dim=1)
                outputC01_vgg = torch.cat((outputC01,outputC01,outputC01), dim=1)
                outputC02_vgg = torch.cat((outputC02,outputC02,outputC02), dim=1)
                outputC03_vgg = torch.cat((outputC03,outputC03,outputC03), dim=1)
                val_v_lossC01 = vggloss(outputC01_vgg, targetC01_vgg)
                val_v_lossC02 = vggloss(outputC02_vgg, targetC02_vgg)
                val_v_lossC03 = vggloss(outputC03_vgg, targetC03_vgg)
                val_v_content_loss = c01_weight*val_v_lossC01 + c02_weight*val_v_lossC02 + c03_weight*val_v_lossC03
                # Combined content loss + adversarial term.
                val_content_loss = mse_loss_weight * val_m_content_loss + val_v_content_loss
                outputCs = torch.cat((outputC01, outputC02, outputC03), dim=1)
                fakeCs_prob = netD(outputCs)
                val_adversarial_loss = bceloss(fakeCs_prob, real_label)
                val_g_loss = val_content_loss + adversarial_weight * val_adversarial_loss
                val_G_losses.append(val_g_loss.detach().item())
                val_v_lossC01s.append(val_v_lossC01.detach().item())
                val_v_lossC02s.append(val_v_lossC02.detach().item())
                val_v_lossC03s.append(val_v_lossC03.detach().item())
                val_m_lossC01s.append(val_m_lossC01.detach().item())
                val_m_lossC02s.append(val_m_lossC02.detach().item())
                val_m_lossC03s.append(val_m_lossC03.detach().item())
                # Log to tensorboard every 10 steps.
                if batch_index % 10 == 0:
                    writer.add_scalar('Val/G_loss', val_g_loss.item(), epoch)
        # Log last-batch validation images every epoch.
        if epoch % 1 == 0:
            writer.add_images('Groundtruth_val/C01', targetC01, epoch)
            writer.add_images('Groundtruth_val/C02', targetC02, epoch)
            writer.add_images('Groundtruth_val/C03', targetC03, epoch)
            writer.add_images('val/C01', outputC01, epoch)
            writer.add_images('val/C02', outputC02, epoch)
            writer.add_images('val/C03', outputC03, epoch)
        val_G_loss = np.array(val_G_losses).mean()
        val_epoch_v_lossC01 = np.array(val_v_lossC01s).mean()
        val_epoch_v_lossC02 = np.array(val_v_lossC02s).mean()
        val_epoch_v_lossC03 = np.array(val_v_lossC03s).mean()
        val_epoch_m_lossC01 = np.array(val_m_lossC01s).mean()
        val_epoch_m_lossC02 = np.array(val_m_lossC02s).mean()
        val_epoch_m_lossC03 = np.array(val_m_lossC03s).mean()
        # Append this epoch's train/val losses to the running CSV.
        # NOTE(review): 'adv'/'val_adv' store the last batch's tensor, not an
        # epoch mean — confirm intent.
        save_gan_train = save_gan_train.append(
            {'TotalLoss': G_loss, 'v_lossC01': epoch_v_lossC01, 'v_lossC02': epoch_v_lossC02, 'v_lossC03': epoch_v_lossC03,
             'm_lossC01': epoch_m_lossC01, 'm_lossC02': epoch_m_lossC02, 'm_lossC03': epoch_m_lossC03, 'adv': adversarial_loss,
             'valTotalLoss': val_G_loss, 'val_v_lossC01': val_epoch_v_lossC01, 'val_v_lossC02': val_epoch_v_lossC02, 'val_v_lossC03': val_epoch_v_lossC03,
             'val_m_lossC01': val_epoch_m_lossC01, 'val_m_lossC02': val_epoch_m_lossC02, 'val_m_lossC03': val_epoch_m_lossC03, 'val_adv': val_adversarial_loss},
            ignore_index=True)
        save_gan_train.to_csv(os.path.join(loss_dir, 'gan_train_loss_info.csv'))
        print(f'Epoch {epoch}/{num_epoch} Validation g_loss : {val_G_loss}')
        # Save a generator checkpoint every epoch.
        weight_g = f'G_epoch_{epoch}.pth'
        torch.save({'epoch': epoch,
                    'model_state_dict': netG.state_dict(),
                    'optimizer_state_dict': optimizerG.state_dict(),
                    'loss': G_loss}, os.path.join(save_weight_dir, weight_g))
|
{"/train_model_final.py": ["/data_utils.py"], "/_train.py": ["/FlowArrayDataset.py"], "/src/dataloader/TestDataset.py": ["/src/util/DataUtils.py"], "/src/dataloader/TrainDataset.py": ["/src/util/DataUtils.py"], "/src/model_handler/TestHandler.py": ["/src/util/DataUtils.py", "/src/model/Generator.py"], "/train.py": ["/src/model_handler/TrainHandler.py"], "/test.py": ["/src/model_handler/TestHandler.py", "/src/dataloader/TestDataset.py", "/src/model/Generator.py"], "/src/model_handler/TrainHandler.py": ["/src/dataloader/TrainDataset.py", "/src/dataloader/ValidationDataset.py", "/src/loss/VGGLoss.py", "/src/model/Discriminator.py", "/src/model/Generator.py", "/src/util/DataUtils.py"]}
|
30,129,761
|
aidotse/Team-Yenomze
|
refs/heads/main
|
/src/model/Discriminator.py
|
import torch
import torch.nn as nn
# discriminator from GAN (Goodfellow et al.)
class Discriminator(nn.Module):
    """Binary real/fake discriminator over 3-channel images (GAN, Goodfellow et al.).

    Conv backbone with strided downsampling, adaptive average pooling to 1x1,
    then two 1x1 convolutions producing one logit per sample; the forward pass
    returns sigmoid probabilities of shape (batch,).
    """

    def __init__(self):
        super(Discriminator, self).__init__()
        # Stem without batch-norm, then (in, out, stride) conv blocks.
        modules = [nn.Conv2d(3, 64, kernel_size=3, padding=1), nn.LeakyReLU(0.2)]
        backbone = [
            (64, 64, 2), (64, 128, 1), (128, 128, 2),
            (128, 256, 1), (256, 256, 2), (256, 512, 1), (512, 512, 2),
        ]
        for in_ch, out_ch, stride in backbone:
            modules.append(nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=stride, padding=1))
            modules.append(nn.BatchNorm2d(out_ch))
            modules.append(nn.LeakyReLU(0.2))
        # Global pooling + 1x1-conv classifier head.
        modules.append(nn.AdaptiveAvgPool2d(1))
        modules.append(nn.Conv2d(512, 1024, kernel_size=1))
        modules.append(nn.LeakyReLU(0.2))
        modules.append(nn.Conv2d(1024, 1, kernel_size=1))
        self.net = nn.Sequential(*modules)

    def forward(self, x):
        """Return per-sample probabilities, shape (batch,)."""
        logits = self.net(x)
        return torch.sigmoid(logits).view(logits.size()[0])
|
{"/train_model_final.py": ["/data_utils.py"], "/_train.py": ["/FlowArrayDataset.py"], "/src/dataloader/TestDataset.py": ["/src/util/DataUtils.py"], "/src/dataloader/TrainDataset.py": ["/src/util/DataUtils.py"], "/src/model_handler/TestHandler.py": ["/src/util/DataUtils.py", "/src/model/Generator.py"], "/train.py": ["/src/model_handler/TrainHandler.py"], "/test.py": ["/src/model_handler/TestHandler.py", "/src/dataloader/TestDataset.py", "/src/model/Generator.py"], "/src/model_handler/TrainHandler.py": ["/src/dataloader/TrainDataset.py", "/src/dataloader/ValidationDataset.py", "/src/loss/VGGLoss.py", "/src/model/Discriminator.py", "/src/model/Generator.py", "/src/util/DataUtils.py"]}
|
30,163,324
|
basitaliitpt/engish-dictionary-old-
|
refs/heads/main
|
/frontend.py
|
from tkinter import *
from backend import *
def execute_translate():
    """Look up the entry-field word via backend.translate and show the result.

    Clears the listbox first, then inserts the (single) translation result.
    """
    result_list.delete(0, END)
    definitions = translate(word_field.get())
    result_list.insert(1, definitions)
# Build the dictionary GUI: entry field, search button, result list, scrollbar.
window=Tk()
label1 = Label(window, text = "Enter Word")
label1.grid(row=0, column=0)
label2 = Label(window,text ="Definations")
label2.grid(row=2, column=0)
# Entry bound to a StringVar for the word being looked up.
word_text= StringVar()
word_field = Entry(window,textvariable = word_text)
word_field.grid(row = 0, column = 1)
# Listbox that displays the translation output.
result_list = Listbox(window, height=10, width = 200)
result_list.grid(row = 3, column = 1, columnspan = 6,rowspan=6)
#result_list.insert(1, "abcsasfsdfsd fs s df sd sd fesd s df sdf sdf sd sd sd sd sdfsdfs dw fdssd s ddadf")
search_btn = Button(window, text= "Search", command = execute_translate)
search_btn.grid(row = 0, column =2 )
# Vertical scrollbar wired to the listbox in both directions.
scbar = Scrollbar(window)
scbar.grid(row=3,column=7, rowspan=6)
result_list.configure(yscrollcommand = scbar.set)
scbar.configure(command = result_list.yview)
#print(word_field.get())
window.mainloop()
|
{"/frontend.py": ["/backend.py"]}
|
30,204,841
|
janvishah21/events-page
|
refs/heads/main
|
/event/views.py
|
from django.shortcuts import render, get_object_or_404, redirect
from .models import Event, Like
from .forms import CreateEventForm
from django.conf import settings
# Create your views here.
def home(request, liked):
    """Render the event list; anonymous users are redirected to sign-in.

    Each event is paired with whether the current user has liked it.
    ``liked`` is a template flag (True on the /liked route).

    Fixes: removed a leftover debug ``print`` and replaced
    ``len(queryset) != 0`` with ``QuerySet.exists()``, which issues a cheap
    EXISTS query instead of materialising every Like row.
    """
    if not request.user.is_authenticated:
        return redirect('user:signin')
    events = Event.objects.all()
    event_list = [
        {'event': event, 'liked': request.user.likes.filter(event=event).exists()}
        for event in events
    ]
    return render(request, 'event/event-list.html', {'events': event_list, 'liked': liked})
def toggle(request, pk, liked):
    """Toggle the current user's Like on event ``pk``, then redirect back.

    Redirects to the liked-events page when ``liked`` is truthy, otherwise to
    the home list.

    Fixes: the original evaluated the whole queryset (``len(likes)``) and then
    indexed it; ``first()`` fetches at most one row in a single query.
    """
    event = get_object_or_404(Event, pk=pk)
    existing = request.user.likes.filter(event=event).first()
    if existing is not None:
        existing.delete()
    else:
        like = Like()
        like.user = request.user
        like.event = event
        like.save()
    return redirect('event:liked' if liked else 'event:home')
def upload_file(f):
    """Stream an uploaded file to MEDIA_ROOT/event/event-img/<name> in chunks.

    Fixes: the original concatenated ``settings.MEDIA_URL`` (a URL prefix, not
    a filesystem path) with no separator before the file name, and used the
    private ``File._get_name()`` instead of the public ``name`` attribute.
    """
    import os  # local import keeps this fix self-contained
    dest_dir = os.path.join(settings.MEDIA_ROOT, 'event', 'event-img')
    os.makedirs(dest_dir, exist_ok=True)
    with open(os.path.join(dest_dir, f.name), 'wb') as destination:
        for chunk in f.chunks():
            destination.write(chunk)
def create(request):
    """Display and process the event-creation form.

    On valid POST, creates an Event (splitting the combined datetime into the
    model's separate date and time fields) and redirects home; otherwise the
    (possibly bound, error-carrying) form is re-rendered.

    Fixes: the form declares an optional image FileField but was bound without
    ``request.FILES``, so the image could never reach ``cleaned_data``; it is
    now bound and, when present, stored on the model.
    """
    create_form = CreateEventForm()
    if request.method == "POST":
        # Bind FILES as well as POST so the optional image field is populated.
        create_form = CreateEventForm(request.POST, request.FILES)
        if create_form.is_valid():
            event = Event()
            event.name = create_form.cleaned_data['name']
            event.location = create_form.cleaned_data['location']
            event.date = create_form.cleaned_data['time'].date()
            event.time = create_form.cleaned_data['time'].time()
            image = create_form.cleaned_data.get('image')
            if image:
                # ImageField stores the upload under its upload_to path on save.
                event.image = image
            event.save()
            return redirect('event:home')
    return render(request, 'event/create-event.html', { 'create_form' : create_form })
|
{"/event/views.py": ["/event/models.py", "/event/forms.py"]}
|
30,204,842
|
janvishah21/events-page
|
refs/heads/main
|
/event/forms.py
|
from django import forms
from .widgets import DateTimePickerInput
class CreateEventForm(forms.Form):
    """Form for creating an Event: name, combined date+time, location, image.

    The single 'time' DateTimeField is split into the model's separate
    date/time fields by the view; the image is optional.
    """
    name = forms.CharField(label='Event Name', max_length=100, widget=forms.TextInput(attrs={'class': 'form-control'}))
    # Combined date+time entered as 'dd/mm/YYYY HH:MM' via a picker widget.
    time = forms.DateTimeField(
        label='Date and Time',
        input_formats=['%d/%m/%Y %H:%M'],
        widget=DateTimePickerInput()
    )
    location = forms.CharField(label='Event Location', max_length=30, widget=forms.TextInput(attrs={'class': 'form-control'}))
    # Optional event image; views must bind request.FILES for it to arrive.
    image = forms.FileField(label='Event Image', required=False)
|
{"/event/views.py": ["/event/models.py", "/event/forms.py"]}
|
30,204,843
|
janvishah21/events-page
|
refs/heads/main
|
/event/models.py
|
from django.db import models
from django.conf import settings
# Create your models here.
class Event(models.Model):
    """An event with a name, date/time, location and optional image."""
    name = models.CharField(max_length=100)
    # Date and time are stored as separate fields ('date' is the verbose name).
    date = models.DateField('date')
    time = models.TimeField()
    location = models.CharField(max_length=30)
    # Optional image stored under MEDIA_ROOT/event/event-img.
    image = models.ImageField(upload_to="event/event-img", blank=True)
    def __str__(self):
        return self.name
class Like(models.Model):
    """A user's like of an event; at most one per (event, user) pair.

    Both foreign keys use related_name='likes', so ``event.likes`` and
    ``user.likes`` are the reverse accessors used by the views.
    """
    event = models.ForeignKey(Event, related_name='likes', on_delete=models.CASCADE)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='likes', on_delete=models.CASCADE)
    def __str__(self):
        return self.user.username + '-' + self.event.name
    class Meta:
        # Database-level guarantee that a user can like an event only once.
        unique_together = ['event', 'user']
|
{"/event/views.py": ["/event/models.py", "/event/forms.py"]}
|
30,204,844
|
janvishah21/events-page
|
refs/heads/main
|
/event/urls.py
|
from django.urls import path
from . import views
# Event app routes. '' and 'liked' share views.home, distinguished by the
# extra 'liked' kwarg that selects which list the template renders.
urlpatterns = [
    path('', views.home, {'liked' : False}, name='home'),
    # Toggle a like on event <pk>; <liked> tells the view where to redirect.
    path('toggle/<int:pk>/<int:liked>', views.toggle, name='toggle'),
    path('create-event', views.create, name='create'),
    path('liked', views.home, {'liked' : True}, name='liked')
]
|
{"/event/views.py": ["/event/models.py", "/event/forms.py"]}
|
30,204,845
|
janvishah21/events-page
|
refs/heads/main
|
/user/views.py
|
from django.contrib.auth import authenticate, login, logout
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from .forms import AuthenticationForm
from django.contrib import messages
from django.http import HttpResponse
def signin(request):
    """Authenticate and log a user in; authenticated visitors go straight home.

    GET renders an empty form; a failed POST re-renders the bound form with an
    error message.
    """
    if request.user.is_authenticated:
        return redirect('/')
    if request.method != 'POST':
        # Plain GET: show an empty sign-in form.
        return render(request, 'user/signin.html', {'form': AuthenticationForm()})
    username = request.POST['username']
    password = request.POST['password']
    user = authenticate(request, username=username, password=password)
    if user is not None:
        login(request, user)
        return redirect('/')
    messages.error(request, 'Authentication Failed !', extra_tags='alert alert-warning alert-dismissible fade show')
    return render(request, 'user/signin.html', {'form': AuthenticationForm(request.POST)})
def signout(request):
    """Log the current user out and redirect to the home page."""
    logout(request)
    return redirect('/')
def signup(request):
    """Register a new account.

    Validates that the two passwords match and that the username/email are
    unused; on failure, flashes errors and redirects back to the signup page.
    On success, creates the user and redirects to sign-in.

    Fixes: when a POSTed form failed field validation, the original fell
    through without returning (the view returned ``None``, a 500 in Django);
    the bound form is now re-rendered with its errors. Also dropped the unused
    ``user`` binding and the three separate check flags.
    """
    if request.method == 'POST':
        form = AuthenticationForm(request.POST)
        if form.is_valid():
            name = form.cleaned_data['name']
            username = form.cleaned_data['username']
            password1 = form.cleaned_data['password1']
            password2 = form.cleaned_data['password2']
            email = form.cleaned_data['email']
            failed = False
            if password1 != password2:
                failed = True
                messages.error(request, 'Password doesn\'t matched', extra_tags='alert alert-warning alert-dismissible fade show')
            if User.objects.filter(username=username).exists():
                failed = True
                messages.error(request, 'Username already exists', extra_tags='alert alert-warning alert-dismissible fade show')
            if User.objects.filter(email=email).exists():
                failed = True
                messages.error(request, 'Email already registered', extra_tags='alert alert-warning alert-dismissible fade show')
            if failed:
                messages.error(request, "Registration Failed", extra_tags='alert alert-warning alert-dismissible fade show')
                return redirect('user:signup')
            User.objects.create_user(username=username, password=password1, email=email, first_name=name)
            messages.success(request, f'Thanks for registering {name}!', extra_tags='alert alert-success alert-dismissible fade show')
            return redirect('user:signin')
        # Bound form failed field validation: redisplay it with its errors
        # (the original implicitly returned None here).
        return render(request, 'user/signup.html', {'form': form})
    form = AuthenticationForm()
    return render(request, 'user/signup.html', {'form': form})
|
{"/event/views.py": ["/event/models.py", "/event/forms.py"]}
|
30,214,747
|
xghjbvvg/economic-analysis
|
refs/heads/master
|
/demo.py
|
# from selenium import webdriver
# import time
#
# # 获取一个浏览器对象
# br = webdriver.Chrome()
#
# # 打开一个页面
# br.get('http://data.eastmoney.com/dataapi/invest/other?href=/api/Zgfxzs/json/AnalysisIndexNew.aspx¶msstr=index%3D1%26size%3D100%26code%3D11000200926')
#
# # 获取页面的源代码(运行后在内存中渲染的页面元素)
# print(br.page_source)
# import os
# import time
# from pathlib import Path
#
# path = str((Path(__file__).parent).absolute()) + "/static/img_" + str(time.time()).replace(".","") + '.png';
# fd = open(path, mode="w", encoding="utf-8")
# fd.close()
import socket
def get_host_ip():
    """Return this machine's outbound LAN IP address.

    Uses the UDP "connect" trick: connecting a datagram socket to a public
    address sends no packets but makes the OS pick the local interface, whose
    address ``getsockname()`` then reports.

    Fixes: the original declared ``global s`` and called ``s.close()``
    unconditionally in ``finally`` — if socket creation itself raised, ``s``
    was unbound and the resulting NameError masked the real error. The socket
    is now created before the try block and closed only if it exists.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        sock.connect(('8.8.8.8', 80))
        return sock.getsockname()[0]
    finally:
        sock.close()
# Script entry point: print this machine's LAN IP when run directly.
if __name__ == '__main__':
    print(get_host_ip())
|
{"/main/controllers/stock_api.py": ["/app.py", "/main/config/ResponseBuilder.py", "/main/config/core.py", "/main/dto/userDto.py", "/main/models/StockAhNameDict.py"], "/app.py": ["/main/config/config.py", "/main/controllers/stock_api.py", "/main/controllers/stock_account_api.py"], "/main/service/stock_account_service.py": ["/app.py"]}
|
30,402,079
|
18505161903/fwshare
|
refs/heads/master
|
/futures/CFTC.py
|
# encoding: utf-8
# import tensorflow as tf
import quandl
import pandas as pd
from pymongo import MongoClient
# CFTC positioning report script: for each contract code in a local Excel
# sheet, download the legacy "futures only" long/short report from Quandl,
# derive net positions and a 3-year (156-week) COT index per trader group,
# and accumulate the last 10 rows of each contract into one DataFrame.
#
# Fixes: renamed locals that shadowed the builtins ``max``/``min``, replaced
# the removed-in-pandas-2 ``DataFrame.append`` with ``pd.concat``, and
# narrowed the bare ``except:`` (which also swallowed KeyboardInterrupt) to
# ``except Exception``.
pd.set_option('display.width', None)        # unlimited display width
pd.set_option('display.max_rows', None)     # show all rows
pd.set_option('display.max_columns', None)  # show all columns
quandl.ApiConfig.api_key = '-GGCYDJNb2cxMLTvqTho'
# NOTE(review): 'input_col' is not a pandas.read_excel argument — probably
# index_col=0 was intended; confirm before changing behavior.
d = pd.read_excel(r'E:\code.xlsx', input_col=0)
code = d[['品种简称', 'code']]
data2 = pd.DataFrame()
for temp in d['code']:
    try:
        data = quandl.get('CFTC/' + temp + '_F_L_ALL', paginate=True)
        data['code'] = temp
        # Net positions per trader group (large specs / hedgers / small traders).
        data['大户净持仓'] = data.apply(lambda x: x['Noncommercial Long'] - x['Noncommercial Short'], axis=1)
        data['套保净持仓'] = data.apply(lambda x: x['Commercial Long'] - x['Commercial Short'], axis=1)
        data['散户净持仓'] = data.apply(lambda x: x['Nonreportable Positions Long'] - x['Nonreportable Positions Short'],
                                        axis=1)
        # 156-week rolling extremes of the net positions.
        chg = data[['大户净持仓', '套保净持仓', '散户净持仓']]
        rolling_max = chg.rolling(window=156).max().dropna()
        rolling_min = chg.rolling(window=156).min().dropna()
        hb = pd.merge(rolling_max, rolling_min, on=['Date'], how='outer')
        hb1 = pd.merge(data, hb, on=['Date'], how='outer')
        # COT index: current net position scaled to 0..100 between the rolling
        # min (suffix _y) and max (suffix _x).
        data['大户cot(%)'] = round(
            hb1.apply(lambda x: ((x['大户净持仓'] - x['大户净持仓_y']) / (x['大户净持仓_x'] - x['大户净持仓_y'])) * 100, axis=1), 2)
        data['套保cot(%)'] = round(
            hb1.apply(lambda x: ((x['套保净持仓'] - x['套保净持仓_y']) / (x['套保净持仓_x'] - x['套保净持仓_y'])) * 100, axis=1), 2)
        data['散户cot(%)'] = round(
            hb1.apply(lambda x: ((x['散户净持仓'] - x['散户净持仓_y']) / (x['散户净持仓_x'] - x['散户净持仓_y'])) * 100, axis=1), 2)
        data = data[['code', '大户净持仓', '套保净持仓', '散户净持仓', '大户cot(%)', '套保cot(%)', '散户cot(%)']]
        data = data.reset_index()
        data = pd.merge(data, code)
        # data = data[data['套保cot(%)']>=100 or data['套保cot(%)']<=0]
        data = data.tail(10)
        data2 = pd.concat([data2, data])
    except Exception:
        print('??')
        continue
# data2=data2[data2['套保cot(%)']>=100 or data2['套保cot(%)']<=0]
data2  # bare expression: only displays in an interactive session; no effect as a script
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,080
|
18505161903/fwshare
|
refs/heads/master
|
/example/fushare1.1.11/cons.py
|
import re
import datetime
import json
import os
# Variety codes traded on each Chinese futures exchange.
market_var = {'cffex': ['IF','IC','IH','T','TF','TS'],
              'dce':['C','CS','A','B','M','Y','P','FB','BB','JD','L','V','PP','J','JM','I'],
              'czce':['WH','PM','CF','SR','TA','OI','RI','MA','ME','FG','RS','RM','ZC','JR','LR','SF','SM','WT','TC','GN','RO','ER','SRX','SRY','WSX','WSY','CY','AP'],
              'shfe':['CU','AL','ZN','PB','NI','SN','AU','AG','RB','WR','HC','FU','BU','RU']
              }
# Flat list of every variety code across all exchanges.
# NOTE(review): 'vars' shadows the builtin of the same name; renaming would
# change this module's public interface, so it is left as-is.
vars=[]
[vars.extend(i) for i in market_var.values()]
# Default HTTP headers used for CZCE requests (browser impersonation).
headers = {'Host': 'www.czce.com.cn',
           'Connection': 'keep-alive',
           'Cache-Control': 'max-age=0',
           'Accept': 'text/html, */*; q=0.01',
           'X-Requested-With': 'XMLHttpRequest',
           'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
           'DNT': '1',
           'Referer': 'http://www.super-ping.com/?ping=www.google.com&locale=sc',
           'Accept-Encoding': 'gzip, deflate, sdch',
           'Accept-Language': 'zh-CN,zh;q=0.8,ja;q=0.6'
           }
# SHFE endpoints require a distinct (older) user agent.
shfe_headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}
# Spot price pages (100ppi).
SYS_SPOTPRICE_URL = 'http://www.100ppi.com/sf/day-%s.html'
SYS_SPOTPRICE_LATEST_URL = 'http://www.100ppi.com/sf/'
# Volume/position-ranking endpoints per exchange (several CZCE URL eras).
SHFE_VOLRANK_URL = 'http://www.shfe.com.cn/data/dailydata/kx/pm%s.dat'
CFFEX_VOLRANK_URL = 'http://www.cffex.com.cn/fzjy/ccpm/%s/%s/%s_1.csv'
DCE_VOLRANK_URL = 'http://www.dce.com.cn/publicweb/quotesdata/exportMemberDealPosiQuotesData.html?memberDealPosiQuotes.variety=%s&memberDealPosiQuotes.trade_type=0&contract.contract_id=all&contract.variety_id=%s&year=%s&month=%s&day=%s&exportFlag=txt'
CZCE_VOLRANK_URL_1 = 'http://www.czce.com.cn/cn/exchange/jyxx/pm/pm%s.html'
CZCE_VOLRANK_URL_2 = 'http://www.czce.com.cn/cn/exchange/%s/datatradeholding/%s.htm'
CZCE_VOLRANK_URL_3 = 'http://www.czce.com.cn/cn/DFSStaticFiles/Future/%s/%s/FutureDataHolding.htm'
# Warehouse-receipt endpoints.
DCE_RECIEPT_URL = 'http://www.dce.com.cn/publicweb/quotesdata/wbillWeeklyQuotes.html?wbillWeeklyQuotes.variety=all&year=%s&month=%s&day=%s'
SHFE_RECIEPT_URL_1 = 'http://www.shfe.com.cn/data/dailydata/%sdailystock.html'
SHFE_RECIEPT_URL_2 = 'http://www.shfe.com.cn/data/dailydata/%sdailystock.dat'
CZCE_RECIEPT_URL_1 = 'http://www.czce.com.cn/cn/exchange/jyxx/sheet/sheet%s.html'
CZCE_RECIEPT_URL_2 = 'http://www.czce.com.cn/cn/exchange/%s/datawhsheet/%s.htm'
CZCE_RECIEPT_URL_3 = 'http://www.czce.com.cn/cn/DFSStaticFiles/Future/%s/%s/FutureDataWhsheet.htm'
# Daily quote endpoints.
CFFEX_DAILY_URL = 'http://www.cffex.com.cn/fzjy/mrhq/%s/%s/%s_1.csv'
SHFE_DAILY_URL = 'http://www.shfe.com.cn/data/dailydata/kx/kx%s.dat'
SHFE_VWAP_URL = 'http://www.shfe.com.cn/data/dailydata/ck/%sdailyTimePrice.dat'
DCE_DAILY_URL = 'http://www.dce.com.cn//publicweb/quotesdata/dayQuotesCh.html'
CZCE_DAILY_URL_1 = 'http://www.czce.com.cn/cn/exchange/jyxx/hq/hq%s.html'
CZCE_DAILY_URL_2 = 'http://www.czce.com.cn/cn/exchange/%s/datadaily/%s.txt'
CZCE_DAILY_URL_3 = 'http://www.czce.com.cn/cn/DFSStaticFiles/Future/%s/%s/FutureDataDaily.txt'
# Date strings like 2016-01-01 / 20160101 / 2016/01/01.
DATE_PATTERN = re.compile(r'^([0-9]{4})[-/]?([0-9]{2})[-/]?([0-9]{2})')
# Leading variety letters of a contract symbol, e.g. 'RB' in 'RB1810'.
FUTURE_SYMBOL_PATTERN = re.compile(r'(^[A-Za-z]{1,2})[0-9]+')
# Column layouts for each exchange's raw data files.
CFFEX_COLUMNS = ['open','high','low','volume','turnover','open_interest','close','settle','change1','change2']
CZCE_COLUMNS = ['pre_settle','open','high','low','close','settle','change1','change2','volume','open_interest','oi_chg','turnover','final_settle']
CZCE_COLUMNS_2 = ['pre_settle','open','high','low','close','settle','change1','volume','open_interest','oi_chg','turnover','final_settle']
SHFE_COLUMNS = {'CLOSEPRICE': 'close', 'HIGHESTPRICE': 'high', 'LOWESTPRICE': 'low', 'OPENINTEREST': 'open_interest', 'OPENPRICE': 'open', 'PRESETTLEMENTPRICE': 'pre_settle', 'SETTLEMENTPRICE': 'settle', 'VOLUME': 'volume'}
SHFE_VWAP_COLUMNS = {':B1': 'date', 'INSTRUMENTID': 'symbol', 'TIME': 'time_range', 'REFSETTLEMENTPRICE': 'vwap'}
DCE_COLUMNS = ['open', 'high', 'low', 'close', 'pre_settle', 'settle', 'change1','change2','volume','open_interest','oi_chg','turnover']
DCE_OPTION_COLUMNS = ['open', 'high', 'low', 'close', 'pre_settle', 'settle', 'change1', 'change2', 'delta', 'volume', 'open_interest', 'oi_chg', 'turnover', 'exercise_volume']
OUTPUT_COLUMNS = ['symbol', 'date', 'open', 'high', 'low', 'close', 'volume', 'open_interest', 'turnover', 'settle', 'pre_settle', 'variety']
OPTION_OUTPUT_COLUMNS = ['symbol', 'date', 'open', 'high', 'low', 'close', 'pre_settle', 'settle', 'delta', 'volume', 'open_interest', 'oi_chg', 'turnover', 'implied_volatility', 'exercise_volume', 'variety']
# Chinese variety name -> DCE variety code.
DCE_MAP = {
    '豆一': 'A',
    '豆二': 'B',
    '豆粕': 'M',
    '豆油': 'Y',
    '棕榈油': 'P',
    '玉米': 'C',
    '玉米淀粉': 'CS',
    '鸡蛋': 'JD',
    '纤维板': 'FB',
    '胶合板': 'BB',
    '聚乙烯': 'L',
    '聚氯乙烯': 'V',
    '聚丙烯': 'PP',
    '焦炭': 'J',
    '焦煤': 'JM',
    '铁矿石': 'I'
}
def convert_date(date):
    """
    Transform a date value into a datetime.date object.

    :param date: datetime.date (returned unchanged) or a string such as
                 '2016-01-01', '20160101' or '2016/01/01'; month and day
                 must be zero-padded to two digits to match DATE_PATTERN.
    :return: datetime.date on success, None when the input cannot be parsed
    """
    if isinstance(date, datetime.date):
        return date
    if isinstance(date, str):
        match = DATE_PATTERN.match(date)
        if match:
            # DATE_PATTERN always captures exactly (year, month, day) on a
            # match, so no length check on groups() is needed.
            year, month, day = match.groups()
            return datetime.date(year=int(year), month=int(month), day=int(day))
    return None
def getJsonPath(name, moduleFile):
    """
    Build the path of a JSON config file located in the directory of the
    given module file.

    :param name: file name of the JSON config, e.g. 'calendar.json'
    :param moduleFile: __file__ of the calling module
    :return: absolute path string pointing at the config file
    """
    base_dir = os.path.abspath(os.path.dirname(moduleFile))
    return os.path.join(base_dir, '.', name)
def get_calendar():
    """
    Load the trading calendar (covers dates through the end of 2018).

    :return: list of trading days as 'YYYYMMDD' strings, in order
    """
    settingFileName = 'calendar.json'
    settingfilePath = getJsonPath(settingFileName, __file__)
    # Use a context manager so the file handle is closed even on error
    # (the original left the handle from open() dangling).
    with open(settingfilePath, "r") as f:
        return json.load(f)
def lastTradingDay(d):
    """
    Get the previous trading day.

    :param d: '%Y%m%d' string or datetime.date
    :return: previous trading day in the same type as the input,
             or False when d is not a trading day
    """
    calendar = get_calendar()
    if isinstance(d, str):
        if d not in calendar:
            print('Today is not tradingday:' + d)
            return False
        # Previous entry in the ordered calendar list.
        return calendar[calendar.index(d) - 1]
    elif isinstance(d, datetime.date):
        key = d.strftime('%Y%m%d')
        if key not in calendar:
            print('Today is not workingday:' + key)
            return False
        previous = calendar[calendar.index(key) - 1]
        # Convert back to a date object to mirror the input type.
        return datetime.datetime.strptime(previous, '%Y%m%d').date()
def get_latestDataDate(d):
    """
    Get the latest date for which exchange data is available.

    :param d: datetime.datetime
    :return: date string in '%Y%m%d' form
    """
    calendar = get_calendar()
    day_str = d.strftime('%Y%m%d')
    if day_str in calendar:
        # On a trading day, today's data only exists after the 17:00
        # publication cut-off; before that fall back to the prior session.
        if d.time() > datetime.time(17, 0, 0):
            return day_str
        return lastTradingDay(day_str)
    # Not a trading day: step backwards until we land on one.
    while d.strftime('%Y%m%d') not in calendar:
        d = d - datetime.timedelta(days=1)
    return d.strftime('%Y%m%d')
if __name__ == '__main__':
    # Manual smoke test: print the latest available data date for a
    # fixed timestamp (2018-10-05 17:01).
    d = datetime.datetime(2018,10,5,17,1,0)
    print(get_latestDataDate(d))
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,081
|
18505161903/fwshare
|
refs/heads/master
|
/example/futures_lots.py
|
import json
from pymongo import MongoClient
import pandas as pd

if __name__=='__main__':
    # Connect to the local MongoDB instance.
    client = MongoClient('localhost', 27017)
    futures = client.futures
    # Read the lots spreadsheet, using the first column as the index.
    # BUG FIX: the original passed input_col=0, which is not a valid
    # pandas.read_excel keyword and raises a TypeError at runtime;
    # index_col=0 is the intended argument.
    data = pd.read_excel(r'c:\lots.xlsx', index_col=0)
    df = pd.DataFrame(data)
    # Insert one document per row into the futures.lots collection.
    futures.lots.insert(json.loads(df.T.to_json()).values())
    print(json.loads(df.T.to_json()).values())
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,082
|
18505161903/fwshare
|
refs/heads/master
|
/opendatatools/common/string_util.py
|
# encoding: UTF-8
def remove_chinese(str):
    """Return the input string with all CJK unified ideographs
    (U+4E00..U+9FA5) removed."""
    # NOTE: the parameter shadows the builtin ``str``; kept as-is so
    # existing keyword callers are not broken.
    # Build the result in a single join instead of repeated ``+=``
    # concatenation, which is quadratic in the worst case.
    return "".join(w for w in str if not (u'\u4e00' <= w <= u'\u9fa5'))
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,083
|
18505161903/fwshare
|
refs/heads/master
|
/example/test5.py
|
# encoding: utf-8
# Ad-hoc exploration script: loads broker position records (from `start`
# onwards) out of the local MongoDB "futures" database and prints them.
# The commented-out lines are earlier sorting/grouping experiments kept
# for reference.
import pymongo,json
import pandas as pd
from pandas import Series,DataFrame
import future
from scipy.stats import pearsonr
import copy
from datetime import datetime, date, timedelta
pd.set_option('display.width', None)  # set character display width
pd.set_option('display.max_rows', None)  # show all rows
pd.set_option('display.max_columns', None)  # show all columns
start = "20190529"
client = pymongo.MongoClient('localhost', 27017)
futures = client.futures
unit = futures.unit
position = futures.position
market = futures.market
positions = futures.positions
# Load every position document dated on/after `start` into a DataFrame.
market = DataFrame(list(position.find({'date': {'$gte': start}})))
# sort_dict= sorted(market['volume'].items(),key=lambda X:X[1])
# market = market.sort_values( ascending=False, by=['variety','date','open_interest'])
# market = market.groupby(['date','variety']).head(2)
# market = market.groupby(['date','variety'])['close'].head(2)
# market = market.groupby(['date','variety'])["open_interest"].sum()
print(market)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,084
|
18505161903/fwshare
|
refs/heads/master
|
/opendatatools/realestate/__init__.py
|
# encoding: utf-8
# Re-export the Lianjia real-estate interface and declare the public API.
from .realestate_interface import *
__all__ = ['get_rsf_list_lianjia', 'set_proxies', 'get_esf_list_by_distinct_lianjia']
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,085
|
18505161903/fwshare
|
refs/heads/master
|
/futures/mainSubmainPosition.py
|
import tushare as ts
import pandas as pd
from pymongo import MongoClient
import fushare, json

pd.set_option('display.width', None)  # set character display width
pd.set_option('display.max_rows', None)  # show all rows
pd.set_option('display.max_columns', None)  # show all columns

client = MongoClient('localhost', 27017)
# SECURITY: the tushare API token is hard-coded here; move it to an
# environment variable or config file and rotate the exposed key.
pro = ts.pro_api('c0cad8f56caba4e70702d606290d04f88514a6bef046f60d13144151')

# Variety symbol list to query broker holdings for.
var = fushare.get_mainSubmain_bar(type='var')
var = var['symbol']
print(var)

for vars in var:
    try:
        df = pro.fut_holding(symbol=vars)
        print(df)
        df2 = df.fillna(0)
        # Drop the exchange's aggregate "futures company member" rows.
        df2 = df2.loc[df2['broker'] != '期货公司会员']
        print(df2)
        # mainSubmainPosition.insert_many(json.loads(df2.T.to_json()).values())
        # print(json.loads(df2.T.to_json()).values())
    except Exception as e:
        # Best-effort per-symbol fetch, but report the failure instead of
        # silently swallowing it (the original bare ``except: pass`` also
        # hid KeyboardInterrupt/SystemExit).
        print(vars, 'failed:', e)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,086
|
18505161903/fwshare
|
refs/heads/master
|
/example/futures_mainSignal.py
|
# encoding: utf-8
# Builds a daily trading-signal table: picks the dominant contract per
# (date, variety) by max open interest, computes net broker positions,
# merges the two, derives a signal column and writes the result to the
# futures.mainSignal MongoDB collection.
import pandas as pd
from pandas import DataFrame
import json
from pymongo import MongoClient
pd.set_option('display.width', None)  # set character display width
pd.set_option('display.max_rows', None)  # show all rows
pd.set_option('display.max_columns', None)  # show all columns
client = MongoClient('localhost', 27017)
db = client.futures
market=db.market
position=db.position
mainSignal=db.mainSignal
date='20190101'
market=DataFrame(list(market.find({'date':{'$gte':date}})))
position=DataFrame(list(position.find({'date':{'$gte':date}})))
# Selection conditions
# market=market.loc[market['open_interest']>1000]
# Group by date and variety; keep the unique dominant contract per group.
# Method 1: within each group filter the row(s) with the max open interest.
market=market.groupby(['date','variety']).apply(lambda x: x[x.open_interest==x.open_interest.max()])
# print(market.head())
# Method 2: use transform to get the original DataFrame index, then filter.
# market1 = market.groupby(['date','variety'])['open_interest'].transform(max)
# market=market1=market['open_interest']
# print(market)
# market = market.groupby(['date','variety'])['open_interest'].agg(max)
# De-duplicate contracts
# market=market.drop_duplicates()
# Price change over the session.
market['change'] = market['close'] - market['open']
# Drop _id, index, date, variety (selected by column position —
# NOTE(review): fragile if the document schema changes; verify positions).
market.drop(market.columns[[0,2,4,12]], axis=1,inplace=True)
# marketdata = market.copy()
#
# Net position change: long minus short open interest per (date, variety),
# plus its day-over-day delta.
netPosition=position.groupby(['date','variety'])[['long_openIntr','short_openIntr']].sum()
netPosition['净持仓']=netPosition.apply(lambda x:x['long_openIntr']-x['short_openIntr'],axis=1)
netPosition['上一日净持仓']=netPosition.groupby('variety')['净持仓'].shift(1)
netPosition['净持仓变化量']=netPosition.apply(lambda x: x['净持仓']-x['上一日净持仓'],axis=1)
netPosition=netPosition.dropna().reset_index()
# Merge the two tables on (date, variety).
df=pd.merge(netPosition,market,on=['date', 'variety'],how='outer')
# Signal: 0 when net-position change and price change agree in sign,
# else +1 / -1 by the sign of the net-position change.
df['交易信号'] = df.apply(lambda x: 0 if x['净持仓变化量']*x['change']>=0 else 1 if x['净持仓变化量']>0 else -1,axis=1)
# Drop NaN rows and de-duplicate before persisting.
df=df.dropna().drop_duplicates()
df=pd.DataFrame(df)
# print(df.head())
mainSignal.insert(json.loads(df.T.to_json()).values())
print(json.loads(df.T.to_json()).values())
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,087
|
18505161903/fwshare
|
refs/heads/master
|
/futures/差价走势.py
|
# encoding: utf-8
# Plots the closing-price spread between two contracts of the same
# variety (var1 - var2) over time, using data from MongoDB futures2.market3.
import pandas as pd
import pymongo
from pandas import DataFrame
import matplotlib.pyplot as plt
pd.set_option('display.width', None)  # set character display width
pd.set_option('display.max_rows', None)  # show all rows
pd.set_option('display.max_columns', None)  # show all columns
client = pymongo.MongoClient('localhost', 27017)
futures = client.futures2
market = futures.market3
start = '20150105'
end ='20200811'
var1 = 'JD2001'
var2 = 'JD1910'
# One DataFrame per contract, keeping the last record per (date, variety, symbol).
market1 = DataFrame(list(market.find({'date': {'$gte': start}, 'symbol': var1}))).drop_duplicates(['date','variety','symbol'], 'last')
market2 = DataFrame(list(market.find({'date': {'$gte': start}, 'symbol': var2}))).drop_duplicates(['date','variety','symbol'], 'last')
# Dominant contract close
market1[var1] = market1['close']
# Sub-dominant contract close
market2[var2] = market2['close']
# Merge the two tables on date.
merge = pd.merge(market1,market2, on=['date'], how='left')
merge = merge[['date',var1,var2]]
merge['差价'] = merge.apply(lambda x: x[var1] - x[var2], axis=1)
print(merge.tail(20))
# Plot the spread.
# NOTE(review): pd.plotting.plot_params was removed in newer pandas;
# confirm the pinned pandas version still provides it.
merge = merge.set_index('date')
with pd.plotting.plot_params.use('x_compat', True):
    merge[['差价']].plot(color='r', title=start +' '+ end +' '+var1+' ' + var2+' 差价: '+str(merge['差价'].iloc[-1]))
plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly
plt.show()
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,088
|
18505161903/fwshare
|
refs/heads/master
|
/example/LSTM/ZcSummary.py
|
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
from sklearn import metrics
from IPython import display
class ZcSummary:
    """Load the California housing dataset, split it into training and
    validation sets, and print summary statistics for each split."""

    def read_csv(self):
        """Read the dataset from CSV into a row-shuffled DataFrame."""
        frame = pd.read_csv("california_housing_train.csv", sep=",")
        # Shuffle so any ordering in the source file cannot bias the
        # head/tail train/validation split done in main().
        shuffled = np.random.permutation(frame.index)
        return frame.reindex(shuffled)

    def preprocess_features(self, california_housing_dataframe):
        """Select the model input columns and add a derived feature."""
        feature_columns = [
            "latitude",
            "longitude",
            "housing_median_age",
            "total_rooms",
            "total_bedrooms",
            "population",
            "households",
            "median_income",
        ]
        processed_features = california_housing_dataframe[feature_columns].copy()
        # New derived attribute: rooms per person.
        processed_features["rooms_per_person"] = (
            california_housing_dataframe["total_rooms"]
            / california_housing_dataframe["population"])
        return processed_features

    def preprocess_targets(self, california_housing_dataframe):
        """Build the label frame with house values scaled to thousands."""
        output_targets = pd.DataFrame()
        # Very large raw values can cause trouble during training, so the
        # price is scaled down by 1000 before being used as the label.
        output_targets["median_house_value"] = (
            california_housing_dataframe["median_house_value"] / 1000.0)
        return output_targets

    def main(self):
        """Run the full pipeline and print split summaries."""
        tf.logging.set_verbosity(tf.logging.ERROR)
        pd.options.display.max_rows = 10
        pd.options.display.float_format = '{:.1f}'.format
        frame = self.read_csv()
        # Training split: first 12000 of the 17000 samples.
        training_examples = self.preprocess_features(frame.head(12000))
        training_targets = self.preprocess_targets(frame.head(12000))
        # Validation split: remaining 5000 samples.
        validation_examples = self.preprocess_features(frame.tail(5000))
        validation_targets = self.preprocess_targets(frame.tail(5000))
        # Describe every split with identical formatting.
        for title, data in (
                ("Training examples summary:", training_examples),
                ("Validation examples summary:", validation_examples),
                ("Training targets summary:", training_targets),
                ("Validation targets summary:", validation_targets)):
            print(title)
            display.display(data.describe())
# Entry point: build the summary object and run the full pipeline.
t = ZcSummary()
t.main()
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,089
|
18505161903/fwshare
|
refs/heads/master
|
/example/futures_gains.py
|
# encoding: utf-8
# For each variety in the index-market collection, computes the maximum
# gain/loss over a rolling 60-row window of settlement highs/lows and
# prints the result. The trailing commented lines are earlier variants.
import pandas as pd
from pandas import *
import datetime
import json
from pymongo import MongoClient
pd.set_option('display.width', None)  # set character display width
pd.set_option('display.max_rows', None)  # show all rows
pd.set_option('display.max_columns', None)  # show all columns
client = MongoClient('localhost', 27017)
db = client.futures
indexMarket = db.indexMarket
indexMarket = DataFrame(list(indexMarket.find()))
df = indexMarket[['date', 'variety', 'set_high', 'set_low']]
for i in set(indexMarket['variety']):
    # # print(i)
    try:
        df2 = df[df['variety'] == i]
        # print(df2)
        # NOTE(review): ``max``/``min`` below shadow the Python builtins
        # for the rest of this script — consider renaming.
        max = df2.rolling(window=60, on='variety').max().dropna() # ,min_periods=1
        # print(max.tail(5))
        min = df2.rolling(window=60, on='variety').min().dropna()
        # # print(min.tail(5))
        hb = pd.merge(max, min, on=['date', 'variety'], how='outer').fillna(method='ffill')
        # print(hb)
        data=hb[['date','variety','set_high_x','set_low_y']]
        # print(data.head(5))
        # Gain (%): rolling high vs rolling low.
        data['gains'] = round((data['set_high_x'] / data['set_low_y'] - 1) * 100, 2)
        # Loss (%)
        data['lesses'] = round((1 - data['set_low_y'] / data['set_high_x']) * 100, 2)
        data['variety']='jd'
        print(data)
    except:
        # NOTE(review): bare except silently skips varieties that fail.
        pass
        continue
# print('完成')
# data['variety']='jd'
# data['gains']=round(data.apply(lambda x: (x['set_high_x'] / x['set_low_y']-1)*100, axis=1), 2)
# Loss (%)
# data['losses'] = round(data.apply(lambda x: (1 - x['set_low_y'] / x['set_high_x']) * 100, axis=1), 2)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,090
|
18505161903/fwshare
|
refs/heads/master
|
/example/stock_demo.py
|
# encoding: utf-8
# Demo of the opendatatools stock API; earlier examples are left
# commented out for reference.
from opendatatools import stock
if __name__ == '__main__':
    # Fetch index list, market=SH/SZ/CSI
    #index_list = stock.get_index_list('CSI')
    #print(index_list)
    # Fetch index constituent data
    #stock_list = stock.get_index_component('000050.SH')
    #print(stock_list)
    # Fetch index constituent data
    #stock_list = stock.get_index_component('399300.SZ')
    #print(stock_list)
    # Fetch index constituent data
    #stock_list = stock.get_index_component('000300.CSI')
    #print(stock_list)
    # Fetch margin trading (rzrq) market info
    #df_total, df_detail = stock.get_rzrq_info(market = 'SH', date = '20040529')
    #print(df_total)
    #print(df_detail)
    # Fetch dividend info
    df = stock.get_dividend('600000.SH')
    print(df)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,091
|
18505161903/fwshare
|
refs/heads/master
|
/opendatatools/stock/__init__.py
|
# encoding: utf-8
# Re-export the stock interface functions and declare the public API.
from .stock_interface import *
__all__ = ['get_index_list', 'get_index_component', 'get_rzrq_info', 'get_dividend']
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,092
|
18505161903/fwshare
|
refs/heads/master
|
/example/test_jiqixuexi.py
|
# Scratch script: a disabled linear-regression experiment (left commented
# out) followed by a CSV load used for inspection.
# import matplotlib.pyplot as plt
# from sklearn import linear_model
# import numpy as np
import pandas as pd
# datasets_X = []
# datasets_Y = []
# fr = open('e:\signal.csv','r')
# lines = fr.readlines()
# # for line in lines:
# # # items = line.strip().split(',')
# # datasets_X.append(int(items[0]))
# # datasets_Y.append(int(items[1]))
# length = len(datasets_X)
# datasets_X = np.arry(datasets_X).reshape([length,1])
# datasets_Y = np.arry(datasets_Y)
# minX = min(datasets_X)
# maxX = max(datasets_X)
# X = np.arange(minX,maxX).reshape([-1,1])
#
# linear = linear_model.LinearRegression()
# linear.fit(datasets_X,datasets_Y)
# plt.scatter(datasets_X,datasets_Y,color = 'red')
# plt.plot(X.linear.predict(X),color = 'blue')
# plt.ylabel('Price')
# plt.show()
# Load the signal CSV with 'date' parsed and used as the index.
# NOTE(review): encoding="ANSI" is Windows-specific; confirm on other OSes.
df = pd.read_csv(r'e:\signal.csv',encoding = "ANSI",parse_dates=['date'], index_col='date')
# df['date'] = pd.to_datetime(df['date'])
print("s")
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,093
|
18505161903/fwshare
|
refs/heads/master
|
/futures/数据库去重.py
|
# Disabled Python-2-era helper for removing duplicate documents from every
# collection of a MongoDB database; kept for reference.
# def delete_single_database_repeat_data():
#     import pymongo
#     client = pymongo.MongoClient('localhost', 27017)
#     db=client.futures2 #这里是将要清洗数据的数据库名字
#     for table in db.collection_names():
#         print 'table name is ',table
#         collection=db[table]
#         for url in collection.distinct('gif_url'):#使用distinct方法,获取每一个独特的元素列表
#             num= collection.count({"gif_url":url})#统计每一个元素的数量
#             print num
#             for i in range(1,num):#根据每一个元素的数量进行删除操作,当前元素只有一个就不再删除
#                 print 'delete %s %d times '% (url,i)
#                 #注意后面的参数, 很奇怪,在mongo命令行下,它为1时,是删除一个元素,这里却是为0时删除一个
#                 collection.remove({"gif_url":url},0)
#                 for i in collection.find({"gif_url":url}):#打印当前所有元素
#                     print(i)
# encoding: utf-8
# Active part: queries one day's position records for a given variety and
# short-side member from futures2.position and prints them. The trailing
# commented loop is an earlier per-variety de-duplication pass.
import pymongo, json
import pandas as pd
from pandas import DataFrame
pd.set_option('display.width', None)  # set character display width
pd.set_option('display.max_rows', None)  # show all rows
pd.set_option('display.max_columns', None)  # show all columns
client = pymongo.MongoClient('localhost', 27017)
futures = client.futures2
p=futures.position
m=futures.market
unit=futures.unit
unit=DataFrame(list(unit.find()))
start='20200213'
end='20200213'
mem='一德期货'
var='I'
position = futures.position
position = DataFrame(list(position.find({'date': '20200213','variety': var,'short_party_name':mem})))
print(position)
# for var in unit['variety']:
#     try:
#         position = futures.position
#         position = DataFrame(list(position.find({'variety':var})))
#         # position = DataFrame(list(position.find()))
#         position=position[['symbol','vol_party_name','vol','vol_chg','long_party_name','long_openIntr','long_openIntr_chg','short_party_name','short_openIntr','short_openIntr_chg','variety','date']]
#         position.sort_values('date', inplace=False)
#         position=position.drop_duplicates()
#         p.insert_many(json.loads(position.T.to_json()).values())
#         print(json.loads(position.T.to_json()).values())
#         # print(position.head())
#     except:
#         print('数据异常')
#         continue
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,094
|
18505161903/fwshare
|
refs/heads/master
|
/example/futures_dataframe_Mongodb.py
|
# encoding: utf-8
# Backfills the futures.marketdata MongoDB collection with daily quotes
# from the four Chinese futures exchanges over a fixed date range.
import pandas as pd
import tushare as ts
import datetime
import json
from pymongo import MongoClient
pd.set_option('display.width', None)  # set character display width
pd.set_option('display.max_rows', None)  # show all rows
pd.set_option('display.max_columns', None)  # show all columns
if __name__ == '__main__':
    # Connect to the database
    client = MongoClient('localhost', 27017)
    db = client.futures
    marketdata = db.marketdata
    # Date range to backfill
    begin = datetime.date(2018, 9, 22)
    end = datetime.date(2018, 10, 21)
    for i in range((end - begin).days + 1):
        day = begin + datetime.timedelta(days=i)
        days = day.strftime('%Y-%m-%d')
        # Daily quotes from the four exchanges
        dce = ts.get_dce_daily(days)
        shf = ts.get_shfe_daily(days)
        zce = ts.get_czce_daily(days)
        cff = ts.get_cffex_daily(days)
        frames = [dce, shf, zce, cff]
        try:
            # Concatenate the four exchanges' quote tables
            df2 = pd.concat(frames)
            # df2 = df2.convert_objects(convert_numeric=True)
            df2 = df2.apply(pd.to_numeric, errors="ignore")
            df2 = df2.reset_index()
            print(df2)
            marketdata.insert(json.loads(df2.T.to_json()).values())
            # print(json.loads(df2.T.to_json()).values())
        except:
            # NOTE(review): bare except — non-trading days fall through here.
            print(days, frames, '数据异常')
            continue
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,095
|
18505161903/fwshare
|
refs/heads/master
|
/example/futures_price.py
|
# encoding: utf-8
# Computes a volume-weighted "set_close" per variety from the four
# exchanges' daily quotes and appends the result to c:\price.csv.
from opendatatools import futures
import datetime
import os
import pandas as pd
import math
import tushare as ts

# (FIX: the original imported ``datetime`` twice; the duplicate is removed.)
pd.set_option('display.width', None)  # set character display width
pd.set_option('display.max_rows', None)  # show all rows
pd.set_option('display.max_columns', None)  # show all columns

if __name__ == '__main__':
    # Start from a clean output file.
    if os.path.exists(r"c:\price.csv"):
        os.remove(r"c:\price.csv")
    begin = datetime.date(2018, 10, 25)
    end = datetime.date(2018, 10, 25)
    for i in range((end - begin).days + 1):
        day = begin + datetime.timedelta(days=i)
        days = day.strftime('%Y-%m-%d')
        # Daily quotes from the four exchanges.
        dce = ts.get_dce_daily(days)
        shf = ts.get_shfe_daily(days)
        zce = ts.get_czce_daily(days)
        cff = ts.get_cffex_daily(days)
        frames = [dce, shf, zce, cff]
        try:
            df2 = pd.concat(frames)
            #print(df2)
            df2 = df2.dropna(axis=0, how='any')
            df2['close'] = df2['close'].astype(float)
            # Per-contract turnover proxy used for the weighted average.
            df2['test'] = df2['close'] * df2['volume']
            print(df2.head())
            # FIX: select the columns with a list — the original tuple form
            # groupby(...)['volume', 'test'] is deprecated/removed in pandas.
            df2 = df2.groupby('variety')[['volume', 'test']].sum()
            # Volume-weighted close per variety, rounded.
            df2['set_close'] = round(df2['test'] / df2['volume'])
            df2['date'] = days
            df2 = df2.dropna(axis=0, how='any')
            df2 = df2.reset_index()
            df2 = df2[['date', 'variety', 'set_close']]
        except Exception:
            # Narrowed from a bare ``except`` so KeyboardInterrupt escapes.
            print(days, frames, '数据异常')
            continue
        # Append after the first successful day (header written once).
        if os.path.exists(r"c:\price.csv"):
            df2.to_csv(r"c:\price.csv", mode='a', encoding='ANSI', header=False)
        else:
            df2.to_csv(r"c:\price.csv", encoding='ANSI')
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,096
|
18505161903/fwshare
|
refs/heads/master
|
/tbpy/test.py
|
# Collects daily bars from the four exchanges for one date range, then
# queries the main/sub-main market spread for every variety found.
import pandas as pd
import datetime
from fushare.dailyBar import get_future_daily
from futures.CfdBasis import get_mainSubmainMarket
from futures.CfdBasis import get_mainSubmainMarket_bar
begin = datetime.date(2020, 10, 20)
end = datetime.date(2020, 10, 20)
df = pd.DataFrame()
for i in range((end - begin).days + 1):
    # print(i)
    day = begin + datetime.timedelta(days=i)
    days = day.strftime('%Y%m%d')
    # Append each exchange's daily bars for this date.
    for market in ['dce', 'cffex', 'shfe', 'czce']:
        df = df.append(get_future_daily(start=begin, end=end, market=market))
# Unique variety codes seen across all bars.
varList = list(set(df['variety']))
for var in varList:
    if var:
        # NOTE(review): ``days`` holds the last loop value here — confirm
        # that is the intended query date.
        ry = get_mainSubmainMarket(days, var)
        print(ry)
        # if ry:
        #     dfL = dfL.append(pd.DataFrame([ry], index=[var], columns=['差价', 'basicPrice(%)', 'symbol1', 'symbol2', 'M-differ', 'Slope(%)']))
        # dfL['date'] = days
        # # print(dfL)
        # symbolList = list(set(dfL['symbol2']))#远月合约
        # for symbol in symbolList:
        #     df=df[['date', 'variety', 'symbol', 'close']]
        #     if symbol:
        #         df1= df[df['symbol'] == symbol]
        #         # dfl=df.append(df1)
        #         print(df1)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,097
|
18505161903/fwshare
|
refs/heads/master
|
/example/market_tushare.py
|
# encoding: utf-8
# Backfills the futures.market MongoDB collection with daily quotes from
# the four Chinese futures exchanges via fushare.
import pandas as pd
import datetime
import json
from pymongo import MongoClient
import fushare

fushare.get_czce_daily()
# BUG FIX: the original called ``print(tushare.__version__)`` but tushare
# is never imported in this script (only fushare is), which raised a
# NameError at startup. The line is disabled rather than guessed at.
# print(tushare.__version__)
pd.set_option('display.width', None)  # set character display width
pd.set_option('display.max_rows', None)  # show all rows
pd.set_option('display.max_columns', None)  # show all columns

if __name__ == '__main__':
    # Connect to the database
    client = MongoClient('localhost', 27017)
    db = client.futures
    market = db.market
    # Date range to backfill
    begin = datetime.date(2018, 4, 8)
    end = datetime.date(2019, 4, 8)
    for i in range((end - begin).days + 1):
        day = begin + datetime.timedelta(days=i)
        days = day.strftime('%Y%m%d')
        # Daily quotes from the four exchanges
        dce = fushare.get_dce_daily(days)
        shf = fushare.get_shfe_daily(days)
        zce = fushare.get_czce_daily(days)
        cff = fushare.get_cffex_daily(days)
        frames = [dce, shf, zce, cff]
        try:
            # Concatenate the four exchanges' quote tables
            df2 = pd.concat(frames)
            # Drop incomplete rows before persisting.
            df2 = df2.dropna(axis=0, how='any')
            # df2 = df2.convert_objects(convert_numeric=True)
            df2 = df2.dropna(axis=0, how='any')
            df2 = df2.reset_index()
            # print(df2)
            market.insert(json.loads(df2.T.to_json()).values())
            print(json.loads(df2.T.to_json()).values())
        except Exception:
            # Narrowed from a bare ``except`` so Ctrl-C still works;
            # non-trading days fall through here.
            print(days, '数据异常')
            continue
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,098
|
18505161903/fwshare
|
refs/heads/master
|
/opendatatools/common/__init__.py
|
# encoding: UTF-8
# Aggregate the common helpers (REST agent, date and string utilities).
from .rest_agent import *
from .date_util import *
from .string_util import *
# Only RestAgent is part of the declared public API.
__all__ = ['RestAgent']
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,099
|
18505161903/fwshare
|
refs/heads/master
|
/example/test1.py
|
# encoding: utf-8
"""Scratch script for manually exercising the local futures MongoDB:
one live query plus commented-out insert/delete/update examples."""
import pymongo,json
import pandas as pd
from pandas import Series,DataFrame
import csv
from scipy.stats import pearsonr
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import time,datetime
# from datetime import datetime
pd.set_option('display.width', None)  # unlimited display width
pd.set_option('display.max_rows', None)  # show all rows
pd.set_option('display.max_columns', None)  # show all columns
client = pymongo.MongoClient('localhost', 27017)
futures = client.futures
lots = futures.lots
signal = futures.indexMarket
unit = futures.unit
market = DataFrame(list(signal.find({'date': {'$gte': '20190601'}})))
# NOTE(review): this rebinds `market` to a boolean duplicate-row mask (a
# Series) and the result is never used afterwards — looks like leftover
# debugging; confirm before relying on `market` below.
market=market.duplicated()
# print(market)
# # Query one document
mydict = {"date": "20190801"}
x = signal.find_one(mydict)
print(x)
# # Insert example
# mydict = {"variety": "AAAA","lots":5}
# lots.insert_one(mydict)
# # Delete examples
# myquery = {"variety": "IF"}
# unit.delete_one(myquery)
# myquery = {"variety": "IF"}
# unit.delete_one(myquery)
# Update example
# myquery = {"variety": "IF","lots":5}
# newvalues = {"$set": {"variety": "IF","lots":1}}
# lots.update_one(myquery, newvalues)
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,100
|
18505161903/fwshare
|
refs/heads/master
|
/futures/差价图.py
|
# encoding: utf-8
"""Plot the price spread between two futures varieties.

For each variety, builds an open-interest-weighted index close per date
from the MongoDB quote collection, merges the two series on date, and
plots the difference (var1 - var2) with the latest spread in the title.
"""
import pandas as pd
import pymongo
from pandas import DataFrame
import matplotlib.pyplot as plt
pd.set_option('display.width', None)  # unlimited display width
pd.set_option('display.max_rows', None)  # show all rows
pd.set_option('display.max_columns', None)  # show all columns
client = pymongo.MongoClient('localhost', 27017)
futures = client.futures2
market = futures.market2
start = '20190101'
end ='20200306'
# The two varieties being compared (presumably rapeseed oil vs soybean oil
# index codes — confirm against the data source).
var1 = 'OI'
var2 = 'Y'
print(var1)
# Latest record wins when (date, variety, symbol) appears more than once.
market1 = DataFrame(list(market.find({'date': {'$gte': start}, 'variety': var1}))).drop_duplicates(['date','variety','symbol'], 'last')
market2 = DataFrame(list(market.find({'date': {'$gte': start}, 'variety': var2}))).drop_duplicates(['date','variety','symbol'], 'last')
print(market1.head())
# Open-interest-weighted index close for var1.
market1['cv'] = market1.apply(lambda x: x['close'] * x['open_interest'], axis=1)
market1 = market1.groupby(['date', 'variety'])[['cv', 'open_interest']].sum()
market1[var1] = market1['cv'] / market1['open_interest']
market1 = market1[var1].reset_index()
# Open-interest-weighted index close for var2.
market2['cv'] = market2.apply(lambda x: x['close'] * x['open_interest'], axis=1)
market2 = market2.groupby(['date', 'variety'])[['cv', 'open_interest']].sum()
market2[var2] = market2['cv'] / market2['open_interest']
market2 = market2[var2].reset_index()
# Merge the two index series on date.
merge = pd.merge(market1,market2, on=['date'], how='left')
merge = merge[['date',var1,var2]]
merge['差价'] = merge.apply(lambda x: x[var1] - x[var2], axis=1)
print(merge.tail(20))
# Plot the spread over time.
merge = merge.set_index('date')
with pd.plotting.plot_params.use('x_compat', True): # NOTE(review): plot_params was removed in modern pandas — verify the installed version supports this
    merge[['差价']].plot(color='r', title=start +' '+ end +' '+var1+var2+' 价差:'+str(int(merge['差价'].iloc[-1])))
# mergee['today_net'].plot(secondary_y=['today_net'])
# mergee.ylabel('净持仓')
plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK labels correctly
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly
plt.show()
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,101
|
18505161903/fwshare
|
refs/heads/master
|
/opendatatools/hkex/hkex_interface.py
|
# encoding: UTF-8
"""Public interface functions for HKEx data retrieval."""
import datetime
from .hkex_agent import HKExAgent
# Shared module-level agent used by every interface function below.
hkex_agent = HKExAgent()
def set_proxies(proxies):
    """Forward HTTP proxy settings to the shared module-level HKEx agent."""
    hkex_agent.set_proxies(proxies)
def get_lgt_share(market = 'SH', date = None):
    """Fetch 'lgt' shareholding data from the shared HKEx agent.

    With an explicit *date*, queries that date directly. With no date,
    probes today and up to four preceding calendar days and returns the
    first non-empty result, or None if all five come back empty.
    (Presumably 'lgt' = Stock Connect northbound holdings — confirm
    against the agent implementation.)
    """
    if date is not None:
        # Explicit date: single direct lookup, no retry.
        return hkex_agent.get_lgt_share(market, date)
    for offset in range(5):
        # Sample "now" each iteration, matching the original retry loop.
        probe = datetime.datetime.now() - datetime.timedelta(days=offset)
        date = datetime.datetime.strftime(probe, "%Y-%m-%d")
        frame = hkex_agent.get_lgt_share(market, date)
        if len(frame) > 0:
            return frame
    return None
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
30,402,102
|
18505161903/fwshare
|
refs/heads/master
|
/futures/净持仓.py
|
# encoding: utf-8
"""Chart the five watched brokers' share of total net-short open interest.

Reads member position records for the JD (egg) futures from MongoDB,
computes per-(date, symbol) net-short totals, the subtotal held by five
selected brokers, and their ratio, then plots ratio (left axis) against
absolute net-short (right axis) for symbol JD2005.
"""
import pandas as pd
from pandas import *
import numpy as np
from pymongo import MongoClient
import matplotlib.pyplot as plt

pd.set_option('display.width', None)        # unlimited display width
pd.set_option('display.max_rows', None)     # show all rows
pd.set_option('display.max_columns', None)  # show all columns

client = MongoClient('localhost', 27017)
db = client.futures3
jd = db.jd
start = '20191107'
# The five brokers ("五少") whose combined net-short share is tracked.
broker2 = ['宏源期货', '方正中期', '英大期货', '美尔雅期货', '格林大华']
jd = DataFrame(list(jd.find({'trade_date': {'$gte': start}})))
# Net position per record: long minus short open interest (vectorized,
# replacing a row-wise apply).
jd['净持仓'] = jd['long_hld'] - jd['short_hld']
# Keep net-short records only.
jd = jd[jd['净持仓'] < 0]
# Total net-short per (date, symbol).
sums = jd.groupby(['trade_date', 'symbol'])['净持仓'].sum().reset_index(name='净空汇总')
# Net-short held by the watched brokers. isin() replaces the original
# per-broker append loop: DataFrame.append was removed in pandas 2.0 and
# the bare try/except around it silently hid any failure.
df = jd[jd['broker'].isin(broker2)]
sums2 = df.groupby(['trade_date', 'symbol'])['净持仓'].sum().reset_index(name='五少净空')
merge = pd.merge(sums2, sums, on=['trade_date', 'symbol'], how='outer').fillna(0)
# Vectorized ratio; yields NaN instead of crashing when the total is 0.
merge['五少占比'] = merge['五少净空'] / merge['净空汇总']
merge = merge[merge['symbol'] == 'JD2005']
print(merge)
# Plot: ratio on the left axis, absolute net-short on the right axis.
plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK labels correctly
plt.rcParams['axes.unicode_minus'] = False    # render minus signs correctly
a = pd.DataFrame({'五少占比': np.array(merge['五少占比']),
                  '五少净空': np.array(merge['五少净空'])},
                 index=merge['trade_date'])
ax = a.plot(
    secondary_y=['五少净空'],
    x_compat=True,
    grid=True)
ax.set_title("五少占比-净空")
ax.set_ylabel('占比')
ax.grid(linestyle="--", alpha=0.3)
ax.right_ax.set_ylabel('净空')
plt.show()
|
{"/opendatatools/realestate/__init__.py": ["/opendatatools/realestate/realestate_interface.py"], "/opendatatools/stock/__init__.py": ["/opendatatools/stock/stock_interface.py"], "/opendatatools/common/__init__.py": ["/opendatatools/common/string_util.py"], "/opendatatools/stock/stock_agent.py": ["/opendatatools/common/__init__.py"], "/opendatatools/hkex/__init__.py": ["/opendatatools/hkex/hkex_interface.py"], "/opendatatools/stock/stock_interface.py": ["/opendatatools/stock/stock_agent.py", "/opendatatools/common/__init__.py"], "/opendatatools/fx/fx_interface.py": ["/opendatatools/common/__init__.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.