index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
75,288 | GreatGodApollo/bashbot | refs/heads/master | /cogs/chance.py | import discord
from discord.ext import commands
from permissions import admincheck
class Chance:
    """Chance Commands"""
    def __init__(self, bot):
        # Keep a reference to the bot so commands can reply via bot.say().
        self.bot = bot
    @commands.check(admincheck)
    @commands.command()
    async def chance(self):
        """Have a chance to get a punishment"""
        # Stub: admin-only command, not implemented yet.
        await self.bot.say("Not yet implemented")
def setup(bot):
    # discord.py extension entry point: register the Chance cog.
    bot.add_cog(Chance(bot))
| {"/cogs/hidden.py": ["/permissions.py", "/utils/mutes.py"], "/cogs/administration.py": ["/permissions.py"], "/cogs/moderation.py": ["/permissions.py", "/utils/mutes.py"], "/cogs/chance.py": ["/permissions.py"], "/bot.py": ["/utils/db_declarative.py", "/utils/mutes.py"], "/cogs/apt.py": ["/permissions.py"], "/cogs/misc.py": ["/bot.py"], "/cogs/config.py": ["/bot.py", "/permissions.py", "/utils/db_declarative.py"], "/cogs/owner.py": ["/bot.py", "/permissions.py", "/utils/db_declarative.py"], "/permissions.py": ["/bot.py", "/utils/db_declarative.py"], "/utils/mutes.py": ["/bot.py", "/utils/db_declarative.py"]} |
75,289 | GreatGodApollo/bashbot | refs/heads/master | /cogs/random.py | import discord
from discord.ext import commands
import random
class Random:
    """Random Cog: dice rolls and random choices."""
    def __init__(self, bot):
        self.bot = bot

    @commands.command(pass_context=True)
    async def roll(self, ctx, sides: int = 6):
        """Roll a die with the given number of sides (default 6)."""
        if sides >= 2:
            await self.bot.say(f"You rolled a {sides} sided die.\n> {random.randint(1, sides)}")
        elif sides == 1:
            await self.bot.say("Why would you want to roll a 1 sided die?")
        else:
            # Fixed message: 2-sided dice are accepted above, so the real
            # requirement is "at least 2", not "greater than 2".
            await self.bot.say(f"A number of sides of at least 2 must be specified. You sent:\n>{sides}")

    @commands.command(pass_context=True)
    async def choose(self, ctx, *choices):
        """Pick one option at random from two or more choices."""
        if len(choices) >= 2:
            # random.choice is the idiomatic equivalent of indexing with
            # randint(0, len-1).
            choice = random.choice(choices)
            await self.bot.say(f"I choose\n> {choice}")
        else:
            await self.bot.say(":x: At least 2 options must be provided :x:")
def setup(bot):
    # discord.py extension entry point: register the Random cog.
    bot.add_cog(Random(bot))
| {"/cogs/hidden.py": ["/permissions.py", "/utils/mutes.py"], "/cogs/administration.py": ["/permissions.py"], "/cogs/moderation.py": ["/permissions.py", "/utils/mutes.py"], "/cogs/chance.py": ["/permissions.py"], "/bot.py": ["/utils/db_declarative.py", "/utils/mutes.py"], "/cogs/apt.py": ["/permissions.py"], "/cogs/misc.py": ["/bot.py"], "/cogs/config.py": ["/bot.py", "/permissions.py", "/utils/db_declarative.py"], "/cogs/owner.py": ["/bot.py", "/permissions.py", "/utils/db_declarative.py"], "/permissions.py": ["/bot.py", "/utils/db_declarative.py"], "/utils/mutes.py": ["/bot.py", "/utils/db_declarative.py"]} |
75,290 | GreatGodApollo/bashbot | refs/heads/master | /bot.py | import asyncio
import logging
import sys
import time
import traceback
import discord
from discord.ext import commands
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from config import Config
from utils.db_declarative import ServerConfig, Base
from utils.mutes import *
logging.basicConfig(level=logging.ERROR)
# this specifies what extensions to load when the bot starts up
startup_extensions = Config.cogs
bot = commands.Bot(command_prefix=Config.prefixes, description=Config.description)
# Recorded once at import time; cogs (e.g. misc.info) import it for uptime.
start_time = time.time()
version = "0.11.0"
# pool_pre_ping validates pooled connections before use (avoids stale-connection errors).
dbengine = create_engine(Config.dburl,
                         pool_pre_ping=True)
Base.metadata.bind = dbengine
DBSession = sessionmaker(bind=dbengine)
# Single module-wide session, shared by cogs via ``from bot import session``.
session = DBSession()
starttime = None
@bot.event
async def on_ready():
    """Print startup/login info and set the bot's presence once connected."""
    # Fixed: removed ``global start`` / ``global starttime`` -- neither name
    # is assigned anywhere in this function (``start`` is never defined at all).
    print("------------")
    print("Bash bot v{}".format(version))
    print("------------")
    print("Logged in as")
    print(bot.user)
    print(bot.user.id)
    print("Invite me using https://discordapp.com/oauth2/authorize?client_id={}&scope=bot&permissions=8".format(bot.user.id))
    print('READY')
    await bot.change_presence(game=discord.Game(name="for {}help | v{}".format(Config.prefixes[0], version), type=3),
                              status="dnd")
@bot.event
async def on_command_error(event, ctx):
    """Global command-error handler.

    ``event`` is the raised exception and ``ctx`` the invocation context
    (the argument order used by discord.py 0.x for this event).
    """
    if isinstance(event, commands.CheckFailure):
        # Fixed: these handlers are module-level functions, so there is no
        # ``self`` -- every previous ``self.bot.send_message`` raised a
        # NameError.  Use the module-level ``bot`` instead.
        if "ownercheck" in str(ctx.command.checks):
            await bot.send_message(ctx.message.channel, "You are not root")
            return
        elif "guildonly" in str(ctx.command.checks):
            await bot.send_message(ctx.message.channel, ":x: Sorry, this command can only be used in servers :x:")
            return
        await bot.send_message(ctx.message.channel, ":no_entry: Access to this command is restricted.")
        return
    if isinstance(event, commands.MissingRequiredArgument):
        await send_cmd_help(ctx)
        return
    if isinstance(event, commands.CommandNotFound):
        # Fixed: ``pass`` previously fell through to the traceback printer
        # below; unknown commands should be ignored silently.
        return
    if isinstance(event, commands.errors.BadArgument):
        await send_cmd_help(ctx)
        return
    # Anything unrecognized: log the full traceback for debugging.
    print('Ignoring exception in command {}:'.format(ctx.command), file=sys.stderr)
    traceback.print_exception(type(event), event, event.__traceback__, file=sys.stderr)
@bot.event
async def on_server_join(server):
    # Seed a per-server config row with default role names as soon as the
    # bot joins a server (other cogs assume this row exists).
    new_server = ServerConfig(serverId=server.id, adminRole="Administrator", modRole="Moderator")
    session.add(new_server)
    session.commit()
@bot.event
async def on_server_remove(server):
    # Drop the server's config row when the bot leaves or is removed.
    session.query(ServerConfig).filter(ServerConfig.serverId == server.id).delete()
    session.commit()
async def send_cmd_help(ctx):
    """Send the formatted help for the command (or subcommand) being invoked."""
    # Fixed: this is a module-level function, so ``self.bot`` raised a
    # NameError; use the module-level ``bot``.  Both branches were identical
    # except for which command object they formatted, so they are merged.
    command = ctx.invoked_subcommand or ctx.command
    pages = bot.formatter.format_help_for(ctx, command)
    for page in pages:
        await bot.send_message(ctx.message.channel, page)
if __name__ == "__main__":
    # Load every configured cog; a failing cog is reported but does not
    # prevent the bot from starting.
    for extension in startup_extensions:
        try:
            bot.load_extension(extension)
        except Exception as e:
            exc = '{}: {}'.format(type(e).__name__, e)
            print('Failed to install package {}\n{}'.format(extension, exc))
    bot.run(Config.token)
| {"/cogs/hidden.py": ["/permissions.py", "/utils/mutes.py"], "/cogs/administration.py": ["/permissions.py"], "/cogs/moderation.py": ["/permissions.py", "/utils/mutes.py"], "/cogs/chance.py": ["/permissions.py"], "/bot.py": ["/utils/db_declarative.py", "/utils/mutes.py"], "/cogs/apt.py": ["/permissions.py"], "/cogs/misc.py": ["/bot.py"], "/cogs/config.py": ["/bot.py", "/permissions.py", "/utils/db_declarative.py"], "/cogs/owner.py": ["/bot.py", "/permissions.py", "/utils/db_declarative.py"], "/permissions.py": ["/bot.py", "/utils/db_declarative.py"], "/utils/mutes.py": ["/bot.py", "/utils/db_declarative.py"]} |
75,291 | GreatGodApollo/bashbot | refs/heads/master | /cogs/apt.py | import discord
from discord.ext import commands
from permissions import ownercheck
class Apt:
    """'Package Manager': owner-only cog (un)loading other cogs at runtime."""
    def __init__(self, bot):
        self.bot = bot

    @commands.group(pass_context=True, aliases=["yum"])
    async def apt(self, ctx):
        """Your average package manager.\nThis apt has Super Cow Powers"""
        if ownercheck(ctx) and ctx.invoked_subcommand is None:
            await self.bot.say("```\nUsage: apt/yum command package\n```")
        elif not ownercheck(ctx) and ctx.invoked_subcommand is None:
            await self.bot.say("You are not root")

    @commands.check(ownercheck)
    @apt.command()
    async def install(self, extension_name: str):
        """Installs a package."""
        msg = await self.bot.say("The package {} is being installed.".format(extension_name))
        try:
            self.bot.load_extension("cogs.{}".format(extension_name))
        except Exception as e:
            await self.bot.edit_message(msg, "We ran into an issue installing {}.".format(extension_name))
            await self.bot.say("```py\n{}: {}\n```".format(type(e).__name__, str(e)))
            return
        await self.bot.edit_message(msg, "The package {} was successfully installed.".format(extension_name))

    @commands.check(ownercheck)
    @apt.command()
    async def remove(self, extension_name: str):
        """Removes a package."""
        msg = await self.bot.say("The package {} is being removed.".format(extension_name))
        try:
            self.bot.unload_extension("cogs.{}".format(extension_name))
        except Exception:
            # Fixed: the bare ``except`` previously fell through and still
            # reported "successfully removed"; report failure and stop.
            await self.bot.say("We ran into an issue removing {}.".format(extension_name))
            return
        await self.bot.edit_message(msg, "The package {} was successfully removed.".format(extension_name))

    @commands.check(ownercheck)
    @apt.command()
    async def upgrade(self, extension_name: str):
        """Upgrades a package (unload, then reload)."""
        msg = await self.bot.say("The package {} is being upgraded.".format(extension_name))
        try:
            self.bot.unload_extension("cogs.{}".format(extension_name))
        except (AttributeError, ImportError) as error:
            await self.bot.say("```py\n{}: {}\n```".format(type(error).__name__, str(error)))
            return
        try:
            self.bot.load_extension("cogs.{}".format(extension_name))
        except (AttributeError, ImportError) as e:
            await self.bot.say("```py\n{}: {}\n```".format(type(e).__name__, str(e)))
            return
        await self.bot.edit_message(msg, new_content="The package {} was successfully upgraded.".format(extension_name))
def setup(bot):
bot.add_cog(Apt(bot)) | {"/cogs/hidden.py": ["/permissions.py", "/utils/mutes.py"], "/cogs/administration.py": ["/permissions.py"], "/cogs/moderation.py": ["/permissions.py", "/utils/mutes.py"], "/cogs/chance.py": ["/permissions.py"], "/bot.py": ["/utils/db_declarative.py", "/utils/mutes.py"], "/cogs/apt.py": ["/permissions.py"], "/cogs/misc.py": ["/bot.py"], "/cogs/config.py": ["/bot.py", "/permissions.py", "/utils/db_declarative.py"], "/cogs/owner.py": ["/bot.py", "/permissions.py", "/utils/db_declarative.py"], "/permissions.py": ["/bot.py", "/utils/db_declarative.py"], "/utils/mutes.py": ["/bot.py", "/utils/db_declarative.py"]} |
75,292 | GreatGodApollo/bashbot | refs/heads/master | /cogs/misc.py | import datetime
import time
import discord
from discord.ext import commands
import platform
import pkg_resources
from bot import start_time, version
class Misc:
    """Miscellaneous commands"""
    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def ping(self):
        """Simple Ping with Response Time"""
        # Time the round trip of sending the first embed.
        first = discord.Embed(description="Ping?", color=discord.Color.blue())
        started = time.perf_counter()
        sent = await self.bot.say(embed=first)
        pong = discord.Embed(title="Pong!", color=discord.Color.blue())
        elapsed_ms = round((time.perf_counter() - started) * 1000)
        pong.add_field(name="Response Time", value=f"{elapsed_ms}ms")
        await self.bot.edit_message(sent, embed=pong)

    @commands.command(aliases=["stats"])
    async def info(self):
        """Get some info about the bot"""
        uptime = str(datetime.timedelta(seconds=int(round(time.time() - start_time))))
        embed = discord.Embed(title="Bash — Bot", description="Information", color=0x4DA825)
        # Build all fields in one place, then attach them uniformly.
        fields = [
            ("Author", "apollo#9292"),
            ("Bot Version", version),
            ("Python Version", f"{platform.python_version()}"),
            ("Discord.py Version", f"{pkg_resources.get_distribution('discord.py').version}"),
            ("Hex Color", "#4DA825"),
            ("Uptime", uptime),
            ("Server Count", f"{len(self.bot.servers)} servers"),
            ("User Count", f"{len(set(self.bot.get_all_members()))} users"),
        ]
        for field_name, field_value in fields:
            embed.add_field(name=field_name, value=field_value, inline=True)
        await self.bot.say(embed=embed)

    @commands.command()
    async def invite(self):
        """Get an invitation link!"""
        await self.bot.say(f"> Invite me using <https://discordapp.com/oauth2/authorize?client_id={self.bot.user.id}&scope=bot&permissions=8>")
def setup(bot):
    # discord.py extension entry point: register the Misc cog.
    bot.add_cog(Misc(bot))
| {"/cogs/hidden.py": ["/permissions.py", "/utils/mutes.py"], "/cogs/administration.py": ["/permissions.py"], "/cogs/moderation.py": ["/permissions.py", "/utils/mutes.py"], "/cogs/chance.py": ["/permissions.py"], "/bot.py": ["/utils/db_declarative.py", "/utils/mutes.py"], "/cogs/apt.py": ["/permissions.py"], "/cogs/misc.py": ["/bot.py"], "/cogs/config.py": ["/bot.py", "/permissions.py", "/utils/db_declarative.py"], "/cogs/owner.py": ["/bot.py", "/permissions.py", "/utils/db_declarative.py"], "/permissions.py": ["/bot.py", "/utils/db_declarative.py"], "/utils/mutes.py": ["/bot.py", "/utils/db_declarative.py"]} |
75,293 | GreatGodApollo/bashbot | refs/heads/master | /cogs/config.py | from discord.ext import commands
from bot import session
from permissions import admincheck, guildonly
from utils.db_declarative import ServerConfig
class Config:
    """Configuration Cog: per-server settings stored via SQLAlchemy."""
    def __init__(self, bot):
        self.bot = bot
    async def send_cmd_help(self, ctx):
        # Send the formatted help for the current (sub)command.
        # NOTE(review): near-identical helpers exist in bot.py and
        # cogs/owner.py -- candidates for a shared utility.
        if ctx.invoked_subcommand:
            pages = self.bot.formatter.format_help_for(ctx, ctx.invoked_subcommand)
            for page in pages:
                await self.bot.send_message(ctx.message.channel, page)
        else:
            pages = self.bot.formatter.format_help_for(ctx, ctx.command)
            for page in pages:
                await self.bot.send_message(ctx.message.channel, page)
    @commands.check(guildonly)
    @commands.check(admincheck)
    @commands.group(pass_context=True)
    async def config(self, ctx):
        """The base config command"""
        # With no subcommand, just show the group's help text.
        if ctx.invoked_subcommand is None:
            await self.send_cmd_help(ctx)
        else:
            pass
    @commands.check(guildonly)
    @commands.check(admincheck)
    @config.command(pass_context=True)
    async def set(self, ctx, option: str, *, value: str):
        """Sets a Configuration Value"""
        # .one() assumes the row exists (created by bot.on_server_join).
        serverconf = session.query(ServerConfig).filter(ServerConfig.serverId == ctx.message.server.id).one()
        opt = option.lower()
        if opt == "adminrole":
            serverconf.adminRole = value
        elif opt == "modrole":
            serverconf.modRole = value
        else:
            await self.bot.say("Invalid option `{}`".format(option))
            return
        session.commit()
        await self.bot.say("Configuration Updated")
    @commands.check(guildonly)
    @commands.check(admincheck)
    @config.command(pass_context=True)
    async def get(self, ctx, option: str):
        """Gets a configuration value"""
        serverconf = session.query(ServerConfig).filter(ServerConfig.serverId == ctx.message.server.id).one()
        opt = option.lower()
        if opt == "adminrole":
            await self.bot.say("The current value for `adminRole` is {}.".format(serverconf.adminRole))
        elif opt == "modrole":
            await self.bot.say("The current value for `modRole` is {}.".format(serverconf.modRole))
        else:
            await self.bot.say("Invalid option `{}`".format(option))
    @commands.check(guildonly)
    @commands.check(admincheck)
    @config.command(pass_context=True)
    async def list(self, ctx):
        """List all configuration values"""
        serverconf = session.query(ServerConfig).filter(ServerConfig.serverId == ctx.message.server.id).one()
        servername = ctx.message.server.name
        serverid = ctx.message.server.id
        adminrole = serverconf.adminRole
        modrole = serverconf.modRole
        await self.bot.say("```\n"
                           "Listing all configuration values for '{0}' ({1})\n\n"
                           "modRole: {2}\n"
                           "adminRole: {3}\n"
                           "```".format(servername, serverid, modrole, adminrole))
def setup(bot):
    # discord.py extension entry point: register the Config cog.
    bot.add_cog(Config(bot))
| {"/cogs/hidden.py": ["/permissions.py", "/utils/mutes.py"], "/cogs/administration.py": ["/permissions.py"], "/cogs/moderation.py": ["/permissions.py", "/utils/mutes.py"], "/cogs/chance.py": ["/permissions.py"], "/bot.py": ["/utils/db_declarative.py", "/utils/mutes.py"], "/cogs/apt.py": ["/permissions.py"], "/cogs/misc.py": ["/bot.py"], "/cogs/config.py": ["/bot.py", "/permissions.py", "/utils/db_declarative.py"], "/cogs/owner.py": ["/bot.py", "/permissions.py", "/utils/db_declarative.py"], "/permissions.py": ["/bot.py", "/utils/db_declarative.py"], "/utils/mutes.py": ["/bot.py", "/utils/db_declarative.py"]} |
75,294 | GreatGodApollo/bashbot | refs/heads/master | /cogs/owner.py | import string
import discord
import asyncio
from bot import session
from permissions import ownercheck
from discord.ext import commands
from utils.db_declarative import ServerConfig
class Owner:
    """Owner commands"""
    def __init__(self, bot):
        self.bot = bot

    @commands.group(pass_context=True, aliases=["o"])
    async def owner(self, ctx):
        """The base owner command"""
        if ownercheck(ctx) and ctx.invoked_subcommand is None:
            await self.bot.say("You are root")
        elif not ownercheck(ctx) and ctx.invoked_subcommand is None:
            await self.bot.say("You are not root")

    @commands.check(ownercheck)
    @owner.command(pass_context=True, hidden=True)
    async def testing(self, ctx):
        """Generic test command (hidden)."""
        await self.bot.say("Just a generic testing command")

    @commands.check(ownercheck)
    @owner.command(pass_context=True, aliases=["eval"])
    async def debug(self, ctx, *, code):
        """Evaluate code"""
        # SECURITY: eval() on arbitrary input is remote code execution by
        # design; it is gated by the ownercheck above and must stay that way.
        global_vars = globals().copy()
        global_vars['bot'] = self.bot
        global_vars['ctx'] = ctx
        global_vars['message'] = ctx.message
        global_vars['author'] = ctx.message.author
        global_vars['channel'] = ctx.message.channel
        global_vars['server'] = ctx.message.server
        global_vars['session'] = session
        try:
            result = eval(code, global_vars, locals())
            if asyncio.iscoroutine(result):
                result = await result
            result = str(result)  # the eval output was modified by me but originally submitted by DJ electro
            if len(result) > 2000:
                # Discord rejects messages over 2000 characters; fail early.
                raise Exception("TooManyChars")
            embed = discord.Embed(title="✅ Evaluated successfully.", color=0x80ff80)
            embed.add_field(name="Input :inbox_tray:", value="```" + code + "```")
            embed.add_field(name="Output :outbox_tray:", value="```" + result + "```")
            await self.bot.say(embed=embed)
        except Exception as error:
            # Fixed: the old check concatenated name+message with no ": "
            # separator, so it could never equal the compared string.
            if "{}: {}".format(type(error).__name__, error) == "HTTPException: BAD REQUEST (status code: 400)":
                return
            embed = discord.Embed(title="❌ Evaluation failed.", color=0xff0000)
            embed.add_field(name="Input :inbox_tray:", value="```" + code + "```", inline=True)
            embed.add_field(name="Error <:error2:442590069082161163>",
                            value='```{}: {}```'.format(type(error).__name__, str(error)))
            await self.bot.say(embed=embed)
            return

    @commands.check(ownercheck)
    @owner.command(pass_context=True)
    async def importconfig(self, ctx, modrole: str, adminrole: str):
        """Import servers to the DB"""
        msg = await self.bot.say("Importing servers to the DB")
        for r in self.bot.servers:
            new_server = ServerConfig(serverId=r.id, adminRole=adminrole, modRole=modrole)
            session.add(new_server)
            session.commit()
        await self.bot.edit_message(msg, "Finished importing servers to the DB")

    @commands.check(ownercheck)
    @owner.command(pass_context=True, aliases=["incmn"])
    async def incrementalmassnick(self, ctx, start_at: int, *, name):
        """Incremental MassNick"""
        await self.bot.say(f"Incrementally renaming users to {name}{start_at}")
        x = start_at
        for user in ctx.message.server.members:
            try:
                await self.bot.change_nickname(user, f"{name}{x}")
                x += 1
            except Exception:
                # Best effort: skip members we lack permission to rename.
                continue
        await self.bot.say("Finished Incremental MassNick")

    @commands.check(ownercheck)
    @owner.command(pass_context=True, aliases=["mn"])
    async def massnick(self, ctx, *, name=""):
        """MassNick"""
        # Fixed: ``name is ""`` compared identity, which is
        # implementation-defined for strings; use truthiness instead.
        if not name:
            msgToSend = "Un-nicknaming users"
        else:
            msgToSend = f"Renaming users to {name}"
        msg = await self.bot.say(msgToSend)
        for user in ctx.message.server.members:
            try:
                await self.bot.change_nickname(user, f"{name}")
            except Exception:
                continue
        await self.bot.edit_message(msg, "Finished MassNick")

    async def send_cmd_help(self, ctx):
        # Duplicated helper (also in bot.py and cogs/config.py):
        # send the formatted help for the current (sub)command.
        command = ctx.invoked_subcommand or ctx.command
        pages = self.bot.formatter.format_help_for(ctx, command)
        for page in pages:
            await self.bot.send_message(ctx.message.channel, page)
def setup(bot):
    # discord.py extension entry point: register the Owner cog.
    bot.add_cog(Owner(bot))
| {"/cogs/hidden.py": ["/permissions.py", "/utils/mutes.py"], "/cogs/administration.py": ["/permissions.py"], "/cogs/moderation.py": ["/permissions.py", "/utils/mutes.py"], "/cogs/chance.py": ["/permissions.py"], "/bot.py": ["/utils/db_declarative.py", "/utils/mutes.py"], "/cogs/apt.py": ["/permissions.py"], "/cogs/misc.py": ["/bot.py"], "/cogs/config.py": ["/bot.py", "/permissions.py", "/utils/db_declarative.py"], "/cogs/owner.py": ["/bot.py", "/permissions.py", "/utils/db_declarative.py"], "/permissions.py": ["/bot.py", "/utils/db_declarative.py"], "/utils/mutes.py": ["/bot.py", "/utils/db_declarative.py"]} |
75,295 | GreatGodApollo/bashbot | refs/heads/master | /create_db.py | from sqlalchemy import Column, Integer, String, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from config import Config
Base = declarative_base()
class ServerConfig(Base):
    # Per-server configuration: which role names count as admin/mod.
    __tablename__ = 'servers'
    id = Column(Integer, primary_key=True, nullable=False)
    # Discord snowflake IDs are stored as (up to) 18-character strings.
    serverId = Column(String(18), nullable=False)
    adminRole = Column(String(100), nullable=False)
    modRole = Column(String(100), nullable=False)
class ServerMutes(Base):
    # One row per active mute: (server, user) pair.
    __tablename__ = 'mutes'
    id = Column(Integer, primary_key=True, nullable=False)
    serverId = Column(String(18), nullable=False)
    userId = Column(String(18), nullable=False)
# pool_pre_ping validates pooled connections before use.
dbengine = create_engine(Config.dburl,
                         pool_pre_ping=True)
# Create the tables declared above if they do not already exist.
Base.metadata.create_all(dbengine)
| {"/cogs/hidden.py": ["/permissions.py", "/utils/mutes.py"], "/cogs/administration.py": ["/permissions.py"], "/cogs/moderation.py": ["/permissions.py", "/utils/mutes.py"], "/cogs/chance.py": ["/permissions.py"], "/bot.py": ["/utils/db_declarative.py", "/utils/mutes.py"], "/cogs/apt.py": ["/permissions.py"], "/cogs/misc.py": ["/bot.py"], "/cogs/config.py": ["/bot.py", "/permissions.py", "/utils/db_declarative.py"], "/cogs/owner.py": ["/bot.py", "/permissions.py", "/utils/db_declarative.py"], "/permissions.py": ["/bot.py", "/utils/db_declarative.py"], "/utils/mutes.py": ["/bot.py", "/utils/db_declarative.py"]} |
75,296 | GreatGodApollo/bashbot | refs/heads/master | /permissions.py | import discord
from bot import session
from config import Config
from utils.db_declarative import ServerConfig
def ownercheck(ctx):
    """True when the invoking user's ID is listed in Config.owners."""
    return ctx.message.author.id in Config.owners
def serverownercheck(ctx):
    """True when the invoking user owns the server the message was sent in."""
    return ctx.message.author == ctx.message.server.owner
def admincheck(ctx):
    """Command check: configured admin role, server owner, or bot owner.

    NOTE(review): when the configured adminRole name is falsy the inner
    ``if`` is skipped and the function implicitly returns None (treated as
    deny) -- confirm that is intended.
    """
    try:
        roles = ctx.message.server.roles
        try:
            serverconf = session.query(ServerConfig).filter(ServerConfig.serverId == ctx.message.server.id).one()
            rolename = serverconf.adminRole
            if rolename:
                role = discord.utils.get(roles, name=rolename)
                return role in ctx.message.author.roles or serverownercheck(ctx) or ownercheck(ctx)
        except Exception as e:
            # No/ambiguous config row for this server: log and fall back
            # to bot-owner-only access.
            print(e)
            return ownercheck(ctx)
    except Exception as e:
        # ctx.message.server is unavailable (e.g. DM): bot-owner-only.
        return ownercheck(ctx)
def modcheck(ctx):
    """Command check: configured mod role, or anything admincheck allows.

    NOTE(review): mirrors admincheck, including the implicit-None return
    when the configured modRole name is falsy -- confirm intended.
    """
    try:
        roles = ctx.message.server.roles
        try:
            serverconf = session.query(ServerConfig).filter(ServerConfig.serverId == ctx.message.server.id).one()
            rolename = serverconf.modRole
            if rolename:
                role = discord.utils.get(roles, name=rolename)
                return role in ctx.message.author.roles or admincheck(ctx)
        except Exception as e:
            # No/ambiguous config row: log and fall back to bot-owner-only.
            print(e)
            return ownercheck(ctx)
    except Exception as e:
        # ctx.message.server is unavailable (e.g. DM): bot-owner-only.
        return ownercheck(ctx)
def guildonly(ctx):
    """Command check: allow the command only in server channels, not DMs."""
    # A PrivateChannel is a DM/group DM; anything else is a guild channel.
    return not isinstance(ctx.message.channel, discord.PrivateChannel)
| {"/cogs/hidden.py": ["/permissions.py", "/utils/mutes.py"], "/cogs/administration.py": ["/permissions.py"], "/cogs/moderation.py": ["/permissions.py", "/utils/mutes.py"], "/cogs/chance.py": ["/permissions.py"], "/bot.py": ["/utils/db_declarative.py", "/utils/mutes.py"], "/cogs/apt.py": ["/permissions.py"], "/cogs/misc.py": ["/bot.py"], "/cogs/config.py": ["/bot.py", "/permissions.py", "/utils/db_declarative.py"], "/cogs/owner.py": ["/bot.py", "/permissions.py", "/utils/db_declarative.py"], "/permissions.py": ["/bot.py", "/utils/db_declarative.py"], "/utils/mutes.py": ["/bot.py", "/utils/db_declarative.py"]} |
75,297 | GreatGodApollo/bashbot | refs/heads/master | /auto-update.py | # This auto-update script is designed specifically for my environment.
# You will likely need to make modifications for this to work.
# Imports
import sh
from config import Config
from sh import git
import time
# Declarations
serviceName = Config.serviceName
workingDirectory = Config.workingDirectory
# ``sh.sudo.service`` builds a runnable "sudo service ..." command wrapper.
service = sh.sudo.service
def CheckForUpdate(workingDir):
    """Return True when origin/master has commits the local checkout lacks.

    workingDir must end with a path separator (it is concatenated with
    ".git/" directly).
    """
    print("Fetching most recent code from source...")
    # Fetch most up to date version of code.
    p = git(f"--git-dir={workingDir}.git/", f"--work-tree={workingDir}", "fetch", "origin", "master")
    print("Fetch Complete.")
    time.sleep(2)
    print(f"Checking status for {workingDir}...")
    statusCheck = git(f"--git-dir={workingDir}.git/", f"--work-tree={workingDir}", "status")
    # Fixed: git 2.15 changed the wording from "up-to-date" to "up to date";
    # matching only the old form made new git versions always report an
    # available update (and restart the service every run).
    if ("Your branch is up-to-date" in statusCheck
            or "Your branch is up to date" in statusCheck):
        print("Status check passes.")
        print("Code up to date.")
        return False
    else:
        print("Code update available.")
        return True
if __name__ == "__main__":
    print("****** Checking for Code Update ******")
    if CheckForUpdate(workingDirectory):
        # Stop the service, hard-reset the tree to origin/master, restart.
        service(f"{serviceName}", "stop")
        print("Resetting local modifications...")
        resetCheck = git(f"--git-dir={workingDirectory}.git/", f"--work-tree={workingDirectory}", "reset", "--hard",
                         "origin/master")
        print(str(resetCheck))
        service(f"{serviceName}", "start")
| {"/cogs/hidden.py": ["/permissions.py", "/utils/mutes.py"], "/cogs/administration.py": ["/permissions.py"], "/cogs/moderation.py": ["/permissions.py", "/utils/mutes.py"], "/cogs/chance.py": ["/permissions.py"], "/bot.py": ["/utils/db_declarative.py", "/utils/mutes.py"], "/cogs/apt.py": ["/permissions.py"], "/cogs/misc.py": ["/bot.py"], "/cogs/config.py": ["/bot.py", "/permissions.py", "/utils/db_declarative.py"], "/cogs/owner.py": ["/bot.py", "/permissions.py", "/utils/db_declarative.py"], "/permissions.py": ["/bot.py", "/utils/db_declarative.py"], "/utils/mutes.py": ["/bot.py", "/utils/db_declarative.py"]} |
75,298 | GreatGodApollo/bashbot | refs/heads/master | /utils/db_declarative.py | from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class ServerConfig(Base):
    # Per-server configuration: which role names count as admin/mod.
    # NOTE(review): duplicated in create_db.py -- keep the two in sync.
    __tablename__ = 'servers'
    id = Column(Integer, primary_key=True, nullable=False)
    # Discord snowflake IDs are stored as (up to) 18-character strings.
    serverId = Column(String(18), nullable=False)
    adminRole = Column(String(100), nullable=False)
    modRole = Column(String(100), nullable=False)
class ServerMutes(Base):
    # One row per active mute: (server, user) pair.
    __tablename__ = 'mutes'
    id = Column(Integer, primary_key=True, nullable=False)
    serverId = Column(String(18), nullable=False)
    userId = Column(String(18), nullable=False)
| {"/cogs/hidden.py": ["/permissions.py", "/utils/mutes.py"], "/cogs/administration.py": ["/permissions.py"], "/cogs/moderation.py": ["/permissions.py", "/utils/mutes.py"], "/cogs/chance.py": ["/permissions.py"], "/bot.py": ["/utils/db_declarative.py", "/utils/mutes.py"], "/cogs/apt.py": ["/permissions.py"], "/cogs/misc.py": ["/bot.py"], "/cogs/config.py": ["/bot.py", "/permissions.py", "/utils/db_declarative.py"], "/cogs/owner.py": ["/bot.py", "/permissions.py", "/utils/db_declarative.py"], "/permissions.py": ["/bot.py", "/utils/db_declarative.py"], "/utils/mutes.py": ["/bot.py", "/utils/db_declarative.py"]} |
75,299 | GreatGodApollo/bashbot | refs/heads/master | /utils/mutes.py | from datetime import datetime, timedelta
import discord
from bot import session
from utils.db_declarative import ServerMutes
async def un_mute(bot, serverid, userid):
    """Remove the 'Muted' role from a user, delete the mute row, DM them.

    Returns True when an active mute was removed, False when the user was
    not muted on that server.
    """
    try:
        row = session.query(ServerMutes).filter(ServerMutes.serverId == serverid).filter(ServerMutes.userId == userid).one()
    except Exception:
        # .one() raises when no (or multiple) rows match; treat as "not muted".
        # (Bare ``except:`` replaced so SystemExit/KeyboardInterrupt propagate.)
        row = None
    if row is None:
        return False
    server = discord.utils.get(bot.servers, id=serverid)
    role = discord.utils.get(server.roles, name='Muted')
    user = discord.utils.get(server.members, id=userid)
    await bot.remove_roles(user, role)
    session.delete(row)
    session.commit()
    await bot.send_message(user, f"You have been unmuted in {server.name}")
    return True
async def mute(bot, serverid, userid):
    """Give a user the 'Muted' role, record the mute in the DB, DM them.

    Returns True when a new mute was applied, False when the user was
    already muted on that server.
    """
    try:
        prerow = session.query(ServerMutes).filter(ServerMutes.userId == userid).filter(ServerMutes.serverId == serverid).one()
    except Exception:
        # .one() raises when no row matches; treat as "not currently muted".
        # (Bare ``except:`` replaced so SystemExit/KeyboardInterrupt propagate.)
        prerow = None
    if prerow is not None:
        return False
    server = discord.utils.get(bot.servers, id=serverid)
    role = discord.utils.get(server.roles, name='Muted')
    user = discord.utils.get(server.members, id=userid)
    await bot.add_roles(user, role)
    new_mute = ServerMutes(serverId=serverid, userId=userid)
    session.add(new_mute)
    session.commit()
    await bot.send_message(user, f"You have been muted in {server.name}")
    return True
| {"/cogs/hidden.py": ["/permissions.py", "/utils/mutes.py"], "/cogs/administration.py": ["/permissions.py"], "/cogs/moderation.py": ["/permissions.py", "/utils/mutes.py"], "/cogs/chance.py": ["/permissions.py"], "/bot.py": ["/utils/db_declarative.py", "/utils/mutes.py"], "/cogs/apt.py": ["/permissions.py"], "/cogs/misc.py": ["/bot.py"], "/cogs/config.py": ["/bot.py", "/permissions.py", "/utils/db_declarative.py"], "/cogs/owner.py": ["/bot.py", "/permissions.py", "/utils/db_declarative.py"], "/permissions.py": ["/bot.py", "/utils/db_declarative.py"], "/utils/mutes.py": ["/bot.py", "/utils/db_declarative.py"]} |
75,316 | ricardo-jr37/RabbitMQ_e_gRPC | refs/heads/master | /emit_log_portao.py | #!/usr/bin/env python
import pika
import sys
import time
import random
connection = pika.BlockingConnection(
    pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='portao')
# Fixed: connection.close() was unreachable after the infinite publish loop;
# close the connection cleanly when the script is interrupted instead.
try:
    while True:
        #ruido = random.random() * random.randrange(-2, 2, 1)
        valor = 1
        channel.basic_publish(exchange='', routing_key='portao', body=str(valor))
        time.sleep(5)
except KeyboardInterrupt:
    pass
finally:
    connection.close()
75,317 | ricardo-jr37/RabbitMQ_e_gRPC | refs/heads/master | /client-flask.py | import time
from flask import Flask, url_for, jsonify
from flask import render_template
from flask import request
import socket
import pdb
import pickle
import time
import json
PORT = 5000
HOST = 'localhost'
# Persistent TCP connection to the backend, opened once at import time.
# NOTE(review): if the backend drops this connection, every later send()
# will fail -- confirm reconnection is handled somewhere.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
app = Flask(__name__)
@app.route('/')
def index():
    """Serve the control-panel page."""
    return render_template('index.html')
@app.route('/', methods=['POST'])
def create():
    """Forward the submitted form (as JSON) to the backend over the socket."""
    result = json.dumps(request.form)
    # Debug: show the payload being forwarded.
    print(result)
    #ligada = request.form.get('lampada')
    #ar = request.form.get('ar')
    s.send(result.encode('utf-8'))
    return render_template('index.html')
'''
if int(ar) == 1:
print(request.form.get('temperatura'))
else:
print('ar-desligado')
if int(ligada) == 1:
palavra = 'teste_conexao'
print(ligada)
PORT = 5000
HOST = 'localhost'
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
s.send(palavra.encode('utf-8'))
else:
palavra = 'teste_conexao_desligada'
print(ligada)
PORT = 5000
HOST = 'localhost'
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
s.send(palavra.encode('utf-8'))
return render_template('index.html')
#time.sleep(5)
'''
'''
def server_lampada(ligada):
teste = Sensor()
teste.nome = 'lampada'
teste.status_lampada = int(ligada)
PORT = 1510
HOST = 'localhost'
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
s.sendall(teste.SerializeToString())
'''
if __name__ == '__main__':
    # Development server on port 8080; debug=True enables the reloader.
    app.run(port=8080, debug=True)
75,318 | ricardo-jr37/RabbitMQ_e_gRPC | refs/heads/master | /server_portao.py | import grpc
from concurrent import futures
import time
# import the generated classes
import portao_pb2
import portao_pb2_grpc
class PortaoServicer(portao_pb2_grpc.PortaoServicer):
    """gRPC servicer that opens/closes the gate ("portao")."""
    def abrirPortao(self, request, context):
        """Open the gate; returns StatusPortao with status == 1 ("on")."""
        response = portao_pb2.StatusPortao()
        # 1 represents "on"/open.
        # Debug print of the (still default-valued) response message.
        print(response)
        response.status = 1
        #print(self.request.temperatura)
        return response
    def fecharPortao(self, request, context):
        """Close the gate; returns StatusPortao with status == -1 ("off")."""
        response = portao_pb2.StatusPortao()
        print(response)
        # -1 represents "off"/closed.
        response.status = -1
        return response
# create a gRPC server
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
portao_pb2_grpc.add_PortaoServicer_to_server(
    PortaoServicer(), server)
# listen on port 50053
print('Starting server. Listening on port 50053.')
server.add_insecure_port('[::]:50053')
server.start()
# since server.start() will not block,
# a sleep-loop is added to keep alive
try:
    while True:
        time.sleep(86400)
except KeyboardInterrupt:
    server.stop(0)
75,319 | ricardo-jr37/RabbitMQ_e_gRPC | refs/heads/master | /emit_log_lampada.py | #!/usr/bin/env python
"""Simulated luminosity sensor.

Publishes a noisy reading (15 + noise) onto the 'lampada' RabbitMQ
queue every 5 seconds so the home-assistant process can consume it.
"""
import pika
import sys
import time
import random

connection = pika.BlockingConnection(
    pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='lampada')
try:
    while True:
        # Random noise in roughly [-5, 5) around the base value of 15.
        ruido = random.random() * random.randrange(-5, 5, 1)
        valor = 15 + ruido
        channel.basic_publish(exchange='', routing_key='lampada', body=str(valor))
        time.sleep(5)
except KeyboardInterrupt:
    pass
finally:
    # BUG FIX: connection.close() was placed after the infinite loop and
    # therefore unreachable; close the connection on shutdown instead.
    connection.close()
75,320 | ricardo-jr37/RabbitMQ_e_gRPC | refs/heads/master | /lampada_pb2_grpc.py | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import lampada_pb2 as lampada__pb2
class LampadaStub(object):
    """Client-side stub for the Lampada (lamp) gRPC service.

    Exposes one callable per RPC; both RPCs send and receive a
    LampadaStatus message.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        self.ligarLampada = channel.unary_unary(
            '/Lampada/ligarLampada',
            request_serializer=lampada__pb2.LampadaStatus.SerializeToString,
            response_deserializer=lampada__pb2.LampadaStatus.FromString,
        )
        self.desligarLampada = channel.unary_unary(
            '/Lampada/desligarLampada',
            request_serializer=lampada__pb2.LampadaStatus.SerializeToString,
            response_deserializer=lampada__pb2.LampadaStatus.FromString,
        )
class LampadaServicer(object):
    """Server-side interface for the Lampada service.

    Concrete servicers (see server_lampada.py) override these methods;
    the generated defaults reject the call with UNIMPLEMENTED.
    """

    def ligarLampada(self, request, context):
        """Default handler: reports the RPC as not implemented."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def desligarLampada(self, request, context):
        """Default handler: reports the RPC as not implemented."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_LampadaServicer_to_server(servicer, server):
    """Registers `servicer`'s RPC handlers on `server` under the
    'Lampada' service name."""
    rpc_method_handlers = {
        'ligarLampada': grpc.unary_unary_rpc_method_handler(
            servicer.ligarLampada,
            request_deserializer=lampada__pb2.LampadaStatus.FromString,
            response_serializer=lampada__pb2.LampadaStatus.SerializeToString,
        ),
        'desligarLampada': grpc.unary_unary_rpc_method_handler(
            servicer.desligarLampada,
            request_deserializer=lampada__pb2.LampadaStatus.FromString,
            response_serializer=lampada__pb2.LampadaStatus.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'Lampada', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Lampada(object):
    """One-shot client helpers for the Lampada service.

    Each static method performs a single unary-unary RPC against `target`
    via gRPC's experimental API, without a pre-built stub.
    """

    @staticmethod
    def ligarLampada(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/Lampada/ligarLampada',
            lampada__pb2.LampadaStatus.SerializeToString,
            lampada__pb2.LampadaStatus.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def desligarLampada(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/Lampada/desligarLampada',
            lampada__pb2.LampadaStatus.SerializeToString,
            lampada__pb2.LampadaStatus.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
75,321 | ricardo-jr37/RabbitMQ_e_gRPC | refs/heads/master | /portao_pb2_grpc.py | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import portao_pb2 as portao__pb2
class PortaoStub(object):
    """Client-side stub for the Portao (gate) gRPC service.

    Exposes one callable per RPC; both RPCs send and receive a
    StatusPortao message.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        self.abrirPortao = channel.unary_unary(
            '/Portao/abrirPortao',
            request_serializer=portao__pb2.StatusPortao.SerializeToString,
            response_deserializer=portao__pb2.StatusPortao.FromString,
        )
        self.fecharPortao = channel.unary_unary(
            '/Portao/fecharPortao',
            request_serializer=portao__pb2.StatusPortao.SerializeToString,
            response_deserializer=portao__pb2.StatusPortao.FromString,
        )
class PortaoServicer(object):
    """Server-side interface for the Portao service.

    Concrete servicers (see server_portao.py) override these methods;
    the generated defaults reject the call with UNIMPLEMENTED.
    """

    def abrirPortao(self, request, context):
        """Default handler: reports the RPC as not implemented."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def fecharPortao(self, request, context):
        """Default handler: reports the RPC as not implemented."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_PortaoServicer_to_server(servicer, server):
    """Registers `servicer`'s RPC handlers on `server` under the
    'Portao' service name."""
    rpc_method_handlers = {
        'abrirPortao': grpc.unary_unary_rpc_method_handler(
            servicer.abrirPortao,
            request_deserializer=portao__pb2.StatusPortao.FromString,
            response_serializer=portao__pb2.StatusPortao.SerializeToString,
        ),
        'fecharPortao': grpc.unary_unary_rpc_method_handler(
            servicer.fecharPortao,
            request_deserializer=portao__pb2.StatusPortao.FromString,
            response_serializer=portao__pb2.StatusPortao.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'Portao', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Portao(object):
    """One-shot client helpers for the Portao service.

    Each static method performs a single unary-unary RPC against `target`
    via gRPC's experimental API, without a pre-built stub.
    """

    @staticmethod
    def abrirPortao(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/Portao/abrirPortao',
            portao__pb2.StatusPortao.SerializeToString,
            portao__pb2.StatusPortao.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def fecharPortao(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/Portao/fecharPortao',
            portao__pb2.StatusPortao.SerializeToString,
            portao__pb2.StatusPortao.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
75,322 | ricardo-jr37/RabbitMQ_e_gRPC | refs/heads/master | /server_ar.py | import grpc
from concurrent import futures
import time
# import the generated classes
import ar_pb2
import ar_pb2_grpc
class ArServicer(ar_pb2_grpc.ArServicer):
    """gRPC servicer for the air-conditioner ("ar") service."""

    def ligarAr(self, request, context):
        # Turn the AC on: echo the requested temperature back to the caller.
        reply = ar_pb2.ArTemperatura()
        print(reply)
        reply.temperatura = request.temperatura
        return reply

    def desligarAr(self, request, context):
        # Turn the AC off: -1 represents the "off" state.
        reply = ar_pb2.ArTemperatura()
        print(reply)
        reply.temperatura = -1
        return reply
# Create a gRPC server backed by a thread pool (up to 10 concurrent RPCs).
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
ar_pb2_grpc.add_ArServicer_to_server(
    ArServicer(), server)
# Listen on port 50052, plaintext (no TLS).
print('Starting server. Listening on port 50052.')
server.add_insecure_port('[::]:50052')
server.start()
# server.start() does not block, so sleep forever to keep the process
# alive until Ctrl-C, then stop the server immediately.
try:
    while True:
        time.sleep(86400)
except KeyboardInterrupt:
    server.stop(0)
75,323 | ricardo-jr37/RabbitMQ_e_gRPC | refs/heads/master | /server_lampada.py | import grpc
from concurrent import futures
import time
# import the generated classes
import lampada_pb2
import lampada_pb2_grpc
class LampadaServicer(lampada_pb2_grpc.LampadaServicer):
    """gRPC servicer for the lamp service.

    NOTE(review): the proto field is spelled ``stauts`` (sic) — the same
    spelling is used by the client in homeassintent.py, so it is
    preserved here.
    """

    def ligarLampada(self, request, context):
        # Lamp on: 500 represents the lit state.
        reply = lampada_pb2.LampadaStatus()
        reply.stauts = 500
        return reply

    def desligarLampada(self, request, context):
        # Lamp off: -1 represents the off state.
        reply = lampada_pb2.LampadaStatus()
        reply.stauts = -1
        return reply
# Create a gRPC server backed by a thread pool (up to 10 concurrent RPCs).
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
lampada_pb2_grpc.add_LampadaServicer_to_server(
    LampadaServicer(), server)
# Listen on port 50051, plaintext (no TLS).
print('Starting server. Listening on port 50051.')
server.add_insecure_port('[::]:50051')
server.start()
# server.start() does not block, so sleep forever to keep the process
# alive until Ctrl-C, then stop the server immediately.
try:
    while True:
        time.sleep(86400)
except KeyboardInterrupt:
    server.stop(0)
75,324 | ricardo-jr37/RabbitMQ_e_gRPC | refs/heads/master | /homeassintent.py | #!/usr/bin/env python
#!/usr/bin/python
import socket
import threading
import grpc
from concurrent import futures
import pika, sys, os, time
# import the generated classes
import lampada_pb2
import lampada_pb2_grpc
# import the generated classes
import ar_pb2
import ar_pb2_grpc
# import the generated classes
import portao_pb2
import portao_pb2_grpc
import json
# Listening socket for the built-in status page / form endpoint.
IP = "127.0.0.1"
PORT = 5000
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((str(IP), int(PORT)))
sock.listen(10)
# Non-blocking so accept()/recv() can be polled in the busy loops below.
sock.setblocking(False)
clientes = []  # Connected client sockets, polled by procesarCon().
# Sensor/actuator histories, each seeded with a neutral reading of 0.
# The lists only ever grow; readers take the last element as the current
# value. (Idiom fix: literal [0] replaces the empty-list-then-append.)
# Lamp
sensor_ilumicao = [0]
lampada_valor = [0]
# Air conditioner
ar_sensor = [0]
ar_valor = [0]
# Gate
portao_sensor = [0]
portao_valor = [0]
def aceptarCon():
    """Accept-loop thread: serves a one-shot HTTP status page per client.

    Renders the current luminosity, temperature and gate state -- read
    from the tails of the global history lists -- as a small HTML page
    and writes it directly onto each accepted socket.
    """
    print("aceptarCon iniciado")
    global sensor_ilumicao
    global lampada_valor
    global clientes
    global sock
    while True:
        try:
            stauts_portao = ""
            conn, addr = sock.accept()
            conn.setblocking(False)
            # Keep the connection so procesarCon() can poll it for JSON
            # form submissions.
            clientes.append(conn)
            #print(conn.getpeername()[1])
            # Luminosity shown = lamp actuator value minus ambient sensor
            # value (made absolute below).
            valorluminosidade = int(lampada_valor[len(lampada_valor)-1]) - int(sensor_ilumicao[len(sensor_ilumicao)-1])
            #valortemperatura = 0
            #print(ar_sensor)
            # Prefer the last commanded AC temperature once one exists;
            # otherwise fall back to the ambient sensor reading.
            if int(ar_valor[len(ar_valor)-1]) >= 0 and len(ar_valor)>1:
                valortemperatura = int(ar_valor[len(ar_valor)-1])
            else:
                valortemperatura = ar_sensor[len(ar_sensor)-1]
            if valorluminosidade < 0:
                valorluminosidade *= -1
            #print(len(portao_valor))
            # Gate state: use the commanded value once any command was
            # issued, else the sensor; >= 1 means "Aberto" (open).
            if len(portao_valor)>1:
                if portao_valor[len(portao_valor)-1] >= 1:
                    stauts_portao = "Aberto"
                else:
                    stauts_portao = "Fechado"
            else:
                if portao_sensor[len(portao_sensor)-1] >= 1:
                    stauts_portao = "Aberto"
                else:
                    stauts_portao = "Fechado"
            #print(stauts_portao)
            conn.send(('HTTP/1.0 200 OK\n').encode('utf-8'))
            conn.send(('Content-Type: text/html\n').encode('utf-8'))
            conn.send(('\n').encode('utf-8')) # header and body should be separated by additional newline
            conn.send(("""
            <html>
            <meta charset="utf-8"/>
            <body>
            <h1>Trabalho 3 - SISTEMAS DISTRIBUÍDOS</h1>
            <h3>Nivel de luminosidade (Candela): {}</h3>
            <h3>Temperatura (C°): {}</h3>
            <h3>Portão: {}</h3>
            <br><a href="http://localhost:8080/">Modificar Estado</a>
            </body>
            </html>
            """).format(valorluminosidade, valortemperatura, stauts_portao).encode('utf-8'))
            #time.sleep(3)
        except:
            # NOTE(review): the bare except doubles as the poll mechanism
            # (the non-blocking accept() raises when no client is waiting)
            # but it also hides real send/format errors; confirm intent.
            pass
def procesarCon():
    """Polling thread: reads JSON form submissions from connected clients.

    Each successfully decoded payload is forwarded to procform() to drive
    the actuators.
    """
    print("ProcesarCon iniciado")
    global clientes
    while True:
        if len(clientes) > 0:
            for c in clientes:
                try:
                    data = c.recv(1024)
                    teste = data.decode('utf-8')
                    if data:
                        y = json.loads(teste)
                        print(y)
                        procform(y)
                except:
                    # NOTE(review): bare except silently skips sockets with
                    # no pending data (non-blocking recv) as well as any
                    # malformed JSON; confirm this is intentional.
                    pass
def procform(y):
    """Applies a decoded form submission to the actuators.

    Args:
        y: dict with keys 'lampada', 'ar', 'temperatura' and 'portao';
            values arrive as strings/numbers from the web form.
    """
    # Lamp: 1 means "on", anything else means "off".
    comando = 'ligar' if int(y['lampada']) == 1 else 'desligar'
    lampada(comando)
    # Air conditioner: when enabled, forward the requested temperature;
    # otherwise send 0, which ar() treats as "turn off".
    if int(y['ar']) == 1:
        ar(float(y['temperatura']))
    else:
        ar(0)
    # Gate: both branches of the original if/else were byte-identical, so
    # the redundant conditional is collapsed into a single call.
    portao(int(y['portao']))
def sub_lampada():
    """Consumes luminosity readings from the 'lampada' queue forever,
    appending each integer reading to the global sensor_ilumicao history."""
    conexao = pika.BlockingConnection(
        pika.ConnectionParameters(host='localhost'))
    canal = conexao.channel()
    canal.queue_declare(queue='lampada')

    def _on_message(ch, method, properties, body):
        global sensor_ilumicao
        sensor_ilumicao.append(int(float(body.decode())))

    canal.basic_consume(queue='lampada', on_message_callback=_on_message, auto_ack=True)
    canal.start_consuming()
def sub_ar():
    """Consumes temperature readings from the 'ar' queue forever,
    appending each integer reading to the global ar_sensor history."""
    conexao = pika.BlockingConnection(
        pika.ConnectionParameters(host='localhost'))
    canal = conexao.channel()
    canal.queue_declare(queue='ar')

    def _on_message(ch, method, properties, body):
        global ar_sensor
        ar_sensor.append(int(float(body.decode())))

    canal.basic_consume(queue='ar', on_message_callback=_on_message, auto_ack=True)
    canal.start_consuming()
def sub_portao():
    """Consumes gate state messages from the 'portao' queue forever,
    appending each integer state to the global portao_sensor history."""
    conexao = pika.BlockingConnection(
        pika.ConnectionParameters(host='localhost'))
    canal = conexao.channel()
    canal.queue_declare(queue='portao')

    def _on_message(ch, method, properties, body):
        global portao_sensor
        portao_sensor.append(int(body.decode()))

    canal.basic_consume(queue='portao', on_message_callback=_on_message, auto_ack=True)
    canal.start_consuming()
def portao(comando):
    """Drives the gate gRPC service and records the resulting state.

    Non-negative commands open the gate; negative ones close it. The
    server's reported status is appended to the portao_valor history.
    """
    stub = portao_pb2_grpc.PortaoStub(grpc.insecure_channel('localhost:50053'))
    pedido = portao_pb2.StatusPortao(status=float(comando))
    rpc = stub.abrirPortao if float(comando) >= 0 else stub.fecharPortao
    resposta = rpc(pedido)
    portao_valor.append(resposta.status)
def ar(comando):
    """Drives the air-conditioner gRPC service and records the result.

    A positive command turns the AC on at that temperature; anything else
    turns it off (sent as -1). The reply temperature is appended to the
    ar_valor history.
    """
    stub = ar_pb2_grpc.ArStub(grpc.insecure_channel('localhost:50052'))
    if float(comando) > 0:
        resposta = stub.ligarAr(ar_pb2.ArTemperatura(temperatura=float(comando)))
    else:
        resposta = stub.desligarAr(ar_pb2.ArTemperatura(temperatura=-1))
    ar_valor.append(resposta.temperatura)
def lampada(comando):
    """Drives the lamp gRPC service and records the resulting status.

    'desligar' turns the lamp off; any other command turns it on. The
    reply's ``stauts`` (sic, proto field name) is appended to the
    lampada_valor history.
    """
    stub = lampada_pb2_grpc.LampadaStub(grpc.insecure_channel('localhost:50051'))
    pedido = lampada_pb2.LampadaStatus(stauts=1)
    rpc = stub.desligarLampada if comando == 'desligar' else stub.ligarLampada
    lampada_valor.append(rpc(pedido).stauts)
# Start the consumer and server threads (queue subscribers, HTTP accept
# loop and form-processing loop).
t = threading.Thread(target=sub_lampada)
t.start()
t3 = threading.Thread(target=sub_ar)
t3.start()
t4 = threading.Thread(target=sub_portao)
t4.start()
aceptar = threading.Thread(target=aceptarCon)
aceptar.start()
procesar = threading.Thread(target=procesarCon)
procesar.start()
# Block on stdin until Ctrl-C, then shut down.
while True:
    try:
        comando = input('')
    except KeyboardInterrupt:
        print('Interrupted')
        # BUG FIX: the original called sys.exit(0) and then sock.close()
        # inside the same try, so that close was unreachable; the
        # except-SystemExit handler then closed the socket and called
        # os._exit(0). Same net effect, done directly: close the socket,
        # then force-exit so the non-daemon worker threads terminate.
        sock.close()
        os._exit(0)
| {"/server_portao.py": ["/portao_pb2_grpc.py"], "/server_lampada.py": ["/lampada_pb2_grpc.py"], "/homeassintent.py": ["/lampada_pb2_grpc.py", "/portao_pb2_grpc.py"]} |
75,337 | oppia/oppia-ml | refs/heads/develop | /core/classifiers/algorithm_registry.py | # coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Registry for classification algorithms/classifiers."""
import os
import pkgutil
import vmconf
class Registry(object):
    """Registry of all classifier classes."""
    # pylint: disable=fixme
    # TODO (prasanna08): Add unittest for algorithm registry when we have
    # classifier(s) to test it.

    # Dict mapping algorithm IDs to classifier classes.
    _classifier_classes = {}

    @classmethod
    def get_all_classifier_algorithm_ids(cls):
        """Retrieves a list of all classifier algorithm IDs.

        Returns:
            A list containing all the classifier algorithm IDs.
        """
        # IDIOM FIX: a plain list() copy replaces the redundant identity
        # comprehension over vmconf.ALGORITHM_IDS.
        return list(vmconf.ALGORITHM_IDS)

    @classmethod
    def _refresh(cls):
        """Refreshes the dict mapping algorithm IDs to instances of
        classifiers.
        """
        cls._classifier_classes.clear()
        all_classifier_ids = cls.get_all_classifier_algorithm_ids()
        # Assemble all paths to the classifiers.
        extension_paths = [
            os.path.join(vmconf.CLASSIFIERS_DIR, classifier_id)
            for classifier_id in all_classifier_ids]
        # Crawl the directories and add new classifier instances to the
        # registry.
        for loader, name, _ in pkgutil.iter_modules(path=extension_paths):
            module = loader.find_module(name).load_module(name)
            try:
                clazz = getattr(module, name)
            except AttributeError:
                continue
            # Only register classes that directly subclass BaseClassifier.
            ancestor_names = [
                base_class.__name__ for base_class in clazz.__bases__]
            if 'BaseClassifier' in ancestor_names:
                cls._classifier_classes[clazz.__name__] = clazz

    @classmethod
    def get_all_classifiers(cls):
        """Retrieves a list of instances of all classifiers.

        Returns:
            A list of instances of all the classification algorithms.
        """
        if not cls._classifier_classes:
            cls._refresh()
        return [clazz() for clazz in cls._classifier_classes.values()]

    @classmethod
    def get_classifier_by_algorithm_id(cls, classifier_algorithm_id):
        """Retrieves a classifier instance by its algorithm id.

        Refreshes once if the classifier is not found; subsequently, throws
        a KeyError.

        Args:
            classifier_algorithm_id: str. The ID of the classifier
                algorithm.

        Raises:
            KeyError: If the classifier is not found the first time.

        Returns:
            An instance of the classifier.
        """
        if classifier_algorithm_id not in cls._classifier_classes:
            cls._refresh()
        clazz = cls._classifier_classes[classifier_algorithm_id]
        return clazz()
| {"/core/classifiers/algorithm_registry.py": ["/vmconf.py"], "/utils.py": ["/vmconf.py"], "/main.py": ["/vm_config.py", "/vmconf.py"], "/core/tests/test_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services.py": ["/utils.py", "/vmconf.py"], "/core/classifiers/classifier_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services_test.py": ["/vmconf.py"], "/core/classifiers/classifier_utils_test.py": ["/vmconf.py"], "/core/domain/job_services_test.py": ["/vmconf.py"], "/core/platform/platform_services.py": ["/vmconf.py"]} |
75,338 | oppia/oppia-ml | refs/heads/develop | /core/domain/job_services.py | # coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains functions used for polling, training and saving jobs."""
from core.classifiers import algorithm_registry
from core.domain import training_job_result_domain
from core.domain import remote_access_services
# pylint: disable=too-many-branches
def _validate_job_data(job_data):
if not isinstance(job_data, dict):
raise Exception('Invalid format of job data')
if 'job_id' not in job_data:
raise Exception('job data should contain job id')
if 'training_data' not in job_data:
raise Exception('job data should contain training data')
if 'algorithm_id' not in job_data:
raise Exception('job data should contain algorithm id')
if 'algorithm_version' not in job_data:
raise Exception('job data should contain algorithm version')
if not isinstance(job_data['job_id'], unicode):
raise Exception(
'Expected job id to be unicode, received %s' %
job_data['job_id'])
if not isinstance(job_data['algorithm_id'], unicode):
raise Exception(
'Expected algorithm id to be unicode, received %s' %
job_data['algorithm_id'])
if not isinstance(job_data['algorithm_version'], int):
raise Exception(
'Expected algorithm version to be integer, received %s' %
job_data['algorithm_version'])
if not isinstance(job_data['training_data'], list):
raise Exception(
'Expected training data to be a list, received %s' %
job_data['training_data'])
algorithm_ids = (
algorithm_registry.Registry.get_all_classifier_algorithm_ids())
if job_data['algorithm_id'] not in algorithm_ids:
raise Exception('Invalid algorithm id %s' % job_data['algorithm_id'])
classifier = algorithm_registry.Registry.get_classifier_by_algorithm_id(
job_data['algorithm_id'])
if classifier.algorithm_version != job_data['algorithm_version']:
raise Exception(
'Classifier version %d mismatches algorithm version %d received '
'in job data' % (
classifier.algorithm_version, job_data['algorithm_version']))
for grouped_answers in job_data['training_data']:
if 'answer_group_index' not in grouped_answers:
raise Exception(
'Expected answer_group_index to be a key in training_data',
' list item')
if 'answers' not in grouped_answers:
raise Exception(
'Expected answers to be a key in training_data list item')
if not isinstance(grouped_answers['answer_group_index'], int):
raise Exception(
'Expected answer_group_index to be an int, received %s' %
grouped_answers['answer_group_index'])
if not isinstance(grouped_answers['answers'], list):
raise Exception(
'Expected answers to be a list, received %s' %
grouped_answers['answers'])
def get_next_job():
    """Fetches the next pending training job request from the server.

    Returns:
        dict. The job data (validated when non-empty); may be falsy if no
        job is pending.
    """
    job_data = remote_access_services.fetch_next_job_request()
    if not job_data:
        return job_data
    _validate_job_data(job_data)
    return job_data
def train_classifier(algorithm_id, training_data):
    """Train classifier associated with 'algorithm_id' using 'training_data'.

    Args:
        algorithm_id: str. ID of classifier algorithm.
        training_data: list(dict). A list containing training data. Each dict
            stores 'answer_group_index' and 'answers'.

    Returns:
        FrozenModel. A protobuf object containing trained parameters of the
        classifier algorithm, validated by the classifier before returning.
    """
    # (Docstring fix: the previously documented 'algorithm_version'
    # argument does not exist in this signature.)
    classifier = algorithm_registry.Registry.get_classifier_by_algorithm_id(
        algorithm_id)
    classifier.train(training_data)
    frozen_model_proto = classifier.to_proto()
    classifier.validate(frozen_model_proto)
    return frozen_model_proto
def store_job_result(job_id, algorithm_id, frozen_model_proto):
    """Stores the result of a training job on the Oppia server.

    Args:
        job_id: str. ID of the job whose result is to be stored.
        algorithm_id: str. ID of the classifier algorithm.
        frozen_model_proto: FrozenModel. A protobuf object that stores
            trained model parameters.

    Returns:
        int. Status code of the response.
    """
    result = training_job_result_domain.TrainingJobResult(
        job_id, algorithm_id, frozen_model_proto)
    return remote_access_services.store_trained_classifier_model(result)
| {"/core/classifiers/algorithm_registry.py": ["/vmconf.py"], "/utils.py": ["/vmconf.py"], "/main.py": ["/vm_config.py", "/vmconf.py"], "/core/tests/test_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services.py": ["/utils.py", "/vmconf.py"], "/core/classifiers/classifier_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services_test.py": ["/vmconf.py"], "/core/classifiers/classifier_utils_test.py": ["/vmconf.py"], "/core/domain/job_services_test.py": ["/vmconf.py"], "/core/platform/platform_services.py": ["/vmconf.py"]} |
75,339 | oppia/oppia-ml | refs/heads/develop | /utils.py | # coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides utility functions."""
import json
import vmconf
def parse_data_received_from_server(text):
    """Parses a server response body into a Python object.

    Strips the anti-XSSI prefix, when present, before JSON-decoding.

    Args:
        text: str. Received data in string format.

    Returns:
        dict. A dictionary corresponding to the received text.
    """
    prefix = vmconf.XSSI_PREFIX
    payload = text[len(prefix):] if text.startswith(prefix) else text
    return json.loads(payload)
| {"/core/classifiers/algorithm_registry.py": ["/vmconf.py"], "/utils.py": ["/vmconf.py"], "/main.py": ["/vm_config.py", "/vmconf.py"], "/core/tests/test_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services.py": ["/utils.py", "/vmconf.py"], "/core/classifiers/classifier_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services_test.py": ["/vmconf.py"], "/core/classifiers/classifier_utils_test.py": ["/vmconf.py"], "/core/domain/job_services_test.py": ["/vmconf.py"], "/core/platform/platform_services.py": ["/vmconf.py"]} |
75,340 | oppia/oppia-ml | refs/heads/develop | /main.py | # Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main process for training classifiers."""
# Preconfigure before starting main worker process.
# This step should be performed before importing any of the
# third party libraries.
import logging
import sys
import time
import vm_config
vm_config.configure()
# pylint: disable=wrong-import-position
from core.domain import job_services
import vmconf
def main():
    """Runs one polling/training/upload cycle of the VM worker."""
    try:
        job_data = job_services.get_next_job()
        if not job_data:
            logging.info('No pending job requests.')
            return
        model_proto = job_services.train_classifier(
            job_data['algorithm_id'], job_data['training_data'])
        response_status = job_services.store_job_result(
            job_data['job_id'], job_data['algorithm_id'], model_proto)
        if response_status != 200:
            logging.warning(
                'Failed to store result of the job with \'%s\' job_id',
                job_data['job_id'])
            return
    except KeyboardInterrupt:
        logging.info('Exiting')
        sys.exit(0)
    except Exception as e:  # pylint: disable=broad-except
        # Log any exceptions that arise while processing the job.
        logging.error(e.message)
    finally:
        # Optionally throttle the polling loop between cycles.
        if vmconf.DEFAULT_WAITING_METHOD == vmconf.FIXED_TIME_WAITING:
            time.sleep(vmconf.FIXED_TIME_WAITING_PERIOD)
if __name__ == '__main__':
    # Poll for jobs forever; main() handles its own waiting between cycles.
    while True:
        main()
| {"/core/classifiers/algorithm_registry.py": ["/vmconf.py"], "/utils.py": ["/vmconf.py"], "/main.py": ["/vm_config.py", "/vmconf.py"], "/core/tests/test_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services.py": ["/utils.py", "/vmconf.py"], "/core/classifiers/classifier_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services_test.py": ["/vmconf.py"], "/core/classifiers/classifier_utils_test.py": ["/vmconf.py"], "/core/domain/job_services_test.py": ["/vmconf.py"], "/core/platform/platform_services.py": ["/vmconf.py"]} |
75,341 | oppia/oppia-ml | refs/heads/develop | /core/tests/test_utils.py | # coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for test classes."""
import contextlib
import json
import unittest
import urlparse
import responses
import vmconf
class TestBase(unittest.TestCase):
    """Base class for all tests."""

    def setUp(self):
        """setUp method which is run before every test case."""
        pass

    def tearDown(self):
        """tearDown method which is run after executing every test case."""
        pass

    @staticmethod
    def put_get_request(url, data, status_code, headers=None):
        """Puts a mock get request for given url.

        Args:
            url: str. URL on which request.get() is to be executed.
            data: dict. A dictionary containing response data.
            status_code: int. Status code of response.
            headers: dict|None. Optional headers added to the mock
                response.

        Returns:
            RequestsMock. The mock registered for the GET request.
        """
        response = responses.RequestsMock()
        response.add(
            response.GET, url, body=data, status=status_code,
            adding_headers=headers)
        return response

    def set_job_request_post_callback(self, callback):
        """Sets a callback for fetch next job post request.

        Args:
            callback: callable. This is called implicitly when
                request.post() is executed.

        Returns:
            RequestsMock. The mock registered for the POST request.
        """
        request_url = '%s:%s/%s' % (
            vmconf.DEFAULT_COMMUNICATION_URL, vmconf.DEFAULT_COMMUNICATION_PORT,
            vmconf.FETCH_NEXT_JOB_REQUEST_HANDLER)
        return self.set_post_callback(request_url, callback)

    def set_job_result_post_callback(self, callback):
        """Sets a callback for store job result post request.

        Args:
            callback: callable. This is called implicitly when
                request.post() is executed.

        Returns:
            RequestsMock. The mock registered for the POST request.
        """
        request_url = '%s:%s/%s' % (
            vmconf.DEFAULT_COMMUNICATION_URL, vmconf.DEFAULT_COMMUNICATION_PORT,
            vmconf.STORE_TRAINED_CLASSIFIER_MODEL_HANDLER)
        return self.set_post_callback(request_url, callback)

    @staticmethod
    def set_post_callback(url, callback):
        """Registers a callback for a POST request to the given url.

        Args:
            url: str. URL on which requests.post() is executed.
            callback: callable. This is called implicitly when
                request.post() is executed.

        Returns:
            RequestsMock. The mock registered for the POST request.
        """
        response = responses.RequestsMock()
        response.add_callback(
            response.POST, url, callback=callback)
        return response

    @staticmethod
    @contextlib.contextmanager
    def swap(obj, attr, newvalue):
        """Swap an object's attribute value within the context of a
        'with' statement. The object can be anything that supports
        getattr and setattr, such as class instances, modules, ...

        Example usage:

            import math
            with self.swap(math, 'sqrt', lambda x: 42):
                print math.sqrt(16.0)  # prints 42
            print math.sqrt(16.0)  # prints 4 as expected.

        Note that this does not work directly for classmethods. In this
        case, you will need to import the 'types' module, as follows:

            import types
            with self.swap(
                SomePythonClass, 'some_classmethod',
                types.MethodType(new_classmethod, SomePythonClass)):

        NOTE: self.swap and other context managers that are created using
        contextlib.contextmanager use generators that yield exactly once.
        This means that you can only use them once after construction,
        otherwise, the generator will immediately raise StopIteration, and
        contextlib will raise a RuntimeError.
        """
        original = getattr(obj, attr)
        setattr(obj, attr, newvalue)
        try:
            yield
        finally:
            # Always restore the original attribute, even if the body
            # raised.
            setattr(obj, attr, original)

    @staticmethod
    def callback(func):
        """Decorator for callback method.

        Use this function as decorator when you are defining your own
        callback for post request.

        Example usage:

            @test_utils.GenericTestBase.callback
            def _post_callback(request):
                # Your assertions and code.
                # This function should return str(data) to be
                # returned in response.

        NOTE: It is neccessary to use this decorator whenever defining
        callback method for post request.
        """
        def wrapper(request):
            """Wrapper class for python decorator."""
            # func(request) returns response data as json object.
            # Form-encoded bodies carry the JSON under a 'payload' field;
            # decode it and expose it as request.payload before invoking
            # the wrapped callback.
            if request.headers['content-type'] == (
                'application/x-www-form-urlencoded'):
                data = urlparse.parse_qs(request.body)
                payload = json.loads(data['payload'][0])
                request.payload = payload
                return (200, {}, json.dumps(func(request)))
            return (200, {}, func(request))
        return wrapper
# Select the platform-specific test base class. GCE is currently the only
# supported platform; any other configured platform is a deployment error.
if vmconf.PLATFORM == 'gce':
    GenericTestBase = TestBase
else:
    raise Exception('Invalid platform: expected one of [\'gce\']')
| {"/core/classifiers/algorithm_registry.py": ["/vmconf.py"], "/utils.py": ["/vmconf.py"], "/main.py": ["/vm_config.py", "/vmconf.py"], "/core/tests/test_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services.py": ["/utils.py", "/vmconf.py"], "/core/classifiers/classifier_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services_test.py": ["/vmconf.py"], "/core/classifiers/classifier_utils_test.py": ["/vmconf.py"], "/core/domain/job_services_test.py": ["/vmconf.py"], "/core/platform/platform_services.py": ["/vmconf.py"]} |
75,342 | oppia/oppia-ml | refs/heads/develop | /core/classifiers/base.py | # coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for classification algorithms"""
import abc
class BaseClassifier(object):
    """A base class for classifiers that uses supervised learning to match
    free-form text answers to answer groups. The classifier trains on answers
    that exploration editors have assigned to an answer group.
    Below are some concepts used in this class.
    training_data: list(dict). The training data that is used for training
        the classifier.
    label - An answer group that the training sample should correspond to. If
        a sample is being added to train a model, labels are provided.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self):
        pass

    # NOTE: abc.abstractproperty already returns a property object, so it
    # must NOT be wrapped in an extra @property decorator. Doing so makes
    # the outer property's fget the inner property object itself, and
    # attribute access then raises "TypeError: 'property' object is not
    # callable" instead of the intended NotImplementedError.
    @abc.abstractproperty
    def algorithm_version(self):
        """Version of the classifier algorithm. The version of the algorithm
        is matched with the version received as part of job data before
        training the classifier.

        The version of the algorithm changes every time the classifier
        algorithm is changed. The algorithm version helps Oppia to map the
        trained classifier models with their corresponding prediction API
        in the Oppia frontend.
        """
        raise NotImplementedError

    @abc.abstractproperty
    def name_in_job_result_proto(self):
        """str. Name of the attribute in the job result proto message which
        will store this classifier's classifier data.
        """
        raise NotImplementedError

    @abc.abstractproperty
    def type_in_job_result_proto(self):
        """str. The type of the property in the job result proto message
        which stores this classifier's classifier data.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def to_proto(self):
        """Returns a protobuf of the frozen model consisting of trained
        parameters.

        Returns:
            FrozenModel. A protobuf object of the frozen model containing
            trained model parameters. This data is used for prediction.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def train(self, training_data):
        """Trains classifier using given training_data.

        Args:
            training_data: list(dict). The training data that is used for
                training the classifier. The list contains dicts where each
                dict represents a single training data group, for example:
                training_data = [
                    {
                        'answer_group_index': 1,
                        'answers': [answer_1, answer_2]
                    },
                    {
                        'answer_group_index': 2,
                        'answers': [answer_3, answer_4]
                    }
                ]
        """
        raise NotImplementedError

    @abc.abstractmethod
    def validate(self, frozen_model_proto):
        """Validates the specified frozen model protobuf object containing
        parameters of the trained classifier model.

        Args:
            frozen_model_proto: The frozen model protobuf containing
                parameters of the trained classifier model.
        """
        raise NotImplementedError
| {"/core/classifiers/algorithm_registry.py": ["/vmconf.py"], "/utils.py": ["/vmconf.py"], "/main.py": ["/vm_config.py", "/vmconf.py"], "/core/tests/test_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services.py": ["/utils.py", "/vmconf.py"], "/core/classifiers/classifier_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services_test.py": ["/vmconf.py"], "/core/classifiers/classifier_utils_test.py": ["/vmconf.py"], "/core/domain/job_services_test.py": ["/vmconf.py"], "/core/platform/platform_services.py": ["/vmconf.py"]} |
75,343 | oppia/oppia-ml | refs/heads/develop | /core/domain/remote_access_services.py | # coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides interface to communicate with Oppia remotely."""
import base64
import hashlib
import hmac
import json
import requests
from core.domain.proto import training_job_response_payload_pb2
from core.platform import platform_services
import utils
import vmconf
metadata_services = platform_services.Registry.import_metadata_services()
def _get_url():
    """Return the Oppia communication URL for the current run mode."""
    if not vmconf.DEV_MODE:
        return vmconf.SERVER_COMMUNICATION_URL
    return vmconf.DEFAULT_COMMUNICATION_URL
def _get_port():
    """Return the Oppia communication port for the current run mode."""
    if not vmconf.DEV_MODE:
        return vmconf.SERVER_COMMUNICATION_PORT
    return vmconf.DEFAULT_COMMUNICATION_PORT
def _get_vm_id():
    """Return this VM's id: a fixed default in dev mode, otherwise the
    value fetched from the instance metadata.
    """
    if vmconf.DEV_MODE:
        return vmconf.DEFAULT_VM_ID
    # The HMAC module does not support unicode strings, so the metadata
    # value is coerced to str.
    vm_id = metadata_services.get_metadata_param(
        vmconf.METADATA_VM_ID_PARAM_NAME)
    return str(vm_id)
def _get_shared_secret():
    """Return the shared secret used to sign messages: a fixed default in
    dev mode, otherwise the value fetched from the instance metadata.
    """
    if vmconf.DEV_MODE:
        return vmconf.DEFAULT_VM_SHARED_SECRET
    # The HMAC module does not support unicode strings, so the metadata
    # value is coerced to str.
    secret = metadata_services.get_metadata_param(
        vmconf.METADATA_SHARED_SECRET_PARAM_NAME)
    return str(secret)
def generate_signature(message, vm_id):
    """Generate an HMAC-SHA256 digital signature for the given message
    combined with vm_id.

    Args:
        message: bytes. Message string.
        vm_id: str. ID of the VM that trained the job.

    Returns:
        str. Hex digest of the signature generated from the request data.
    """
    signed_content = b'%s|%s' % (
        base64.b64encode(message), vm_id.encode(encoding='utf-8'))
    secret_key = _get_shared_secret().encode(encoding='utf-8')
    return hmac.new(
        secret_key, signed_content, digestmod=hashlib.sha256).hexdigest()
def fetch_next_job_request():
    """Fetch the next training job request from the Oppia server.

    Returns:
        dict. A dict retrieved remotely from the database containing
        job request data.
    """
    handler_url = "%s:%s/%s" % (
        _get_url(), _get_port(), vmconf.FETCH_NEXT_JOB_REQUEST_HANDLER)
    # The VM identifies itself by signing its own id as the message.
    vm_id = _get_vm_id().encode(encoding='utf-8')
    payload = {
        'vm_id': vm_id,
        'message': vm_id,
    }
    payload['signature'] = generate_signature(
        payload['message'], payload['vm_id'])
    response = requests.post(
        handler_url, data={'payload': json.dumps(payload)})
    return utils.parse_data_received_from_server(response.text)
def store_trained_classifier_model(job_result):
    """Upload the result of a processed training job to the Oppia server.

    Args:
        job_result: TrainingJobResult. Domain object containing the result
            of training the classifier along with job_id and algorithm_id.

    Returns:
        int. Status code of the response.
    """
    job_result.validate()
    response_payload = (
        training_job_response_payload_pb2.TrainingJobResponsePayload())
    response_payload.job_result.CopyFrom(job_result.to_proto())
    response_payload.vm_id = _get_vm_id().encode(encoding='utf-8')
    # The signature covers the serialized job result only.
    serialized_job_result = (
        response_payload.job_result.SerializeToString().encode(
            encoding='utf-8'))
    response_payload.signature = generate_signature(
        serialized_job_result, response_payload.vm_id)
    handler_url = "%s:%s/%s" % (
        _get_url(), _get_port(), vmconf.STORE_TRAINED_CLASSIFIER_MODEL_HANDLER)
    response = requests.post(
        handler_url, data=response_payload.SerializeToString(),
        headers={'Content-Type': 'application/octet-stream'})
    return response.status_code
| {"/core/classifiers/algorithm_registry.py": ["/vmconf.py"], "/utils.py": ["/vmconf.py"], "/main.py": ["/vm_config.py", "/vmconf.py"], "/core/tests/test_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services.py": ["/utils.py", "/vmconf.py"], "/core/classifiers/classifier_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services_test.py": ["/vmconf.py"], "/core/classifiers/classifier_utils_test.py": ["/vmconf.py"], "/core/domain/job_services_test.py": ["/vmconf.py"], "/core/platform/platform_services.py": ["/vmconf.py"]} |
75,344 | oppia/oppia-ml | refs/heads/develop | /core/platform/metadata/gce_metadata_services.py | # coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for retrieving metadata of GCE."""
import requests
# Following URL is used to retrieve metadata of gce instance.
# For more information check following link:
# https://cloud.google.com/compute/docs/storing-retrieving-metadata
METADATA_ATTRIBUTES_URL = (
    'http://metadata.google.internal/computeMetadata/v1/instance/attributes/')
# Header required by the GCE metadata server on every request (see the
# Google Cloud "storing and retrieving metadata" docs linked above).
METADATA_HEADERS = {
    'Metadata-Flavor': 'Google'
}
def get_metadata_param(param_name):
    """Fetch the value of a single GCE metadata attribute.

    Args:
        param_name: str. Name of the parameter to be fetched.

    Returns:
        str. The raw text value of the metadata attribute.
    """
    # NOTE: METADATA_ATTRIBUTES_URL already ends with a slash, so this
    # yields a double slash before param_name; the unit tests build the
    # URL the same way, so the behavior is kept as-is.
    param_url = '%s/%s' % (METADATA_ATTRIBUTES_URL, param_name)
    response = requests.get(param_url, headers=METADATA_HEADERS)
    return response.text
| {"/core/classifiers/algorithm_registry.py": ["/vmconf.py"], "/utils.py": ["/vmconf.py"], "/main.py": ["/vm_config.py", "/vmconf.py"], "/core/tests/test_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services.py": ["/utils.py", "/vmconf.py"], "/core/classifiers/classifier_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services_test.py": ["/vmconf.py"], "/core/classifiers/classifier_utils_test.py": ["/vmconf.py"], "/core/domain/job_services_test.py": ["/vmconf.py"], "/core/platform/platform_services.py": ["/vmconf.py"]} |
75,345 | oppia/oppia-ml | refs/heads/develop | /core/classifiers/classifier_utils.py | # coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions required by classifier."""
import re
import scipy
import vmconf
def extract_svm_parameters(clf):
    """Pull the parameters of a trained sklearn.svm.SVC into a plain dict.

    Args:
        clf: object of class sklearn.svm.SVC. Trained classifier model
            instance.

    Returns:
        dict. Parameters of the trained classifier, keyed by unicode
        strings; used in the frontend during prediction.
    """
    params = clf.__dict__
    vectors = params['support_vectors_']
    coefficients = params['_dual_coef_']
    # Sparse support vectors (and their matching dual coefficients) must
    # be densified before they can be serialized as lists.
    if isinstance(vectors, scipy.sparse.csr.csr_matrix):
        # Warning: this might result in a really large list.
        vectors = vectors.toarray()
        coefficients = coefficients.toarray()
    class_labels = [
        label.decode('UTF-8') if isinstance(label, basestring) else label
        for label in params['classes_'].tolist()]
    return {
        u'n_support': params['n_support_'].tolist(),
        u'support_vectors': vectors.tolist(),
        u'dual_coef': coefficients.tolist(),
        u'intercept': params['_intercept_'].tolist(),
        u'classes': class_labels,
        u'probA': params['probA_'].tolist(),
        u'probB': params['probB_'].tolist(),
        u'kernel_params': {
            u'kernel': unicode(params['kernel']),
            u'gamma': params['_gamma'],
            u'coef0': params['coef0'],
            u'degree': params['degree'],
        },
    }
def unicode_validator_for_classifier_data(classifier_data):
    """Recursively check that classifier data contains no byte (str)
    strings — every string must be unicode.

    Args:
        classifier_data: *. The trained classifier model data.

    Raises:
        Exception. Some string inside classifier_data is a str rather than
            a unicode string.
    """
    if isinstance(classifier_data, str):
        raise Exception(
            'Expected \'%s\' to be unicode but found str.' % classifier_data)
    if isinstance(classifier_data, dict):
        for key in classifier_data:
            if isinstance(key, str):
                raise Exception(
                    'Expected %s to be unicode but found str.' % key)
            unicode_validator_for_classifier_data(classifier_data[key])
    elif isinstance(classifier_data, list):
        for element in classifier_data:
            unicode_validator_for_classifier_data(element)
def encode_floats_in_classifier_data(classifier_data):
    """Recursively convert every float in classifier data to a string.

    Walks the whole structure, stringifying float values. Along the way it
    also verifies (via regex) that no pre-existing string value looks like
    a float, since such values could not later be told apart from encoded
    floats.

    Args:
        classifier_data: dict|list|string|int|float. The original
            classifier data which needs conversion of floats to strings.

    Raises:
        Exception. A string value in the data is convertible to float; the
            classifier data must not include float-like strings.
        Exception. The data contains an object whose type is not one of
            integer, string, dict, float or list.

    Returns:
        dict|list|string|int|float. Modified classifier data in which
        float values are converted into strings.
    """
    if isinstance(classifier_data, dict):
        return {
            key: encode_floats_in_classifier_data(value)
            for key, value in classifier_data.items()
        }
    elif isinstance(classifier_data, list):
        return [
            encode_floats_in_classifier_data(item)
            for item in classifier_data
        ]
    elif isinstance(classifier_data, float):
        return str(classifier_data)
    elif isinstance(classifier_data, basestring):
        if re.match(vmconf.FLOAT_VERIFIER_REGEX, classifier_data):
            # A float value must not be stored as a string in
            # classifier data.
            raise Exception(
                'Error: Found a float value %s stored as string. Float '
                'values should not be stored as strings.' % (
                    classifier_data))
        return classifier_data
    elif isinstance(classifier_data, int):
        return classifier_data
    else:
        raise Exception(
            'Expected all classifier data objects to be lists, dicts, floats, '
            'strings, integers but received %s.' % (type(classifier_data)))
| {"/core/classifiers/algorithm_registry.py": ["/vmconf.py"], "/utils.py": ["/vmconf.py"], "/main.py": ["/vm_config.py", "/vmconf.py"], "/core/tests/test_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services.py": ["/utils.py", "/vmconf.py"], "/core/classifiers/classifier_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services_test.py": ["/vmconf.py"], "/core/classifiers/classifier_utils_test.py": ["/vmconf.py"], "/core/domain/job_services_test.py": ["/vmconf.py"], "/core/platform/platform_services.py": ["/vmconf.py"]} |
75,346 | oppia/oppia-ml | refs/heads/develop | /core/domain/training_job_result_domain.py | # coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions and classdefs related to protobuf files used in Oppia-ml"""
from core.classifiers import algorithm_registry
from core.domain.proto import training_job_response_payload_pb2
class TrainingJobResult(object):
    """Holds the outcome of a single training job.

    Bundles the trained classifier data (the frozen model) together with
    the job_id and algorithm_id it was produced for.
    """

    def __init__(self, job_id, algorithm_id, classifier_data):
        """Initializes a TrainingJobResult object.

        Args:
            job_id: str. The id of the training job whose results are
                stored in classifier_data.
            algorithm_id: str. The id of the algorithm of the training job.
            classifier_data: object. Frozen model of the corresponding
                training job.
        """
        self.job_id = job_id
        self.algorithm_id = algorithm_id
        self.classifier_data = classifier_data

    def validate(self):
        """Validate that this TrainingJobResult object stores correct data.

        Raises:
            Exception. The ids are not strings, or the classifier data is
                stored in a field that does not correspond to algorithm_id.
        """
        if not isinstance(self.job_id, basestring):
            raise Exception(
                "Expected job id of basestring type but received %s" % (
                    type(self.job_id).__name__))
        if not isinstance(self.algorithm_id, basestring):
            raise Exception(
                "Expected algorithm id of basestring type but received %s" % (
                    type(self.algorithm_id).__name__))
        # The classifier data's type must match the proto field type
        # declared by the classifier for this algorithm_id.
        expected_classifier = (
            algorithm_registry.Registry.get_classifier_by_algorithm_id(
                self.algorithm_id))
        actual_type_name = type(self.classifier_data).__name__
        if actual_type_name != expected_classifier.type_in_job_result_proto:
            raise Exception(
                "Expected classifier data of type %s but found %s type" % (
                    expected_classifier.type_in_job_result_proto,
                    actual_type_name))

    def to_proto(self):
        """Generate a TrainingJobResult protobuf object from this domain
        object.

        Returns:
            TrainingJobResult protobuf object. Protobuf object
            corresponding to the TrainingJobResult protobuf message
            definition.
        """
        self.validate()
        job_result_proto = (
            training_job_response_payload_pb2.
            TrainingJobResponsePayload.JobResult())
        job_result_proto.job_id = self.job_id
        # Store the classifier data in the proto field declared by the
        # classifier for this algorithm.
        field_name = (
            algorithm_registry.Registry.get_classifier_by_algorithm_id(
                self.algorithm_id).name_in_job_result_proto)
        getattr(job_result_proto, field_name).CopyFrom(self.classifier_data)
        return job_result_proto
| {"/core/classifiers/algorithm_registry.py": ["/vmconf.py"], "/utils.py": ["/vmconf.py"], "/main.py": ["/vm_config.py", "/vmconf.py"], "/core/tests/test_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services.py": ["/utils.py", "/vmconf.py"], "/core/classifiers/classifier_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services_test.py": ["/vmconf.py"], "/core/classifiers/classifier_utils_test.py": ["/vmconf.py"], "/core/domain/job_services_test.py": ["/vmconf.py"], "/core/platform/platform_services.py": ["/vmconf.py"]} |
75,347 | oppia/oppia-ml | refs/heads/develop | /core/platform/metadata/gce_metadata_services_test.py | # coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gce metadata services"""
from core.platform.metadata import gce_metadata_services
from core.tests import test_utils
class MetadataServicesTests(test_utils.GenericTestBase):
    """Tests for the GCE metadata accessor functions."""

    def test_that_metadata_is_fetched(self):
        """Verify that the value of a metadata parameter is fetched
        correctly from the mocked metadata endpoint.
        """
        param_name = 'param'
        expected_value = 'value'
        url = '%s/%s' % (
            gce_metadata_services.METADATA_ATTRIBUTES_URL, param_name)
        with self.put_get_request(
            url, expected_value, 200,
            gce_metadata_services.METADATA_HEADERS):
            fetched_value = gce_metadata_services.get_metadata_param(
                param_name)
            self.assertEqual(fetched_value, 'value')
| {"/core/classifiers/algorithm_registry.py": ["/vmconf.py"], "/utils.py": ["/vmconf.py"], "/main.py": ["/vm_config.py", "/vmconf.py"], "/core/tests/test_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services.py": ["/utils.py", "/vmconf.py"], "/core/classifiers/classifier_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services_test.py": ["/vmconf.py"], "/core/classifiers/classifier_utils_test.py": ["/vmconf.py"], "/core/domain/job_services_test.py": ["/vmconf.py"], "/core/platform/platform_services.py": ["/vmconf.py"]} |
75,348 | oppia/oppia-ml | refs/heads/develop | /core/domain/training_job_result_domain_test.py | # coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for training job result domain"""
from core.classifiers import algorithm_registry
from core.domain import training_job_result_domain
from core.domain.proto import training_job_response_payload_pb2
from core.domain.proto import text_classifier_pb2
from core.tests import test_utils
class TrainingJobResultTests(test_utils.GenericTestBase):
    """Tests for TrainingJobResult domain object."""
    def test_validate_job_data_with_valid_model_does_not_raise_exception(self): # pylint: disable=no-self-use
        """Ensure that validation checks do not raise exceptions when
        a valid classifier model is supplied.
        """
        job_id = 'job_id'
        algorithm_id = 'TextClassifier'
        # TextClassifierFrozenModel is the classifier_data type that
        # matches the 'TextClassifier' algorithm id, so validate() passes.
        classifier_data = text_classifier_pb2.TextClassifierFrozenModel()
        classifier_data.model_json = 'dummy model'
        job_result = training_job_result_domain.TrainingJobResult(
            job_id, algorithm_id, classifier_data)
        # Must not raise.
        job_result.validate()
    def test_job_data_with_invalid_job_id_raises_exception_on_validation(self):
        """Ensure that validation checks raise exception when an invalid
        job_id is supplied.
        """
        # job_id must be a string; an int should be rejected.
        job_id = 123
        algorithm_id = 'TextClassifier'
        classifier_data = 'simple classifier'
        job_result = training_job_result_domain.TrainingJobResult(
            job_id, algorithm_id, classifier_data)
        with self.assertRaisesRegexp(
            Exception, 'Expected job id of basestring type'):
            job_result.validate()
    def test_job_data_with_invalid_algorithm_id_raises_exception_on_validation(
            self):
        """Ensure that validation checks raise exception when an invalid
        algorithm_id is supplied.
        """
        # algorithm_id must be a string; an int should be rejected.
        job_id = '123'
        algorithm_id = 123
        classifier_data = 'simple classifier'
        job_result = training_job_result_domain.TrainingJobResult(
            job_id, algorithm_id, classifier_data)
        with self.assertRaisesRegexp(
            Exception, 'Expected algorithm id of basestring type'):
            job_result.validate()
    def test_job_data_with_invalid_classifier_data_raises_exception_on_validate(
            self):
        """Ensure that validation checks raise exception when an invalid
        classifier_data is supplied.
        """
        job_id = 'job_id'
        algorithm_id = 'TextClassifier'
        # A plain string does not match the proto type expected for the
        # 'TextClassifier' algorithm.
        classifier_data = 'simple classifier'
        job_result = training_job_result_domain.TrainingJobResult(
            job_id, algorithm_id, classifier_data)
        with self.assertRaisesRegexp(
            Exception,
            'Expected classifier data of type TextClassifier'):
            job_result.validate()
    def test_that_all_algorithms_have_job_result_information(self):
        """Test that all algorithms have properties to identify name and type
        of attribute in job result proto which stores classifier data for that
        algorithm.
        """
        job_result_proto = (
            training_job_response_payload_pb2.
            TrainingJobResponsePayload.JobResult())
        for classifier in algorithm_registry.Registry.get_all_classifiers():
            self.assertIsNotNone(classifier.name_in_job_result_proto)
            # The declared field name must exist on the proto and its
            # type name must match the classifier's declared type.
            attribute_type_name = type(getattr(
                job_result_proto, classifier.name_in_job_result_proto)).__name__
            self.assertEqual(
                attribute_type_name, classifier.type_in_job_result_proto)
    def test_that_training_job_result_proto_is_generated_with_correct_details(
            self):
        """Ensure that the JobResult proto is correctly generated from
        TrainingJobResult domain object.
        """
        classifier_data = text_classifier_pb2.TextClassifierFrozenModel()
        classifier_data.model_json = 'dummy model'
        job_id = 'job_id'
        algorithm_id = 'TextClassifier'
        classifier = algorithm_registry.Registry.get_classifier_by_algorithm_id(
            algorithm_id)
        job_result = training_job_result_domain.TrainingJobResult(
            job_id, algorithm_id, classifier_data)
        job_result_proto = job_result.to_proto()
        # Lint test for no-member needs to be disabled as protobuf generated
        # classes are metaclasses and hence their attributes are defined at
        # runtime.
        self.assertEqual(job_result_proto.job_id, job_id) # pylint: disable=no-member
        # The frozen model must land in the oneof field declared by the
        # classifier for this algorithm.
        self.assertEqual(
            job_result_proto.WhichOneof('classifier_frozen_model'),
            classifier.name_in_job_result_proto)
| {"/core/classifiers/algorithm_registry.py": ["/vmconf.py"], "/utils.py": ["/vmconf.py"], "/main.py": ["/vm_config.py", "/vmconf.py"], "/core/tests/test_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services.py": ["/utils.py", "/vmconf.py"], "/core/classifiers/classifier_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services_test.py": ["/vmconf.py"], "/core/classifiers/classifier_utils_test.py": ["/vmconf.py"], "/core/domain/job_services_test.py": ["/vmconf.py"], "/core/platform/platform_services.py": ["/vmconf.py"]} |
75,349 | oppia/oppia-ml | refs/heads/develop | /core/classifiers/TextClassifier/TextClassifier.py | # coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classifier for free-form text answers."""
import json
import logging
import time
from sklearn import model_selection
from sklearn import svm
from sklearn.feature_extraction.text import CountVectorizer
from core.classifiers import base
from core.classifiers import classifier_utils
from core.domain.proto import text_classifier_pb2
class TextClassifier(base.BaseClassifier):
"""A classifier that uses supervised learning to match free-form text
answers to answer groups. The classifier trains on answers that exploration
editors have assigned to an answer group. This classifier uses scikit's
Support Vector Classifier (SVC) to obtain the best model using the linear
kernel.
"""
    def __init__(self):
        """Initializes the classifier with all model state unset; the
        attributes below are populated by train().
        """
        super(TextClassifier, self).__init__()
        # sklearn.svm.SVC classifier object: the best estimator found by
        # the grid search run in train().
        self.best_clf = None
        # sklearn.feature_extraction.text.CountVectorizer object. It fits
        # text into a feature vector made up of word counts.
        self.count_vector = None
        # A dict representing the best parameters for the
        # sklearn.svm.SVC classifier.
        self.best_params = None
        # The weighted f1 score of the best classifier found with
        # GridSearch.
        self.best_score = None
        # Wall-clock time (seconds) taken to train the classifier.
        self.exec_time = None
    @property
    def algorithm_version(self):
        """int. The version of the TextClassifier algorithm.

        The algorithm_version should be incremented every time the
        classifier algorithm used for training is updated. This change is
        non-backward compatible, i.e. models trained on an older algorithm
        have to be retrained on the new algorithm.
        """
        return 1
    @property
    def name_in_job_result_proto(self):
        """str. Name of the field in the JobResult proto message that
        stores this classifier's frozen model (see to_proto).
        """
        return 'text_classifier'
    @property
    def type_in_job_result_proto(self):
        """str. Type name of the JobResult proto field that stores this
        classifier's frozen model, derived from the class name
        ('TextClassifierFrozenModel').
        """
        return '%sFrozenModel' % (self.__class__.__name__)
def train(self, training_data):
"""Trains classifier using given training_data.
Args:
training_data: list(dict). The training data that is used for
training the classifier. The list contains dicts where each
dict represents a single training data group, for example:
training_data = [
{
'answer_group_index': 1,
'answers': ['a1', 'a2']
},
{
'answer_group_index': 2,
'answers': ['a2', 'a3']
}
]
"""
x = []
y = []
start = time.time()
for answer_group in training_data:
for answer in answer_group['answers']:
x.append(answer)
y.append(answer_group['answer_group_index'])
count_vector = CountVectorizer()
# Learn a vocabulary dictionary of all tokens in the raw documents.
count_vector.fit(x)
# Transform document to document-term matrix
transformed_vector = count_vector.transform(x)
# Set the range of parameters for the exhaustive grid search.
param_grid = [{
u'C': [0.5, 1, 10, 50, 100],
u'kernel': [u'linear']
}]
clf = model_selection.GridSearchCV(
svm.SVC(probability=True), param_grid, scoring='f1_weighted',
n_jobs=-1)
clf.fit(transformed_vector, y)
end = time.time()
logging.info(
'The best score for GridSearch=%s', clf.best_score_)
logging.info(
'train() spent %f seconds for %d instances', end-start, len(x))
self.best_params = clf.best_params_
self.best_clf = clf.best_estimator_
self.best_score = clf.best_score_
self.count_vector = count_vector
self.exec_time = end-start
def to_proto(self):
"""Returns a protobuf of the frozen model.
Returns:
TextClassifierFrozenModel. A protobuf object of the frozen model
which stores parameters of the trained model. This data is used for
prediction.
"""
classifier_data = {
u'SVM': classifier_utils.extract_svm_parameters(self.best_clf),
u'cv_vocabulary': self.count_vector.__dict__['vocabulary_'],
u'best_params': self.best_params,
u'best_score': self.best_score
}
stringified_classifier_data = json.dumps(classifier_data)
text_classifier_proto = text_classifier_pb2.TextClassifierFrozenModel()
text_classifier_proto.model_json = stringified_classifier_data
return text_classifier_proto
    # pylint: disable=too-many-branches
    def validate(self, frozen_model_proto):
        """Validates frozen model of trained classifier.

        Checks that the JSON payload produced by to_proto() has exactly the
        structure the predictor expects, raising a plain Exception that
        describes the first problem found.

        Args:
            frozen_model_proto: TextClassifierFrozenModel. An object consisting
                of trained classifier model parameters.

        Raises:
            Exception. A required key is missing, or a value has an
                unexpected type.
        """
        stringified_classifier_data = frozen_model_proto.model_json
        classifier_data = json.loads(stringified_classifier_data)
        # Required keys at each nesting level of the classifier data dict.
        allowed_top_level_keys = [u'SVM', u'cv_vocabulary', u'best_params',
                                  u'best_score']
        allowed_best_params_keys = [u'kernel', u'C']
        allowed_svm_kernel_params_keys = [u'kernel', u'gamma', u'coef0',
                                          u'degree']
        allowed_svm_keys = [u'n_support', u'dual_coef', u'support_vectors',
                            u'intercept', u'classes', u'kernel_params',
                            u'probA', u'probB']
        for key in allowed_top_level_keys:
            if key not in classifier_data:
                raise Exception(
                    '\'%s\' key not found in classifier_data.' % key)
            # 'best_score' is the only scalar at the top level; every other
            # top-level value must be a dict.
            if key != u'best_score':
                if not isinstance(classifier_data[key], dict):
                    raise Exception(
                        'Expected \'%s\' to be dict but found \'%s\'.'
                        % (key, type(classifier_data[key])))
            else:
                if not isinstance(classifier_data[key], float):
                    raise Exception(
                        'Expected \'%s\' to be float but found \'%s\'.'
                        % (key, type(classifier_data[key])))
        for key in allowed_best_params_keys:
            if key not in classifier_data[u'best_params']:
                raise Exception(
                    '\'%s\' key not found in \'best_params\''
                    ' in classifier_data.' % key)
        for key in allowed_svm_keys:
            if key not in classifier_data[u'SVM']:
                raise Exception(
                    '\'%s\' key not found in \'SVM\''
                    ' in classifier_data.' % key)
        for key in allowed_svm_kernel_params_keys:
            if key not in classifier_data[u'SVM'][u'kernel_params']:
                raise Exception(
                    '\'%s\' key not found in \'kernel_params\''
                    ' in classifier_data.' % key)
        # NOTE(review): the C grid used in training mixes ints and floats
        # (0.5, 1, 10, 50, 100); JSON round-trips an int C as int, which
        # this float-only check would reject — confirm this is intended.
        if not isinstance(classifier_data[u'best_params'][u'C'], float):
            raise Exception(
                'Expected \'C\' to be a float but found \'%s\'' %
                type(classifier_data[u'best_params'][u'C']))
        if not isinstance(classifier_data[u'best_params'][u'kernel'],
                          basestring):
            raise Exception(
                'Expected \'kernel\' to be a string but found \'%s\'' %
                type(classifier_data[u'best_params'][u'kernel']))
        # Validate that all the strings in classifier data are of unicode type.
        classifier_utils.unicode_validator_for_classifier_data(classifier_data)
| {"/core/classifiers/algorithm_registry.py": ["/vmconf.py"], "/utils.py": ["/vmconf.py"], "/main.py": ["/vm_config.py", "/vmconf.py"], "/core/tests/test_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services.py": ["/utils.py", "/vmconf.py"], "/core/classifiers/classifier_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services_test.py": ["/vmconf.py"], "/core/classifiers/classifier_utils_test.py": ["/vmconf.py"], "/core/domain/job_services_test.py": ["/vmconf.py"], "/core/platform/platform_services.py": ["/vmconf.py"]} |
75,350 | oppia/oppia-ml | refs/heads/develop | /vmconf.py | # Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stores various configuration options and constants for Oppia-ml."""
import os
# The platform for the storage backend. This is used in the model-switching
# code in core/platform.
PLATFORM = 'gce'

# Whether we should serve the development or production experience.
# DEV_MODE should only be changed to False in the production environment.
DEV_MODE = True

# Default communication URL to be used to communicate with Oppia server.
# This URL will be used in local development.
DEFAULT_COMMUNICATION_URL = 'http://localhost'

# Default communication PORT to be used to communicate with Oppia server.
# This PORT will be used in local development.
DEFAULT_COMMUNICATION_PORT = 8181

# Communication URL which is to be used in production environment to
# communicate with Oppia server.
SERVER_COMMUNICATION_URL = 'https://www.oppia.org'

# Communication PORT which is to be used in production environment to
# communicate with Oppia server.
SERVER_COMMUNICATION_PORT = 443

# Default ID which is to be used in local development environment.
DEFAULT_VM_ID = 'vm_default'

# Default shared key which is to be used in local development environment.
DEFAULT_VM_SHARED_SECRET = '1a2b3c4e'

# Name of metadata parameter which will contain ID of the VM in production
# environment in GCE.
METADATA_VM_ID_PARAM_NAME = 'vm_id'

# Name of metadata parameter which will contain shared secret of the VM in
# production environment in GCE.
METADATA_SHARED_SECRET_PARAM_NAME = 'shared_secret_key'

# Handler URL of Oppia which is used to retrieve jobs.
FETCH_NEXT_JOB_REQUEST_HANDLER = 'ml/nextjobhandler'

# Handler URL of Oppia which is used to store job result.
STORE_TRAINED_CLASSIFIER_MODEL_HANDLER = 'ml/trainedclassifierhandler'

# Algorithm IDs of different classifier algorithms. These IDs are used to obtain
# instance of classifier algorithm using algorithm_registry.
# Note: we need same IDs in Oppia as well.
ALGORITHM_IDS = ['TextClassifier']

# Path of the directory which stores classifiers.
CLASSIFIERS_DIR = os.path.join('core', 'classifiers')

# Path of directory which stores datasets for testing.
DATASETS_DIR = os.path.join('core', 'tests', 'datasets')

# Path of directory which stores pretrained classifier models for each
# classifier.
PRETRAINED_MODELS_PATH = os.path.join('core', 'tests', 'models')

# Wait for fixed amount of time when there are no pending job requests.
FIXED_TIME_WAITING = 'fixed_time_wait'

# Seconds to wait in case of fixed time waiting approach.
FIXED_TIME_WAITING_PERIOD = 60

# Default waiting method to be used when there are no pending job requests.
DEFAULT_WAITING_METHOD = FIXED_TIME_WAITING

# Prefix for data sent from Oppia to the Oppia-ml via JSON.
XSSI_PREFIX = ')]}\'\n'

# The regular expression used to identify whether a string contains float value.
# The regex must match with regex that is stored in feconf.py file of Oppia.
# If this regex needs to be modified then first of all shutdown Oppia-ml VM.
# Then update the regex constant in here and Oppia both.
# Run any migration job that is required to migrate existing trained models
# before starting Oppia-ml again.
FLOAT_VERIFIER_REGEX = (
    '^([-+]?\\d*\\.\\d+)$|^([-+]?(\\d*\\.?\\d+|\\d+\\.?\\d*)e[-+]?\\d*)$')
| {"/core/classifiers/algorithm_registry.py": ["/vmconf.py"], "/utils.py": ["/vmconf.py"], "/main.py": ["/vm_config.py", "/vmconf.py"], "/core/tests/test_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services.py": ["/utils.py", "/vmconf.py"], "/core/classifiers/classifier_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services_test.py": ["/vmconf.py"], "/core/classifiers/classifier_utils_test.py": ["/vmconf.py"], "/core/domain/job_services_test.py": ["/vmconf.py"], "/core/platform/platform_services.py": ["/vmconf.py"]} |
75,351 | oppia/oppia-ml | refs/heads/develop | /core/domain/remote_access_services_test.py | # coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for remote access services."""
from core.domain import remote_access_services
from core.domain import training_job_result_domain
from core.domain.proto import text_classifier_pb2
from core.domain.proto import training_job_response_payload_pb2
from core.tests import test_utils
import vmconf
class RemoteAccessServicesTests(test_utils.GenericTestBase):
    """Tests for remote accessing methods.

    NOTE: self.callback, self.swap and the set_job_*_post_callback context
    managers come from test_utils.GenericTestBase (not visible here) —
    presumably they intercept outgoing HTTP posts; verify against that class.
    """

    def test_that_generate_signature_works_correctly(self):
        """Test that generate signature function is working as expected."""
        with self.swap(vmconf, 'DEFAULT_VM_SHARED_SECRET', '1a2b3c4e'):
            message = 'vm_default'
            vm_id = 'vm_default'
            signature = remote_access_services.generate_signature(
                message.encode(encoding='utf-8'),
                vm_id.encode(encoding='utf-8'))
            # Precomputed expected hex digest for the message/vm_id pair
            # above under the swapped shared secret.
            expected_signature = (
                '740ed25befc87674a82844db7769436edb7d21c29d1c9cc87d7a1f3fdefe3610')
            self.assertEqual(signature, expected_signature)

    def test_next_job_gets_fetched(self):
        """Test that next job is fetched correctly."""
        # Callback for post request.
        @self.callback
        def post_callback(request):
            """Callback for post request."""
            self.assertIn('vm_id', request.payload.keys())
            self.assertIn('message', request.payload.keys())
            self.assertIn('signature', request.payload.keys())
            job_data = {
                'job_id': '1',
                'algorithm_id': 'ab',
                'training_data': {},
                'algorithm_version': 1
            }
            return job_data

        with self.set_job_request_post_callback(post_callback):
            resp = remote_access_services.fetch_next_job_request()
            self.assertIn('job_id', resp.keys())
            self.assertIn('algorithm_id', resp.keys())
            self.assertIn('training_data', resp.keys())
            self.assertIn('algorithm_version', resp.keys())
            self.assertEqual(resp['job_id'], '1')
            self.assertEqual(resp['algorithm_id'], 'ab')
            self.assertEqual(resp['algorithm_version'], 1)
            self.assertDictEqual(resp['training_data'], {})

    def test_result_gets_stored_correctly(self):
        """Test that correct results are stored."""
        frozen_model_proto = text_classifier_pb2.TextClassifierFrozenModel()
        frozen_model_proto.model_json = 'model_json'
        algorithm_id = 'TextClassifier'
        job_id = '123'
        job_result = training_job_result_domain.TrainingJobResult(
            job_id, algorithm_id, frozen_model_proto)

        # Callback for post request. Decodes the posted protobuf payload and
        # checks that it round-trips the job result built above.
        @self.callback
        def post_callback(request):
            """Callback for post request."""
            payload = (
                training_job_response_payload_pb2.TrainingJobResponsePayload())
            payload.ParseFromString(request.body)
            self.assertEqual(payload.job_result.job_id, '123')
            self.assertEqual(
                payload.job_result.WhichOneof('classifier_frozen_model'),
                'text_classifier')
            self.assertEqual(
                payload.job_result.text_classifier.model_json, 'model_json')

        with self.set_job_result_post_callback(post_callback):
            status = remote_access_services.store_trained_classifier_model(
                job_result)
            self.assertEqual(status, 200)

    def test_that_job_result_is_validated_before_storing(self):
        """Ensure that JobResult domain object is validated before proceeding
        to store it.
        """
        frozen_model_proto = text_classifier_pb2.TextClassifierFrozenModel()
        frozen_model_proto.model_json = 'model_json'
        algorithm_id = 'TextClassifier'
        job_id = '123'
        job_result = training_job_result_domain.TrainingJobResult(
            job_id, algorithm_id, frozen_model_proto)
        # Flag mutated by the swapped-in validate() below.
        check_valid_call = {'validate_has_been_called': False}

        def validate_check():
            """Assert that validate function is called."""
            check_valid_call['validate_has_been_called'] = True

        @self.callback
        def post_callback(request):  # pylint: disable=unused-argument
            """Callback for post request."""
            return

        with self.set_job_result_post_callback(post_callback):
            with self.swap(job_result, 'validate', validate_check):
                remote_access_services.store_trained_classifier_model(
                    job_result)
        self.assertTrue(check_valid_call['validate_has_been_called'])
| {"/core/classifiers/algorithm_registry.py": ["/vmconf.py"], "/utils.py": ["/vmconf.py"], "/main.py": ["/vm_config.py", "/vmconf.py"], "/core/tests/test_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services.py": ["/utils.py", "/vmconf.py"], "/core/classifiers/classifier_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services_test.py": ["/vmconf.py"], "/core/classifiers/classifier_utils_test.py": ["/vmconf.py"], "/core/domain/job_services_test.py": ["/vmconf.py"], "/core/platform/platform_services.py": ["/vmconf.py"]} |
75,352 | oppia/oppia-ml | refs/heads/develop | /vm_config.py | # Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for configuration of python environment."""
import logging
import os
import sys
# Root path of the repo.
ROOT_PATH = os.path.abspath(os.path.dirname(__file__))
MANIFEST_FILE_PATH = os.path.join(ROOT_PATH, 'manifest.txt')

# Absolute paths of the vendored third-party libraries, one per manifest
# entry. A manifest line '<name> <version>' maps to the directory
# third_party/<name>-<version>; blank lines and '#' comments are skipped.
with open(MANIFEST_FILE_PATH, 'r') as manifest_file:
    THIRD_PARTY_LIB_PATHS = []
    for raw_line in manifest_file.readlines():
        entry = raw_line.strip()
        if not entry or entry.startswith('#'):
            continue
        tokens = entry.split()
        THIRD_PARTY_LIB_PATHS.append(
            os.path.join(
                ROOT_PATH, 'third_party', '%s-%s' % (tokens[0], tokens[1])))
def configure():
    """This function configures python environment.

    Sets up basic logging and inserts the vendored third-party libraries
    into sys.path. Any failure is logged and swallowed so that startup can
    continue (best-effort configuration).
    """
    log_format = ('%(levelname)s\t%(asctime)s %(module)s:%(lineno)d]'
                  ' %(name)s: "%(message)s"')
    logging.basicConfig(format=log_format, level=logging.INFO)
    try:
        _fix_third_party_lib_paths()
    except Exception as e:  # pylint: disable=broad-except
        # Log the exception object itself rather than e.message: the
        # `.message` attribute was deprecated in Python 2.6 and is absent on
        # many exception types (and on Python 3 entirely), so accessing it
        # could raise a secondary AttributeError and defeat this
        # best-effort handler.
        logging.info('Failed to configure VM: \'%s\'', e)
def _fix_third_party_lib_paths():
    """Prepends each vendored third-party library directory to sys.path.

    Raises:
        Exception. A path listed in THIRD_PARTY_LIB_PATHS is not an
            existing directory.
    """
    for library_dir in THIRD_PARTY_LIB_PATHS:
        if os.path.isdir(library_dir):
            sys.path.insert(0, library_dir)
        else:
            raise Exception(
                'Invalid path for third_party library: %s' % library_dir)
| {"/core/classifiers/algorithm_registry.py": ["/vmconf.py"], "/utils.py": ["/vmconf.py"], "/main.py": ["/vm_config.py", "/vmconf.py"], "/core/tests/test_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services.py": ["/utils.py", "/vmconf.py"], "/core/classifiers/classifier_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services_test.py": ["/vmconf.py"], "/core/classifiers/classifier_utils_test.py": ["/vmconf.py"], "/core/domain/job_services_test.py": ["/vmconf.py"], "/core/platform/platform_services.py": ["/vmconf.py"]} |
75,353 | oppia/oppia-ml | refs/heads/develop | /core/classifiers/classifier_utils_test.py | # coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for utility functions defined in classifier_utils."""
import json
import os
import re
from core.classifiers import algorithm_registry
from core.classifiers import classifier_utils
from core.tests import test_utils
import vmconf
import numpy as np
from sklearn import svm
class ClassifierUtilsTest(test_utils.GenericTestBase):
    """Tests for utility functions in classifier_utils."""

    def setUp(self):
        super(ClassifierUtilsTest, self).setUp()
        # Example training dataset.
        self.data = np.array([[1, 0], [0, 1], [1, 1], [0, 0]])
        self.labels = np.array([1, 1, 0, 0])

    def test_that_svc_parameters_are_extracted_correctly(self):
        """Test that SVC classifier's parameters are extracted correctly."""
        clf = svm.SVC()
        # Train the model.
        clf.fit(self.data, self.labels)
        data = classifier_utils.extract_svm_parameters(clf)
        expected_keys = [u'n_support', u'support_vectors', u'dual_coef',
                         u'intercept', u'classes', u'kernel_params', u'probA',
                         u'probB']
        self.assertListEqual(sorted(expected_keys), sorted(data.keys()))
        # Make sure that all of the values are of serializable type.
        self.assertEqual(type(data[u'n_support']), list)
        self.assertEqual(type(data[u'support_vectors']), list)
        self.assertEqual(type(data[u'dual_coef']), list)
        self.assertEqual(type(data[u'intercept']), list)
        self.assertEqual(type(data[u'classes']), list)
        self.assertEqual(type(data[u'probA']), list)
        self.assertEqual(type(data[u'probB']), list)
        self.assertEqual(type(data[u'kernel_params']), dict)
        self.assertEqual(type(data[u'kernel_params'][u'kernel']), unicode)
        self.assertEqual(type(data[u'kernel_params'][u'gamma']), float)
        self.assertEqual(type(data[u'kernel_params'][u'degree']), int)
        self.assertEqual(type(data[u'kernel_params'][u'coef0']), float)

    # Renamed from check_that_unicode_validator_works_as_expected: names
    # must start with 'test' or the test runner never executes the method.
    def test_that_unicode_validator_works_as_expected(self):
        """Make sure that unicode validator function works as expected."""
        # Each dict below plants one byte-string (non-unicode) value or key
        # at a different nesting level; the validator must report it.
        test_dict = {
            'a': u'b',
            u'c': {
                u'abc': 20,
                u'cdf': [u'j', u'k']
            },
            u'x': [{u'm': u'n'}, {u'e': u'f'}]
        }
        with self.assertRaisesRegexp(
                Exception, 'Expected \'a\' to be unicode but found str.'):
            classifier_utils.unicode_validator_for_classifier_data(test_dict)

        test_dict = {
            u'a': 'b',
            u'c': {
                u'abc': 20,
                u'cdf': [u'j', u'k']
            },
            u'x': [{u'm': u'n'}, {u'e': u'f'}]
        }
        with self.assertRaisesRegexp(
                Exception, 'Expected \'b\' to be unicode but found str.'):
            classifier_utils.unicode_validator_for_classifier_data(test_dict)

        test_dict = {
            u'a': u'b',
            u'c': {
                'abc': 20,
                u'cdf': [u'j', u'k']
            },
            u'x': [{u'm': u'n'}, {u'e': u'f'}]
        }
        with self.assertRaisesRegexp(
                Exception, 'Expected \'abc\' to be unicode but found str.'):
            classifier_utils.unicode_validator_for_classifier_data(test_dict)

        test_dict = {
            u'a': u'b',
            u'c': {
                u'abc': 20,
                u'cdf': ['j', u'k']
            },
            u'x': [{u'm': u'n'}, {u'e': u'f'}]
        }
        with self.assertRaisesRegexp(
                Exception, 'Expected \'j\' to be unicode but found str.'):
            classifier_utils.unicode_validator_for_classifier_data(test_dict)

        test_dict = {
            u'a': u'b',
            u'c': {
                u'abc': 20,
                u'cdf': [u'j', u'k']
            },
            u'x': [{'m': u'n'}, {u'e': u'f'}]
        }
        with self.assertRaisesRegexp(
                Exception, 'Expected \'m\' to be unicode but found str.'):
            classifier_utils.unicode_validator_for_classifier_data(test_dict)

        test_dict = {
            u'a': u'b',
            u'c': {
                u'abc': 20,
                u'cdf': [u'j', u'k']
            },
            u'x': [{u'm': u'n'}, {u'e': 'f'}]
        }
        with self.assertRaisesRegexp(
                Exception, 'Expected \'f\' to be unicode but found str.'):
            classifier_utils.unicode_validator_for_classifier_data(test_dict)

    def test_that_float_verifier_regex_works_correctly(self):
        """Test that float verifier regex correctly identifies float values."""
        # Plain decimal forms are captured by the regex's first group.
        test_list = [
            '0.123', '+0.123', '-0.123', '.123', '+.123', '-.123',
            '0000003.1']
        for test in test_list:
            self.assertEqual(
                re.match(vmconf.FLOAT_VERIFIER_REGEX, test).groups()[0], test)
        # Exponent forms are captured by the second group.
        test_list = [
            '3e10', '3e-10', '3e+10', '+3e10', '-3e10', '+3e+10', '+3e-10',
            '-3e-10', '-3e+10', '0.3e10', '-0.3e10', '+0.3e10', '-0.3e-10',
            '-0.3e+10', '+0.3e+10', '+0.3e-10', '.3e+10', '-.3e10']
        for test in test_list:
            self.assertEqual(
                re.match(vmconf.FLOAT_VERIFIER_REGEX, test).groups()[1], test)
        # Integers and trailing-dot forms must not match at all.
        test_list = ['123', '0000', '123.']
        for test in test_list:
            self.assertIsNone(re.match(vmconf.FLOAT_VERIFIER_REGEX, test))

    def test_encode_floats_in_classifier_data(self):
        """Make sure that all values are converted correctly."""
        test_dict = {
            'x': ['123', 'abc', 0.123]
        }
        expected_dict = {
            'x': ['123', 'abc', '0.123'],
        }
        output_dict = (
            classifier_utils.encode_floats_in_classifier_data(
                test_dict))
        self.assertDictEqual(expected_dict, output_dict)

        # Float-looking strings in the input must be rejected: they would be
        # indistinguishable from encoded floats after the transformation.
        test_dict = {
            'x': '-0.123'
        }
        with self.assertRaisesRegexp(
                Exception,
                'Float values should not be stored as strings.'):
            classifier_utils.encode_floats_in_classifier_data(
                test_dict)

        test_dict = {
            'x': ['+0.123', 0.456]
        }
        with self.assertRaisesRegexp(
                Exception,
                'Float values should not be stored as strings.'):
            classifier_utils.encode_floats_in_classifier_data(
                test_dict)

        # Deeply nested structure: every float, however deep, must come back
        # as its string form while other values are untouched.
        test_dict = {
            'a': {
                'ab': 'abcd',
                'ad': {
                    'ada': 'abcdef',
                    'adc': [{
                        'adca': 'abcd',
                        'adcb': 0.1234,
                        'adcc': ['ade', 'afd']
                    }]
                },
                'ae': [['123', 0.123], ['abc']],
            },
            'b': {
                'bd': [-2.48521656693, -2.48521656693, -2.48521656693],
                'bg': ['abc', 'def', 'ghi'],
                'bh': ['abc', '123'],
            },
            'c': 1.123432,
        }
        expected_dict = {
            'a': {
                'ab': 'abcd',
                'ad': {
                    'ada': 'abcdef',
                    'adc': [{
                        'adca': 'abcd',
                        'adcb': '0.1234',
                        'adcc': ['ade', 'afd'],
                    }],
                },
                'ae': [['123', '0.123'], ['abc']],
            },
            'b': {
                'bd': ['-2.48521656693', '-2.48521656693', '-2.48521656693'],
                'bg': ['abc', 'def', 'ghi'],
                'bh': ['abc', '123'],
            },
            'c': '1.123432',
        }
        output_dict = (
            classifier_utils.encode_floats_in_classifier_data(
                test_dict))
        self.assertDictEqual(expected_dict, output_dict)

    def test_that_pretrained_models_for_all_classifier_are_correct(self):
        """Make sure that trained classifier models generated in the output
        by each classifier do not raise Exception when passed through
        encode_floats_in_classifier_data function."""
        classifier_ids = (
            algorithm_registry.Registry.get_all_classifier_algorithm_ids())
        for classifier_id in classifier_ids:
            file_path = os.path.join(
                vmconf.PRETRAINED_MODELS_PATH, '%s.json' % classifier_id)
            with open(file_path, 'r') as f:
                classifier_data = json.loads(f.read())
            output_dict = classifier_utils.encode_floats_in_classifier_data(
                classifier_data)
            self.assertIsInstance(output_dict, dict)
| {"/core/classifiers/algorithm_registry.py": ["/vmconf.py"], "/utils.py": ["/vmconf.py"], "/main.py": ["/vm_config.py", "/vmconf.py"], "/core/tests/test_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services.py": ["/utils.py", "/vmconf.py"], "/core/classifiers/classifier_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services_test.py": ["/vmconf.py"], "/core/classifiers/classifier_utils_test.py": ["/vmconf.py"], "/core/domain/job_services_test.py": ["/vmconf.py"], "/core/platform/platform_services.py": ["/vmconf.py"]} |
75,354 | oppia/oppia-ml | refs/heads/develop | /core/domain/job_services_test.py | # coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for remote access services."""
import json
import os
from core.domain import job_services
from core.domain.proto import text_classifier_pb2
from core.domain.proto import training_job_response_payload_pb2
from core.tests import test_utils
import vmconf
def _load_training_data():
    """Reads the sample string-classifier dataset used by these tests.

    Returns:
        The parsed JSON contents of string_classifier_data.json.
    """
    dataset_path = os.path.join(
        vmconf.DATASETS_DIR, 'string_classifier_data.json')
    with open(dataset_path, 'r') as dataset_file:
        return json.loads(dataset_file.read())
class JobServicesTests(test_utils.GenericTestBase):
"""Tests for job service function ."""
def _get_post_callback_for_next_job(self, job_data):
def post_callback(request):
"""Callback for post request."""
self.assertIn('vm_id', request.payload.keys())
self.assertIn('message', request.payload.keys())
self.assertIn('signature', request.payload.keys())
return job_data
return self.callback(post_callback)
def test_next_job_is_fetched_correctly(self):
"""Test that next job is fetched correctly."""
job_data = {
'job_id': '1',
'algorithm_id': 'TextClassifier',
'training_data': [],
'algorithm_version': 1
}
post_callback = self._get_post_callback_for_next_job(job_data)
with self.set_job_request_post_callback(post_callback):
job_data = job_services.get_next_job()
self.assertIn('job_id', job_data.keys())
self.assertIn('algorithm_id', job_data.keys())
self.assertIn('training_data', job_data.keys())
self.assertIn('algorithm_version', job_data.keys())
self.assertEqual(job_data['job_id'], '1')
self.assertEqual(job_data['algorithm_id'], 'TextClassifier')
self.assertEqual(job_data['algorithm_version'], 1)
self.assertEqual(job_data['training_data'], [])
def test_validate_with_valid_job_data_raises_no_exceptions(self):
"""Test that validate for fetched job works as expected."""
job_data = {
'job_id': '1',
'algorithm_id': 'TextClassifier',
'training_data': [],
'algorithm_version': 1
}
post_callback = self._get_post_callback_for_next_job(job_data)
with self.set_job_request_post_callback(post_callback):
job_services.get_next_job()
def test_validate_with_incorrect_job_data_type_raises_exception(self):
"""Test that job validation raises exception if job_id is missing
in the received job data.
"""
job_data = 'a simple job'
post_callback = self._get_post_callback_for_next_job(job_data)
with self.set_job_request_post_callback(post_callback):
with self.assertRaisesRegexp(
Exception, 'Invalid format of job data'):
job_services.get_next_job()
def test_validate_with_missing_job_id_raises_exception(self):
"""Test that job validation raises exception if job_id is missing
in the received job data.
"""
job_data = {
'algorithm_id': 'TextClassifier',
'training_data': [],
'algorithm_version': 1
}
post_callback = self._get_post_callback_for_next_job(job_data)
with self.set_job_request_post_callback(post_callback):
with self.assertRaisesRegexp(
Exception, 'job data should contain job id'):
job_services.get_next_job()
def test_validate_with_missing_algorithm_id_raises_exception(self):
"""Test that job validation raises exception if algorithm_id is missing
in the received job data.
"""
job_data = {
'job_id': '1',
'training_data': [],
'algorithm_version': 1
}
post_callback = self._get_post_callback_for_next_job(job_data)
with self.set_job_request_post_callback(post_callback):
with self.assertRaisesRegexp(
Exception, 'job data should contain algorithm id'):
job_services.get_next_job()
def test_validate_with_missing_training_data_raises_exception(self):
"""Test that job validation raises exception if training_data is missing
in the received job data.
"""
job_data = {
'job_id': '1',
'algorithm_id': 'TextClassifier',
'algorithm_version': 1
}
post_callback = self._get_post_callback_for_next_job(job_data)
with self.set_job_request_post_callback(post_callback):
with self.assertRaisesRegexp(
Exception, 'job data should contain training data'):
job_services.get_next_job()
def test_validate_with_missing_algorithm_version_raises_exception(self):
"""Test that job validation raises exception if algorithm_version is
missing in the received job data.
"""
job_data = {
'job_id': '1',
'algorithm_id': 'TextClassifier',
'training_data': []
}
post_callback = self._get_post_callback_for_next_job(job_data)
with self.set_job_request_post_callback(post_callback):
with self.assertRaisesRegexp(
Exception, 'job data should contain algorithm version'):
job_services.get_next_job()
def test_validate_with_invalid_job_id_raises_exception(self):
"""Test that job validation raises exception if job_id is invalid
in the received job data.
"""
# Callback for post request.
job_data = {
'job_id': 123,
'algorithm_id': 'TextClassifier',
'training_data': [],
'algorithm_version': 1
}
post_callback = self._get_post_callback_for_next_job(job_data)
with self.set_job_request_post_callback(post_callback):
with self.assertRaisesRegexp(
Exception, 'Expected job id to be unicode'):
job_services.get_next_job()
def test_validate_with_invalid_algorithm_id_raises_exception(self):
"""Test that job validation raises exception if algorithm_id is invalid
in the received job data.
"""
job_data = {
'job_id': '1',
'algorithm_id': 123,
'training_data': [],
'algorithm_version': 1
}
post_callback = self._get_post_callback_for_next_job(job_data)
with self.set_job_request_post_callback(post_callback):
with self.assertRaisesRegexp(
Exception, 'Expected algorithm id to be unicode'):
job_services.get_next_job()
def test_validate_with_invalid_training_data_raises_exception(self):
"""Test that job validation raises exception if training_data is not
a dict in the received job data.
"""
job_data = {
'job_id': '1',
'algorithm_id': 'TextClassifier',
'training_data': 'data',
'algorithm_version': 1
}
post_callback = self._get_post_callback_for_next_job(job_data)
with self.set_job_request_post_callback(post_callback):
with self.assertRaisesRegexp(
Exception, 'Expected training data to be a list'):
job_services.get_next_job()
def test_validate_training_data_without_answer_group_index_raises_exception(
self):
"""Test that job validation raises exception if training_data doesn't
contain 'answer_group_index' key.
"""
job_data = {
'job_id': '1',
'algorithm_id': 'TextClassifier',
'training_data': [{'answers': ['a', 'b', 'c']}],
'algorithm_version': 1
}
post_callback = self._get_post_callback_for_next_job(job_data)
with self.set_job_request_post_callback(post_callback):
with self.assertRaisesRegexp(
Exception,
'Expected answer_group_index to be a key in training_data'):
job_services.get_next_job()
def test_validate_training_data_without_answers_raises_exception(self):
"""Test that job validation raises exception if training_data doesn't
contain answers key.
"""
job_data = {
'job_id': '1',
'algorithm_id': 'TextClassifier',
'training_data': [{'answer_group_index': 1}],
'algorithm_version': 1
}
post_callback = self._get_post_callback_for_next_job(job_data)
with self.set_job_request_post_callback(post_callback):
with self.assertRaisesRegexp(
Exception,
'Expected answers to be a key in training_data'):
job_services.get_next_job()
def test_validate_training_data_invalid_answer_group_idx_raises_exception(
self):
"""Test that job validation raises exception if training_data contains
incorrect answer_group_index.
"""
job_data = {
'job_id': '1',
'algorithm_id': 'TextClassifier',
'training_data': [{
'answer_group_index': '1',
'answers': ['a', 'b', 'c']}],
'algorithm_version': 1
}
post_callback = self._get_post_callback_for_next_job(job_data)
with self.set_job_request_post_callback(post_callback):
with self.assertRaisesRegexp(
Exception,
'Expected answer_group_index to be an int'):
job_services.get_next_job()
def test_validate_training_data_with_invalid_answers_exception(self):
"""Test that job validation raises exception if training_data contains
incorrect answers.
"""
job_data = {
'job_id': '1',
'algorithm_id': 'TextClassifier',
'training_data': [{
'answer_group_index': 1,
'answers': 'answer1'}],
'algorithm_version': 1
}
post_callback = self._get_post_callback_for_next_job(job_data)
with self.set_job_request_post_callback(post_callback):
with self.assertRaisesRegexp(
Exception,
'Expected answers to be a list'):
job_services.get_next_job()
def test_validate_with_invalid_algorithm_version_raises_exception(self):
"""Test that job validation raises exception if algorithm_version is
invalid in the received job data.
"""
job_data = {
'job_id': '1',
'algorithm_id': 'TextClassifier',
'training_data': [],
'algorithm_version': '123'
}
post_callback = self._get_post_callback_for_next_job(job_data)
with self.set_job_request_post_callback(post_callback):
with self.assertRaisesRegexp(
Exception, 'Expected algorithm version to be integer'):
job_services.get_next_job()
def test_validate_with_incorrect_algorithm_id_raises_exception(self):
"""Test that job validation raises exception if algorithm_id is
unknown in the received job data.
"""
job_data = {
'job_id': '1',
'algorithm_id': 'ab',
'training_data': [],
'algorithm_version': 1
}
post_callback = self._get_post_callback_for_next_job(job_data)
with self.set_job_request_post_callback(post_callback):
with self.assertRaisesRegexp(
Exception, 'Invalid algorithm id ab'):
job_services.get_next_job()
def test_validate_with_incorrect_algorithm_version_raises_exception(self):
"""Test that job validation raises exception if algorithm_version is
unknown in the received job data.
"""
job_data = {
'job_id': '1',
'algorithm_id': 'TextClassifier',
'training_data': [],
'algorithm_version': 1000
}
post_callback = self._get_post_callback_for_next_job(job_data)
with self.set_job_request_post_callback(post_callback):
with self.assertRaisesRegexp(
Exception,
'Classifier version 1 mismatches algorithm version 1000 '
'received in job data'):
job_services.get_next_job()
def test_result_gets_stored_correctly(self):
    """Test that correct results are stored."""
    # Build the frozen-model payload that the job result should carry.
    frozen_model_proto = text_classifier_pb2.TextClassifierFrozenModel()
    frozen_model_proto.model_json = 'model_json'
    algorithm_id = 'TextClassifier'
    job_id = '123'
    # Callback for post request.
    @self.callback
    def post_callback(request):
        """Callback for post request."""
        # Decode the serialized payload sent in the request body and
        # check that the stored result round-trips unchanged.
        payload = (
            training_job_response_payload_pb2.TrainingJobResponsePayload())
        payload.ParseFromString(request.body)
        self.assertEqual(payload.job_result.job_id, '123')
        # The oneof must select the text-classifier branch.
        self.assertEqual(
            payload.job_result.WhichOneof('classifier_frozen_model'),
            'text_classifier')
        self.assertEqual(
            payload.job_result.text_classifier.model_json, 'model_json')
    with self.set_job_result_post_callback(post_callback):
        status = job_services.store_job_result(
            job_id, algorithm_id, frozen_model_proto)
        # 200 indicates the (mocked) server accepted the result.
        self.assertEqual(status, 200)
def test_train_classifier(self):
    """Training via job_services should yield a frozen text-classifier
    model proto.
    """
    training_data = _load_training_data()
    frozen_model = job_services.train_classifier(
        'TextClassifier', training_data)
    # assertIsInstance gives a clearer failure message than
    # assertTrue(isinstance(...)).
    self.assertIsInstance(
        frozen_model, text_classifier_pb2.TextClassifierFrozenModel)
def test_train_classifier_returns_none_on_invalid_algorithm_versio(self):
    """Train a classifier on valid data and check the returned proto.

    NOTE(review): despite the (misspelled) test name, this body never
    passes an invalid algorithm version -- it duplicates
    test_train_classifier above. TODO: exercise the invalid-version
    path or remove the duplicate.
    """
    training_data = _load_training_data()
    frozen_model = job_services.train_classifier(
        'TextClassifier', training_data)
    # assertIsInstance gives a clearer failure message than
    # assertTrue(isinstance(...)).
    self.assertIsInstance(
        frozen_model, text_classifier_pb2.TextClassifierFrozenModel)
| {"/core/classifiers/algorithm_registry.py": ["/vmconf.py"], "/utils.py": ["/vmconf.py"], "/main.py": ["/vm_config.py", "/vmconf.py"], "/core/tests/test_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services.py": ["/utils.py", "/vmconf.py"], "/core/classifiers/classifier_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services_test.py": ["/vmconf.py"], "/core/classifiers/classifier_utils_test.py": ["/vmconf.py"], "/core/domain/job_services_test.py": ["/vmconf.py"], "/core/platform/platform_services.py": ["/vmconf.py"]} |
75,355 | oppia/oppia-ml | refs/heads/develop | /core/platform/platform_services.py | # coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for platform services switching."""
import vmconf
class _Gce(object):
    """Platform-specific imports for GCE (Google Compute Engine)."""

    NAME = 'gce'

    @classmethod
    def import_metadata_services(cls):
        """Return the metadata-services module used on GCE.

        Returns:
            module. The gce_metadata_services module.
        """
        from core.platform.metadata import gce_metadata_services
        return gce_metadata_services
class Registry(object):
    """Platform-agnostic interface for retrieving platform-specific modules.
    """

    # Maps platform names to the corresponding module registry classes.
    _PLATFORM_MAPPING = {
        _Gce.NAME: _Gce,
    }

    @classmethod
    def _get(cls):
        """Returns the appropriate interface class for platform-specific
        imports.

        Returns:
            class: The corresponding platform-specific interface class.

        Raises:
            Exception: vmconf.PLATFORM names a platform with no registered
                interface class.
        """
        platform_cls = cls._PLATFORM_MAPPING.get(vmconf.PLATFORM)
        if platform_cls is None:
            # Previously this returned None, which later surfaced as an
            # opaque AttributeError; fail fast with a clear message instead.
            raise Exception('Unsupported platform: %s' % vmconf.PLATFORM)
        return platform_cls

    @classmethod
    def import_metadata_services(cls):
        """Imports and returns metadata_services module.

        Returns:
            module. The metadata_services module.
        """
        return cls._get().import_metadata_services()
| {"/core/classifiers/algorithm_registry.py": ["/vmconf.py"], "/utils.py": ["/vmconf.py"], "/main.py": ["/vm_config.py", "/vmconf.py"], "/core/tests/test_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services.py": ["/utils.py", "/vmconf.py"], "/core/classifiers/classifier_utils.py": ["/vmconf.py"], "/core/domain/remote_access_services_test.py": ["/vmconf.py"], "/core/classifiers/classifier_utils_test.py": ["/vmconf.py"], "/core/domain/job_services_test.py": ["/vmconf.py"], "/core/platform/platform_services.py": ["/vmconf.py"]} |
75,363 | europelee/growth-studio | refs/heads/master | /blog/api.py | from django.contrib.auth.models import User
from rest_framework import permissions
from rest_framework import serializers, viewsets
from rest_framework.authentication import BasicAuthentication, SessionAuthentication
from rest_framework.pagination import PageNumberPagination
from rest_framework.permissions import BasePermission
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from blog.models import Blog
SAFE_METHODS = ['GET', 'HEAD', 'OPTIONS']
class IsAuthenticatedOrReadOnly(BasePermission):
    """Allow safe (read-only) requests for anyone; writes require login."""

    def has_permission(self, request, view):
        # Short-circuits: the user is only consulted for unsafe methods.
        return bool(
            request.method in SAFE_METHODS
            or (request.user and request.user.is_authenticated()))
class UserSerializer(serializers.ModelSerializer):
    """Serializes a Django auth User, exposing username and email only."""

    class Meta:
        model = User
        fields = ('username', 'email')
class BlogSerializer(serializers.ModelSerializer):
    """Serializes Blog instances for the REST API."""

    # NOTE(review): this assigns the User model class itself, not a DRF
    # serializer field, so it does not declare an `author` field -- the
    # field is generated from the model instead. Probably intended to be
    # a nested/related serializer field; confirm before changing.
    author = User

    class Meta:
        model = Blog
        fields = ('title', 'author', 'body', 'slug', 'id')
class StandardResultsSetPagination(PageNumberPagination):
    """Page-number pagination: 10 items per page by default; clients may
    set ?page_size=, capped at 10.
    """

    page_size = 10
    page_size_query_param = 'page_size'
    max_page_size = 10
class BlogSet(viewsets.ModelViewSet):
    """CRUD endpoints for Blog with optional title substring filtering."""

    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
    serializer_class = BlogSerializer
    queryset = Blog.objects.all()
    pagination_class = StandardResultsSetPagination

    def get_queryset(self):
        """Narrow the queryset by the optional `title` query parameter."""
        qs = Blog.objects.all()
        title = self.request.query_params.get('title')
        if title is not None:
            qs = qs.filter(title__contains=title)
        return qs
class UserDetail(viewsets.ReadOnlyModelViewSet):
    """Read-only user endpoints; callers must be authenticated."""

    # Accepted authentication schemes: JWT, HTTP basic, session.
    authentication_classes = [JSONWebTokenAuthentication, BasicAuthentication, SessionAuthentication]
    permission_classes = (permissions.IsAuthenticated,)
    queryset = User.objects.all()
    serializer_class = UserSerializer
| {"/blog/api.py": ["/blog/models.py"], "/growth_studio/urls.py": ["/blog/api.py", "/blog/views.py"], "/blog/views.py": ["/blog/models.py"], "/blog/schema.py": ["/blog/models.py"]} |
75,364 | europelee/growth-studio | refs/heads/master | /growth_studio/urls.py | """growth_studio URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.views.generic import TemplateView
from graphene_django.views import GraphQLView
from blog.api import BlogSet, UserDetail
from blog.views import blog_list, blog_detail
from homepage.views import index as home
from rest_framework import routers
from rest_framework_jwt import views as DRFViews

# REST API router: registers /api/blog/ (CRUD) and /api/user/ (read-only).
apiRouter = routers.DefaultRouter()
apiRouter.register(r'blog', BlogSet, 'blog')
apiRouter.register(r'user', UserDetail, 'user')

urlpatterns = [
    url(r'^$', home, name='home'),
    # Static "about us" page rendered directly from a template.
    url(r'^about-us/$', TemplateView.as_view(template_name='pages/about-us.html')),
    url(r'^blog/$', blog_list),
    # Blog detail pages look like /blog/<slug>.html.
    url(r'^blog/(?P<slug>[^\.]+).html', blog_detail, name='blog_view'),
    url(r'^admin/', admin.site.urls),
    # GraphQL endpoint with the interactive GraphiQL UI enabled.
    url(r'^graphql', GraphQLView.as_view(graphiql=True)),
    url('^markdown/', include('django_markdown.urls')),
    url(r'^api/', include(apiRouter.urls)),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    # JWT endpoints: obtain, refresh and verify tokens.
    url(r'^api-token-auth/', DRFViews.obtain_jwt_token),
    url(r'^api-token-refresh/', DRFViews.refresh_jwt_token),
    url(r'^api-token-verify/', DRFViews.verify_jwt_token),
]
| {"/blog/api.py": ["/blog/models.py"], "/growth_studio/urls.py": ["/blog/api.py", "/blog/views.py"], "/blog/views.py": ["/blog/models.py"], "/blog/schema.py": ["/blog/models.py"]} |
75,365 | europelee/growth-studio | refs/heads/master | /e2e/test_homepage.py | from django.test import LiveServerTestCase
from selenium import webdriver
class HomepageTestCase(LiveServerTestCase):
    """Browser-level smoke test for the home page via a live test server."""

    def setUp(self):
        self.selenium = webdriver.Chrome()
        self.selenium.maximize_window()
        super(HomepageTestCase, self).setUp()

    def tearDown(self):
        self.selenium.quit()
        super(HomepageTestCase, self).tearDown()

    def test_can_visit_homepage(self):
        # live_server_url is generated per test run by LiveServerTestCase.
        self.selenium.get(self.live_server_url + "/")
        self.assertIn("Growth Studio - Enjoy Create & Share", self.selenium.title)
75,366 | europelee/growth-studio | refs/heads/master | /blog/models.py | # coding=utf-8
from django.contrib.auth.models import User
from django.db import models
from django.db.models import permalink
from django.utils.translation import ugettext_lazy as _
from django_markdown.models import MarkdownField
class Blog(models.Model):
    """A markdown blog post written by a Django auth user."""

    class Meta:
        verbose_name = _('博客')
        verbose_name_plural = _('博客')

    title = models.CharField(max_length=30, unique=True, verbose_name=_('标题'), help_text='博客的标题')
    author = models.ForeignKey(User, verbose_name=_('作者'))
    # Used in the /blog/<slug>.html URL pattern.
    slug = models.SlugField(max_length=50, unique=True, verbose_name=_('URL'))
    body = MarkdownField(verbose_name=_('正文'))
    # Set once on creation; indexed for date-based queries.
    posted = models.DateField(db_index=True, auto_now_add=True)

    def __str__(self):
        return '%s' % (self.title)

    # NOTE(review): @permalink is deprecated in newer Django releases in
    # favour of calling reverse() directly -- fine on the Django version
    # this project targets, but worth migrating eventually.
    @permalink
    def get_absolute_url(self):
        # Resolves the 'blog_view' URL pattern with this post's slug.
        return 'blog_view', None, {'slug': self.slug}
| {"/blog/api.py": ["/blog/models.py"], "/growth_studio/urls.py": ["/blog/api.py", "/blog/views.py"], "/blog/views.py": ["/blog/models.py"], "/blog/schema.py": ["/blog/models.py"]} |
75,367 | europelee/growth-studio | refs/heads/master | /ac/test_homepage.py | from unittest import TestCase
from selenium import webdriver
class HomepageTestCase(TestCase):
    """Acceptance smoke tests against a deployed instance."""

    # Deployed host under test; previously duplicated in every method.
    BASE_URL = 'http://10.211.55.26'

    def setUp(self):
        self.selenium = webdriver.Chrome()
        self.selenium.maximize_window()
        super(HomepageTestCase, self).setUp()

    def tearDown(self):
        self.selenium.quit()
        super(HomepageTestCase, self).tearDown()

    def test_can_visit_homepage(self):
        self.selenium.get(self.BASE_URL + '/')
        self.assertIn("Growth Studio - Enjoy Create & Share", self.selenium.title)

    def test_can_about_us_page(self):
        self.selenium.get(self.BASE_URL + '/about-us/')
        self.assertIn("关于我们 - Growth Studio", self.selenium.title)
75,368 | europelee/growth-studio | refs/heads/master | /blog/views.py | from django.shortcuts import render_to_response, get_object_or_404
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from blog.models import Blog
def blog_list(request):
    """Render a paginated list of blog posts, 5 per page."""
    posts = Blog.objects.all()
    paginator = Paginator(posts, 5)
    requested_page = request.GET.get('page')
    try:
        page = paginator.page(requested_page)
    except PageNotAnInteger:
        # Missing or non-numeric page parameter: show the first page.
        page = paginator.page(1)
    except EmptyPage:
        # Past the end: clamp to the last page.
        page = paginator.page(paginator.num_pages)
    return render_to_response('blog/list.html', {'blogs': page})
def blog_detail(request, slug):
    """Render a single blog post looked up by slug (404 if missing)."""
    post = get_object_or_404(Blog, slug=slug)
    return render_to_response('blog/detail.html', {'post': post})
| {"/blog/api.py": ["/blog/models.py"], "/growth_studio/urls.py": ["/blog/api.py", "/blog/views.py"], "/blog/views.py": ["/blog/models.py"], "/blog/schema.py": ["/blog/models.py"]} |
75,369 | europelee/growth-studio | refs/heads/master | /fabfile.py | import os
from fabric.api import local
from fabric.decorators import task
from fabric.context_managers import settings, hide, cd, prefix
from fabric.operations import sudo, run, put
from fabric.state import env
# Local deployment artefacts shipped to the server.
circus_file_path = os.path.realpath('deploy/circus.ini')
circus_upstart_file_path = os.path.realpath('deploy/circus.conf')
nginx_config_path = os.path.realpath('deploy/nginx')
# Remote nginx directories (note: 'avaliable' is a historical typo kept
# for compatibility with the functions below).
nginx_avaliable_path = "/etc/nginx/sites-available/"
nginx_enable_path = "/etc/nginx/sites-enabled/"
app_path = "~"
virtual_env_path = "~/py35env/bin/activate"

# Target host configuration for fabric.
env.hosts = ['10.211.55.26']
env.user = 'phodal'
# SECURITY NOTE(review): plaintext password committed to the repo; prefer
# SSH keys or an untracked secrets file.
env.password = '940217'
@task
def install():
    """Install the development requirements with pip."""
    local("pip install -r requirements/dev.txt")
@task
def runserver():
    """Run the Django development server."""
    local("./manage.py runserver")
@task
def pep8():
    """Check the project for PEP8 compliance using `pep8`."""
    # warn_only so style violations don't abort the fabric run.
    with settings(hide('warnings'), warn_only=True):
        local('pep8 .')
@task
def tag_version(version):
    """Create a git tag for `version` and push it to origin."""
    local("git tag %s" % version)
    local("git push origin %s" % version)
@task
def fetch_version(version):
    """Download the tarball for git ref `version` from GitHub.

    NOTE(review): get_app() below fetches the same repo with a `v` prefix
    on the ref -- confirm the tag naming is consistent.
    """
    local('wget https://codeload.github.com/phodal/growth_studio/tar.gz/%s' % version)
@task
def test():
    """Run the blog app's unit tests."""
    local("./manage.py test blog")
@task
def e2e():
    """Run the selenium end-to-end tests."""
    local("./manage.py test e2e")
@task
def prepare_ac():
    """Create the 'test' superuser on the remote host for acceptance tests."""
    with cd('growth-studio'):
        with prefix('source ' + virtual_env_path):
            # Pipes a one-liner into the Django shell inside the virtualenv.
            run('echo "from django.contrib.auth.models import User; User.objects.create_superuser(%s, %s, %s)" | python manage.py shell' % ("'test'", "'test@phodal.com'", "'test'"))
@task
def ac():
    """Run the acceptance tests against the deployed host."""
    local("./manage.py test ac")
@task
def host_type():
    """Print the remote host's kernel information (uname -a)."""
    run('uname -a')
@task
def setup():
    """Provision an Ubuntu host: system packages, circus, nginx, virtualenv."""
    sudo('apt-get update')
    # System packages needed to build and serve the app.
    APT_GET_PACKAGES = [
        "build-essential",
        "git",
        "python3-dev",
        "python3-pip",
        "nginx",
        "virtualenv",
    ]
    sudo("apt-get install -y " + " ".join(APT_GET_PACKAGES))
    sudo('pip3 install circus')
    # Drop nginx's default site so only ours is enabled.
    sudo('rm ' + nginx_enable_path + 'default')
    run('virtualenv --distribute -p /usr/bin/python3.5 py35env')
def nginx_restart():
    """Restart the nginx service on the remote host."""
    sudo("service nginx restart")
def nginx_start():
    """Start the nginx service on the remote host."""
    sudo("service nginx start")
def nginx_config(nginx_config_path=nginx_config_path):
    """Upload every nginx site file to sites-available on the server."""
    for entry in os.listdir(nginx_config_path):
        local_path = os.path.join(nginx_config_path, entry)
        put(local_path, nginx_avaliable_path, use_sudo=True)
def circus_config():
    """Upload the circus watcher configuration to /etc/circus/."""
    sudo('mkdir -p /etc/circus/')
    put(circus_file_path, '/etc/circus/', use_sudo=True)
def circus_upstart_config():
    """Upload the circus upstart job definition to /etc/init/."""
    put(circus_upstart_file_path, '/etc/init/', use_sudo=True)
def circus_start():
    """Launch circusd as a daemon and restart its managed watchers."""
    sudo('/usr/local/bin/circusd /etc/circus/circus.ini --daemon')
    sudo('circusctl restart')
def nginx_enable_site(nginx_config_file):
    """Symlink a site from sites-available into sites-enabled."""
    with cd(nginx_enable_path):
        # Remove any stale link before re-creating it.
        sudo('rm -f ' + nginx_config_file)
        sudo('ln -s ' + nginx_avaliable_path + nginx_config_file)
@task
def deploy(version):
    """Deploy release `version`: fetch, install, configure and start services."""
    with cd(app_path):
        get_app(version)
        setup_app(version)
        config_app()
        nginx_config()
        nginx_enable_site('growth-studio.conf')
        circus_config()
        circus_upstart_config()
        circus_start()
        nginx_restart()
def config_app():
    """Collect static files and run DB migrations inside the virtualenv."""
    with cd('growth-studio'):
        with prefix('source ' + virtual_env_path):
            run('python manage.py collectstatic -v0 --noinput')
            run('python manage.py migrate')
def setup_app(version):
    """Install production requirements and point the `growth-studio`
    symlink at the unpacked release directory."""
    with prefix('source ' + virtual_env_path):
        run('pip3 install -r growth-studio-%s/requirements/prod.txt' % version)
        # Replace the current-release symlink atomically enough for this use.
        run('rm -f growth-studio')
        run('ln -s growth-studio-%s growth-studio' % version)
def get_app(version):
    """Download and unpack the v<version> release tarball from GitHub."""
    run(('wget ' + 'https://codeload.github.com/phodal/growth_studio/tar.gz/v' + '%s') % version)
    run('tar xvf v%s' % version)
| {"/blog/api.py": ["/blog/models.py"], "/growth_studio/urls.py": ["/blog/api.py", "/blog/views.py"], "/blog/views.py": ["/blog/models.py"], "/blog/schema.py": ["/blog/models.py"]} |
75,370 | europelee/growth-studio | refs/heads/master | /blog/schema.py | import graphene
from graphene_django import DjangoObjectType
from blog.models import Blog as BlogModel
class Blog(DjangoObjectType):
    """GraphQL type derived automatically from the Blog Django model."""

    class Meta:
        model = BlogModel
class Query(graphene.ObjectType):
    """Root query exposing the list of blog posts."""

    blog = graphene.List(Blog)

    # BUG FIX: the resolver was named `resolve_users`, which graphene never
    # invokes for a field named `blog`; resolvers must follow the
    # `resolve_<field_name>` convention.
    @graphene.resolve_only_args
    def resolve_blog(self):
        return BlogModel.objects.all()
# Module-level schema object consumed by the GraphQL endpoint.
schema = graphene.Schema(query=Query)
| {"/blog/api.py": ["/blog/models.py"], "/growth_studio/urls.py": ["/blog/api.py", "/blog/views.py"], "/blog/views.py": ["/blog/models.py"], "/blog/schema.py": ["/blog/models.py"]} |
75,373 | Trinkes/clean-architecture | refs/heads/master | /interface_adapter/topic_api.py | from typing import Tuple
class TopicApi(object):
    """Abstract interface-adapter boundary for a raw topic API client."""

    def get(self, word) -> Tuple[int, str]:
        # Abstract: concrete clients return (status code, response body).
        pass
| {"/interface_adapter/api_topic_repository.py": ["/entities/response_status.py", "/entities/topic.py", "/interface_adapter/api_response_mapper.py", "/interface_adapter/topic_api.py", "/use_cases/topic_repository.py"], "/use_cases/topic_repository.py": ["/entities/response_status.py", "/entities/topic.py"], "/use_cases/topic_service.py": ["/entities/response_status.py", "/entities/topic.py", "/use_cases/topic_repository.py"], "/frameworks/main.py": ["/frameworks/requests_topic_api.py", "/interface_adapter/api_response_mapper.py", "/interface_adapter/api_topic_repository.py", "/use_cases/topic_service.py"], "/frameworks/requests_topic_api.py": ["/interface_adapter/topic_api.py"], "/interface_adapter/api_response_mapper.py": ["/entities/response_status.py", "/entities/topic.py"]} |
75,374 | Trinkes/clean-architecture | refs/heads/master | /interface_adapter/api_topic_repository.py | from typing import Tuple, List
from entities.response_status import ResponseStatus
from entities.topic import Topic
from interface_adapter.api_response_mapper import ApiResponseMapper
from interface_adapter.topic_api import TopicApi
from use_cases.topic_repository import TopicRepository
class ApiTopicRepository(TopicRepository):
    """TopicRepository backed by a remote TopicApi plus a response mapper."""

    def __init__(self, api: TopicApi, mapper: ApiResponseMapper):
        self.mapper = mapper
        self.api = api

    def find_related_topic(self, word) -> Tuple[ResponseStatus, List[Topic]]:
        """Query the API for `word` and map the raw response to entities."""
        status_code, payload = self.api.get(word)
        return self.mapper.map(status_code, payload)
| {"/interface_adapter/api_topic_repository.py": ["/entities/response_status.py", "/entities/topic.py", "/interface_adapter/api_response_mapper.py", "/interface_adapter/topic_api.py", "/use_cases/topic_repository.py"], "/use_cases/topic_repository.py": ["/entities/response_status.py", "/entities/topic.py"], "/use_cases/topic_service.py": ["/entities/response_status.py", "/entities/topic.py", "/use_cases/topic_repository.py"], "/frameworks/main.py": ["/frameworks/requests_topic_api.py", "/interface_adapter/api_response_mapper.py", "/interface_adapter/api_topic_repository.py", "/use_cases/topic_service.py"], "/frameworks/requests_topic_api.py": ["/interface_adapter/topic_api.py"], "/interface_adapter/api_response_mapper.py": ["/entities/response_status.py", "/entities/topic.py"]} |
75,375 | Trinkes/clean-architecture | refs/heads/master | /use_cases/topic_repository.py | from typing import Tuple, List
from entities.response_status import ResponseStatus
from entities.topic import Topic
class TopicRepository:
    """Abstract use-case boundary for looking up topics related to a word."""

    def find_related_topic(self, word) -> Tuple[ResponseStatus, List[Topic]]:
        # Abstract: implementations return a status and the mapped topics.
        pass
| {"/interface_adapter/api_topic_repository.py": ["/entities/response_status.py", "/entities/topic.py", "/interface_adapter/api_response_mapper.py", "/interface_adapter/topic_api.py", "/use_cases/topic_repository.py"], "/use_cases/topic_repository.py": ["/entities/response_status.py", "/entities/topic.py"], "/use_cases/topic_service.py": ["/entities/response_status.py", "/entities/topic.py", "/use_cases/topic_repository.py"], "/frameworks/main.py": ["/frameworks/requests_topic_api.py", "/interface_adapter/api_response_mapper.py", "/interface_adapter/api_topic_repository.py", "/use_cases/topic_service.py"], "/frameworks/requests_topic_api.py": ["/interface_adapter/topic_api.py"], "/interface_adapter/api_response_mapper.py": ["/entities/response_status.py", "/entities/topic.py"]} |
75,376 | Trinkes/clean-architecture | refs/heads/master | /use_cases/topic_service.py | from entities.response_status import ResponseStatus
from entities.topic import Topic
from use_cases.topic_repository import TopicRepository
class TopicService:
    """Use case: find the first related topic for a given word."""

    def __init__(self, api: TopicRepository):
        self.api = api

    def find_related_topic(self, word: str) -> Topic:
        """Return the first related topic with a non-empty title.

        Raises:
            ValueError: on a failed lookup or when no topic matches.
        """
        status, topics = self.api.find_related_topic(word)
        if status == ResponseStatus.ERROR:
            raise ValueError('Error accessing external data')
        first_titled = next((t for t in topics if t.title), None)
        if first_titled is None:
            raise ValueError('No related topics found')
        return first_titled
| {"/interface_adapter/api_topic_repository.py": ["/entities/response_status.py", "/entities/topic.py", "/interface_adapter/api_response_mapper.py", "/interface_adapter/topic_api.py", "/use_cases/topic_repository.py"], "/use_cases/topic_repository.py": ["/entities/response_status.py", "/entities/topic.py"], "/use_cases/topic_service.py": ["/entities/response_status.py", "/entities/topic.py", "/use_cases/topic_repository.py"], "/frameworks/main.py": ["/frameworks/requests_topic_api.py", "/interface_adapter/api_response_mapper.py", "/interface_adapter/api_topic_repository.py", "/use_cases/topic_service.py"], "/frameworks/requests_topic_api.py": ["/interface_adapter/topic_api.py"], "/interface_adapter/api_response_mapper.py": ["/entities/response_status.py", "/entities/topic.py"]} |
75,377 | Trinkes/clean-architecture | refs/heads/master | /entities/response_status.py | from enum import Enum
class ResponseStatus(Enum):
    """Outcome of a repository/API lookup."""

    SUCCESS = 0
    ERROR = 1
| {"/interface_adapter/api_topic_repository.py": ["/entities/response_status.py", "/entities/topic.py", "/interface_adapter/api_response_mapper.py", "/interface_adapter/topic_api.py", "/use_cases/topic_repository.py"], "/use_cases/topic_repository.py": ["/entities/response_status.py", "/entities/topic.py"], "/use_cases/topic_service.py": ["/entities/response_status.py", "/entities/topic.py", "/use_cases/topic_repository.py"], "/frameworks/main.py": ["/frameworks/requests_topic_api.py", "/interface_adapter/api_response_mapper.py", "/interface_adapter/api_topic_repository.py", "/use_cases/topic_service.py"], "/frameworks/requests_topic_api.py": ["/interface_adapter/topic_api.py"], "/interface_adapter/api_response_mapper.py": ["/entities/response_status.py", "/entities/topic.py"]} |
75,378 | Trinkes/clean-architecture | refs/heads/master | /frameworks/main.py | from frameworks.requests_topic_api import RequestsTopicApi
from interface_adapter.api_response_mapper import ApiResponseMapper
from interface_adapter.api_response_mapper import ApiResponseMapper
from interface_adapter.api_topic_repository import ApiTopicRepository
from use_cases.topic_service import TopicService

# Composition root: wire the DuckDuckGo-backed API client into the
# use-case layer, then run one sample query and print the result.
service = TopicService(
    ApiTopicRepository(RequestsTopicApi('http://api.duckduckgo.com/'),
                       ApiResponseMapper()))
topic = service.find_related_topic('java')
print(topic)
75,379 | Trinkes/clean-architecture | refs/heads/master | /frameworks/requests_topic_api.py | from typing import Tuple
from urllib.parse import urlencode
import requests
from interface_adapter.topic_api import TopicApi
class RequestsTopicApi(TopicApi):
    """TopicApi implementation that performs HTTP GETs via `requests`."""

    def __init__(self, base_host: str):
        self.base_host = base_host

    def get(self, word) -> Tuple[int, str]:
        """GET `<base_host>?q=<word>&format=json`; return (status, json)."""
        query = urlencode({'q': word, 'format': 'json'})
        url = "{}?".format(self.base_host) + query
        print(url)  # debug trace of the outgoing request URL
        response = requests.get(url)
        return response.status_code, response.json()
| {"/interface_adapter/api_topic_repository.py": ["/entities/response_status.py", "/entities/topic.py", "/interface_adapter/api_response_mapper.py", "/interface_adapter/topic_api.py", "/use_cases/topic_repository.py"], "/use_cases/topic_repository.py": ["/entities/response_status.py", "/entities/topic.py"], "/use_cases/topic_service.py": ["/entities/response_status.py", "/entities/topic.py", "/use_cases/topic_repository.py"], "/frameworks/main.py": ["/frameworks/requests_topic_api.py", "/interface_adapter/api_response_mapper.py", "/interface_adapter/api_topic_repository.py", "/use_cases/topic_service.py"], "/frameworks/requests_topic_api.py": ["/interface_adapter/topic_api.py"], "/interface_adapter/api_response_mapper.py": ["/entities/response_status.py", "/entities/topic.py"]} |
75,380 | Trinkes/clean-architecture | refs/heads/master | /interface_adapter/api_response_mapper.py | from typing import List, Tuple
from entities.response_status import ResponseStatus
from entities.topic import Topic
class ApiResponseMapper(object):
    """Translates raw topic-API responses into domain objects."""

    def map_topics(self, data: dict) -> List[Topic]:
        """Extract Topic entities from the 'RelatedTopics' payload.

        Raises:
            ValueError: if the payload carries no related topics.
        """
        related_topics = data['RelatedTopics']
        if not related_topics:
            raise ValueError('No related topics found')
        # dict.get() folds the previous `'Text' in topic.keys()` membership
        # test and the None check into a single lookup.
        return [
            Topic(entry['Text'])
            for entry in related_topics
            if entry.get('Text') is not None
        ]

    def map(self, response_code, data) -> Tuple[ResponseStatus, List[Topic]]:
        """Map an HTTP status plus decoded body to (status, topics)."""
        if response_code == 200:
            return ResponseStatus.SUCCESS, self.map_topics(data)
        return ResponseStatus.ERROR, []
| {"/interface_adapter/api_topic_repository.py": ["/entities/response_status.py", "/entities/topic.py", "/interface_adapter/api_response_mapper.py", "/interface_adapter/topic_api.py", "/use_cases/topic_repository.py"], "/use_cases/topic_repository.py": ["/entities/response_status.py", "/entities/topic.py"], "/use_cases/topic_service.py": ["/entities/response_status.py", "/entities/topic.py", "/use_cases/topic_repository.py"], "/frameworks/main.py": ["/frameworks/requests_topic_api.py", "/interface_adapter/api_response_mapper.py", "/interface_adapter/api_topic_repository.py", "/use_cases/topic_service.py"], "/frameworks/requests_topic_api.py": ["/interface_adapter/topic_api.py"], "/interface_adapter/api_response_mapper.py": ["/entities/response_status.py", "/entities/topic.py"]} |
75,381 | Trinkes/clean-architecture | refs/heads/master | /entities/topic.py | from dataclasses import dataclass
@dataclass
class Topic(object):
    """Domain entity: a related topic, identified by its title text."""

    title: str
| {"/interface_adapter/api_topic_repository.py": ["/entities/response_status.py", "/entities/topic.py", "/interface_adapter/api_response_mapper.py", "/interface_adapter/topic_api.py", "/use_cases/topic_repository.py"], "/use_cases/topic_repository.py": ["/entities/response_status.py", "/entities/topic.py"], "/use_cases/topic_service.py": ["/entities/response_status.py", "/entities/topic.py", "/use_cases/topic_repository.py"], "/frameworks/main.py": ["/frameworks/requests_topic_api.py", "/interface_adapter/api_response_mapper.py", "/interface_adapter/api_topic_repository.py", "/use_cases/topic_service.py"], "/frameworks/requests_topic_api.py": ["/interface_adapter/topic_api.py"], "/interface_adapter/api_response_mapper.py": ["/entities/response_status.py", "/entities/topic.py"]} |
75,396 | aspire-fp7/code-guards | refs/heads/master | /generate_attestator.py | #!/usr/bin/python
import argparse
import os
import string
import sys
# Generate the default attestator
def generate_default(output_dir, label, degradation_label):
    """Generate the default attestator source files for `label`.

    Instantiates the attestator.c and attestator_variables.c templates
    into `output_dir`, skipping any generated file that already exists.
    """
    # The directory that contains the python scripts and the C templates.
    codeguard_dir = os.path.dirname(sys.argv[0])
    # (template file, generated file) pairs; the loop removes the
    # duplicated exists-check/instantiate logic of the original code.
    targets = (
        ('attestator.c', 'attestator_' + label + '.c'),
        ('attestator_variables.c', 'attestator_variables_' + label + '.c'),
    )
    for template_name, generated_name in targets:
        dst = os.path.join(output_dir, generated_name)
        if not os.path.exists(dst):
            src = os.path.join(codeguard_dir, template_name)
            instantiate_template(src, dst, label, degradation_label)
# Instantiate the template by filling in the labels in the input file and writing the new output file
def instantiate_template(input_file, output_file, label, degradation_label):
    """Substitute the ##LABEL## / ##DEGRADATION_LABEL## placeholders.

    Reads `input_file` line by line, replaces both placeholders, and
    writes the result to `output_file` (overwriting it if it exists).
    """
    with open(input_file) as f_in:
        with open(output_file, 'w+') as f_out:
            for line in f_in:
                # str.replace works on both Python 2 and 3; the old
                # string.replace() module function was removed in Python 3.
                line = line.replace('##LABEL##', label)
                line = line.replace('##DEGRADATION_LABEL##', degradation_label)
                # Write out the line
                f_out.write(line)
def main():
    """CLI entry point: parse arguments and instantiate one template."""
    # Parsing the arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input_file', required=True, help='The input file. This file contains the labels we need to replace.')
    parser.add_argument('-o', '--output_file', required=True, help='The output file.')
    parser.add_argument('-l', '--label', required=True, help='The label for the attestator.')
    parser.add_argument('-d', '--degradation_label', required=True, help='The label for the start_degradation function.')
    args = parser.parse_args()
    # Generate the actual file
    instantiate_template(args.input_file, args.output_file, args.label, args.degradation_label)
if __name__ == "__main__": main()
| {"/codeguard.py": ["/generate_attestator.py"]} |
75,397 | aspire-fp7/code-guards | refs/heads/master | /codeguard.py | #!/usr/bin/python
import argparse
import generate_attestator
import glob
import os
import random
import re
import shutil
import sys
# This script takes the following arguments:
# Argument 1: Input file. The file that contains the annotations we need to process.
# Argument 2: Output file. The processed file.
# Argument 3: The AID.
def process(input_file, output_file, aid):
    """Expand ASPIRE code-guard pragmas in `input_file` into `output_file`.

    Every guard_attestator / guard_verifier pragma gets a call to a
    generated attestator/verifier injected after it, and the supporting C
    sources are generated/copied next to `output_file`. The PRNG is
    seeded with `aid` so the chosen reaction mechanisms are reproducible
    for a given AID.
    """
    # The directory that contains the python scripts and the injected C code.
    codeguard_dir = os.path.dirname(sys.argv[0])
    mechanisms_dir = os.path.join(codeguard_dir, 'mechanisms')
    output_dir = os.path.dirname(output_file)

    # Seed the PRNG using the AID so choices are deterministic per AID.
    random.seed(aid)

    # Compile these regexes that we will use a lot.
    reg_attest = re.compile(r'.*#pragma\s*ASPIRE.*guard_attestator.*label\s*\(\s*(?P<id>.*?)\s*\).*')
    reg_verif = re.compile(r'.*#pragma\s*ASPIRE.*guard_verifier.*attestator\s*\(\s*(?P<id>.*?)\s*\).*')
    reg_mechanisms = re.compile(r'mechanisms_(?P<id>.*?).c')

    # Determine the reaction mechanism files available.
    degradation_labels = []
    for f in os.listdir(mechanisms_dir):
        match = reg_mechanisms.match(f)
        if match is not None:  # If we match the regex, add the label
            if match.group('id') != 'complex':  # Temporarily disable the complex mechanisms
                degradation_labels.append(match.group('id'))

    # Open the input and output file (the output file will be overwritten
    # if it already exists).
    contains_annot = False
    with open(input_file) as f_in:
        with open(output_file, 'w+') as f_out:
            for line in f_in:
                # Write out the original line.
                f_out.write(line)
                # Randomly choose a degradation to use. NOTE: drawn for
                # every input line (not only matching ones); hoisting this
                # would change the seeded PRNG sequence and hence the
                # output generated for a given AID.
                degradation_label = random.choice(degradation_labels)
                # Match for the attestator annotation.
                match = reg_attest.match(line)
                if match is not None:  # Add a call to an attestator if needed
                    label = match.group('id')
                    contains_annot = True
                    f_out.write('extern void attestator_' + label + '(unsigned int id);\n')
                    f_out.write('attestator_' + label + '(0);\n')
                    # Generate the attestator for this label (if it doesn't exist already)
                    generate_attestator.generate_default(output_dir, label, degradation_label)
                # Match for the verifier annotation.
                match = reg_verif.match(line)
                if match is not None:  # Add a call to a verifier if needed
                    contains_annot = True
                    label = match.group('id')
                    f_out.write('extern void verifier_' + label + '();\n')
                    f_out.write('verifier_' + label + '();\n')
                    # Generate the attestator for this label (if it doesn't exist already)
                    generate_attestator.generate_default(output_dir, label, degradation_label)

    # If the file contained any annotations, copy the utils over to the
    # output directory (unless this has already happened).
    if contains_annot and not os.path.exists(os.path.join(output_dir, 'utils.c')):
        shutil.copy(os.path.join(codeguard_dir, 'utils.c'), output_dir)
        shutil.copy(os.path.join(codeguard_dir, 'utils.h'), output_dir)
        # Copy all mechanisms stuff.
        shutil.copy(os.path.join(mechanisms_dir, 'mechanisms.h'), output_dir)
        for filename in glob.glob(os.path.join(mechanisms_dir, 'mechanisms_*.c')):
            shutil.copy(os.path.join(mechanisms_dir, filename), output_dir)
# Make sure the AID (which is passed as a string containing hexadecimal characters) is automatically converted into an integer
def hex_int(x):
    """Parse *x* as a hexadecimal string (e.g. ``"1A"`` or ``"0x1a"``) into an int."""
    return int(x, base=16)
def main():
    """CLI entry point: parse arguments and annotate one input file."""
    # Parsing the arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input_file', required=True, help='The input file. This file contains the annotations we need to process.')
    parser.add_argument('-o', '--output_file', required=True, help='The output file.')
    parser.add_argument('-a', '--aid', type=hex_int, required=False, help='The AID.')
    args = parser.parse_args()
    # Process the file
    # NOTE(review): -a is optional, so args.aid may be None -- presumably
    # `process` handles that; confirm against its definition above.
    process(args.input_file, args.output_file, args.aid)

if __name__ == "__main__": main()
| {"/codeguard.py": ["/generate_attestator.py"]} |
75,405 | lovea88824057/I4TP | refs/heads/master | /blog/migrations/0006_componentproduct_product_name.py | # Generated by Django 2.1.3 on 2018-12-24 15:49
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the required ``product_name`` column to Componentproduct.
    # ``preserve_default=False``: the default (0) only back-fills existing rows.

    dependencies = [
        ('blog', '0005_componentmaschine_maschine_text'),
    ]

    operations = [
        migrations.AddField(
            model_name='componentproduct',
            name='product_name',
            field=models.CharField(default=0, max_length=200),
            preserve_default=False,
        ),
| {"/blog/admin.py": ["/blog/models.py"]} |
75,406 | lovea88824057/I4TP | refs/heads/master | /blog/migrations/0005_componentmaschine_maschine_text.py | # Generated by Django 2.1.3 on 2018-12-22 11:07
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the required ``maschine_text`` column to Componentmaschine.
    # ``preserve_default=False``: the default (123) only back-fills existing rows.

    dependencies = [
        ('blog', '0004_auto_20181220_1129'),
    ]

    operations = [
        migrations.AddField(
            model_name='componentmaschine',
            name='maschine_text',
            field=models.CharField(default=123, max_length=200),
            preserve_default=False,
        ),
| {"/blog/admin.py": ["/blog/models.py"]} |
75,407 | lovea88824057/I4TP | refs/heads/master | /blog/urls.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from django.conf.urls import url
from . import views
app_name = 'blog'
urlpatterns = [
    url(r'^indexmaschine/$', views.indexmaschine),  # machine overview
    url(r'^dashboard/', views.dashboard, name='dashboard'),  # OEE dashboard
    url(r'^index/', views.index, name='index'),  # product overview
    url(r'^$', views.dashboard),  # site root shows the dashboard
    url(r'^search_results/', views.search_results),  # component capability search
    url(r'^requirement/', views.requirement),  # static requirement page
]
| {"/blog/admin.py": ["/blog/models.py"]} |
75,408 | lovea88824057/I4TP | refs/heads/master | /blog/views.py | from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from . import models
from django.shortcuts import render_to_response
def index(request):
    """Render the product overview page with every Product row."""
    product_list = models.Product.objects.all()
    return render(request, 'blog/index.html', {'products': product_list})
def indexmaschine(request):
    """Render the machine overview page with every Maschine row."""
    machine_list = models.Maschine.objects.all()
    return render(request, 'blog/indexmaschine.html', {'maschines': machine_list})
def dashboard(request):
    """Render the dashboard: product/machine counts and OEE status buckets.

    Products are bucketed by their ``oee`` value: up (90-100),
    offline (60-89) and breakdown (0-59); the matching percentages
    are passed to the template alongside the raw counts.
    """
    total = models.Product.objects.count()
    upline = models.Product.objects.filter(oee__gte=90).filter(oee__lte=100).count()
    offline = models.Product.objects.filter(oee__gte=60).filter(oee__lt=90).count()
    breakdown = models.Product.objects.filter(oee__gte=0).filter(oee__lt=60).count()
    # BUG FIX: the original divided by `total` unconditionally and crashed
    # with ZeroDivisionError when the Product table was empty.
    if total:
        up_rate = round(upline / total * 100)
        o_rate = round(offline / total * 100)
        bd_rate = round(breakdown / total * 100)
    else:
        up_rate = o_rate = bd_rate = 0
    p_number = total
    m_number = models.Maschine.objects.count()
    # The template reads all of the local names above via locals().
    return render(request, 'blog/dashboard.html', locals())
def search_results(request):
    """Filter Componentmaschine rows by capability values from the query string.

    NOTE(review): request.GET[...] raises MultiValueDictKeyError (HTTP 500)
    when a key is missing -- presumably the search form always submits all
    seven fields; confirm against the requirement template.
    """
    oee = request.GET['oee']
    quality = request.GET['quality']
    volume = request.GET['volume']
    x = request.GET['x']
    y = request.GET['y']
    z = request.GET['z']
    material = request.GET['material']
    # Machines must meet or exceed oee/quality and the workspace dimensions
    # (x/y/z), and match volume and tool material exactly.
    result = models.Componentmaschine.objects.filter(oee_componentofmaschine__gte=oee).filter(quality__gte=quality).filter(volume__exact=volume).filter(length_workspace__gte=x).filter(width_workspace__gte=y).filter(height_workspace__gte=z).filter(material_tool__exact=material)
    return render(request, 'blog/search_results.html', locals())
def requirement(request):
    """Render the static requirement (search form) page."""
    return render(request, 'blog/requirement.html') | {"/blog/admin.py": ["/blog/models.py"]} |
75,409 | lovea88824057/I4TP | refs/heads/master | /blog/migrations/0004_auto_20181220_1129.py | # Generated by Django 2.1.3 on 2018-12-20 10:29
from django.db import migrations
class Migration(migrations.Migration):
    # Drops the "_text" suffix from the two component foreign-key columns.

    dependencies = [
        ('blog', '0003_auto_20181219_2337'),
    ]

    operations = [
        migrations.RenameField(
            model_name='componentmaschine',
            old_name='maschine_text',
            new_name='maschine',
        ),
        migrations.RenameField(
            model_name='componentproduct',
            old_name='product_text',
            new_name='product',
        ),
    ]
| {"/blog/admin.py": ["/blog/models.py"]} |
75,410 | lovea88824057/I4TP | refs/heads/master | /blog/admin.py | from django.contrib import admin # Register your models here.
from .models import Product, Maschine, Componentmaschine
# Make the catalogue models editable in the Django admin site.
admin.site.register(Product)
admin.site.register(Maschine)
admin.site.register(Componentmaschine) | {"/blog/admin.py": ["/blog/models.py"]} |
75,411 | lovea88824057/I4TP | refs/heads/master | /blog/migrations/0007_auto_20190114_1732.py | # Generated by Django 2.1.3 on 2019-01-14 16:32
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the machine-capability columns (tool data, speeds, workspace
    # dimensions, ...) to Componentmaschine.  Most of these are removed
    # again by migration 0008; only the workspace/material fields survive.

    dependencies = [
        ('blog', '0006_componentproduct_product_name'),
    ]

    operations = [
        migrations.AddField(
            model_name='componentmaschine',
            name='ability_multiaspect',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='componentmaschine',
            name='accuracy',
            field=models.CharField(default=0, max_length=200),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='componentmaschine',
            name='diameter_tool',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='componentmaschine',
            name='feedspeed',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='componentmaschine',
            name='height_workspace',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='componentmaschine',
            name='length_tool',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='componentmaschine',
            name='length_workspace',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='componentmaschine',
            name='material_tool',
            field=models.CharField(default=0, max_length=200),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='componentmaschine',
            name='pressure',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='componentmaschine',
            name='rotatedspeed',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='componentmaschine',
            name='strock',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='componentmaschine',
            name='type_tool',
            field=models.CharField(default=0, max_length=200),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='componentmaschine',
            name='weight',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='componentmaschine',
            name='width_workspace',
            field=models.IntegerField(default=0),
        ),
    ]
| {"/blog/admin.py": ["/blog/models.py"]} |
75,412 | lovea88824057/I4TP | refs/heads/master | /blog/migrations/0002_componentofmaschine_componentofproduct.py | # Generated by Django 2.1.3 on 2018-12-19 19:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # First attempt at the component models ("Componentofmaschine" /
    # "Componentofproduct"); both are replaced by migration 0003.

    dependencies = [
        ('blog', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Componentofmaschine',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('component_text_maschine', models.CharField(max_length=200)),
                ('typeofmaschine', models.CharField(max_length=200)),
                ('quality', models.IntegerField(default=0)),
                ('oee_componentofmaschine', models.IntegerField(default=0)),
                ('volume', models.IntegerField(default=0)),
                ('shape', models.CharField(max_length=200)),
                ('dr', models.IntegerField(default=0)),
                ('bx', models.IntegerField(default=0)),
                ('feed', models.IntegerField(default=0)),
                ('maschine', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Maschine')),
            ],
        ),
        migrations.CreateModel(
            name='Componentofproduct',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('component_text_product', models.CharField(max_length=200)),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Product')),
            ],
        ),
    ]
| {"/blog/admin.py": ["/blog/models.py"]} |
75,413 | lovea88824057/I4TP | refs/heads/master | /blog/models.py | from django.db import models
class Product(models.Model):
    """A manufactured product shown on the index page and the dashboard."""

    product_text = models.CharField(max_length=200, default='Valve')  # display name
    pub_date = models.DateTimeField('date published')
    sn = models.CharField(max_length=64)  # sn is serial number
    product_component = models.CharField(max_length=200)
    product_production = models.CharField(max_length=200)
    # Overall Equipment Effectiveness; the dashboard buckets it into 0-59 /
    # 60-89 / 90-100 ranges.
    oee = models.IntegerField(default=0)

    def __str__(self):  # in Python 2 this would be __unicode__
        return self.product_text
class Maschine(models.Model):
    """A machine ("Maschine") associated with one Product."""

    product = models.ForeignKey(Product, on_delete=models.CASCADE)
    maschine_text = models.CharField(max_length=200)  # display name
    maschine_ability = models.CharField(max_length=200)
    maschine_component = models.CharField(max_length=200)

    def __str__(self):
        return self.maschine_text
class Componentproduct(models.Model):
    """A component belonging to a Product."""

    product = models.ForeignKey(Product, on_delete=models.CASCADE)
    component_text_product = models.CharField(max_length=200)  # display name
    shapeofproduct = models.CharField(max_length=200)
    product_name = models.CharField(max_length=200)
    production = models.CharField(max_length=200)

    def __str__(self):
        return self.component_text_product
class Componentmaschine(models.Model):
    """A machine component with the capability values used by the search view."""

    maschine = models.ForeignKey(Maschine, on_delete=models.CASCADE)
    maschine_text = models.CharField(max_length=200)
    component_text_maschine = models.CharField(max_length=200)  # display name
    abilityofmaschine = models.CharField(max_length=200)
    quality = models.IntegerField(default=0)
    plant = models.CharField(max_length=200)
    oee_componentofmaschine = models.IntegerField(default=0)
    volume = models.IntegerField(default=0)
    # Workspace dimensions matched against the x/y/z search parameters.
    length_workspace = models.IntegerField(default=0)
    width_workspace = models.IntegerField(default=0)
    height_workspace = models.IntegerField(default=0)
    material_tool = models.CharField(max_length=200)

    def __str__(self):
        return self.component_text_maschine
| {"/blog/admin.py": ["/blog/models.py"]} |
75,414 | lovea88824057/I4TP | refs/heads/master | /blog/forms.py | from django import forms
class AddForm(forms.Form):
    """Search form for looking up components by capability values."""
    # Only one product exists so far, hence the single choice.
    product = forms.ChoiceField(choices=[('valve', 'Valve')])
    oee = forms.IntegerField()
    quality = forms.IntegerField()
volume = forms.IntegerField() | {"/blog/admin.py": ["/blog/models.py"]} |
75,415 | lovea88824057/I4TP | refs/heads/master | /blog/migrations/0001_initial.py | # Generated by Django 2.1.3 on 2018-11-20 11:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema: Product, Maschine, and the Maschine -> Product FK.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Maschine',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('maschine_text', models.CharField(max_length=200)),
                ('maschine_ability', models.CharField(max_length=200)),
                ('maschine_component', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('product_text', models.CharField(default='Valve', max_length=200)),
                ('pub_date', models.DateTimeField(verbose_name='date published')),
                ('product_component', models.CharField(max_length=200)),
                ('product_production', models.CharField(max_length=200)),
                ('sn', models.CharField(max_length=64)),
                ('oee', models.IntegerField(default=0)),
            ],
        ),
        migrations.AddField(
            model_name='maschine',
            name='product',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Product'),
        ),
    ]
| {"/blog/admin.py": ["/blog/models.py"]} |
75,416 | lovea88824057/I4TP | refs/heads/master | /blog/migrations/0008_auto_20190121_1327.py | # Generated by Django 2.1.3 on 2019-01-21 12:27
from django.db import migrations
class Migration(migrations.Migration):
    # Reverts most of migration 0007: removes the capability columns that
    # the search view does not use.

    dependencies = [
        ('blog', '0007_auto_20190114_1732'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='componentmaschine',
            name='ability_multiaspect',
        ),
        migrations.RemoveField(
            model_name='componentmaschine',
            name='accuracy',
        ),
        migrations.RemoveField(
            model_name='componentmaschine',
            name='diameter_tool',
        ),
        migrations.RemoveField(
            model_name='componentmaschine',
            name='feedspeed',
        ),
        migrations.RemoveField(
            model_name='componentmaschine',
            name='length_tool',
        ),
        migrations.RemoveField(
            model_name='componentmaschine',
            name='pressure',
        ),
        migrations.RemoveField(
            model_name='componentmaschine',
            name='rotatedspeed',
        ),
        migrations.RemoveField(
            model_name='componentmaschine',
            name='strock',
        ),
        migrations.RemoveField(
            model_name='componentmaschine',
            name='type_tool',
        ),
        migrations.RemoveField(
            model_name='componentmaschine',
            name='weight',
        ),
    ]
| {"/blog/admin.py": ["/blog/models.py"]} |
75,417 | lovea88824057/I4TP | refs/heads/master | /blog/migrations/0003_auto_20181219_2337.py | # Generated by Django 2.1.3 on 2018-12-19 22:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Replaces the 0002 component models with Componentmaschine /
    # Componentproduct (new field sets), dropping the old pair.

    dependencies = [
        ('blog', '0002_componentofmaschine_componentofproduct'),
    ]

    operations = [
        migrations.CreateModel(
            name='Componentmaschine',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('component_text_maschine', models.CharField(max_length=200)),
                ('abilityofmaschine', models.CharField(max_length=200)),
                ('quality', models.IntegerField(default=0)),
                ('plant', models.CharField(max_length=200)),
                ('oee_componentofmaschine', models.IntegerField(default=0)),
                ('volume', models.IntegerField(default=0)),
                ('maschine_text', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Maschine')),
            ],
        ),
        migrations.CreateModel(
            name='Componentproduct',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('component_text_product', models.CharField(max_length=200)),
                ('shapeofproduct', models.CharField(max_length=200)),
                ('production', models.CharField(max_length=200)),
                ('product_text', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Product')),
            ],
        ),
        migrations.RemoveField(
            model_name='componentofmaschine',
            name='maschine',
        ),
        migrations.RemoveField(
            model_name='componentofproduct',
            name='product',
        ),
        migrations.DeleteModel(
            name='Componentofmaschine',
        ),
        migrations.DeleteModel(
            name='Componentofproduct',
        ),
    ]
| {"/blog/admin.py": ["/blog/models.py"]} |
75,428 | tianhm/autoslot | refs/heads/master | /autoslot.py | """ Classes and metaclasses for easier ``__slots__`` handling. """
from itertools import tee
from inspect import getmro
import dis
__version__ = '2022.12.1'
__all__ = ['Slots', 'SlotsMeta', 'SlotsPlusDict', 'SlotsPlusDictMeta']
def assignments_to_self(method) -> set:
    """Return the set of attribute names assigned on the instance in *method*.

    Scans the compiled bytecode for the pattern "load the instance
    variable, then STORE_ATTR", which is what ``self.<name> = ...``
    compiles to.
    """
    # Get the name of the var used to refer to the instance. Usually this
    # will be "self": it's the first parameter of __init__(self, ...).
    # If there are no parameters, just pretend it was "self".
    instance_var = next(iter(method.__code__.co_varnames), 'self')
    instructions = dis.Bytecode(method)
    # Walk consecutive instruction pairs (a, b) where b directly follows a;
    # the easiest way is two iterators with the second advanced one step.
    i0, i1 = tee(instructions)
    next(i1, None)
    names = set()
    for a, b in zip(i0, i1):
        if b.opname != 'STORE_ATTR':
            continue
        if a.opname in ('LOAD_FAST', 'LOAD_DEREF') and a.argval == instance_var:
            names.add(b.argval)
        elif a.opname == 'LOAD_FAST_LOAD_FAST' and a.argval[-1] == instance_var:
            # BUG FIX: CPython 3.13+ fuses two adjacent LOAD_FAST opcodes into
            # one LOAD_FAST_LOAD_FAST super-instruction whose argval is a pair
            # of names (e.g. for ``self.x = a``).  The object being stored
            # into is the *last* name loaded.  Without this branch such
            # assignments were silently missed and got no slot.
            names.add(b.argval)
    return names
class SlotsMeta(type):
    """Metaclass that derives ``__slots__`` from assignments in ``__init__``."""

    def __new__(mcs, name, bases, ns):
        # Start from any slots the class body already declared; using a
        # set conveniently de-duplicates against the inferred names.
        inferred = set(ns.get('__slots__', ()))
        if '__init__' in ns:
            inferred |= assignments_to_self(ns['__init__'])
        ns['__slots__'] = inferred
        return super().__new__(mcs, name, bases, ns)
class Slots(metaclass=SlotsMeta):
    """Convenience base class: inherit to get ``__slots__`` inferred."""
    pass
def super_has_dict(cls):
    """Return True if *cls* exposes a ``__slots__`` containing ``'__dict__'``."""
    return '__dict__' in getattr(cls, '__slots__', ())
class SlotsPlusDictMeta(SlotsMeta):
    """Like SlotsMeta, but also leaves room for dynamic attributes.

    Injects ``'__dict__'`` into the slots unless some ancestor already
    provides it -- ``'__dict__'`` may appear only once in the whole MRO,
    so every base's MRO is checked first.
    """

    def __new__(mcs, name, bases, ns):
        slots = set(ns.get('__slots__', ()))
        ancestors = (cls for base in bases for cls in getmro(base))
        if not any(super_has_dict(cls) for cls in ancestors):
            slots.add('__dict__')
        ns['__slots__'] = slots
        return super().__new__(mcs, name, bases, ns)
class SlotsPlusDict(metaclass=SlotsPlusDictMeta):
    """Convenience base class: inferred slots plus a ``__dict__`` fallback."""
    pass
| {"/tests/test_slots.py": ["/autoslot.py"]} |
75,429 | tianhm/autoslot | refs/heads/master | /tests/test_slots.py | import pytest
from autoslot import Slots, SlotsPlusDict
def test_normal():
    """This is just normal behaviour: nothing different."""
    class A:
        def __init__(self, a, b):
            self.x = a
            self.y = b
    a = A(1, 2)
    assert hasattr(a, 'x')
    assert hasattr(a, 'y')
    assert hasattr(a, '__dict__')
    assert not hasattr(a, '__slots__')
    # Plain classes accept new attributes at any time.
    a.z = 3
    assert hasattr(a, 'z')

def test_slots():
    """Basic usage of the Slots metaclass."""
    class A(Slots):
        def __init__(self, a, b):
            self.x = a
            self.y = b
            # Testing to see that the
            # bytecode processor identifies things
            # correctly.
            self.x = 'bleh'
    assert '__module__' in A.__dict__
    assert '__init__' in A.__dict__
    assert '__slots__' in A.__dict__
    assert A.__dict__['__slots__'] == {'x', 'y'}
    a = A(1, 2)
    assert hasattr(a, 'x')
    assert hasattr(a, 'y')
    # Just checking that we didn't pick up the wrong names
    assert not hasattr(a, 'a')
    assert not hasattr(a, 'b')
    assert hasattr(a, '__slots__')
    assert not hasattr(a, '__dict__')
    # Can't assign new attributes
    with pytest.raises(AttributeError):
        a.z = 3

def test_slots_load_deref():
    """Values can come from either LOAD_FAST or LOAD_DEREF
    opcodes, so we need to handle both."""
    class A(Slots):
        def __init__(self, a, b):
            self.x = a
            def f():
                """Simply by referring to self in another scope
                is enough to change the `self` accessing opcodes
                in __init__ to become LOAD_DEREF instead of
                LOAD_FAST. We don't even have to call `f`."""
                print(self)
            self.y = b
            # Testing to see that the
            # bytecode processor identifies things
            # correctly.
            self.x = 'bleh'
    assert '__module__' in A.__dict__
    assert '__init__' in A.__dict__
    assert '__slots__' in A.__dict__
    assert A.__dict__['__slots__'] == {'x', 'y'}
    a = A(1, 2)
    assert hasattr(a, 'x')
    assert hasattr(a, 'y')
    # Just checking that we didn't pick up the wrong names
    assert not hasattr(a, 'a')
    assert not hasattr(a, 'b')
    assert hasattr(a, '__slots__')
    assert not hasattr(a, '__dict__')
    # Can't assign new attributes
    with pytest.raises(AttributeError):
        a.z = 3
def test_slots_weakref():
    """Slot inference works alongside a user-declared __weakref__ slot."""
    class A(Slots):
        __slots__ = ['__weakref__']
        def __init__(self, a, b):
            self.x = a
            self.y = b
            # Testing to see that the
            # bytecode processor identifies things
            # correctly.
            self.x = 'bleh'
    assert '__module__' in A.__dict__
    assert '__init__' in A.__dict__
    assert '__slots__' in A.__dict__
    assert A.__dict__['__slots__'] == {'__weakref__', 'x', 'y'}
    a = A(1, 2)
    assert hasattr(a, 'x')
    assert hasattr(a, 'y')
    # Just checking that we didn't pick up the wrong names
    assert not hasattr(a, 'a')
    assert not hasattr(a, 'b')
    assert hasattr(a, '__slots__')
    assert not hasattr(a, '__dict__')
    # Can't assign new attributes
    with pytest.raises(AttributeError):
        a.z = 3
    import weakref
    r = weakref.ref(a)
    assert r

def test_no_init():
    # A class without __init__ still gets (empty) slots and no __dict__.
    class A(Slots):
        pass
    a = A()
    assert hasattr(a, '__slots__')
    assert not hasattr(a, '__dict__')

def test_conditional():
    """What happens if you conditionally create attributes inside
    __init__()?"""
    class A(Slots):
        def __init__(self, a):
            if a == 0:
                self.x = 1
            elif a == 1:
                self.y = 1
    # "if" is hit
    a = A(0)
    assert hasattr(a, '__slots__')
    assert not hasattr(a, '__dict__')
    # Both attributes will get slots.
    assert {'x', 'y'} < set(dir(a))
    # "elif" is hit
    a = A(1)
    assert hasattr(a, '__slots__')
    assert not hasattr(a, '__dict__')
    # Both attributes will get slots.
    assert {'x', 'y'} < set(dir(a))
    # Neither branch hit
    a = A(2)
    assert hasattr(a, '__slots__')
    assert not hasattr(a, '__dict__')
    # Both attributes will get slots.
    assert {'x', 'y'} < set(dir(a))
def test_inherit_new():
    """Normal inheritance will propagate the metaclass. Note that
    you MUST call super().__init__() if you want slots for the parents
    to be created."""
    class A(Slots):
        def __init__(self, a, b):
            self.x = a
            self.y = b
    class B(A):
        def __init__(self, c, d):
            super().__init__(1, 2)
            self.w = c
            self.z = d
    class C(B):
        """Missing call to superclass initializer."""
        def __init__(self, e, f):
            self.ww = e
            self.zz = f
    a = A(1, 2)
    assert hasattr(a, '__slots__')
    assert not hasattr(a, '__dict__')
    assert hasattr(a, 'x')
    assert hasattr(a, 'y')
    # Instances of B have slots from A in addition to their own.
    b = B(3, 4)
    assert hasattr(b, 'x')
    assert hasattr(b, 'y')
    assert hasattr(b, 'w')
    assert hasattr(b, 'z')
    assert hasattr(b, '__slots__')
    assert not hasattr(b, '__dict__')
    # C never calls up the chain, so the inherited slots stay unset.
    c = C(5, 6)
    assert hasattr(c, '__slots__')
    assert not hasattr(c, '__dict__')
    assert not hasattr(c, 'x')
    assert not hasattr(c, 'y')
    assert not hasattr(c, 'w')
    assert not hasattr(c, 'z')
    assert hasattr(c, 'ww')
    assert hasattr(c, 'zz')
    with pytest.raises(AttributeError):
        c.www = 123

def test_wrong_instance_var():
    """Also works if you don't use 'self' as the instance var name"""
    class A(Slots):
        def __init__(blah, a, b):
            blah.x = a
            blah.y = b
    a = A(1, 2)
    assert hasattr(a, 'x')
    assert hasattr(a, 'y')
    # Just checking that we didn't pick up the wrong things in
    # slots_metaclass.assignments_to_self.
    assert not hasattr(a, 'a')
    assert not hasattr(a, 'b')
    assert hasattr(a, '__slots__')
    assert not hasattr(a, '__dict__')
    # Can't assign new attributes
    with pytest.raises(AttributeError):
        a.z = 3
def test_slots_plus_dict():
    """You can also have both: slots for some vars, and dynamic assignment
    for other vars."""
    class A(SlotsPlusDict):
        def __init__(self, a, b):
            self.x = a
            self.y = b
    a = A(1, 2)
    assert hasattr(a, 'x')
    assert hasattr(a, 'y')
    # Just checking that we didn't pick up the wrong things in
    # slots_metaclass.assignments_to_self.
    assert not hasattr(a, 'a')
    assert not hasattr(a, 'b')
    assert hasattr(a, '__slots__')
    assert hasattr(a, '__dict__')
    # This now succeeds because there is a __dict__
    a.z = 3
    assert {'x', 'y'} < set(dir(a))
    # Note that x and y ARE NOT in the dict. This is where the space
    # savings come from.
    assert dict(z=3) == a.__dict__

def test_slots_plus_dict_empty():
    """You will always have to pay the cost of having an empty dict
    laying around though."""
    class A(SlotsPlusDict):
        def __init__(self, a, b):
            self.x = a
            self.y = b
    a = A(1, 2)
    assert hasattr(a, 'x')
    assert hasattr(a, 'y')
    # Just checking that we didn't pick up the wrong things in
    # slots_metaclass.assignments_to_self.
    assert not hasattr(a, 'a')
    assert not hasattr(a, 'b')
    assert hasattr(a, '__slots__')
    assert hasattr(a, '__dict__')
    assert {'x', 'y'} < set(dir(a))
    # Doesn't lazy-initialize, unfortunately.
    assert {} == a.__dict__

def test_slots_existing():
    """You can also provide your own slots if you like"""
    class A(Slots):
        __slots__ = ('z',)
        def __init__(self, a, b):
            self.x = a
            self.y = b
    a = A(1, 2)
    assert hasattr(a, '__slots__')
    assert not hasattr(a, '__dict__')
    assert {'x', 'y', 'z'} == a.__slots__
    assert hasattr(a, 'x')
    assert hasattr(a, 'y')
    # The instance does not have an attribute until you assign it.
    assert not hasattr(a, 'z')
    with pytest.raises(AttributeError):
        a.z
    # Assign to it, then hasattr will fail
    a.z = 123
    assert hasattr(a, 'z')
    with pytest.raises(AttributeError):
        a.totallynew = 456
def test_slots_existing_with_dict():
    """You can also provide your own slots if you like"""
    class A(SlotsPlusDict):
        __slots__ = {'z'}
        def __init__(self, a, b):
            self.x = a
            self.y = b
    a = A(1, 2)
    assert hasattr(a, '__slots__')
    assert hasattr(a, '__dict__')
    # NOTE! even though __dict__ was injected internally into the
    # slots array of the class, in the INSTANCE, __dict__ no longer
    # appears.
    assert {'x', 'y', 'z'} == a.__slots__
    assert hasattr(a, 'x')
    assert hasattr(a, 'y')
    assert not hasattr(a, 'z')
    with pytest.raises(AttributeError):
        a.z
    a.z = 123
    assert hasattr(a, 'z')
    # Unknown attributes fall through to the instance __dict__.
    a.totallynew = 456
    assert a.totallynew == 456

def test_much_inherit():
    """Very long inheritance chain."""
    class A(Slots):
        def __init__(self):
            self.x = 1
    class B(A):
        def __init__(self):
            super().__init__()
            self.y = 2
    class C(B):
        def __init__(self):
            super().__init__()
            self.z = 3
    class D(C):
        def __init__(self):
            super().__init__()
            self.u = 4
    class E(D):
        def __init__(self):
            super().__init__()
            self.v = 5
    e = E()
    assert hasattr(e, '__slots__')
    assert not hasattr(e, '__dict__')
    assert all(hasattr(e, attr) for attr in 'x y z u v'.split())
    with pytest.raises(AttributeError):
        e.w = 123

def test_much_inherit_dict():
    """Very long inheritance chain."""
    class A(SlotsPlusDict):
        def __init__(self):
            self.x = 1
    class B(A):
        def __init__(self):
            super().__init__()
            self.y = 2
    class C(B):
        def __init__(self):
            super().__init__()
            self.z = 3
    class D(C):
        def __init__(self):
            super().__init__()
            self.u = 4
    class E(D):
        def __init__(self):
            super().__init__()
            self.v = 5
    e = E()
    assert hasattr(e, '__slots__')
    assert hasattr(e, '__dict__')
    assert all(hasattr(e, attr) for attr in 'x y z u v'.split())
    # The injected __dict__ accepts arbitrary new attributes.
    e.w = 123
| {"/tests/test_slots.py": ["/autoslot.py"]} |
75,430 | MistSF/polibot | refs/heads/main | /functions.py | import requests
from conf.secrets import *
def reply_to_mention(mention, api):
    """Answer one Twitter mention with the classifier's verdict on its parent tweet.

    Returns a record dict suitable for persisting, or False when posting
    the reply failed.
    """
    mentionner = mention.user.screen_name
    id = mention.id
    # Classify the text of the tweet the mention replies to.
    text = api.get_status(mention.in_reply_to_status_id).text
    # NOTE(review): `text` is interpolated into the URL path unescaped --
    # presumably the prediction service tolerates this; confirm URL encoding.
    response = requests.get("{}predict/{}".format(URL, text))
    tendance = response.json()
    reply = make_reply(tendance, mentionner)
    valret = {
        "id_str": mention.id_str,
        "text": text,
        "text_mentionner": mention.text,
        "reply": reply,
        "mentionner": mention.user.screen_name,
        "gauche": tendance["gauche"],
        "droite": tendance["droite"]
    }
    try:
        api.update_status(
            status=reply,
            in_reply_to_status_id=id
        )
        return valret
    except Exception as err:
        print(err)
        return False
def make_reply(tendance, mentionner):
    """Build the reply tweet text for *mentionner* from a prediction dict.

    The dominant side ("gauche" / "droite") is listed first; on a tie the
    right-leaning percentage comes first, matching the original ordering.
    """
    gauche_pct = round(tendance["gauche"] * 100)
    droite_pct = round(tendance["droite"] * 100)
    if tendance["gauche"] > tendance["droite"]:
        ordered = ("{}% de gauche".format(gauche_pct),
                   "{}% de droite".format(droite_pct))
    else:
        ordered = ("{}% de droite".format(droite_pct),
                   "{}% de gauche".format(gauche_pct))
    return "@{} C'est de {}\n\n{}\n{}".format(
        mentionner, tendance["position"], ordered[0], ordered[1])
75,431 | MistSF/polibot | refs/heads/main | /bot.py | import json
from logging import error
from time import sleep
import requests
from requests.api import post
import tweepy
from conf.secrets import *
from conf.mongo_url import *
from functions import *
from pymongo import MongoClient
def __main__(reply):
    """Poll the bot's mentions forever and answer each one exactly once.

    ``reply`` is the MongoDB collection used both as a persistent
    "already answered" log and as the destination for reply records.
    """
    while True:
        for mention in tweepy.Cursor(api.mentions_timeline).items():
            # Skip mentions already processed in a previous pass.
            if reply.find_one({"id_str": mention.id_str}) is not None:
                continue
            # BUG FIX: reply_id was left unbound (NameError at the print
            # below) when reply_to_mention() returned False.
            reply_id = None
            try:
                # Verify the mention actually replies to a parent tweet.
                api.get_status(mention.in_reply_to_status_id).text
                valret = reply_to_mention(mention, api)
                if valret:
                    reply_id = reply.insert_one(valret)
            except Exception:
                # BUG FIX: the bare ``except:`` also swallowed
                # KeyboardInterrupt/SystemExit, making the loop unkillable.
                print("Aucun tweet parent trouvé")
                reply_id = reply.insert_one({
                    "id_str": mention.id_str,
                    "mentionner": mention.user.screen_name,
                    "parents": False
                })
            print(reply_id)
        sleep(60)
# Authenticate against Twitter, connect to MongoDB, then start polling.
auth = tweepy.OAuthHandler(API_KEY, API_SECRET_KEY)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
auth.secure = True
api = tweepy.API(auth)
try:
    api.verify_credentials()
    print("Authentification Ok")
    try:
        client = MongoClient(MONGO_URL)
        db = client.politweet
        reply = db.reply  # collection of answered mentions
        print("connected")
        __main__(reply)
    except Exception as err:
        print(err)
except Exception as err:
    # BUG FIX: the bare ``except:`` discarded the actual failure reason
    # (and swallowed KeyboardInterrupt); report it instead.
    print("Error during authentification:", err)
75,440 | Jonasdero/py-covid-graphs | refs/heads/master | /graphs/graph.py | import matplotlib.pyplot as plt
from functions.date import daterange
from constantData.keys import confirmedKey, deathsKey, endDateKey, latKey, lonKey, newConfirmedKey, newDeathsKey, newRecoveredKey, recoveredKey, startDateKey, percentageConfirmedKey, percentageDeathsKey, percentageRecoveredKey, activeKey, newActiveKey, percentageActiveKey, nameKey, latestActiveKey, latestDeathsKey, latestRecoveredKey
from scipy.ndimage.filters import gaussian_filter1d
def showCountryPlot(countryCases, countryDeaths, countryRecovered, country):
    """Plot the confirmed/deaths/recovered time series for one country.

    Each argument dict maps country name -> record carrying startDateKey,
    endDateKey, latKey, lonKey and a 'timeSeries' list (built by the parser).
    """
    startDate = countryCases.get(country).get(startDateKey)
    endDate = countryCases.get(country).get(endDateKey)
    # x axis values
    x = list(daterange(startDate, endDate))
    # corresponding y axis values
    countryCasesY = countryCases.get(country).get('timeSeries')
    countryDeathsY = countryDeaths.get(country).get('timeSeries')
    countryRecoveredY = countryRecovered.get(country).get('timeSeries')
    # plotting the points
    plt.plot(x, countryCasesY, label="Confirmed Cases in " + country)
    plt.plot(x, countryDeathsY, label="Deaths in " + country)
    plt.plot(x, countryRecoveredY, label="Recovered in " + country)
    # naming the x axis
    plt.xlabel('Date')
    plt.ylabel('Values')
    # giving a title to my graph
    plt.title(country + " (" + countryCases.get(country).get(latKey) +
              ", " + countryCases.get(country).get(lonKey) + ")")
    # show a legend on the plot
    plt.legend()
    # function to show the plot
    plt.show()
def showPlot(countryDic, countries, title):
    """Plot one time series per country from *countryDic* on a shared axis.

    NOTE(review): the x axis is derived from the first country's date range;
    presumably all countries share the same range -- confirm in the parser.
    """
    startDate = countryDic.get(countries[0]).get(startDateKey)
    endDate = countryDic.get(countries[0]).get(endDateKey)
    # x axis values
    x = list(daterange(startDate, endDate))
    for country in countries:
        # corresponding y axis values
        y = countryDic.get(country).get('timeSeries')
        # plotting the points
        plt.plot(x, y, label=country)
    # naming the axis
    plt.xlabel('Date')
    plt.ylabel('Values')
    # giving a title to my graph
    plt.title(title)
    # show a legend on the plot
    plt.legend()
    # function to show the plot
    plt.show()
def cumulatedView(country):
    """Plot cumulative active/recovered/dead series (right axis) against the
    smoothed active-case %-change (left axis) for one country record."""
    endDate = country.get(endDateKey)
    dateList = list(daterange(
        country.get(startDateKey),
        endDate))
    # Two y scales on one figure:
    # https://matplotlib.org/gallery/api/two_scales.html
    fig, ax1 = plt.subplots()
    # naming the axis
    # todo color?
    ax1.set_xlabel('Date')
    ax1.set_ylabel('Active Cases %-Change')
    ax1.plot(dateList,
             filter(country, percentageActiveKey),
             label='Active Cases %-Change', color='black')
    ax2 = ax1.twinx()
    ax2.set_ylabel('Cumulative Cases')
    ax2.plot(dateList, filter(country, activeKey),
             label='Active Cases (' + str(country.get(latestActiveKey)) + ')')
    ax2.plot(dateList, filter(country, recoveredKey),
             label='Recovered Cases (' +
             str(country.get(latestRecoveredKey)) + ')')
    ax2.plot(dateList, filter(country, deathsKey),
             label='Dead Cases (' + str(country.get(latestDeathsKey)) + ')')
    fig.tight_layout()  # otherwise the right y-label is slightly clipped
    plt.title('Cumulated View ' +
              country.get(nameKey) + ' as of ' +
              endDate.strftime('%Y-%m-%d'))
    plt.legend()
    plt.show()
def dailyView(country):
    """Plot smoothed daily new cases/recoveries (left axis) and daily new
    deaths (right axis) for one country record."""
    endDate = country.get(endDateKey)
    dateList = list(daterange(
        country.get(startDateKey),
        endDate))
    # Two y scales on one figure:
    # https://matplotlib.org/gallery/api/two_scales.html
    fig, ax1 = plt.subplots()
    # naming the axis
    # todo color?
    ax1.set_xlabel('Date')
    ax1.set_ylabel('Daily new active cases / recoveries')
    ax1.plot(dateList,
             filter(country, newActiveKey),
             label='New Cases')
    ax1.plot(dateList,
             filter(country, newRecoveredKey),
             label='New Recoveries')
    ax2 = ax1.twinx()
    ax2.set_ylabel('Daily new deaths')
    ax2.plot(dateList, filter(country, newDeathsKey),
             label='New Deaths', color='tab:red')
    fig.tight_layout()  # otherwise the right y-label is slightly clipped
    plt.title('Daily View - ' +
              country.get(nameKey))
    plt.legend()
    plt.show()
def filter(country, key, sigma=2):
    """Return the Gaussian-smoothed series stored under *key* in *country*.

    sigma is the smoothing width; the default of 2 preserves the original
    hard-coded behavior while letting callers tune it.

    NOTE: the name shadows the builtin filter(); kept as-is because the
    plotting functions in this module call it by this name.
    """
    return gaussian_filter1d(country.get(key), sigma=sigma)
| {"/graphs/graph.py": ["/functions/date.py", "/constantData/keys.py"], "/createGraphs.py": ["/constantData/urls.py", "/functions/parser.py", "/functions/crawler.py", "/graphs/graph.py"], "/index.py": ["/constantData/urls.py", "/functions/crawler.py", "/functions/parser.py", "/graphs/graph.py"], "/functions/parser.py": ["/functions/date.py", "/constantData/keys.py"]} |
75,441 | Jonasdero/py-covid-graphs | refs/heads/master | /functions/crawler.py | import requests
import csv
def getCSV(csvURL):
    """Download the CSV at *csvURL* and return its rows as a list of lists."""
    with requests.Session() as session:
        payload = session.get(csvURL).content.decode('utf-8')
    reader = csv.reader(payload.splitlines(), delimiter=',')
    return list(reader)
| {"/graphs/graph.py": ["/functions/date.py", "/constantData/keys.py"], "/createGraphs.py": ["/constantData/urls.py", "/functions/parser.py", "/functions/crawler.py", "/graphs/graph.py"], "/index.py": ["/constantData/urls.py", "/functions/crawler.py", "/functions/parser.py", "/graphs/graph.py"], "/functions/parser.py": ["/functions/date.py", "/constantData/keys.py"]} |
75,442 | Jonasdero/py-covid-graphs | refs/heads/master | /createGraphs.py | from constantData.urls import confirmedGlobalUrl, confirmedUSUrl, deathsGlobalUrl, deathsUSUrl, recoveredGlobalUrl
from functions.parser import transformSingleList
from functions.crawler import getCSV
from graphs.graph import showCountryPlot, showPlot
# Download the three global JHU CSSE time-series CSVs and fold each into a
# per-country dictionary (province rows summed; see transformSingleList).
confirmedGlobal = transformSingleList(getCSV(confirmedGlobalUrl))
deathsGlobal = transformSingleList(getCSV(deathsGlobalUrl))
recoveredGlobal = transformSingleList(getCSV(recoveredGlobalUrl))
# Countries used by the (currently disabled) multi-country comparison plots.
interestingCountries = ['China', 'Germany',
                        'US', 'United Kingdom', 'Italy', 'France']
# Render the single-country plot; alternatives kept below for reference.
showCountryPlot(confirmedGlobal, deathsGlobal, recoveredGlobal, 'China')
# showPlot(confirmedGlobal, interestingCountries, 'Confirmed Cases')
# showPlot(deathsGlobal, interestingCountries, 'Deaths')
# showPlot(recoveredGlobal, interestingCountries, 'Recovered')
| {"/graphs/graph.py": ["/functions/date.py", "/constantData/keys.py"], "/createGraphs.py": ["/constantData/urls.py", "/functions/parser.py", "/functions/crawler.py", "/graphs/graph.py"], "/index.py": ["/constantData/urls.py", "/functions/crawler.py", "/functions/parser.py", "/graphs/graph.py"], "/functions/parser.py": ["/functions/date.py", "/constantData/keys.py"]} |
75,443 | Jonasdero/py-covid-graphs | refs/heads/master | /functions/date.py | import datetime
def getDate(usString):
    """Parse a US-style 'm/d/yy' string (JHU CSV header format) into a date."""
    parsed = datetime.datetime.strptime(usString, "%m/%d/%y")
    return parsed.date()
def addDays(date, amount):
    """Return *date* shifted forward by *amount* days (negative shifts back)."""
    offset = datetime.timedelta(days=amount)
    return date + offset
def addOneDay(date):
    """Return the day after *date*."""
    return date + datetime.timedelta(days=1)
def daterange(startDate, endDate):
    """Yield every date from startDate through endDate, inclusive.

    Yields nothing when endDate precedes startDate.
    """
    current = startDate
    while current <= endDate:
        yield current
        current = current + datetime.timedelta(days=1)
| {"/graphs/graph.py": ["/functions/date.py", "/constantData/keys.py"], "/createGraphs.py": ["/constantData/urls.py", "/functions/parser.py", "/functions/crawler.py", "/graphs/graph.py"], "/index.py": ["/constantData/urls.py", "/functions/crawler.py", "/functions/parser.py", "/graphs/graph.py"], "/functions/parser.py": ["/functions/date.py", "/constantData/keys.py"]} |
75,444 | Jonasdero/py-covid-graphs | refs/heads/master | /index.py | from constantData.urls import confirmedGlobalUrl, deathsGlobalUrl, recoveredGlobalUrl
from functions.crawler import getCSV
from functions.parser import parseData
from graphs.graph import cumulatedView, dailyView
# Fetch the three global time series and merge them into one
# {countryName: countryData} dict (see parser.parseData).
countriesList = parseData(getCSV(confirmedGlobalUrl), getCSV(
    deathsGlobalUrl), getCSV(recoveredGlobalUrl))
country = 'US'
# print(countriesList.get('Germany'))
cumulatedView(countriesList.get(country))
dailyView(countriesList.get(country))
| {"/graphs/graph.py": ["/functions/date.py", "/constantData/keys.py"], "/createGraphs.py": ["/constantData/urls.py", "/functions/parser.py", "/functions/crawler.py", "/graphs/graph.py"], "/index.py": ["/constantData/urls.py", "/functions/crawler.py", "/functions/parser.py", "/graphs/graph.py"], "/functions/parser.py": ["/functions/date.py", "/constantData/keys.py"]} |
75,445 | Jonasdero/py-covid-graphs | refs/heads/master | /functions/parser.py |
from functions.date import getDate, addOneDay
from constantData.keys import confirmedKey, deathsKey, endDateKey, latKey, lonKey, newConfirmedKey, newDeathsKey, newRecoveredKey, recoveredKey, startDateKey, percentageConfirmedKey, percentageDeathsKey, percentageRecoveredKey, activeKey, newActiveKey, percentageActiveKey, nameKey, latestActiveKey, latestDeathsKey, latestRecoveredKey
def parseData(confirmedList, deathList, recoveredList):
    """Merge the three raw CSV row lists into {countryName: countryData}.

    Each input list is [headerRow, dataRow, ...] with 4 metadata columns
    (province, country, lat, lon) followed by one column per date.
    Province-level rows of the same country are summed. After the raw series
    are merged, per-day deltas, percentage changes, active cases and latest
    scalar values are derived for every country.
    """
    countries = {}
    # Lists are nearly the same, therefore only the first list (confirmedList)
    # is taken for indexing or dates
    defRow = confirmedList[0]
    lowestRowIndex = 4
    highestRowIndex = len(defRow) - 1
    startDate = getDate(defRow[lowestRowIndex])
    endDate = getDate(defRow[highestRowIndex])
    # Pass 1: confirmed cases (also creates the per-country metadata entry).
    for index in range(1, len(confirmedList)):
        confirmedRow = confirmedList[index]
        country = confirmedRow[1]
        countryData = {}
        if countries.get(country) is None:
            countryData[nameKey] = country
            countryData[startDateKey] = startDate
            countryData[endDateKey] = endDate
            countryData[latKey] = confirmedRow[2]
            countryData[lonKey] = confirmedRow[3]
            countryData[confirmedKey] = parseTimeSeries(confirmedRow)
            countries[country] = countryData
        else:
            mergeTimeSeries(countries[country].get(
                confirmedKey), parseTimeSeries(confirmedRow))
    # Pass 2: recovered cases.
    for index in range(1, len(recoveredList)):
        recoveredRow = recoveredList[index]
        country = recoveredRow[1]
        # BUG FIX: this previously tested deathsKey (which is never set at
        # this point), so each additional province row OVERWROTE the merged
        # recovered series instead of being added to it.
        if countries.get(country).get(recoveredKey) is None:
            countries[country][recoveredKey] = parseTimeSeries(recoveredRow)
        else:
            mergeTimeSeries(countries[country].get(
                recoveredKey), parseTimeSeries(recoveredRow))
    # Pass 3: deaths.
    for index in range(1, len(deathList)):
        deathsRow = deathList[index]
        country = deathsRow[1]
        if countries.get(country).get(deathsKey) is None:
            countries[country][deathsKey] = parseTimeSeries(deathsRow)
        else:
            mergeTimeSeries(countries[country].get(
                deathsKey), parseTimeSeries(deathsRow))
    # Derive secondary series and latest scalar values per country.
    for countryKey in countries.keys():
        country = countries.get(countryKey)
        country[newConfirmedKey] = getDelta(country.get(confirmedKey))
        country[newDeathsKey] = getDelta(country.get(deathsKey))
        country[newRecoveredKey] = getDelta(country.get(recoveredKey))
        country[percentageConfirmedKey] = getPercentualChange(
            country.get(confirmedKey), country.get(newConfirmedKey))
        country[percentageDeathsKey] = getPercentualChange(
            country.get(deathsKey), country.get(newDeathsKey))
        country[percentageRecoveredKey] = getPercentualChange(
            country.get(recoveredKey), country.get(newRecoveredKey))
        country[activeKey] = getActiveCases(country)
        country[newActiveKey] = getDelta(country.get(activeKey))
        country[percentageActiveKey] = getPercentualChange(
            country.get(activeKey), country.get(newActiveKey))
        country[latestActiveKey] = getLastValueSafely(country.get(activeKey))
        country[latestRecoveredKey] = getLastValueSafely(
            country.get(recoveredKey))
        country[latestDeathsKey] = getLastValueSafely(country.get(deathsKey))
    return countries
def getLastValueSafely(list):
    """Return the final element of *list*, or 0 when the list is empty."""
    return list[-1] if list else 0
def getNumberSavelyFromList(list, index):
    """Return list[index], or 0 when *index* is past the end of the list."""
    if index >= len(list):
        return 0
    return list[index]
def transformSingleList(csvList):
    """Turn one raw CSV row list into {country: {meta..., 'timeSeries': [...]}}.

    Province-level rows of the same country are summed into one 'timeSeries'.
    The header row (index 0) supplies the series' start and end dates; the
    first matching row supplies the lat/lon metadata.
    """
    countries = {}
    headerRow = csvList[0]
    firstDateCol = 4
    lastDateCol = len(headerRow) - 1
    seriesStart = getDate(headerRow[firstDateCol])
    seriesEnd = getDate(headerRow[lastDateCol])
    for row in csvList[1:]:
        country = row[1]
        if country not in countries:
            countries[country] = {
                startDateKey: seriesStart,
                endDateKey: seriesEnd,
                latKey: row[2],
                lonKey: row[3],
                'timeSeries': parseTimeSeries(row),
            }
        else:
            mergeTimeSeries(countries[country].get('timeSeries'),
                            parseTimeSeries(row))
    return countries
def parseTimeSeries(row):
    """Convert a CSV row's date columns (index 4 onward) into a list of ints."""
    return [int(cell) for cell in row[4:]]
def mergeTimeSeries(timeSeries1, timeSeries2):
    """Add timeSeries2 element-wise into timeSeries1 (in place) and return it.

    timeSeries1 keeps its length; extra trailing entries of timeSeries2 are
    ignored, missing ones contribute nothing.
    """
    for position, extra in enumerate(timeSeries2[:len(timeSeries1)]):
        timeSeries1[position] += extra
    return timeSeries1
def getDelta(list):
    """Return day-over-day differences; the first entry is the first value."""
    if not list:
        return []
    return [list[0]] + [list[i + 1] - list[i] for i in range(len(list) - 1)]
def getActiveCases(country):
    """Derive the active-case series: confirmed - deaths - recovered per day.

    Iterates over the confirmed series' length; entries missing from the
    deaths or recovered series count as 0 (via getNumberSavelyFromList).
    """
    confirmedSeries = country.get(confirmedKey)
    deathSeries = country.get(deathsKey)
    recoveredSeries = country.get(recoveredKey)
    active = []
    for day in range(len(confirmedSeries)):
        stillActive = (getNumberSavelyFromList(confirmedSeries, day)
                       - getNumberSavelyFromList(deathSeries, day)
                       - getNumberSavelyFromList(recoveredSeries, day))
        active.append(stillActive)
    return active
def getPercentualChange(cumulatedList, deltaList):
    """Return each day's delta as a percentage of that day's cumulated value.

    Entries where the delta is missing/zero or the cumulated value is zero
    become 0. Returns [] when either input list is empty.
    """
    if not cumulatedList or not deltaList:
        return []
    percent = []
    for i, total in enumerate(cumulatedList):
        if i < len(deltaList) and deltaList[i] != 0 and total != 0:
            change = (abs(deltaList[i]) / total) * 100
            percent.append(-change if deltaList[i] < 0 else change)
        else:
            percent.append(0)
    return percent
| {"/graphs/graph.py": ["/functions/date.py", "/constantData/keys.py"], "/createGraphs.py": ["/constantData/urls.py", "/functions/parser.py", "/functions/crawler.py", "/graphs/graph.py"], "/index.py": ["/constantData/urls.py", "/functions/crawler.py", "/functions/parser.py", "/graphs/graph.py"], "/functions/parser.py": ["/functions/date.py", "/constantData/keys.py"]} |
75,446 | Jonasdero/py-covid-graphs | refs/heads/master | /constantData/urls.py | repository = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/'
# Global time-series CSVs (one row per province/country).
confirmedGlobalUrl = repository + 'time_series_covid19_confirmed_global.csv'
deathsGlobalUrl = repository + 'time_series_covid19_deaths_global.csv'
recoveredGlobalUrl = repository + 'time_series_covid19_recovered_global.csv'
# US-specific time-series CSVs.
confirmedUSUrl = repository + 'time_series_covid19_confirmed_US.csv'
deathsUSUrl = repository + 'time_series_covid19_deaths_US.csv'
| {"/graphs/graph.py": ["/functions/date.py", "/constantData/keys.py"], "/createGraphs.py": ["/constantData/urls.py", "/functions/parser.py", "/functions/crawler.py", "/graphs/graph.py"], "/index.py": ["/constantData/urls.py", "/functions/crawler.py", "/functions/parser.py", "/graphs/graph.py"], "/functions/parser.py": ["/functions/date.py", "/constantData/keys.py"]} |
75,447 | Jonasdero/py-covid-graphs | refs/heads/master | /constantData/keys.py | nameKey = 'name'
# Cumulative series
confirmedKey = 'confirmed'
deathsKey = 'deaths'
recoveredKey = 'recovered'
# Daily-delta series (day-over-day differences)
newConfirmedKey = 'newConfirmed'
newDeathsKey = 'newDeaths'
newRecoveredKey = 'newRecovered'
# Day-over-day percentage-change series
percentageConfirmedKey = 'percentageConfirmed'
percentageDeathsKey = 'percentageDeaths'
percentageRecoveredKey = 'percentageRecovered'
# Series date range and location metadata
startDateKey = 'startDate'
endDateKey = 'endDate'
latKey = 'lat'
lonKey = 'lon'
# Derived active-case series (confirmed - deaths - recovered)
activeKey = 'active'
newActiveKey = 'newActive'
percentageActiveKey = 'percentageActive'
# Latest scalar values (last element of the corresponding series)
latestActiveKey = 'latestActive'
latestRecoveredKey = 'latestRecovered'
latestDeathsKey = 'latestDeaths'
| {"/graphs/graph.py": ["/functions/date.py", "/constantData/keys.py"], "/createGraphs.py": ["/constantData/urls.py", "/functions/parser.py", "/functions/crawler.py", "/graphs/graph.py"], "/index.py": ["/constantData/urls.py", "/functions/crawler.py", "/functions/parser.py", "/graphs/graph.py"], "/functions/parser.py": ["/functions/date.py", "/constantData/keys.py"]} |
75,489 | powerWishes/chanpay-test | refs/heads/master | /facade/enum/db/DBName.py | '''
This enum exists for reading the database configuration; its values must match the file names under resource/config/db.
'''
# Config file name (without extension) under resource/config/db.
CHANJET_PAY_BASE = 'chanjet_pay_base'
CHANJET_PAY_CHANNEL = 'chanjet_pay_channel' | {"/test/trans/gateway/qrcode/unionMainSweep.py": ["/test/common/paramUtil.py"], "/util/DBUtil.py": ["/facade/dto/db/DBConnectionInfo.py"]} |
75,490 | powerWishes/chanpay-test | refs/heads/master | /util/WorkspaceUtil.py | import os
# Return the project root directory path.
def get_root_path():
    """Walk upward from the CWD until the 'chanpay-test' project root is found.

    Side effect (preserved from the original): each step chdir()s the process,
    so on success the CWD is the project root.

    Raises:
        FileNotFoundError: when the filesystem root is reached without finding
            the project directory (the original looped forever in that case).
    """
    now_path = os.getcwd()
    while not now_path.endswith("chanpay-test"):
        parent = os.path.abspath('..')
        if parent == now_path:
            # No progress possible: we are at the filesystem root.
            raise FileNotFoundError(
                "project root 'chanpay-test' not found above " + now_path)
        os.chdir(parent)
        now_path = os.getcwd()
    return now_path
| {"/test/trans/gateway/qrcode/unionMainSweep.py": ["/test/common/paramUtil.py"], "/util/DBUtil.py": ["/facade/dto/db/DBConnectionInfo.py"]} |
75,491 | powerWishes/chanpay-test | refs/heads/master | /facade/enum/base/TestType.py | COMMUNICATION_TEST = 1 # 1. 接口连通性测试(接口通讯为200)
PARAMETER_TEST = 2  # 2. parameter-validation test (call fails, no DB data written)
RIGHT_TEST = 3  # 3. correctness test (call succeeds and DB data is correct)
CALLBACK_TEST = 4  # 4. callback test
CUSTOM_TEST_FIRST = 5  # 5. custom test 1; add a note in the test docs when used
CUSTOM_TEST_SECOND = 6  # 6. custom test 2; add a note in the test docs when used
CUSTOM_TEST_THIRD = 7  # 7. custom test 3; add a note in the test docs when used
CUSTOM_TEST_FOURTH = 8  # 8. custom test 4; add a note in the test docs when used
CUSTOM_TEST_FIFTH = 9 # 9. 定制化测试5,使用时需在文档加相应备注 | {"/test/trans/gateway/qrcode/unionMainSweep.py": ["/test/common/paramUtil.py"], "/util/DBUtil.py": ["/facade/dto/db/DBConnectionInfo.py"]} |
75,492 | powerWishes/chanpay-test | refs/heads/master | /facade/enum/base/DataTypeEnum.py | BOOLEAN = 0
# Target type codes consumed by DataConvertUtil.convert().
INTEGER = 1
STRING = 2
LONG = 3
DOUBLE = 4
| {"/test/trans/gateway/qrcode/unionMainSweep.py": ["/test/common/paramUtil.py"], "/util/DBUtil.py": ["/facade/dto/db/DBConnectionInfo.py"]} |
75,493 | powerWishes/chanpay-test | refs/heads/master | /util/MapUtil.py | from collections import OrderedDict
# Convert a plain dict into a key-sorted OrderedDict.
def to_tree_map(normal_map):
    """Return an OrderedDict with normal_map's items in ascending key order."""
    ordered = OrderedDict()
    for key in sorted(normal_map):
        ordered[key] = normal_map[key]
    return ordered
| {"/test/trans/gateway/qrcode/unionMainSweep.py": ["/test/common/paramUtil.py"], "/util/DBUtil.py": ["/facade/dto/db/DBConnectionInfo.py"]} |
75,494 | powerWishes/chanpay-test | refs/heads/master | /util/HttpUtil.py | import json
import requests
# param format: {key1: value1, key2: value2}
def get(url, param):
    """Issue an HTTP GET with *param* as the query string; return the Response.

    FIX: removed the unused `content` local and the stale "add HTTP headers"
    comment (no headers were ever added).
    """
    return requests.get(url, params=param)
def post(aim_url, param):
    """POST *param* as a JSON body to *aim_url*; return the Response.

    FIX: removed the unused `content` local (the response text was computed
    and then discarded).
    """
    return requests.post(aim_url, json=param)
if __name__ == '__main__':
    # Manual smoke test: POST a sample main-sweep request to the internal
    # payment-gateway endpoint and print the raw and parsed responses.
    parm = {"productDesc": "自动化测试", "trxTyp": "1301", "mrchntNo": "100000000000008", "trxChnNo": "2100",
            "sign": "abedb7be5cb42660b3c5465c40639cf2", "accUsrNo": "4000", "mrchntNm": "微信主扫商户",
            "trxId": "9753aafd960c9be3", "trxAmt": "0.01"}
    url = 'http://10.255.0.113:7185/gateway/channel/api/qrCode/mainSweep'
    req = requests.post(url, json=parm)
    print(req.text)
    print(json.loads(req.text))
| {"/test/trans/gateway/qrcode/unionMainSweep.py": ["/test/common/paramUtil.py"], "/util/DBUtil.py": ["/facade/dto/db/DBConnectionInfo.py"]} |
75,495 | powerWishes/chanpay-test | refs/heads/master | /facade/enum/base/CommunicationTypeEnum.py | HTTP_POST = 1
# Transport/protocol codes describing how an interface is invoked.
HTTP_GET = 2
HTTPS_POST = 3
HTTPS_GET = 4
WEB_SERVICE = 5 | {"/test/trans/gateway/qrcode/unionMainSweep.py": ["/test/common/paramUtil.py"], "/util/DBUtil.py": ["/facade/dto/db/DBConnectionInfo.py"]} |
75,496 | powerWishes/chanpay-test | refs/heads/master | /test/common/verification.py | # 此文件主要负责验证信息,例如:获取session、token
| {"/test/trans/gateway/qrcode/unionMainSweep.py": ["/test/common/paramUtil.py"], "/util/DBUtil.py": ["/facade/dto/db/DBConnectionInfo.py"]} |
75,497 | powerWishes/chanpay-test | refs/heads/master | /facade/dto/db/DBConnectionInfo.py | class DBConnectionInfo:
database = None
username = None
password = None
host = None
port = None
dbtype = None
    def __init__(self, database=None, username=None, password=None, host=None, port=None, dbtype=None):
        """Container for one database connection's settings.

        Fields mirror a section of a resource/config/db/*.cfg file:
        database name, credentials, host/port, and the database type string.
        """
        self.database = database
        self.username = username
        self.password = password
        self.host = host
        self.port = port
        self.dbtype = dbtype
def __str__(self):
return 'DBConnectionInfo[ database=%s, username=%s, password=%s, host=%s, port=%s, dbtype=%s ]' % (
self.database, self.username, self.password, self.host, self.port, self.dbtype)
| {"/test/trans/gateway/qrcode/unionMainSweep.py": ["/test/common/paramUtil.py"], "/util/DBUtil.py": ["/facade/dto/db/DBConnectionInfo.py"]} |
75,498 | powerWishes/chanpay-test | refs/heads/master | /test/trans/gateway/qrcode/unionMainSweep.py | import json
import unittest
import requests
from facade.enum.db import DBName, DBENV
from test.common.paramUtil import is_keyword, analysis_param
from util import DBUtil, ExcelUtil, WorkspaceUtil, MapUtil, SignUtil
# 3.9 QR-code main-sweep (active-scan) purchase
class UnionMainSweepTest(unittest.TestCase):
conn = None
cur = None
# { 1:{'request': '{"accUserNo":"4000","trxChnNo":"2100","trxTyp":"1201","trxId":"17b80f1d1f75adeb","trxAmt":0.01,"mrchntNo":"__createRadomUuid(16)__","mrchntNm":"刘无媚-页面新增企业商户","productDesc":"自动化测试用例1"}',
# 'predict_response': '{"code":"000000","message":"SUCCESS","data":{"rpCd":"000000","rpDesc":"SUCCESS","qrCodeInfo":"__checkStartWith(https://)__"}}',
# 'predict_db_data': {'statusCode': '200', 'data': {'code': '000000', 'message': '__checkNotEmpty()__', 'data': {'rpCd': '000000', 'rpDesc': '__checkNotEmpty()__', 'qrCodeInfo': '__checkStartWith(https://)__'}}},
# 'response': {},
# 'db_data': {},
# 'start_params': '{"channelKey":"0123456789ABCDEFFEDCBA9876543210"}',
# 'id': 1}}
test_data = {}
excel_path_name = '/resource/testdata/trans/gateway/qrcode/unionMainSweep.xlsx'
test_name = '渠道网关--银联主扫'
url = 'http://10.255.0.111:7185/gateway/channel/api/qrCode/mainSweep'
    def __init__(self, methodName='runTest'):
        """Standard TestCase constructor; also logs that this suite started."""
        super().__init__(methodName)
        print("【%s】测试开始执行" % self.test_name)
    # Test preparation:
    # 1. load test data: requests, expected responses, expected DB state
    def setUp(self):
        """Load the Excel-driven test cases before each test run."""
        self.test_data = self.load_test_data()
# 测试流程开始
# 1. 接口访问 并 获取接口访问结果
# 2. 获取数据库相关数据
# 3. 接口返回数据与预测数据对比
# 4. 数据库真实数据与预测数据对比
def test_flow(self):
self.accessInterface()
self.get_db_result()
self.predictResponse()
def tearDown(self):
print("tear_down is Running")
print(self.test_data)
if self.cur is not None:
self.cur.close()
if self.conn is not None:
self.conn.close()
    # Load the test data
    def load_test_data(self):
        """Read test cases from Excel and prepare them for execution.

        For each row: parse the JSON string columns, resolve keyword
        placeholders in the request (e.g. __createEasyUuid(16)__), then
        compute and attach the MD5 signature. Caches and returns the
        resulting {case_id: case_dict} mapping on self.test_data.
        """
        print("【%s】测试用例加载中......" % self.test_name)
        # Read the raw rows from the Excel workbook
        test_cases = ExcelUtil.read_testdata_by_excel(
            WorkspaceUtil.get_root_path() + self.excel_path_name)
        # Convert the JSON string columns into dicts
        for each in test_cases:
            request = json.loads(each['request'])
            each['predict_response'] = json.loads(each['predict_response'])
            each['start_params'] = json.loads(each['start_params'])
            each['predict_db_data'] = json.loads(each['predict_db_data'])
            each['response'] = {}
            each['db_data'] = {}
            # Resolve custom keyword methods inside the request parameters
            for key, value in request.items():
                if is_keyword(value):
                    request[key] = analysis_param(value)
            each['request'] = request
            # Sign the request: MD5 over channel key + key-sorted param values
            sign = SignUtil.sign_api_channel_map(each['start_params']['channelKey'],
                                                 MapUtil.to_tree_map(each['request']))
            each['request']['sign'] = sign
            self.test_data[each['id']] = each
        print("测试用例:", self.test_data)
        print("【%s】测试用例加载完毕" % self.test_name)
        return self.test_data
# 获取数据库内容
def get_db_result(self, delay_second=0):
self.conn = DBUtil.get_connection(DBName.CHANJET_PAY_CHANNEL, DBENV.STABLE)
self.cur = self.conn.cursor()
for key, value in self.test_data.items():
self.cur.execute(r"select * from cp_chn_pay_trans where trans_no = '%s'" % (value['request']['trxId']))
each_result = self.cur.fetchall()
self.test_data[key]['db_data']['channel'] = each_result
    # Call the interface under test
    def accessInterface(self):
        """POST each case's request to self.url; record status code and body.

        Returns self.test_data for convenience.
        """
        for key, value in self.test_data.items():
            # Send the request
            resp = requests.post(url=self.url, json=value['request'])
            # HTTP status code of the call
            self.test_data[key]['response']['statusCode'] = resp.status_code
            # Raw response body text (JSON string, not parsed here)
            self.test_data[key]['response']['data'] = resp.text
        return self.test_data
# 对比返回结果
def compareResponse(self):
for key, value in self.test_data.items():
real_resp = value['response']
pre_resp = value['predict_response']
self.assertMultiLineEqual(real_resp['statusCode'], pre_resp['statusCode'], '接口访问状态测试未通过')
if pre_resp['data'] is None:
self.assertIsNone(real_resp['data'], '接口返回[data]测试不通过')
else:
real_data = real_resp['data']
pre_resp = pre_resp['data']
self.assertMultiLineEqual(real_data['code'], pre_resp['code'], r'接口返回[code]测试不通过')
return self.test_data
if __name__ == '__main__':
    # Run this suite directly via unittest's CLI runner.
    unittest.main()
| {"/test/trans/gateway/qrcode/unionMainSweep.py": ["/test/common/paramUtil.py"], "/util/DBUtil.py": ["/facade/dto/db/DBConnectionInfo.py"]} |
75,499 | powerWishes/chanpay-test | refs/heads/master | /util/ExcelUtil.py | import xlrd
from util import DataConvertUtil, WorkspaceUtil
# Column index -> logical field name for each row of the test-case sheet.
head_map = {0: "request", 1: "predict_response", 2: "predict_db_data", 3: "response", 4: "db_data",
            5: "start_params"}
# Number of columns read from each row.
excel_cols = 6
# Read test cases from an Excel workbook.
# Returned format (one dict per data row):
# [{'request': '...json...', 'predict_response': '...json...',
#   'predict_db_data': '', 'response': '', 'db_data': '',
#   'start_params': '...json...', 'id': 1}]
def read_testdata_by_excel(file_path_name):
    """Load the first sheet of the workbook into a list of row dicts.

    The header row (row 0) is skipped; each data row is mapped through
    head_map and given a sequential 1-based 'id'.

    FIXES: dropped the pointless `global workbook` (no module-level state is
    leaked anymore), renamed the builtin-shadowing `id` local, and re-raise
    with bare `raise` to keep the original traceback.

    Raises:
        Exception: when the workbook cannot be opened or the first sheet has
            no rows/columns.
    """
    try:
        workbook = xlrd.open_workbook(file_path_name)
    except BaseException as e:
        print("Read Excel Error:", e)
        raise
    # Sheet existence check
    sheet_count = workbook.nsheets
    if sheet_count < 1:
        raise Exception("该Excel内容为空,或页数不存在!")
    sheet = workbook.sheet_by_index(0)
    # Row/column sanity checks
    row_count = sheet.nrows
    if row_count < 1:
        raise Exception("起始行数据不存在!")
    col_count = sheet.ncols
    if col_count < 1:
        raise Exception("起始列数据不存在!")
    data = []  # all rows
    case_id = 1  # sequential id generated per data row
    for rowNum in range(1, sheet.nrows):
        row = sheet.row(rowNum)
        row_map = {}  # one row
        for colNum in range(0, excel_cols):
            row_map[head_map[colNum]] = row[colNum].value
        row_map['id'] = case_id
        case_id += 1
        data.append(row_map)
    return data
if __name__ == '__main__':
    # Manual check: dump the parsed test cases of the union main-sweep sheet.
    print(read_testdata_by_excel(
        WorkspaceUtil.get_root_path() + '/resource/testdata/trans/gateway/qrcode/unionMainSweep.xlsx'))
| {"/test/trans/gateway/qrcode/unionMainSweep.py": ["/test/common/paramUtil.py"], "/util/DBUtil.py": ["/facade/dto/db/DBConnectionInfo.py"]} |
75,500 | powerWishes/chanpay-test | refs/heads/master | /test/common/paramUtil.py | import re
import uuid
# Keyword categories recognized in test-data values.
UNKNOW = 0
METHOD = 1  # __abc(a,b)__
REQ_PARAM = 2  # _a_
SPECIAL_PARAM = 3  # *None*
# Custom methods, dispatched to by name from keyword placeholders
class ParamUtil:
    """Helpers that METHOD keywords (__name(args)__) resolve to."""

    def createEasyUuid(self, len='32'):
        """UUID1 hex string without dashes, truncated to *len* characters."""
        return uuid.uuid1().hex[:int(len)]

    def createUuid(self, len='36'):
        """Canonical dashed UUID1 string, truncated to *len* characters."""
        return str(uuid.uuid1())[:int(len)]

    def checkStartWith(self, param, a):
        """True when *param* starts with the prefix *a*."""
        return param.startswith(a)
# Resolve a keyword value, i.e. invoke the custom method it names.
# param: the string to resolve
# data: extra arguments, prepended to the parsed argument list
def analysis_param(param, *data):
    """Resolve keyword placeholders in test data.

    Only METHOD keywords (__name(args)__) are implemented: the named method
    on ParamUtil is looked up and invoked with the parsed arguments.
    REQ_PARAM / SPECIAL_PARAM keywords are recognized but not yet handled
    (returns None, as before); non-keyword values are returned unchanged.
    """
    kwtype = get_keyword_type(param)
    if UNKNOW == kwtype:
        return param
    if METHOD == kwtype:
        paramUtil = ParamUtil()
        method_name = get_method_name(param)
        params = get_args(param)
        if len(data) > 0:
            # BUG FIX: *data* is a tuple while params is a list, so the
            # original `data + params` raised TypeError whenever extra
            # arguments were supplied; also tolerate params being None.
            params = list(data) + (params if params is not None else [])
        method = None
        if method_name is not None and hasattr(paramUtil, method_name):
            method = getattr(paramUtil, method_name)
        else:
            raise Exception('该自定义方法不存在')
        if method_name == 'createEasyUuid':
            if params is None:
                return method()
            else:
                return method(params[0])
        elif method_name == 'createUuid':
            if params is None:
                return method()
            else:
                return method(params[0])
        elif method_name == 'checkStartWith':
            if params is None:
                return method()
            else:
                return method(params[0], params[1])
        else:
            raise Exception('该自定义方法不存在')
    if REQ_PARAM == kwtype:
        pass  # TODO: substitute values taken from the request parameters
    if SPECIAL_PARAM == kwtype:
        pass  # TODO: handle special literal values such as *None*
    # (dead experimental code that re-parsed `param` here was removed; it
    # referenced a non-existent 'createRadomUuid' method)
def is_keyword(param):
    """True when *param* is a string wrapped in keyword markers: __x__, _x_, *x*."""
    if type(param) != type(''):
        return False
    for marker in ('__', '_', '*'):
        if param.startswith(marker) and param.endswith(marker):
            return True
    return False
# Extract the argument list from a keyword string.
# Returned format: ['str1', 'str2', ...]
# Note: returns None when no parenthesized section exists.
def get_args(param):
    """Split the text inside the (greedy) outermost parentheses on commas."""
    found = re.search(r'\(.*\)', param)
    if found is None:
        return None
    inner = found.group(0)[1:-1]
    return inner.split(',')
# Extract the method name from a keyword string.
# Returned format: 'methodName'
# Note: returns None when no method-call pattern exists.
def get_method_name(param):
    """Take the text between the leading '__' and the (greedy) last '('."""
    found = re.search(r'__.*\(', param)
    if found is None:
        return None
    return found.group(0)[2:-1]
def get_keyword_type(param):
    """Classify *param* as METHOD / REQ_PARAM / SPECIAL_PARAM / UNKNOW."""
    if type(param) != type(''):
        return UNKNOW
    for marker, kwtype in (('__', METHOD), ('_', REQ_PARAM), ('*', SPECIAL_PARAM)):
        if param.startswith(marker) and param.endswith(marker):
            return kwtype
    return UNKNOW
def a(*alist):
    # Debug leftover: prints the number of positional arguments received.
    b = len(alist)
    print(b)
if __name__ == '__main__':
    # Leftover manual check of the debug helper.
    a()
| {"/test/trans/gateway/qrcode/unionMainSweep.py": ["/test/common/paramUtil.py"], "/util/DBUtil.py": ["/facade/dto/db/DBConnectionInfo.py"]} |
75,501 | powerWishes/chanpay-test | refs/heads/master | /util/DataConvertUtil.py | from facade.enum.base import DataTypeEnum
def convert(param, dataType):
    """Cast *param* to the Python type named by the DataTypeEnum code.

    Returns None for None or '' input; returns *param* unchanged when
    dataType is None or unrecognized.

    Raises:
        Exception: when the underlying cast fails. FIX: the original
        exception is now chained with `from e` so the real cause isn't lost.
    """
    try:
        if dataType is None:
            return param
        if param is None:
            return None
        if param == '':
            return None
        elif dataType == DataTypeEnum.BOOLEAN:
            # NOTE(review): bool() of any non-empty string (even 'False') is
            # True — preserved as-is; confirm callers pass real booleans/ints.
            return bool(param)
        elif dataType == DataTypeEnum.DOUBLE:
            return float(param)
        elif dataType == DataTypeEnum.INTEGER:
            return int(param)
        elif dataType == DataTypeEnum.LONG:
            # Python 3 has no separate long; same cast as INTEGER.
            return int(param)
        elif dataType == DataTypeEnum.STRING:
            return str(param)
        else:
            return param
    except Exception as e:
        raise Exception("数据转换错误,请检查数据类型列表或数据!") from e
| {"/test/trans/gateway/qrcode/unionMainSweep.py": ["/test/common/paramUtil.py"], "/util/DBUtil.py": ["/facade/dto/db/DBConnectionInfo.py"]} |
75,502 | powerWishes/chanpay-test | refs/heads/master | /util/SignUtil.py | import hashlib
# Channel API signature:
# MD5 over the secret key concatenated with every request value; the dict
# must be an OrderedDict already sorted by key.
def sign_api_channel_map(channelKey, orderedDict):
    """Return the hex MD5 of channelKey + all of *orderedDict*'s values in order."""
    payload = str(channelKey) + ''.join(str(v) for v in orderedDict.values())
    return hashlib.md5(payload.encode('utf-8')).hexdigest()
| {"/test/trans/gateway/qrcode/unionMainSweep.py": ["/test/common/paramUtil.py"], "/util/DBUtil.py": ["/facade/dto/db/DBConnectionInfo.py"]} |
75,503 | powerWishes/chanpay-test | refs/heads/master | /util/DBUtil.py | from configparser import ConfigParser
import psycopg2 as psycopg2
from facade.dto.db.DBConnectionInfo import DBConnectionInfo
from facade.enum.db import DBName, DBENV
from util import WorkspaceUtil
# Directory holding the per-database .cfg files, resolved from the project root.
DB_CONFIG_PATH = WorkspaceUtil.get_root_path()+"/resource/config/db/"
def get_db_config(file_path_name, env):
    """Read one db .cfg file and return a DBConnectionInfo for section *env*.

    Falls back to the file's first section when *env* is None or not present;
    returns None when the file has no sections at all (e.g. missing file).

    FIXES: 'section' is no longer a module-level global, and the previously
    unguarded cp.sections()[0] (IndexError on an empty/missing file) is now
    handled by returning None.
    """
    cp = ConfigParser()
    cp.read(file_path_name)
    if env is not None and cp.has_section(env):
        section = env
    else:
        sections = cp.sections()
        if not sections:
            return None
        section = sections[0]
    username = cp.get(section, 'username')
    password = cp.get(section, 'password')
    host = cp.get(section, 'host')
    port = cp.get(section, 'port')
    database = cp.get(section, 'database')
    dbtype = cp.get(section, 'dbtype')
    db_config = DBConnectionInfo(database, username, password, host, port, dbtype)
    return db_config
def get_connection_by_config(db_config_info):
    """Open a psycopg2 connection from a DBConnectionInfo; None config -> None."""
    if db_config_info is None:
        return None
    conn = psycopg2.connect(database=db_config_info.database, user=db_config_info.username,
                            password=db_config_info.password, host=db_config_info.host, port=db_config_info.port)
    # NOTE(review): db_config_info.dbtype is ignored here — only PostgreSQL
    # is supported by this helper; confirm no other dbtype is configured.
    return conn
def get_connection(db_name, env):
    """Open a connection for the named database config file and env section."""
    db_config = get_db_config(DB_CONFIG_PATH + db_name + ".cfg", env)
    return get_connection_by_config(db_config)
# if __name__ == '__main__':
# a = get_db_config(DB_CONFIG_PATH + DBName.CHANJET_PAY_CHANNEL + ".cfg", "stable")
# with get_connection_by_config(a) as aaa:
# cur = aaa.cursor()
# bbb = cur.execute("select * from cp_chn_pay_trans limit 10")
# ccc = cur.fetchall()
# print(ccc)
# cur.close()
# with get_connection(DBName.CHANJET_PAY_CHANNEL, DBENV.STABLE) as aaa:
# cur = aaa.cursor()
# bbb = cur.execute("select * from cp_chn_pay_trans limit 10")
# ccc = cur.fetchall()
# print(ccc)
# cur.close()
| {"/test/trans/gateway/qrcode/unionMainSweep.py": ["/test/common/paramUtil.py"], "/util/DBUtil.py": ["/facade/dto/db/DBConnectionInfo.py"]} |
75,504 | powerWishes/chanpay-test | refs/heads/master | /facade/enum/db/DBENV.py | STABLE = 'stable'
# Environment section names inside each resource/config/db/*.cfg file.
TEST = 'test'
ONLINE = 'online'
DEV = 'dev' | {"/test/trans/gateway/qrcode/unionMainSweep.py": ["/test/common/paramUtil.py"], "/util/DBUtil.py": ["/facade/dto/db/DBConnectionInfo.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.