seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
16009044244 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Makes an organized git repo of a book folder
"""
from __future__ import print_function
import codecs
import os
from os.path import abspath, dirname
import jinja2
import sh
from .parameters import GITHUB_ORG
class NewFilesHandler():
""" NewFilesHandler - templates and copies additional files to book repos
"""
README_FILENAME = 'README.rst'
def __init__(self, book):
self.book = book
package_loader = jinja2.PackageLoader('gitenberg', 'templates')
self.env = jinja2.Environment(loader=package_loader)
def add_new_files(self):
self.template_readme()
self.travis_files()
self.copy_files()
def template_readme(self):
template = self.env.get_template('README.rst.j2')
readme_text = template.render(
authors=self.book.meta.authors_short(),
**self.book.meta.metadata
)
readme_path = "{0}/{1}".format(
self.book.local_path,
self.README_FILENAME
)
with codecs.open(readme_path, 'w', 'utf-8') as readme_file:
readme_file.write(readme_text)
def travis_files(self):
template = self.env.get_template('.travis.yml')
travis_key = self.book.github_repo.travis_key()
travis_text = template.render({
'epub_title': 'book',
'encrypted_key': travis_key,
'repo_name': self.book.meta._repo,
'repo_owner': GITHUB_ORG
})
fpath = os.path.join(self.book.local_path, ".travis.yml")
with open(fpath, 'w') as f:
f.write(travis_text)
if self.book.github_repo.travis_key():
fpath = os.path.join(self.book.local_path, ".travis.deploy.api_key.txt")
with open(fpath, 'w') as f:
f.write(travis_key)
def copy_files(self):
""" Copy the LICENSE and CONTRIBUTING files to each folder repo
Generate covers if needed. Dump the metadata.
"""
files = [u'LICENSE', u'CONTRIBUTING.rst']
this_dir = dirname(abspath(__file__))
for _file in files:
sh.cp(
'{0}/templates/{1}'.format(this_dir, _file),
'{0}/'.format(self.book.local_path)
)
# copy metadata rdf file
sh.cp(
self.book.meta.rdf_path,
'{0}/'.format(self.book.local_path)
)
if 'GITenberg' not in self.book.meta.subjects:
self.book.meta.metadata['subjects'].append('GITenberg')
if not self.book.meta._version:
self.book.meta.matadata["_version"] = "0.0.1"
self.book.meta.dump_file(os.path.join(self.book.local_path, 'metadata.yaml'))
| mgotliboym/gitberg | gitenberg/make.py | make.py | py | 2,764 | python | en | code | null | github-code | 6 | [
{
"api_name": "jinja2.PackageLoader",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "jinja2.Environment",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "codecs.open",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "parameters.GITHUB... |
73817284026 | """An ext to listen for message events and syncs them to the database."""
import discord
from discord.ext import commands
from sqlalchemy import update
from metricity.bot import Bot
from metricity.config import BotConfig
from metricity.database import async_session
from metricity.exts.event_listeners import _utils
from metricity.models import Message, User
class MessageListeners(commands.Cog):
"""Listen for message events and sync them to the database."""
def __init__(self, bot: Bot) -> None:
self.bot = bot
@commands.Cog.listener()
async def on_message(self, message: discord.Message) -> None:
"""Add a message to the table when one is sent providing the author has accepted."""
if not message.guild:
return
if message.author.bot:
return
if message.guild.id != BotConfig.guild_id:
return
if message.type in (discord.MessageType.thread_created, discord.MessageType.auto_moderation_action):
return
await self.bot.sync_process_complete.wait()
await self.bot.channel_sync_in_progress.wait()
async with async_session() as sess:
if not await sess.get(User, str(message.author.id)):
return
cat_id = message.channel.category.id if message.channel.category else None
if cat_id in BotConfig.ignore_categories:
return
from_thread = isinstance(message.channel, discord.Thread)
await _utils.sync_message(message, sess, from_thread=from_thread)
await sess.commit()
@commands.Cog.listener()
async def on_raw_message_delete(self, message: discord.RawMessageDeleteEvent) -> None:
"""If a message is deleted and we have a record of it set the is_deleted flag."""
async with async_session() as sess:
await sess.execute(update(Message).where(Message.id == str(message.message_id)).values(is_deleted=True))
await sess.commit()
@commands.Cog.listener()
async def on_raw_bulk_message_delete(self, messages: discord.RawBulkMessageDeleteEvent) -> None:
"""If messages are deleted in bulk and we have a record of them set the is_deleted flag."""
async with async_session() as sess:
await sess.execute(update(Message).where(Message.id.in_(messages.message_ids)).values(is_deleted=True))
await sess.commit()
async def setup(bot: Bot) -> None:
"""Load the MessageListeners cog."""
await bot.add_cog(MessageListeners(bot))
| python-discord/metricity | metricity/exts/event_listeners/message_listeners.py | message_listeners.py | py | 2,561 | python | en | code | 39 | github-code | 6 | [
{
"api_name": "discord.ext.commands.Cog",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "metricity.bot.Bot",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "... |
33831413289 | from typing import List
def two_sum(lis: List[int], target: int):
dici = {}
for i, value in enumerate(lis):
objetive = target - value
if objetive in dici:
return [dici[objetive], i]
dici[value] = i
return []
print(two_sum([1, 2, 3, 4, 5, 6], 7))
| R0bertWell/interview_questions | reexercises/two_sum_target.py | two_sum_target.py | py | 298 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 4,
"usage_type": "name"
}
] |
913555112 | from coc import utils
from datetime import datetime
from discord.ext import commands, tasks
class DatabaseBackground(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.update.start()
def cog_unload(self):
self.update.cancel()
@commands.command(name="add_user")
async def add_user(self, ctx, player_tag):
"""Command is used to register a user to the database"""
player_tag = utils.correct_tag(player_tag)
player = await self.bot.coc.get_player(player_tag)
self.bot.dbconn.register_user((player.tag, player.name, player.town_hall, ))
await ctx.send(f"User added: {player.name}")
@tasks.loop(minutes=3.0)
async def update(self):
"""This method updates the database every 3 minutes"""
tags = self.bot.dbconn.get_players()
tag_list = [tag[0] for tag in tags]
async for player in self.bot.coc.get_players(tag_list):
self.bot.dbconn.update_donation((datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
player.tag,
player.get_achievement("Friend in Need").value))
@update.before_loop
async def before_update(self):
"""This method prevents the task from running before the bot is connected"""
await self.bot.wait_until_ready()
def setup(bot):
bot.add_cog(DatabaseBackground(bot))
| wpmjones/coc_sample_bot | cogs/database_bg.py | database_bg.py | py | 1,440 | python | en | code | 14 | github-code | 6 | [
{
"api_name": "discord.ext.commands.Cog",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "coc.utils.correct_tag",
"line_number": 17,
"usage_type": "call"
},
{
"api_name":... |
8779553577 | import requests
import logging
from bs4 import BeautifulSoup
logger = logging.getLogger()
logger.setLevel(level=logging.INFO)
class WikiWorker():
def __init__(self) -> None:
self._url = "https://en.wikipedia.org/wiki/List_of_S%26P_500_companies"
@staticmethod
def _extract_company_symbol(page_html):
soup = BeautifulSoup(page_html,'html.parser')
table = soup.find(id='constituents')
table_rows = table.find_all('tr')
for table_row in table_rows[1:]:
symbol = table_row.find('td').text.strip('\n')
yield symbol
def get_sp_500_companies(self):
response = requests.get(url=self._url)
if response.status_code != 200:
logger.warning('Not able to find companies!')
return []
yield from self._extract_company_symbol(response.text)
if __name__ == '__main__':
wiki = WikiWorker()
counter = 0
for symbol in wiki.get_sp_500_companies():
print(symbol)
counter += 1
if counter > 5:
break
| atula28os/Multithreads | workers/WikiWorker.py | WikiWorker.py | py | 1,125 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "requests.get",
... |
60098854 | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import mv
class HPPs_dispersion():
def __init__(self,filename):
self.name = filename
def ReadData(self):
data_file_y = os.path.join(fr'./dispersion/y/n{self.name}.txt')
data_y = pd.read_csv(data_file_y, sep='\s+', header=None)
data_file_z = os.path.join(fr'./dispersion/z/n{self.name}.txt')
data_z = pd.read_csv(data_file_z, sep='\s+', header=None)
# print(data[0])
return data_y,data_z
def Figure(self):
data_y,data_z = HPPs_dispersion.ReadData(self)
# print(data_z)
plt.figure(dpi=200)
# print(data_y)
plt.title(f'{self.name}_dispersion')
# plt.style.use('seaborn-whitegrid')
plt.scatter(data_y[0], data_y[1],s=3,color='b',label='x-y direction')
plt.scatter(data_z[0], data_z[1],s=3,color='r',label='z direction')
#平滑化处理
# x1_smooth = np.linspace(data_y[0].min(),data_y[0].max())
# y1_smooth = make_interp_spline(data_y[0],data_y[1],x1_smooth)
# plt.plot(x1_smooth,y1_smooth)
#legend
plt.xlabel('q(1/m)')
plt.ylabel('frequency(Hz)')
# Process(target=HPPs_dispersion.Figure(self)).start()
plt.legend()
# plt.show()
# plt.savefig(fr'./PNG/dispersion/{self.name}_dispersion_1e+8.png', dpi=200)
def savefig(self):
plt.savefig(fr'./PNG/dispersion/n{self.name}n.png', dpi=500)
print(fr'{self.name}'+" is saved")
# list = ['BN','BP','AlN','AlP']
list = ['BP','AlN','AlP','BN']
# for item in list:
# a1 =HPPs_dispersion(item)
# a1.ReadData()
# a1.Figure()
# a1.savefig()
# filename = 'BN'
for item in list:
# direction = 'z'
# srcfile_y = f'E:\py_project\Linux_connect\dispersion\bulk\{item}\y\dispersion.txt'
# dstfile_y = f'E:\py_project\Linux_connect\HPPP_plt\dispersion\y/{item}_bulk.txt'
# mv.mycopyfile(srcfile_y,dstfile_y)
# srcfile_z = f'E:\py_project\Linux_connect\dispersion\bulk\{item}\z\dispersion.txt'
# dstfile_z = f'E:\py_project\Linux_connect\HPPP_plt\dispersion\z\{item}_bulk.txt'
# mv.mycopyfile(srcfile_z,dstfile_z)
# plt.figure(dpi=200)
a1 =HPPs_dispersion(item)
a1.ReadData()
a1.Figure()
a1.savefig()
| foreseefy/HPPP | HPPs_dispersion.py | HPPs_dispersion.py | py | 2,318 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_nu... |
16543286247 | from nuitka.containers.OrderedDicts import OrderedDict
from nuitka.Errors import NuitkaOptimizationError
from nuitka.PythonVersions import python_version
from nuitka.utils.InstanceCounters import (
counted_del,
counted_init,
isCountingInstances,
)
from nuitka.Variables import LocalsDictVariable, LocalVariable
from .shapes.BuiltinTypeShapes import tshape_dict
from .shapes.StandardShapes import tshape_unknown
locals_dict_handles = {}
def getLocalsDictType(kind):
if kind == "python2_function_exec":
locals_scope = LocalsDictExecHandle
elif kind == "python_function":
locals_scope = LocalsDictFunctionHandle
elif kind == "python3_class":
locals_scope = LocalsMappingHandle
elif kind == "python2_class":
locals_scope = LocalsDictHandle
elif kind == "module_dict":
locals_scope = GlobalsDictHandle
else:
assert False, kind
return locals_scope
def getLocalsDictHandle(locals_name, kind, owner):
# Duplicates are bad and cannot be tolerated.
if locals_name in locals_dict_handles:
raise NuitkaOptimizationError(
"duplicate locals name",
locals_name,
kind,
owner.getFullName(),
owner.getCompileTimeFilename(),
locals_dict_handles[locals_name].owner.getFullName(),
locals_dict_handles[locals_name].owner.getCompileTimeFilename(),
)
locals_dict_handles[locals_name] = getLocalsDictType(kind)(
locals_name=locals_name, owner=owner
)
return locals_dict_handles[locals_name]
class LocalsDictHandleBase(object):
# TODO: Might remove some of these later, pylint: disable=too-many-instance-attributes
__slots__ = (
"locals_name",
# TODO: Specialize what the kinds really use what.
"variables",
"local_variables",
"providing",
"mark_for_propagation",
"prevented_propagation",
"propagation",
"owner",
"complete",
)
@counted_init
def __init__(self, locals_name, owner):
self.locals_name = locals_name
self.owner = owner
# For locals dict variables in this scope.
self.variables = {}
# For local variables in this scope.
self.local_variables = {}
self.providing = OrderedDict()
# Can this be eliminated through replacement of temporary variables, or has
# e.g. the use of locals prevented this, which it should in classes.
self.mark_for_propagation = False
self.propagation = None
self.complete = False
if isCountingInstances():
__del__ = counted_del()
def __repr__(self):
return "<%s of %s>" % (self.__class__.__name__, self.locals_name)
def getName(self):
return self.locals_name
def makeClone(self, new_owner):
count = 1
# Make it unique.
while 1:
locals_name = self.locals_name + "_inline_%d" % count
if locals_name not in locals_dict_handles:
break
count += 1
result = self.__class__(locals_name=locals_name, owner=new_owner)
variable_translation = {}
# Clone variables as well.
for variable_name, variable in self.variables.items():
new_variable = variable.makeClone(new_owner=new_owner)
variable_translation[variable] = new_variable
result.variables[variable_name] = new_variable
for variable_name, variable in self.local_variables.items():
new_variable = variable.makeClone(new_owner=new_owner)
variable_translation[variable] = new_variable
result.local_variables[variable_name] = new_variable
result.providing = OrderedDict()
for variable_name, variable in self.providing.items():
if variable in variable_translation:
new_variable = variable_translation[variable]
else:
new_variable = variable.makeClone(new_owner=new_owner)
variable_translation[variable] = new_variable
result.providing[variable_name] = new_variable
return result, variable_translation
@staticmethod
def getTypeShape():
return tshape_dict
@staticmethod
def hasShapeDictionaryExact():
return True
def getCodeName(self):
return self.locals_name
@staticmethod
def isModuleScope():
return False
@staticmethod
def isClassScope():
return False
@staticmethod
def isFunctionScope():
return False
@staticmethod
def isUnoptimizedFunctionScope():
return False
def getProvidedVariables(self):
return self.providing.values()
def registerProvidedVariable(self, variable):
variable_name = variable.getName()
self.providing[variable_name] = variable
def unregisterProvidedVariable(self, variable):
"""Remove provided variable, e.g. because it became unused."""
variable_name = variable.getName()
if variable_name in self.providing:
del self.providing[variable_name]
registerClosureVariable = registerProvidedVariable
unregisterClosureVariable = unregisterProvidedVariable
def hasProvidedVariable(self, variable_name):
"""Test if a variable is provided."""
return variable_name in self.providing
def getProvidedVariable(self, variable_name):
"""Test if a variable is provided."""
return self.providing[variable_name]
def getLocalsRelevantVariables(self):
"""The variables relevant to locals."""
return self.providing.values()
def getLocalsDictVariable(self, variable_name):
if variable_name not in self.variables:
result = LocalsDictVariable(owner=self, variable_name=variable_name)
self.variables[variable_name] = result
return self.variables[variable_name]
# TODO: Have variable ownership moved to the locals scope, so owner becomes not needed here.
def getLocalVariable(self, owner, variable_name):
if variable_name not in self.local_variables:
result = LocalVariable(owner=owner, variable_name=variable_name)
self.local_variables[variable_name] = result
return self.local_variables[variable_name]
@staticmethod
def preventLocalsDictPropagation():
pass
@staticmethod
def isPreventedPropagation():
return False
def markForLocalsDictPropagation(self):
self.mark_for_propagation = True
def isMarkedForPropagation(self):
return self.mark_for_propagation
def allocateTempReplacementVariable(self, trace_collection, variable_name):
if self.propagation is None:
self.propagation = OrderedDict()
if variable_name not in self.propagation:
provider = trace_collection.getOwner()
self.propagation[variable_name] = provider.allocateTempVariable(
temp_scope=None, name=self.getCodeName() + "_key_" + variable_name
)
return self.propagation[variable_name]
def getPropagationVariables(self):
if self.propagation is None:
return ()
return self.propagation
def finalize(self):
# Make it unusable when it's become empty, not used.
self.owner.locals_scope = None
del self.owner
del self.propagation
del self.mark_for_propagation
for variable in self.variables.values():
variable.finalize()
for variable in self.local_variables.values():
variable.finalize()
del self.variables
del self.providing
def markAsComplete(self, trace_collection):
self.complete = True
self._considerUnusedUserLocalVariables(trace_collection)
self._considerPropagation(trace_collection)
# TODO: Limited to Python2 classes for now, more overloads need to be added, this
# ought to be abstract and have variants with TODOs for each of them.
@staticmethod
def _considerPropagation(trace_collection):
"""For overload by scope type. Check if this can be replaced."""
def onPropagationComplete(self):
self.variables = {}
self.mark_for_propagation = False
def _considerUnusedUserLocalVariables(self, trace_collection):
"""Check scope for unused variables."""
provided = self.getProvidedVariables()
removals = []
for variable in provided:
if (
variable.isLocalVariable()
and not variable.isParameterVariable()
and variable.getOwner() is self.owner
):
empty = trace_collection.hasEmptyTraces(variable)
if empty:
removals.append(variable)
for variable in removals:
self.unregisterProvidedVariable(variable)
trace_collection.signalChange(
"var_usage",
self.owner.getSourceReference(),
message="Remove unused local variable '%s'." % variable.getName(),
)
class LocalsDictHandle(LocalsDictHandleBase):
"""Locals dict for a Python class with mere dict."""
__slots__ = ()
@staticmethod
def isClassScope():
return True
@staticmethod
def getMappingValueShape(variable):
# We don't yet track dictionaries, let alone mapping values.
# pylint: disable=unused-argument
return tshape_unknown
def _considerPropagation(self, trace_collection):
if not self.variables:
return
for variable in self.variables.values():
for variable_trace in variable.traces:
if variable_trace.inhibitsClassScopeForwardPropagation():
return
trace_collection.signalChange(
"var_usage",
self.owner.getSourceReference(),
message="Forward propagate locals dictionary.",
)
self.markForLocalsDictPropagation()
class LocalsMappingHandle(LocalsDictHandle):
"""Locals dict of a Python3 class with a mapping."""
__slots__ = ("type_shape",)
# TODO: Removable condition once Python 3.3 support is dropped.
if python_version >= 0x340:
__slots__ += ("prevented_propagation",)
def __init__(self, locals_name, owner):
LocalsDictHandle.__init__(self, locals_name=locals_name, owner=owner)
self.type_shape = tshape_unknown
if python_version >= 0x340:
self.prevented_propagation = False
def getTypeShape(self):
# TODO: Make mapping available for this.
return self.type_shape
def setTypeShape(self, type_shape):
self.type_shape = type_shape
def hasShapeDictionaryExact(self):
return self.type_shape is tshape_dict
if python_version >= 0x340:
def markAsComplete(self, trace_collection):
# For this run, it cannot be done yet.
if self.prevented_propagation:
# False alarm, this is available.
self.prevented_propagation = False
return
self.complete = True
def preventLocalsDictPropagation(self):
self.prevented_propagation = True
def isPreventedPropagation(self):
return self.prevented_propagation
def _considerPropagation(self, trace_collection):
if not self.variables:
return
if self.type_shape is not tshape_dict:
return
for variable in self.variables.values():
for variable_trace in variable.traces:
if variable_trace.inhibitsClassScopeForwardPropagation():
return
trace_collection.signalChange(
"var_usage",
self.owner.getSourceReference(),
message="Forward propagate locals dictionary.",
)
self.markForLocalsDictPropagation()
@staticmethod
def isClassScope():
return True
class LocalsDictExecHandle(LocalsDictHandleBase):
"""Locals dict of a Python2 function with an exec."""
__slots__ = ("closure_variables",)
def __init__(self, locals_name, owner):
LocalsDictHandleBase.__init__(self, locals_name=locals_name, owner=owner)
self.closure_variables = None
@staticmethod
def isFunctionScope():
return True
@staticmethod
def isUnoptimizedFunctionScope():
return True
def getLocalsRelevantVariables(self):
if self.closure_variables is None:
return self.providing.values()
else:
return [
variable
for variable in self.providing.values()
if variable not in self.closure_variables
]
# TODO: What about the ".0" variety, we used to exclude it.
def registerClosureVariable(self, variable):
self.registerProvidedVariable(variable)
if self.closure_variables is None:
self.closure_variables = set()
self.closure_variables.add(variable)
def unregisterClosureVariable(self, variable):
self.unregisterProvidedVariable(variable)
variable_name = variable.getName()
if variable_name in self.providing:
del self.providing[variable_name]
class LocalsDictFunctionHandle(LocalsDictHandleBase):
"""Locals dict of a Python3 function or Python2 function without an exec."""
__slots__ = ()
@staticmethod
def isFunctionScope():
return True
class GlobalsDictHandle(LocalsDictHandleBase):
__slots__ = ("escaped",)
def __init__(self, locals_name, owner):
LocalsDictHandleBase.__init__(self, locals_name=locals_name, owner=owner)
self.escaped = False
@staticmethod
def isModuleScope():
return True
def markAsEscaped(self):
self.escaped = True
def isEscaped(self):
return self.escaped
| Nuitka/Nuitka | nuitka/nodes/LocalsScopes.py | LocalsScopes.py | py | 14,085 | python | en | code | 10,019 | github-code | 6 | [
{
"api_name": "nuitka.Errors.NuitkaOptimizationError",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "nuitka.containers.OrderedDicts.OrderedDict",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "nuitka.utils.InstanceCounters.counted_init",
"line_number": ... |
41119941323 | import numpy as np
import funcs
from utils import fs, unison_shuffled_copies
# from matplotlib import pyplot as plt
# import random
# from utils import *
# import scipy.io as sio
# from copy import deepcopy
LR = 2
LR_DECAY = .9999
MIN_LR = 0.000000001
DEBUG = False
class Layer:
def __init__(self, in_dim, out_dim):
np.random.seed(42)
self.weights = np.random.normal(size=(in_dim, out_dim), loc=0.0, scale=1.0 )
self.bias= np.random.normal(size=(out_dim, 1), loc=0.0, scale=1.0 )
self.Y = np.zeros((out_dim, 1)) #todo: may need to switch dims order
self.X = None
self.df_dtheta = None
def dy_dw_t_v(self, v, act_tag):
W, b, x = self.weights, self.b, self.X
output = act_tag((W @ x) + b)
output = output * v
output = output * x.T
class NN:
def __init__(self, layer_dims, act=funcs.tanh, act_tag=funcs.tanh_tag, lr=LR):
self.act = act
self.d_act = act_tag
self.lr = lr
self.num_layers = len(layer_dims) - 1
self.layers = [None] * self.num_layers
for i in range(0, self.num_layers):
self.layers[i] = Layer(layer_dims[i], layer_dims[i+1])
def predict(self, X):
my_print("\n\n********** Predict() **********")
my_print(f"input: {X}")
output = X
#propogate data forward through the layers
for i in range(0, self.num_layers):
self.layers[i].X = output
my_print(f"\nlayer: {i}")
my_print(f"\tW: {self.layers[i].weights}")
output = (output @ self.layers[i].weights)
my_print(f"\toutput: {output}")
#output += self.layers[i].bias.T
f = funcs.softmax if (i == self.num_layers - 1) else self.act
f_str = "softmax" if (i == self.num_layers - 1) else "self.act"
output = f(output)
my_print(f"\t\t{f_str}(output): {output}")
self.layers[i].Y = output
return output
def learn(self, expected, data):
my_print("\n\n********** Learn() **********")
pred = self.layers[-1].Y
my_print(f"input: {data}\n")
my_print(f"pred: {pred}\n")
my_print(f"real: {expected}\n")
err = (pred - expected).T
for i in range(2, self.num_layers + 2):
my_print(f"\ni: {i}")
my_print(f"\terr: {err}")
d_f = funcs.grad_softmax_old if (i == 2) else self.d_act
d_f_str = "grad_softmax" if (i == 2) else "self.d_act"
my_print(f"\td_f: {d_f_str}")
input = self.layers[self.num_layers - i].Y if (i < self.num_layers + 1) else data
output = self.layers[self.num_layers - i + 1].Y
my_print(f"\tinput: {input}\n")
my_print(f"\toutput: {output}\n")
a = input.T
b = (err.T * d_f(output))
my_print(f"\ta: {a.shape}, b: {b.shape}")
dW = -self.lr * (err @ input)#(a @ b)
dB = -(self.lr) * np.mean(b, axis=0, keepdims=True)
my_print(f"\tdW:\n{dW}")
err = self.layers[self.num_layers - i + 1].weights @ err
my_print(f"\tW before update:\n{self.layers[self.num_layers - i + 1].weights}")
self.layers[self.num_layers - i + 1].weights += dW.T
my_print(f"\tW after update:\n{self.layers[self.num_layers - i + 1].weights}")
#self.layers[self.num_layers - i + 1].bias += dB.T
self.lr = max(LR_DECAY * self.lr, MIN_LR)
def train(self, inputs, labels, mini_batch_size, batch_size, num_epochs=1):
batch_err = 0
num_correct, total = 0, 0
p_stats = np.zeros((labels.shape[1]))
r_stats = np.zeros((labels.shape[1]))
epoch_acc = 0
for epoch in range(num_epochs):
inputs, labels = unison_shuffled_copies(inputs, labels)
print(f"---------- Epoch #{epoch} ----------")
for i in range(0, len(inputs), mini_batch_size):
input = inputs[i:i+mini_batch_size]
expected = labels[i:i+mini_batch_size]
pred = self.predict(input)
batch_err += get_error(pred, expected)
num_correct_i, total_i = accuracy(pred, expected)
num_correct += num_correct_i
total += total_i
epoch_acc += num_correct_i
p_stats_i, r_stats_i = stats(pred, expected)
p_stats += p_stats_i
r_stats += r_stats_i
self.learn(expected, input)
if (i + mini_batch_size) % batch_size == 0:
print(f"{int(i/batch_size)}\tlr: {fs(self.lr)}\terr: {fs(batch_err/batch_size)}\tacc: {num_correct}/{total}")#\tps: {p_stats}\trs: {r_stats}")
batch_err = 0
num_correct, total = 0, 0
p_stats = np.zeros((labels.shape[1]))
r_stats = np.zeros((labels.shape[1]))
print(f"epoch acc: {epoch_acc} / {len(inputs)}\t({int((epoch_acc * 100)/len(inputs))}%)")
epoch_acc = 0
self.lr = LR
def accuracy(pred, real):
num_correct = 0
total = len(pred)
for i in range(total):
if (np.argmax(pred[i]) == np.argmax(real[i])):
num_correct += 1
return num_correct, total
def print_pred_real_acc(pred, real):
print("*************")
print("pred:")
print(pred)
print("\nreal:")
print(real)
print(f"acc: {accuracy(pred, real)}")
print("*************")
def stats(preds, reals):
p_stats = [0] * preds.shape[1]
r_stats = [0] * reals.shape[1]
for i in range(preds.shape[0]):
pred = np.argmax(preds[i])
p_stats[pred] += 1
real = np.argmax(reals[i])
r_stats[real] += 1
return np.array(p_stats), np.array(r_stats)
def get_error(pred, real):
output = pred - real
output = output * output
output = np.sum(np.sum(output))
# print(f"pred: {pred}\nreal: {real}\nerr: {output}")
return output
def foo(v):
v1, v2, v3 = v[0]
if (v1 + v2 > v3):
return np.array([[0, 1]])
else:
return np.array([[1, 0]])
def my_print(s):
if DEBUG:
print(s) | eladfeld/deepLearningFirstAssingnment | code/NN2.py | NN2.py | py | 6,346 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.random.seed",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.normal",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.random"... |
32581486001 | import pickle
import pennylane as qml
from pennylane import numpy as np
from math import pi
from ChemModel import translator, quantum_net
from Arguments import Arguments
# load molecular datasets (OH: 12 qubits)
# OHdatasets = qml.data.load("qchem", molname="OH", basis="STO-3G", bondlength=0.9)
# OHdata = OHdatasets[0]
# hamiltonian = OHdata.hamiltonian
# print(OHdata.molecule)
# print("molecular dataset used: {}".format(OHdata))
def chemistry(design):
np.random.seed(42)
args = Arguments()
symbols = ["O", "H"]
coordinates = np.array([0.0, 0.0, 0.0, 0.45, -0.1525, -0.8454])
# Building the molecular hamiltonian
hamiltonian, qubits = qml.qchem.molecular_hamiltonian(symbols, coordinates, charge=1)
dev = qml.device("lightning.qubit", wires=args.n_qubits)
@qml.qnode(dev, diff_method="adjoint")
def cost_fn(theta):
quantum_net(theta, design)
return qml.expval(hamiltonian)
print(hamiltonian)
energy = []
for i in range(5):
q_params = 2 * pi * np.random.rand(design['layer_repe'] * args.n_qubits * 2)
opt = qml.GradientDescentOptimizer(stepsize=0.4)
for n in range(50):
q_params, prev_energy = opt.step_and_cost(cost_fn, q_params)
print(f"--- Step: {n}, Energy: {cost_fn(q_params):.8f}")
energy.append(cost_fn(q_params))
metrics = np.mean(energy)
report = {'energy': metrics}
print(metrics)
return report
if __name__ == '__main__':
# with open('data/chemistry_dataset', 'rb') as json_data:
# data = pickle.load(json_data)
net = [1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 4, 3, 4, 3, 1, 0, 1, 2, 3, 4, 5]
design = translator(net)
report = chemistry(design)
| katiexu/QC_Contest_test | chemistryOH.py | chemistryOH.py | py | 1,791 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pennylane.numpy.random.seed",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pennylane.numpy.random",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pennylane.numpy",
"line_number": 18,
"usage_type": "name"
},
{
"api_name"... |
71261714748 | # -*- coding: utf-8 -*-
__all__ = ('tianshou_imitation_policy',)
from utils.vec_data import VecData
from torch import nn
import torch
import gym
import numpy as np
from tianshou.data import Batch, to_torch
class tianshou_imitation_policy(nn.Module):
    """Supervised (imitation-learning) policy trained from expert actions.

    Trains `network` on ground-truth actions stored in a tianshou replay
    buffer. Three modes:
      'pi' -- behaviour cloning: cross-entropy between masked logits and
              the expert action.
      'q'  -- reward-weighted likelihood of the expert action.
      'v'  -- 5-way classification of the episode reward into value
              buckets, with gradients accumulated over 5 update() calls.
    """
    def __init__(self, network, lr, weight_decay, mode='pi'):
        """network: torch module producing logits; lr / weight_decay:
        Adam hyper-parameters; mode: one of 'pi', 'q', 'v'."""
        assert mode in ['pi', 'q', 'v']
        super().__init__()
        self._grad_step = 0
        # Fix: np.bool was a deprecated alias of the builtin and is removed
        # in NumPy >= 1.24; the builtin bool dtype is equivalent.
        self.observation_space = gym.spaces.Box(0, 1, shape=VecData.state_shape[1:], dtype=bool)
        self.mode = mode
        self.action_space = gym.spaces.Discrete(VecData.action_shape[1])
        self.network = network
        self.device = 'cpu'
        # Apply weight decay only to weight matrices; biases and batch-norm
        # parameters are exempt.  Materialized as lists so the emptiness
        # assertion below is meaningful (the original asserted on a
        # generator, which is always truthy).
        weight_decay_list = [param for name, param in network.named_parameters() if name[-4:] != 'bias' and "bn" not in name]
        no_decay_list = [param for name, param in network.named_parameters() if name[-4:] == 'bias' or "bn" in name]
        assert no_decay_list
        parameters = [{'params': weight_decay_list},
                      {'params': no_decay_list, 'weight_decay': 0.}]
        self.optim = torch.optim.Adam(parameters, lr=lr, weight_decay=weight_decay)
        for m in network.modules():
            if isinstance(m, nn.Linear):
                nn.init.orthogonal_(m.weight)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
    def to(self, device):
        # Remember the target device so update() can move batches onto it.
        self.device = device
        return super().to(device)
    def load(self, path):
        """Load a checkpoint saved with torch.save onto the current device."""
        state_dict = torch.load(path, map_location=self.device)
        self.load_state_dict(state_dict)
    def forward(self, batch, state=None, mask=None):
        """Without a mask, act as an oracle returning the expert action;
        with a mask, return logits with invalid actions pushed below the
        minimum valid logit."""
        if mask is None:
            return Batch(act=batch.obs.gt_action, state=state)
        logits = self.network(batch)
        # Invalid actions (mask == 1) get logits strictly below every valid one.
        return logits + (logits.min() - logits.max() - 20) * mask
    def post_process_fn(self, batch, buffer, indices):
        # Mirror tianshou's prioritized-replay weight-update hook.
        if hasattr(buffer, "update_weight") and hasattr(batch, "weight"):
            buffer.update_weight(indices, batch.weight)
    def update(self, sample_size, buffer, val=False):
        """One supervised update (or validation pass when val=True).

        Returns a dict of logged scalars; keys depend on self.mode.
        """
        batch, indices = buffer.sample(sample_size)
        if type(batch) is dict:
            batch = Batch(obs=batch)
        # assumes obs flattens to (N, 145, 4, 9) feature planes -- TODO confirm
        obs = to_torch(batch.obs.obs, device=self.device).float().view(-1, 145, 4, 9)
        mask = (~to_torch(batch.obs.mask, device=self.device)).float()
        gt_action = to_torch(batch.obs.gt_action, device=self.device).long()
        rew = to_torch(batch.obs.rew, device=self.device).float()
        losses = []
        if self.mode == 'pi':
            if val:
                logits = self(obs, mask=mask)
                loss = nn.CrossEntropyLoss()(logits, gt_action)
                losses.append(loss.item())
            else:
                for i in range(1):
                    self.optim.zero_grad()
                    logits = self(obs, mask=mask)
                    loss = nn.CrossEntropyLoss()(logits, gt_action)
                    loss.backward()
                    self.optim.step()
                    losses.append(loss.item())
                self.post_process_fn(batch, buffer, indices)
            return {("val-loss" if val else "loss"): losses, "eq-ratio": (logits.detach().argmax(dim=-1) == gt_action).float().mean().item()}
        elif self.mode == 'q':
            # Map raw rewards to positive weights for the expert action.
            norm_rew = (-0.2 * (rew + rew.mean() * 3)).exp()
            norm_rew = 0.4 * norm_rew / (1 + norm_rew).pow(2)
            # NOTE(review): .squeeze(0) leaves shape (N, 1); multiplied with
            # norm_rew of shape (N,) this broadcasts to (N, N) -- was
            # .squeeze(1) intended?  Preserved as-is.
            if val:
                with torch.no_grad():
                    logits = self(obs, mask=mask).squeeze(1)
                    logit = torch.gather(logits.log_softmax(dim=-1), 1, gt_action.unsqueeze(1)).squeeze(0)
                    loss = (-logit.exp() * norm_rew).mean()
                    losses.append(loss.item())
            else:
                for i in range(1):
                    self.optim.zero_grad()
                    logits = self(obs, mask=mask).squeeze(1)
                    logit = torch.gather(logits.log_softmax(dim=-1), 1, gt_action.unsqueeze(1)).squeeze(0)
                    loss = (-logit.exp() * norm_rew).mean()
                    loss.backward()
                    self.optim.step()
                    losses.append(loss.item())
                self.post_process_fn(batch, buffer, indices)
            return {("val-loss" if val else "loss"): losses, "eq-ratio": (logits.detach().argmax(dim=-1) == gt_action).float().mean().item()}
        elif self.mode == 'v':
            # rew = rew * 0.1
            # rew = rew.sgn()
            # Reward buckets: (-inf,-8) -> 0, [-8,0) -> 1, [0,32) -> 2,
            # [32,50) -> 3, [50,inf) -> 4.
            if val:
                with torch.no_grad():
                    logits = self.network(obs)
                    category = torch.empty_like(rew).long()
                    category[:] = 4
                    category[rew < 50] = 3
                    category[rew < 32] = 2
                    category[rew < 0] = 1
                    category[rew < -8] = 0
                    correct_ratio = (logits.argmax(dim=-1) == category).float().mean()
                    win_ratio = (logits.argmax(dim=-1)[category > 2] == category[category > 2]).float().mean()
                    loss = nn.CrossEntropyLoss()(logits, category)
                    # loss = (logits.squeeze(1) - rew * 0.1).pow(2).mean()
                    losses.append(loss.item())
            else:
                # Gradient accumulation: zero every 5th step, step every 5th.
                for i in range(1):
                    if self._grad_step % 5 == 0:
                        self.optim.zero_grad()
                    logits = self.network(obs)
                    category = torch.empty_like(rew).long()
                    category[:] = 4
                    category[rew < 50] = 3
                    category[rew < 32] = 2
                    category[rew < 0] = 1
                    category[rew < -8] = 0
                    correct_ratio = (logits.argmax(dim=-1) == category).float().mean()
                    win_ratio = (logits.argmax(dim=-1)[category > 2] == category[category > 2]).float().mean()
                    loss = nn.CrossEntropyLoss()(logits, category)
                    # loss = (logits.squeeze(1) - rew * 0.1).pow(2).mean()
                    loss.backward()
                    losses.append(loss.item())
                self.post_process_fn(batch, buffer, indices)
                self._grad_step += 1
                if self._grad_step % 5 == 0:
                    self.optim.step()
            # return {("val-loss" if val else "loss"): losses}
            return {("val-loss" if val else "loss"): losses, "cr": [correct_ratio.item()] * 10, "wr": [win_ratio.item()] * 10}
    def map_action(self, action):
        """Identity: network outputs are already environment actions."""
        return action
| illusive-chase/ChineseStandardMahjong | learning/imitation.py | imitation.py | py | 6,685 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "gym.spaces.Box",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "gym.spaces",
"line_n... |
44092813645 | from gym_compete_rllib import create_env
from ray.tune.registry import ENV_CREATOR, _global_registry
def test_create_env():
    """Smoke-test the registered 'multicomp' env: spaces, players, rollouts."""
    env_config = {
        'with_video': False,
        "SingleAgentToMultiAgent": False,
        "env_name": "multicomp/YouShallNotPassHumans-v0",
    }
    env = _global_registry.get(ENV_CREATOR, "multicomp")(env_config)

    assert env.n_policies == 2
    assert env.observation_space.shape == (380,)
    assert env.action_space.shape == (17,)
    assert env.player_names == ['player_1', 'player_2']

    def verify_obs(obs, error_on_empty=True):
        # Observations must be a per-player dict of correctly-shaped arrays.
        assert isinstance(obs, dict)
        if error_on_empty:
            assert set(obs.keys()) == set(env.player_names), f"{obs.keys()} {env.player_names}"
        assert all(o.shape == env.observation_space.shape for o in obs.values())

    def run_episode():
        verify_obs(env.reset())
        while True:
            joint_action = {name: env.action_space.sample() for name in env.player_names}
            obs, reward, done, info = env.step(joint_action)
            verify_obs(obs, error_on_empty=False)
            if done['__all__']:
                return

    for _ in range(10):
        run_episode()
# Allow running this smoke test directly, without pytest.
if __name__ == '__main__':
    test_create_env()
{
"api_name": "ray.tune.registry._global_registry.get",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "ray.tune.registry.ENV_CREATOR",
"line_number": 6,
"usage_type": "argument"
},
{
"api_name": "ray.tune.registry._global_registry",
"line_number": 6,
"usage_type... |
28585638952 | from sklearn.model_selection import train_test_split
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
from datetime import datetime
import tensorflow_datasets as tfds
import bert
from bert import run_classifier
from bert import optimization
from bert import tokenization
from bert import run_classifier_with_tfhub
#https://github.com/google-research/bert.git
import sys
from tensorflow import keras
import os
import re
from transformers import *
import numpy as np
from tensorflow.python.lib.io import file_io
import pickle
import gc
import threading
import logging
import argparse
"""
Usage
>> python -u runBert.py @args.txt
python -u runBert.py @params_model1.txt
------Example args.txt file -----
--tpuAddress node-3
--tpuZone us-central1-f
--outputDir test
--seqLen 15
--modelHub https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1
--batchSize 64
--epochs 40
--dropout .9
"""
####################################################
############ Setting output directory ##############
####################################################
def getDir(bucket, output_dir):
    """Build the gs:// URI for `output_dir` inside `bucket`."""
    return 'gs://%s/%s' % (bucket, output_dir)
def setUp_output_dir():
    """Resolve OUTPUT_DIR to its gs:// URI and (re)create it.

    Bug fix: OUTPUT_DIR is both read and reassigned here, so without a
    `global` declaration the read raises UnboundLocalError -- and the rest
    of the script relies on the module-level OUTPUT_DIR being updated to
    the bucket path.
    """
    global OUTPUT_DIR
    DO_DELETE = True
    USE_BUCKET = True

    if USE_BUCKET:
        OUTPUT_DIR = getDir(BUCKET, OUTPUT_DIR)
        if DO_DELETE:
            try:
                tf.gfile.DeleteRecursively(OUTPUT_DIR)
            except Exception:
                # doesn't matter if the directory didn't exist
                pass
    tf.gfile.MakeDirs(OUTPUT_DIR)
    print('***** Model output directory: {} *****'.format(OUTPUT_DIR))
#################################################
############# Load Data set #####################
#################################################
def loadPdData(gsPath):
    """Read a tab-separated file (local path or gs:// URI) into a DataFrame."""
    return pd.read_csv(gsPath, delimiter="\t")
def saveToGcloud(path, data, isPandas=False):
    '''Persist `data` to `path` (gcloud or local).

    DataFrames are written as TSV; everything else is pickled.

    Bug fix: the pickle branch must open the file in *binary* mode --
    pickle.dump() writes bytes and raises TypeError on a text-mode handle
    (the original used mode='w').
    '''
    if isPandas:
        data.to_csv(path, index=False, sep="\t")
    else:
        with file_io.FileIO(path, mode='wb') as f:
            pickle.dump(data, f)
def readFromGcloud(path, isPandas=False):
    """Load an object from `path`: a TSV DataFrame when isPandas, otherwise
    a pickled object read in binary mode."""
    if isPandas:
        return pd.read_csv(path, sep="\t")
    with file_io.FileIO(path, mode='rb') as f:
        return pickle.load(f)
def worker_downloadTestData(name):
    """
    Worker so we can download test data asynchronously (run in a Thread).

    Bug fix: the downloaded features must be stored in the *global*
    `test_features` -- the original assigned a local `train_features`,
    silently discarding the download.
    """
    logging.info("Thread %s: starting for loading test data", name)
    global test_features
    test_features = readFromGcloud(TEST_TFRecord_PATH)
    logging.info("Thread %s: finishing for loading test data", name)
#######################################################
############# Creating a model #######################
#######################################################
def create_model(is_training, input_ids, input_mask, segment_ids, labels,
                 num_labels, bert_hub_module_handle, dropout):
  """Creates a classification model.

  Loads BERT from TF-Hub and adds one dense softmax layer on top of the
  pooled [CLS] output.

  Args:
    is_training: build the graph in training mode (enables dropout).
    input_ids, input_mask, segment_ids: standard BERT input tensors.
    labels: integer class ids in [0, num_labels).
    num_labels: number of output classes.
    bert_hub_module_handle: TF-Hub handle of the BERT encoder.
    dropout: probability of KEEPING a unit (passed to keep_prob below).

  Returns:
    (loss, per_example_loss, logits, probabilities)
  """
  tags = set()
  if is_training:
    tags.add("train")
  bert_module = hub.Module(bert_hub_module_handle, tags=tags, trainable=True)
  bert_inputs = dict(
      input_ids=input_ids,
      input_mask=input_mask,
      segment_ids=segment_ids)
  bert_outputs = bert_module(
      inputs=bert_inputs,
      signature="tokens",
      as_dict=True)
  # Pooled [CLS] representation; shape (batch, hidden_size).
  output_layer = bert_outputs["pooled_output"]
  hidden_size = output_layer.shape[-1].value
  output_weights = tf.get_variable(
      "output_weights", [num_labels, hidden_size],
      initializer=tf.truncated_normal_initializer(stddev=0.02))
  output_bias = tf.get_variable(
      "output_bias", [num_labels], initializer=tf.zeros_initializer())
  with tf.variable_scope("loss"):
    if is_training:
      # NOTE: keep_prob semantics -- `dropout` is the keep probability,
      # not the drop rate.
      output_layer = tf.nn.dropout(output_layer, keep_prob=dropout)
    logits = tf.matmul(output_layer, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    probabilities = tf.nn.softmax(logits, axis=-1)
    log_probs = tf.nn.log_softmax(logits, axis=-1)
    # Cross-entropy against one-hot labels, averaged over the batch.
    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)
  return (loss, per_example_loss, logits, probabilities)
def model_fn_builder(num_labels, learning_rate, num_train_steps,
                     num_warmup_steps, use_tpu, bert_hub_module_handle, dropout):
  """Returns `model_fn` closure for TPUEstimator.

  The closure captures the hyper-parameters and builds TRAIN / EVAL /
  PREDICT specs around create_model().
  """
  def model_fn(features, labels, mode, params):
    """The `model_fn` for TPUEstimator."""
    tf.logging.info("*** Features ***")
    for name in sorted(features.keys()):
      tf.logging.info("  name = %s, shape = %s" % (name, features[name].shape))
    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]
    label_ids = features["label_ids"]
    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
    (total_loss, per_example_loss, logits, probabilities) = create_model(
        is_training, input_ids, input_mask, segment_ids, label_ids, num_labels,
        bert_hub_module_handle, dropout)
    output_spec = None
    if mode == tf.estimator.ModeKeys.TRAIN:
      # BERT's AdamW with linear warmup/decay schedule.
      train_op = optimization.create_optimizer(
          total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op)
    elif mode == tf.estimator.ModeKeys.EVAL:
      def metric_fn(per_example_loss, label_ids, logits):
        predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
        accuracy = tf.metrics.accuracy(label_ids, predictions)
        loss = tf.metrics.mean(per_example_loss)
        # NOTE(review): tf.metrics.true_positives & co. assume binary
        # labels; with 3 classes these counts are of limited meaning --
        # confirm before relying on them.
        true_pos = tf.metrics.true_positives(
            label_ids,
            predictions)
        true_neg = tf.metrics.true_negatives(
            label_ids,
            predictions)
        false_pos = tf.metrics.false_positives(
            label_ids,
            predictions)
        false_neg = tf.metrics.false_negatives(
            label_ids,
            predictions)
        return {
            "true_positives": true_pos,
            "true_negatives": true_neg,
            "false_positives": false_pos,
            "false_negatives": false_neg,
            "eval_accuracy": accuracy,
            "eval_loss": loss,
        }
      eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          eval_metrics=eval_metrics)
    elif mode == tf.estimator.ModeKeys.PREDICT:
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode, predictions={"probabilities": probabilities})
    else:
      raise ValueError(
          "Only TRAIN, EVAL and PREDICT modes are supported: %s" % (mode))
    return output_spec
  return model_fn
####################################################
###### FUnctions to train + evaluate model #########
####################################################
def get_run_config(output_dir):
    """
    Build the TPU RunConfig for this training run (TPU-only).
    """
    tpu_config = tf.contrib.tpu.TPUConfig(
        iterations_per_loop=ITERATIONS_PER_LOOP,
        num_shards=NUM_TPU_CORES,
        per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2)
    return tf.contrib.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        model_dir=output_dir,
        save_checkpoints_steps=SAVE_CHECKPOINTS_STEPS,
        tpu_config=tpu_config)
def getEstimator(mode_fn):
    """
    Construct the TPUEstimator used to train/evaluate the model.
    """
    run_config = get_run_config(OUTPUT_DIR)
    return tf.estimator.tpu.TPUEstimator(
        use_tpu=True,
        model_fn=mode_fn,
        config=run_config,
        train_batch_size=BATCH_SIZE,
        eval_batch_size=EVAL_BATCH_SIZE,
        predict_batch_size=PREDICT_BATCH_SIZE,
        eval_on_tpu=True)
def model_train(estimator):
    """
    Train on the global `train_features` for `num_train_steps` steps.

    drop_remainder=True is required on TPU (fixed batch shapes); see
    https://stackoverflow.com/questions/58029896/bert-fine-tuning-with-estimators-on-tpus-on-colab-typeerror-unsupported-operand
    """
    print('***** Started training at %s *****' % datetime.now())
    print(' Num examples = %s' % len(train_features))
    print(' Batch size = %s' % BATCH_SIZE)
    tf.logging.info(" Num steps = %d", num_train_steps)
    start_time = datetime.now()
    input_fn = run_classifier.input_fn_builder(
        features=train_features,
        seq_length=MAX_SEQ_LENGTH,
        is_training=True,
        drop_remainder=True)
    estimator.train(input_fn=input_fn, max_steps=num_train_steps)
    print("Finished: Training took time ", datetime.now() - start_time)
#train_features
def model_evaluate(estimator, data):
    """
    Evaluate on `data`; write metrics to <OUTPUT_DIR>/eval/eval_results.txt.

    The TPU truncates the final partial batch, so evaluation covers only
    floor(len(data) / EVAL_BATCH_SIZE) * EVAL_BATCH_SIZE examples.
    """
    print('***** Started evaluation at %s *****' % datetime.now())
    print(' Num examples = %s' % len(data))
    print(' Batch size = %s' % EVAL_BATCH_SIZE)
    eval_steps = len(data) // EVAL_BATCH_SIZE
    input_fn = run_classifier.input_fn_builder(
        features=data,
        seq_length=MAX_SEQ_LENGTH,
        is_training=False,
        drop_remainder=True)
    result = estimator.evaluate(input_fn=input_fn, steps=eval_steps)
    print('***** Finished evaluation at %s *****' % datetime.now())
    eval_dir = os.path.join(OUTPUT_DIR, "eval")
    tf.gfile.MakeDirs(eval_dir)
    output_eval_file = os.path.join(eval_dir, "eval_results.txt")
    with tf.gfile.GFile(output_eval_file, "w") as writer:
        print("***** Eval results *****")
        for key in sorted(result.keys()):
            print(' %s = %s' % (key, str(result[key])))
            writer.write("%s = %s\n" % (key, str(result[key])))
####################################################
################# Utility Functions ##############
####################################################
def saveModelParams(params, _dir):
    """
    Dump every entry of `params` to <_dir>/modelParams/model_parameters.txt.
    """
    params_dir = os.path.join(_dir, "modelParams")
    tf.gfile.MakeDirs(params_dir)
    model_params_file = os.path.join(params_dir, "model_parameters.txt")
    with tf.gfile.GFile(model_params_file, "w") as writer:
        print("***** Model Parameters *****")
        for key in sorted(params.keys()):
            print(' %s = %s' % (key, str(params[key])))
            writer.write("%s = %s\n" % (key, str(params[key])))
    print("Model paramters at: %s" % params_dir)
def convert_arg_line_to_args(arg_line):
    """
    Split one line of an @args file into individual arguments (generator).

    From: https://stackoverflow.com/questions/25084993/why-isnt-fromfile-prefix-chars-in-python-argparse-working

    str.split() with no separator already drops empty and whitespace-only
    fields, so the per-token strip() check in the original was dead code.
    """
    yield from arg_line.split()
if __name__ == "__main__":
    # CLI definition.  Arguments may also come from an @file with one
    # argument per line (see convert_arg_line_to_args above).
    my_parser = argparse.ArgumentParser(
        fromfile_prefix_chars='@',
        prog="runBert",
        description='Run bert on patent data!!')
    my_parser.convert_arg_line_to_args = convert_arg_line_to_args
    my_parser.add_argument(
        '-tpuAddress',
        action='store',
        type=str,
        required=True,
        help="The address of TPU node"
    )
    my_parser.add_argument(
        '-tpuZone',
        action='store',
        type=str,
        required=False,
        nargs='?',
        default="us-central1-f",
        help="The zone that the TPU is in: default us-central1-f"
    )
    my_parser.add_argument(
        '-outputDir',
        action='store',
        type=str,
        required=True,
        help="The output dir of results: will be stored in gs bucket `patents-research` under folder bertResults{outputDir}"
    )
    my_parser.add_argument(
        '-seqLen',
        action='store',
        type=int,
        required=True,
        help="The sequence length for the language model"
    )
    my_parser.add_argument(
        '-modelHub',
        action='store',
        type=str,
        required=False,
        nargs='?',
        default="https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1",
        help="The Bert model Hub"
    )
    my_parser.add_argument(
        '-batchSize',
        action='store',
        type=int,
        required=False,
        default=64,
        nargs='?',
        help="The training batch size"
    )
    my_parser.add_argument(
        '-epochs',
        action='store',
        type=float,
        required=False,
        default=40.0,
        nargs='?',
        help="The number of epochs"
    )
    my_parser.add_argument(
        '-dropout',
        action='store',
        type=float,
        required=False,
        default=0.7,
        nargs='?',
        help="Percent of data to keep"
    )
    args = my_parser.parse_args()
    ##### SET TPU CONSTANTS AND CONNECT TO IT #######
    TPU_ADDRESS = args.tpuAddress
    TPU_ZONE = args.tpuZone
    USE_TPU =True
    ITERATIONS_PER_LOOP = 1000
    tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=TPU_ADDRESS, zone=TPU_ZONE)
    tf.tpu.experimental.initialize_tpu_system(tpu_cluster_resolver)
    tf.distribute.experimental.TPUStrategy(tpu_cluster_resolver)
    #NUM_TPU_CORES = len(tf.config.experimental.list_logical_devices('TPU'))
    NUM_TPU_CORES = 8
    # NOTE(review): NUM_TPU_CORES is hard-coded to 8 above, so this guard
    # can never fire; it only protects the commented-out dynamic lookup.
    if NUM_TPU_CORES==0:
        sys.exit("Problem with tpu make sure region is correct or tpu is runnign")
    ###################################
    ####### CONSTANTS ##################
    ####################################
    DATA_PATH = "gs://patents-research/patent_research/data_frwdcorrect.tsv"
    OUTPUT_DIR = "bertResults_{}".format(args.outputDir)# where the model will be saved
    BUCKET = "patents-research"
    DATA_COLUMN = 'text'
    LABEL_COLUMN = 'label'
    label_list = [0, 1, 2]
    MAX_SEQ_LENGTH = args.seqLen
    TRAIN_TFRecord_PATH= "gs://patents-research/patent_research/{}_{}.pickle".format("train_features",MAX_SEQ_LENGTH)
    TEST_TFRecord_PATH= "gs://patents-research/patent_research/{}_{}.pickle".format("test_features",MAX_SEQ_LENGTH)
    BERT_MODEL_HUB = args.modelHub
    #Set output directory (rewrites OUTPUT_DIR to its gs:// URI)
    setUp_output_dir()
    # Force TF Hub writes to the GS bucket we provide.
    os.environ['TFHUB_CACHE_DIR'] = os.path.join(OUTPUT_DIR,"tfhub_cache")
    tf.gfile.MakeDirs(os.path.join(OUTPUT_DIR,"tfhub_cache"))
    # Model Parameters
    # These hyperparameters are copied from this colab notebook (https://colab.sandbox.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb)
    BATCH_SIZE = args.batchSize
    EVAL_BATCH_SIZE = NUM_TPU_CORES
    PREDICT_BATCH_SIZE = NUM_TPU_CORES
    LEARNING_RATE = 2e-5
    NUM_TRAIN_EPOCHS = args.epochs
    DROPOUT_KEEP_PROB = args.dropout
    # Warmup is a period of time where the learning rate
    # is small and gradually increases--usually helps training.
    WARMUP_PROPORTION = 0.1
    # Model configs
    SAVE_CHECKPOINTS_STEPS = 1000
    SAVE_SUMMARY_STEPS = 100
    # Compute # train and warmup steps from batch size
    # NOTE(review): train_features is not defined at this point -- the
    # readFromGcloud call that would load it (below) is commented out, so
    # this line raises NameError when the script runs as-is.
    num_train_steps = int(len(train_features) / BATCH_SIZE * NUM_TRAIN_EPOCHS)
    num_warmup_steps = int(num_train_steps * WARMUP_PROPORTION)
    # Snapshot of the full run configuration, persisted next to the model.
    params={
        "TPU_ADDRESS":TPU_ADDRESS,
        "TPU_ZONE":TPU_ZONE,
        "TPU_ITERATIONS_PER_LOOP":ITERATIONS_PER_LOOP,
        "NUM_TPU_CORES":NUM_TPU_CORES,
        "TFHUB_CACHE_DIR":os.path.join(OUTPUT_DIR,"tfhub_cache"),
        "DATA_PATH":DATA_PATH,
        "OUTPUT_DIR":OUTPUT_DIR,
        "MAX_SEQ_LENGTH":MAX_SEQ_LENGTH,
        "TRAIN_TFRecord_PATH":TRAIN_TFRecord_PATH,
        "TEST_TFRecord_PATH":TEST_TFRecord_PATH,
        "BERT_MODEL_HUB":BERT_MODEL_HUB,
        "BATCH_SIZE":BATCH_SIZE,
        "EVAL_BATCH_SIZE":EVAL_BATCH_SIZE,
        "PREDICT_BATCH_SIZE":PREDICT_BATCH_SIZE,
        "LEARNING_RATE":LEARNING_RATE,
        "NUM_TRAIN_EPOCHS":NUM_TRAIN_EPOCHS,
        "DROPOUT_KEEP_PROB":DROPOUT_KEEP_PROB,
        "WARMUP_PROPORTION":WARMUP_PROPORTION,
        "SAVE_CHECKPOINTS_STEPS":SAVE_CHECKPOINTS_STEPS,
        "SAVE_SUMMARY_STEPS":SAVE_SUMMARY_STEPS,
        "num_train_steps":num_train_steps,
        "num_warmup_steps":num_warmup_steps
    }
saveModelParams(params,OUTPUT_DIR)
#####################################################
########### RUNNING SET UP FUNCTIONS ################
#####################################################
# Download train data
print("Reading {} from gCloud".format(TRAIN_TFRecord_PATH))
#train_features = readFromGcloud(TRAIN_TFRecord_PATH)
print("Finished {} from gCloud!".format(TRAIN_TFRecord_PATH))
# Download test data async - test data will be saved at test_features
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO,datefmt="%H:%M:%S")
getTestData_thread = threading.Thread(target=worker_downloadTestData, args=(1,))
#getTestData_thread.start() #async download of test data
#####################################################
########## Train + Eval Model #######################
#####################################################
mode_fn = model_fn_builder(
num_labels=len(label_list),
learning_rate=LEARNING_RATE,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
dropout = DROPOUT_KEEP_PROB,
use_tpu = USE_TPU,
bert_hub_module_handle = BERT_MODEL_HUB
)
#estimator = getEstimator(mode_fn)
#model_train(estimator)
#gc.collect()
#del train_features # Remove train_features might cause mem limit
#gc.collect() # Remove train_features might cause mem limit
#getTestData_thread.join()
#gc.collect()
#model_evaluate(estimator, train_features)
| jdanene/patent-language-modeling | src/analysis/code/runBert.py | runBert.py | py | 18,927 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "tensorflow.gfile.DeleteRecursively",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "tensorflow.gfile",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.gfile.MakeDirs",
"line_number": 63,
"usage_type": "call"
},
{
... |
9506994070 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import lxml
import lxml.html.clean
import requests
import wikipedia as wp
from transliterate import translit
def get_html_from_text(raw_html):
    """Parse raw HTML text into an lxml element tree."""
    return lxml.html.fromstring(raw_html)
def get_element_by_selector(b, selector):
    """Return the first element of `b` matching the CSS selector, or None."""
    matches = b.cssselect(selector)
    return matches[0] if matches else None
def get_info_from_block(b):
    """Extract label, coordinates and short description from one place <div>.

    Every value defaults to None when the corresponding element is missing.
    """
    info = dict()
    description = get_element_by_selector(b, 'p[itemprop="description"]')
    info['short'] = '\n'.join(description.itertext()) if description is not None else None
    title_link = get_element_by_selector(b, 'h3 > a')
    info['label'] = title_link.text if title_link is not None else None
    for key, meta_selector in (('lat', 'meta[itemprop="latitude"]'),
                               ('long', 'meta[itemprop="longitude"]')):
        meta = get_element_by_selector(b, meta_selector)
        info[key] = meta.attrib['content'] if meta is not None else None
    return info
def get_infos():
    """Scrape sight blocks from autotravel.ru, keeping only geolocated ones."""
    response = requests.get('https://autotravel.ru/excite.php/1055/1')
    tree = get_html_from_text(response.text)
    # BLOCK_SELECTOR = 'div[itemtype="http://schema.org/Place"]'
    blocks = tree.cssselect('div[class="col-md-12 col-xs-12"] > div')
    parsed = [get_info_from_block(b) for b in blocks]
    return [d for d in parsed if d['long'] is not None and d['lat'] is not None]
def check_label(d, key):
    """Return True if d[key] is a single-line string (no embedded newline).

    Uses isinstance rather than an exact type() comparison so str
    subclasses are accepted too.
    """
    return isinstance(d[key], str) and '\n' not in d[key]
def search(query, lang):
    """Return the top Wikipedia search hit for `query` in `lang`, or None."""
    wp.set_lang(lang)
    hits = wp.search(query)
    return hits[0] if hits else None
def get_page(d):
    """Resolve a sight record to a Wikipedia page.

    Tries the Russian label, then the transliterated label, first as-is and
    then with a Saint-Petersburg disambiguation suffix.  Returns None when
    every lookup fails.
    """
    candidates = (d['label_ru'], d['label'])
    for suffix in ('', ' (Санкт-Петербург)'):
        for title in candidates:
            try:
                return wp.page(title + suffix)
            except wp.exceptions.WikipediaException:
                continue
    return None
class ExtractorError(RuntimeError):
    """Raised when a sight record cannot be resolved to a Wikipedia page."""
    def __init__(self, message):
        super().__init__(message)
        # Kept as an attribute so callers can log it without str(e).
        self.message = message
OUTPUT_DIRECTORY = "sights/"
if __name__ == '__main__':
infos = get_infos()
bad_records = []
for i, d in enumerate(infos):
label = d['label']
try:
# en_search = search(label, 'en')
# if en_search is None:
en_search = translit(label, reversed=True)
d['label_en'] = en_search
assert check_label(d, 'label_en')
ru_search = search(label, 'ru')
if ru_search is None:
raise ExtractorError('ru_search')
d['label_ru'] = ru_search
assert check_label(d, 'label_ru')
p = get_page(d)
if p is None:
raise ExtractorError('get_page')
if d['short'] is None:
print(i, label, "does not have short description from site\n".format(d['label']))
d['short'] = p.summary
d['long_description'] = p.summary
try:
d['lat'] = float(p.coordinates[0])
d['long'] = float(p.coordinates[1])
except KeyError:
pass
d['url'] = p.url
d['name'] = ''.join(filter(lambda c: c.isalnum() or c == '_', d['label_en'].replace(' ', '_')))
if d['name'].startswith('List'):
print(d)
f = open(OUTPUT_DIRECTORY + d['name'] + '.sight', 'w')
f.write('\n'.join([
d['label'],
d['label_ru'],
d['label_en'],
str(d['lat']) + ' ' + str(d['long']),
d['url'],
'===',
d['short'],
'===',
d['long_description'],
]))
f.close()
except ExtractorError as e:
print(i, label, e.message)
bad_records.append(d)
f = open('bad_records.txt', 'w')
f.write('\n'.join([str(record) for record in bad_records]))
f.close()
| OSLL/adfmp18-PiterSights | crawler/site_extractor.py | site_extractor.py | py | 4,706 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "lxml.html.fromstring",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "lxml.html",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "wikipedia.set_lang",... |
24126442093 | import gc
import sys
import wx
from weakref import ref
from testutil import check_collected
# Module-level flags mutated by the CallAfter callbacks below.
foo = 0  # set to 42 when the scheduled closure actually runs
success = 0  # number of successful test_callafter_leak completions
def test_callafter_leak():
    """Verify wx.CallAfter does not keep a strong reference to its callable.

    Schedules a closure, drops the only strong reference, pumps the event
    loop, then checks via a weakref that the closure was collected and that
    it actually executed (foo == 42).  Python 2 / wxPython code.
    """
    def func():
        global foo
        foo = 42
    wr = ref(func)
    wx.CallAfter(func)
    del func
    # make sure that func runs
    wx.GetApp().Yield()
    # If the weakref is still live, dump who is keeping the closure alive.
    assert wr() is None, gc.get_referrers(gc.get_referrers(wr())[0])
    assert foo == 42
    global success
    success = success + 1
def main():
    """Run the leak check 2*N times inside a wx main loop (Python 2: xrange)."""
    a = wx.PySimpleApp()
    N = 100
    for x in xrange(N):
        wx.CallAfter(test_callafter_leak)
    # Force a GC pass between the two batches to flush lingering cycles.
    wx.CallAfter(gc.collect)
    for x in xrange(N):
        wx.CallAfter(test_callafter_leak)
    wx.CallAfter(a.ExitMainLoop)
    a.MainLoop()
    global success
    assert success == N*2
# Script entry point (Python 2 / wxPython).
if __name__ == '__main__':
    main()
| ifwe/wxpy | src/tests/test_callafter.py | test_callafter.py | py | 804 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "weakref.ref",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "wx.CallAfter",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "wx.GetApp",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "gc.get_referrers",
"line_numbe... |
44426526976 | import json
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, connect_nodes_bi, connect_nodes, sync_blocks, disconnect_nodes_bi
from test_framework.key import CECKey
from test_framework.blocktools import create_block, create_coinbase
from test_framework.script import hash160, CScript, OP_DUP, OP_HASH160, OP_EQUALVERIFY, OP_CHECKSIG, SignatureHashForkId, SIGHASH_ALL , SIGHASH_FORKID
from test_framework.mininode import CTransaction, CTxOut, CTxIn, COutPoint, ToHex
from test_framework.authproxy import JSONRPCException
class User:
    """An EC key pair that can build, sign and broadcast P2PKH spends."""
    def __init__(self, secret_bytes):
        # Deterministic key derived from the supplied secret bytes.
        self.key = CECKey()
        self.key.set_secretbytes(secret_bytes)
        self.pubkey = self.key.get_pubkey()
    def spend_to_pkh (self, node, spend_tx, n, amount, to_pubkey):
        """Spend output `n` of `spend_tx`, paying `amount` satoshis to a
        P2PKH script for `to_pubkey`; sign, broadcast via `node`, return tx."""
        value = int(amount)
        scriptPubKey = CScript([OP_DUP, OP_HASH160, hash160(to_pubkey), OP_EQUALVERIFY, OP_CHECKSIG])
        tx = CTransaction()
        assert (n < len(spend_tx.vout))
        tx.vin.append(CTxIn(COutPoint(spend_tx.sha256, n), b"", 0xffffffff))
        tx.vout.append(CTxOut(value, scriptPubKey))
        tx.calc_sha256()
        self.__sign_tx(tx, spend_tx, n)
        tx.rehash()
        node.sendrawtransaction(ToHex(tx), False, True)
        if False: # if we want to get the tx as json formatted output for debugging
            tx_json = node.decoderawtransaction(ToHex(tx))
            for output in tx_json['vout']:
                output['value'] = float(output['value'])
            text = json.dumps(tx_json, indent=4)
            print("ds transaction:", text)
        return tx
    def __sign_tx(self, sign_tx, spend_tx, n):
        # Sign input 0 with SIGHASH_ALL|SIGHASH_FORKID over the output being spent.
        sighash = SignatureHashForkId( spend_tx.vout[n].scriptPubKey, sign_tx, 0, SIGHASH_ALL | SIGHASH_FORKID, spend_tx.vout[n].nValue )
        sign_tx.vin[0].scriptSig = CScript([self.key.sign(sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID])), self.pubkey ])
class CompetingChainsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.nodeargs = ["-txindex=1", "-disablesafemode=0", "-debug=1"]
self.extra_args = [self.nodeargs, self.nodeargs]
self.nbDoubleSpends = 3
self.lenChain0 = 8 # more than SAFE_MODE_MAX_VALID_FORK_LENGTH 7
self.lenChain1 = 18 # less than SAFE_MODE_MAX_VALID_FORK_DISTANCE (72)
self.FORK_ROOT_HEIGHT = 200
    def setup_network(self):
        # Nodes start disconnected; run_test connects/disconnects explicitly.
        self.setup_nodes()
    def make_coinbase(self, conn_rpc):
        """Mine one block on top of the node's tip; return its coinbase tx."""
        tip = conn_rpc.getblock(conn_rpc.getbestblockhash())
        coinbase_tx = create_coinbase(tip["height"] + 1)
        block = create_block(int(tip["hash"], 16), coinbase_tx, tip["time"] + 1)
        block.solve()
        conn_rpc.submitblock(ToHex(block))
        return coinbase_tx
    def send_funds_to_attacker (self, node, attacker, coinbase_tx):
        """Split the coinbase value into nbDoubleSpends equal P2PKH outputs
        paying the attacker; broadcast the funding tx and return it."""
        funding_amount = int(coinbase_tx.vout[0].nValue / self.nbDoubleSpends)
        funding_tx = CTransaction()
        funding_tx.vin.append(CTxIn(COutPoint(coinbase_tx.sha256, 0), b"", 0xffffffff))
        scriptPubKey = CScript([OP_DUP, OP_HASH160, hash160(attacker.pubkey), OP_EQUALVERIFY, OP_CHECKSIG])
        for i in range(self.nbDoubleSpends):
            funding_tx.vout.append(CTxOut(funding_amount, scriptPubKey))
        funding_tx.rehash()
        funding_txid = node.sendrawtransaction(ToHex(funding_tx), False, True)
        # The funding tx must be the only transaction in the mempool.
        assert_equal(node.getrawmempool(), [funding_txid])
        return funding_tx
    def contains_double_spends (self):
        """Count prevouts spent by more than one transaction across BOTH
        nodes' chains.

        Walks every block of every node, deduplicates transactions by txid,
        and counts (txid, vout) inputs seen more than once.
        """
        spent_inputs = set([])
        seen_transactions = []
        ds_counter = 0
        for node in self.nodes:
            for height in range(node.getblockcount() + 1):
                blockhash = node.getblockhash(height)
                # verbosity 2: block with fully-decoded transaction objects
                block = node.getblock(blockhash, 2)
                for txraw in block['tx']:
                    if txraw['txid'] in seen_transactions:
                        continue
                    else:
                        seen_transactions.append(txraw['txid'])
                    for i in txraw['vin']:
                        # Coinbase inputs spend nothing; skip them.
                        if 'coinbase' in i:
                            continue
                        new_element = (i['txid'], i['vout'])
                        if new_element in spent_inputs:
                            ds_counter += 1
                        else:
                            spent_inputs.add(new_element)
        return ds_counter
def run_test(self):
# Test 1:
# 1. fund an attacker for the test on node0
# 2. progress to block height 200
# 3. sync all nodes
# 4. disconnect the two nodes forking at block height 200
# 5. spend attackers fund in node0 and double spend them in node1
# 6. Assert that the two chains actually contain the attackers double-spends
attacker = User(b"horsebattery")
friend0_of_attacker = User(b"fatstack")
friend1_of_attacker = User(b"fatheap")
node0 = self.nodes[0] # victim node
node1 = self.nodes[1] # node under control of attacker
self.log.info("fund attacker. We fund him at height 200 -2")
self.log.info("just for debugging convenience. We plan to fork at height 200")
coinbase_tx = self.make_coinbase(node0)
node0.generate(self.FORK_ROOT_HEIGHT - 2)
assert (node0.getblockcount() == self.FORK_ROOT_HEIGHT - 1)
self.log.info("fund attacker")
funding_tx = self.send_funds_to_attacker (node0, attacker, coinbase_tx)
node0.generate(1)
assert (node0.getblockcount() == self.FORK_ROOT_HEIGHT + 0)
self.log.info("sync nodes. All nodes have the same chain and funding transactions after syncing")
connect_nodes_bi(self.nodes, 0, 1)
sync_blocks(self.nodes)
disconnect_nodes_bi(self.nodes, 0, 1)
# fork from here
assert (node0.getblockcount() == node1.getblockcount())
self.log.info("spends attackers funds in node0")
for i in range(self.nbDoubleSpends):
attacker.spend_to_pkh(node0, funding_tx, i, funding_tx.vout[i].nValue, friend0_of_attacker.pubkey)
node0.generate(1)
assert (node0.getblockcount() == self.FORK_ROOT_HEIGHT + 1)
self.log.info("double spend attacker funds in node1")
for i in range(self.nbDoubleSpends):
attacker.spend_to_pkh(node1, funding_tx, i, funding_tx.vout[i].nValue, friend1_of_attacker.pubkey)
node1.generate(1)
first_bad_block = node1.getbestblockhash()
assert (node1.getblockcount() == self.FORK_ROOT_HEIGHT + 1)
self.log.info("check that funds have been double spent to different addresses")
assert(self.contains_double_spends () == self.nbDoubleSpends)
# Test 2.
# 1. Progress the two competing chains in node0 and node1 to different lengths (configurable).
# node1 shall hold the longer chain and is the one controlled by the attacker.
# The two nodes are not connected to each other directly or indirectly and at this point
# contain the doulbe-spends we have prapared.
# 2. connect the nodes and sync them to force a reorg
# 3. Assert that all double-spends disappeared - which nontheless means the attack succeeded.
assert(self.lenChain0 <= self.lenChain1)
self.log.info("Mine lenChain0 blocks on node0")
node0.generate(self.lenChain0 - 1)
assert(node0.getblockcount() == self.FORK_ROOT_HEIGHT + self.lenChain0)
self.log.info("Mine competing lenChain1 blocks on node1")
node1.generate(self.lenChain1 - 1)
assert(node1.getblockcount() == self.FORK_ROOT_HEIGHT + self.lenChain1)
self.log.info("Connect nodes to force a reorg")
connect_nodes(self.nodes, 1, 0)
sync_blocks(self.nodes[0:2])
if self.lenChain1 > self.lenChain0:
assert(node0.getblockcount() == self.FORK_ROOT_HEIGHT + self.lenChain1)
else:
assert(node1.getblockcount() == self.FORK_ROOT_HEIGHT + self.lenChain0)
self.log.info("check that both nodes have the same chains")
lastblock0 = node0.getbestblockhash()
lastblock1 = node1.getbestblockhash()
assert(lastblock0 == lastblock1)
self.log.info("check that double-spends have been removed")
assert (self.contains_double_spends () == 0)
# Test 3: Assert that safemode has been reached
try:
node0.rpc.getbalance()
assert False, "Should not come to here, should raise exception in line above."
except JSONRPCException as e:
assert e.error["message"] == "Safe mode: Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues. A large valid fork has been detected."
# Test 4: Assert that safemode is exited if the offending chain is invalidated
node0.invalidateblock(first_bad_block)
node0.ignoresafemodeforblock(first_bad_block)
balance = node0.rpc.getbalance()
assert (balance != None)
if __name__ == '__main__':
CompetingChainsTest().main()
| bitcoin-sv/bitcoin-sv | test/functional/bsv-block-ds-attack.py | bsv-block-ds-attack.py | py | 9,229 | python | en | code | 597 | github-code | 6 | [
{
"api_name": "test_framework.key.CECKey",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "test_framework.script.CScript",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "test_framework.script.OP_DUP",
"line_number": 20,
"usage_type": "name"
},
{
... |
from django.urls import path
from payment import views

app_name = 'payment'

# BUG FIX: the original passed url()-style regex fragments ('$',
# '(?P<id>\d+)') to path(), which treats them as literal characters, so none
# of these routes could ever match. path() takes plain strings plus
# angle-bracket converters; '<int:id>' is captured as the keyword arg 'id'.
urlpatterns = [
    path('canceled/', views.payment_canceled, name='canceled'),
    path('done/', views.payment_done, name='done'),
    path('<int:id>/process', views.payment_process, name='process')
]
| studiosemicolon/onlineshop | payment/urls.py | urls.py | py | 287 | python | en | code | 23 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "payment.views.payment_canceled",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "payment.views",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "djang... |
580269488 | import json, requests, pytest
from pydantic import BaseModel, field_validator
from unittest.mock import Mock, MagicMock
class Location:
    """A latitude/longitude point that can fetch its current weather from the
    fcc-weather-api service."""

    def __init__(self, longitudecls, latitudecls):
        self._longitude = longitudecls
        self._latitude = latitudecls

    def get_weather(self):
        """Fetch the current weather for this location.

        Returns a WeatherPydantic instance, or None on any failure
        (non-200 status, malformed payload, missing keys).
        """
        weather_data = requests.get(
            f'https://fcc-weather-api.glitch.me/api/current?lat={self._latitude}&lon={self._longitude}')
        try:
            if weather_data.status_code != 200:
                raise Exception(f'Error request, status code : {weather_data.status_code}')
            # Parse inside the try so a non-JSON body yields None instead of
            # an uncaught ValueError (the original parsed before the check).
            data = json.loads(weather_data.text)
            # BUG FIX: the original wrote data.get['main'] - subscripting the
            # bound method itself - which always raised, so feels_like was
            # silently forced to 0 on every call.
            feels_like_temp = data['main'].get('feels_like', 0)
            dict_for_weather = {
                'temperature': {
                    'temp': data['main']['temp'],
                    'feels_like': feels_like_temp,
                    'temp_min': data['main']['temp_min'],
                    'temp_max': data['main']['temp_max']
                },
                'pressure': data['main']['pressure'],
                'description': data['weather'][0]['description'],
                'name': data['name']
            }
        except Exception:
            return None
        return WeatherPydantic(**dict_for_weather)
class TemperaturePydantic(BaseModel):
    """Temperature section of the weather payload.

    NOTE(review): the validator converts `temp` from Celsius to Fahrenheit
    (c * 1.8 + 32) at construction; the other fields keep the API's units.
    """
    temp: float
    feels_like: float
    temp_min: float
    temp_max: float

    @field_validator('temp')
    def validate_temp(cls, temp: int):
        # Celsius -> Fahrenheit, rounded to 3 decimal places.
        return round(temp * 1.8 + 32, 3)
class WeatherPydantic(BaseModel):
    """Flattened weather report: nested temperature block plus pressure, a
    textual description and the resolved location name."""
    temperature: TemperaturePydantic
    pressure: float
    description: str
    name: str
# longitude = int(input('Please enter longitude: '))
# latitude = int(input('Please inter latitude: '))
loc = Location(50, 38)
weather = loc.get_weather()
print(weather)
def test_get_weather(mocker):
mocker.patch.object(
requests,
'get',
return_value=Mock(
status_code=200,
text=json.dumps(
{
"coord": {
"lon": 50,
"lat": 28
}, "weather": [
{
"id": 800,
"main": "Clear",
"description": "clear sky",
}
],
"base": "stations",
"main": {
"temp": 33.27,
"feels_like": 0.0,
"temp_min": 33.27,
"temp_max": 33.27,
"pressure": 1001,
"humidity": 72,
"sea_level": 1001,
"grnd_level": 1001
},
"visibility": 10000,
"wind": {
"speed": 3.25,
"deg": 258,
"gust": 3.6
},
"clouds": {
"all": 1
},
"dt": 1691623796,
"sys": {
"country": "SA",
"sunrise": 1691633230,
"sunset": 1691681029},
"timezone": 12600,
"id": 109435,
"name": "Jubail",
"cod": 200
}
)
)
)
test_loc = Location(50, 28)
actual = test_loc.get_weather()
expected = WeatherPydantic(
temperature=TemperaturePydantic.model_construct(
temp=round(33.27*1.8 + 32, 3),
feels_like=0.00,
temp_min=33.27,
temp_max=33.27
),
pressure=1001,
description='clear sky',
name='Jubail',
)
assert actual == expected | MrDumper/Roma | weather_HW.py | weather_HW.py | py | 4,128 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "pydantic.field_validator",... |
18042025196 | # __init__.py
__version__ = "1.2.2" # Be sure to update version in setup.py as well
from difflib import SequenceMatcher
from sh3ll.command import command
from art import tprint
class IS(object):
    """Interactive shell driver: prints an ASCII-art banner, reads commands
    in a loop, dispatches them to registered handlers, and renders a help
    table.

    Commands are registered via the `command` decorator method below and are
    stored as `sh3ll.command.command` instances in `self.commands`.
    """
    def __init__(self, name="", font="", prefix="CLA>"):
        # name/font feed the art.tprint banner; prefix is the input prompt.
        self.name = name
        self.font = font
        self.prefix = prefix
        self.commands = []
        self.categories = []
    """
    Param: -variable value
    """
    def run(self):
        """Main REPL: show the banner, then read/parse/dispatch until exit."""
        if self.font != "":
            tprint(self.name, font=self.font)
        else:
            tprint(self.name)
        while True:
            try:
                line = input(self.prefix).lower()
            except KeyboardInterrupt:
                exit()
            inputted_command = line.split()[0]
            # Parse command into command and arguments
            # Tokens wrapped in single quotes are re-joined into one argument;
            # `count` skips over the tokens consumed by a quoted run.
            args = []
            tmp = line.split()[1:]
            count = 0
            for arg in range(len(tmp)):
                if count == 0:
                    if tmp[arg][0] == "'":
                        for arg2 in range(len(tmp[arg + 1:])):
                            if tmp[arg + 1 + arg2][-1] == "'":
                                args.append(''.join([s.strip("'") for s in tmp[arg:arg + arg2 + 2]]))
                                count = len(tmp[arg:arg + arg2 + 1])
                    else:
                        if tmp[arg][-1] != "'":
                            args.append(tmp[arg])
                else:
                    count -= 1
            cmds = [cmd.name for cmd in self.commands]
            categories = [cmd.category for cmd in self.commands]
            aliases = {}
            # NOTE(review): the loop variable shadows the imported `command`
            # class here; harmless at runtime but confusing to read.
            for command in self.commands:
                aliases[command.name] = command.aliases
            if inputted_command != "help" and inputted_command != "exit" and inputted_command != "q":
                if inputted_command in cmds and self.commands[cmds.index(inputted_command)].category == "":
                    self.commands[cmds.index(inputted_command)].execute(args)
                elif inputted_command in categories:
                    if line.split()[1] in cmds:
                        if self.commands[cmds.index(line.split()[1])].category == line.split()[0]:
                            self.commands[cmds.index(line.split()[1])].execute(args[1:])
                    else:
                        for command in self.commands:
                            if line.split()[1] in command.aliases:
                                command.execute(args[1:])
                else:
                    # Unknown command: suggest the most similar registered name.
                    highestSimilarity = 0
                    mostSimilarCommand = ""
                    mostSimilarCommandCategory = ""
                    for command in self.commands:
                        similarity = SequenceMatcher(None, command.name, inputted_command).ratio()
                        if similarity > highestSimilarity:
                            highestSimilarity = SequenceMatcher(None, command.name, inputted_command).ratio()
                            mostSimilarCommand = command.name
                            mostSimilarCommandCategory = command.category
                    print(f"Command not recognized.\nDid you mean: '{mostSimilarCommandCategory} {mostSimilarCommand}'?")
            else:
                self.help() if inputted_command == "help" else exit()
    def help(self):
        """Print built-in commands, then a formatted table per category
        (command, aliases, help text) padded to the longest entry."""
        print("help\tDisplays this menu")
        print("exit OR q\tExits the program")
        for command in self.commands:
            if command.category == "":
                print(f"{command.name}\t{command.help}")
        print()
        for category in self.categories:
            if category != "":
                print(f"\"{category}\" Commands:\n" + ("-" * (len(category) + 12)))
                cmds = []
                for command in self.commands:
                    if command.category == category:
                        cmds.append(command)
                longest_name = max([len(cmd.name) for cmd in cmds])
                longest_aliases = max([len(str(cmd.aliases)) for cmd in cmds])
                longest_help = max([len(cmd.help) for cmd in cmds])
                print("\tCommand" + (" " * (abs((len(category) + 1 + longest_name) - 7) + 4)) + "Aliases" + (
                        " " * (abs(longest_aliases - 7) + 4)) + "Help" + " " * (abs(longest_help - 4) + 4))
                print("\t" + ("-" * 7) + (" " * (abs((len(category) + 1 + longest_name) - 7) + 4)) + ("-" * 8) + (
                        " " * (abs(longest_aliases - 8) + 4)) + ("-" * 4))
                for command in cmds:
                    if abs(longest_name - len(command.name)) == 0:
                        print(f"\t{category} {command.name}" + (" " * (abs((len(category) + 1 + longest_name) - (
                                len(category) + len(command.name) + 1)) + 4)), end="")
                    else:
                        print(f"\t{category} {command.name}" + (" " * (
                                abs((len(category) + 1 + longest_name) - len(f"{category} {command.name}")) + 4)),
                              end="")
                    if abs(longest_aliases - len(str(command.aliases))) == 0:
                        print(f"{command.aliases}    ", end="")
                    else:
                        print(f"{command.aliases}" + (" " * (abs(longest_aliases - len(str(command.aliases))) + 4)),
                              end="")
                    print(f"{command.help}" + (" " * abs(longest_help - len(command.help))))
        print()
    def command(self, name="Unknown command", aliases=[], help="No help given", category="", progress=()):
        """Decorator factory that registers the wrapped function as a command.

        NOTE(review): `aliases=[]` is a mutable default argument shared across
        calls, and `help` shadows the builtin - both worth cleaning up.
        """
        def wrap(function):
            if category not in self.categories:
                self.categories.append(category)  # Auto register cats
            # `command` here resolves to the imported sh3ll.command.command class.
            self.commands.append(command(function, name=name, aliases=aliases, help=help, category=category, progress=progress))
            def wrapped_function(*args):
                return function(*args)
            return wrapped_function
        return wrap
{
"api_name": "art.tprint",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "art.tprint",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sh3ll.command.command",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "sh3ll.command.command.nam... |
855722734 | #!/usr/bin/env python
# Demonstrate how to use the vtkBoxWidget to control volume rendering
# within the interior of the widget.
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Load a volume, use the widget to control what's volume
# rendered. Basically the idea is that the vtkBoxWidget provides a box
# which clips the volume rendering.
v16 = vtk.vtkVolume16Reader()
v16.SetDataDimensions(64, 64)
v16.GetOutput().SetOrigin(0.0, 0.0, 0.0)
v16.SetDataByteOrderToLittleEndian()
v16.SetFilePrefix(VTK_DATA_ROOT+ "/Data/headsq/quarter")
v16.SetImageRange(1, 93)
v16.SetDataSpacing(3.2, 3.2, 1.5)
tfun = vtk.vtkPiecewiseFunction()
tfun.AddPoint(70.0, 0.0)
tfun.AddPoint(599.0, 0)
tfun.AddPoint(600.0, 0)
tfun.AddPoint(1195.0, 0)
tfun.AddPoint(1200, .2)
tfun.AddPoint(1300, .3)
tfun.AddPoint(2000, .3)
tfun.AddPoint(4095.0, 1.0)
ctfun = vtk.vtkColorTransferFunction()
ctfun.AddRGBPoint(0.0, 0.5, 0.0, 0.0)
ctfun.AddRGBPoint(600.0, 1.0, 0.5, 0.5)
ctfun.AddRGBPoint(1280.0, 0.9, 0.2, 0.3)
ctfun.AddRGBPoint(1960.0, 0.81, 0.27, 0.1)
ctfun.AddRGBPoint(4095.0, 0.5, 0.5, 0.5)
compositeFunction = vtk.vtkVolumeRayCastCompositeFunction()
volumeMapper = vtk.vtkVolumeRayCastMapper()
volumeMapper.SetInputConnection(v16.GetOutputPort())
volumeMapper.SetVolumeRayCastFunction(compositeFunction)
volumeProperty = vtk.vtkVolumeProperty()
volumeProperty.SetColor(ctfun)
volumeProperty.SetScalarOpacity(tfun)
volumeProperty.SetInterpolationTypeToLinear()
volumeProperty.ShadeOn()
newvol = vtk.vtkVolume()
newvol.SetMapper(volumeMapper)
newvol.SetProperty(volumeProperty)
outline = vtk.vtkOutlineFilter()
outline.SetInputConnection(v16.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
# Create the RenderWindow, Renderer and both Actors
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# The SetInteractor method is how 3D widgets are associated with the
# render window interactor. Internally, SetInteractor sets up a bunch
# of callbacks using the Command/Observer mechanism (AddObserver()).
boxWidget = vtk.vtkBoxWidget()
boxWidget.SetInteractor(iren)
boxWidget.SetPlaceFactor(1.0)
# Add the actors to the renderer, set the background and size
ren.AddActor(outlineActor)
ren.AddVolume(newvol)
ren.SetBackground(0, 0, 0)
renWin.SetSize(300, 300)
# When interaction starts, the requested frame rate is increased.
def StartInteraction(obj, event):
    """Box-widget callback: raise the requested frame rate while the user is
    interacting so dragging stays responsive (lower-resolution renders)."""
    global renWin
    renWin.SetDesiredUpdateRate(10)
# When interaction ends, the requested frame rate is decreased to
# normal levels. This causes a full resolution render to occur.
def EndInteraction(obj, event):
    """Box-widget callback: restore a low desired update rate, which causes a
    full-resolution render once interaction ends."""
    global renWin
    renWin.SetDesiredUpdateRate(0.001)
# The implicit function vtkPlanes is used in conjunction with the
# volume ray cast mapper to limit which portion of the volume is
# volume rendered.
planes = vtk.vtkPlanes()
def ClipVolumeRender(obj, event):
    """Box-widget callback: copy the widget's clipping planes into `planes`
    and hand them to the volume mapper so only the box interior is volume
    rendered."""
    global planes, volumeMapper
    obj.GetPlanes(planes)
    volumeMapper.SetClippingPlanes(planes)
# Place the interactor initially. The output of the reader is used to
# place the box widget.
boxWidget.SetInput(v16.GetOutput())
boxWidget.PlaceWidget()
boxWidget.InsideOutOn()
boxWidget.AddObserver("StartInteractionEvent", StartInteraction)
boxWidget.AddObserver("InteractionEvent", ClipVolumeRender)
boxWidget.AddObserver("EndInteractionEvent", EndInteraction)
outlineProperty = boxWidget.GetOutlineProperty()
outlineProperty.SetRepresentationToWireframe()
outlineProperty.SetAmbient(1.0)
outlineProperty.SetAmbientColor(1, 1, 1)
outlineProperty.SetLineWidth(3)
selectedOutlineProperty = boxWidget.GetSelectedOutlineProperty()
selectedOutlineProperty.SetRepresentationToWireframe()
selectedOutlineProperty.SetAmbient(1.0)
selectedOutlineProperty.SetAmbientColor(1, 0, 0)
selectedOutlineProperty.SetLineWidth(3)
iren.Initialize()
renWin.Render()
iren.Start()
| VisTrails/VisTrails | examples/vtk_examples/GUI/VolumeRenderWithBoxWidget.py | VolumeRenderWithBoxWidget.py | py | 4,067 | python | en | code | 100 | github-code | 6 | [
{
"api_name": "vtk.util.misc.vtkGetDataRoot",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "vtk.vtkVolume16Reader",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "vtk.vtkPiecewiseFunction",
"line_number": 21,
"usage_type": "call"
},
{
"api_na... |
33522558184 | from django.contrib.auth import get_user_model
from django.utils import timezone
from django.core.mail import send_mail
User = get_user_model()
def wish_birthday():
    """Email a birthday greeting to every user whose birthday is today."""
    today = timezone.now().date()
    birthday_users = User.objects.filter(birthday__day=today.day, birthday__month=today.month)
    for user in birthday_users:
        subject = 'Birthday Wish!'
        body = 'Hi {},\n Happy Birthday!!!'.format(user.username)
        send_mail(subject, body, 'contact@yourdomain.com', [user.email])
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 9,
"usage_type": "name"
},
{
"a... |
20825309063 | from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
def get_feature_matrix(N = 55):
    """Return an N x 3 feature matrix: mean red/green/blue value per image.

    Reads images/image_1.jpg ... images/image_N.jpg. The per-pixel Python
    loops of the original are replaced by one vectorized mean per channel
    (identical values, orders of magnitude faster); this also removes the
    reuse of `x` as both a list and a loop variable.
    """
    x_vec = np.zeros((N, 3))
    for i in range(N):
        im = Image.open("images/image_{number}.jpg".format(number=i+1))
        rgb = np.asarray(im.convert('RGB'), dtype=float)  # H x W x 3
        x_vec[i] = rgb.reshape(-1, 3).mean(axis=0)
    return x_vec
def get_labels(N=55):
    """Return an (N, 1) label vector: the first 20 images (grass) are 1, the
    rest (soil/tiles) are 0.

    Fixes the original's duplicate zero-initialization and the IndexError it
    raised for N < 20 (the hard-coded range(0, 20) ran past the array end);
    the second loop was redundant because np.zeros already yields zeros.
    """
    y = np.zeros((N, 1))
    y[:min(20, N)] = 1
    return y
def sigmoid_func(z):
    """Elementwise logistic sigmoid: 1 / (1 + exp(-z))."""
    return 1.0 / (1.0 + np.exp(-z))
def gradient(X, y, w):
    """Mean gradient of the logistic loss w.r.t. the 1 x d weight row w."""
    predictions = sigmoid_func(np.dot(X, np.transpose(w)))  # N x 1
    residual = predictions - y  # equals -(y - p) in the original
    return np.transpose(np.dot(np.transpose(X), residual)) / len(X)
def logisticRegression_func(X, y, step_size, K):
    """Train logistic regression with K steps of gradient descent.

    Returns (loss_list, w): the per-iteration mean cross-entropy losses and
    the final 1 x d weight row.

    BUG FIX: the original indexed loss_list[i] before anything had been
    appended, raising IndexError on the first iteration (the gradient call
    that should have filled it was commented out). The update step now uses
    gradient() directly, and the loss is a plain scalar with a consistent
    1/N normalization on both terms.
    """
    N = X.shape[0]
    d = X.shape[1]
    # Initialize w as a 1 x d row of zeros.
    w = np.zeros((1, d))
    loss_list = []
    for _ in range(K):
        p = sigmoid_func(np.dot(X, np.transpose(w)))
        cost = float(np.mean(-y * np.log(p) - (1 - y) * np.log(1 - p)))
        loss_list.append(cost)
        w = w - step_size * gradient(X, y, w)
    return loss_list, w
""" Predict Output """
def predict_output(X, w):
    """Threshold the sigmoid scores of X @ w.T at 0.5 into 0/1 labels."""
    scores = sigmoid_func(np.dot(X, np.transpose(w)))
    return [1 if score >= 0.5 else 0 for score in scores]
y = get_labels()
X = get_feature_matrix()
# Full Vector
# Let s label : Grass = 1 , Soil = 0, Tiles = 0
assert X.shape == (55,3)
#axes = Visualize_data(X,y)
step_size = 1e-5
num_iter = 3000
e_list, w_opt = logisticRegression_func(X,y,step_size,num_iter)
print ('The optimal weight vector is:', w_opt)
y_hat = predict_output(X,w_opt)
def visualize_error(X, y, step_sizes, best = None, num_iter = 2000):
    """Plot GD loss curves for several step sizes in two side-by-side panels.

    Left panel: one loss curve per step size. Right panel: the same curves,
    with the curve for `best` drawn in red. Returns (best, axes).
    NOTE(review): each step size is trained twice (once per panel), and the
    initial plt.figure() creates an extra unused figure.
    """
    plt.figure(figsize=(12, 4))
    fig, axes = plt.subplots(1, 2,figsize=(12, 4))
    for step in step_sizes:
        loss_list, w_opt = logisticRegression_func(X, y, step, num_iter)
        #raise NotImplementedError()
        n = len(loss_list) # Size of list remains the same.
        x_axes = np.linspace(0,n,n,endpoint=False)
        axes[0].plot(x_axes, loss_list, label=step)
        axes[0].set_xlabel('Number of Iterations')
        axes[0].set_ylabel('Loss Function')
        axes[0].legend()
        axes[0].set_title(r'$\bf{Figure\ 4.}$Converge of GD')
    for step in step_sizes:
        ### STUDENT TASK ###
        # Plot Error against Step Size.
        # Now mark the best converge in red. Use value from best as a correct step size.
        loss_list, w_opt = logisticRegression_func(X, y, step, num_iter)
        # YOUR CODE HERE
        #raise NotImplementedError()
        n = len(loss_list) # Size of list remains the same.
        x_axes = np.linspace(0,n,n,endpoint=False)
        if step == best:
            axes[1].plot(x_axes, loss_list, label=step, color="red")
        else:
            axes[1].plot(x_axes, loss_list, label=step, color="blue")
        axes[1].set_xlabel('Number of Iterations')
        axes[1].set_ylabel('Loss Function')
        axes[1].legend()
        axes[1].set_title(r'$\bf{Figure\ 5.}$Converge of GD')
    plt.tight_layout()
    return best, axes
### STUDENT TASK ###
# Change best=None into step size from the list that provides the fastest converge. e.g best=1
res0_1, axes = visualize_error(X/255, y, best=None, step_sizes=[0.1,0.5,1,5,10,16])
# YOUR CODE HERE
#raise NotImplementedError() | laurivoipio/MLBP | Round3 - Classification/ML3_2.py | ML3_2.py | py | 4,065 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.zeros",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "numpy.mean",
"line_number": 2... |
35351271287 | '''
started on 2022/06/13
end on 2022/xx/xx
@author zelo2
'''
import torch
import torch.nn as nn
class LightGCN(nn.Module):
    """LightGCN collaborative-filtering model.

    Propagates user/item embeddings over a normalized user-item adjacency
    matrix with parameter-free graph convolutions, averages the per-layer
    embeddings, and is trained with a BPR pairwise ranking loss.
    """
    def __init__(self, n_user, n_item, norm_adj, device, args):
        super(LightGCN, self).__init__()
        self.device = device
        self.n_user = n_user
        self.n_item = n_item
        self.norm_adj = norm_adj
        self.embed_size = args.embed_size
        self.batch_size = args.batch_size
        self.layer_num = args.layer_num
        # NOTE(review): eval() on a config string - fine for trusted CLI
        # input only; consider ast.literal_eval.
        self.reg_value = eval(args.reg)[0]
        self.embeding_dict = self.init_weight()
        self.sp_norm_adj = self.convert_coo_matirix_2_sp_tensor(self.norm_adj).to(self.device)

    def init_weight(self):
        '''Embedding with xavier initialization'''
        initializer = nn.init.xavier_uniform_
        embedding_dict = nn.ParameterDict({
            'user_embed': nn.Parameter(initializer(torch.empty(self.n_user,
                                                               self.embed_size))),
            'item_embed': nn.Parameter(initializer(torch.empty(self.n_item,
                                                               self.embed_size)))
        })
        return embedding_dict

    def convert_coo_matirix_2_sp_tensor(self, X):
        """Convert a scipy sparse matrix into a torch sparse float tensor."""
        coo = X.tocoo()
        # COO layout: (data, (row, col)) - each data value sits at [row, col].
        i = torch.LongTensor([coo.row, coo.col])  # [row, column]
        v = torch.from_numpy(coo.data).float()  # data
        return torch.sparse.FloatTensor(i, v, coo.shape)

    def sparse_dropout(self, x, rate, noise_shape):
        """Drop entries of sparse tensor x with probability `rate` and
        rescale the survivors by 1/(1 - rate) (inverted dropout).

        BUG FIX: the original built the mask with torch.float(...), but
        torch.float is a dtype, not a callable, so this method always raised
        TypeError. torch.floor(...) (as in the reference NGCF code) turns the
        shifted uniform noise into a 0/1 keep mask.
        """
        save_probability = 1 - rate
        # rand in [0, 1): after the shift, floor() is 1 with prob 1 - rate.
        save_probability += torch.rand(noise_shape)
        dropout_mask = torch.floor(save_probability).type(torch.bool)
        i = x._indices()
        v = x._values()
        i = i[:, dropout_mask]
        v = v[dropout_mask]
        out = torch.sparse.FloatTensor(i, v, x.shape)
        return out * (1. / (1 - rate))  # re-normalize the kept entries

    def forward(self, user, pos_item, neg_item, drop_flag=False):
        """Propagate embeddings layer_num times, average the layers, and
        return the (user, positive item, negative item) embedding batches."""
        A = self.sp_norm_adj
        embedding_matrix = torch.cat([self.embeding_dict['user_embed'], self.embeding_dict['item_embed']]
                                     , 0)  # [M+N, embedding_size]
        embedding_matrix = embedding_matrix.to(self.device)
        all_embeddings = [embedding_matrix]
        for k in range(self.layer_num):
            # Graph Convolution operation without self connection
            embedding_matrix = torch.sparse.mm(A, embedding_matrix)
            all_embeddings += [embedding_matrix]
        # LightGCN layer combination: mean over layers 0..layer_num.
        all_embeddings = torch.stack(all_embeddings, dim=1)
        all_embeddings = torch.mean(all_embeddings, dim=1)
        user_embeddings = all_embeddings[:self.n_user, :]
        item_embeddings = all_embeddings[self.n_user:, :]
        user_embeddings = user_embeddings[user, :]
        pos_item_embeddings = item_embeddings[pos_item, :]
        neg_item_embeddings = item_embeddings[neg_item, :]
        return user_embeddings, pos_item_embeddings, neg_item_embeddings  # [batch_size, embed_size] * 3

    def bpr_loss(self, users, pos_items, neg_items):
        '''
        :param users: user embeddings [batch_size, embed_size]
        :param pos_items: positive item embeddings
        :param neg_items: negative item embeddings
        :return: Bayesian Personalized Ranking loss (BPR loss) plus L2 reg
        '''
        pos_inner_product = torch.mul(users, pos_items)
        neg_inner_product = torch.mul(users, neg_items)
        pos_inner_product = torch.sum(pos_inner_product, axis=1)  # row sums -> [batch_size]
        neg_inner_product = torch.sum(neg_inner_product, axis=1)
        loss_value = nn.LogSigmoid()(pos_inner_product - neg_inner_product)
        loss_value = -1 * torch.mean(loss_value)
        # L2 norm: square root of the sum of squared elements.
        l2_value = torch.norm(users, p=2) ** 2 + torch.norm(pos_items, p=2) ** 2 + torch.norm(neg_items, p=2) ** 2
        l2_value /= 2
        l2_value = self.reg_value * l2_value / self.batch_size
        return loss_value + l2_value
| zelo2/NGCF | LightGCN/lightGCN_model.py | lightGCN_model.py | py | 4,989 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torch.nn.init",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line... |
17260440247 | """
Module that can parse chess notation for individual moves. Mostly to debug
things and/or introduce chess states without having to wire up the entire
camera setup on a physical board.
Note that we're using standard Algebraic Notation:
https://en.wikipedia.org/wiki/Algebraic_notation_(chess)
Maybe we move on to FEN https://en.wikipedia.org/wiki/Forsyth%E2%80%93Edwards_Notation
to start from boards?
BUGS:
- Doesn't handle pawn promotions
- Doesn't handle disambiguations (when two knights can reach the same place)
"""
import re
from enum import Enum
from collections import namedtuple
# Data definitions. Currently don't allow for draws.
Piece = Enum('Piece', 'Pawn Rook Knight Bishop Queen King')
Action = Enum('Action', 'Move Capture CastleKingside CastleQueenside PawnPromotion')
Modifier = Enum('Modifier', 'Check CheckMate')
Col = Enum('Col', 'A B C D E F G H')
Row = Enum('Row', 'One Two Three Four Five Six Seven Eight')
Position = namedtuple('Position', 'row col')
Move = namedtuple('Move', 'piece action position modifiers')
# Black could be None in the case of a white Checkmate
Turn = namedtuple('Turn', 'white black')
LINE_REGEX = re.compile('(?:\d+\.\s+)\s*(\S+)(?:\s+(\S+)\s*)?$')
POSITION_PATTERN = '([a-h])(1|2|3|4|5|6|7|8)'
POSITION_REGEX = re.compile(POSITION_PATTERN)
PIECE_MAP = {
'B': Piece.Bishop,
'R': Piece.Rook,
'Q': Piece.Queen,
'K': Piece.King,
'N': Piece.Knight
}
COL_MAP = {'a': Col.A, 'b': Col.B, 'c': Col.C, 'd': Col.D, 'e': Col.E, 'f': Col.F, 'g': Col.G, 'h': Col.H}
ROW_MAP = {
'1': Row.One,
'2': Row.Two,
'3': Row.Three,
'4': Row.Four,
'5': Row.Five,
'6': Row.Six,
'7': Row.Seven,
'8': Row.Eight
}
ACTION_MAP = {
'x': Action.Capture,
'O-O': Action.CastleKingside,
'O-O-O': Action.CastleQueenside,
'=': Action.PawnPromotion
}
def parse_file(filename):
    """Parse a whole game file into a list of Turn tuples, one per line."""
    with open(filename) as game_file:
        return [parse_line(raw.rstrip('\n')) for raw in game_file.readlines()]
def parse_line(line):
    """Parse one numbered line (e.g. "1. e4 e5") into a Turn.

    The black half may be absent (e.g. after a white checkmate), in which
    case Turn.black is None.
    """
    match = LINE_REGEX.match(line)
    white = _parse_move(match.group(1))
    black_spec = match.group(2)
    black = _parse_move(black_spec) if black_spec else None
    return Turn(white=white, black=black)
def _parse_move(move):
    """Turn a single move spec into a Move; castling is handled up front
    (queenside must be checked before kingside since 'O-O' prefixes both)."""
    if re.match('O-O-O', move):
        return Move(piece=None, action=Action.CastleQueenside, position=None, modifiers=[])
    if re.match('O-O', move):
        return Move(piece=None, action=Action.CastleKingside, position=None, modifiers=[])
    return Move(
        piece=_get_piece(move),
        action=_get_action(move),
        position=_get_position(move),
        modifiers=_get_modifiers(move),
    )
def _get_piece(move):
    """
    The piece is relatively easy to determine: it's either a pawn, or directly
    determined by its first letter. Gets _a little_ weird when pawns capture,
    so we default to pawn if the first character isn't a recognized one.
    """
    if re.search('^' + POSITION_PATTERN, move):
        return Piece.Pawn
    return PIECE_MAP.get(move[0], Piece.Pawn)
def _get_action(move):
    """Map a move spec to its Action; defaults to a plain move.

    BUG FIX: dict.iterkeys() is Python 2 only and raises AttributeError on
    Python 3; iterating the dict directly works on both.
    """
    for pattern in ACTION_MAP:
        if re.search(pattern, move):
            return ACTION_MAP[pattern]
    return Action.Move
def _get_position(move):
    """
    The position is determined by one of the acceptable file letters followed
    by an acceptable rank number, wherever it appears in the spec.
    """
    square = POSITION_REGEX.search(move)
    return Position(col=COL_MAP[square.group(1)], row=ROW_MAP[square.group(2)])
def _get_modifiers(move):
    """Collect the check ('+') or checkmate ('#') marker, if any."""
    if re.search(r'\+', move):
        return [Modifier.Check]
    if re.search('#', move):
        return [Modifier.CheckMate]
    return []
def test_data():
    """Sample parsed output: the opening "1. e4 e5" as a one-Turn list."""
    return [
        Turn(white=Move(piece=Piece.Pawn,
                        action=Action.Move,
                        position=Position(col=Col.E, row=Row.Four),
                        modifiers=[]),
             black=Move(piece=Piece.Pawn,
                        action=Action.Move,
                        position=Position(col=Col.E, row=Row.Five),
                        modifiers=[])),
    ]
| stay-whimsical/screamchess | src/chess/parser.py | parser.py | py | 4,334 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "enum.Enum",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "enum.Enum",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "enum.Enum",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "enum.Enum",
"line_number": 27,
... |
13300580084 | # -*- coding: utf-8 -*-
"""
Helper functions for classification and quantization
Created on Mon Dec 5 14:50:27 2016
@author: brady
"""
import os
import numpy as np
from sklearn.tree import tree, _tree
def quantize(data, precision):
    """
    Turns floating point into fixed point data
    :param data: vector to quantize, assumes np-array
    :param precision: number of fixed points bits to used
    :returns: vector of length[data], with precision bits
    """
    data = np.array(data)
    data = data*1e5
    xmax = np.amax(np.abs(data))
    if xmax == 0:
        # BUG FIX: an all-zero input made the original divide by zero and
        # return NaNs (the commented-out guard hinted at this). Zeros are
        # exactly representable at any precision, so return them unchanged.
        return data / 1e5
    # Round to precision-1 fractional bits, clamping the top code to
    # 1 - 2^-(precision-1) so values never overflow the fixed-point range.
    xq = xmax * np.minimum(
            np.round(data*(2**(precision-1))/xmax) / (2**(precision-1)),
            1-1/(2**(precision-1))
            )
    return xq/1e5
def tree_to_code(tree, feature_names, precision):
    """Render a fitted DecisionTreeClassifier as Python source for a function
    `tree_<precision>b(features)` whose split thresholds are quantized to
    `precision` fixed-point bits.

    NOTE(review): the parameter `tree` shadows the imported sklearn `tree`
    module; inside this function the name always refers to the classifier.
    """
    tree_ = tree.tree_
    feature_name = [
        feature_names[i] if i != _tree.TREE_UNDEFINED else "undefined!"
        for i in tree_.feature
    ]
    # Leaf nodes carry sentinel thresholds; replace non-positive values with
    # the smallest magnitude so quantize() stays numerically stable.
    valid_thresh = [
        t if t > 0 else np.min(np.abs(tree_.threshold))
        for t in tree_.threshold
    ]
    quant_thresh = quantize(valid_thresh, precision)
    def recurse(node, depth, quant_tree_str):
        # Depth-first walk: emit an if/else per split, a return per leaf.
        indent = "  " * depth
        if tree_.feature[node] != _tree.TREE_UNDEFINED:
            name = feature_name[node]
            threshold = quant_thresh[node]
            quant_tree_str += "{}if {} <= {}:\n".format(indent, name, threshold)
            quant_tree_str += recurse(tree_.children_left[node], depth + 1, '')
            quant_tree_str += "{}else: # if {} > {}\n".format(indent, name, threshold)
            quant_tree_str += recurse(tree_.children_right[node], depth + 1, '')
            return quant_tree_str
        else:
            # Leaf: predict the majority class at this node.
            quant_tree_str += "{}return {}\n".format(indent, np.argmax(tree_.value[node]))
            return quant_tree_str
    quant_tree_str = "def tree_{}b(features):\n".format(precision)
    quant_tree_str = recurse(0, 1, quant_tree_str)
    return quant_tree_str
def gen_quant_trees_str(tree, precisions):
    """Concatenate generated source for the tree at every requested precision,
    separating each function with a comment rule."""
    separator = "#" * 50 + "\n"
    pieces = []
    for prec in precisions:
        feature_refs = ['features[%d]' % idx for idx in range(20)]
        pieces.append(tree_to_code(tree, feature_refs, prec))
        pieces.append(separator)
    return ''.join(pieces)
def make_quant_trees_module(filename, tree, precisions):
    """Write the generated quantized-tree functions out as a Python module."""
    module_text = gen_quant_trees_str(tree, precisions)
    with open(filename, 'w') as out_file:
        out_file.write(module_text)
def get_tree_results(tree, Xtest):
    """
    Runs data through a quantized DecisionTreeClassifier
    :param tree: DTC function handle
    :param Xtest: data to test
    :returns: predicted results as a column vector
    """
    predictions = np.array([[tree(row) for row in Xtest]], ndmin=1)
    return predictions.T
if __name__ == '__main__':
    # Train a depth-limited decision tree on the 130k-sample feature dump,
    # then print its quantized source at 16-bit precision.
    DIR = r'C:\Users\brady\GitHub\MinVAD\feature_extract'
    tr_data = np.load(os.path.join(DIR, 'train_130k.npy'))
    tr_class = np.load(os.path.join(DIR, 'train_130k_class.npy'))
    # stack features and labels so they shuffle together
    myData = np.hstack((tr_data, tr_class))
    np.random.shuffle(myData)
    # 80/20 train/test split point
    cutoff = int(np.floor(0.8 * len(tr_class)))
    clf = tree.DecisionTreeClassifier(max_depth = 5)
    # NOTE(review): ':19' selects feature columns 0-18 while the target is
    # column 20 -- column 19 is silently dropped.  If tr_data has 20 feature
    # columns this looks like an off-by-one (should be ':20'); confirm.
    clf = clf.fit(myData[:cutoff, :19], myData[:cutoff, 20])
    # np.arange(16, 15, -1) yields just [16]: a single 16-bit tree
    test_str = gen_quant_trees_str(clf, np.arange(16, 15, -1))
print(test_str) | bradysalz/MinVAD | classifier/training_helpers.py | training_helpers.py | py | 3,449 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.array",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.minimum",
"line_number": 2... |
30168367656 | # %%
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import ks_2samp
import seaborn as sns
import pandas as pd
import random
from collections import defaultdict
from scipy.stats import ks_2samp, wasserstein_distance
from doubt import Boot
from nobias import ExplanationShiftDetector
random.seed(0)
# Scikit Learn
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import roc_auc_score, mean_squared_error
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPRegressor
from sklearn.linear_model import LogisticRegression, Lasso, LinearRegression
from sklearn.ensemble import (
GradientBoostingRegressor,
GradientBoostingClassifier,
)
plt.style.use("seaborn-whitegrid")
from matplotlib import rcParams
rcParams["axes.labelsize"] = 14
rcParams["xtick.labelsize"] = 12
rcParams["ytick.labelsize"] = 12
rcParams["figure.figsize"] = 16, 8
rcParams.update({"font.size": 22})
from xgboost import XGBRegressor, XGBClassifier
import shap
from alibi_detect.cd import ChiSquareDrift, TabularDrift, ClassifierDrift
from tqdm import tqdm
import lime.lime_tabular
import os
import sys
def blockPrint():
    # Globally silence print() output for the rest of the run by pointing
    # stdout at the null device (the handle is intentionally left open for
    # the lifetime of the process).
    sys.stdout = open(os.devnull, "w")
blockPrint()
# %%
# %%
def create_explanation(X, model):
    """Build a DataFrame of LIME feature weights, one row per row of X.

    Relies on a module-level `explainer` (LimeTabularExplainer) being set up
    before this is called -- TODO confirm callers always do so.
    """
    # empty frame with X's columns to accumulate per-row explanations
    exp = X.copy()[:0]
    for i, _ in tqdm(enumerate(X.iterrows())):
        ex = explainer.explain_instance(X.iloc[i], model.predict)
        # local_exp[0] holds (feature_index, weight) pairs; sort so the
        # weights line up with X's column order
        exx = pd.DataFrame(ex.local_exp[0], columns=["feature", "weight"]).sort_values(
            "feature"
        )
        exx.feature = X.columns
        exx = exx.T
        # Make header first row
        new_header = exx.iloc[0] # grab the first row for the header
        exx = exx[1:] # take the data less the header row
        exx.columns = new_header
        exx.reset_index(inplace=True)
        exp = pd.concat([exp, exx])
    return exp
def train_esd(X, X_ood, model, detector):
    """Fit `detector` to tell explanations of in-distribution data (label 0)
    from explanations of OOD data (label 1).

    :param X: in-distribution samples
    :param X_ood: out-of-distribution samples
    :param model: fitted model whose predictions LIME explains
    :param detector: classifier with fit/predict_proba
    :returns: ROC AUC of the detector on a held-out half of the pooled data
    """
    aux = create_explanation(X, model)
    aux["y"] = 0
    aux_ood = create_explanation(X_ood, model)
    aux_ood["y"] = 1
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat([...]) is the drop-in replacement.
    df = pd.concat([aux, aux_ood]).drop(columns=["index"])
    X_tr, X_te, y_tr, y_te = train_test_split(
        df.drop("y", axis=1), df["y"], test_size=0.5, random_state=42
    )
    detector.fit(X_tr, y_tr)
    # return auc
    return roc_auc_score(y_te, detector.predict_proba(X_te)[:, 1])
# %%
res = []
for i in np.linspace(0, 1, 11):
rho = i
## Sensitivity experiment
sigma = 1
mean = [0, 0]
cov = [[sigma, 0], [0, sigma]]
samples = 5_000
x1, x2 = np.random.multivariate_normal(mean, cov, samples).T
x3 = np.random.normal(0, sigma, samples)
# Different values
mean = [0, 0]
cov = [[sigma, rho], [rho, sigma]]
x11, x22 = np.random.multivariate_normal(mean, cov, samples).T
x33 = np.random.normal(0, sigma, samples)
# Create Data
df = pd.DataFrame(data=[x1, x2, x3]).T
df.columns = ["Var%d" % (i + 1) for i in range(df.shape[1])]
# df["target"] = np.where(df["Var1"] * df["Var2"] > 0, 1, 0)
df["target"] = (
df["Var1"] * df["Var2"] + df["Var3"] + np.random.normal(0, 0.1, samples)
)
df["target"] = np.where(df["target"] > df["target"].mean(), 1, 0)
X_ood = pd.DataFrame(data=[x11, x22, x33]).T
X_ood.columns = ["Var%d" % (i + 1) for i in range(X_ood.shape[1])]
## Split Data
X_tr, X_te, y_tr, y_te = train_test_split(df.drop(columns="target"), df["target"])
## Fit our ML model
model = GradientBoostingClassifier()
model_r = GradientBoostingRegressor()
# model = LinearRegression()
model.fit(X_tr, y_tr)
model_r.fit(X_tr, y_tr)
# Input KS Test
input_ks = 1
# Classifier Drift
classifierDrift = 1
# Output test
output_ks = 1
wass = 1
# Uncertainty
unc = 1
# Explanation Shift
ESD = ExplanationShiftDetector(
model=XGBClassifier(),
gmodel=Pipeline(
[
("scaler", StandardScaler()),
("lr", LogisticRegression(penalty="l1", solver="liblinear")),
]
),
)
ESD.fit(X_tr, y_tr, X_ood)
esd = ESD.get_auc_val()
# Lime
explainer = lime.lime_tabular.LimeTabularExplainer(
X_tr.values,
feature_names=X_tr.columns,
class_names=["y"],
discretize_continuous=True,
verbose=True,
mode="regression",
)
auc_lime = train_esd(
X_te, X_ood, XGBClassifier().fit(X_tr, y_tr), LogisticRegression()
)
res.append([rho, input_ks, classifierDrift, output_ks, wass, unc, esd, auc_lime])
# %%
results = pd.DataFrame(
res,
columns=[
"rho",
"input_ks",
"classifierDrift",
"output_ks",
"wass",
"unc",
"esd",
"lime",
],
)
# %%
plt.figure()
plt.plot(results["rho"], results["esd"], label="Explanation Shift - SHAP")
ci = 1.96 * np.std(results["esd"]) / np.sqrt(len(results["rho"]))
plt.fill_between(
results["rho"], (results["esd"] - ci), (results["esd"] + ci), alpha=0.1
)
plt.plot(results["rho"], results["lime"], label="Explanation Shift - Lime")
ci = 1.96 * np.std(results["lime"]) / np.sqrt(len(results["rho"]))
plt.fill_between(
results["rho"], (results["lime"] - ci), (results["lime"] + ci), alpha=0.1
)
plt.legend()
plt.xlabel("Correlation coefficient")
plt.ylabel("AUC Explanation Shift Detector")
plt.title("Sensitivity to Multicovariate Shift")
plt.tight_layout()
plt.savefig("images/SOTAsensitivityLime.pdf", bbox_inches="tight")
plt.show()
# %%
| cmougan/ExplanationShift | syntheticLime.py | syntheticLime.py | py | 5,594 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "random.seed",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "... |
14762866711 | from tkinter import *
import tkinter.font
from gpiozero import LED
import RPi.GPIO
RPi.GPIO.setmode(RPi.GPIO.BCM)
blue = LED(2)
green = LED(3)
red = LED(4)
win = Tk()
win.title("LED GUI Toggler")
myFont = tkinter.font.Font(family = 'Helvetica', size = 12, weight = "bold")
def ledToggleBlue():
    # Flip the blue LED and relabel its button to show the next action.
    if not blue.is_lit:
        blue.on()
        blueButton["text"] = "Turn Blue LED off"
    else:
        blue.off()
        blueButton["text"] = "Turn Blue LED on"
def ledToggleRed():
    # Flip the red LED and relabel its button to show the next action.
    if not red.is_lit:
        red.on()
        redButton["text"] = "Turn Red LED off"
    else:
        red.off()
        redButton["text"] = "Turn Red LED on"
def ledToggleGreen():
    # Flip the green LED and relabel its button to show the next action.
    if not green.is_lit:
        green.on()
        greenButton["text"] = "Turn Green LED off"
    else:
        green.off()
        greenButton["text"] = "Turn Green LED on"
def close():
    # Release the GPIO pins before tearing down the Tk main window.
    RPi.GPIO.cleanup()
    win.destroy()
blueButton = Button(win, text = 'Turn Blue LED on', font = myFont, command = ledToggleBlue, bg = 'blue', height = 1, width = 24)
blueButton.grid(row=0,column=1)
redButton = Button(win, text = 'Turn Red LED on', font = myFont, command = ledToggleRed, bg = 'red', height = 1, width = 24)
redButton.grid(row=1,column=1)
greenButton = Button(win, text = 'Turn Green LED on', font = myFont, command = ledToggleGreen, bg = 'green', height = 1, width = 24)
greenButton.grid(row=2,column=1)
exitButton = Button(win, text = 'Exit', font = myFont, command = close, bg = 'red2', height = 1, width = 6)
exitButton.grid(row=3,column=1)
| chris-yl31/SIT210-Task5.2C-RPiGUI | GUI.py | GUI.py | py | 1,525 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "RPi.GPIO.GPIO.setmode",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO.GPIO",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "gpiozero.LED",
"li... |
31179240116 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
demand = [990,1980,3961,2971,1980]
d=0 # d% shortage allowance
Y_b = [1.3086,1.3671,1.4183,1.4538,1.5122] # Fabric yield (consumption rate) rate per garment of size 饾浗
U = 0.85
l_max= 20
e= .07 # Fabric end allowance
f= 2.90 # Fabric cost
if len(demand)!=len(Y_b):
raise ValueError('number of sizes and number of fabric consumption does not match')
# In[2]:
#Input variables (Marker)
M_d = 10 # Average marker design time (minute)
z = 0.65 # Printing speed per minute
v = 0.30 #Standard cost per minute in marker making floor (labor, machine & electricity)
# In[3]:
#Input variables (Cutting Time)
T_G = 30 # General Preparation Time
x= .20 # Average spreading speed in minutes after taking account for the idle strokes.
T_M= 2 # Time for Placement of the marker
t_c= 4.5 # SMV of cutting time per garment pattern
T_S= 5 # preparation time for sticker placement
饾憽_饾憦 = 2.837 # Standard minute value (SMV) of time takes to bundle.
饾憦 = 15 # pieces of garments in one bundle
饾懁 = 0.20 # standard cost per minute in cutting floor (labor, machine & electricity)
P_min, P_max= 10,350
# In[4]:
import numpy as np
import math
import pandas as pd
from copy import deepcopy
rng = np.random.default_rng()
import random
import time
import matplotlib.pylab as plt
import plotly.express as px
# In[5]:
def Update_Res(R, GG, PP):
    """Return the residual demand after subtracting, for every section,
    its garment counts GG[s] multiplied by its ply count PP[s]."""
    residual = R
    for s, garments in enumerate(GG):
        residual = residual - np.dot(garments, PP[s])
    return residual
# In[6]:
def Length(g_i_j):
    """Fabric length for one section: end allowance `e` plus the total
    per-garment consumption scaled by the utilization factor `U`."""
    consumption = np.dot(g_i_j, Y_b)
    return e + consumption / U
# In[7]:
def Shortage_allowance(Q, d=0.01):
    """Scale demand Q down by shortage allowance d and round each size's
    quantity to a whole garment."""
    reduced = np.dot(1 - d, Q)
    return [round(quantity) for quantity in reduced]
Q_b= Shortage_allowance(demand,d)
# Q_b
# In[8]:
from Heuristics import H1,H3,H5
# In[9]:
# Sol_1 = H5(Q=Q_b,Y=Y_b,Pm=[P_min,P_max],U=U,e=e,l_max=l_max)
# Sol_1
# ## Objective Function
# In[10]:
def ObjectiveFunction (chromosome):
temp_Chromosome=deepcopy(chromosome)
G_a_b = temp_Chromosome['G']
P_a = temp_Chromosome['P']
Alpha = len(P_a) # number of Sections
''' Fabric Cost '''
# Total fabric length = L # Total Fabric Cost = C_F
l_a=[Length(G_a_b[alpha]) for alpha in range(Alpha) ] #Length function
L= np.dot(l_a,P_a) #Multiply then Sum
C_F = L*f
#print('Total Fabric Cost = C_F: ',C_F)
''' Marker Cost '''
#Marker Making Cost = C_M
M_p_a = [(la-e)/z for la in l_a] # devide each element of a 'l_a' by 'z'
#M_p_a = Marker Printing time (minute) of section alpha
'''
饾憻 = {1 ; 饾憱饾憮 饾憽h饾憭 饾憵饾憥饾憻饾憳饾憭饾憻 饾憱饾憼 饾憦饾憭饾憱饾憶饾憯 饾憿饾憼饾憭饾憫 饾憮饾憸饾憻 饾憽h饾憭 饾憮饾憱饾憻饾憼饾憽 饾憽饾憱饾憵饾憭
{0 ; 饾憱饾憮 饾憽h饾憭 饾憵饾憥饾憻饾憳饾憭饾憻 h饾憥饾憼 饾憦饾憭饾憭饾憶 饾憿饾憼饾憭饾憫 饾憦饾憭饾憮饾憸饾憻饾憭
'''
r=[]
for i in range(Alpha):
temp=0
j=i-1
while j>=0:
if G_a_b[i]== G_a_b[j]:
temp+=1
break
j-=1
if temp==0:
r.append(1)
else:
r.append(0)
C_M = 0
for 伪 in range(Alpha):
if l_a[伪]>e: # this makes sure that section has at least one garments
C_M += (M_d*r[伪] + M_p_a[伪])*v
# 'if la>e' makes sure that the section contain at least one garments,
# not all G_a_b values are zero
''' Cutting Cost '''
# Cutting Time of one section = T_T # Total Cutting Cost = C_C
#T_T =T_G + T_F +T_M+ T_c+T_S +T_B
T_C=[] #Cutting time for every section
for alpha in range(Alpha):
T_C.append(sum(G_a_b[alpha])*t_c)
T_F=[] # Fab spreading time for each section
for 伪 in range(Alpha):
T_F.append(l_a[伪]*P_a[伪]/x)
T_B=[] #Bundleing time for each section
for 伪 in range(Alpha):
T_B.append(math.ceil(P_a[伪]/b)*sum(G_a_b[伪])*t_b)
T_T_T = 0 #Total cutting time
for 伪 in range(Alpha):
if l_a[伪]>e: # this makes sure that section has at least one garments
T_T_T+=T_G+T_F[伪]+T_M+T_C[伪]+T_S+ T_B[伪]
C_C = T_T_T*w #Total cutting cost
''' Total Cost '''
# Total Cost = C_T = C_F + C_M + C_C
return C_F+C_M+C_C
# In[11]:
# ObjectiveFunction(Sol_1)
# ## Fitness Score
# In[12]:
def Fitness(chromosome):
    """Score a cutting-plan chromosome: production cost plus penalties for
    demand violations.  Lower is better.  Reads module-level `demand`/`Q_b`."""
    t_chromosome=deepcopy(chromosome)
    G_a_b= t_chromosome['G']
    P_a = t_chromosome['P']
    Beta= len(demand)  # number of garment sizes
    score= ObjectiveFunction(t_chromosome)
    #print('score:',score)
    fitness_score=score
    ''' Penalty for shortage production '''
    # positive residual = unmet demand for a size; penalize proportionally
    R= Update_Res(R=demand,GG=G_a_b,PP=P_a)
    for beta in range(Beta):
        if R[beta]>0:
            s_penalty= R[beta]/sum(demand)
            fitness_score +=score*s_penalty
    ''' Penalty for excess production '''
    r=np.dot(1.02,demand) # additional 2% allowance
    # negative residual = production beyond the 2% allowance
    R= Update_Res(R=r,GG=G_a_b,PP=P_a)
    #print(R)
    for beta in range(Beta):
        if R[beta]<0:
            e_penalty= (-R[beta]/sum(demand))*2 # 2times than s_penalty
            fitness_score +=score*e_penalty
    ''' double check if the solution is valid '''
    # large flat penalty if the plan fails to cover the allowed demand Q_b
    res= Update_Res(R=Q_b,GG=G_a_b,PP=P_a)
    if max(res)>0:
        '''solution is unvalid'''
        fitness_score +=10000 #this will eventualy make the solution extinct.
    return fitness_score
# Fitness(Sol_1)
# ## Function Initial Population Generation
# In[13]:
def GeneratePopulation(pop_size):
    """Create an initial swarm of `pop_size` solutions, each sampled from a
    randomly chosen construction heuristic (H1, H3 or H5)."""
    heuristics = (H1, H3, H5)
    population = []
    for _ in range(pop_size):
        generator = np.random.default_rng()
        chosen = heuristics[generator.integers(0, 3)]
        solution = chosen(Q=Q_b, Y=Y_b, Pm=[P_min, P_max], U=U, e=e, l_max=l_max)
        population.append(solution)
    return population
# Pool_of_Sol= GeneratePopulation(100)
# print(Pool_of_Sol)
# In[14]:
def S_with_F(p_o_s):
    """Return a deep copy of the solution pool with the fitness key 'F'
    filled in wherever it is missing."""
    pool = deepcopy(p_o_s)
    for idx, original in enumerate(p_o_s):
        if 'F' not in original:
            pool[idx]['F'] = Fitness(original)
    return pool
# ## PSO
# ### Cleaning section with zeros
# In[15]:
def CleanZeros(Sol):
    """Return a copy of `Sol` with every all-zero garment section removed
    from both the marker list 'G' and the ply list 'P'."""
    Solution = deepcopy(Sol)
    idx = 0
    while idx < len(Solution['G']):
        if max(Solution['G'][idx]) == 0:
            # drop the empty section from both parallel lists
            del Solution['G'][idx]
            del Solution['P'][idx]
        else:
            idx += 1
    # sanity check: markers and ply counts must stay paired
    if len(Solution['G']) != len(Solution['P']):
        raise ValueError('P and G lengths are not same')
    return Solution
# In[16]:
# CleanZeros(Sol_1)
# ## Velocity Update (Jarboui et al. 2008)
# Lets assume 1st sol as X, 2nd Sol as Pbest, and 3rd Sol as Gbest
# #### Now we have to calculate Y
# ##### Initial Velocity generator
# In[17]:
def initial_velocity(Range, Sol):  # Range is a [low, high] pair
    """Draw one uniform random velocity in [Range[0], Range[1]) per section
    of the solution and return it as {'V': [...]}."""
    low, high = Range
    dims = len(Sol['G'])
    samples = np.random.random_sample(dims) * (high - low) + low
    return {'V': samples.tolist()}
# In[18]:
def Get_Y(X, GBest, PBest):  # (Jarboui et al., 2008, p. 302)
    """Encode, per section, whether particle X currently matches GBest (+1),
    PBest (-1), both (random +/-1) or neither (0)."""
    shortest = min(len(X['G']), len(GBest['G']), len(PBest['G']))
    y = []
    for idx in range(shortest):
        matches_gbest = X['G'][idx] == GBest['G'][idx]
        matches_pbest = X['G'][idx] == PBest['G'][idx]
        if matches_gbest and matches_pbest:
            y.append(random.choice([-1, 1]))
        elif matches_gbest:
            y.append(1)
        elif matches_pbest:
            y.append(-1)
        else:
            y.append(0)
    return {'Y': y}
# ### Now we have to calculate Velocity
# In[19]:
def New_V(YY, VV, c1=1, c2=1, w=.75):  # Parameter setting: (Jarboui et al., 2008, p. 306)
    """Velocity update: inertia term `w` plus stochastic pulls toward the
    PBest state (-1) weighted by c1 and the GBest state (+1) weighted by c2."""
    Y = deepcopy(YY)
    V = deepcopy(VV)
    shortest = min(len(Y['Y']), len(V['V']))
    for idx in range(shortest):
        y_val = Y['Y'][idx]
        v_val = V['V'][idx]
        pull_pbest = np.random.rand() * c1 * (-1 - y_val)
        pull_gbest = np.random.rand() * c2 * (1 - y_val)
        V['V'][idx] = w * v_val + pull_pbest + pull_gbest
    return V
# ### Now we need to calculate 位
# In[20]:
def Get_位(YY, VV):
    """位 = Y + V elementwise over the shared prefix of the two vectors."""
    Y = deepcopy(YY)
    V = deepcopy(VV)
    shortest = min(len(Y['Y']), len(V['V']))
    pairs = zip(Y['Y'][:shortest], V['V'][:shortest])
    return {'位': [y_val + v_val for y_val, v_val in pairs]}
# 位=Get_位(Y,V)
# 位
# ### Update X with Eq-10 (Jarboui et al., 2008, p. 303)
# In[21]:
def Perturbation(xg,xp,R,p_rate):
    """Randomly mutate one section's ply count `xp` and garment vector `xg`.

    With probability `p_rate` the ply count is redrawn between its current
    value and the largest remaining demand (clamped to [P_min, P_max]); each
    garment count is likewise redrawn with probability `p_rate`, bounded by
    the remaining demand R and the marker-length limit.  Mutates `xg` in
    place and returns (xg, xp).
    """
    if np.random.rand()<p_rate:
        p1,p2=sorted([xp,min(P_max,max(P_min,max(R)))])
        xp= rng.integers(p1,p2+1)
    if xp<P_min:
        xp=P_min
    for j in range(len(xg)): #small purtubration (like mutaion)
        if np.random.rand()<p_rate:
            xg[j]=0
            # upper bound: enough markers to cover remaining demand at xp
            # plies, capped by the fabric length still free on the marker
            temp= min(math.ceil(R[j]/xp),math.floor((l_max-Length(xg))/(Y_b[j]/U)))
            temp= max(0,temp)
            #xg[j]=max(0,temp)
            xg[j]=rng.integers(0,temp+1)
    return xg,xp
def Update_X(XX,GBest,PBest,位位, 蠒=0.5, p_rate=.05):
X=deepcopy(XX)
位=deepcopy(位位)
lens=[len(i) for i in [X['G'],GBest['G'],PBest['G'],位['位']]]
min_len=min(lens)
XG=[]
XP=[]
R= Update_Res(R=Q_b,GG=XG,PP=XP)
for i in range(min_len):
if 位['位'][i] > 蠒:
#print('Gbest')
#xg,xp=Perturbation(xg=GBest['G'][i],xp=GBest['P'][i],R=R,p_rate=p_rate)
xg=GBest['G'][i]
xp=GBest['P'][i]
elif 位['位'][i] < -蠒:
#print('Pbest')
#xg,xp=Perturbation(xg=GBest['G'][i],xp=GBest['P'][i],R=R,p_rate=p_rate)
xg=PBest['G'][i]
xp=PBest['P'][i]
else:
#print('X')
xg,xp= Perturbation(xg=X['G'][i],xp=X['P'][i],R=R,p_rate=p_rate) #Perturbation function
XG.append(xg)
XP.append(xp)
R= Update_Res(R=Q_b,GG=XG,PP=XP)
if max(R)<=0:
#print('break')
return {'G':XG,'P':XP}
for i in range(min_len, len(X['G'])):
xg,xp= Perturbation(xg=X['G'][i],xp=X['P'][i],R=R,p_rate=p_rate) #Perturbation function
XG.append(xg)
XP.append(xp)
R=Update_Res(R=Q_b,GG=XG,PP=XP)
if max(R)<=0:
#print('break')
return {'G':XG,'P':XP}
if max(R)>0:
#print(R)
#Use H1 or H3 algorithm to pack all sizes
randint =rng.integers(2)
if randint==0:
#print('H1')
h=H1(Q=R,Y=Y_b,Pm=[P_min,P_max],U=U,e=e,l_max=l_max)
else:
#print('H3')
h=H3(Q=R,Y=Y_b,Pm=[P_min,P_max],U=U,e=e,l_max=l_max)
g,p = h.values()
#print(g,p)
XG=XG+g
XP=XP+p
return {'G':XG,'P':XP}
# newX= Update_X(X,Gbest,Pbest,newY)
# newX
# In[22]:
y=[1,2,3,4]
c=[1,2]
print(y[:len(c)])
# In[23]:
def Update_dimension(XX, VV, in_vel_range=(-0.5, 0.5)):
    """Resize velocity VV to match the number of sections in particle XX.

    Shrinks by truncation, grows by appending fresh uniform samples drawn
    from `in_vel_range`.  (Default changed from a mutable list to a tuple;
    callers passing a list are unaffected.)
    """
    target_dim = len(XX['G'])
    current_dim = len(VV['V'])
    if target_dim <= current_dim:
        # BUG FIX: was VV['V'][:current_dim], which slices the list to its
        # own length (a no-op) instead of truncating to the particle's size.
        return {'V': VV['V'][:target_dim]}
    else:
        a, b = in_vel_range
        v = (b - a) * np.random.random_sample(target_dim - current_dim) + a  # http://bit.ly/3To2OWe
        v = v.tolist()
        return {'V': VV['V'] + v}
# In[24]:
def Get_Gbest(p_o_s):
    """Return the solution with the lowest fitness in the pool.

    Evaluates Fitness once per solution; the original recomputed
    Fitness(gbest) on every comparison, doubling the (expensive) calls.
    """
    gbest = p_o_s[0]
    best_fit = Fitness(gbest)
    for candidate in p_o_s[1:]:
        cand_fit = Fitness(candidate)
        if cand_fit < best_fit:
            gbest, best_fit = candidate, cand_fit
    return gbest
# Gbest=Get_Gbest(Pool_of_Sol)
# Gbest
# In[25]:
# newX= Update_X(X,Gbest,Pbest,newY)
# newX
# In[26]:
# Fitness(newX)
# In[27]:
#Pool_of_Sol
# # Main PSO
# In[36]:
get_ipython().run_line_magic('matplotlib', 'inline')
def PSO(swarmsize,iteration,蠒=.7,c1=2,c2=2,w=1, in_vel_range=[-0.6,0.6],p_rate=.2):
P_of_S= GeneratePopulation(swarmsize)
P_of_Pbest=P_of_S
P_of_Velocity= [initial_velocity(in_vel_range,P_of_S[i]) for i in range(len(P_of_S))]
Gbest= P_of_S[rng.integers(0,swarmsize)]
o= Gbest
bests=[Fitness(Gbest)]
for i in range(iteration):
for j in range(len(P_of_S)):
X=P_of_S[j]
Pbest=P_of_Pbest[j]
V= P_of_Velocity[j]
Y= Get_Y(X=X,GBest=Gbest,PBest=Pbest)
newV= New_V(YY=Y,VV=V,c1=c1,c2=c2,w=w)
位= Get_位(YY=Y,VV=newV)
newX= Update_X(XX=X,GBest=Gbest,PBest=Pbest,位位=位,蠒=蠒, p_rate=p_rate)
P_of_S[j]=newX
newV= Update_dimension(XX=newX,VV= newV, in_vel_range=in_vel_range)
P_of_Velocity[j]= newV
f=Fitness(newX)
if f < Fitness(Pbest):
P_of_Pbest[j]= newX
if f < Fitness(Gbest):
Gbest=newX
#print(Gbest, Fitness(Gbest))
bests.append(Fitness(Gbest))
xx=[i for i in range(len(bests))]
fig=px.line(x=xx,
y=bests,
title=f'swarmsize={swarmsize},iteration= {iteration},蠒={蠒},c1= {c1},c2={c2},w={w}, Gbest={bests[-1]}',
labels=dict(x='iteration',y='fitness'))
fig.show()
#plt.plot(xx,bests)
#plt.title(f'swarmsize={swarmsize},iteration= {iteration},蠒={蠒},c1= {c1},c2={c2},w={w}, Gbest={bests[-1]}')
return CleanZeros(Gbest)
PSO(swarmsize=50,iteration=250)
# In[33]:
ObjectiveFunction(o)
# In[34]:
ObjectiveFunction(g)
# In[37]:
Dataset={
'demands':[[872,1743,3486,2614,1743],
[12,67,131,187,191,138,79,27],
[990,1980,3961,2971,1980],
[193,501,1018,1249,998,564,250,128]],
'consumption':[[0.6119,0.6315,0.6499,0.6721,0.6921],
[0.7198,0.7352,0.7614,0.7878,0.8146,0.8423,0.8579,0.8985],
[1.3086,1.3671,1.4183,1.4538,1.5122],
[1.3350,1.3998,1.4356,1.4826,1.5440,1.5878,1.6313,1.6908]],
'price':[1.51,2.43,1.95,2.9]
}
df = pd.DataFrame(columns=['蠒','c1','c2','w','p_rate','solution','fitness'])
for i in range(len(Dataset['demands'])):
demand=Dataset['demands'][i]
Q_b= Shortage_allowance(demand,d)
Y_b=Dataset['consumption'][i]
f=Dataset['price'][i]
PSO(swarmsize=100,iteration=120,c1=1,c2=2,蠒 =.4,w=.75,p_rate=.2)
# In[ ]:
# In[29]:
from itertools import product
# In[30]:
蠒=[.4,.5,.6,.7]
c1=[1,1.5,2]
c2=[1,1.5,2]
ww=[.6,.75,1,1.25]
p_rate=[.05,.1,.2,.3]
iteration=product(蠒,c1,c2,ww,p_rate)
#print(list(iteration))
# In[31]:
df = pd.DataFrame(columns=['蠒','c1','c2','w','p_rate','solution','fitness'])
for 蠒,c1,c2,ww,p_rate in product(蠒,c1,c2,ww,p_rate):
best=PSO(swarmsize=100,iteration=120,c1=c1,c2=c2,蠒=蠒,w=ww,p_rate=p_rate)
fitness=Fitness(best)
df = df.append({'蠒':蠒,'c1':c1,'c2': c2,'w': ww,'p_rate': p_rate,'solution':best,'fitness':fitness}, ignore_index=True)
df.to_csv('PSO_GridSearch_from_Notebook8.csv')
# In[32]:
print(df[['蠒','c1','c2','w','p_rate','fitness']])
# import plotly
# import plotly.graph_objs as go
#
#
# #Read cars data from csv
#
#
# #Set marker properties
# markersize = df['c2']
# markercolor = df['w']
# markershape = df['c1'].replace(1,"square").replace(1.5,"circle").replace(2,'diamond')
#
#
# #Make Plotly figure
# fig1 = go.scater3d( x=df['伪'],
# y=df['p_rate'],
# z=df['fitness'],
# marker=dict(#size=markersize,
# #color=markercolor,
# #symbol=markershape,
# opacity=0.9,
# reversescale=True,
# colorscale='dense'),
# line=dict (width=0.02),
# mode='markers')
#
# #Make Plot.ly Layout
# mylayout = go.Layout(scene=dict(xaxis=dict( title='伪'),
# yaxis=dict( title='p_rate'),
# zaxis=dict(title='fitness')),)
#
# #Plot and save html
# plotly.offline.plot({"data": [fig1],
# "layout": mylayout},
# auto_open=True,
# filename=("6DPlot.html"))
#
# In[33]:
import plotly.express as px #https://plotly.com/python/3d-scatter-plots/
fig = px.scatter_3d(df, x='蠒', y='p_rate', z='fitness',
color='c1', symbol='c2', size='w')
fig.show()
# In[34]:
df['c1+c2']=df['c1'].map(str)+','+df['c2'].map(str)
df
# In[35]:
fig = px.scatter_3d(df, x='蠒', y='p_rate', z='fitness',
color='c1+c2', symbol='w')
fig.show()
# In[36]:
fig = px.parallel_coordinates(df, color="fitness",
dimensions=['c1','蠒', 'c2','p_rate','w','fitness','c1+c2'],
#color_continuous_scale=px.colors.diverging.Tealrose,
#color_continuous_midpoint=0
)
fig.show()
# In[37]:
df.sort_values('fitness').head(10)
# In[38]:
type(df['c1'][1])
# In[39]:
Dataset={
'demands':[[872,1743,3486,2614,1743],
[12,67,131,187,191,138,79,27],
[990,1980,3961,2971,1980],
[193,501,1018,1249,998,564,250,128]],
'consumption':[[0.6119,0.6315,0.6499,0.6721,0.6921],
[0.7198,0.7352,0.7614,0.7878,0.8146,0.8423,0.8579,0.8985],
[1.3086,1.3671,1.4183,1.4538,1.5122],
[1.3350,1.3998,1.4356,1.4826,1.5440,1.5878,1.6313,1.6908]],
'price':[1.51,2.43,1.95,2.9]
}
# In[40]:
for i in range(len(Dataset['demands'])):
demand=Dataset['demands'][i]
Q_b= Shortage_allowance(demand,d)
Y_b=Dataset['consumption'][i]
f=Dataset['price'][i]
best=PSO(swarmsize=100,iteration=120,c1=1,c2=2,蠒=.4,w=.75,p_rate=.2)
fitness=Fitness(best)
df = df.append({'蠒':蠒,'c1':c1,'c2': c2,'w': ww,'p_rate': p_rate,'solution':best,'fitness':fitness}, ignore_index=True)
df.to_csv('PSO_GridSearch_from_Notebook7.csv')
# In[ ]:
# In[ ]:
| sharif8410/COP_Doc | PSO Clean notebook-Heuristic import.py | PSO Clean notebook-Heuristic import.py | py | 18,651 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.random.default_rng",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "numpy.dot",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
... |
19875967712 |
import numpy as np
import matplotlib.pyplot as plt
import time
import numpy as np
import pandas as pd
import time
import gc
import random
import sklearn
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_val_score, GridSearchCV, cross_validate, train_test_split
from sklearn.metrics import accuracy_score, classification_report
from sklearn.svm import SVC
from sklearn.linear_model import LinearRegression
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler, normalize
from sklearn.decomposition import PCA
from sklearn.impute import SimpleImputer
from sklearn import tree
from sklearn.metrics import plot_roc_curve
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from kneed import KneeLocator
import seaborn as sb
from sklearn.metrics import silhouette_score
from sklearn import manifold
from sklearn.mixture import GaussianMixture
class Data():
    """Loading, splitting and scaling helpers for the experiment datasets."""
    def dataAllocation(self, path):
        # Separate out the x_data and y_data and return each
        # args: string path for .csv file
        # return: pandas dataframe (features), 1-D numpy array (labels)
        # -------------------------------
        # ADD CODE HERE
        df = pd.read_csv(path)
        xcols = ['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7', 'x8']
        ycol = ['y']
        x_data = df[xcols]
        y_data = df[ycol]
        # print(y_data[y_data.y == 1].shape[0])
        # print(df.shape[0])
        # -------------------------------
        return x_data, y_data.values.ravel()
    def processed_data_Allocation(self, path):
        # Read the processed (bank-marketing) dataset and split it into
        # feature columns and a 1-D label array.
        # -------------------------------
        df = pd.read_csv(path)
        xcols = ["age","education","default","housing","loan","contact","month","day_of_week","campaign","previous","poutcome","emp.var.rate","cons.price.idx","cons.conf.idx","euribor3m","nr.employed","job_blue-collar","job_entrepreneur","job_housemaid","job_management","job_retired","job_self-employed","job_services","job_student","job_technician","job_unemployed","marital_married","marital_single"]
        ycol = ['y']
        x_data = df[xcols]
        y_data = df[ycol]
        return x_data, y_data.values.ravel()
    def trainSets(self, x_data, y_data):
        # Split the data into training and test sets.  Note: the code uses
        # test_size=0.2 (an 80/20 split), not the 70/30 split the original
        # comment claimed.  Shuffled, random_state=614 for reproducibility.
        # args: pandas dataframe, pandas dataframe
        # return: pandas dataframe, pandas dataframe, pandas series, pandas series
        # -------------------------------
        # ADD CODE HERE
        x_train, x_test, y_train, y_test = train_test_split(
            x_data, y_data, test_size=0.2, shuffle=True, random_state=614)
        # -------------------------------
        return x_train, x_test, y_train, y_test
    def dataPreProcess(self, x_train, x_test):
        # Pre-process the data to standardize it, otherwise the grid search will take much longer.
        # The scaler is fit on the training split only, then applied to both.
        # args: pandas dataframe, pandas dataframe
        # return: numpy array, numpy array (standardized features)
        # -------------------------------
        # ADD CODE HERE
        scaler = StandardScaler()
        scaler.fit(x_train)
        scaled_x_train = scaler.transform(x_train)
        scaled_x_test = scaler.transform(x_test)
        # -------------------------------
        return scaled_x_train, scaled_x_test
fig, axs = plt.subplots(2, 3)
##################### Diabetes data #############################
dataset = Data()
data = 'data/pima-indians-diabetes.csv'
x_data, y_data = dataset.dataAllocation(data)
x_train, x_test, y_train, y_test = dataset.trainSets(x_data, y_data)
x_train_scaled, x_test_scaled = dataset.dataPreProcess(x_train, x_test)
# KM with DS1
kmeans_kwargs = {'init': 'random', 'n_init':10, 'max_iter':100, 'random_state':42, 'algorithm':'full',}
start = time.time()
kmeans = KMeans(n_clusters=4, **kmeans_kwargs)
label = kmeans.fit(x_train_scaled).labels_
end = time.time()
print("KM with DS1 time:", end-start)
tsne_transform = manifold.TSNE(n_components=2, perplexity=100, init='pca', random_state=42)
feature2D_DS1 = tsne_transform.fit_transform(x_train_scaled)
axs[0, 0].scatter(feature2D_DS1[:,0], feature2D_DS1[:,1], c=label, cmap=plt.cm.Spectral, s=5)
axs[0, 0].set_title('Clusters for DS1 with KM')
# EM with DS1
em_kwargs = {'covariance_type': 'full', 'n_init':10, 'max_iter':100, 'random_state':42, 'init_params':'kmeans'}
start = time.time()
em = GaussianMixture(n_components=5, **em_kwargs)
label = em.fit(x_train_scaled).predict(x_train_scaled)
end = time.time()
print("EM with DS1 time:", end-start)
tsne_transform = manifold.TSNE(n_components=2, perplexity=100, init='pca', random_state=42)
feature2D_DS1 = tsne_transform.fit_transform(x_train_scaled)
axs[0, 1].scatter(feature2D_DS1[:,0], feature2D_DS1[:,1], c=label, cmap=plt.cm.Spectral, s=5)
axs[0, 1].set_title('Clusters for DS1 with EM')
axs[0, 2].scatter(feature2D_DS1[:,0], feature2D_DS1[:,1], c=y_train, cmap=plt.cm.Spectral, s=5)
axs[0, 2].set_title('Clusters for DS1 - True Label')
dataset = Data()
data = 'data/bank_marketing.csv'
x_data, y_data = dataset.processed_data_Allocation(data)
x_train, x_test, y_train, y_test = dataset.trainSets(x_data, y_data)
x_train_scaled, x_test_scaled = dataset.dataPreProcess(x_train, x_test)
# KM with DS2
kmeans_kwargs = {'init': 'random', 'n_init':10, 'max_iter':100, 'random_state':42, 'algorithm':'full',}
start = time.time()
kmeans = KMeans(n_clusters=6, **kmeans_kwargs)
label = kmeans.fit(x_train_scaled).labels_
end = time.time()
print("KM with DS2 time:", end-start)
tsne_transform = manifold.TSNE(n_components=2, perplexity=100, init='pca', random_state=42)
feature2D_DS2 = tsne_transform.fit_transform(x_train_scaled)
axs[1, 0].scatter(feature2D_DS2[:,0], feature2D_DS2[:,1], c=label, cmap=plt.cm.Spectral, s=5)
axs[1, 0].set_title('Clusters for DS2 with KM')
# EM with DS2
em_kwargs = {'covariance_type': 'full', 'n_init':10, 'max_iter':100, 'random_state':42, 'init_params':'kmeans'}
start = time.time()
em = GaussianMixture(n_components=5, **em_kwargs)
label = em.fit(x_train_scaled).predict(x_train_scaled)
end = time.time()
print("EM with DS2 time:", end-start)
tsne_transform = manifold.TSNE(n_components=2, perplexity=100, init='pca', random_state=42)
feature2D_DS2 = tsne_transform.fit_transform(x_train_scaled)
axs[1, 1].scatter(feature2D_DS2[:,0], feature2D_DS2[:,1], c=label, cmap=plt.cm.Spectral, s=5)
axs[1, 1].set_title('Clusters for DS2 with EM')
axs[1, 2].scatter(feature2D_DS2[:,0], feature2D_DS2[:,1], c=y_train, cmap=plt.cm.Spectral, s=5)
axs[1, 2].set_title('Clusters for DS2 - True Label')
fig.tight_layout()
plt.show()
| RuizeHu/Gatech_CS_7641_UnsupervisedLearning | code/Clusters_Plot.py | Clusters_Plot.py | py | 6,940 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 71,
"usage_type": "call"
},
{
"api_name... |
25129650646 | import numpy as np
import keras
from keras.datasets import mnist
class Dataset:
    """MNIST digits scaled to [-1, 1], padded to 64x64, with one-hot labels."""
    def __init__(self, path, local):
        """
        Initialize the MNIST dataset.
        Parameters path and local are only included to fit the interface of Dataset
        :param path: Ignored
        :param local: Ignored
        """
        (x, y), (_, _) = mnist.load_data()
        # Configure input: scale pixels from [0, 255] to [-1, 1]
        x = (x.astype(np.float32) - 127.5) / 127.5
        # add a trailing channel axis: (N, 28, 28) -> (N, 28, 28, 1)
        x = np.expand_dims(x, axis=3)
        # pad each digit into the top-left corner of a 64x64 canvas filled
        # with -1 (the scaled value of a black pixel)
        x_padding = np.zeros((x.shape[0], 64, 64, 1)) - 1
        x_padding[:, :28, :28, :] = x
        x = x_padding
        # keras.utils.np_utils was removed in newer Keras releases;
        # keras.utils.to_categorical is the stable public API for one-hot labels.
        y = keras.utils.to_categorical(y, 10)
        self.x = x
        self.y = y
        print('Loaded dataset')
        print('X:', self.x.shape)
        print('Y:', self.y.shape)
| jessica-dl/2XB3-ML-Training | trainer/mnist_dataset.py | mnist_dataset.py | py | 804 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "keras.datasets.mnist.load_data",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "keras.datasets.mnist",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "numpy.float32",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name":... |
31232927085 | import logging
import requests
from io import BytesIO
from django.core.management import BaseCommand
from places.models import Place, PlaceImage
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Management command: fetch a place JSON document and store it with its images."""
    help = 'Load information about places with media files'

    def add_arguments(self, parser):
        parser.add_argument('resource_url', type=str)

    def handle(self, *args, **options):
        resource_url = options['resource_url']
        logger.info(f'START LOADING DATA FROM RESOURCE {resource_url}')
        try:
            # a timeout keeps the command from hanging forever on a dead host
            response = requests.get(resource_url, timeout=30)
            response.raise_for_status()
        except requests.exceptions.RequestException as e:
            # RequestException covers ConnectionError/Timeout as well as the
            # HTTPError raised by raise_for_status(); catching only HTTPError
            # let plain network failures crash the whole command.
            logger.error(f'UNABLE TO LOAD DATA FROM RESOURCE {resource_url}, details: {e}')
            return

        place_data = response.json()
        # keyed on title, so re-running the command is idempotent
        place, created = Place.objects.get_or_create(
            title=place_data['title'],
            defaults={
                'short_title': place_data['title'],
                'short_description': place_data['description_short'],
                'long_description': place_data['description_long'],
                'lng': place_data['coordinates']['lng'],
                'lat': place_data['coordinates']['lat'],
                'place_id': place_data['title'],
            }
        )
        if created:
            # only download images for a brand-new place; positions are 1-based
            for i, img_url in enumerate(place_data['imgs'], start=1):
                try:
                    img_response = requests.get(img_url, timeout=30)
                    img_response.raise_for_status()
                except requests.exceptions.RequestException as e:
                    logger.error(f'UNABLE TO SAVE IMAGE FROM FROM RESOURCE {img_url}, details: {e}')
                    continue
                img = BytesIO(img_response.content)
                place_image, img_created = PlaceImage.objects.get_or_create(
                    place=place,
                    position=i
                )
                place_image.image.save(f'place-{place.id}-img-{i}', img, save=True)
        action = 'CREATED' if created else 'UPDATED'
        logger.info(f'{action} PLACE {place}')
        logger.info(f'END LOADING DATA FROM RESOURCE {resource_url}')
| vitaliy-pavlenko/where_to_go | places/management/commands/load_place.py | load_place.py | py | 2,202 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.core.management.BaseCommand",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "requ... |
75226773628 | import pymongo
from data_handlers import import_from_csv
# Connect to the local single-node MongoDB and grab the two core collections.
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
core_db = myclient["core"]
indices_col = core_db["indices"]
historical_data_col = core_db["historical_data"]
# Source spreadsheets — note the hard-coded, machine-specific Windows paths.
instruments_file = "C:\\Users\\Prathiksha\\Documents\\Prashanth\\Trading\\instruments_nse.csv"
market_cap_file = "C:\\Users\\Prathiksha\\Documents\\Prashanth\\Trading\\MCAP31032021_0.csv"
mis_data_file = "C:\\Users\\Prathiksha\\Documents\\Prashanth\\Trading\\Zerodha - Intraday margins - EQ- MIS_CO leverages.csv"
instruments_df = import_from_csv(instruments_file)
market_cap_df = import_from_csv(market_cap_file)
mis_df = import_from_csv(mis_data_file)
print(mis_df.columns)
def insert_to_db(df, db_col):
    """Insert every row of *df* into *db_col*, stringifying each cell value."""
    columns = list(df.columns)
    for row_label in df.index:
        document = {column: str(df[column][row_label]) for column in columns}
        db_col.insert_one(document)
def update_to_db(df, db_col, indices, field):
    """For each row of *df*, set *field* to "true" on the document whose
    tradingsymbol matches the value found in each of the *indices* columns.

    Failures on individual documents are printed and skipped.
    """
    for row_label in df.index:
        for column in indices:
            try:
                db_col.update_one(
                    {"tradingsymbol": df[column][row_label]},
                    {"$set": {field: "true"}},
                )
            except Exception as exc:
                print(exc)
# Flag every symbol listed in the Zerodha MIS sheet as MIS-enabled in Mongo.
update_to_db(mis_df, indices_col, ['Symbol'], "mis_status")
#insert_to_db(instruments_df, indices_col)
| prashanth470/trading | source/data/db_operations.py | db_operations.py | py | 1,622 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "data_handlers.import_from_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "data_handlers.import_from_csv",
"line_number": 13,
"usage_type": "call"
},
{
"ap... |
9489616666 | import healsparse as hs
import healpy as hp
import numpy as np
from optparse import OptionParser
def main():
    """Convert a DES survey map between healpix and healsparse formats.

    Reads the input map (optionally applying an UNSEEN-based mask to healpix
    input), then writes it out either as a healsparse file or as a
    (possibly degraded) healpix map, with a conventionally built file name.
    """
    usage = "%prog [options]"
    parser = OptionParser(usage=usage)
    parser.add_option("--input_file",type="string",dest="infilename",help="Input file",default='/pool/cosmo01_data1/des/y6_sp_maps/mangle_maps/SPmaps/band_g/y6a2_g_o.4096_t.32768_AIRMASS.MAX_EQU.fits.fz')
    parser.add_option("--nside_coverage",type="int",dest="nside_coverage",help="nside coverage value",default=32)
    parser.add_option("--nside_out",type="int",dest="nside_out",help="nside of output file",default=4096)
    parser.add_option("--nest",action="store_true",dest="isnest",help="Toggle NEST to True",default=False)
    parser.add_option("--healpix",action="store_true",dest="ishealpix",help="Toggle healpix format to True",default=False)
    parser.add_option("--mask",action="store_true",dest="applymask",help="Toggle mask application to True",default=False)
    parser.add_option("--input_file_maskname",type="string",dest="maskname",help="Mask file",default='/pool/cosmo01_data1/des/y6_sp_maps/official_v1/outliers_analysis/bao_mask/jointmasks/Y6LSSBAO_V2_MASK_WITHDEPTH_up_to_22.5_jointmask_0.01percent_sb_mean_0.5max_val_neb_mean_gcs_bit64.fits.gz')
    parser.add_option("--output_healsparse",action="store_true",dest="ouths",help="Toggle healsparse output",default=False)
    parser.add_option("--output_dir",type="string",dest="outdir",help="Output directory",default='./')
    parser.add_option("--output_file_mapname",type="string",dest="mapname",help="Output file map name",default='MAPNAME')
    parser.add_option("--output_file_statname",type="string",dest="statname",help="Output file stat name",default='STATNAME')
    parser.add_option("--output_file_bandname",type="string",dest="bandname",help="Output file band name",default=None)
    (options, args) = parser.parse_args()
    print('Selected NEST',options.isnest)
    print('nside',options.nside_out)
    # Read the input map in whichever of the two formats was requested.
    print('Reading map',options.infilename)
    print('Selected input format as Healpix',options.ishealpix)
    if options.applymask:
        print('Applying mask',options.maskname)
    if options.ishealpix:
        inmap = hp.read_map(options.infilename, nest = options.isnest)
        if options.applymask:
            # Mask pixels flagged UNSEEN in the mask map by setting the same
            # pixels to UNSEEN in the input map. Masking is only applied to
            # healpix input (not to the healsparse branch below).
            maskmap = hp.read_map(options.maskname,nest=False,partial=True)
            mask = np.where(maskmap == hp.UNSEEN) #masked out regions assumed to be assigned UNSEEN
            inmap[mask[0]] = hp.UNSEEN
    else:
        inmap = hs.HealSparseMap.read(options.infilename, options.nside_coverage)
    # Build the output file name: y6_<map>_<stat>[_<band>]_<nside>_<ordering>.<ext>
    if options.isnest:
        nestring = 'NEST'
    else:
        nestring = 'RING'
    if options.bandname is None:
        outfilename_noext = '_'.join(('y6',options.mapname,options.statname,str(options.nside_out),nestring))
    else:
        outfilename_noext = '_'.join(('y6',options.mapname,options.statname,options.bandname,str(options.nside_out),nestring))
    if options.ouths:
        extension = '.hs'
    else:
        extension = '.fits'
    outfilename = options.outdir + outfilename_noext + extension
    # Write the map in the requested output format.
    print('Writing map',outfilename)
    if options.ishealpix: #input is healpix
        if options.ouths: #output in healsparse format
            conv_map = hs.HealSparseMap(nside_coverage=options.nside_coverage, healpix_map=inmap)
            conv_map.write(outfilename, clobber=True)
        else: #output in healpix, degraded/upgraded to nside_out
            if options.isnest:
                order = 'NESTED'
            else:
                order = 'RING'
            lores_map = hp.ud_grade(inmap, options.nside_out, order_in = order, order_out = order)
            hp.write_map(outfilename, lores_map, nest = options.isnest, overwrite=True)
    else: #input is healsparse
        if options.ouths: #output in healsparse format (straight copy)
            inmap.write(outfilename, clobber=True)
        else: #output in healpix, mean-reduced to nside_out
            conv_map = inmap.generate_healpix_map(nside=options.nside_out, reduction='mean', nest = options.isnest)
            hp.write_map(outfilename, conv_map, nest = options.isnest, overwrite=True)


if __name__ == "__main__":
    main()
| nsevilla/utilities | converthshp.py | converthshp.py | py | 4,218 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "optparse.OptionParser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "healpy.read_map",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "healpy.read_map",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.where",
... |
18308321752 | from unittest import TestCase
from sqlite3 import IntegrityError
import os
from shutil import copytree, rmtree
from random import randint, random
import uuid
from tempfile import gettempdir
from shapely.geometry import Point
import shapely.wkb
from aequilibrae.project import Project
from ...data import siouxfalls_project
class TestNode(TestCase):
    """Tests for node editing (save, data fields, renumber) on a copy of the
    Sioux Falls example project."""

    def setUp(self) -> None:
        # Windows-style ';' PATH separator; each test works on its own
        # uniquely named copy of the example project.
        os.environ["PATH"] = os.path.join(gettempdir(), "temp_data") + ";" + os.environ["PATH"]
        self.proj_dir = os.path.join(gettempdir(), uuid.uuid4().hex)
        copytree(siouxfalls_project, self.proj_dir)

        self.project = Project()
        self.project.open(self.proj_dir)
        self.network = self.project.network
        self.curr = self.project.conn.cursor()

    def tearDown(self) -> None:
        self.curr.close()
        self.project.close()
        # Best effort: on some platforms the directory may still be locked.
        try:
            rmtree(self.proj_dir)
        except Exception as e:
            print(f"Failed to remove at {e.args}")

    def test_save_and_assignment(self):
        """Protected attributes reject assignment; geometry/centroid changes
        persist to the nodes table and propagate to connected links."""
        nodes = self.network.nodes
        nd = randint(1, 24)
        node = nodes.get(nd)

        with self.assertRaises(AttributeError):
            node.modes = "abc"

        with self.assertRaises(AttributeError):
            node.link_types = "default"

        with self.assertRaises(AttributeError):
            node.node_id = 2

        with self.assertRaises(ValueError):
            node.is_centroid = 2

        node.is_centroid = 0
        self.assertEqual(0, node.is_centroid, "Assignment of is_centroid did not work")

        x = node.geometry.x + random()
        y = node.geometry.y + random()

        node.geometry = Point([x, y])

        node.save()
        self.curr.execute("Select is_centroid, asBinary(geometry) from nodes where node_id=?;", [nd])
        flag, wkb = self.curr.fetchone()
        self.assertEqual(flag, 0, "Saving of is_centroid failed")

        geo = shapely.wkb.loads(wkb)
        self.assertEqual(geo.x, x, "Geometry X saved wrong")
        self.assertEqual(geo.y, y, "Geometry Y saved wrong")

        # Moving a node must also move the endpoint of its outgoing links.
        self.curr.execute("Select asBinary(geometry) from links where a_node=?;", [nd])
        wkb = self.curr.fetchone()[0]
        geo2 = shapely.wkb.loads(wkb)

        self.assertEqual(geo2.xy[0][0], x, "Saving node geometry broke underlying network")
        self.assertEqual(geo2.xy[1][0], y, "Saving node geometry broke underlying network")

    def test_data_fields(self):
        """All nodes expose the same data fields, matching the table schema."""
        nodes = self.network.nodes
        node1 = nodes.get(randint(1, 24))
        node2 = nodes.get(randint(1, 24))

        self.assertEqual(node1.data_fields(), node2.data_fields(), "Different nodes have different data fields")

        fields = sorted(node1.data_fields())
        self.curr.execute("pragma table_info(nodes)")
        dt = self.curr.fetchall()

        actual_fields = sorted([x[1] for x in dt if x[1] != "ogc_fid"])

        self.assertEqual(fields, actual_fields, "Node has unexpected set of fields")

    def test_renumber(self):
        """Renumbering to a taken id fails; to a free id it keeps geometry."""
        nodes = self.network.nodes
        node = nodes.get(randint(2, 24))

        x = node.geometry.x
        y = node.geometry.y

        with self.assertRaises(IntegrityError):
            node.renumber(1)

        num = randint(25, 2000)
        node.renumber(num)

        self.curr.execute("Select asBinary(geometry) from nodes where node_id=?;", [num])
        wkb = self.curr.fetchone()[0]
        geo = shapely.wkb.loads(wkb)

        self.assertEqual(geo.x, x, "Renumbering failed")
        self.assertEqual(geo.y, y, "Renumbering failed")
| AequilibraE/aequilibrae | tests/aequilibrae/project/test_node.py | test_node.py | py | 3,560 | python | en | code | 140 | github-code | 6 | [
{
"api_name": "unittest.TestCase",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "os.environ",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_nu... |
75331159226 | import ops
import iopc
# Source tarball and install-directory names for the drbd-utils package.
TARBALL_FILE="drbd-utils-8.9.10.tar.gz"
TARBALL_DIR="drbd-utils-8.9.10"
INSTALL_DIR="drbd-utils-bin"

# Module-level path/toolchain state, populated by set_global() on every hook.
pkg_path = ""
output_dir = ""
tarball_pkg = ""
tarball_dir = ""
install_dir = ""
install_tmp_dir = ""
cc_host = ""
def set_global(args):
    """Populate the module-level path/toolchain globals from the build *args*.

    Expects args["pkg_path"] and args["output_path"]; also derives the
    configure host triplet from the CROSS_COMPILE environment variable.
    """
    global pkg_path
    global output_dir
    global tarball_pkg
    global install_dir
    global install_tmp_dir
    global tarball_dir
    global cc_host
    pkg_path = args["pkg_path"]
    output_dir = args["output_path"]
    tarball_pkg = ops.path_join(pkg_path, TARBALL_FILE)
    install_dir = ops.path_join(output_dir, INSTALL_DIR)
    install_tmp_dir = ops.path_join(output_dir, INSTALL_DIR + "-tmp")
    tarball_dir = ops.path_join(output_dir, TARBALL_DIR)
    cc_host_str = ops.getEnv("CROSS_COMPILE")
    # Drop the trailing dash of the cross prefix (e.g. "arm-linux-" -> "arm-linux")
    # to obtain the --host triplet for configure.
    cc_host = cc_host_str[:len(cc_host_str) - 1]
def MAIN_ENV(args):
    """Export the cross-compile toolchain variables for the build environment."""
    set_global(args)
    # CC/CXX are the cross prefix plus the compiler name.
    for variable, suffix in (("CC", "gcc"), ("CXX", "g++")):
        ops.exportEnv(ops.setEnv(variable, ops.getEnv("CROSS_COMPILE") + suffix))
    ops.exportEnv(ops.setEnv("CROSS", ops.getEnv("CROSS_COMPILE")))
    # Stage the install into the temporary tree rather than the real root.
    ops.exportEnv(ops.setEnv("DESTDIR", install_tmp_dir))
    return False
def MAIN_EXTRACT(args):
    """Unpack the drbd-utils tarball into the output directory."""
    set_global(args)
    ops.unTarGz(tarball_pkg, output_dir)
    return True
def MAIN_PATCH(args, patch_group_name):
    """Apply every patch in *patch_group_name* to the unpacked source tree.

    Aborts the build (exit status 1) on the first patch that fails to apply.
    """
    set_global(args)
    for patch in iopc.get_patch_list(pkg_path, patch_group_name):
        # Bug fix: the original called sys.exit(1) without importing sys, so a
        # failed patch raised NameError instead of aborting cleanly.
        # raise SystemExit(1) is equivalent to sys.exit(1) with no import.
        if not iopc.apply_patch(tarball_dir, patch):
            raise SystemExit(1)
    return True
def MAIN_CONFIGURE(args):
    """Run configure for a minimal, generic, cross-compiled drbd-utils build."""
    set_global(args)
    extra_conf = [
        "--with-distro=generic",
        "--without-manual",
        "--without-udev",
        "--without-83support",
        "--with-initscripttype=sysv",
        "--host=" + cc_host,
    ]
    iopc.configure(tarball_dir, extra_conf)
    return True
def MAIN_BUILD(args):
    """Compile the sources and stage-install them into the temporary tree."""
    set_global(args)
    for directory in (install_dir, install_tmp_dir):
        ops.mkdir(directory)
    iopc.make(tarball_dir)
    iopc.make_install(tarball_dir)
    return False
def MAIN_INSTALL(args):
    """Copy the staged drbd-utils binaries/libs into the final package layout."""
    set_global(args)
    ops.mkdir(install_dir)
    # Libraries, then the three main tools from the staged usr/local/sbin.
    ops.copyto(ops.path_join(install_tmp_dir, "lib"), install_dir)
    ops.mkdir(ops.path_join(install_dir, "sbin"))
    ops.copyto(ops.path_join(install_tmp_dir, "usr/local/sbin/drbdadm"), ops.path_join(install_dir, "/sbin"))
    ops.copyto(ops.path_join(install_tmp_dir, "usr/local/sbin/drbdsetup"), ops.path_join(install_dir, "/sbin"))
    ops.copyto(ops.path_join(install_tmp_dir, "usr/local/sbin/drbdmeta"), ops.path_join(install_dir, "/sbin"))
    ops.mkdir(ops.path_join(install_dir, "usr/"))
    # Replace any stale "var" entry with a symlink to /var.
    # NOTE(review): rm_file before ln appears intentional to make the link
    # idempotent — confirm ops.ln semantics against the iopc build framework.
    ops.rm_file(ops.path_join(install_dir, "var"))
    ops.ln(install_dir, "/var", ops.path_join(install_dir, "var"))
    # Register the installed payload with the package manager.
    iopc.installBin(args["pkg_name"], ops.path_join(install_dir, "lib/."), "lib")
    iopc.installBin(args["pkg_name"], ops.path_join(install_dir, "sbin/."), "sbin")
    iopc.installBin(args["pkg_name"], ops.path_join(install_dir, "var"), "usr/local")
    return False
def MAIN_SDKENV(args):
    """SDK-environment hook; this package exports nothing extra."""
    set_global(args)
    return False
def MAIN_CLEAN_BUILD(args):
    """Clean-build hook; nothing to clean for this package."""
    set_global(args)
    return False
def MAIN(args):
    """Default entry hook; only refreshes the module-level globals."""
    set_global(args)
| YuanYuLin/drbd-utils | Package/CONFIG.py | CONFIG.py | py | 3,434 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "ops.path_join",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "ops.path_join",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "ops.path_join",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "ops.path_join",
"line_n... |
72128887548 | from OpenGL.GL import *
from common import get_namekey
import numpy as np
import pyglet
# #---badway
# vaoidx = VAO( {0:3,1:2},
# #np.array([0,0,0, 0,0, 0.5,0,0, 1,0, 0.5,0.5,0, 1,1, 0,0.5,0, 0,1, ]).astype('float32'),
# #np.array([0,0,0, 0,0, 1,0,0, 1,0, 1,1,0, 1,1, 0,1,0, 0,1, ]).astype('float32'),
# np.array([ [0,0,0, 0,0], [1,0,0, 1,0], [1,1,0, 1,1], [0,1,0, 0,1] ]).astype('float32'),
# np.array([0,1,2,0,2,3,]).astype('uint')
# )
#hard to parse. we take thisway.
# vaoidx = VAO(
# {
# 'position' : [ 0,0,0, 1,0,0, 1,1,0, 0,1,0,],
# 'uv' : [ 0,0, 1,0, 1,1, 0,1 ],
# },
# indices = [0,1,2,0,2,3,]
# )#name
#hard to parse.
# vaoidx = VAO(
# position= [ 0,0,0, 1,0,0, 1,1,0, 0,1,0,],
# uv = [ 0,0, 1,0, 1,1, 0,1 ],
# indices = [0,1,2,0,2,3,]
# )#name
# Default quad geometry: the four XYZ corners of a unit square plus matching UVs.
vao_attrs={
    'position' : np.array([ 0,0,0, 1,0,0, 1,1,0, 0,1,0,]).astype('float32'),
    'uv' : np.array([ 0,0, 1,0, 1,1, 0,1 ]).astype('float32'),
}
# Two triangles (0-1-2 and 0-2-3) covering the quad.
vao_indices = np.array([0,1,2,0,2,3,]).astype('uint')
# for i,nparr in vao_attrs.items():
# a = len(nparr)
# print(a)
class VAO:
    """Indexed vertex-array object wrapper (every draw goes through an EBO).

    Instances are registered in a class-level name registry so they can be
    looked up by name; a 'default' textured unit quad is created lazily.
    """
    # GL id of the most recently bound VAO; -1 means "none known to be bound".
    last = -1
    # ---namedict ver 0.2: name -> VAO instance registry.
    namedict = {}
    @classmethod
    def get(cls, name):
        """Return the VAO registered under *name* (creating 'default' lazily)."""
        if not 'default' in cls.namedict:
            cls.default()
        return cls.namedict.get(name)
    @classmethod
    def set(cls, name: str, item) -> str:
        """Register *item* under a collision-free variant of *name*; return it."""
        name = get_namekey(cls.namedict,name)
        cls.namedict[name]=item
        return name
    @classmethod
    def default(cls):
        """Create and register the default textured unit quad."""
        cls(vao_attrs,vao_indices,name='default')
    def __repr__(self):
        return f"vao name:{self.name}"
    def __init__(self, attrdict,indices, name='vao'):
        """Build an indexed VAO from per-attribute float32 arrays.

        *attrdict* maps attribute name -> flat float32 ndarray; the first key
        must be 'position' (3 floats per vertex), which fixes the vertex count.
        *indices* is the uint element array. Attribute locations are assigned
        in dict order (0, 1, ...).
        """
        assert list(attrdict.keys())[0]=='position'
        vert_count = len(attrdict['position'])//3
        # Components per attribute, keyed by attribute location, e.g. {0:3, 1:2}.
        attr_size_dict = {}
        attridx = -1
        for attrname, nparr in attrdict.items():
            attridx+=1
            attrN = len(nparr)//vert_count
            attr_size_dict[attridx] = attrN
        stride = sum(attr_size_dict.values())  # floats per vertex, e.g. 5 for 3+2
        # Interleave the attributes into one flat float32 vertex buffer.
        vertices = []
        for idx, nparr in enumerate(attrdict.values()):
            vertlen = attr_size_dict[idx]
            verted = nparr.reshape(-1,vertlen)
            vertices.append(verted)
        vertices = np.hstack(vertices).flatten().astype('float32')
        datatype = GL_FLOAT
        normalized = GL_FALSE #GL_TRUE
        fsize = np.float32(0.0).nbytes  # bytes per float32 component
        # Create and bind the VAO and upload vertex + index data.
        VAO = glGenVertexArrays(1)  # errs if no GL context/window exists yet
        VBO = glGenBuffers(1)  # vertex data buffer
        EBO = glGenBuffers(1)  # element (index) buffer
        glBindVertexArray(VAO)
        glBindBuffer(GL_ARRAY_BUFFER, VBO)
        glBufferData(GL_ARRAY_BUFFER, vertices.nbytes, vertices, GL_STATIC_DRAW)
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO)
        glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.nbytes, indices, GL_STATIC_DRAW)
        # Describe each attribute's layout; pre_offset accumulates the float
        # offset of the current attribute within one interleaved vertex.
        pre_offset = 0
        for attr_index, size in attr_size_dict.items():
            if pre_offset==0:
                offset = None
                offset = ctypes.c_void_p(0)  # first attribute starts at byte 0
                glVertexAttribPointer(attr_index, size, datatype, normalized, stride * fsize, offset)
                glEnableVertexAttribArray(attr_index)
                pre_offset = size
            else:
                offset = ctypes.c_void_p( pre_offset *fsize)
                glVertexAttribPointer(attr_index, size, datatype, normalized, stride * fsize, offset)
                glEnableVertexAttribArray(attr_index)
                pre_offset +=size
        self.ID = VAO
        self.ID_VBO = VBO
        self.ID_EBO = EBO
        self.stride = stride
        self.points = len(indices)
        self.vertices = vertices
        self.name = self.__class__.set(name,self)
    def update_position(self,position):
        """Overwrite only the position components (assumed at floats 0..2 of
        each vertex) and re-upload the whole vertex buffer."""
        vertices = self.vertices
        vertices[0::self.stride] = position[0::3]
        vertices[1::self.stride] = position[1::3]
        vertices[2::self.stride] = position[2::3]
        VAO = self.ID
        VBO = self.ID_VBO
        glBindVertexArray(VAO)
        glBindBuffer(GL_ARRAY_BUFFER, VBO)
        glBufferData(GL_ARRAY_BUFFER, vertices.nbytes, vertices, GL_STATIC_DRAW)
        # GL_STREAM_DRAW would suit frequently changing data, if needed someday.
        self.vertices = vertices
    def update(self,vertices):
        """Replace the vertex buffer; requires the same interleaved layout."""
        VAO = self.ID
        VBO = self.ID_VBO
        glBindVertexArray(VAO)
        glBindBuffer(GL_ARRAY_BUFFER, VBO)
        glBufferData(GL_ARRAY_BUFFER, vertices.nbytes, vertices, GL_STATIC_DRAW)
    def update_indices(self,vertices, indices):
        """Replace both vertex and index buffers (changes the draw count)."""
        VAO = self.ID
        VBO = self.ID_VBO
        EBO = self.ID_EBO
        glBindVertexArray(VAO)
        glBindBuffer(GL_ARRAY_BUFFER, VBO)
        glBufferData(GL_ARRAY_BUFFER, vertices.nbytes, vertices, GL_STATIC_DRAW)
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO)
        glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.nbytes, indices, GL_STATIC_DRAW)
        self.points = len(indices)
    def bind(self):
        # Skip the GL call when this VAO is already the last one bound.
        if VAO.last != self.ID:
            glBindVertexArray(self.ID)
            VAO.last = self.ID
    def unbind(self):
        glBindVertexArray(0)
        VAO.last = -1
    def draw(self, MODE = 'triangles'):
        """Issue the indexed draw call; the VAO must already be bound."""
        # Map the friendly mode name onto the GL primitive enum.
        draw_dict = {'points':GL_POINTS,
        'lines':GL_LINE_STRIP,
        'triangles':GL_TRIANGLES,
        }
        MODE = draw_dict[MODE]
        glDrawElements(MODE, self.points, GL_UNSIGNED_INT, None)
if __name__ == "__main__":
window = pyglet.window.Window()
VAO.default()
a = VAO.get('default')
print(a) | liltmagicbox/3dkatsu | objects/vao_123123.py | vao_123123.py | py | 6,982 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.array",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "common.get_namekey",
"line_nu... |
71345484349 | import logging
from typing import List
from DAL.ItemDAL.ItemDALInterface import ItemDALInterface
from Database.DBConnection import DBConnection
from Entities.Item import Item
class ItemDALImplementation(ItemDALInterface):
    """SQL-backed data-access layer for rows of the Designs.Item table."""

    def create_item(self, item: Item) -> Item:
        """Insert *item* and populate its database-generated item_id."""
        logging.info("Beginning DAL method create item with item: " + str(item))
        sql = "INSERT INTO Designs.Item (item_name) VALUES (%s) RETURNING item_id;"
        connection = DBConnection.db_connection()
        cursor = connection.cursor()
        cursor.execute(sql, (item.item_name,))
        item.item_id = cursor.fetchone()[0]
        cursor.close()
        connection.commit()
        connection.close()
        logging.info("Finishing DAL method create item with item: " + str(item))
        return item

    def get_item(self, item_id: int) -> Item:
        """Fetch one item by id; returns the sentinel Item(0, "") when absent."""
        logging.info("Beginning DAL method get item with item ID: " + str(item_id))
        sql = "SELECT * FROM Designs.Item WHERE item_id=%s;"
        connection = DBConnection.db_connection()
        cursor = connection.cursor()
        cursor.execute(sql, (item_id,))
        item_info = cursor.fetchone()
        cursor.close()
        connection.close()
        if item_info is None:
            item = Item(0, "")
            logging.info("Finishing DAL method get item, item not found")
            return item
        item = Item(*item_info)
        logging.info("Finishing DAL method get item with item: " + str(item.convert_to_dictionary()))
        return item

    def get_all_items(self) -> List[Item]:
        """Fetch every item; returns an empty list when the table is empty."""
        logging.info("Beginning DAL method get all items")
        sql = "SELECT * FROM Designs.Item;"
        connection = DBConnection.db_connection()
        cursor = connection.cursor()
        cursor.execute(sql)
        item_records = cursor.fetchall()
        cursor.close()
        connection.close()
        item_list = [Item(*record) for record in item_records]
        # Bug fix: the previous code logged the loop variable after the loop,
        # which raised NameError whenever the table returned no rows.
        logging.info("Finishing DAL method get all items with " + str(len(item_list)) + " items")
        return item_list

    def update_item(self, item: Item) -> bool:
        """Rename the item identified by item.item_id; always returns True."""
        logging.info("Beginning DAL method update item with item: " + str(item.convert_to_dictionary()))
        sql = "Update Designs.Item SET item_name=%s WHERE item_id=%s;"
        connection = DBConnection.db_connection()
        cursor = connection.cursor()
        cursor.execute(sql, (item.item_name, item.item_id))
        cursor.close()
        connection.commit()
        connection.close()
        logging.info("Finishing DAL method update item")
        return True

    def delete_item(self, item_id: int) -> bool:
        """Delete the item with *item_id*; always returns True."""
        logging.info("Beginning DAL method delete item with item ID: " + str(item_id))
        sql = "DELETE FROM Designs.Item WHERE item_id=%s;"
        connection = DBConnection.db_connection()
        cursor = connection.cursor()
        cursor.execute(sql, (item_id,))
        cursor.close()
        connection.commit()
        connection.close()
        logging.info("Finishing DAL method delete item")
        return True
| dmerc12/143Designs | back-end/DAL/ItemDAL/ItemDALImplementation.py | ItemDALImplementation.py | py | 3,158 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "DAL.ItemDAL.ItemDALInterface.ItemDALInterface",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "Entities.Item.Item",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 12,
"usage_type": "call"
},
{
"api_... |
4086684487 | import warnings
from functools import partial
from multiprocessing import Pool
import pandas as pd
import textacy
import textacy.preprocessing
import textacy.representations
import textacy.tm
from tqdm import tqdm
# Enable tqdm's progress_apply on pandas objects and silence FutureWarnings.
tqdm.pandas()
warnings.simplefilter(action="ignore", category=FutureWarning)

# Text-normalisation pipeline applied to every document before extraction.
preproc = textacy.preprocessing.make_pipeline(
    textacy.preprocessing.normalize.unicode,
    textacy.preprocessing.normalize.bullet_points,
    textacy.preprocessing.normalize.quotation_marks,
    textacy.preprocessing.normalize.whitespace,
    textacy.preprocessing.normalize.hyphenated_words,
    textacy.preprocessing.remove.brackets,
    textacy.preprocessing.replace.currency_symbols,
    textacy.preprocessing.remove.html_tags,
)
def from_dict_to_frame(indexed_dict):
    """Turn {key: list_of_texts} into a one-column 'text' frame, one row per
    text, indexed by the original keys (keys repeat for multi-text entries)."""
    wrapped = {key: [value] for key, value in indexed_dict.items()}
    frame = pd.DataFrame.from_dict(wrapped).T
    frame.columns = ["text"]
    return frame.explode("text")
def extract_terms_df(
    data,
    text_var,
    index_var,
    ngs=True,
    ents=True,
    ncs=False,
    sample_size=100000,
    drop_emoji=True,
    ngrams=(2, 2),
    remove_punctuation=True,
    include_pos=["NOUN"],
    include_types=["PERSON", "ORG"],
    language="en_core_web_sm",
):
    """Extract terms (ngrams / entities / noun chunks) from data[text_var].

    Returns (terms, terms_indexed): a term table with counts, de-duplicated
    by surface text, and a per-document table listing the terms found in it.
    NOTE(review): despite the docstring below, extraction here runs via a
    sequential map, not multiprocessing.
    """
    load_lang = textacy.load_spacy_lang(language, disable=())

    def extract_terms(
        tuple,  # (index, text) pair for one document
        ngs=True,
        ents=True,
        ncs=False,
        ngrams=(2, 2),
        drop_emoji=True,
        remove_punctuation=False,
        include_pos=["NOUN", "PROPN", "ADJ"],
        include_types=["PERSON", "ORG"],
    ):
        # Extract the requested term kinds from one (index, text) pair and
        # return them as a small DataFrame tagged with the document index.
        index = tuple[0]
        text = tuple[1]
        prepro_text = preproc(str(text))
        if drop_emoji == True:
            prepro_text = textacy.preprocessing.replace.emojis(prepro_text, repl="")
        if remove_punctuation == True:
            prepro_text = textacy.preprocessing.remove.punctuation(prepro_text)
        doc = textacy.make_spacy_doc(prepro_text, lang=load_lang)
        terms = []
        if ngs:
            # POS-filtered n-grams of the configured size(s).
            ngrams_terms = list(
                textacy.extract.terms(
                    doc,
                    ngs=partial(
                        textacy.extract.ngrams,
                        n=ngrams,
                        filter_punct=True,
                        filter_stops=True,
                        include_pos=include_pos,
                    ),
                    dedupe=False,
                )
            )
            terms.append(ngrams_terms)
        if ents:
            # Named entities of the configured types.
            ents_terms = list(
                textacy.extract.terms(
                    doc,
                    ents=partial(textacy.extract.entities, include_types=include_types),
                    dedupe=False,
                )
            )
            terms.append(ents_terms)
        if ncs:
            # Noun chunks, keeping only spans of at least 3 tokens.
            ncs_terms = list(
                textacy.extract.terms(
                    doc,
                    ncs=partial(textacy.extract.noun_chunks, drop_determiners=True),
                    dedupe=False,
                )
            )
            noun_chunks = [x for x in ncs_terms if len(x) >= 3]
            terms.append(noun_chunks)
        # Flatten the per-kind lists and de-duplicate spans.
        final = [item for sublist in terms for item in sublist]
        final = list(set(final))
        df = [
            (term.text, term.lemma_.lower(), term.label_, term.__len__())
            for term in final
        ]
        df = pd.DataFrame(df, columns=["text", "lemma", "ent", "ngrams"])
        df["text_index"] = index
        return df

    """
    This function extracts terms from a column in a DataFrame. It can extract in a multiprocessing way
    It outputs a dataframe with the list of terms and a table with the indexed terms
    """
    # Drop missing texts and cap the workload at sample_size documents.
    data = data[data[text_var].notna()]
    data = data.sample(min(sample_size, len(data)))

    sentences = data[text_var].to_list()
    indexes = data[index_var].to_list()
    inputs = [(x, y) for x, y in zip(indexes, sentences)]
    # Sequential extraction over all documents, with a progress bar.
    res = list(
        tqdm(
            map(
                partial(
                    extract_terms,
                    ngs=ngs,
                    ents=ents,
                    ncs=ncs,
                    drop_emoji=drop_emoji,
                    remove_punctuation=remove_punctuation,
                    ngrams=ngrams,
                    include_pos=include_pos,
                    include_types=include_types,
                ),
                inputs,
            ),
            total=len(inputs),
        )
    )

    final_res = pd.concat([x for x in res])
    # Aggregate occurrence counts per unique (text, lemma, ent, ngrams) tuple.
    terms = (
        final_res.groupby(["text", "lemma", "ent", "ngrams"])
        .agg(count_terms=("text_index", "count"))
        .reset_index()
    )

    # duplicates to get rid of: keep one entity label per surface text.
    terms = terms.sort_values(["text", "ent"]).reset_index(drop=True)
    terms = terms.drop_duplicates(["text"], keep="first")
    terms = terms.sort_values("count_terms", ascending=False)
    terms = terms.rename(columns={"text": "terms_indexed"})
    terms = terms.set_index("terms_indexed")

    # Per-document list of the terms found in it, indexed by index_var.
    terms_indexed = final_res[["text", "text_index"]].drop_duplicates()
    terms_indexed = terms_indexed.rename(columns={"text_index": index_var})
    terms_indexed = terms_indexed.groupby(index_var)["text"].apply(list)
    terms_indexed = terms_indexed.reset_index()
    terms_indexed = terms_indexed.rename(columns={"text": "terms_indexed"})
    terms_indexed = terms_indexed.set_index(index_var)

    return terms, terms_indexed
| charlesdedampierre/BunkaTopics | bunkatopics/functions/extract_terms.py | extract_terms.py | py | 5,458 | python | en | code | 35 | github-code | 6 | [
{
"api_name": "tqdm.tqdm.pandas",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "warnings.simplefilter",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "textacy.preprocessin... |
30358149781 | import contextlib
import datetime
import unittest
from traits.api import Date, HasTraits, List
from traitsui.api import DateEditor, View, Item
from traitsui.editors.date_editor import CellFormat
from traitsui.tests._tools import (
BaseTestMixin,
create_ui,
requires_toolkit,
reraise_exceptions,
ToolkitName,
)
class Foo(HasTraits):
    """Model object holding the date traits the editors under test bind to."""
    # Selected dates for the multi-select DateEditor views.
    dates = List(Date)
    # Selected date for the single-select DateEditor view.
    single_date = Date()
def single_select_custom_view():
    """Build a custom-style view with a single-selection DateEditor."""
    editor = DateEditor(multi_select=False)
    item = Item(name="single_date", style="custom", editor=editor)
    return View(item)
def multi_select_custom_view():
    """Build a custom-style view with a multi-selection DateEditor."""
    editor = DateEditor(multi_select=True)
    item = Item(name="dates", style="custom", editor=editor)
    return View(item)
def multi_select_selected_color_view():
    """Build a multi-select view whose selected cells render bold on dark red."""
    selected = CellFormat(bold=True, bgcolor=(128, 10, 0))
    editor = DateEditor(multi_select=True, selected_style=selected)
    item = Item(name="dates", style="custom", editor=editor)
    return View(item)
@requires_toolkit([ToolkitName.qt])
class TestDateEditorCustomQt(BaseTestMixin, unittest.TestCase):
    """Behavioral tests for the custom (calendar-widget) DateEditor on Qt."""

    def setUp(self):
        BaseTestMixin.setUp(self)

    def tearDown(self):
        BaseTestMixin.tearDown(self)

    def test_single_select_qt(self):
        """Clicking a date in single-select mode updates the model trait."""
        with self.launch_editor(single_select_custom_view) as (foo, editor):
            date = datetime.date(2018, 2, 3)
            self.click_date_on_editor(editor, date)
            self.assertEqual(foo.single_date, date)

    def test_multi_select_dates_on_editor(self):
        """Clicking several dates selects them all and sorts the model list."""
        with self.launch_editor(multi_select_custom_view) as (foo, editor):
            dates = [datetime.date(2018, 2, 3), datetime.date(2018, 2, 1)]
            for date in dates:
                self.click_date_on_editor(editor, date)
            for date in dates:
                self.check_select_status(
                    editor=editor, date=date, selected=True
                )
            self.assertEqual(foo.dates, sorted(dates))

    def test_multi_select_qt_styles_reset(self):
        """Clicking a selected date again deselects it and resets its style."""
        with self.launch_editor(multi_select_custom_view) as (foo, editor):
            date = datetime.date(2018, 2, 1)
            self.click_date_on_editor(editor, date)
            self.check_select_status(editor=editor, date=date, selected=True)
            self.click_date_on_editor(editor, date)
            self.check_select_status(editor=editor, date=date, selected=False)

    def test_multi_select_qt_set_model_dates(self):
        # Test setting the dates from the model object.
        with self.launch_editor(multi_select_custom_view) as (foo, editor):
            foo.dates = [datetime.date(2010, 1, 2), datetime.date(2010, 2, 1)]
            for date in foo.dates:
                self.check_select_status(
                    editor=editor, date=date, selected=True
                )

    def test_custom_selected_color(self):
        """A custom selected_style CellFormat controls the highlight color."""
        view_factory = multi_select_selected_color_view
        with self.launch_editor(view_factory) as (foo, editor):
            date = datetime.date(2011, 3, 4)
            foo.dates = [date]
            self.check_date_bgcolor(editor, date, (128, 10, 0))

    # --------------------
    # Helper methods
    # --------------------
    @contextlib.contextmanager
    def launch_editor(self, view_factory):
        """Yield (model, editor) for a UI built from *view_factory*."""
        foo = Foo()
        with create_ui(foo, dict(view=view_factory())) as ui:
            (editor,) = ui._editors
            yield foo, editor

    def check_select_status(self, editor, date, selected):
        """Assert the calendar cell for *date* reflects the selection state."""
        from pyface.qt import QtCore, QtGui

        qdate = QtCore.QDate(date.year, date.month, date.day)
        textformat = editor.control.dateTextFormat(qdate)
        if selected:
            # Default selected style: bold text on a green background.
            self.assertEqual(
                textformat.fontWeight(),
                QtGui.QFont.Weight.Bold,
                "{!r} is not selected.".format(date),
            )
            self.check_date_bgcolor(editor, date, (0, 128, 0))
        else:
            self.assertEqual(
                textformat.fontWeight(),
                QtGui.QFont.Weight.Normal,
                "{!r} is not unselected.".format(date),
            )
            self.assertEqual(
                textformat.background().style(),
                QtCore.Qt.BrushStyle.NoBrush,
                "Expected brush to have been reset.",
            )
            self.check_date_bgcolor(editor, date, (0, 0, 0))

    def click_date_on_editor(self, editor, date):
        """Simulate a user click on *date* in the calendar widget."""
        from pyface.qt import QtCore

        # QCalendarWidget.setSelectedDate modifies internal state
        # instead of triggering the click signal.
        # So we call update_object directly
        editor.update_object(QtCore.QDate(date.year, date.month, date.day))

    def check_date_bgcolor(self, editor, date, expected):
        """Assert the background RGB of *date*'s cell equals *expected*."""
        from pyface.qt import QtCore

        qdate = QtCore.QDate(date.year, date.month, date.day)
        textformat = editor.control.dateTextFormat(qdate)
        color = textformat.background().color()
        actual = (color.red(), color.green(), color.blue())
        self.assertEqual(
            actual,
            expected,
            "Expected color: {!r}. Got color: {!r}".format(expected, actual),
        )
# Run this test case against wx too once enthought/traitsui#752 is fixed.
@requires_toolkit([ToolkitName.qt])
class TestDateEditorInitDispose(unittest.TestCase):
    """Smoke tests: a date editor can be created and torn down cleanly."""

    def check_init_and_dispose(self, view):
        # Building and immediately disposing the UI must not raise.
        with reraise_exceptions(), create_ui(Foo(), dict(view=view)):
            pass

    def test_simple_date_editor(self):
        self.check_init_and_dispose(
            View(Item(name="single_date", style="simple"))
        )

    def test_custom_date_editor(self):
        self.check_init_and_dispose(
            View(Item(name="single_date", style="custom"))
        )
| enthought/traitsui | traitsui/tests/editors/test_date_editor.py | test_date_editor.py | py | 6,064 | python | en | code | 290 | github-code | 6 | [
{
"api_name": "traits.api.HasTraits",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "traits.api.List",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "traits.api.Date",
"line_number": 20,
"usage_type": "argument"
},
{
"api_name": "traits.api.D... |
# Create your views here.
# NOTE(review): module-level globals shared by every request (and thread) of
# this Django app. `id` shadows the `id` builtin and holds the current user's
# id; `iditemcourant` holds the pedagogic item being viewed. Verify whether
# per-session storage was intended instead of process-wide state.
id = int()
iditemcourant = int()
#-*- coding: utf-8 -*-
# NOTE(review): the coding cookie above is inert -- Python only honours it on
# the first two lines of a file.
from django.http import HttpResponse
from django.shortcuts import render , redirect
from utilisateur.forms import *
from utilisateur.models import *
#import time
from django.core.urlresolvers import reverse
def utilisateur(request):
    """Login-style entry view.

    On a valid POST the submitted user id is stored in the module-level
    ``id`` global; a known id is redirected to the site index, an unknown
    one to the authentication page. Otherwise the login form is rendered.
    """
    global id
    if request.method == 'POST':
        form = formuser(request.POST)
        if form.is_valid():
            id = int(form.cleaned_data['idUtilisateur'])
            known_ids = [user.id for user in utilisateurtable.objects.all()]
            target = "index" if id in known_ids else "authentification"
            return redirect(target)
    else:
        form = formuser()
    # locals() intentionally exposes `form` to the template.
    return render(request, 'index2.html', locals())
def index(request):
    """Render the site home page."""
    return render(request, 'index.html', {})


def genereid(request):
    """Create a fresh user row and return its id as a plain-text response."""
    new_user = utilisateurtable.objects.create()
    # Expose the id as text; the str() assignment is not persisted.
    new_user.id = str(new_user.id)
    return HttpResponse(new_user.id)


def listitemmath(request):
    """Render the list of math learning items."""
    return render(request, 'listitemmath.html', locals())
#################################################################
# Course pages. Each view loads the current pedagogic item, resolves the URL
# of its exercise and renders the matching course template. locals() is the
# template context, so the names `a` (item) and `exo` (exercise URL) are part
# of the template contract and must not be renamed.


def equation_du_premier_degre(request):
    a = pedagogic_item.objects.get(id=iditemcourant)
    exo = reverse(a.link_to_exo)
    return render(request, 'eq1.html', locals())


def equation_du_second(request):
    a = pedagogic_item.objects.get(id=iditemcourant)
    exo = reverse(a.link_to_exo)
    return render(request, 'eq2.html', locals())


def derivee(request):
    a = pedagogic_item.objects.get(id=iditemcourant)
    exo = reverse(a.link_to_exo)
    return render(request, 'derivee.html', locals())


def primitive(request):
    a = pedagogic_item.objects.get(id=iditemcourant)
    exo = reverse(a.link_to_exo)
    return render(request, 'primitive.html', locals())


def equation_differentiel(request):
    a = pedagogic_item.objects.get(id=iditemcourant)
    exo = reverse(a.link_to_exo)
    return render(request, 'eqdiff.html', locals())
def _shift_item_history(utilisateur):
    # Shift the user's item-history window: four nodes define the three-edge
    # path whose links receive retribution below.
    utilisateur.id_ante_ante_penultimate_item = utilisateur.id_ante_penultimate_item
    utilisateur.id_ante_penultimate_item = utilisateur.id_penultimate_item
    utilisateur.id_penultimate_item = utilisateur.id_last_item
    utilisateur.id_last_item = iditemcourant
    utilisateur.save()


def _apply_link_delta(link, field, delta):
    # Add `delta` to the (stringly-typed) score stored in `field`; a missing
    # score starts from the delta itself. Scores are stored as str(float).
    current = getattr(link, field)
    if current is not None:
        value = float(current) + delta
    else:
        value = delta
    setattr(link, field, str(value))
    link.save()


def _retribute_path(utilisateur, field, sign):
    # Reward (sign=+1) or penalise (sign=-1) the last three traversed links
    # with decreasing weights 3, 2, 1 -- most recent edge weighs the most.
    edges = [
        (utilisateur.id_penultimate_item, utilisateur.id_last_item, 3.0,
         utilisateur.id_penultimate_item is not None),
        (utilisateur.id_ante_penultimate_item, utilisateur.id_penultimate_item, 2.0,
         utilisateur.id_ante_penultimate_item is not None),
        (utilisateur.id_ante_ante_penultimate_item, utilisateur.id_ante_penultimate_item, 1.0,
         utilisateur.id_ante_ante_penultimate_item is not None
         and utilisateur.id_penultimate_item is not None),
    ]
    for src, dst, weight, enabled in edges:
        if not enabled:
            continue
        link = link_to_next_possible_pi.objects.get(iditemfk=src,
                                                    iditemnextfk=dst)
        _apply_link_delta(link, field, sign * weight)


def equation_du_premier_degreexo(request):
    """Handle the first-degree-equation exercise submission.

    On success the positive scores (``d_a_pos_ph``) of the last three
    traversed links get +3/+2/+1; on failure the negative scores
    (``d_a_neg_ph``) get -3/-2/-1. Either way the user's item history is
    shifted first and the user is redirected to the link proposal page.

    The previous implementation duplicated ~90 lines between the success and
    failure branches (only the field name and sign differed); the logic is
    now shared via the module-private helpers above.
    """
    if request.method == 'POST':
        form = formexo(request.POST)
        if form.is_valid():
            oui = form.cleaned_data['oui']
            non = form.cleaned_data['non']
            # The exercise counts as passed only for the (oui, not non) combo.
            reussite = oui is True and non is False
            utilisateur = utilisateurtable.objects.get(id=id)
            _shift_item_history(utilisateur)
            if reussite:
                _retribute_path(utilisateur, 'd_a_pos_ph', 1.0)
            else:
                _retribute_path(utilisateur, 'd_a_neg_ph', -1.0)
            return redirect("proposelien")
    else:
        form = formexo()
    # locals() exposes `form` to the template (invalid POST or plain GET).
    return render(request, 'exoeq1.html', locals())
def viewitem(request , iditem):
    # The user just clicked on an item (maybe independently, we do not know);
    # check whether a path already exists between the last visited item and
    # the current one, creating it if needed, then render the item page.
    # Template handling: the names `a` and `lien` are used by the template
    # through locals().
    a = pedagogic_item.objects.get(id = iditem)
    lien = reverse(a.link_to_courses)
    global iditemcourant
    iditemcourant = iditem  # this becomes the current item
    # Check for a path between the last item and the new one.
    utilisateur = utilisateurtable.objects.get(id = id)
    # A path requires at least one previously visited node; otherwise this is
    # the user's very first item.
    if utilisateur.id_last_item != None :
        precedent = utilisateur.id_last_item
        suivant = iditemcourant
        # The link is created if it does not exist yet.
        i , boole = link_to_next_possible_pi.objects.get_or_create(iditemfk=precedent, iditemnextfk= suivant)
        #return HttpResponse(boole)  # debug leftover
        # Decrement the positive score of every outgoing link of `precedent`.
        # NOTE(review): a link with no score yet is set to '-1' here, while
        # the exercise views store str(float) values -- confirm intended.
        listepossiblite = link_to_next_possible_pi.objects.filter(iditemfk = precedent)
        for chemin in listepossiblite:
            if chemin.d_a_pos_ph != None:
                var = float(chemin.d_a_pos_ph)
                var -= 1
                chemin.d_a_pos_ph = str(var)
                chemin.save()
            else:
                chemin.d_a_pos_ph = '-1'
                chemin.save()
    return render(request, 'viewitem.html', locals())
def proposelien(request):
    # Link-ranking algorithm: propose the next course link to the user based
    # on the outgoing links of the last visited item.
    utilisateur = utilisateurtable.objects.get(id = id)
    liste = link_to_next_possible_pi.objects.filter(iditemfk = utilisateur.id_last_item)
    nombrelien = len(liste)
    if nombrelien == 1:
        lienlist = pedagogic_item.objects.filter(id = liste[0].iditemnextfk)
        nom_cour = lienlist[0].link_to_courses
        lien = reverse(nom_cour)
        # NOTE(review): a response is returned only when exactly one candidate
        # link exists; any other count makes the view return None, which
        # Django rejects with an error. TODO: handle 0 and >1 links.
        return render(request, 'proposelien.html', locals())
| 3SCS/hackaton-130813 | hommilliere/utilisateur/views.py | views.py | py | 8,734 | python | fr | code | 0 | github-code | 6 | [
{
"api_name": "django.shortcuts.redirect",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 29,
"usage_type": "call"
},
{
"api_n... |
13130316582 | from django.conf.urls import url, include
from django.contrib.auth.decorators import login_required
from frontend.views import SPAView, UserConfigSPAWebService
# spa view
# NOTE(review): r'$' is unanchored (Django matches with re.search), so this
# pattern matches ANY remaining path -- presumably an intentional catch-all
# for client-side SPA routing; confirm, otherwise use r'^$'.
VIEWS_PATTERNS = [
    url(regex=r'$',
        view=login_required(SPAView.as_view()),
        name='spa'),
]

# config endpoint
# NOTE(review): r'user-config/$' lacks a ^ anchor, so it also matches e.g.
# 'api/foo/user-config/' -- confirm whether r'^user-config/$' was intended.
API_PATTERNS = [
    url(regex=r'user-config/$',
        view=login_required(UserConfigSPAWebService.as_view()),
        name='user-config'),
]

# API routes are checked first; everything else falls through to the SPA.
urlpatterns = [
    url(r'^api/', include(API_PATTERNS)),
    url(r'^', include(VIEWS_PATTERNS)),
]
{
"api_name": "django.conf.urls.url",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "frontend.views.SPAView.as_view",
"line_number": 10,
"usage_type": "call... |
32341699285 | import json
import numpy as np
import pandas as pd
import tensorflow as tf
class BalancedGroupSoftmax:
    """Balanced Group Softmax (BAGS) head for long-tailed classification.

    Classes are partitioned into ``n_groups`` frequency bins computed from
    the training set; each bin gets its own softmax head with slot 0
    reserved for an "others" entry. Group 0 is a 2-way head whose slot 1 is
    the empty/background class; at prediction time its foreground
    probability rescales the per-group predictions before they are merged
    back into full-label space.
    """

    def __init__(self,
                 dataset_json,
                 category_map,
                 empty_class_id,
                 selected_locations=None,
                 n_groups=4,
                 sl_max_groups=(0, 10, 100, 1000, 2**100),
                 beta_others=8.0):
        """Build the label<->group mapping from a COCO-style dataset json.

        Fix: ``sl_max_groups`` used to default to a mutable list, which is
        shared across all instances/calls; an immutable tuple avoids that
        pitfall. Its length must be ``n_groups + 1`` (exclusive upper bound
        of instance counts per group; the last bound is effectively inf).
        ``beta_others`` is the ratio of background samples kept per
        foreground sample when balancing a batch.
        """
        self.n_groups = n_groups
        self.sl_max_groups = sl_max_groups
        self.beta_others = beta_others
        self.category_map = category_map
        self.empty_class_id = empty_class_id
        self.label2binlabel = {}
        # Slot count per group; every group reserves slot 0 for
        # "others"/background, hence the initial count of 1.
        self.groups_counts = [1] * (n_groups + 1)
        self.predict_tables = []
        dataset_df = self._load_dataset(dataset_json, selected_locations)
        self._generate_binlabel_idx(dataset_df)
        self._generate_predict_tables()

    def _load_dataset(self, dataset_json, selected_locations):
        """Load the json and left-join image rows with their category id."""
        with tf.io.gfile.GFile(dataset_json, 'r') as json_file:
            json_data = json.load(json_file)

        images = pd.DataFrame(json_data['images'])
        annotations = pd.DataFrame(json_data['annotations'])
        images = pd.merge(images,
                          annotations[["image_id", "category_id"]],
                          how='left',
                          left_on='id',
                          right_on='image_id')

        if selected_locations is not None:
            images = images[images.location.isin(selected_locations)]
            images = images.copy()

        return images

    def _get_group(self, instances_count):
        """Return the index of the first group whose bound exceeds the count."""
        for group, group_max in enumerate(self.sl_max_groups):
            if instances_count < group_max:
                return group
        # Unreachable with the default bounds (last bound is 2**100).
        # NOTE(review): falling back to the bg/fg group 0 looks suspicious.
        return 0

    def _generate_binlabel_idx(self, dataset_df):
        """Assign every label a one-hot-per-group slot vector."""
        categories = list(range(self.category_map.get_num_classes()))

        # Group 0 is only bg/fg: the empty class takes slot 1 of group 0 and
        # slot 0 ("others") everywhere else.
        self.groups_counts[0] = 2
        empty_class = self.category_map.category_to_index(self.empty_class_id)
        self.label2binlabel[empty_class] = [1] + [0] * (self.n_groups)
        categories.remove(empty_class)

        # Non-empty categories: binned by training-set frequency.
        for categ in categories:
            categ_id = self.category_map.index_to_category(categ)
            instances_count = len(dataset_df[dataset_df.category_id == categ_id])
            group_id = self._get_group(instances_count)
            binlabel = [0] * (self.n_groups + 1)
            # Next free slot in this group; slot 0 stays "others".
            binlabel[group_id] = self.groups_counts[group_id]
            self.groups_counts[group_id] += 1
            self.label2binlabel[categ] = binlabel

    def _generate_predict_tables(self):
        """Build per-group (slots x num_classes) 0/1 scatter matrices."""
        for i in range(self.n_groups + 1):
            self.predict_tables.append(
                np.zeros(shape=(self.groups_counts[i],
                                self.category_map.get_num_classes())))

        for label, binlabel in self.label2binlabel.items():
            group = np.asarray(binlabel).argmax()
            self.predict_tables[group][binlabel[group]][label] = 1.0

    def create_classif_header(self, head_features):
        """Return one softmax Dense output per group on top of the features."""
        outputs = []
        for group_count in self.groups_counts:
            output = tf.keras.layers.Dense(group_count,
                                           activation='softmax')(head_features)
            outputs.append(output)
        return outputs

    def _create_map_layer(self, inputs, n_inputs, n_outputs, weights):
        """Apply a fixed (bias-free) linear map with the given kernel."""
        map_layer = tf.keras.layers.Dense(n_outputs, use_bias=False)
        # Dummy call to build the layer so set_weights accepts the kernel.
        map_layer(tf.convert_to_tensor(np.ones((1, n_inputs)), dtype=tf.float32))
        map_layer.set_weights([weights])

        return map_layer(inputs)

    def create_prediction_model(self, trained_model):
        """Merge the grouped heads of *trained_model* into full-label scores."""
        # Foreground probability = P(slot 0 of the bg/fg group), broadcast to
        # all classes via a fixed map.
        fg_prob_map = np.array([np.ones(self.category_map.get_num_classes()),
                                np.zeros(self.category_map.get_num_classes())])
        fg_prob = self._create_map_layer(trained_model.outputs[0],
                                         self.groups_counts[0],
                                         self.category_map.get_num_classes(),
                                         fg_prob_map)

        # Scatter each group's softmax back into full-label space.
        mapped_predictions = []
        for output, group_size, predict_tbl in zip(trained_model.outputs,
                                                   self.groups_counts,
                                                   self.predict_tables):
            layer_map = self._create_map_layer(output,
                                               group_size,
                                               self.category_map.get_num_classes(),
                                               predict_tbl)
            mapped_predictions.append(layer_map)

        # Group 0 passes through unscaled; the others are gated by fg_prob.
        scaled_mapped_predictions = [mapped_predictions[0]]
        for map_pred in mapped_predictions[1:]:
            scaled_map_pred = tf.keras.layers.Multiply()([map_pred, fg_prob])
            scaled_mapped_predictions.append(scaled_map_pred)

        preds = tf.keras.layers.Add()(scaled_mapped_predictions)
        model = tf.keras.models.Model(inputs=trained_model.inputs, outputs=preds)

        return model

    def process_label(self, label):
        """Map a scalar category-id tensor to per-group one-hot targets."""
        def _get_idx_label(label):
            categ_id = self.category_map.category_to_index(label.numpy())
            binlabels = self.label2binlabel[categ_id]
            binlabels_one_hot = []
            for idx, binlabel in enumerate(binlabels):
                one_hot = np.zeros(self.groups_counts[idx])
                one_hot[binlabel] = 1
                binlabels_one_hot.append(one_hot)

            return binlabels_one_hot

        labels = tf.py_function(func=_get_idx_label,
                                inp=[label],
                                Tout=([tf.float32]*(self.n_groups+1)))
        labels = [tf.ensure_shape(label, shape=(self.groups_counts[i],))
                  for i, label in enumerate(labels)]

        return tuple(labels)

    def generate_balancing_mask(self, labels):
        """Return per-group sample masks balancing "others" vs foreground."""
        batch_size = tf.shape(labels[0])[0]
        masks = []

        # For the bg/fg group we use all instances.
        mask0 = tf.ones(shape=(batch_size,))
        masks.append(mask0)

        def _get_max(labels, batch_size):
            # Keep all foreground samples plus at most beta_others * #fg
            # randomly chosen "others" (slot 0) samples.
            labels = labels.numpy()
            others = labels[:,0]
            fg = 1.0 - others
            fg_num = np.sum(fg)

            if fg_num == 0:
                return np.zeros(batch_size)

            others_num = batch_size - fg_num
            others_sample_num = int(fg_num * self.beta_others)

            if others_sample_num > others_num:
                return np.ones(batch_size)
            else:
                sample_idx = np.random.choice(others.nonzero()[0],
                                        (others_sample_num, ), replace=False)
                fg[sample_idx] = 1.0
                return fg

        for i in range(1, self.n_groups + 1):
            mask = tf.py_function(func=_get_max,
                                  inp=[labels[i], batch_size],
                                  Tout=tf.float32)
            masks.append(mask)

        return tuple(masks)
| alcunha/iwildcam2021ufam | classification/bags.py | bags.py | py | 6,432 | python | en | code | 11 | github-code | 6 | [
{
"api_name": "tensorflow.io.gfile.GFile",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "tensorflow.io",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pandas.DataFra... |
74056208827 | import torch
import torch.nn as nn
import torch.nn.functional as F
class BiDirectionalTreeGRU(nn.Module):
    """GRU that propagates embeddings up and down a binary tree.

    NOTE(review): this class appears to be mid-refactor and is not runnable
    as written -- see the per-method notes on undefined names below.
    """

    def __init__(self, n_hidden=None, n_iters=1):
        # n_hidden: embedding width; n_iters: number of up/down sweeps.
        super().__init__()
        self.n_hidden = n_hidden
        self.n_iters = n_iters
        self.down_root = nn.Linear(n_hidden, n_hidden)
        # NOTE(review): AnyBatchGRUCell is not imported in this module's
        # visible imports -- confirm it is provided elsewhere.
        self.down_gru = AnyBatchGRUCell(n_hidden, n_hidden)
        self.up_leaf = nn.Linear(n_hidden, n_hidden)
        self.up_gru = AnyBatchGRUCell(n_hidden, n_hidden)

    def forward(self, up_embeddings, down_embeddings, levels, children, n_inners):
        # Alternate downward and upward sweeps n_iters times.
        # NOTE(review): `states` is undefined here, and both callees are
        # invoked with one extra positional argument versus their signatures
        # -- this method raises NameError/TypeError as written.
        for _ in range(self.n_iters):
            self.down_the_tree(states, up_embeddings, down_embeddings, levels, children, n_inners)
            self.up_the_tree(states, up_embeddings, down_embeddings, levels, children, n_inners)

    def up_the_tree(self, up_embeddings, down_embeddings, levels, children, n_inners):
        # Bottom-up pass: combine left/right child embeddings into parents.
        # NOTE(review): `n_levels`, `u_k`, `embeddings`, `n_hidden` and the
        # layers self.fc_r/fc_h/fc_z, self.bn*, self.activation are all
        # undefined in this class -- the body looks copied from another
        # module and needs rewiring before use.
        zero = torch.zeros(1).long(); one = torch.ones(1).long()
        if torch.cuda.is_available(): zero = zero.cuda(); one = one.cuda()

        for i, nodes in enumerate(levels[::-1]):
            j = n_levels - 1 - i
            try:
                inner = nodes[:n_inners[j]]
            except ValueError:
                inner = []
            try:
                outer = nodes[n_inners[j]:]
            except ValueError:
                outer = []

            if len(inner) > 0:
                try:
                    u_k_inners = u_k[:n_inners[j]]
                except ValueError:
                    u_k_inners = []
                try:
                    u_k_leaves = u_k[n_inners[j]:]
                except ValueError:
                    u_k_leaves = []

                # Gather child embeddings from the level below.
                h_L = embeddings[j+1][children[inner, zero]]
                h_R = embeddings[j+1][children[inner, one]]

                hhu = torch.cat((h_L, h_R, u_k_inners), 1)
                r = self.fc_r(hhu)
                if self.bn: r = self.bn_r(r)
                r = F.sigmoid(r)

                h_H = self.fc_h(r * hhu)
                if self.bn: h_H = self.bn_h(h_H)
                h_H = self.activation(h_H)

                # Soft gating between the candidate, both children, and the
                # local state.
                z = self.fc_z(torch.cat((h_H, hhu), -1))
                if self.bn: z = self.bn_z(z)
                z_H = z[:, :n_hidden]  # new activation
                z_L = z[:, n_hidden:2*n_hidden]  # left activation
                z_R = z[:, 2*n_hidden:3*n_hidden]  # right activation
                z_N = z[:, 3*n_hidden:]  # local state
                z = torch.stack([z_H,z_L,z_R,z_N], 2)
                z = F.softmax(z)

                h = ((z[:, :, 0] * h_H) +
                     (z[:, :, 1] * h_L) +
                     (z[:, :, 2] * h_R) +
                     (z[:, :, 3] * u_k_inners))

                try:
                    embeddings.append(torch.cat((h, u_k_leaves), 0))
                except AttributeError:
                    embeddings.append(h)

            else:
                embeddings.append(u_k)

    def down_the_tree(self, up_embeddings, down_embeddings, levels, children, n_inners):
        # Top-down pass: push parent context to both children via the GRU.
        down_embeddings[0] = F.tanh(self.down_root(up_embeddings[0])) # root nodes
        zero = torch.zeros(1).long(); one = torch.ones(1).long()
        if torch.cuda.is_available(): zero = zero.cuda(); one = one.cuda()

        for j, nodes in enumerate(levels[:-1]):
            down_parent = down_embeddings[j]
            up_L = up_embeddings[j+1][children[nodes, zero]]
            up_R = up_embeddings[j+1][children[nodes, one]]
            down_L = self.down_gru(up_L, down_parent)
            down_R = self.down_gru(up_R, down_parent)
            # NOTE(review): `Variable` (legacy torch.autograd API) is not
            # imported here; also the scatter below assumes children indices
            # cover 0..2N-1 exactly -- confirm.
            h = Variable(torch.zeros(down_L.size()[0] * 2, down_L.size()[1]))
            h[children[nodes, zero]] = down_L
            h[children[nodes, one]] = down_R
            down_embeddings[j] = h
| isaachenrion/jets | src/architectures/utils/bidirectional_tree_gru.py | bidirectional_tree_gru.py | py | 3,809 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numb... |
9627877293 | from . import views
from django.urls import path, include
# Route table for the portfolio pages: each route maps to the page view of
# the same name.
_PAGES = (
    ('', views.index, 'index'),
    ('about/', views.about, 'about'),
    ('contact/', views.contact, 'contact'),
    ('services/', views.services, 'services'),
    ('skill/', views.skill, 'skill'),
)

urlpatterns = [
    path(route, view, name=name) for route, view, name in _PAGES
]
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
5478969307 | # A simple MLP network structure for point clouds,
#
# Added by Jiadai Sun
import torch
import torch.nn as nn
import torch.nn.functional as F
class PointRefine(nn.Module):
    """Per-point MLP that refines point features into class probabilities.

    The network normalises the raw features, lifts them through a
    35 -> 64 -> 128 -> 256 -> 64 MLP (BatchNorm + ReLU between layers) and
    finishes with a linear classifier followed by a softmax.
    """

    def __init__(self, n_class=3,
                 in_fea_dim=35,
                 out_point_fea_dim=64):
        super(PointRefine, self).__init__()
        self.n_class = n_class

        # Build the point-feature MLP layer list; the hidden widths and the
        # layer order match the classic BN -> (Linear, BN, ReLU)* -> Linear
        # stack.
        widths = [in_fea_dim, 64, 128, 256]
        layers = [nn.BatchNorm1d(in_fea_dim)]
        for d_in, d_out in zip(widths[:-1], widths[1:]):
            layers.extend([nn.Linear(d_in, d_out),
                           nn.BatchNorm1d(d_out),
                           nn.ReLU()])
        layers.append(nn.Linear(widths[-1], out_point_fea_dim))
        self.PPmodel = nn.Sequential(*layers)

        self.logits = nn.Sequential(
            nn.Linear(out_point_fea_dim, self.n_class)
        )

    def forward(self, point_fea):
        """Return softmax class probabilities for a (N, in_fea_dim) batch.

        N is the number of points; BatchNorm1d runs over that dimension.
        """
        point_fea = self.PPmodel(point_fea)
        return F.softmax(self.logits(point_fea), dim=1)
if __name__ == '__main__':
    # Smoke test: build the model and report its parameter count.
    import time
    # NOTE(review): `time` and `device` are unused below; the device probe
    # and the commented timing code look like benchmark leftovers.
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    model = PointRefine()
    model.train()
    # t0 = time.time()
    # pred = model(cloud)
    # t1 = time.time()
    # print(t1-t0)

    total = sum([param.nelement() for param in model.parameters()])
    print("Number of PointRefine parameter: %.2fM" % (total/1e6))
    # Number of PointRefine parameter: 0.04M
| haomo-ai/MotionSeg3D | modules/PointRefine/PointMLP.py | PointMLP.py | py | 1,699 | python | en | code | 206 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"lin... |
29456763062 | import abc
import os.path
from oslo_config import cfg
from oslo_log import log
import requests
import requests.certs
import six
from atrope import exception
from atrope import ovf
from atrope import paths
from atrope import utils
# Configuration options registered on the default group.
opts = [
    cfg.StrOpt('download_ca_file',
               default=paths.state_path_def('atrope-ca-bundle.pem'),
               help='Atrope will build a CA bundle for verifying the '
                    'HTTP servers when it is downloading the image, '
                    'concatenating the default OS CA bundle and the '
                    'CAs present in the $ca_path directory. This '
                    'is done as there may be certificates signed by '
                    'CAs that are trusted by the provider, but untrusted '
                    'by the default bundle and we need to trust both.'),
]

CONF = cfg.CONF
CONF.register_opts(opts)
# ca_path is declared in atrope.smime; import it so CONF.ca_path resolves.
CONF.import_opt("ca_path", "atrope.smime")

# Module-level logger.
LOG = log.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class BaseImage(object):
    """Abstract base for downloadable appliance images.

    Subclasses must populate the attributes set in ``__init__`` (and, as
    ``get_disk`` shows, a ``format`` attribute) and implement ``download``.
    """

    @abc.abstractmethod
    def __init__(self, image_info):
        self.uri = None         # remote location of the image
        self.sha512 = None      # expected checksum (hex digest)
        self.identifier = None  # unique image id
        self.location = None    # local path once downloaded
        self.verified = False   # True after a successful checksum check

    @abc.abstractmethod
    def download(self, dest):
        """Download the image.

        :param dest: destination directory.
        """

    def get_file(self, mode="rb"):
        """Return a File object containing the downloaded file."""
        return open(self.location, mode)

    def get_kernel(self):
        # Not supported by the base class; subclasses may override.
        raise NotImplementedError()

    def get_ramdisk(self):
        # Not supported by the base class; subclasses may override.
        raise NotImplementedError()

    def get_disk(self):
        """Return the format and a 'ro' File-like object containing the disk.

        Images can be stored in containers like OVA, this method will return a
        tuple (format, fd) being 'format' a string containing the image disk
        format and 'fd' File-like object containing the original image disk as
        extracted from the container.

        We assume that containers only store one image disk. We scan the file
        in reverse order, as OVF specification states that files can be
        appended so as to update the OVF file.
        """
        # NOTE(review): relies on self.format being set by the subclass
        # (e.g. HepixImage); the base __init__ does not define it.
        if self.format.lower() != "ova":
            return self.format, self.get_file()

        ovf_file = ovf.get_ovf(self.location)
        fmt, disk_filename = ovf.get_disk_name(ovf_file)
        disk_fd = ovf.extract_file(self.location, disk_filename)
        return fmt, disk_fd

    def verify_checksum(self, location=None):
        """Verify the image's checksum.

        :param location: optional path to check; defaults to self.location.
        :raises exception.ImageNotFoundOnDisk: if no location is known.
        :raises exception.ImageVerificationFailed: on checksum mismatch.
        """
        # NOTE(review): this first log line runs before the `or` fallback
        # below, so it may log location=None even when self.location is set.
        LOG.info("Image '%s' present in '%s', verifying checksum",
                 self.identifier, location)
        location = location or self.location
        if location is None:
            raise exception.ImageNotFoundOnDisk(location=location)

        sha512 = utils.get_file_checksum(location)
        if sha512.hexdigest() != self.sha512:
            raise exception.ImageVerificationFailed(
                id=self.identifier,
                expected=self.sha512,
                obtained=sha512.hexdigest()
            )
        LOG.info("Image '%s' present in '%s', checksum OK",
                 self.identifier, location)
        self.verified = True
class HepixImage(BaseImage):
    """Image described by a HEPiX image-list entry (the ``hv:image`` dict)."""

    # Mapping of HEPiX metadata keys to the attribute names set on self.
    field_map = {
        "ad:group": "group",
        "ad:mpuri": "mpuri",
        "ad:user:fullname": "user_fullname",
        "ad:user:guid": "user_guid",
        "ad:user:uri": "user_uri",
        "dc:description": "description",
        "dc:identifier": "identifier",
        "dc:title": "title",
        "hv:hypervisor": "hypervisor",
        "hv:format": "format",
        "hv:size": "size",
        "hv:uri": "uri",
        "hv:version": "version",
        "sl:arch": "arch",
        "sl:checksum:sha512": "sha512",
        "sl:comments": "comments",
        "sl:os": "os",
        "sl:osname": "osname",
        "sl:osversion": "osversion",
    }
    # Every mapped field must be present in the image definition.
    required_fields = field_map.keys()

    def __init__(self, image_info):
        """Populate attributes from the ``hv:image`` entry of *image_info*.

        Also (re)builds the CA bundle used to verify download servers.

        :raises exception.InvalidImageList: if a required field is missing.
        """
        super(HepixImage, self).__init__(image_info)

        image_dict = image_info.get("hv:image", {})

        utils.ensure_ca_bundle(CONF.download_ca_file,
                               [requests.certs.where()],
                               CONF.ca_path)

        for i in self.required_fields:
            value = image_dict.get(i, None)
            if value is None:
                reason = "Invalid image definition, missing '%s'" % i
                raise exception.InvalidImageList(reason=reason)
            attr = self.field_map.get(i)
            setattr(self, attr, value)

        # add everything from hepix as 'extra', so it can be queried in glance
        self.appliance_attributes = image_dict

    def _download(self, location):
        """Stream the image from self.uri into *location* and verify it.

        :raises exception.ImageDownloadFailed: on request failure or a
            non-OK HTTP status.
        :raises exception.ImageVerificationFailed: on checksum mismatch.
        """
        LOG.info("Downloading image '%s' from '%s' into '%s'",
                 self.identifier, self.uri, location)
        with open(location, 'wb') as f:
            try:
                response = requests.get(self.uri, stream=True,
                                        verify=CONF.download_ca_file)
            except Exception as e:
                LOG.error(e)
                # NOTE(review): arbitrary exceptions may not carry `errno`;
                # confirm this cannot itself raise AttributeError.
                raise exception.ImageDownloadFailed(code=e.errno,
                                                    reason=e)

            if not response.ok:
                LOG.error("Cannot download image: (%s) %s",
                          response.status_code, response.reason)
                raise exception.ImageDownloadFailed(code=response.status_code,
                                                    reason=response.reason)

            # Stream to disk in 1 KiB chunks to avoid loading the whole
            # image into memory.
            for block in response.iter_content(1024):
                if block:
                    f.write(block)
                    f.flush()
        try:
            self.verify_checksum(location=location)
        except exception.ImageVerificationFailed as e:
            LOG.error(e)
            raise
        else:
            LOG.info("Image '%s' stored as '%s'",
                     self.identifier, location)

    def download(self, basedir):
        """Download the image into ``basedir/<identifier>`` if needed.

        An existing on-disk copy is reused when its checksum still matches;
        otherwise it is downloaded again.

        :raises exception.ImageAlreadyDownloaded: if already fetched during
            this execution (self.location already set).
        """
        # The image has been already downloaded in this execution.
        if self.location is not None:
            raise exception.ImageAlreadyDownloaded(location=self.location)

        location = os.path.join(basedir, self.identifier)
        if not os.path.exists(location):
            self._download(location)
        else:
            # Image exists, is it checksum valid?
            try:
                self.verify_checksum(location=location)
            except exception.ImageVerificationFailed:
                LOG.warning("Image '%s' present in '%s' is not valid, "
                            "downloading again",
                            self.identifier, location)
                self._download(location)

        self.location = location
| alvarolopez/atrope | atrope/image.py | image.py | py | 6,812 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "oslo_config.cfg.StrOpt",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "oslo_config.cfg",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "atrope.paths.state_path_def",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "at... |
17508200123 | from typing import List
def products_div(arr: List[int]) -> List[int]:
    """Return, for each index, the product of every other element.

    Division-based variant. Fix: the original raised ZeroDivisionError as
    soon as ``arr`` contained a 0; zeros are now handled explicitly:
    exactly one zero -> only that slot gets the product of the non-zero
    elements; two or more zeros -> every slot is 0. For zero-free input the
    behavior is unchanged (integer division of the full product).
    """
    zeros = arr.count(0)
    if zeros > 1:
        # At least one zero remains in every "all others" product.
        return [0] * len(arr)
    if zeros == 1:
        prod = 1
        for x in arr:
            if x != 0:
                prod *= x
        return [prod if x == 0 else 0 for x in arr]
    prod = 1
    for x in arr:
        prod *= x
    return [prod // x for x in arr]
'''
arr: [2, 3, 4, 5,]
l: [2 23 234]
r: [5 54 543]
res: [.-345, 2-45, 23-5, 234-.]
l = []
prod = 1
for i in range(len(arr)-1):
prod *= arr[i]
l.append(prod)
prod = 1
for i in range(len(arr)-1, 0, -1):
prod *= arr[i]
r.append(prod)
res = [1] * len(arr)
for i in range(len(arr) - 1):
res[i] *= r[-i-1]
res[i+1] = l[i]
return res
'''
def products(arr: List[int]) -> List[int]:
    """Return, for each index, the product of every other element.

    Division-free: a forward sweep fills each slot with the product of all
    elements to its left, then a backward sweep multiplies in the product
    of all elements to its right. O(n) time, O(1) extra space.
    """
    n = len(arr)
    result = [1] * n

    running = 1
    for i in range(n):
        result[i] = running
        running *= arr[i]

    running = 1
    for i in range(n - 1, -1, -1):
        result[i] *= running
        running *= arr[i]

    return result
# Quick manual check of the division-free implementation.
arr = [2, 3, 4, 5,]
print(products(arr))  # [60, 40, 30, 24]
| soji-omiwade/cs | dsa/before_rubrik/array_of_array_of_products.py | array_of_array_of_products.py | py | 982 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 2,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 31,
"usage_type": "name"
}
] |
9626855200 | import pandas as pd
import numpy as np
import os
import cv2
import json
from sklearn.model_selection import train_test_split
from trainer import Trainer
from sklearn.metrics import accuracy_score, mean_absolute_error, mean_squared_error
from collections import Counter
def read(corpus_path, df):
    """Read the images listed in ``df["id"]`` (in that order) from *corpus_path*.

    Returns ``(data, labels)`` where ``data`` is a float32 array of the
    decoded images and ``labels`` is a uint8 array of ``df["label"]`` (or
    None when the dataframe has no label column, e.g. the test split).

    Fix: ``cv2.imread`` returns None for missing/unreadable files, which
    previously produced a confusing object-array failure later; fail fast
    with the offending path instead.
    """
    file_paths = [os.path.join(corpus_path, name) for name in df["id"]]
    data = []
    for file_path in file_paths:
        image = cv2.imread(file_path)
        if image is None:
            raise FileNotFoundError(
                "Could not read image: {}".format(file_path))
        data.append(image)
    data = np.asarray(data, dtype=np.float32)

    # Collect the labels as well (if they exist).
    labels = None
    if "label" in df:
        labels = np.asarray(df["label"], dtype=np.uint8)
        print("#### Histogram: {}".format(Counter(labels)))

    return data, labels
# Plot up to nine sample images of one class (to better understand the data).
def plot_per_group(data, labels, target_label):
    """Show at most 9 images labelled *target_label* in a 3x3 grid."""
    import matplotlib.pyplot as plt

    plt.figure(figsize=(10, 10))
    shown = 0
    for image, label in zip(data, labels):
        if shown >= 9:
            break
        # Undo the float scaling and the OpenCV BGR channel order for display.
        rgb = cv2.cvtColor((image * 255).astype("uint8"), cv2.COLOR_BGR2RGB)
        if label == target_label:
            shown += 1
            plt.subplot(3, 3, shown)
            plt.imshow(rgb)
            plt.title(label)
            plt.axis("off")
# Entry point: read the experiment config, prepare a fresh output directory,
# load the image corpus, then either run cross-validation or a single
# train/evaluate pass (chosen by config_file["type"]).
if __name__ == "__main__":
    with open('config_file.json') as json_file:
        config_file = json.load(json_file)
    # Pin training to the GPU selected in the config.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(config_file["device"])
    PROJECT_PATH = os.path.join(config_file["training_path"], "{}/".format(config_file["device"]))
    # Refuse to overwrite a previous run's outputs.
    if os.path.exists(PROJECT_PATH):
        raise Exception("{} already exists. Try another path!".format(PROJECT_PATH))
    os.makedirs(PROJECT_PATH, exist_ok=True)
    config_file["checkpoint_args"]["filepath"] = os.path.join(PROJECT_PATH, config_file["checkpoint_args"]["filepath"])
    # Snapshot the (resolved) config alongside the results for reproducibility.
    with open(os.path.join(PROJECT_PATH, "config_file.json"), 'w+') as f:
        json.dump(config_file, f, indent=4)
    df_train = pd.read_csv("corpus/train.csv")
    df_test = pd.read_csv("corpus/test.csv")
    all_train_data, all_train_labels = read(corpus_path="corpus/train/", df=df_train) # <--- collect all the images from the training dataset
    df_train = df_train.sample(frac=1).reset_index(drop=True) # <--- Shuffle the training data. Because of the augmentation step, we cannot shuffle during the `fit` call.
    # sklearn's train_test_split (not the local stratified helper).
    df_train, df_validation = train_test_split(df_train, test_size=0.2) # <--- split into train + validation
    train_data, train_labels = read(corpus_path="corpus/train/", df=df_train) # <--- load the corresponding train images
    validation_data, validation_labels = read(corpus_path="corpus/train/", df=df_validation) # <--- load the corresponding validation images
    test_data, _ = read(corpus_path="corpus/test/", df=df_test) # <--- load the corresponding test images
    print("##### [Train]: Initial: {} -> {}".format(len(df_train), train_data.shape))
    print("##### [Validation]: Initial: {} -> {}".format(len(df_validation), validation_data.shape))
    print("##### [Test]: Initial: {} -> {}".format(len(df_test), test_data.shape))
    # Optional visual inspection of a few samples per class:
    # for label in range(1, 6):
    # 	plot_per_group(data=train_data, labels=train_labels, target_label=label)
    # instantiate a trainer object
    trainer = Trainer(all_train_data=all_train_data,
                      all_train_labels=all_train_labels,
                      train_data=train_data,
                      train_labels=train_labels,
                      validation_data=validation_data,
                      validation_labels=validation_labels,
                      loss_name=config_file["loss_name"],
                      model_type=config_file["model_type"],
                      learning_rate_decay=config_file["learning_rate_decay"],
                      enable_batchnorm=config_file["enable_batchnorm"],
                      batch_size=config_file["batch_size"],
                      epochs=config_file["epochs"],
                      early_stopping_args=config_file["early_stopping_args"],
                      checkpoint_args=config_file["checkpoint_args"],
                      task_type=config_file["task_type"],
                      learning_rate=config_file["learning_rate"],
                      weight_decay=config_file["weight_decay"],
                      num_classes=5)
    # Cross Validation case: only aggregate metrics are persisted.
    if config_file["type"] == "cross_validation":
        mae_list, mse_list, accuracy_list = trainer.cross_validation(epochs=75)
        history_cross_validation = {}
        history_cross_validation["mae_list"] = str(mae_list)
        history_cross_validation["mse_list"] = str(mse_list)
        history_cross_validation["accuracy_list"] = str(accuracy_list)
        with open(os.path.join(PROJECT_PATH, "history_cross_validation.json"), 'w+') as f:
            json.dump(history_cross_validation, f, indent=4)
    # Training case: fit once, then score and dump predictions for all splits.
    elif config_file["type"] == "train":
        history = trainer.train()
        # Stringify values so the history dict is JSON-serializable.
        for key in history:
            history[key] = str(history[key])
        train_predicted_labels, train_predictions = trainer.get_predictions(train_data)
        validation_predicted_labels, validation_predictions = trainer.get_predictions(validation_data)
        test_predicted_labels, test_predictions = trainer.get_predictions(test_data)
        # Accuracy score for train + validation
        accuracy_train = accuracy_score(train_labels, train_predicted_labels)
        accuracy_validation = accuracy_score(validation_labels, validation_predicted_labels)
        print("#### [Train] Accuracy for the best model: {}".format(accuracy_train))
        print("#### [Validation] Accuracy for the best model: {}".format(accuracy_validation))
        history["train_accuracy"] = str(accuracy_train)
        history["validation_accuracy"] = str(accuracy_validation)
        # For regression, add MAE and MSE too
        if config_file["task_type"] == "regression":
            mae_train = mean_absolute_error(train_labels, train_predictions)
            mae_validation = mean_absolute_error(validation_labels, validation_predictions)
            mse_train = mean_squared_error(train_labels, train_predictions)
            mse_validation = mean_squared_error(validation_labels, validation_predictions)
            history["train_mae"] = str(mae_train)
            history["validation_mae"] = str(mae_validation)
            history["train_mse"] = str(mse_train)
            history["validation_mse"] = str(mse_validation)
            print("#### [Train] Mae for the best model: {}".format(mae_train))
            print("#### [Validation] Mae for the best model: {}".format(mae_validation))
            print("#### [Train] Mse for the best model: {}".format(mse_train))
            print("#### [Validation] Mse for the best model: {}".format(mse_validation))
        df = pd.DataFrame.from_dict({"id": df_test["id"], "label": list(test_predicted_labels)})
        df.to_csv(os.path.join(PROJECT_PATH, "final_predictions.csv"), index=False)
        df_train["prediction"] = list(train_predictions) # <--- add predictions
        df_validation["prediction"] = list(validation_predictions) # <--- add predictions
        df_test["prediction"] = list(test_predictions) # <--- add predictions
        # merge back train + validation
        merged_train_df = pd.concat([df_train, df_validation], ignore_index=True) # <--- merge train + validation predictions
        merged_train_df = merged_train_df.sort_values(by=["id"]) # <--- sort by id
        merged_train_df.to_csv(os.path.join(PROJECT_PATH, "train_validation_predictions.csv"), index=False)
        df_test.to_csv(os.path.join(PROJECT_PATH, "test_predictions.csv"), index=False)
        with open(os.path.join(PROJECT_PATH, "history.json"), 'w+') as f:
            json.dump(history, f, indent=4)
        print(df)
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number... |
35463005885 | from collections import namedtuple, Counter
import warnings
from math import sqrt
import numpy as np
from scipy.stats import special_ortho_group
import pytest
import kwant
from ... import lattice
from ...builder import HoppingKind, Builder, Site
from ...system import NoSymmetry
from .. import gauge
## Utilities
square_lattice = lattice.square(norbs=1, name='square')
honeycomb_lattice = lattice.honeycomb(norbs=1, name='honeycomb')
cubic_lattice = lattice.cubic(norbs=1, name='cubic')
def rectangle(W, L):
    """Shape predicate and centre for an L (x) by W (y) rectangle at the origin."""
    def inside(site):
        x, y = site.pos[0], site.pos[1]
        return 0 <= x < L and 0 <= y < W
    return inside, (L/2, W/2)
def ring(r_inner, r_outer):
    """Annulus r_inner <= |pos| <= r_outer; centre point mid-way along +x."""
    def inside(site):
        radius = np.linalg.norm(site.pos)
        return r_inner <= radius <= r_outer
    return inside, ((r_inner + r_outer) / 2, 0)
def wedge(W):
    """Triangular wedge: 0 <= x < W with 0 <= y <= x; anchored at the origin."""
    def inside(site):
        x, y = site.pos[0], site.pos[1]
        return (0 <= x < W) and (0 <= y <= x)
    return inside, (0, 0)
def half_ring(r_inner, r_outer):
    """Left half (x <= 0) of the annulus produced by ring()."""
    in_ring, _ = ring(r_inner, r_outer)
    def inside(site):
        return site.pos[0] <= 0 and in_ring(site)
    return inside, (-(r_inner + r_outer) / 2, 0)
def cuboid(a, b, c):
    """Axis-aligned box [0, a) x [0, b) x [0, c) with one corner at the origin."""
    def inside(site):
        x, y, z = site.pos[0], site.pos[1], site.pos[2]
        return 0 <= x < a and 0 <= y < b and 0 <= z < c
    return inside, (a/2, b/2, c/2)
def hypercube(dim, W):
    """dim-dimensional cube [0, W)^dim with one corner at the origin."""
    def inside(site):
        return all(0 <= coord < W for coord in site.pos)
    return inside, (W / 2,) * dim
def circle(r):
    """Open disc of radius r about the origin (strict inequality)."""
    def inside(site):
        return np.linalg.norm(site.pos) < r
    return inside, (0, 0)
def ball(dim, r):
    """Open dim-dimensional ball of radius r about the origin."""
    def inside(site):
        return np.linalg.norm(site.pos) < r
    return inside, (0,) * dim
def model(lat, neighbors):
    """Build a translationally invariant model on `lat` with hoppings up to
    the `neighbors`-th nearest neighbour.  All values are None placeholders;
    only the graph structure matters for these tests."""
    syst = Builder(lattice.TranslationalSymmetry(*lat.prim_vecs))
    if hasattr(lat, 'sublattices'):
        for sub in lat.sublattices:
            zv = (0,) * len(sub.prim_vecs)
            syst[sub(*zv)] = None
    else:
        # BUG FIX: the original referenced `l`, the loop variable of the
        # *other* branch, which is unbound here (NameError for lattices
        # without a 'sublattices' attribute).  Use `lat` itself.
        zv = (0,) * len(lat.prim_vecs)
        syst[lat(*zv)] = None
    for r in range(neighbors):
        syst[lat.neighbors(r + 1)] = None
    return syst
def check_loop_kind(loop_kind):
    """Validate that a sequence of hopping kinds forms a closed loop.

    Each element unpacks to (delta, family_a, family_b) and exposes a
    `.delta` attribute.  Consecutive hoppings must chain (previous family_b
    equals the next family_a), the chain must return to the starting family,
    and the displacements must sum to zero.  Raises ValueError otherwise.
    """
    (_, first_fam_a, prev_fam_b), *rest = loop_kind
    for (_, fam_a, fam_b) in rest:
        if prev_fam_b != fam_a:
            raise ValueError('Invalid loop kind: does not close')
        prev_fam_b = fam_b
    # loop closes
    net_delta = np.sum([hk.delta for hk in loop_kind])
    # BUG FIX: the original tested `fam_b`, which is unbound when loop_kind
    # has a single element (NameError); `prev_fam_b` always holds the last
    # hopping's target family.
    if first_fam_a != prev_fam_b or np.any(net_delta != 0):
        raise ValueError('Invalid loop kind: does not close')
def available_loops(syst, loop_kind):
    """Return every loop of sites in `syst` that realizes `loop_kind`,
    trying each site of the system as a starting point.  Starting points
    whose loop leaves the system or mismatches a family are skipped."""
    check_loop_kind(loop_kind)

    def trace(start):
        # Follow the hopping kinds one by one; give up on any mismatch.
        path = [start]
        current = start
        for delta, family_a, family_b in loop_kind:
            nxt = Site(family_b, current.tag + delta, True)
            if family_a != current.family or (current, nxt) not in syst:
                return None
            path.append(nxt)
            current = nxt
        return path

    return [loop for loop in map(trace, syst.sites()) if loop is not None]
def loop_to_links(loop):
    """Turn a site sequence [s0, s1, ...] into consecutive pairs [(s0, s1), ...]."""
    return [(loop[i], loop[i + 1]) for i in range(len(loop) - 1)]
def no_symmetry(lat, neighbors):
    """Symmetry factory for finite systems: ignores the lattice entirely."""
    return NoSymmetry()
def translational_symmetry(lat, neighbors):
    """Symmetry factory for infinite systems: translation along the first
    primitive vector, scaled so a unit cell fits `neighbors`-range hoppings."""
    period = int((neighbors + 1)/2) * lat.prim_vecs[0]
    return lattice.TranslationalSymmetry(period)
## Tests
# Tests that phase around a loop is equal to the flux through the loop.
# First we define the loops that we want to test, for various lattices.
# If a system does not support a particular kind of loop, they will simply
# not be generated.
# Each Loop records a closed path of hoppings and the expected flux through
# it; _test_phase_loops checks the accumulated phase equals exp(1j*pi*flux).
Loop = namedtuple('Loop', ('path', 'flux'))
square_loops = [([HoppingKind(d, square_lattice) for d in l.path], l.flux)
                for l in [
                    # 1st nearest neighbors
                    Loop(path=[(1, 0), (0, 1), (-1, 0), (0, -1)], flux=1),
                    # 2nd nearest neighbors
                    Loop(path=[(1, 0), (0, 1), (-1, -1)], flux=0.5),
                    Loop(path=[(1, 0), (-1, 1), (0, -1)], flux=0.5),
                    # 3rd nearest neighbors
                    Loop(path=[(2, 0), (0, 1), (-2, 0), (0, -1)], flux=2),
                    Loop(path=[(2, 0), (-1, 1), (-1, 0), (0, -1)], flux=1.5),
                ]]
a, b = honeycomb_lattice.sublattices
# Honeycomb paths additionally carry the start/end sublattice of each hopping.
honeycomb_loops = [([HoppingKind(d, a, b) for *d, a, b in l.path], l.flux)
                   for l in [
                       # 1st nearest neighbors
                       Loop(path=[(0, 0, a, b), (-1, 1, b, a), (0, -1, a, b), (0, 0, b, a),
                                  (1, -1, a, b), (0, 1, b, a)],
                            flux=sqrt(3)/2),
                       # 2nd nearest neighbors
                       Loop(path=[(-1, 1, a, a), (0, -1, a, a), (1, 0, a, a)],
                            flux=sqrt(3)/4),
                       Loop(path=[(-1, 0, b, b), (1, -1, b, b), (0, 1, b, b)],
                            flux=sqrt(3)/4),
                   ]]
cubic_loops = [([HoppingKind(d, cubic_lattice) for d in l.path], l.flux)
               for l in [
                   # 1st nearest neighbors
                   Loop(path=[(1, 0, 0), (0, 1, 0), (-1, 0, 0), (0, -1, 0)], flux=1),
                   Loop(path=[(0, 1, 0), (0, 0, 1), (0, -1, 0), (0, 0, -1)], flux=0),
                   Loop(path=[(1, 0, 0), (0, 0, 1), (-1, 0, 0), (0, 0, -1)], flux=0),
                   # 2nd nearest neighbors
                   Loop(path=[(1, 0, 0), (-1, 1, 0), (0, -1, 0)], flux=0.5),
                   Loop(path=[(1, 0, 0), (0, 1, 0), (-1, -1, 0)], flux=0.5),
                   Loop(path=[(1, 0, 0), (-1, 0, 1), (0, 0, -1)], flux=0),
                   Loop(path=[(1, 0, 0), (0, 0, 1), (-1, 0, -1)], flux=0),
                   Loop(path=[(0, 1, 0), (0, -1, 1), (0, 0, -1)], flux=0),
                   Loop(path=[(0, 1, 0), (0, 0, 1), (0, -1, -1)], flux=0),
                   # 3rd nearest neighbors
                   Loop(path=[(1, 1, 1), (0, 0, -1), (-1, -1, 0)], flux=0),
                   Loop(path=[(1, 1, 1), (-1, 0, -1), (0, -1, 0)], flux=0.5),
               ]]
# (lattice, loops) pairs consumed by the parametrized tests below.
square = (square_lattice, square_loops)
honeycomb = (honeycomb_lattice, honeycomb_loops)
cubic = (cubic_lattice, cubic_loops)
def _test_phase_loops(syst, phases, loops):
    """Assert that the phase accumulated around every realizable loop in
    `syst` equals exp(1j*pi*flux) for that loop kind's expected flux."""
    for kind, flux in loops:
        expected = np.exp(1j * np.pi * flux)
        for loop in available_loops(syst, kind):
            accumulated = np.prod([phases(a, b) for a, b in loop_to_links(loop)])
            assert np.isclose(accumulated, expected)
@pytest.mark.parametrize("neighbors", [1, 2, 3])
@pytest.mark.parametrize("symmetry", [no_symmetry, translational_symmetry],
                         ids=['finite', 'infinite'])
@pytest.mark.parametrize("lattice, loops", [square, honeycomb, cubic],
                         ids=['square', 'honeycomb', 'cubic'])
def test_phases(lattice, neighbors, symmetry, loops):
    """Check that the phases around common loops are equal to the flux, for
    finite and infinite systems with uniform magnetic field.
    """
    W = 4
    dim = len(lattice.prim_vecs)
    # 2D systems take a scalar field (out-of-plane component); 3D a vector.
    field = np.array([0, 0, 1]) if dim == 3 else 1
    syst = Builder(symmetry(lattice, neighbors))
    syst.fill(model(lattice, neighbors), *hypercube(dim, W))
    this_gauge = gauge.magnetic_gauge(syst.finalized())
    phases = this_gauge(field)
    _test_phase_loops(syst, phases, loops)
@pytest.mark.parametrize("neighbors", [1, 2, 3])
@pytest.mark.parametrize("lat, loops", [square, honeycomb],
                         ids=['square', 'honeycomb'])
def test_phases_composite(neighbors, lat, loops):
    """Check that the phases around common loops are equal to the flux, for
    composite systems with uniform magnetic field.
    """
    W = 4
    dim = len(lat.prim_vecs)
    field = np.array([0, 0, 1]) if dim == 3 else 1
    lead = Builder(lattice.TranslationalSymmetry(-lat.prim_vecs[0]))
    lead.fill(model(lat, neighbors), *hypercube(dim, W))
    # Case where extra sites are added and fields are same in
    # scattering region and lead.
    syst = Builder()
    syst.fill(model(lat, neighbors), *ball(dim, W + 1))
    extra_sites = syst.attach_lead(lead)
    assert extra_sites # make sure we're testing the edge case with added sites
    this_gauge = gauge.magnetic_gauge(syst.finalized())
    # same field in scattering region and lead
    phases, lead_phases = this_gauge(field, field)
    # When extra sites are added to the central region we need to select
    # the correct phase function.
    def combined_phases(a, b):
        # Sites that attach_lead() added belong to the lead's gauge.
        if a in extra_sites or b in extra_sites:
            return lead_phases(a, b)
        else:
            return phases(a, b)
    _test_phase_loops(syst, combined_phases, loops)
    _test_phase_loops(lead, lead_phases, loops)
@pytest.mark.parametrize("neighbors", [1, 2])
def test_overlapping_interfaces(neighbors):
    """Test composite systems with overlapping lead interfaces."""
    lat = square_lattice
    def _make_syst(edge, W=5):
        # W x W square region; one lead along -x and one along -y, each
        # narrowed by `edge` sites on both sides of its interface.
        syst = Builder()
        syst.fill(model(lat, neighbors), *rectangle(W, W))
        leadx = Builder(lattice.TranslationalSymmetry((-1, 0)))
        leadx[(lat(0, j) for j in range(edge, W - edge))] = None
        for n in range(1, neighbors + 1):
            leadx[lat.neighbors(n)] = None
        leady = Builder(lattice.TranslationalSymmetry((0, -1)))
        leady[(lat(i, 0) for i in range(edge, W - edge))] = None
        for n in range(1, neighbors + 1):
            leady[lat.neighbors(n)] = None
        assert not syst.attach_lead(leadx) # sanity check; no sites added
        assert not syst.attach_lead(leady) # sanity check; no sites added
        return syst, leadx, leady
    # edge == 0: lead interfaces overlap
    # edge == 1: lead interfaces partition scattering region
    # into 2 disconnected components
    for edge in (0, 1):
        syst, leadx, leady = _make_syst(edge)
        this_gauge = gauge.magnetic_gauge(syst.finalized())
        phases, leadx_phases, leady_phases = this_gauge(1, 1, 1)
        _test_phase_loops(syst, phases, square_loops)
        _test_phase_loops(leadx, leadx_phases, square_loops)
        _test_phase_loops(leady, leady_phases, square_loops)
def _make_square_syst(sym, neighbors=1):
    """2x2 square-lattice patch with the given symmetry and hoppings up to
    the `neighbors`-th nearest neighbour (all values are None)."""
    syst = Builder(sym)
    syst[(square_lattice(x, y) for x in (0, 1) for y in (0, 1))] = None
    for rng in range(1, neighbors + 1):
        syst[square_lattice.neighbors(rng)] = None
    return syst
def test_unfixable_gauge():
    """Check error is raised when we cannot fix the gauge."""
    leadx = _make_square_syst(lattice.TranslationalSymmetry((-1, 0)))
    leady = _make_square_syst(lattice.TranslationalSymmetry((0, -1)))
    # 1x2 line with 2 leads (deleting the x == 1 column leaves a line)
    syst = _make_square_syst(NoSymmetry())
    del syst[[square_lattice(1, 0), square_lattice(1, 1)]]
    syst.attach_lead(leadx)
    syst.attach_lead(leadx.reversed())
    with pytest.raises(ValueError):
        gauge.magnetic_gauge(syst.finalized())
    # 2x2 square with leads attached from all 4 sides,
    # and nearest neighbor hoppings
    syst = _make_square_syst(NoSymmetry())
    # Until we add the last lead we have enough gauge freedom
    # to specify independent fields in the scattering region
    # and each of the leads. We check that no extra sites are
    # added as a sanity check.
    assert not syst.attach_lead(leadx)
    gauge.magnetic_gauge(syst.finalized())
    assert not syst.attach_lead(leady)
    gauge.magnetic_gauge(syst.finalized())
    assert not syst.attach_lead(leadx.reversed())
    gauge.magnetic_gauge(syst.finalized())
    # Adding the last lead removes our gauge freedom.
    assert not syst.attach_lead(leady.reversed())
    with pytest.raises(ValueError):
        gauge.magnetic_gauge(syst.finalized())
    # 2x2 square with 2 leads, but 4th nearest neighbor hoppings
    syst = _make_square_syst(NoSymmetry())
    del syst[(square_lattice(1, 0), square_lattice(1, 1))]
    leadx = _make_square_syst(lattice.TranslationalSymmetry((-1, 0)))
    leadx[square_lattice.neighbors(4)] = None
    for lead in (leadx, leadx.reversed()):
        syst.attach_lead(lead)
    with pytest.raises(ValueError):
        gauge.magnetic_gauge(syst.finalized())
def _test_disconnected(syst):
    """Helper: assert that gauge fixing rejects a disconnected unit cell."""
    with pytest.raises(ValueError) as err:
        gauge.magnetic_gauge(syst.finalized())
    # The error message should identify the connectivity problem.
    assert 'unit cell not connected' in str(err.value)
def test_invalid_lead():
    """Check error is raised when a lead unit cell is not connected
    within the unit cell itself.

    In order for the algorithm to work we need to be able to close
    loops within the lead. However we only add a single lead unit
    cell, so not all paths can be closed, even if the lead is
    connected.
    """
    lat = square_lattice
    # neighbors=0 produces sites with no hoppings at all.
    lead = _make_square_syst(lattice.TranslationalSymmetry((-1, 0)),
                             neighbors=0)
    # Truly disconnected system
    # Ignore warnings to suppress Kwant's complaint about disconnected lead
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        _test_disconnected(lead)
    # 2 disconnected chains (diagonal)
    lead[(lat(0, 0), lat(1, 1))] = None
    lead[(lat(0, 1), lat(1, 0))] = None
    _test_disconnected(lead)
    lead = _make_square_syst(lattice.TranslationalSymmetry((-1, 0)),
                             neighbors=0)
    # 2 disconnected chains (horizontal)
    lead[(lat(0, 0), lat(1, 0))] = None
    lead[(lat(0, 1), lat(1, 1))] = None
    _test_disconnected(lead)
    # System has loops, but need 3 unit cells
    # to express them.
    lead[(lat(0, 0), lat(1, 1))] = None
    lead[(lat(0, 1), lat(1, 0))] = None
    _test_disconnected(lead)
# Test internal parts of magnetic_gauge
@pytest.mark.parametrize("shape",
                         [rectangle(5, 5), circle(4),
                          half_ring(5, 10)],
                         ids=['rectangle', 'circle', 'half-ring']
                         )
@pytest.mark.parametrize("lattice", [square_lattice, honeycomb_lattice],
                         ids=['square', 'honeycomb'])
@pytest.mark.parametrize("neighbors", [1, 2, 3])
def test_minimal_cycle_basis(lattice, neighbors, shape):
    """Check that for lattice models on genus 0 shapes, nearly
    all loops have the same (minimal) length. This is not an
    equality, as there may be weird loops on the edges.
    """
    syst = Builder()
    syst.fill(model(lattice, neighbors), *shape)
    syst = syst.finalized()
    loops = gauge._loops_in_finite(syst)
    # Histogram of loop lengths in the constructed cycle basis.
    loop_counts = Counter(map(len, loops))
    min_loop = min(loop_counts)
    # arbitrarily allow 1% of slightly longer loops;
    # we don't make stronger guarantees about the quality
    # of our loop basis
    assert loop_counts[min_loop] / len(loops) > 0.99, loop_counts
def random_loop(n, max_radius=10, planar=False):
    """Return a loop of 'n' points, ordered by polar angle in the x-y plane.

    If 'planar' is True the loop lies exactly in the x-y plane (z == 0);
    otherwise each point gets a random offset in the z direction.
    """
    theta = np.sort(2 * np.pi * np.random.rand(n))
    r = max_radius * np.random.rand(n)
    if planar:
        z = np.zeros((n,))
    else:
        # NOTE(review): offsets fall in [-2*max_radius/5, 0) — one-sided,
        # presumably fine for these tests; confirm if reused elsewhere.
        z = 2 * (max_radius / 5) * (np.random.rand(n) - 1)
    return np.array([r * np.cos(theta), r * np.sin(theta), z]).transpose()
def test_constant_surface_integral():
    """A constant field must integrate identically whether given as a
    callable, as a plain vector, or with average=True."""
    field_direction = np.random.rand(3)
    field_direction /= np.linalg.norm(field_direction)
    loop = random_loop(7)
    integral = gauge._surface_integral
    I = integral(lambda r: field_direction, loop)
    assert np.isclose(I, integral(field_direction, loop))
    assert np.isclose(I, integral(lambda r: field_direction, loop, average=True))
def circular_field(r_vec):
    """Azimuthal vector field (y, -x, 0) circling the z axis."""
    x, y = r_vec[0], r_vec[1]
    return np.array([y, -x, 0])
def test_invariant_surface_integral():
    """Surface integral should be identical if we apply a random
    rotation to loop and vector field.
    """
    integral = gauge._surface_integral
    # loop with random orientation
    orig_loop = loop = random_loop(7)
    I = integral(circular_field, loop)
    for _ in range(4):
        # Rotate loop and field by the same random SO(3) element; the flux
        # through the surface must not change.
        rot = special_ortho_group.rvs(3)
        loop = orig_loop @ rot.transpose()
        assert np.isclose(I, integral(lambda r: rot @ circular_field(rot.transpose() @ r), loop))
@pytest.fixture
def system_and_gauge():
    """Finalized 3x10 Hall bar with left/right leads whose hoppings take
    independent Peierls-phase parameters, plus its magnetic gauge."""
    def hopping(a, b, peierls):
        return -1 * peierls(a, b)
    syst = Builder()
    syst[(square_lattice(i, j) for i in range(3) for j in range(10))] = 4
    syst[square_lattice.neighbors()] = hopping
    lead = Builder(lattice.TranslationalSymmetry((-1, 0)))
    lead[(square_lattice(0, j) for j in range(10))] = 4
    lead[square_lattice.neighbors()] = hopping
    # Each lead gets its own named Peierls parameter so the gauge can hand
    # back a separate phase function per lead.
    syst.attach_lead(lead.substituted(peierls='peierls_left'))
    syst.attach_lead(lead.reversed().substituted(peierls='peierls_right'))
    syst = syst.finalized()
    magnetic_gauge = gauge.magnetic_gauge(syst)
    return syst, magnetic_gauge
@pytest.mark.parametrize('B',[0, 0.1, lambda r: 0.1 * np.exp(-r[1]**2)])
def test_uniform_magnetic_field(system_and_gauge, B):
    """Transmission through the clean Hall bar should stay perfect when the
    same field is applied to the system and both leads."""
    syst, gauge = system_and_gauge
    peierls, peierls_left, peierls_right = gauge(B, B, B)
    params = dict(peierls=peierls, peierls_left=peierls_left,
                  peierls_right=peierls_right)
    s = kwant.smatrix(syst, energy=0.6, params=params)
    t = s.submatrix(1, 0)
    assert t.shape > (0, 0) # sanity check
    # Unit transmission per mode: |t|^2 is the identity.
    assert np.allclose(np.abs(t)**2, np.eye(*t.shape))
def test_phase_sign(system_and_gauge):
    """Check the sign convention: with positive field, left-incident
    electrons carry their current along the top half of the Hall bar."""
    syst, gauge = system_and_gauge
    peierls, peierls_left, peierls_right = gauge(0.1, 0.1, 0.1)
    params = dict(peierls=peierls, peierls_left=peierls_left,
                  peierls_right=peierls_right)
    # Vertical cut across the bar on which the current is measured.
    cut = [(square_lattice(1, j), square_lattice(0, j))
           for j in range(10)]
    J = kwant.operator.Current(syst, where=cut)
    J = J.bind(params=params)
    psi = kwant.wave_function(syst, energy=0.6, params=params)(0)[0]
    # Electrons incident from the left travel along the *top*
    # edge of the Hall bar in the presence of a magnetic field
    # out of the plane
    j = J(psi)
    j_bottom = sum(j[0:5])
    j_top = sum(j[5:10])
    assert np.isclose(j_top + j_bottom, 1) # sanity check
    assert j_top > 0.9
| kwant-project/kwant | kwant/physics/tests/test_gauge.py | test_gauge.py | py | 17,404 | python | en | code | 76 | github-code | 6 | [
{
"api_name": "numpy.linalg.norm",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
... |
27228829856 | """This module implements classes and independent functions related to feature extraction
module of our work.
To be specific, this module helps identify handful of best features out of humongous number
of features; created from raw data """
import numpy as np
import pandas as pd
from namedlist import namedlist
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedShuffleSplit as split
from sklearn.ensemble import BaggingRegressor
# from sklearn.ensemble import RandomForestRegressor
import warnings
warnings.filterwarnings('ignore')
def rmse(labels, predictions):
    """Return the root mean squared error between labels and predictions.

    If `predictions` is a plain number it is interpreted as a constant
    prediction broadcast over `labels` (generalized from int-only, which
    previously crashed sklearn for float constants).
    """
    if isinstance(predictions, (int, float)):
        # create an array same as labels and fill it with the constant
        predictions = np.full(np.shape(labels), predictions)
    labels = np.asarray(labels, dtype=float)
    predictions = np.asarray(predictions, dtype=float)
    # Equivalent to sklearn's mean_squared_error followed by sqrt, computed
    # directly with numpy.
    return np.sqrt(np.mean((labels - predictions) ** 2))
def train_test_split(data, test_ratio = 0.5, n_splits=10, best_split = True):
    """Split `data` into two folds by stratified sampling on quartiles of
    the 'sales_volume' target.

    `n_splits` candidate splits are drawn.  If `best_split` is True, the
    split whose folds have the closest mean 'sales_volume' is returned;
    otherwise the last candidate split is used as-is.
    """
    # Object for stratified sampling
    split_obj = split(n_splits=n_splits, test_size=test_ratio, random_state=180)
    # Discretizing the target volume to guide stratified sampling
    data['categories'] = pd.qcut(data['sales_volume'], 4, labels=["low", "low mid", 'high mid', "high"])
    # BUG FIX: the original immediately reassigned the `best_split`
    # parameter to a tuple, which shadowed the flag (best_split=False was a
    # no-op) and conflated it with the chosen index pair.  Track the chosen
    # split in a separate variable instead.
    least_diff_in_means = None
    chosen = None
    for idx_train, idx_test in split_obj.split(data, data['categories']):
        train = data.iloc[idx_train]
        test = data.iloc[idx_test]
        # Closeness of the target means is the split-quality criterion.
        diff_in_means = abs(train.sales_volume.mean() - test.sales_volume.mean())
        if best_split and ((least_diff_in_means is None) or (least_diff_in_means > diff_in_means)):
            least_diff_in_means = diff_in_means
            chosen = idx_train, idx_test
    # Fall back to the last candidate when none was selected
    # (best_split=False).
    if chosen is None:
        chosen = idx_train, idx_test
    del data['categories']
    idx_train, idx_test = chosen
    train = data.iloc[idx_train]
    test = data.iloc[idx_test]
    return train, test
class IterVarModel:
""" This class iteratively find best features one by one iteratively; starting from no features """
"""
At a particular iteration, all the candidate features are evaluated and the features that
yields the best 2-fold cross validation performance are added to the model (best features).
Number of best features to extract is determined by max_features_to_extract.
If none of the features improves performance beyond already obtained in the previous
iteration, The feature search process stops even before finding max_features_to_extract features.
"""
"""
This class maintains two folds for performance evaluation and comparison.
"""
"Train fold 1 and evaluate fold 2 and call it performance over fold 2"
"Train fold 2 and evaluate fold 1 and call it performance over fold 1"
"Note that a specified model is used for all kind of training, testing purposes."
    class RMSEFolds:
        """Pair of RMSE values (one per fold) with an ordering: an instance
        compares as smaller only when it is strictly lower on *both* folds."""
        def __init__(self, rmse_1, rmse_2):
            self.fold_1 = rmse_1 # RMSE over fold 1
            self.fold_2 = rmse_2 # RMSE over fold 2
        def __lt__(self,other):
            """
            __lt__ is a special method that defines the < operator on class
            instances.  RMSE1 < RMSE2 if and only if RMSE1 is strictly lower
            than RMSE2 on both folds.
            """
            # Alternative (rejected) criteria, kept for reference:
            # Condition 1 - RMSE_1 < RMSE_2 if results of both folds in RMSE_1 are less than that in RMSE_2
            # cond_1 = (self.fold_1 < other.fold_1) and (self.fold_2 < other.fold_2)
            # Condition 2 - RMSE_1 < RMSE_2 if the sum of rmse in both folds of RMSE_1 is less than that in RMSE_2
            # cond = (self.fold_1 ** 2 + self.fold_2 ** 2) < (other.fold_1 ** 2 + other.fold_2 ** 2)
            cond = (self.fold_1 < other.fold_1) and (self.fold_2 < other.fold_2)
            # True only when strictly better on both folds.
            return cond# strictly-better-on-both criterion
# Special method that gets run on object instantiation.
    def __init__(self, data, model, max_features_to_extract):
        """Set up folds and bookkeeping for iterative feature extraction.

        data: DataFrame holding the candidate inputs plus 'sales_volume'.
        model: estimator with fit/predict used for every evaluation.
        max_features_to_extract: upper bound on features to keep.
        """
        # data over which we create folds and extract best features.
        self.data = data
        # maximum features to extract
        self.max_features_to_extract = max_features_to_extract
        # model to be used in feature evaluations
        self.model = model
        # input columns are all the columns in the dataframe data except the target
        self.input_variables = [col for col in self.data.columns if col not in ['sales_volume']]
        # maintaining data for the folds. This attribute holds data related to folds.
        self.folds = None
        # Maintains a list of useful features
        self.extracted_features = []
        # Stops the feature extraction process if it becomes True
        self.stop_feature_extraction = False
        # create 2 folds out of all the data. Basically, split data into folds and also create additional variables.
        self.create_folds()
def standardize_folds_inputs(self):
""" Standardize inputs in fold 1 from parameters obtained in fold 2 and
Standardize inputs in fold 2 from parameters obtained in fold 1
Logic: test data cannot know her own mean and variance;
hence has to be standardize with training set parameters
"""
fold_1_X = self.folds[1].input
fold_2_X = self.folds[2].input
# get parameters from fold 1 and standardize and update fold 2
model = StandardScaler() # standard scalar
model.fit(fold_1_X) # get parameters from inputs in fold 1
self.folds[2]._update(input=pd.DataFrame(model.transform(fold_2_X), columns=fold_1_X.columns)) # transform inputs in fold 2
# get parameters from fold 2 and standardize and update fold 1
model = StandardScaler() # standard scalar
model.fit(fold_2_X) # get parameters from inputs in fold 2
self.folds[1]._update(input=pd.DataFrame(model.transform(fold_1_X), columns=fold_1_X.columns)) # transform inputs in fold 1
def add_data_in_folds(self):
data = self.data
# We use stratified sampling to split the data; see function train_test_split() for details
fold_1_data, fold_2_data = train_test_split(data, test_ratio = 0.5, n_splits=10, best_split = True)
# inputs
input_features = self.input_variables
## Now we add inputs and outputs to each fold.
# update inputs
self.folds[1]._update(input=fold_1_data[input_features])
self.folds[2]._update(input=fold_2_data[input_features])
# update outputs
self.folds[1]._update(output=fold_1_data['sales_volume'])
self.folds[2]._update(output=fold_2_data['sales_volume'])
    def create_folds(self):
        """
        This function uses stratified sampling to split data into two equal-sized folds
        and maintains these folds using class attribute of folds.
        We use namedlist; one for each fold to hold its data
        """
        """
        namedlist is a factory function for creating mutable collections of list items;
        it is similar to python's list but enables us to name each component and access using
        dot notation.
        """
        Fold = namedlist('Fold', 'input output rmse')
        """
        class attribute folds is a dictionary with 2 keys;
        key=1, refers to namedlist that holds data related to fold 1
        key=2, refers to namedlist that holds data related to fold 2
        """
        self.folds = dict()
        for i in [1,2]:
            self.folds[i] = Fold(input=None, output=None,rmse=None)
        # add inputs and outputs to the folds by intelligently splitting data; see class method add_data_in_folds()
        self.add_data_in_folds()
        # Standardize inputs in the folds for better ML performance; see class method standardize_folds_inputs()
        self.standardize_folds_inputs()
        """
        Now after having inputs and outputs in both folds, we update RMSE.
        As of now, we have not extracted any feature.
        Hence, we consider a base model i.e., one that spits out mean of its training target.
        """
        # predictions of base model over fold 1 is a constant; mean of target variable in fold 2
        # predictions of base model over fold 2 is a constant; mean of target variable in fold 1
        # updating RMSE based on this logic.
        # NOTE(review): rmse(|y - mean|, 0) == sqrt(mean((y - mean)^2)), i.e.
        # the baseline RMSE of predicting the other fold's mean everywhere.
        self.folds[1]._update(rmse=rmse(np.abs(self.folds[1].output - self.folds[2].output.mean()), 0))
        self.folds[2]._update(rmse=rmse(np.abs(self.folds[2].output - self.folds[1].output.mean()), 0))
def eval_fold(self, eval_fold_number, features):
"""
This function evaluates a fold specified by eval_fold_number based on features
and returns RMSE
"""
"fold 1 is evaluated by training over fold 2 and evaluating over fold 1"
train_fold = 1 if eval_fold_number == 2 else 2
test_fold = 2 if eval_fold_number == 2 else 1
model = self.model
# training data from train_fold
X, Y = self.folds[train_fold].input[features], self.folds[train_fold].output
# learning
model.fit(X, Y)
# test data
test_X, test_Y = self.folds[test_fold].input[features], self.folds[test_fold].output
# prediction
test_predict = model.predict(test_X)
# evaluate predictions and compute rmse
tmp_rmse = rmse(test_Y, test_predict)
return tmp_rmse
def is_new_feature_good(self, features):
"""
This function evaluates fold 1 and fold 2 with features
and determines if features leads to better performance
compared to extracted best features.
"""
# class method eval_fold() is used to evaluate a fold and returns RMSE. see eval_fold()
rmse_1 = self.eval_fold(1, features)
rmse_2 = self.eval_fold(2, features)
# Construct an RMSE object comprising RMSEs of folds resulted from current features.
RMSE = self.RMSEFolds(rmse_1, rmse_2)
## Construct an RMSE object comprising RMSEs of folds resulted best features obtained so far
RMSE_current = self.RMSEFolds(self.folds[1].rmse, self.folds[2].rmse)
result = False
"""if RMSE is better than RMSE_current, then set of variables in features are better
than ones in best features extracted so far
"""
if RMSE < RMSE_current:
# do advanced analysis on residuals.
result = True
return result, RMSE
def add_var(self):
"""
This method search for the variable; if such a variable exists that when
added can improve performance
"""
#
# We define best_RMSE to be RMSE reached in previous iteration.
best_RMSE = self.RMSEFolds(self.folds[1].rmse, self.folds[2].rmse)
# Initially a None, best_var indicates the candidate variable that can be included.
best_var = None # maintain the best variable found in this iteration
# Looping over the candidate variables
for col in self.input_variables:
# candidate variable should not be already in the best extracted features.
if col not in self.extracted_features:
# make a temporary list of features by adding candidate feature to the existing best features.
tmp_features = self.extracted_features + [col]
"""
Evaluate the goodness of candidate feature;
see class method is_new_feature_good() for further details.
is_good=True indicates that the candidate variable can improve the performance.
"""
is_good, RMSE = self.is_new_feature_good(tmp_features)
# Update the best_var if is_good=True and RMSE is better than best RMSE so far.
if (is_good and (RMSE < best_RMSE)):
best_RMSE = RMSE
best_var = col
# If we find a variable that can improve performance.
if best_var is not None:
print('adding_variable: {}'.format(best_var))
self.extracted_features.append(best_var)
# Update rmse and residuals
self.folds[1]._update(rmse=best_RMSE.fold_1)
self.folds[2]._update(rmse=best_RMSE.fold_2)
# If we cannot find a variable that can improve performance.
else:
# Turning this to True stops feature extraction.
self.stop_feature_extraction = True
# print('new features cannot be added')
def extract_features(self):
"""
This function runs feature extraction routines until either
the maximum allowed features is reached or when none of the
variables can improve the performance by getting added.
"""
for _ in range(self.max_features_to_extract):
# running until stop_feature_extraction=False
if not self.stop_feature_extraction:
# add_var() is a function that looks for best variable to add; see method add_var() for details.
self.add_var()
return self.extracted_features
def extract_model_features(model, df, max_features=5):
    """Extract the best features of ``df`` for the given ``model``.

    Thin wrapper: builds an IterVarModel instance and delegates to its
    extract_features() method.

    Args:
        model: sklearn-style estimator used for fold evaluation.
        df: dataframe holding input variables and the target.
        max_features: maximum number of features to extract.

    Returns:
        list: the extracted feature names.
    """
    extractor = IterVarModel(df, model, max_features)
    return extractor.extract_features()
def bagging_feature_extraction(model, df, max_features):
    """Two-stage extraction: shortlist stable features with a bagged
    version of ``model``, then rerun extraction with the base model on the
    shortlist only. Bagging helps avoid unstable features.
    """
    def _banner(text):
        # Small helper for the section headers printed during extraction.
        print('-' * 18)
        print(text)
        print('-' * 18)

    # Each bagged estimator sees 60% of the data; fixed seed for repeatability.
    bagging_model = BaggingRegressor(base_estimator=model, max_samples=0.6,
                                     random_state=25)
    _banner('Bagging Features')
    shortlist = extract_model_features(bagging_model, df, max_features=15)
    # Keep only the shortlisted columns (plus the target) for the second pass.
    df = df[shortlist + ['sales_volume']]
    _banner('Final Features')
    return extract_model_features(model, df, max_features=max_features)
def feature_extraction(models, df, max_features=5, precede_bagging=False):
    """Run feature extraction for every model in ``models``.

    Args:
        models: dict mapping model name -> sklearn-style estimator.
        df: dataframe with input variables and the target.
        max_features: maximum number of features per model.
        precede_bagging: when True, shortlist features via bagging first.

    Returns:
        list: one ``Model`` namedlist (name, sklearn_form,
        extracted_features) per input model.
    """
    # namedlist is a mutable, attribute-accessible list factory; one record
    # per model is collected below.
    Model = namedlist('Model', 'name sklearn_form extracted_features')
    models_features = []
    for model_name, model in models.items():
        print('\n')
        print('*' * 18)
        print(model_name)
        print('*' * 18)
        if precede_bagging:
            extracted = bagging_feature_extraction(model, df, max_features)
        else:
            extracted = extract_model_features(model, df,
                                               max_features=max_features)
        record = Model(name=model_name, sklearn_form=model,
                       extracted_features=extracted)
        models_features.append(record)
    return models_features
if __name__ == '__main__':
print('This file is not run as a module') | waqasbukhari/optimal_pos_placement | best_feature_extraction.py | best_feature_extraction.py | py | 18,314 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.full",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.mean_squared_error",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": ... |
34736461093 | # -*- coding: utf-8 -*-
import scrapy
import re
import json
from video_scrapy.items import *
import hashlib
from video_scrapy.settings import my_defined_urls
class YoutubeDlSpider(scrapy.Spider):
name = 'video'
youtube_dl_not_you_get = False
handle_httpstatus_list = [404]
def __init__(self, my_url=None, my_playlist=False,*args, **kwargs):
super(YoutubeDlSpider, self).__init__(*args, **kwargs)
if my_url is None:
self.start_urls = []
else:
self.start_urls = ["%s"%my_url]
if isinstance(my_playlist, str):
if my_playlist=="True":
my_playlist=True
else:
my_playlist=False
self.get_playlist=my_playlist
def start_requests(self):
try:
self.state
try:
if "iqiyi_id" in self.state:
self.iqiyi_id = self.state["iqiyi_id"]
else:
self.iqiyi_id
except:
self.iqiyi_id = {}
self.state["iqiyi_id"] = self.iqiyi_id
except:
self.iqiyi_id = {}
if len(self.iqiyi_id)==0:
if self.youtube_dl_not_you_get:
parameter = ['-g', "--rm-cache-dir"]
for i in self.start_urls:
if "bilibili" in i:
yield scrapy.Request(url=i, callback=self.bili_parse)
else:
parameter.append(i)
if len(parameter) == 2:
pass
else:
# from video_scrapy.youtube_dl.YoutubeDL import my_defined_urls
from video_scrapy.youtube_dl import main
print("waiting for youtube_dl get urls")
main(parameter)
print("get youtube_dl urls")
for i in my_defined_urls:
my_url_dict = my_defined_urls[i]
for j in my_url_dict:
name = str(j).rsplit(".", 1)[0]
filetype = str(j).rsplit(".", 1)[-1]
yield scrapy.Request(url=my_url_dict[j], callback=self.savefile, meta={"name": name, "filetype": filetype, "fileid": None, "id": None, "end": None})
else:
iqiyi_url = []
for i in self.start_urls:
if "bilibili" in i:
yield scrapy.Request(url=i, callback=self.bili_parse)
self.start_urls.remove(i)
elif "iqiyi.com" in i:
iqiyi_url.append(i)
self.start_urls.remove(i)
if len(iqiyi_url) == 0:
pass
else:
for i in self.iqiyi_url_process(iqiyi_url):
yield i
if len(self.start_urls) == 0:
pass
else:
from video_scrapy.you_get.common import main
# from video_scrapy.you_get import common
print("waiting for you_get get urls")
main(my_own_urls=self.start_urls,
my_own_playlist=self.get_playlist)
print("get you_get urls finish")
if "error" in my_defined_urls:
print(
"can't get urls for some videos,please look at error.txt for more information!")
error = my_defined_urls.pop("error")
import datetime
nowTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
with open("error.txt", "a") as f:
f.write('\n')
f.write(str(nowTime))
f.write('\n')
f.write("\n".join(error))
for i in my_defined_urls:
my_url_dict = my_defined_urls[i]
name = i
filetype = my_url_dict.pop("filetype")
end_id = len(my_url_dict)
if end_id == 1:
url = my_url_dict.popitem()[1]
filetype = re.search(r"\.(\w+?)\?", url).group(1)
if filetype == "m3u8":
yield scrapy.Request(url=url, callback=self.parse_m3u8, meta={"name": name})
else:
for j in my_url_dict:
if int(j) == int(end_id):
end = True
else:
end = False
yield scrapy.Request(url=my_url_dict[j], callback=self.savefile, meta={"name": name, "filetype": filetype, "fileid": j, "id": None, "end": end})
else:
pass
    def check_iqiyi_has_error(self, name):
        """Return True when all requested segments of iqiyi video ``name``
        have been answered but some failed, meaning the failed segments
        should be re-requested; otherwise False.
        """
        # Restore persistent iqiyi bookkeeping across spider resumes.
        try:
            self.state
            try:
                if "iqiyi_id" in self.state:
                    self.iqiyi_id = self.state["iqiyi_id"]
                else:
                    self.iqiyi_id
            except:
                self.iqiyi_id = {}
                self.state["iqiyi_id"] = self.iqiyi_id
        except:
            self.iqiyi_id = {}
        print(self.iqiyi_id)
        self.iqiyi_id[name].setdefault("get_num", 0)
        # Only decide once every sent request has been answered
        # (send_num == get_num); with failures recorded, reset the counters
        # and report that a retry round is needed.
        if "send_num" in self.iqiyi_id[name]:
            if int(self.iqiyi_id[name]["send_num"]) == int(self.iqiyi_id[name]["get_num"]):
                if len(self.iqiyi_id[name].setdefault("error", [])) == 0:
                    pass
                else:
                    self.iqiyi_id[name]["get_num"] = 0
                    self.iqiyi_id[name]["error_num"] = len(
                        self.iqiyi_id[name]["error"])
                    self.iqiyi_id[name].pop("send_num")
                    return True
        return False
    def iqiyi_url_process(self, my_iqiyi_url):
        """Resolve each iqiyi page URL via you_get and yield download
        requests for the resulting media URLs, recording per-video retry
        bookkeeping in self.iqiyi_id.
        """
        # Restore persistent iqiyi bookkeeping across spider resumes.
        try:
            self.state
            try:
                if "iqiyi_id" in self.state:
                    self.iqiyi_id = self.state["iqiyi_id"]
                else:
                    self.iqiyi_id
            except:
                self.iqiyi_id = {}
                self.state["iqiyi_id"] = self.iqiyi_id
        except:
            self.iqiyi_id = {}
        print("waiting for you_get get iqiyi_urls")
        from video_scrapy.you_get.common import main
        for iqiyi_url in my_iqiyi_url:
            # you_get expects a list of URLs; process one page at a time so
            # failures can be attributed to a specific page.
            iqiyi_url = [iqiyi_url]
            main(my_own_urls=iqiyi_url,
                 my_own_playlist=self.get_playlist)
            print("get iqiyi_urls finish")
            if "error" in my_defined_urls:
                print(
                    "can't get urls for some videos,please look at error.txt for more information!")
                error = my_defined_urls.pop("error")
                import datetime
                nowTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                with open("error.txt", "a") as f:
                    f.write('\n')
                    f.write(str(nowTime))
                    f.write('\n')
                    f.write("\n".join(error))
            # Snapshot the keys: entries are popped from my_defined_urls
            # inside the loop below.
            my_temp = list(my_defined_urls.keys())
            for i in my_temp:
                my_url_dict = my_defined_urls[i]
                name = str(i)
                # Remember the source page URL so failed segments can be
                # re-resolved later.
                self.iqiyi_id.setdefault(name, {}).setdefault("url", iqiyi_url)
                filetype = my_url_dict.pop("filetype")
                end_id = len(my_url_dict)
                if end_id == 1:
                    # Single URL: extract the real extension from the URL;
                    # m3u8 playlists get their own parser.
                    url = my_url_dict.popitem()[1]
                    filetype = re.search(r"\.(\w+?)\?", url).group(1)
                    if filetype == "m3u8":
                        yield scrapy.Request(url=url, callback=self.parse_m3u8, meta={"name": name, "iqiyi": None})
                else:
                    # Multi-segment download; mark the last segment.
                    for j in my_url_dict:
                        if int(j) == int(end_id):
                            end = True
                        else:
                            end = False
                        yield scrapy.Request(url=my_url_dict[j], callback=self.savefile, meta={"name": name, "filetype": filetype, "fileid": j, "id": None, "end": end})
                my_defined_urls.pop(i)
    def parse_m3u8(self, response):
        """Parse an m3u8 playlist and request every media segment it lists;
        for iqiyi videos, only re-request previously failed segments.
        """
        # Restore persistent iqiyi bookkeeping across spider resumes.
        try:
            self.state
            try:
                if "iqiyi_id" in self.state:
                    self.iqiyi_id = self.state["iqiyi_id"]
                else:
                    self.iqiyi_id
            except:
                self.iqiyi_id = {}
                self.state["iqiyi_id"] = self.iqiyi_id
        except:
            self.iqiyi_id = {}
        url = response.url
        name = response.meta['name']
        if isinstance(response.body, bytes):
            page = response.body.decode('ascii')
        else:
            page = str(response.body)
        file_line = page.split("\n")
        # A valid m3u8 playlist must start with the #EXTM3U header.
        if file_line[0] != "#EXTM3U":
            raise BaseException("非M3U8的链接")
        else:
            unknow = True  # set to False once a download URL has been found
            i = 1  # 1-based segment counter
            for index, line in enumerate(file_line):
                if "EXTINF" in line:
                    unknow = False
                    # The segment URI follows the EXTINF line; it may be
                    # absolute, relative, or root-relative.
                    if file_line[index + 1][0:4] == "http":
                        pd_url = file_line[index + 1]
                    else:
                        if file_line[index + 1][0] != '/':
                            pd_url = url.rsplit(
                                "/", 1)[0] + "/" + file_line[index + 1]
                        else:
                            pd_url = url.rsplit(
                                "/", 1)[0] + file_line[index + 1]
                    if "iqiyi" in response.meta:
                        # First pass: request every segment. Retry pass:
                        # request only segments recorded in the error list.
                        if len(self.iqiyi_id.setdefault(name, {}).setdefault("error", [])) == 0:
                            if self.iqiyi_id.setdefault(name, {}).setdefault("error_num", 0) == 0:
                                yield scrapy.Request(pd_url, callback=self.savefile,
                                                     meta={'fileid': int(i), 'name': name, 'end': False, "id": None, "filetype": "ts", "iqiyi": None})
                            else:
                                pass
                        else:
                            if int(i) in self.iqiyi_id.setdefault(name, {}).setdefault("error", []):
                                yield scrapy.Request(pd_url, callback=self.savefile,
                                                     meta={'fileid': int(i), 'name': name, 'end': False, "id": None, "filetype": "ts", "iqiyi": None})
                            else:
                                pass
                    else:
                        yield scrapy.Request(pd_url, callback=self.savefile,
                                             meta={'fileid': int(i), 'name': name, 'end': False, "id": None, "filetype": "ts"})
                    i = i + 1
                if "ENDLIST" in line:
                    # Playlist finished: record how many requests were sent,
                    # trigger a retry round if needed, and emit a final
                    # empty "end" item so the pipeline can close the file.
                    if "iqiyi" in response.meta:
                        if self.iqiyi_id.setdefault(name, {}).setdefault("error_num", 0) != 0:
                            self.iqiyi_id[name]["send_num"] = self.iqiyi_id[
                                name]["error_num"]
                        else:
                            self.iqiyi_id[name]["send_num"] = i - 1
                        if self.check_iqiyi_has_error(name):
                            for k in self.iqiyi_url_process(self.iqiyi_id[name]["url"]):
                                yield k
                    item = FileItem()
                    item["id"] = None
                    item['fileid'] = i
                    item['name'] = name
                    item['end'] = True
                    item['content'] = b''
                    item['filetype'] = 'ts'
                    yield item
            if unknow:
                raise BaseException("未找到对应的下载链接")
            else:
                print("下载请求完成 m3u8 %s" % name)
    def parse(self, response):
        # Default scrapy callback; unused because every request issued by
        # this spider sets an explicit callback.
        pass
    def bili_parse(self, response):
        """Parse a bilibili page: extract the embedded __INITIAL_STATE__
        JSON and request the playurl API for every video page / episode.
        """
        # Restore persistent iqiyi bookkeeping across spider resumes.
        try:
            self.state
            try:
                if "iqiyi_id" in self.state:
                    self.iqiyi_id = self.state["iqiyi_id"]
                else:
                    self.iqiyi_id
            except:
                self.iqiyi_id = {}
                self.state["iqiyi_id"] = self.iqiyi_id
        except:
            self.iqiyi_id = {}
        if isinstance(response.body, bytes):
            file = str(response.body.decode("utf8"))
        else:
            file = str(response.body)
        # The page embeds its state as: __INITIAL_STATE__={...};(function...
        temp = re.search(r"__INITIAL_STATE__=(\{.*\});\(fun", file, re.S)
        temp = str(temp.group(1))
        temp = json.loads(temp)
        url = "https://www.kanbilibili.com/api/video/%d/download?cid=%d&quality=64&page=%d"
        if "videoData" in temp:
            # Ordinary multi-part video: one request per page.
            videodata = temp['videoData']
            pagelist = videodata['pages']
            aid = videodata["aid"]
            for item in pagelist:
                page = item['page']
                cid = item['cid']
                name = item['part']
                new_url = url % (int(aid), int(cid), int(page))
                yield scrapy.Request(url=new_url, callback=self.bili_get_json, meta={"name": name, "id": page, "Referer": response.url})
        else:
            # Bangumi (series) page: one request per accessible episode.
            title = temp["mediaInfo"]["title"]
            pagelist = temp["epList"]
            name = str(title) + "%03d"
            for item in pagelist:
                aid = item["aid"]
                cid = str(item["cid"])
                page = item["index"]
                access_id = int(item["episode_status"])
                # episode_status == 2 appears to mark freely accessible
                # episodes; others are skipped. TODO confirm the semantics.
                if access_id == 2:
                    if len(item["index_title"]) == 0:
                        new_name = name % (int(page))
                    else:
                        new_name = title + "_" + item["index_title"]
                    if "bangumi" in response.url:
                        # Bangumi playurl API requires an md5 signature over
                        # the sorted query string plus a fixed secret key.
                        secretkey = "9b288147e5474dd2aa67085f716c560d"
                        temp = "cid=%s&module=bangumi&otype=json&player=1&qn=112&quality=4" % (
                            str(cid))
                        sign_this = hashlib.md5(
                            bytes(temp + secretkey, 'utf-8')).hexdigest()
                        new_url = "https://bangumi.bilibili.com/player/web_api/playurl?" + \
                            temp + '&sign=' + sign_this
                    else:
                        new_url = url % (int(aid), int(cid), int(page))
                    yield scrapy.Request(url=new_url, callback=self.bili_get_json, meta={"name": new_name, "id": page, "Referer": response.url})
                else:
                    pass
def bili_get_json(self, response):
try:
self.state
try:
if "iqiyi_id" in self.state:
self.iqiyi_id = self.state["iqiyi_id"]
else:
self.iqiyi_id
except:
self.iqiyi_id = {}
self.state["iqiyi_id"] = self.iqiyi_id
except:
self.iqiyi_id = {}
if isinstance(response.body, bytes):
temp_dict = json.loads(response.body.decode("utf8"))
else:
temp_dict = json.loads(str(response.body))
if "err" in temp_dict:
if temp_dict['err'] is None:
my_url_list = temp_dict["data"]["durl"]
filetype = temp_dict["data"]["format"][0:3]
end_id = len(my_url_list)
for i in my_url_list:
fileid = i["order"]
link_url = i["url"]
if int(fileid) == int(end_id):
end = True
else:
end = False
yield scrapy.Request(url=link_url, callback=self.savefile, headers={"Origin": "https://www.bilibili.com", "Referer": response.meta["Referer"]},
meta={"name": response.meta["name"], "id": response.meta["id"], "filetype": filetype, "fileid": fileid, "end": end})
else:
my_url_list = temp_dict["durl"]
filetype = temp_dict["format"][0:3]
end_id = len(my_url_list)
for i in my_url_list:
fileid = i["order"]
link_url = i["url"]
if int(fileid) == int(end_id):
end = True
else:
end = False
yield scrapy.Request(url=link_url, callback=self.savefile, headers={"Origin": "https://www.bilibili.com", "Referer": response.meta["Referer"]},
meta={"name": response.meta["name"], "id": response.meta["id"], "filetype": filetype, "fileid": fileid, "end": end})
    def savefile(self, response):
        """Turn a downloaded response into a FileItem for the pipeline; for
        iqiyi segments also track 404 failures and trigger retry rounds.
        """
        # Restore persistent iqiyi bookkeeping across spider resumes.
        try:
            self.state
            try:
                if "iqiyi_id" in self.state:
                    self.iqiyi_id = self.state["iqiyi_id"]
                else:
                    self.iqiyi_id
            except:
                self.iqiyi_id = {}
                self.state["iqiyi_id"] = self.iqiyi_id
        except:
            self.iqiyi_id = {}
        item = FileItem()
        # fileid/end both None marks a single-file (non-segmented) download.
        if response.meta['fileid'] is None and response.meta['end'] is None:
            print("get %s" % (response.meta['name']))
            item['fileid'] = None
            item['end'] = None
        else:
            print("get %s__%d" %
                  (response.meta['name'], int(response.meta['fileid'])))
            item['fileid'] = int(response.meta['fileid'])
            item['end'] = response.meta['end']
        if response.meta['id'] is None:
            item['id'] = None
        else:
            item['id'] = int(response.meta['id'])
        # Strip characters that are illegal in Windows file names.
        item['name'] = str(response.meta['name']).encode(
        ).translate(None, b'\\/:*?"<>|').decode()
        item['filetype'] = response.meta['filetype']
        if "iqiyi" in response.meta:
            # Count the answered request, then record or clear this segment
            # in the per-video error list depending on the HTTP status.
            self.iqiyi_id[response.meta["name"]]["get_num"] = self.iqiyi_id[
                response.meta["name"]].setdefault("get_num", 0) + 1
            if int(response.status) == 404:
                if int(response.meta['fileid']) in self.iqiyi_id[response.meta["name"]].setdefault("error", []):
                    pass
                else:
                    self.iqiyi_id[response.meta["name"]].setdefault(
                        "error", []).append(int(response.meta["fileid"]))
            else:
                if int(response.meta["fileid"]) in self.iqiyi_id[response.meta["name"]].setdefault("error", []):
                    self.iqiyi_id[response.meta["name"]][
                        "error"].remove(int(response.meta["fileid"]))
                item['content'] = response.body
                yield item
            # If all answers are in and some segments failed, start a retry
            # round by re-resolving the source page.
            if self.check_iqiyi_has_error(response.meta["name"]):
                for i in self.iqiyi_url_process(self.iqiyi_id[response.meta["name"]]["url"]):
                    yield i
        else:
            item['content'] = response.body
            yield item
| yllgl/my_video_scrapy | video_scrapy/spiders/video_spider.py | video_spider.py | py | 19,315 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "scrapy.Spider",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "scrapy.Request",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "video_scrapy.youtube_dl.main",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "video_... |
41584655028 | import json
import time
import requests
# Target URLs to crawl.
# BUGFIX: the first entry was missing a trailing comma, so Python's implicit
# string concatenation fused entries 1 and 2 into one bogus URL and the list
# silently held only two items.
PAGE_URL_LIST = [
    'http://example.com/1.page',
    'http://example.com/2.page',
    'http://example.com/3.page',
]
def fetch_pages():
    """Fetch every page in PAGE_URL_LIST and return {url: body text}.

    Progress is appended to crawler_info.log and failures to
    crawler_error.log; a failed page is logged and skipped, not fatal.
    """
    page_contents = {}
    # Open both log files in append mode; the with-block guarantees they are
    # closed even if an unexpected exception escapes the loop (the original
    # code leaked both handles in that case).
    with open('crawler_info.log', 'a') as f_info_log, \
            open('crawler_error.log', 'a') as f_error_log:
        # Announce the start on the terminal and in the info log.
        msg = "크롤링을 시작합니다\n"
        print(msg)
        f_info_log.write(msg)
        for page_url in PAGE_URL_LIST:
            try:
                # BUGFIX: the get() call itself was outside the try block,
                # so connection errors / timeouts crashed the whole crawl
                # even though the intent was to log and skip failing pages.
                r = requests.get(page_url, timeout=30)
                r.raise_for_status()  # raise on HTTP error responses
            except requests.exceptions.RequestException as e:
                # Log the failure and continue with the next page.
                msg = "[ERROR] {exception}\n".format(exception=e)
                print(msg)
                f_error_log.write(msg)
                continue
            # Successful fetch: remember the page body.
            page_contents[page_url] = r.text
            time.sleep(1)  # be polite: throttle requests to the remote site
    return page_contents
if __name__ == '__main__':
    page_contents = fetch_pages()
    # IMPROVEMENT: use a with-block so the dump file is flushed and closed
    # even if serialization fails (the original closed it manually).
    with open('page_contents.json', 'w') as f_page_contents:
        json.dump(page_contents, f_page_contents, ensure_ascii=False)
| JSJeong-me/2021-K-Digital-Training | Web_Crawling/python-crawler/chapter_5/get_example_domain_pages.3.py | get_example_domain_pages.3.py | py | 1,939 | python | ko | code | 7 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "json.dump",
"lin... |
42641405385 | #!/usr/bin/env python3
# Simple Script to replace cron for Docker
import argparse
import sys
from subprocess import CalledProcessError, run
from time import sleep, time
def main() -> int:
    """Run /tmp/run.sh forever, at most once every ``interval`` seconds.

    Returns the script's exit code if it fails, 0 on Ctrl-C.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("interval", help="Time in seconds between runs", type=int)
    interval = parser.parse_args().interval
    print("Running gem mirror every %ds" % interval, file=sys.stderr)
    try:
        while True:
            started = time()
            try:
                run(['/tmp/run.sh'], check=True)
            except CalledProcessError as cpe:
                # Propagate the child's failure code to the caller.
                return cpe.returncode
            # Sleep only for whatever part of the interval is left.
            remaining = interval - (time() - started)
            if remaining > 0:
                print("Sleeping for %ds" % remaining, file=sys.stderr)
                sleep(remaining)
    except KeyboardInterrupt:
        pass
    return 0
# Script entry point: exit with main()'s return code.
if __name__ == "__main__":
    sys.exit(main())
| osssanitizer/maloss | registries/rubygems/runner.py | runner.py | py | 985 | python | en | code | 98 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
... |
33120873994 | import json
import re
from typing import Any
from aiohttp import ClientSession
from .exceptions import FailedToParseIntialData
class Client:
    """Minimal asynchronous YouTube web client."""

    _session: ClientSession

    @classmethod
    async def new(cls, host: str = "https://www.youtube.com"):
        """Build a client bound to ``host`` with a fresh aiohttp session."""
        client = cls()
        client._session = ClientSession(base_url=host, raise_for_status=True)
        return client

    async def get_search_results(self, search_query: str) -> str:
        """Return the raw HTML of the /results page for ``search_query``."""
        params = {"search_query": search_query}
        async with self._session.get("/results", params=params) as response:
            return await response.text()

    async def close(self) -> None:
        """Release the underlying aiohttp session."""
        await self._session.close()
def get_initial_data(search_results: str) -> dict[str, Any]:
    """Extract and decode the ``ytInitialData`` JSON blob embedded in a
    YouTube search-results page.

    Raises:
        FailedToParseIntialData: when the blob cannot be located.
    """
    match = re.search(r"(var\ ytInitialData\ =\ )(.*);</script><script",
                      search_results)
    if match is None:
        raise FailedToParseIntialData
    return json.loads(match.group(2))
| Flowrey/youtube-bz | youtube_bz/api/youtube/api.py | api.py | py | 1,161 | python | en | code | 12 | github-code | 6 | [
{
"api_name": "aiohttp.ClientSession",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "aiohttp.ClientSession",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "exceptions.Fai... |
34529799093 | from setuptools import setup, find_packages
from codecs import open
from os import path
# Directory containing this setup.py, so relative reads work regardless of
# the current working directory.
here = path.abspath(path.dirname(__file__))

# Use the README as the long description shown on PyPI.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='quantile_transformer_tf',
    version='1.2',
    description='An implementation of QuantileTransformer in tensorflow',
    long_description=long_description,
    url='https://github.com/yandexdataschool/QuantileTransformerTF',
    author='Nikita Kazeev',
    author_email='nikita.kazeev@cern.ch',
    license='Apache 2.0',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3',
    ],
    packages=find_packages(),
    install_requires=['numpy>=1.13',
                      'tensorflow>=1.9']
)
| yandexdataschool/QuantileTransformerTF | setup.py | setup.py | py | 905 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "os.path.abspath",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "codecs.open",
"line_number":... |
71184167229 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as pl
from mpl_toolkits.basemap import Basemap
# Draw a world map in the Mercator projection and save it to basemap.png.
# llcrnr*/urcrnr* give the lat/lon of the lower-left and upper-right map
# corners; lat_ts is the latitude of true scale; resolution='c' selects the
# crude coastline dataset.
pl.clf()
m = Basemap(projection='merc', llcrnrlat=-80, urcrnrlat=80,
            llcrnrlon=-180, urcrnrlon=180, lat_ts=20, resolution='c')
m.drawcoastlines()
m.fillcontinents(color='coral', lake_color='aqua')
# Graticule: parallels every 30 degrees, meridians every 60 degrees.
m.drawparallels(np.arange(-90., 91., 30.))
m.drawmeridians(np.arange(-180., 181., 60.))
m.drawmapboundary(fill_color='aqua')
m.drawstates()
pl.title("Mercator Projection")
# FIX: save before show(). With interactive backends, savefig() after show()
# can write an empty figure; with the Agg backend selected above, show() is
# a no-op either way, so saving first is always safe.
pl.savefig('basemap.png')
pl.show()
{
"api_name": "matplotlib.use",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "mpl_toolkits.ba... |
33557423336 | from django.conf import settings
from django.contrib.sites.models import Site
from .models import SiteSettings
class SiteSettingsMiddleware:
    """Attach the current Site and its SiteSettings to each request."""

    def __init__(self, get_response):
        # Standard Django middleware constructor; get_response is the next
        # layer in the middleware chain.
        self.get_response = get_response

    def __call__(self, request):
        try:
            # NOTE(review): relies on the private Site manager helper
            # _get_site_by_request -- presumably resolves the site from the
            # request's host; confirm it exists in the Django version in use.
            request.wf_site = Site.objects._get_site_by_request(request)
            # NOTE(review): mutating the global settings.SITE_ID per request
            # is not thread-safe under multi-threaded servers -- confirm
            # this deployment's worker model tolerates it.
            settings.SITE_ID = request.wf_site.id
        except Site.DoesNotExist:
            request.wf_site = None
        try:
            # wf_site may be None here; the lookup then simply fails below.
            request.sitesettings = SiteSettings.objects.get(site=request.wf_site)
        except SiteSettings.DoesNotExist:
            request.sitesettings = None
        return self.get_response(request)
| phildini/wordfugue | wordfugue/sitesettings/middleware.py | middleware.py | py | 684 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.contrib.sites.models.Site.objects._get_site_by_request",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.contrib.sites.models.Site.objects",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.sites.models.Site",
... |
17435637259 | import pytest
from collections import defaultdict
class UndergroundSystem:
    """Track passenger check-ins/outs and report average travel times.

    Improvements over the straightforward version:
    - per-course storage is a running (total, count) pair instead of a list
      of every travel time, so memory stays O(#courses) and getAverageTime
      is O(1) regardless of traffic volume;
    - courses are keyed by a (start, end) tuple instead of the string
      "start_end", which collided for station names containing "_".
    """

    def __init__(self):
        # id -> list holding at most one pending (station, check-in time)
        self.user_table = defaultdict(list)
        # (start_station, end_station) -> [total_travel_time, trip_count]
        self.course_table = defaultdict(lambda: [0, 0])

    def checkIn(self, id: int, stationName: str, t: int) -> None:
        """Record that rider ``id`` entered ``stationName`` at time ``t``."""
        assert len(self.user_table[id]) == 0  # a rider cannot check in twice
        self.user_table[id].append((stationName, t))

    def checkOut(self, id: int, stationName: str, t: int) -> None:
        """Record that rider ``id`` exited at ``stationName`` at time ``t``
        and fold the completed trip into the course statistics."""
        assert len(self.user_table[id]) == 1  # must have a pending check-in
        source = self.user_table[id].pop()
        stats = self.course_table[(source[0], stationName)]
        stats[0] += t - source[1]
        stats[1] += 1

    def getAverageTime(self, startStation: str, endStation: str) -> float:
        """Average travel time between the two stations, rounded to 5
        decimal places."""
        total, count = self.course_table[(startStation, endStation)]
        return round(total / count, 5)
@pytest.mark.parametrize(
"action, value, expected",
[
(
[
"UndergroundSystem",
"checkIn",
"checkIn",
"checkIn",
"checkOut",
"checkOut",
"checkOut",
"getAverageTime",
"getAverageTime",
"checkIn",
"getAverageTime",
"checkOut",
"getAverageTime",
],
[
[],
[45, "Leyton", 3],
[32, "Paradise", 8],
[27, "Leyton", 10],
[45, "Waterloo", 15],
[27, "Waterloo", 20],
[32, "Cambridge", 22],
["Paradise", "Cambridge"],
["Leyton", "Waterloo"],
[10, "Leyton", 24],
["Leyton", "Waterloo"],
[10, "Waterloo", 38],
["Leyton", "Waterloo"],
],
[
None,
None,
None,
None,
None,
None,
None,
14.00000,
11.00000,
None,
11.00000,
None,
12.00000,
],
),
(
[
"UndergroundSystem",
"checkIn",
"checkOut",
"getAverageTime",
"checkIn",
"checkOut",
"getAverageTime",
"checkIn",
"checkOut",
"getAverageTime",
],
[
[],
[10, "Leyton", 3],
[10, "Paradise", 8],
["Leyton", "Paradise"],
[5, "Leyton", 10],
[5, "Paradise", 16],
["Leyton", "Paradise"],
[2, "Leyton", 21],
[2, "Paradise", 30],
["Leyton", "Paradise"],
],
[None, None, None, 5.00000, None, None, 5.50000, None, None, 6.66667],
),
],
)
def test(action, value, expected):
    """Replay the recorded action/value script against an
    UndergroundSystem instance and compare the collected outputs."""
    print()
    outputs = []
    obj = None
    for act, vals in zip(action, value):
        if act == "UndergroundSystem":
            obj = UndergroundSystem()
            outputs.append(None)
        else:
            # The remaining actions map 1:1 onto UndergroundSystem methods
            # (checkIn, checkOut, getAverageTime).
            outputs.append(getattr(obj, act)(*vals))
    assert expected == outputs
if __name__ == "__main__":
    # BUGFIX: `sys` was used here without ever being imported, so running
    # this file directly raised NameError.
    import sys
    sys.exit(pytest.main(["-s", "-v"] + sys.argv))
{
"api_name": "collections.defaultdict",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": ... |
6182705632 | import urllib.request
from bs4 import BeautifulSoup
# Scrape the NYT front page and write today's headlines to a text file.
html = 'http://www.nytimes.com/'
open_url = urllib.request.urlopen(html)
soup = BeautifulSoup(open_url, 'html.parser')
article_headings = soup.find_all(class_="indicate-hover")
# IMPROVEMENTS: the output file is now opened *after* the network fetch and
# inside a with-block, so the handle is not leaked when the fetch or parse
# fails; the manual counter is replaced with enumerate.
with open("Newyork_articles.txt", 'w') as f:
    f.write("Articles for Today:\n")
    for i, heading in enumerate(article_headings, start=1):
        f.write("\n" + str(i) + "." + heading.string + "\n")
| Jsid2022/Python | decode_web_page.py | decode_web_page.py | py | 437 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "urllib.request.request.urlopen",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 6,
"usage_type": "name"
},
{
"api_name":... |
19109148247 | import asyncio
import async_timeout
import aiohttp
class DiscordAttachmentHandler:
    """Download JSON content from a URL synchronously via an asyncio loop."""

    def __init__(self):
        # NOTE(review): get_event_loop() at construction time is deprecated
        # outside a running loop in modern asyncio -- confirm the targeted
        # Python version tolerates this.
        self.loop = asyncio.get_event_loop()

    @staticmethod
    async def fetch_json(session, url):
        # Abort the download if it takes longer than 10 seconds.
        async with async_timeout.timeout(10):
            async with session.get(url) as response:
                return await response.json()

    async def get_content(self, url):
        # A fresh session per call: simple, at the cost of connection reuse.
        async with aiohttp.ClientSession() as session:
            return await self.fetch_json(session, url)

    def get_ship_list(self, file_url, logger):
        """Synchronously fetch ``file_url`` and return its parsed JSON.

        On timeout, logs an error and implicitly returns None.
        """
        try:
            return self.loop.run_until_complete(self.get_content(file_url))
        except asyncio.TimeoutError:
            logger.error("Could not download attachment. Asyncio timeout error.")
| Mirdalan/discord_astro_bot | dastro_bot/attachments_downloader.py | attachments_downloader.py | py | 762 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "asyncio.get_event_loop",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "async_timeout.timeout",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "aiohttp.ClientSession",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "asy... |
39215055594 | import boto3
import random
import json
# ARN of the SNS topic that temperature readings are published to.
topic = 'arn:aws:sns:us-east-1:511086078676:temps'

# Shared SNS client, created once at module import.
client = boto3.client('sns')
def lambda_handler(event, context):
    """AWS Lambda entry point: publish a simulated temperature reading to
    the SNS topic. ``event`` and ``context`` are unused."""
    # person id (replace with the real id when available)
    #id = '11223344'
    # simulated thermometer: a random temperature between 36 and 40
    temp = random.uniform(36,40)
    print(temp)
    mensaje = {
        #'id' : id,
        'temp' : '%.2f' % round(temp, 2)
    }
    # publish the message on the topic
    response = client.publish(
        TopicArn = topic,
        Message = json.dumps(mensaje)
    )
    print(response)
| ansmartin/Proyecto-PYGITIC-2020 | Pruebas/Ansel/temp_to_topic.py | temp_to_topic.py | py | 598 | python | es | code | 1 | github-code | 6 | [
{
"api_name": "boto3.client",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "random.uniform",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 25,
"usage_type": "call"
}
] |
70400872827 | """
Snake game view.
"""
import sys
import pygame
from snake_controller import get_mouse_position
def play_eaten_sound():
    """Play the crunch effect for the snake eating food."""
    effect = pygame.mixer.Sound('sounds/snake_eat_sound.wav')
    pygame.mixer.Sound.play(effect)
def play_click_sound():
    """Play the UI click effect."""
    effect = pygame.mixer.Sound('sounds/click_sound.wav')
    pygame.mixer.Sound.play(effect)
class View():
    """
    Pygame based view of a snake game.
    Attributes:
        _START_MENU: A png image of the start menu.
        _END_MENU: A png image of the end menu.
        _board: A Board instance representing the snake game to
        display.
        _screen: A display surface representing the window to display
        the rest of the game components on.
        _head_image: A png image representing the head of the snake.
        _start_menu_surface: A surface representing the start menu.
        _end_menu_surface: A surface representing the end menu.
        _start_menu_rect: A rect representing the start menu.
        _end_menu_rect: A rect representing the end menu.
    """
    # Menu images load at class-definition (import) time, so pygame's
    # image module must be usable when this module is imported.
    _START_MENU = pygame.image.load('images/start_menu.png')
    _END_MENU = pygame.image.load('images/end_menu.png')
    def __init__(self, board):
        """
        Create a new view of a snake game.

        Args:
            board: A Board instance representing the snake game to
                display; provides LENGTH, HEIGHT and BORDER_WIDTH.
        """
        self._board = board
        # NOTE(review): pygame.display.set_mode is called three times with
        # the same dimensions; pygame keeps a single display surface, so
        # _screen, _start_menu_surface and _end_menu_surface all refer to
        # the same window surface.
        self._screen = pygame.display.set_mode((self._board.LENGTH + \
            self._board.BORDER_WIDTH,self._board.HEIGHT+self._board.BORDER_WIDTH))
        self._head_image = pygame.image.load('images/snake_left.png')
        self._start_menu_surface = pygame.display.set_mode((self._board.LENGTH + \
            self._board.BORDER_WIDTH,self._board.HEIGHT+self._board.BORDER_WIDTH))
        self._end_menu_surface = pygame.display.set_mode((self._board.LENGTH + \
            self._board.BORDER_WIDTH,self._board.HEIGHT+self._board.BORDER_WIDTH))
        self._start_menu_rect = self._START_MENU.get_rect(center = (300,300))
        self._end_menu_rect = self._END_MENU.get_rect(center = (300,300))
    def get_head_image(self):
        """
        Gets the correct image for the snake head.
        Gets the correct orientation of the snake head based on
        the snake's head position in relation to its body.
        """
        # NOTE(review): all four head images are reloaded from disk on
        # every call; consider caching them as class attributes.
        head_up = pygame.image.load('images/snake_up.png')
        head_down = pygame.image.load('images/snake_down.png')
        head_right = pygame.image.load('images/snake_right.png')
        head_left = pygame.image.load('images/snake_left.png')
        # figures out what image of the head to use based on
        # the relative position of the body and head
        # (coordinates[0] is the head, coordinates[1] the first body segment)
        head_orientation_x = self._board.snake.coordinates[1][0] - \
            self._board.snake.coordinates[0][0]
        head_orientation_y = self._board.snake.coordinates[1][1] - \
            self._board.snake.coordinates[0][1]
        if head_orientation_x == self._board.snake.GRID_SIZE and head_orientation_y == 0:
            self._head_image = head_left
        elif head_orientation_x == -self._board.snake.GRID_SIZE and head_orientation_y == 0:
            self._head_image = head_right
        elif head_orientation_x == 0 and head_orientation_y == self._board.snake.GRID_SIZE:
            self._head_image = head_up
        elif head_orientation_x == 0 and head_orientation_y == -self._board.snake.GRID_SIZE:
            self._head_image = head_down
    def draw(self, ate_potion):
        """
        Display a representation of the snake game.

        Args:
            ate_potion: bool; when True the snake is invisible and its
                body is not drawn for this frame.
        """
        # NOTE(review): pygame.init()/mixer.init() run on every frame; they
        # are idempotent but belong in one-time setup — confirm intent.
        pygame.init()
        pygame.mixer.init()
        pygame.display.set_caption("Ultimate Snake Game")
        icon = pygame.image.load('images/snake_icon.png')
        pygame.display.set_icon(icon)
        self._screen.fill('white')
        self.draw_apple()
        self.draw_border()
        self.draw_potion()
        self.draw_speed()
        if ate_potion:
            self.draw_invisible_snake()
        elif not ate_potion:
            self.draw_snake()
        self.draw_menus()
        self.draw_score()
        pygame.display.update()
    def draw_apple(self):
        """
        Displays the apple item.
        """
        # blit an image of an apple for the food
        apple_image = pygame.image.load('images/apple.png').convert_alpha()
        apple_rect = apple_image.get_rect()
        apple_rect.x = self._board.food.item_location[0]
        apple_rect.y = self._board.food.item_location[1]
        self._screen.blit(apple_image, apple_rect)
    def draw_potion(self):
        """
        Displays the potion item.
        """
        # blit an image of the potion for the invisibility potion
        potion_image = pygame.image.load('images/potion.png').convert_alpha()
        potion_rect = potion_image.get_rect()
        potion_rect.x = self._board.potion.item_location[0]
        potion_rect.y = self._board.potion.item_location[1]
        self._screen.blit(potion_image, potion_rect)
    def draw_speed(self):
        """
        Draws the speed boost item.
        """
        # blit an image of the speed boost for the speed boost item
        lightning_image = pygame.image.load(
            'images/lightning.png').convert_alpha()
        lightning_rect = lightning_image.get_rect()
        lightning_rect.x = self._board.speed_boost.item_location[0]
        lightning_rect.y = self._board.speed_boost.item_location[1]
        self._screen.blit(lightning_image, lightning_rect)
    def draw_snake(self):
        """
        Displays the snake head and body.
        """
        # get head image and blit
        self.get_head_image()
        head_rect = self._head_image.get_rect()
        head_rect.x = self._board.snake.coordinates[0][0]
        head_rect.y = self._board.snake.coordinates[0][1]
        self._screen.blit(self._head_image, head_rect)
        # get surface for each snake body chunk, and blit each one
        for segment in self._board.snake.coordinates[1:]:
            surface = pygame.Surface((30, 30))
            surface.fill(pygame.Color('blue'))
            segment_rect = surface.get_rect()
            segment_rect.x = segment[0]
            segment_rect.y = segment[1]
            self._screen.blit(surface, segment_rect)
    def draw_invisible_snake(self):
        """
        Displays invisible snake.
        Draws the invisible snake by not displaying anything.
        """
    def draw_border(self):
        """
        Displays the border frame around the screen.
        """
        # create frame around the game window
        # top line
        pygame.draw.rect(self._screen, (169, 169, 169), [
            0, 0, self._board.LENGTH, self._board.BORDER_WIDTH*2])
        # bottom line
        pygame.draw.rect(self._screen, (169, 169, 169), [
            0, self._board.HEIGHT, self._board.LENGTH, self._board.BORDER_WIDTH])
        # left line
        pygame.draw.rect(self._screen, (169, 169, 169), [
            0, 0, self._board.BORDER_WIDTH, self._board.HEIGHT])
        # right line
        # NOTE(review): the height here is LENGTH + BORDER_WIDTH, unlike the
        # other edges which use HEIGHT; correct only if the board is square —
        # confirm intended dimensions.
        pygame.draw.rect(self._screen, (169, 169, 169), [
            self._board.LENGTH, 0, self._board.BORDER_WIDTH, self._board.LENGTH +
            self._board.BORDER_WIDTH])
    def draw_score(self):
        """
        Displays the score.
        """
        # display score
        score = str(self._board.score)
        font = pygame.font.SysFont(None, 60)
        score_text = font.render(f'Score: {score}', True, 'black')
        self._screen.blit(score_text, (30, 10))
        pygame.display.update()
    def draw_start_menu(self):
        """
        Displays the start menu.
        """
        self._start_menu_surface.blit(self._START_MENU, self._start_menu_rect)
    def draw_game_over(self):
        """
        Displays the game over menu.
        """
        self._end_menu_surface.blit(self._END_MENU, self._end_menu_rect)
    def draw_menus(self):
        """
        Draws each menu as needed.

        Blocks in a busy event loop while the start menu or the game-over
        menu is showing; leaves the loop when the player clicks the menu
        button or closes the window (which exits the process).
        """
        # Draws starts screen.
        while self._board.start_game is False:
            self.draw_start_menu()
            pygame.display.update()
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    sys.exit()
                # Will exit start screen if mouse cursor clicks on start button.
                if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
                    if self._start_menu_rect.collidepoint(get_mouse_position()):
                        play_click_sound()
                        self._board.start_game = True
        # Draws game over screen.
        while self._board.game_over is True:
            self._screen.fill('white')
            self.draw_game_over()
            pygame.display.update()
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    sys.exit()
                # Will trigger new game if mouse cursor clicks on restart button.
                if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
                    if self._end_menu_rect.collidepoint(get_mouse_position()):
                        play_click_sound()
                        self._board.new_game = True
                        # NOTE(review): this break only exits the event loop;
                        # the while loop ends when another component resets
                        # _board.game_over — confirm that happens elsewhere.
                        break
| olincollege/ultimate-snake | snake_view.py | snake_view.py | py | 9,976 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pygame.mixer.Sound",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.Sound.play",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pygame.m... |
4601203522 | import subprocess
import py_compile
import os
from hashlib import md5
from datetime import datetime
from django.db import models
from django.conf import settings
from .constants import TYPES
def get_filename(instance, filename):
    """Build a date-bucketed, hash-prefixed upload path for *filename*.

    The *instance* argument is required by Django's ``upload_to`` protocol
    but is not used. The hash is derived from the current timestamp, so
    concurrent uploads of the same filename get distinct paths.
    """
    timestamp = datetime.now()
    folder = timestamp.strftime('utility_files/%Y/%m/%d')
    digest = md5(str(timestamp).encode('utf-8')).hexdigest()
    return os.path.join(folder, '{}_{}'.format(digest, filename))
class Utility(models.Model):
    """Base model for an executable "utility" (Swedish verbose names)."""
    class Meta:
        verbose_name = 'verktyg'
        verbose_name_plural = 'verktyg'
    def __str__(self):
        return self.name
    # Display name of the utility.
    name = models.CharField('namn', max_length=100)
    # Optional free-text description.
    description = models.CharField('beskrivning', blank=True, null=True, max_length=100)
    # Category chosen from constants.TYPES. NOTE(review): the field name
    # shadows the builtin `type`; renaming would require a schema
    # migration, so it is documented rather than changed.
    type = models.CharField('typ', choices=TYPES, max_length=100)
class PythonUtility(Utility):
    """Utility backed by an uploaded Python source file."""
    class Meta:
        verbose_name = 'pythonverktyg'
        verbose_name_plural = verbose_name
    def execute(self):
        """Byte-compile the stored source file if not already compiled.

        Returns:
            The compiled filename (source name + 'c') on success, or
            ``None`` if compilation fails.
        """
        filename = self.code.name
        compiled_filename = filename + 'c'
        path = os.path.join(settings.MEDIA_ROOT, filename)
        compiled_path = os.path.join(settings.MEDIA_ROOT, compiled_filename)
        # Compile only when the compiled artifact is missing. The previous
        # implementation walked every directory under MEDIA_ROOT/utility_files
        # and recompiled once per directory that did not contain the file,
        # which recompiled the same source many times per call.
        if not os.path.exists(compiled_path):
            try:
                py_compile.compile(path, cfile=compiled_path, doraise=True)
            except py_compile.PyCompileError:
                return None
        return compiled_filename
    code = models.FileField('pythonkod', upload_to=get_filename, blank=True, null=True, help_text='En .py-fil med interpreterbar Pythonkod.')
class CommandLineUtility(Utility):
    """Utility that runs a stored command line via subprocess."""
    class Meta:
        verbose_name = 'kommandoradsverktyg'
        verbose_name_plural = verbose_name
    def execute(self):
        """Run the stored command and capture its output.

        Returns:
            dict with 'success' (bool), 'exit_status' (int) and
            'output' (combined stdout+stderr text).
        """
        # NOTE(review): command_line comes from the database and is split
        # with str.split(); shell=False avoids shell injection, but
        # arguments containing spaces cannot be expressed — confirm this
        # is acceptable for admin-entered commands.
        cmd = self.command_line.split()
        output = None
        try:
            output = subprocess.check_output(
                cmd,
                stderr=subprocess.STDOUT,
                universal_newlines=True,
            )
        except subprocess.CalledProcessError as e:
            # Non-zero exit: surface the status and captured output
            # instead of propagating the exception.
            return {
                'success': False,
                'exit_status': e.returncode,
                'output': e.output,
            }
        return {
            'success': True,
            'exit_status': 0,
            'output': output,
        }
    # The full command line to execute (whitespace-separated arguments).
    command_line = models.CharField('kommandorad', max_length=100)
{
"api_name": "datetime.datetime.now",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "hashlib.md5",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.join",
... |
14011684325 | import numpy as np
from tqdm import tqdm
from collections import Counter
import pandas as pd
class PMI():
    """Pointwise mutual information over a tokenized corpus.

    Computes unigram, adjacent-bigram and distant (window of 50 tokens in
    either direction) joint probabilities at construction time, then
    ``run()`` scores word pairs and appends the results to
    ``pmi_results<lang>.txt``.
    """
    def __init__(self, text, lang):
        """Precompute all probability tables for *text* (list of tokens)."""
        self.text = text
        self.lang = lang
        self.p_1, self.c_1 = self.get_unigram_probs()
        self.bigram = self.get_joint_probs()
        self.distant = self.get_distant_probs()
    def get_unigram_probs(self):
        ''' compute all unigram probabilities '''
        text_size = len(self.text)
        counter_1 = Counter(self.text)
        # p_1(w_i) = c_1(w_i)/T
        p_1 = {word: count/text_size for word, count in counter_1.items()}
        # Returns both the probabilities and the raw counts (counts are
        # reused in run() to apply the min-frequency filter).
        return p_1, counter_1
    def get_joint_probs(self):
        '''
        calculate joint probability p(a,b)
        for all neighbouring words in text
        '''
        counts = Counter()
        # get counts of pairs of words
        for i, j in zip(self.text, self.text[1:]):
            counts[i, j] += 1
        total = sum(counts.values())
        # probability based on counts
        joint_probs = {k: v/total for k, v in counts.items()}
        return joint_probs
    def get_distant_probs(self):
        '''
        calculate joint probability p(a,b) for all words
        with distance at most 50 in either direction
        '''
        counts = Counter()
        for (i,a) in tqdm(enumerate(self.text)):
            # Clamp the +/-50 window at the corpus boundaries; `start`
            # keeps enumerate() producing absolute positions so i != j
            # correctly excludes the word pairing with itself.
            if i < 50: ran, start = self.text[:i+50], 0
            elif i > len(self.text) - 50: ran, start = self.text[i-50:], i-50
            else: ran, start = self.text[i-50:i+50], i-50
            # ran, start = self.text[:i+50], i
            for (j,b) in enumerate(ran, start):
                if i != j:
                    counts[a, b] += 1
        total = sum(counts.values())
        # probability based on counts
        joint_probs = {k: v/total for k, v in counts.items()}
        return joint_probs
    def pmi(self, a, b, probs):
        '''
        I'(a,b) = log_2(p(a,b)/p(a)p(b)) = log_2(p(a|b)/p(a))
        '''
        # probs is one of the precomputed joint tables (bigram or distant);
        # raises KeyError if (a, b) was never observed together.
        joint_prob = probs[a, b]
        p_a = self.p_1[a]
        p_b = self.p_1[b]
        return np.log2(joint_prob/(p_a * p_b))
    def write_to_file(self, label, results):
        ''' save results '''
        # Appends results in descending PMI order; four tab-separated
        # fields per line: label, word a, word b, PMI.
        with open('pmi_results'+self.lang+'.txt', 'a') as f:
            for (a, b), pmi in reversed(results):
                f.write('%s\t%s\t%s\t%1.4f\n' % (label, a, b, pmi))
    def run(self):
        """Score and persist bigram and distant PMI for frequent words.

        Only pairs where both words occur at least 10 times are scored.
        Results are appended to the output file; returns None.
        """
        results = {}
        for (i,a),(j,b) in zip(enumerate(self.text),enumerate(self.text[1:],1)):
            # disregard pairs in which one or both words
            # appear less than 10 times in the corpus
            if self.c_1[a] >= 10 and self.c_1[b] >= 10:
                results[a, b] = self.pmi(a, b, self.bigram)
        sorted_results = sorted(results.items(), key=lambda x:x[1])
        self.write_to_file('bigram', sorted_results)
        results = {}
        # Same +/-50-token windowing as get_distant_probs.
        for (i,a) in enumerate(self.text):
            if i < 50: ran, start = self.text[:i+50], 0
            elif i > len(self.text) - 50: ran, start = self.text[i-50:], i-50
            else: ran, start = self.text[i-50:i+50], i-50
            for (j,b) in enumerate(ran, start):
                if i != j:
                    if self.c_1[a] >= 10 and self.c_1[b] >= 10:
                        results[a, b] = self.pmi(a, b, self.distant)
        sorted_results = sorted(results.items(), key=lambda x:x[1])
        self.write_to_file('distant', sorted_results)
        print(list(reversed(sorted_results[-50:])))
if __name__ == '__main__':
    # Run the PMI experiments for both the English and Czech corpora.
    for lang in ['EN', 'CZ']:
        # Input is one token per line, ISO 8859-2 encoded.
        with open('TEXT'+lang+'1.txt', 'r', encoding='iso-8859-2') as f:
            text = [word.strip() for word in f.readlines()]
        pmi = PMI(text, lang)
        # prepare results file
        # NOTE(review): the header names three columns but write_to_file
        # emits four tab-separated fields (label, word1, word2, pmi) —
        # confirm the intended header.
        with open('pmi_results'+lang+'.txt', 'w') as f:
            f.write('WORD1\tWORD2\tPMI\n')
        # run experiments
        results = pmi.run()  # run() returns None; 'results' is unused
| awmcisaac/charles | winter/npfl067/hw2/best_friends.py | best_friends.py | py | 3,872 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.Counter",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm... |
70497736189 | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 25 20:15:51 2021
@author: blgnm
"""
import george
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.optimize import minimize
from scipy.optimize import curve_fit
import random
from astropy.stats import biweight_location
import numpy as np
import pandas as pd
from imblearn.over_sampling import SMOTE
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import sklearn
import matplotlib.pyplot as plt
import pywt
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_validate
from antares_client.search import search
from antares_client._api.models import Locus
from antares_client.search import get_by_ztf_object_id
from tqdm import tqdm
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from imblearn.pipeline import Pipeline as imbpipeline
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import confusion_matrix
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from sklearn.ensemble import RandomForestClassifier
from imblearn.over_sampling import BorderlineSMOTE
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import MinMaxScaler
from sklearn import datasets, metrics, model_selection, svm
from sklearn.model_selection import RepeatedStratifiedKFold
from imblearn.ensemble import BalancedRandomForestClassifier
class ZTFData:
def __init__(self,ZTF_id = None):
self.ZTF_id = ZTF_id
def get_id(self):
return self.ZTF_id
def get_raw_data(self):
for i in self.ZTF_id:
locus = get_by_ztf_object_id(i)
return locus.lightcurve
def get_lightcurve(self):
"""
Returns
-------
Light Curve of all ztf objects stored within ZTFData()
"""
for i in self.ZTF_id:
plt.figure(num=None, figsize=(12, 5), dpi=100)
locus = get_by_ztf_object_id(i)
lc = locus.lightcurve
lc['alert_type'] = lc['alert_id'].apply(lambda x: x[:x.index(':')])
for (pb, at), df in lc.groupby(['ant_passband', 'alert_type']):
is_candidate = at == 'ztf_candidate'
plt.errorbar(
x=df['ant_mjd'],
y=df['ant_mag'] if is_candidate else df['ant_maglim'],
yerr=df['ant_magerr'],
# uplims=(at!='ztf_candidate'),
label=pb + ' ' + at[4:],
color=pb,
fmt=('o' if is_candidate else '^') + pb.lower(),
alpha=1 if is_candidate else 0.3)
plt.plot(df['ant_mjd'], df['ant_mag'])
plt.title(str(i))
plt.xlabel('MJD')
plt.ylabel('Magnitude')
plt.legend()
plt.gca().invert_yaxis()
plt.show()
def Data(self, labels = None):
"""
Parameters
----------
labels : Pandas Series, optional
Classification labels, if applicable (I don't remember if that part works tbh). The default is None.
Returns
-------
Pandas data frame of processed event information for all provided ZTF ID's.
"""
Supernovadf = pd.DataFrame(columns = ["mjd", "band", "magnitude", "error", "event","class"])
#plt.title(locus.properties['ztf_object_id'])
loop = tqdm(total = len(self.ZTF_id), position =0, leave = False)
for i in self.ZTF_id:
locus = get_by_ztf_object_id(i)
try:
Data = locus.lightcurve
except Exception:
pass
Data_frame1 = pd.DataFrame.from_dict(Data[Data['ant_passband']=='g'])
Data_frame2 = pd.DataFrame.from_dict(Data[Data['ant_passband']=='R'])
Data_frame1['ant_mag'] = Data_frame1['ant_mag'].replace(np.nan, 0)
Data_frame2['ant_mag'] = Data_frame2['ant_mag'].replace(np.nan, 0)
Data_frame1 = Data_frame1[Data_frame1.ant_mag > 0]
Data_frame2 = Data_frame2[Data_frame2.ant_mag > 0]
MJD1 = Data_frame1['ant_mjd']
MJD2 = Data_frame2['ant_mjd']
MagnitudeG = Data_frame1['ant_mag']
MagnitudeR = Data_frame2['ant_mag']
MJD1 = MJD1 - (MJD1.min() - 1)
MJD2 = MJD2 - (MJD2.min() - 1)
GBand = pd.DataFrame(columns = ["mjd", "band", "magnitude", "error", "event"])
GBand["mjd"] = Data_frame1["ant_mjd"]
GBand["band"] = pd.Series(np.zeros([len(MagnitudeG)]))
GBand["magnitude"] = MagnitudeG
GBand['band'] = GBand['band'].replace(np.nan, 0)
GBand['error'] = Data_frame1["ant_magerr"]
RBand = pd.DataFrame(columns = ["mjd", "band", "magnitude", "error", "event"])
RBand["mjd"] = Data_frame2["ant_mjd"]
RBand["band"] = pd.Series(np.ones([len(MagnitudeR)]))
RBand["magnitude"] = MagnitudeR
RBand['band'] = RBand['band'].replace(np.nan, 1)
RBand['error'] = Data_frame2['ant_magerr']
num = np.zeros(len(RBand))
num1 = np.zeros(len(GBand))
GBand['event'] = num1
RBand['event'] = num
GBand['class'] = num1
RBand['class'] = num
GBand['event'] = GBand['event'].replace([0], [str(i)])
RBand['event'] = RBand['event'].replace([0], [str(i)])
Band = pd.concat([GBand, RBand], axis = 0, ).reset_index(drop=True)
Supernovadf = pd.concat([Supernovadf, Band], axis = 0).reset_index(drop=True)
loop.set_description("Fetching Data...".format(len(i)))
loop.update(1)
loop.close()
return(Supernovadf)
def GaussianRegression(self, data = None, classification = True, DateRange = None, n_samples = 100):
"""
Parameters
----------
data : Pandas Data Frame, optional
Pandas Data Frame with info from Data(); if None, it will pull data based off stored ZTF ID's.
The default is None.
classification : Boolean, optional
If you are making a training set to True (I always keep it True personally, not sure if it works otherwise).
The default is True.
DateRange : Integer, optional
How many days you want the classifier to look at. The default is None.
n_samples : Integer, optional
The number of samples GP Regression takes from the data. The default is 100.
Returns
-------
Pandas Data Frame
Pandas Data Frame of GP Regression Data.
"""
def Multi_Band_GP(x_range, x, y, y_err, dim, n_samples = False, sampling = False):
""" Considers cross corrolations between multiple bands as dims, prone to holding the order of the bands too rigidly """
""" Will optimize for 'best' parameters when given no parameters """
""" x = mjd, y and y_err = measurment, dim and dim_err = wavelength in nm """
length_scale = 20
signal_to_noises = (np.abs(y) / np.sqrt(np.power(y_err,2) + (1e-2 * np.max(y))**2))
scale = np.abs(y[signal_to_noises.argmax()])
kernel = ((0.5 * scale)**2 * george.kernels.Matern32Kernel([length_scale**2, 6000**2], ndim=2))
kernel.freeze_parameter('k2:metric:log_M_1_1')
kernel.freeze_parameter('k1:log_constant') #Fixed Scale
x_data = np.vstack([x, dim]).T
gp = george.GP(kernel, mean = biweight_location(y))
guess_parameters = gp.get_parameter_vector()
gp.compute(x_data, y_err)
x_pred = np.linspace(x.min(), x.max(), n_samples)
x_pred = np.vstack([x, dim]).T
pred, pred_var = gp.predict(y, x_pred, return_var=True)
# bounds = [(0, np.log(1000**2))]
def neg_ln_like(p):
gp.set_parameter_vector(p)
return -gp.log_likelihood(y)
def grad_neg_ln_like(p):
gp.set_parameter_vector(p)
return -gp.grad_log_likelihood(y)
result = minimize(
neg_ln_like,
gp.get_parameter_vector(),
jac=grad_neg_ln_like,
# bounds=bounds
)
if result.success:
gp.set_parameter_vector(result.x)
else:
gp.set_parameter_vector(guess_parameters)
gp.set_parameter_vector(result.x)
# print(kernel.get_parameter_vector(True))
#print("\nFinal ln-likelihood: {0:.2f}".format(gp.log_likelihood(y)))
if n_samples != False:
x_pred = np.vstack([np.array(list(np.linspace(x_range.min(), x_range.max(), n_samples))*np.unique(dim).size),
np.array(np.sort(list(np.unique(dim))*n_samples))]).T
# x_pred = np.vstack([np.array(list(np.linspace(x_range.min(), x_range.max(), n_samples))*6),
# np.array(np.sort([357, 476, 621, 754, 870, 1004]*n_samples))]).T
pred, pred_var = gp.predict(y, x_pred, return_var=True)
output = [x_pred[:,0], pred, np.sqrt(pred_var), x_pred[:,1], []]
return output
elif sampling != False:
x_pred = np.vstack([np.array(sampling[0]),
np.array(sampling[1])]).T
pred, pred_var = gp.predict(y, x_pred, return_var=True)
output = [x_pred[:,0], pred, np.sqrt(pred_var), x_pred[:,1], []]
return output
def band_to_color(inp):
labels = [357, 476, 621, 754, 870, 1004]
# labels = [0,1,2,3,4,5]
labels_2=['green', 'red', 'goldenrod', 'blue', 'pink', 'grey']
outlst = []
for x in inp:
out = labels.index(int(x))
out = labels_2[out]
outlst.append(out)
return outlst
def band_to_wvlg(inp):
labels = [0,1,2,3,4,5]
labels_2=[357.0, 476.7, 621.5, 754.5, 870.75, 1004.0]
outlst = []
for x in inp:
out = labels.index(int(x))
out = labels_2[out]
outlst.append(out)
return outlst
def expfun(x, a, b):
return np.multiply(np.exp(np.multiply(x, b)), a)
def randomoff(inp, off = 0.25):
outlist = []
for i in inp:
value = random.random()
outlist += [i+value*off]
return outlist
def Spectra_Model():
return 0
if data is None:
sf = self.Data()
else:
sf = data
Gaus = pd.DataFrame()
pd.options.mode.chained_assignment = None # default='warn'
SN_uqlst = sf.event.unique()
loop = tqdm(total = len(SN_uqlst), position =0, leave = False)
for i in SN_uqlst:
SNdf = sf[sf['event']==i]
SNdf['mjd'] = SNdf['mjd'] - (SNdf['mjd'].min() -1)
if DateRange is not None:
SNdf = SNdf[SNdf['mjd'] < DateRange]
b = SNdf['band'].unique() == np.array([0.0, 1.0])
if b[0] != True or b[1] != True:
continue
mjdrange = np.asarray([min(SNdf['mjd'].tolist()),max(SNdf['mjd'].tolist())])
D = Multi_Band_GP(x_range = mjdrange, x=SNdf['mjd'].to_numpy(),
y=SNdf['magnitude'].to_numpy(), y_err=SNdf['error'].to_numpy(),
dim=band_to_wvlg(SNdf['band'].to_numpy()),
n_samples= n_samples)
GaussianFitted = pd.DataFrame()
GaussianFitted['mjd'] = D[0]
GaussianFitted['magnitude'] = D[1]
GaussianFitted['error'] = D[2]
GaussianFitted['band'] = D[3]
y = pd.Series(data=np.zeros(1000)).astype(int)
y = y.replace(0,i)
GaussianFitted['event'] = y
if classification == True:
x = pd.Series(data = np.zeros(1000)).astype(int)
x = x.replace(0, SNdf['class'].unique()[0])
GaussianFitted['class'] = x
Gaus = pd.concat([Gaus, GaussianFitted])
loop.set_description("Computing GPR...".format(len(i)))
loop.update(1)
loop.close()
return Gaus
def Gpr_Graph(self, DateRange = None ,n_samples = 100):
def band_to_color(inp):
labels = [357, 476, 621, 754, 870, 1004]
# labels = [0,1,2,3,4,5]
labels_2=['green', 'red', 'goldenrod', 'blue', 'pink', 'grey']
outlst = []
for x in inp:
out = labels.index(int(x))
out = labels_2[out]
outlst.append(out)
return outlst
Gaussian = self.GaussianRegression(DateRange = DateRange,n_samples = n_samples)
for i in Gaussian['event'].unique():
plt.errorbar(Gaussian[Gaussian['event']==i]['mjd'],Gaussian[Gaussian['event']==i]['magnitude'],Gaussian[Gaussian['event']==i]['error'], c=band_to_color(Gaussian[Gaussian['event']==i]['band']), alpha = 0.2, ls = 'None')
#plt.errorbar(x = SNdf['mjd'].to_numpy(), y = SNdf['magnitude'].to_numpy(), yerr = SNdf['error'].to_numpy(), ls = 'None')
plt.xlabel("MJD")
plt.ylabel("Magnitude")
plt.title(i)
plt.gca().invert_yaxis()
plt.show()
plt.close()
def Wavelet(self, Data = None, WaveletType = 'sym2', classification = True, Date = None, length = 150):
"""
Parameters
----------
Note: This version Processes both bands seperately, see Wavelet2() for multiband processing
Data : Pandas Data Frame, optional
Pandas DataFrame processed by Data(). The default is None.
WaveletType : Str, optional
Type of Wavelet transformation to be used. The default is 'sym2'.
classification : Boolean, optional
If you are making a training set to True (I always keep it True personally, not sure if it works otherwise).
The default is True.
Date : Integer, optional
How many days you want the classifier to look at. The default is None. The default is None.
length : Integer, optional
Set maximum event length; all events longer than set length are filtered out. The default is 150.
Returns
-------
Function_Parameters : Pandas DataFrame
Event Information such as ZTF ID and classification.
Coefficients : Numpy Array
List of Wavelet transformation Coefficients.
"""
from tqdm import tqdm
Function_Parameters = pd.DataFrame()
Coefficients = list()
if Data is None:
Data = self.Data()
Gaussian = self.GaussianRegression(data = Data, DateRange = Date)
Data_uqlst = Data['event'].unique()
loop = tqdm(total = len(Data['event'].unique()), position =0, leave = False)
for i in Data_uqlst:
b = Data[(Data['event']==i)]['band'].unique() == np.array([0.0, 1.0])
if b[0] != True or b[1] != True:
continue
if max(Data[Data['event']==i]['mjd'])-min(Data[Data['event']==i]['mjd']) > length:
#print(len(Data[Data['event']==i]['mjd']))
continue
else:
GaussianFitted = Gaussian[Gaussian['event']==i]
Uqe_Bands = GaussianFitted['band'].unique()
for UBand in Uqe_Bands:
if classification == True:
Class = GaussianFitted[GaussianFitted['band']==UBand]['class']
x = GaussianFitted[GaussianFitted['band']==UBand]['mjd'].astype(float)
y = GaussianFitted[GaussianFitted['band']==UBand]['magnitude'].astype(float)
y_err = GaussianFitted[GaussianFitted['band']==UBand]['error'].astype(float)
signal = y.values.squeeze()
ca = np.array(pywt.swt(np.array(signal), WaveletType, level = 2, axis = 0))
npoints=len(ca[0, 0, :])
coefficients =ca.reshape(2*2, npoints)
Features = pd.DataFrame(data = {'band': [UBand], 'event': str(i),
'delta':y.values.max()-y.values.min(), 'variance':y.var(),
'duration': max(Data[Data['event']==i]['mjd'])-min(Data[Data['event']==i]['mjd'])})
if classification == True:
Features['class'] = Class.unique()[0]
Coefficients.append(coefficients.flatten())
Function_Parameters = pd.concat([Function_Parameters, Features], axis =0 )
Function_Parameters = Function_Parameters.reset_index(drop=True)
loop.set_description("Computing Wavelet Transform...".format(len(i)))
loop.update(1)
loop.close()
Function_Parameters['class'] = Function_Parameters['class'].replace(['SN Ia', 'SN II', 'SN Ib/c', 'SLSN'], [0,1,2,3])
#Function_Parameters['class'] = Function_Parameters['class'].replace(['SN Ia', 'SN II', 'SN IIn', 'SN IIP', 'SN Ia-91T', 'SLSN-I', 'SLSN-II', 'SN Ic', 'SN Ib', 'SN Ic-BL', 'SN IIb', 'SN Ia-pec', 'SN Ibn', 'SN Ia-91bg'], [0,1,2,3,4,5,6,7,8,9, 10,11,12,13])
return Function_Parameters, Coefficients
def Wavelet2(self, Data = None, WaveletType = 'sym2', classification = True, Date = None, length = 150):
"""
Parameters
----------
Note: This version Processes both bands together, see Wavelet() for seperate band processing
Data : Pandas Data Frame, optional
Pandas DataFrame processed by Data(). The default is None.
WaveletType : Str, optional
Type of Wavelet transformation to be used. The default is 'sym2'.
classification : Boolean, optional
If you are making a training set to True (I always keep it True personally, not sure if it works otherwise).
The default is True.
Date : Integer, optional
How many days you want the classifier to look at. The default is None. The default is None.
length : Integer, optional
Set maximum event length; all events longer than set length are filtered out. The default is 150.
Returns
-------
Function_Parameters : Pandas DataFrame
Event Information such as ZTF ID and classification.
Coefficients : Numpy Array
List of Wavelet transformation Coefficients.
"""
from tqdm import tqdm
Function_Parameters = pd.DataFrame()
Coefficients = list()
if Data is None:
Data = self.Data()
Gaussian = self.GaussianRegression(data = Data, DateRange = Date)
Data_uqlst = Data['event'].unique()
loop = tqdm(total = len(Data['event'].unique()), position =0, leave = False)
for i in Data_uqlst:
b = Data[(Data['event']==i)]['band'].unique() == np.array([0.0, 1.0])
if b[0] != True or b[1] != True:
continue
if max(Data[Data['event']==i]['mjd'])-min(Data[Data['event']==i]['mjd']) > length:
#print(len(Data[Data['event']==i]['mjd']))
continue
GaussianFitted = Gaussian[Gaussian['event']==i]
if classification == True:
Class = GaussianFitted['class']
x = GaussianFitted['mjd'].astype(float)
y = GaussianFitted['magnitude'].astype(float)
y_err = GaussianFitted['error'].astype(float)
signal = y.values.squeeze()
if len(signal) == 0:
continue
from scipy import integrate
Area = integrate.simpson(y, x)
ca = np.array(pywt.swt(np.array(signal), WaveletType, level = 2, axis = 0))
npoints=len(ca[0, 0, :])
coefficients =ca.reshape(2*2, npoints)
Features = pd.DataFrame(data = {'event': str(i),
'delta':y.values.max()-y.values.min(), 'variance':y.var(),
'duration': max(Data[Data['event']==i]['mjd'])-min(Data[Data['event']==i]['mjd']),
'area':Area}, index=[0])
if classification == True:
Features['class'] = Class.unique()[0]
Coefficients.append(coefficients.flatten())
Function_Parameters = pd.concat([Function_Parameters, Features], axis =0 )
Function_Parameters = Function_Parameters.reset_index(drop=True)
loop.set_description("Computing Wavelet Transform...".format(len(i)))
loop.update(1)
loop.close()
Function_Parameters['class'] = Function_Parameters['class'].replace(['SN Ia', 'SN II', 'SN Ib/c', 'SLSN'], [0,1,2,3])
#Function_Parameters['class'] = Function_Parameters['class'].replace(['SN Ia', 'SN II', 'SN IIn', 'SN IIP', 'SN Ia-91T', 'SLSN-I', 'SLSN-II', 'SN Ic', 'SN Ib', 'SN Ic-BL', 'SN IIb', 'SN Ia-pec', 'SN Ibn', 'SN Ia-91bg'], [0,1,2,3,4,5,6,7,8,9, 10,11,12,13])
return Function_Parameters, Coefficients
def DimensionalityReduction2(self, Coefficients =None, labels=None, smot = False, n = 20, Trainset = True):
"""
Parameters
----------
Use this version if you used Wavelet2() (Multiband processing)
Coefficients : Pandas Data Frame, optional
Provide your own wavelet coefficients. The default is None.
labels : Pandas Data Frame, optional
Provide your own labels. The default is None.
smot : Boolean, optional
Choose whether or not to use SMOTE. The default is False.
n : Integer, optional
Output Dimension. The default is 20.
Trainset : Boolean, optional
Specify if this is the training set or if its unlabeled data. The default is True.
Returns
-------
Pandas Data Frame
Pandas Data Frame of PCA reduced Wavelet Coefficients.
Function
If Trainset = True, returns PCA() to use on unlabeled data.
"""
if Coefficients is not None:
labels = labels
Coefficients = Coefficients
else:
labels, Coefficients = self.Wavelet()
Coefficients = pd.concat([pd.DataFrame(data=labels),pd.DataFrame(data=Coefficients)],axis=1)
Coeff = Coefficients.iloc[:,6:]
pca = PCA(n_components = n, whiten = True)
if smot == True:
sm = SMOTE()
Coeff, labels= sm.fit_resample(Coeff, Coefficients['class'].ravel())
print(Coeff)
final = pca.fit_transform((Coeff))
#RBand2, GBand2 = pd.DataFrame(data = {'Rdelta': RBand['delta'], 'Rvariance': RBand['variance']}), pd.DataFrame(data = {'Gdelta':GBand['delta'], 'Gvariance': GBand['variance']})
if smot == True:
events =pd.DataFrame(data = {'class': labels}).reset_index(drop=True)
if smot == False:
events =pd.DataFrame(data = {'event': Coefficients['event'], 'class': Coefficients['class']}).reset_index(drop=True)
if Trainset == True:
return pd.concat([events, pd.DataFrame(final)],axis=1), pca
if Trainset == False:
return pd.concat([events, pd.DataFrame(data = Coeff).reset_index(drop=True)],axis=1)
def DimensionalityReduction(self, Coefficients =None, labels=None, smot = False, n = 20, Trainset = True):
"""
Parameters
----------
Use this version if you used Wavelet() (One band at a time processing)
Coefficients : Pandas Data Frame, optional
Provide your own wavelet coefficients. The default is None.
labels : Pandas Data Frame, optional
Provide your own labels. The default is None.
smot : Boolean, optional
Choose whether or not to use SMOTE. The default is False.
n : Integer, optional
Output Dimension. The default is 20.
Trainset : Boolean, optional
Specify if this is the training set or if its unlabeled data. The default is True.
Returns
-------
Pandas Data Frame
Pandas Data Frame of PCA reduced Wavelet Coefficients.
Function
If Trainset = True, returns PCA() to use on unlabeled data.
"""
if Coefficients is not None:
labels = labels
Coefficients = Coefficients
else:
labels, Coefficients = self.Wavelet()
Coefficients = pd.concat([pd.DataFrame(data=labels),pd.DataFrame(data=Coefficients)],axis=1)
GBand, RBand = Coefficients[Coefficients['band']==357.0].reset_index(drop=True), Coefficients[Coefficients['band']==476.7].reset_index(drop=True)
print(RBand)
pca = PCA(n_components = n, whiten = True)
RBand = pd.concat([RBand.iloc[:,6:].reset_index(drop=True),GBand.iloc[:,6:].reset_index(drop=True)],axis=1, ignore_index=True)
if smot == True:
sm = SMOTE()
RBand, labels= sm.fit_resample(RBand, GBand['class'].ravel())
final = pca.fit_transform((RBand))
#RBand2, GBand2 = pd.DataFrame(data = {'Rdelta': RBand['delta'], 'Rvariance': RBand['variance']}), pd.DataFrame(data = {'Gdelta':GBand['delta'], 'Gvariance': GBand['variance']})
if smot == True:
events =pd.DataFrame(data = {'class': labels}).reset_index(drop=True)
if smot == False:
events =pd.DataFrame(data = {'event': GBand['event'], 'class': GBand['class']}).reset_index(drop=True)
if Trainset == True:
return pd.concat([events, pd.DataFrame(final)],axis=1), pca
if Trainset == False:
return pd.concat([events, pd.DataFrame(data = RBand).reset_index(drop=True)],axis=1)
def SupernovaTrainer(self, Train = None, y = None, **kwargs):
"""
Parameters
----------
Trains for Supernova vs Bogus Classification
Train : Pandas Data Frame, optional
Provide your own wavelet coefficients. The default is None.
y : Pandas Data Frame, optional
Provide data labels. The default is None.
**kwargs : Classifier arguments
Input arguments for classifier.
Returns
-------
Function
Trained Classifier.
"""
if Train is None:
Data = self.DimensionalityReduction()
Train = Data.iloc[:,2:].reset_index(drop=True)
y = Data['class'].reset_index(drop=True)
svc = RandomForestClassifier(random_state=iterations, n_estimators = 30, min_samples_split = 6)
if kwargs:
classifier=AdaBoostClassifier(**kwargs)
else:
classifier=AdaBoostClassifier(base_estimator=svc,n_estimators=30, learning_rate =4)
if evaluate == True:
pipeline = imbpipeline(steps = [['classifier', classifier]])
stratified_kfold = StratifiedKFold(n_splits=3, shuffle=True)
print(cross_validate(pipeline, np.array(Train),np.array(y).ravel(),scoring = 'accuracy'))
y_pred = cross_val_predict(pipeline, np.array(Train),np.array(y), cv = stratified_kfold)
conf_mat = confusion_matrix(y, y_pred)
plot_confusion_matrix1(conf_mat, ['SN', 'Bogus'], cmap = 'Blues')
return classifier.fit(Train, y)
def SupernovaTypeClassifierTrainer(self, Train, y, evaluate = True , smot = True,
Ada = True, KNN = False,roc = True, Rand = False,
grid = False, n = 1, fold = 3,n_components = 20,
metric = 'accuracy', param_grid = None,**kwargs):
"""
Parameters
----------
Trains for Supernova Type Classification
Train : Pandas Data Frame, optional
Provide your own wavelet coefficients. The default is None.
y : Pandas Data Frame, optional
Provide data labels. The default is None.
evaluate : Boolean, optional
Choose whether or not to show model performance. The default is True.
Ada : Boolean, optional
Choose to use Ada Boosted Random Forrest. The default is True.
KNN : Boolean, optional
Choose to use K-nearest neighbors. The default is False.
**kwargs : TYPE
DESCRIPTION.
Raises
------
Exception
If you set both KNN and Ada to false, raises an error.
Returns
-------
Function
Trained Classifier.
"""
if Train is not None:
TrainingData, u = pd.concat([pd.DataFrame(data=y),pd.DataFrame(data=Train)],axis=1).reset_index(drop=True), y
#else *** Remember to make this load in default training data
svc = RandomForestClassifier(n_estimators = 30, min_samples_split = 6)
TrainingData = TrainingData.sample(frac = 1).reset_index(drop=True)
if kwargs:
if Ada ==True:
classifier = AdaBoostClassifier(**kwargs)
if KNN == True:
classifier = KNeighborsClassifier(**kwargs)
if Rand == True:
#classifier = RandomForestClassifier(**kwargs)
classifier = BalancedRandomForestClassifier(**kwargs)
else:
classifier=AdaBoostClassifier(base_estimator=svc,n_estimators=30, learning_rate =2)
#classifier = KNeighborsClassifier(n_neighbors=1500)
if evaluate == True:
if smot == True:
pipeline = imbpipeline(steps = [['scale',MinMaxScaler()],['smote', SMOTE()],['classifier', classifier]])
if smot == False:
pipeline = imbpipeline(steps = [['scale',MinMaxScaler()],['classifier', classifier]])
stratified_kfold = StratifiedKFold(n_splits=fold, shuffle=True)
repeatstratified_kfold = RepeatedStratifiedKFold(n_splits=fold, n_repeats=n)
cross = cross_validate(pipeline, np.array(TrainingData.iloc[:,1:]),np.array(TrainingData.iloc[:,0]),scoring = metric, cv = repeatstratified_kfold, n_jobs = -1)
print(f'The mean {metric} over {fold} fold stratified crossvalidation repeated {n} times is {np.mean(cross["test_score"])}, with a standard deviation of {np.std(cross["test_score"])}')
y_pred = cross_val_predict(pipeline, np.array(TrainingData.iloc[:,1:]),np.array(TrainingData.iloc[:,0]), cv = stratified_kfold, n_jobs = -1)
#conf_mat = confusion_matrix(y, y_pred)
conf_mat = confusion_matrix(TrainingData.iloc[:,0], y_pred)
disp = ConfusionMatrixDisplay(confusion_matrix=conf_mat)
disp.plot(cmap = 'Blues')
if grid == True:
clf = GridSearchCV(pipeline, param_grid, n_jobs = -1, cv = stratified_kfold, scoring = 'f1_micro', verbose = 1)
clf.fit(TrainingData.iloc[:,1:], TrainingData.iloc[:,0])
plot_confusion_matrix1(conf_mat, ['Type 1a','Type 2', 'Type 1b/c', 'SLSN'], cmap = 'Blues')
Classifier = pipeline.fit(TrainingData.iloc[:,1:], TrainingData.iloc[:,0])
if grid == False:
return Classifier
if grid == True:
return clf
def plot_confusion_matrix1(cm,
                           target_names,
                           title='Confusion matrix',
                           cmap=None,
                           normalize=True):
    """
    Given a sklearn confusion matrix (cm), make a nice plot.

    Arguments
    ---------
    cm: confusion matrix from sklearn.metrics.confusion_matrix
    target_names: given classification classes such as [0, 1, 2]
                  the class names, for example: ['high', 'medium', 'low']
    title: the text to display at the top of the matrix
    cmap: the gradient of the values displayed from matplotlib.pyplot.cm
          see http://matplotlib.org/examples/color/colormaps_reference.html
          plt.get_cmap('jet') or plt.cm.Blues
    normalize: If False, plot the raw numbers
               If True, plot the proportions

    Usage
    -----
    plot_confusion_matrix(cm           = cm,            # confusion matrix created by
                                                        # sklearn.metrics.confusion_matrix
                          normalize    = True,          # show proportions
                          target_names = y_labels_vals, # list of names of the classes
                          title        = best_estimator_name) # title of graph

    Citation
    --------
    http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
    """
    import matplotlib.pyplot as plt
    import numpy as np
    import itertools

    # Overall accuracy is the trace (correct predictions) over the total count.
    accuracy = np.trace(cm) / np.sum(cm).astype('float')
    misclass = 1 - accuracy

    if cmap is None:
        cmap = plt.get_cmap('Blues')

    plt.figure(figsize=(8, 6))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()

    if target_names is not None:
        tick_marks = np.arange(len(target_names))
        plt.xticks(tick_marks, target_names, rotation=45)
        plt.yticks(tick_marks, target_names)

    if normalize:
        # Convert counts to per-true-class proportions (rows sum to 1).
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

    thresh = cm.max() / 1.5 if normalize else cm.max() / 2
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        # White text on dark (high-valued) cells, black on light ones, per the
        # cited scikit-learn example. The original used "black" in both
        # branches, making the conditional dead and dark cells unreadable.
        cell_color = "white" if cm[i, j] > thresh else "black"
        if normalize:
            plt.text(j, i, "{:0.4f}".format(cm[i, j]),
                     horizontalalignment="center",
                     color=cell_color)
        else:
            plt.text(j, i, "{:,}".format(cm[i, j]),
                     horizontalalignment="center",
                     color=cell_color)

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))
    plt.show()
| Feliconut/PurduePHYS324-LSST | SupernovaClassification.py | SupernovaClassification.py | py | 36,879 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "antares_client.search.get_by_ztf_object_id",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 71,
"usage_type": "name"
},
{
... |
34429142094 | import glfw
from OpenGL.GL import *
from OpenGL.GLU import *
import numpy as np
# Camera azimuth angle in radians; updated by key_callback, read in main's loop.
gCamAng = 0.
def myLookAt(eye, at, up):
    """Right-multiply the current GL matrix by a viewing matrix built from the
    camera position (eye), the look-at target (at) and the up direction,
    equivalent to gluLookAt."""
    # Orthonormal camera basis: w points from the target toward the eye.
    back = eye - at
    w = back / np.sqrt(np.dot(back, back))
    side = np.cross(up, w)
    u = side / np.sqrt(np.dot(side, side))
    v = np.cross(w, u)
    # Rotation rows plus translation column expressing eye in camera space.
    view = np.array([[u[0], u[1], u[2], -np.dot(u, eye)],
                     [v[0], v[1], v[2], -np.dot(v, eye)],
                     [w[0], w[1], w[2], -np.dot(w, eye)],
                     [0, 0, 0, 1]])
    # OpenGL expects column-major order, hence the transpose.
    glMultMatrixf(view.T)
def myOrtho(l, r, b, t, n, f):
    """Right-multiply the current GL matrix by an orthographic projection,
    equivalent to glOrtho(l, r, b, t, n, f)."""
    Morth = np.array([[2/(r-l), 0, 0, -(r+l)/(r-l)],
                      [0, 2/(t-b), 0, -(t+b)/(t-b)],
                      [0, 0, -2/(f-n), -(f+n)/(f-n)],
                      [0, 0, 0, 1]])
    # The original computed Morth but never applied it, so the projection
    # silently stayed identity. Apply it like myLookAt does (transposed for
    # OpenGL's column-major layout).
    glMultMatrixf(Morth.T)
def render(camAng):
    """Clear the frame, set up an orthographic camera orbiting the origin at
    angle camAng (radians), and draw the world coordinate axes."""
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    glEnable(GL_DEPTH_TEST)

    # set the current matrix to the identity matrix
    glLoadIdentity()

    # Orthographic projection matching the reference call glOrtho(-1,1, -1,1, -10,10).
    # The original passed six undefined globals (left, right, ...) and then
    # called myOrtho() again with no arguments — both calls raised at runtime.
    myOrtho(-1, 1, -1, 1, -10, 10)

    # Orbit the eye around the y-axis at radius 1, height 0.5.
    eye = np.array([1*np.sin(camAng), .5, 1*np.cos(camAng)])
    up = np.array([0, 1, 0])
    at = np.array([0, 0, 0])
    myLookAt(eye, at, up)

    # Draw the x (red), y (green) and z (blue) axes.
    glBegin(GL_LINES)
    glColor3ub(255, 0, 0)
    glVertex3fv(np.array([0.,0.,0.]))
    glVertex3fv(np.array([1.,0.,0.]))
    glColor3ub(0, 255, 0)
    glVertex3fv(np.array([0.,0.,0.]))
    glVertex3fv(np.array([0.,1.,0.]))
    glColor3ub(0, 0, 255)
    glVertex3fv(np.array([0.,0.,0]))
    glVertex3fv(np.array([0.,0.,1.]))
    glEnd()
def key_callback(window, key, scancode, action, mods):
    """Rotate the camera by 10 degrees per press (or key repeat) of 1 / 3."""
    global gCamAng
    # Ignore key releases; only presses and repeats move the camera.
    if action not in (glfw.PRESS, glfw.REPEAT):
        return
    if key == glfw.KEY_1:
        gCamAng -= np.radians(10)
    elif key == glfw.KEY_3:
        gCamAng += np.radians(10)
def main():
    """Create the GLFW window and run the render loop until it is closed."""
    if not glfw.init():
        return
    win = glfw.create_window(640, 640, 'Lecture8', None, None)
    if not win:
        # Window creation failed; release GLFW resources before bailing out.
        glfw.terminate()
        return

    glfw.make_context_current(win)
    glfw.set_key_callback(win, key_callback)

    # Event/render loop: poll input, redraw with the current camera angle,
    # then present the frame.
    while not glfw.window_should_close(win):
        glfw.poll_events()
        render(gCamAng)
        glfw.swap_buffers(win)

    glfw.terminate()
# Start the viewer only when this file is executed as a script.
if __name__ == "__main__":
    main()
| vctr7/Computer_Graphics | hw8.py | hw8.py | py | 2,584 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "numpy.sqrt",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.cross",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 10,
... |
71951318589 | from flask import Flask, render_template, request
import os
from Prediction import deep_ocr, easy_ocr
from flask_bootstrap import Bootstrap
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField
from wtforms.validators import InputRequired, Email, Length
# webserver gateway interface
app = Flask(__name__)  # the WSGI application object
Bootstrap(app)  # register flask-bootstrap resources on the app
BASIC_PATH = os.getcwd()  # process working directory at startup
# Uploads are stored under static/ so templates can reference the saved image.
UPLOAD_PATH = os.path.join(BASIC_PATH, 'static/upload/')
@app.route('/', methods=['POST', 'GET'])
def index():
    """Render the upload form; on POST save the image and run OCR on it.

    Returns the template with `upload=True` and the recognized plate text on
    a successful upload, otherwise the empty form.
    """
    if request.method == 'POST':
        # get the uploaded file
        uploaded_file = request.files['image']
        # basename() strips any directory components a malicious client could
        # embed in the filename, preventing writes outside UPLOAD_PATH
        # (path traversal).
        file_name = os.path.basename(uploaded_file.filename)
        if not file_name:
            # Form submitted without selecting a file: just show the empty form.
            return render_template('index.html', upload=False)
        # create the path for saving the uploaded file
        save_path = os.path.join(UPLOAD_PATH, file_name)
        uploaded_file.save(save_path)
        print(file_name, 'was uploaded successfully!')
        plate_number = easy_ocr(save_path, file_name)
        return render_template('index.html', upload=True, uploaded_image=file_name, text=plate_number)
    return render_template('index.html', upload=False)
# Run the Flask development server when executed directly.
# NOTE(review): debug=True enables the interactive debugger/reloader and must
# not be used in production.
if __name__ == "__main__":
    app.run(debug=True)
| babakmbm/Optical-Character-Recognition-OCR-SYSTEM | App.py | App.py | py | 1,289 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask_bootstrap.Bootstrap",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"l... |
9535093806 | #!/usr/bin/env python3
"""
RBE/CS549 Spring 2023: Computer Vision
Author(s):
Uday Sankar (usankar@wpi.edu)
Mechatronics, Robotics and Automation Engineering,
Worcester Polytechnic Institute
"""
# Dependencies:
# opencv, do (pip install opencv-python)
# skimage, do (apt install python-skimage)
# termcolor, do (pip install termcolor)
import torch
import torchvision
from torch.utils.tensorboard import SummaryWriter
from torchvision import datasets, transforms
from torch.optim import AdamW
from torchvision.datasets import CIFAR10
import cv2
import sys
import os
import numpy as np
import random
import skimage
import PIL
import os
import glob
import random
from skimage import data, exposure, img_as_float
import matplotlib.pyplot as plt
import time
from torchvision.transforms import ToTensor
import argparse
import shutil
import string
from termcolor import colored, cprint
import math as m
from tqdm.notebook import tqdm
# import Misc.ImageUtils as iu
from Network.Network import CIFAR10Model
from Misc.MiscUtils import *
from Misc.DataUtils import *
# setting the device as 'cuda'
# Run on the GPU when CUDA is available, otherwise fall back to the CPU; all
# models and batches are moved onto this device.
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
print(f'Device being used: {device}')
# global variables for testing; main() overwrites ModelNumber from --ModelNumber.
ModelNumber = 1
CheckPointPath = 'Checkpoints/BasicNet/14model.ckpt'  # BasicNet by default
# Don't generate pyc codes
sys.dont_write_bytecode = True
def GenerateBatch(TrainSet, TrainLabels, ImageSize, MiniBatchSize):
    """Sample a random mini-batch from the dataset.

    Draws MiniBatchSize (image, label) pairs uniformly at random (with
    replacement) from TrainSet and stacks them into tensors on the global
    `device`.

    Inputs:
    TrainSet - indexable dataset yielding (image_tensor, label) pairs
    TrainLabels - kept for interface symmetry; not used here
    ImageSize - kept for interface symmetry; not used here
    MiniBatchSize - number of samples in the batch

    Outputs:
    (images, labels) - stacked image tensor and label tensor on `device`
    """
    images = []
    labels = []
    for _ in range(MiniBatchSize):
        # Uniform sampling with replacement over the whole dataset.
        sample_idx = random.randint(0, len(TrainSet) - 1)

        ##########################################################
        # Add any standardization or data augmentation here!
        ##########################################################

        image, label = TrainSet[sample_idx]
        images.append(image)
        labels.append(torch.tensor(label))

    return torch.stack(images).to(device), torch.stack(labels).to(device)
def PrettyPrint(NumEpochs, DivTrain, MiniBatchSize, NumTrainSamples, LatestFile):
    """Echo the run configuration so the console log records how training
    was launched."""
    settings = (
        ('Number of Epochs Training will run for ', NumEpochs),
        ('Factor of reduction in training data is ', DivTrain),
        ('Mini Batch Size ', MiniBatchSize),
        ('Number of Training Images ', NumTrainSamples),
    )
    for label, value in settings:
        print(label + str(value))
    if LatestFile is not None:
        print('Loading latest checkpoint with the name ' + LatestFile)
def TrainOperation(TrainLabels, NumTrainSamples, ImageSize, ModelNumber,
                   NumEpochs, MiniBatchSize, SaveCheckPoint, CheckPointPath,
                   DivTrain, LatestFile, TrainSet, LogsPath, TestSet, TestLabels):
    """
    Train the selected CIFAR10 model, logging metrics to Tensorboard and
    checkpointing along the way.

    Inputs:
    TrainLabels - Labels corresponding to the training set
    NumTrainSamples - length(Train)
    ImageSize - Size of the image
    ModelNumber - Architecture selector passed to CIFAR10Model
    NumEpochs - Number of passes through the Train data
    MiniBatchSize - Size of each mini-batch
    SaveCheckPoint - Save a checkpoint every SaveCheckPoint iterations per epoch
    CheckPointPath - Path to save checkpoints/model
    DivTrain - Divide the data by this number for epoch calculation (debugging aid)
    LatestFile - Latest checkpoint file to continue training from, or None
    TrainSet - The training dataset
    LogsPath - Path to save Tensorboard logs
    TestSet, TestLabels - Held-out data evaluated once per epoch

    Outputs:
    Saves the trained network under CheckPointPath and logs under LogsPath
    """
    # Initialize the model and move it to the training device.
    model = CIFAR10Model(ModelNumber)
    model = model.to(device)

    ###############################################
    # Fill your optimizer of choice here!
    ###############################################
    Optimizer = AdamW(model.parameters(), lr=1e-3)

    # Tensorboard: create a summary writer and record the model graph using a
    # representative batch.
    Writer = SummaryWriter(LogsPath)
    Writer.add_graph(model, GenerateBatch(TrainSet, TrainLabels, ImageSize, MiniBatchSize)[0])

    if LatestFile is not None:
        CheckPoint = torch.load(CheckPointPath + LatestFile + '.ckpt')
        # Checkpoint names look like '<epoch>a<iter>model'; the digits before
        # 'a' give the epoch to resume from.
        StartEpoch = int(''.join(c for c in LatestFile.split('a')[0] if c.isdigit()))
        model.load_state_dict(CheckPoint['model_state_dict'])
        print('Loaded latest checkpoint with the name ' + LatestFile + '....')
    else:
        StartEpoch = 0
        print('New model initialized....')

    for Epochs in tqdm(range(StartEpoch, NumEpochs)):
        # Re-enable training mode (dropout/batch-norm updates): the held-out
        # evaluation below calls model.eval(), and the original never switched
        # back, so every epoch after the first trained in eval mode.
        model.train()
        train_acc_plot = 0
        train_loss_plot = 0
        NumIterationsPerEpoch = int(NumTrainSamples/MiniBatchSize/DivTrain)
        for PerEpochCounter in tqdm(range(NumIterationsPerEpoch)):
            Batch = GenerateBatch(TrainSet, TrainLabels, ImageSize, MiniBatchSize)

            # Forward pass, then a standard backward/step update.
            LossThisBatch = model.training_step(Batch)
            Optimizer.zero_grad()
            LossThisBatch.backward()
            Optimizer.step()

            # Save checkpoint every SaveCheckPoint iterations.
            if PerEpochCounter % SaveCheckPoint == 0:
                SaveName = CheckPointPath + str(Epochs) + 'a' + str(PerEpochCounter) + 'model.ckpt'
                torch.save({'epoch': Epochs,'model_state_dict': model.state_dict(),'optimizer_state_dict': Optimizer.state_dict(),'loss': LossThisBatch}, SaveName)
                print('\n' + SaveName + ' Model Saved...')

            # Accumulate per-batch metrics for the epoch-level plots; 'acc' and
            # 'loss' keys come from CIFAR10Model.validation_step.
            result = model.validation_step(Batch)
            train_acc_plot += result['acc']
            train_loss_plot += result['loss']
            model.epoch_end(Epochs*NumIterationsPerEpoch + PerEpochCounter, result)

        # Evaluate one held-out batch per epoch with gradients disabled.
        model.eval()
        test_loss_plot = 0
        test_acc_plot = 0
        with torch.no_grad():
            Batch_ = GenerateBatch(TestSet, TestLabels, ImageSize, MiniBatchSize)
            test_result = model.validation_step(Batch_)
            test_loss_plot += test_result["loss"]
            test_acc_plot += test_result["acc"]

        Writer.add_scalars('Training/Testing Loss', {'TrainLossPerEpoch': train_loss_plot/NumIterationsPerEpoch, 'TestLossPerEpoch': test_loss_plot}, Epochs)
        Writer.add_scalars('Training/Testing Accuracy', {'TrainAccuracyPerEpoch': train_acc_plot/NumIterationsPerEpoch, 'TestAccuracyPerEpoch': test_acc_plot}, Epochs)
        # Flush so Tensorboard updates without waiting for many iterations.
        Writer.flush()

        # Save model every epoch
        SaveName = CheckPointPath + str(Epochs) + 'model.ckpt'
        torch.save({'epoch': Epochs,'model_state_dict': model.state_dict(),'optimizer_state_dict': Optimizer.state_dict(),'loss': LossThisBatch}, SaveName)
        print('\n' + SaveName + ' Model Saved...')
def main():
    """
    Parse command-line arguments, build the CIFAR10 datasets, and launch
    training for the selected architecture.
    """
    # Parse Command Line arguments
    Parser = argparse.ArgumentParser()
    Parser.add_argument('--CheckPointPath', default='./Checkpoints/', help='Path to save Checkpoints, Default: ../Checkpoints/')
    Parser.add_argument('--NumEpochs', type=int, default=15, help='Number of Epochs to Train for, Default:15')
    Parser.add_argument('--DivTrain', type=int, default=1, help='Factor to reduce Train data by per epoch, Default:1')
    Parser.add_argument('--MiniBatchSize', type=int, default=32, help='Size of the MiniBatch to use, Default:32')
    Parser.add_argument('--LoadCheckPoint', type=int, default=0, help='Load Model from latest Checkpoint from CheckPointsPath?, Default:0')
    Parser.add_argument('--LogsPath', default='./Logs/', help='Path to save Logs for Tensorboard, Default=Logs/')
    Parser.add_argument('--ModelNumber', default=1, help='Model Type: 1-BasicNet, 2-BatchNormNet, 3-ResNet, 4-ResNeXt, 5-DenseNet, Default:1')

    Args = Parser.parse_args()
    # NOTE: the original reset NumEpochs = 15 further down, silently ignoring
    # the --NumEpochs flag; the CLI value is now honored.
    NumEpochs = Args.NumEpochs
    DivTrain = float(Args.DivTrain)
    MiniBatchSize = Args.MiniBatchSize
    LoadCheckPoint = Args.LoadCheckPoint
    CheckPointPath = Args.CheckPointPath
    LogsPath = Args.LogsPath

    model_number_dict = {1: 'BasicNet', 2: 'BatchNormNet', 3: 'ResNet', 4: 'ResNeXt', 5: 'DenseNet'}
    global ModelNumber
    ModelNumber = int(Args.ModelNumber)
    # Keep checkpoints and logs for each architecture in their own subfolder.
    CheckPointPath = CheckPointPath + model_number_dict[ModelNumber] + '/'
    LogsPath = LogsPath + model_number_dict[ModelNumber] + '/'

    TrainSet = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=ToTensor())
    TestSet = CIFAR10(root='data/', train=False, transform=ToTensor())
    TestLabelsPath = "./TxtFiles/LabelsTest.txt"
    TestLabels = ReadLabels(TestLabelsPath)

    # Setup all needed parameters including file reading
    SaveCheckPoint, ImageSize, NumTrainSamples, TrainLabels, NumClasses = SetupAll(TestLabelsPath, CheckPointPath)

    # Find Latest Checkpoint File
    if LoadCheckPoint == 1:
        LatestFile = FindLatestModel(CheckPointPath)
    else:
        LatestFile = None

    # Pretty print stats
    PrettyPrint(NumEpochs, DivTrain, MiniBatchSize, NumTrainSamples, LatestFile)

    TrainOperation(TrainLabels, NumTrainSamples, ImageSize, ModelNumber,
                   NumEpochs, MiniBatchSize, SaveCheckPoint, CheckPointPath,
                   DivTrain, LatestFile, TrainSet, LogsPath, TestSet, TestLabels)
# Launch training only when this file is executed as a script.
if __name__ == '__main__':
    main()
| udaysankar01/Image-Classification-using-ResNet-ResNeXt-and-DenseNet | Train.py | Train.py | py | 11,349 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.cuda.is_available",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "sys.dont_write_b... |
70276275707 |
"""
多头注意力机制:
每个头开始从词义层面分割输出的张量,也就是每个头都想获得一组Q,K,V进行注意力机制的计算,
但是句子中的每个词的表示只获得一部分,
也就是只分割了最后一维的词嵌入向量. 这就是所谓的多头.
将每个头的获得的输入送到注意力机制中, 就形成了多头注意力机制
多头注意力机制的作用:
这种结构设计能让每个注意力机制去优化每个词汇的不同特征部分,从而均衡同一种注意力机制可能产生的偏差,
让词义拥有来自更多元的表达,从而提升模型效果
"""
import copy
import torch
import math
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class MultiHeadAttention(nn.Module):
    """Multi-head attention.

    Each head attends over its own head-sized slice of the embedding, so the
    heads can specialize on different aspects of each token; their outputs
    are concatenated and projected back to ``embedding_dim``. This balances
    the bias a single attention pattern could introduce and gives each token
    a richer representation.
    """

    def __init__(self, head, embedding_dim, dropout=0.1):
        """
        head -- number of attention heads
        embedding_dim -- token embedding size; must be divisible by `head`
        dropout -- zeroing probability applied to the attention weights
        """
        super(MultiHeadAttention, self).__init__()
        # Each head receives an equal share (d_k features) of the embedding.
        assert embedding_dim % head == 0
        self.d_k = embedding_dim // head
        self.head = head
        # Four identical projections: one each for Q, K, V plus the final
        # projection applied after the heads are concatenated again.
        self.linears = self.clones(nn.Linear(embedding_dim, embedding_dim), 4)
        self.attn = None
        self.dropout = nn.Dropout(p=dropout)

    def clones(self, module, N):
        """Return a ModuleList holding N independent deep copies of `module`."""
        return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])

    def subsequentMask(self, size):
        """Build a (1, size, size) causal mask: entry (i, j) is 1 when
        position i may attend to position j (j <= i), else 0."""
        # Strict upper triangle marks the "future" positions...
        future = np.triu(np.ones((1, size, size)), k=1).astype('uint8')
        # ...and 1 - future flips it into the allowed lower-triangular mask.
        return torch.from_numpy(1 - future)

    def attention(self, query, key, value, mask=None, dropout=None):
        """Scaled dot-product attention; returns (output, attention weights)."""
        # Scale by sqrt(d_k) so the dot products stay in a softmax-friendly range.
        d_k = query.size(-1)
        scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
        if mask is not None:
            # Blocked positions get -1e9 so softmax drives their weight to ~0.
            scores = scores.masked_fill(mask == 0, -1e9)
        weights = F.softmax(scores, dim=-1)
        if dropout is not None:
            weights = dropout(weights)
        return torch.matmul(weights, value), weights

    def forward(self, query, key, value, mask=None):
        """Project Q/K/V, run per-head attention, and merge the heads back."""
        if mask is not None:
            # Add a leading dim so the same mask broadcasts over every head.
            mask = mask.unsqueeze(0)
        batch_size = query.size(0)

        # Project each input, split the last dim into (head, d_k), and move
        # the head axis in front of the sequence axis so attention relates
        # sequence positions within each head.
        projected = []
        for layer, tensor in zip(self.linears, (query, key, value)):
            shaped = layer(tensor).view(batch_size, -1, self.head, self.d_k)
            projected.append(shaped.transpose(1, 2))
        query, key, value = projected

        x, self.attn = self.attention(query, key, value, mask, dropout=self.dropout)

        # Undo the head split: (batch, head, seq, d_k) -> (batch, seq, head*d_k).
        # contiguous() is required before view() after the transpose.
        x = x.transpose(1, 2).contiguous().view(batch_size, -1, self.head * self.d_k)

        # Final output projection over the re-concatenated heads.
        return self.linears[-1](x)
| Jacquelin803/Transformers | transformerArc/MultiHeadAttention.py | MultiHeadAttention.py | py | 7,709 | python | zh | code | 1 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_nu... |
35765418423 | '''
Created on Apr 21, 2014
@author: Borja
'''
import os.path
import xlrd
import __data__
class XslReader(object):
    """Reads indicator/organization metadata and raw value matrices from the
    Excel workbooks stored under the package data directory."""

    def __init__(self):
        # Make sure the data directory exists before any workbook is opened.
        if not os.path.exists(__data__.path()):
            os.makedirs(__data__.path())
        self._data_path = __data__.path()

    def load_indicator_sheet(self, file_name):
        """Map the "Indicators" sheet header row (row 0) to its first data
        row (row 1) and return the resulting dict."""
        inpath = os.path.join(self._data_path, os.path.basename(file_name))
        workbook = xlrd.open_workbook(inpath)
        worksheet = workbook.sheet_by_name("Indicators")
        data_dictionary = {}
        for curr_col in range(0, worksheet.ncols):
            # NOTE(review): .decode() assumes byte-string cell values
            # (Python 2 era xlrd); on Python 3 cell values are str — confirm.
            field_name = worksheet.cell_value(0, curr_col).decode("UTF-8")
            data_dictionary[field_name] = worksheet.cell_value(1, curr_col)
        return data_dictionary

    def load_organization_sheet(self, file_name):
        """Return the fixed-layout "Organization" sheet fields as a dict."""
        inpath = os.path.join(self._data_path, os.path.basename(file_name))
        workbook = xlrd.open_workbook(inpath)
        worksheet = workbook.sheet_by_name("Organization")
        data_dictionary = {}
        # Field positions are fixed by the workbook template.
        data_dictionary["Name"] = worksheet.cell_value(1, 1).decode("UTF-8")
        data_dictionary["Description_EN"] = worksheet.cell_value(2, 1).decode("UTF-8")
        data_dictionary["Description_ES"] = worksheet.cell_value(3, 1).decode("UTF-8")
        data_dictionary["Description_FR"] = worksheet.cell_value(4, 1).decode("UTF-8")
        data_dictionary["URL"] = worksheet.cell_value(5, 1).decode("UTF-8")
        data_dictionary["Logo"] = worksheet.cell_value(6, 1).decode("UTF-8")
        data_dictionary["License_Name"] = worksheet.cell_value(9, 1).decode("UTF-8")
        data_dictionary["License_Description"] = worksheet.cell_value(10, 1).decode("UTF-8")
        data_dictionary["License_Republish"] = worksheet.cell_value(11, 1)
        data_dictionary["License_URL"] = worksheet.cell_value(12, 1).decode("UTF-8")
        return data_dictionary

    def load_xsl(self, file_name):
        """Return the "Values" sheet as an nrows x ncols matrix; text cells
        are UTF-8 encoded, other cell types are kept as xlrd returns them."""
        inpath = os.path.join(self._data_path, os.path.basename(file_name))
        workbook = xlrd.open_workbook(inpath)
        worksheet = workbook.sheet_by_name("Values")
        # range() replaces the Python-2-only xrange, which raised NameError on
        # Python 3; range iterates identically on both versions.
        data_matrix = [[0 for _ in range(worksheet.ncols)] for _ in range(worksheet.nrows)]
        for curr_row in range(0, worksheet.nrows):
            for curr_col in range(0, worksheet.ncols):
                if worksheet.cell_type(curr_row, curr_col) == 1:  # text cell
                    data_matrix[curr_row][curr_col] = worksheet.cell_value(curr_row, curr_col).encode("UTF-8")
                else:
                    data_matrix[curr_row][curr_col] = worksheet.cell_value(curr_row, curr_col)
        return data_matrix
| weso/landportal-importers | RAWImporter/es/weso/raw/ExcelManagement/excel_reader.py | excel_reader.py | py | 2,930 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.path.exists",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "__data__.path",
"li... |
8261658572 | import functions
import utils
from functions import greet_user
# import ecommerce.shipping
# from ecommerce.shipping import calc_shipping
from ecommerce import shipping

shipping.calc_shipping()

import math
# Google search python 3 math module

price = 10
# variable must be lower case - booleans must be capitalized
is_published = False

# birth_year = input('Birth year: ')
# age = 2019 - birth_year
# # print(age)
# print(type(birth_year))

# weight = input('What is your weight: ')
# converted = weight * .45
# print(converted)

# message = '''
# This is a multi line string
# From John
# '''

course = 'Python for Beginners'
# Slicing [0:3] returns the characters at indexes 0, 1 and 2
# print(course[0:3])
# print(course[1:-1])

# string formatting
first = 'John'
last = 'Smith'
message = first + last + ' is a coder'
message = first + ' [' + last + '] is a coder'
# msg = f'{first} [{last}] is a coder'
# print(message)

course = "This is the Pourse"
# print(len(course))
# print(course.upper())
# print(course.find('P'))
# print(course.replace('P', 'C'))
# print(course.replace('P', 'Best C'))
print('Pourse' in course)

# Floor division drops the fractional part
print(10 // 3)
# modulus operator returns remainder
print(10 % 3)
# 10 to the power of 3
print(10 ** 3)
# round() to the nearest integer
x = 2.9
print(round(x))

numbers5 = [7, 16, 3, 1, 11]
numbers5.append(20)
numbers5.insert(1, 33)
print("index", numbers5.index(3))
# numbersNew = numbers5.copy()
# print(numbersNew)
print(numbers5)

greet_user("John", 18)
functions.greet_user("Sally", 20)

# BUG FIX: the original name `max` shadowed the builtin max() function.
maximum = utils.find_max([7, 10, 11, 2, 9, 6])
print(maximum)
| Rosenmatt1/Python-101 | App.py | App.py | py | 1,575 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "ecommerce.shipping.calc_shipping",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "ecommerce.shipping",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "functions.greet_user",
"line_number": 76,
"usage_type": "call"
},
{
"api_name":... |
36691623039 | import streamlit as st
from streamlit_chat import message
from streamlit_extras.colored_header import colored_header
from streamlit_extras.add_vertical_space import add_vertical_space
from hugchat import hugchat
from document_processing import index, chain
# Basic page chrome.
st.set_page_config(page_title="HugChat - An LLM-powered Streamlit app")
st.title('🎈 PF1-Chatbot')
st.write('Hello student!')
# Sidebar with app credits.
with st.sidebar:
    st.title('🤗💬 HugChat App')
    st.markdown('''
    ''')
    add_vertical_space(5)
    st.write('Made by Matt')
# Seed the chat history on first page load (Streamlit reruns the whole
# script on every interaction, so state must live in st.session_state).
if 'generated' not in st.session_state:
    st.session_state['generated'] = ["I'm HugChat, How may I help you?"]
if 'past' not in st.session_state:
    st.session_state['past'] = ['Hi!']
# Page layout containers: input on top, responses below the divider.
input_container = st.container()
colored_header(label='', description='', color_name='blue-30')
response_container = st.container()
# User input
## Function for taking user provided prompt as input
def get_text():
    """Render the chat input box and return its current value."""
    return st.text_input("You: ", "", key="input")
## Applying the user input box
# Rendered inside the top layout container defined above.
with input_container:
    user_input = get_text()
# Response output
## Function for taking user prompt as input followed by producing AI generated responses
def generate_response(prompt):
    """Send *prompt* to a fresh HugChat bot and return its reply.

    NOTE(review): not called anywhere in this script; get_answer() is
    used for the actual responses.
    """
    bot = hugchat.ChatBot()
    return bot.chat(prompt)
def get_similar_docs(query, k=2, score=False):
    """Fetch the *k* most similar documents, with or without scores."""
    if score:
        return get_similar_docs_with_score(query, k=k)
    return get_similar_docs_without_score(query, k=k)
def get_similar_docs_with_score(query, k=3):
    """Similarity search over the index returning (document, score) pairs."""
    return index.similarity_search_with_score(query, k=k)
def get_similar_docs_without_score(query, k=3):
    """Similarity search over the index returning documents only."""
    return index.similarity_search(query, k=k)
def get_answer(query):
    """Answer *query* by running the QA chain over its most similar docs."""
    docs = get_similar_docs(query)
    return chain.run(input_documents=docs, question=query)
# Main interaction: answer the prompt if one was entered, else nudge the user.
if user_input:
    answer = get_answer(user_input)
    with response_container:
        st.write(answer)  # Use st.write() to display the message
else:
    st.write("Please enter a prompt.")  # Use st.write() for the prompt message
| Ubond-edu/PF1-Chatbot | streamlit_app.py | streamlit_app.py | py | 2,211 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "streamlit.set_page_config",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "streamlit.title",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "streamlit.si... |
9773551663 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 2 19:36:10 2020
@author: lnajt
"""
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 15 11:57:08 2018
@author: MGGG
"""
import networkx as nx
import random
import numpy as np
##############
'''Wilsons Algorithm'''
def random_spanning_tree_wilson(graph):
    '''The David Wilson random spanning tree algorithm.

    Grows a tree from a random root: repeatedly random-walk from an
    unvisited node until the partial tree is hit, loop-erase the walk and
    graft the branch on.  Node attributes are copied onto the returned
    DiGraph; edges point along the loop-erased branches.
    '''
    tree_edges = []
    root = random.choice(list(graph.nodes()))
    hitting_set = set ( [ root])
    allowable_set = set(graph.nodes()).difference(hitting_set)
    len_graph = len(graph)
    len_hitting_set = 1
    while len_hitting_set < len_graph:
        # Walk from a random unvisited node until the current tree is hit,
        # then erase the walk's loops and add the resulting branch.
        start_node = random.choice(list(allowable_set))
        trip = random_walk_until_hit(graph, start_node, hitting_set)
        new_branch, branch_length = loop_erasure(trip)
        for i in range(branch_length - 1):
            tree_edges.append( [ new_branch[i], new_branch[i + 1]])
        # Every branch node except the hit endpoint is newly absorbed.
        for v in new_branch[:-1]:
            hitting_set.add(v)
            len_hitting_set += 1
            allowable_set.remove(v)
    tree = nx.DiGraph()
    # Copy each node together with its attribute dictionary.
    for node in graph.nodes:
        node_attributes = list(graph.nodes[node].keys())
        tree.add_node(node)
        for attr in node_attributes:
            tree.nodes[node][attr] = graph.nodes[node][attr]
    tree.add_edges_from(tree_edges)
    return tree
def simple_random_walk_variable_length(graph, node, walk_length):
    '''Random-walk from *node* until walk_length distinct vertices are seen.

    Returns (trip, visited): the full step sequence (loops included) and
    the set of distinct vertices it touched.
    '''
    visited = {node}
    trip = [node]
    current = node
    while len(visited) < walk_length:
        current = random.choice(list(graph.neighbors(current)))
        trip.append(current)
        visited.add(current)
    return trip, visited
def forward_tree_variable_length(graph, node, walk_length):
    '''Forward-tree step of Broder's algorithm for a walk of walk_length
    distinct vertices.

    Each vertex (except the start) contributes the edge from its first
    appearance in the walk back to the step that discovered it.
    Returns (edges, visited).
    '''
    walk, visited = simple_random_walk_variable_length(graph, node, walk_length)
    edges = []
    for vertex in list(visited):
        if vertex != walk[0]:
            first_seen = walk.index(vertex)
            edges.append([walk[first_seen], walk[first_seen - 1]])
    return edges, visited
def random_tree_variable_length(graph, walk_length):
    '''Run Broder's algorithm to produce a random tree on walk_length
    vertices of *graph*.'''
    start = random.choice(list(graph.nodes()))
    edges, visited = forward_tree_variable_length(graph, start, walk_length)
    tree = nx.DiGraph()
    tree.add_nodes_from(visited)
    tree.add_edges_from(edges)
    return tree
def random_spanning_tree_wilson_with_starting(graph, starting_tree):
    # The David Wilson random spanning tree algorithm, seeded with an
    # already-built partial tree instead of a single random root.
    tree_edges = list(starting_tree.edges())
    hitting_set = set(starting_tree.nodes())
    allowable_set = set(graph.nodes()).difference(hitting_set)
    len_graph = len(graph)
    len_hitting_set = len(hitting_set)
    while len_hitting_set < len_graph:
        # Same loop-erased-walk growth step as random_spanning_tree_wilson.
        start_node = random.choice(list(allowable_set))
        trip = random_walk_until_hit(graph, start_node, hitting_set)
        new_branch, branch_length = loop_erasure(trip)
        #print(branch_length)
        for i in range(branch_length - 1):
            tree_edges.append( [ new_branch[i], new_branch[i + 1]])
        for v in new_branch[:-1]:
            hitting_set.add(v)
            len_hitting_set += 1
            allowable_set.remove(v)
    tree = nx.DiGraph()
    # NOTE(review): unlike random_spanning_tree_wilson, node attributes are
    # not copied here and isolated nodes of starting_tree would be dropped;
    # confirm callers only pass trees whose nodes all carry edges.
    tree.add_edges_from(tree_edges)
    return tree
def random_walk_until_hit(graph, start_node, hitting_set):
    '''Random walk from start_node, stopping at the first arrival in
    hitting_set.

    :graph: input graph
    :start_node: where the walk starts (returned alone if already in the set)
    :hitting_set: the absorbing set, i.e. the tree built so far
    Returns the full list of visited nodes, endpoint included.
    '''
    trip = [start_node]
    while trip[-1] not in hitting_set:
        trip.append(random.choice(list(graph.neighbors(trip[-1]))))
    return trip
def loop_erasure(trip):
    '''Erase loops from a walk.

    :trip: list of node names; trip[-1] is the absorbing endpoint.
    Returns (erased_walk, len(erased_walk)): the chronological walk left
    after erasing every cycle at its last completion.
    '''
    def last_occurrence(vertex):
        # Index of the final appearance of `vertex` in the trip.
        return len(trip) - 1 - trip[::-1].index(vertex)

    kept_indices = [last_occurrence(trip[0])]
    extra_steps = 0
    while trip[kept_indices[-1]] != trip[-1]:
        kept_indices.append(last_occurrence(trip[kept_indices[-1]]) + 1)
        extra_steps += 1
    erased_walk = [trip[i] for i in kept_indices]
    return (erased_walk, extra_steps + 1)
def statistics():
    # Demo: sample 100 Wilson spanning trees of a 20x20 grid graph.
    # NOTE(review): the name shadows the stdlib `statistics` module, and the
    # W_trees list is built but never returned or used.
    samples = 100
    graph = nx.grid_graph([20,20])
    W_trees = []
    for i in range(samples):
        W_trees.append( nx.to_undirected(random_spanning_tree_wilson(graph)))
| ElleNajt/TinyProjects | boundaryofUST.py | boundaryofUST.py | py | 4,806 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "random.choice",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "networkx.DiGraph",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "random.choice",
"lin... |
20538374789 | # https://leetcode.com/problems/counting-bits/
"""
Time complexity:- O(N)
Space Complexity:- O(N)
"""
from typing import List
class Solution:
    def countBits(self, n: int) -> List[int]:
        """Return a list where entry i is the number of 1 bits in i, 0..n.

        DP over the most recent power of two: bits(i) = 1 + bits(i - p)
        where p is the largest power of two <= i.  O(n) time and space.
        """
        counts = [0] * (n + 1)
        power = 1  # largest power of two seen so far
        for value in range(1, n + 1):
            if power * 2 == value:  # value is itself a power of two
                power = value
            counts[value] = 1 + counts[value - power]
        return counts
| Amit258012/100daysofcode | Day51/counting_bits.py | counting_bits.py | py | 800 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 13,
"usage_type": "name"
}
] |
42890816740 | import time
from pathlib import Path
import shutil
import torch
import torch.nn
from torch.utils.tensorboard import SummaryWriter
import torch.backends.cudnn as cudnn
from . import utils
class Bone:
    """Minimal training harness: runs train/val epochs, tracks the best
    validation metric, checkpoints the best weights and logs batch/epoch
    statistics to TensorBoard.
    """
    def __init__(self,
                 model,
                 datasets,
                 criterion,
                 optimizer,
                 scheduler=None,
                 scheduler_after_ep=True,
                 early_stop_epoch=None,
                 metric_fn=None,
                 metric_increase=False,
                 batch_size=8,
                 num_workers=4,
                 resume=False,
                 data_parallel=False,
                 seed=0,
                 weights_path='weights/best_model.pth',
                 log_dir='logs/experiment'):
        self.model = model
        self.criterion = criterion
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.scheduler_after_ep = scheduler_after_ep
        self.early_stop_epoch = early_stop_epoch
        # Fall back to the loss itself when no separate metric is supplied.
        self.metric_fn = criterion if metric_fn is None else metric_fn
        self.metric_increase = metric_increase
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.resume = resume
        self.data_parallel = data_parallel
        self.seed = seed
        self.weights_path = Path(weights_path)
        self.log_dir = Path(log_dir)
        self.epochs_count = 0
        self.logger = utils.get_logger()
        self.recreate_experiment_folders(from_scratch=False)
        utils.set_seed(seed)

        self.dataloaders = {  # TODO: automatically handle all in loop
            'train': torch.utils.data.DataLoader(datasets['train'],
                                                 batch_size=batch_size,
                                                 shuffle=True,
                                                 num_workers=num_workers),
            'val': torch.utils.data.DataLoader(datasets['val'],
                                               batch_size=batch_size,
                                               shuffle=False,
                                               num_workers=num_workers)
        }

        # Pick the device before resuming so the checkpoint can be mapped
        # onto it directly.
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'

        if self.resume:
            if not self.weights_path.exists():
                self.logger.error('Resume is not possible, no weights')
            else:
                self.logger.info(f'Resuming from {self.weights_path}')
                # map_location lets CUDA-saved checkpoints load on CPU-only
                # machines (and vice versa).
                checkpoint = torch.load(self.weights_path,
                                        map_location=self.device)
                self.model.load_state_dict(checkpoint)

        # BUG FIX: the model was never moved to the device although step()
        # moves the inputs there (was flagged by a TODO in the original).
        self.model = self.model.to(self.device)
        if self.device == 'cuda' and data_parallel:
            self.model = torch.nn.DataParallel(self.model)

    def recreate_experiment_folders(self, from_scratch=False):
        """(Re)create the weights folder and TensorBoard writers; wipe any
        previous run when from_scratch is True."""
        if from_scratch:
            if self.weights_path.parent.exists():
                self.weights_path.unlink()
            if self.log_dir.exists():
                shutil.rmtree(self.log_dir)

        self.weights_path.parent.mkdir(exist_ok=True)
        self.log_dir.mkdir(parents=True, exist_ok=True)
        self.phase_writer = {
            'train': SummaryWriter(self.log_dir / 'train'),
            'val': SummaryWriter(self.log_dir / 'val')
        }

    def step(self, inputs, labels, phase):
        """One optimization (or evaluation) step on a single batch.

        Returns (loss, metric) as numpy scalars; backpropagates and steps
        the optimizer only in the 'train' phase.
        """
        inputs = inputs.to(self.device)
        labels = labels.to(self.device)
        self.optimizer.zero_grad()

        # Gradients are only tracked while training.
        with torch.set_grad_enabled(phase == 'train'):
            outputs = self.model(inputs)
            loss = self.criterion(outputs, labels)
            metric = self.metric_fn(outputs, labels)

            if phase == 'train':
                loss.backward()
                self.optimizer.step()

        return loss.cpu().data.numpy(), metric.cpu().data.numpy()

    def epoch(self, epoch_num, phase):
        """One full pass over the *phase* dataloader.

        Logs per-batch and per-epoch loss/metric; returns the dataset-
        averaged (loss, metric).
        """
        running_loss = 0
        running_metric = 0
        pbar = utils.get_pbar(self.dataloaders[phase],
                              f'{phase} {epoch_num + 1}/{self.epochs_count}')

        # Epoch-counting schedulers step once before validation.
        if phase == 'val' and self.scheduler and not self.scheduler_after_ep:
            self.scheduler.step()

        for i, (inputs, labels) in enumerate(self.dataloaders[phase]):
            loss, metric = self.step(inputs, labels, phase)
            # Weight batch statistics by batch size so the final division
            # yields a correct dataset average (last batch may be smaller).
            running_loss += loss * inputs.size(0)
            running_metric += metric * inputs.size(0)
            utils.update_pbar(pbar, loss, metric)

            step = epoch_num * len(self.dataloaders['train']) + i
            self.phase_writer[phase].add_scalar('batch/loss', loss,
                                                global_step=step)
            self.phase_writer[phase].add_scalar('batch/metric', metric,
                                                global_step=step)

        running_loss /= len(self.dataloaders[phase].dataset)
        running_metric /= len(self.dataloaders[phase].dataset)
        utils.update_pbar(pbar, running_loss, running_metric)
        pbar.close()

        self.phase_writer[phase].add_scalar('epoch/loss', running_loss,
                                            global_step=epoch_num)
        self.phase_writer[phase].add_scalar('epoch/metric', running_metric,
                                            global_step=epoch_num)
        if phase == 'val':
            # Metric-driven schedulers (e.g. ReduceLROnPlateau) step on the
            # epoch's validation metric.
            if self.scheduler and self.scheduler_after_ep:
                self.scheduler.step(running_metric)
            lr = utils.get_lr(self.optimizer)
            self.phase_writer[phase].add_scalar('epoch/lr', lr,
                                                global_step=epoch_num)
        return running_loss, running_metric

    def fit(self, epochs_count, from_scratch=False):
        """Train for up to epochs_count epochs with best-checkpoint saving
        and optional early stopping on the validation metric."""
        if from_scratch:
            self.recreate_experiment_folders(from_scratch)
        start_time = time.time()
        self.epochs_count = epochs_count
        epoch_without_improvement = 0
        best_metric = None

        def is_better(new_m, old_m, eps=1e-5):
            # None means "no best yet"; eps guards against float noise.
            if old_m is None:
                return True
            return new_m > old_m + eps if self.metric_increase else \
                new_m < old_m - eps

        should_stop = False
        for epoch_num in range(epochs_count):
            for phase in ['train', 'val']:  # TODO: test phase
                if phase == 'train':
                    self.model.train()
                else:
                    self.model.eval()

                loss, metric = self.epoch(epoch_num, phase)

                if phase == 'val':
                    if is_better(metric, best_metric):
                        best_metric = metric
                        # Save the unwrapped module so the checkpoint loads
                        # without DataParallel.
                        if self.data_parallel:
                            torch.save(self.model.module.state_dict(),
                                       self.weights_path)
                        else:
                            torch.save(self.model.state_dict(),
                                       self.weights_path)
                        epoch_without_improvement = 0
                        self.logger.debug('Val metric improved')
                    else:
                        epoch_without_improvement += 1
                        self.logger.debug(f'Val metric did not improve for '
                                          f'{epoch_without_improvement} epochs')

                if self.early_stop_epoch is not None and\
                        epoch_without_improvement == self.early_stop_epoch:
                    self.logger.info('Early stopping')
                    # BUG FIX: a bare `break` only left the phase loop, so
                    # training continued after logging "Early stopping".
                    should_stop = True
                    break
            if should_stop:
                break

        time_elapsed = time.time() - start_time
        self.logger.info(f'Training complete in {time_elapsed/60:.0f}m'
                         f' {time_elapsed%60:.0f}s')
        self.logger.info(f'Best val metric: {best_metric:.4f}')
| EvgenyKashin/backbone | back/bone.py | bone.py | py | 7,736 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "torch.utils",
... |
33146835188 | from flask import Flask, jsonify, redirect, url_for, request
import os
import pymysql
app = Flask(__name__)
@app.route('/')
def index():
    """Landing endpoint; responds with a greeting and permissive CORS."""
    payload = jsonify({"Choo Choo": "Welcome to your Flask app 🚅"})
    payload.headers.add("Access-Control-Allow-Origin", "*")
    return payload
@app.route('/add', methods = ['POST', 'GET'])
def add():
    """Echo the submitted 'nm' form field back as JSON (CORS enabled)."""
    user = request.form['nm']
    payload = jsonify({"data": user})
    payload.headers.add("Access-Control-Allow-Origin", "*")
    return payload
@app.route('/insert', methods=['POST'])
def insert_row():
    """Toggle a marked system: delete the row if `name` already exists,
    otherwise insert a new (name, ship, base) row.

    Expects form fields: name, ship, base.
    """
    name = request.form['name']
    ship = request.form['ship']
    base = request.form['base']
    insert_query = "INSERT INTO marked_systems (name, ship, base) VALUES (%s, %s, %s)"
    # SECURITY NOTE(review): credentials are hard-coded here and in
    # get_data(); move them to environment variables.
    connection = pymysql.connect( host='containers-us-west-32.railway.app', user='root', passwd='Jyfcd452Xe3tmMsFLYDY', port=5522, db='railway' )
    with connection.cursor() as cursor:
        select_query = "SELECT * FROM `marked_systems` WHERE `name`=%s"
        cursor.execute(select_query, (name,))
        result = cursor.fetchall()
        print(result)
        if result:
            print("result found, deleting")
            # BUG FIX: the original re-executed the SELECT here instead of
            # the DELETE it had just built, so rows were never removed.
            delete_query = "DELETE FROM marked_systems WHERE `name`=%s"
            cursor.execute(delete_query, (name,))
        else:
            print("no result found, creating")
            cursor.execute(insert_query, (name, ship, base))
        connection.commit()
    response = jsonify({'status': 'success'})
    response.headers.add("Access-Control-Allow-Origin", "*")
    return response
@app.route('/data')
def get_data():
    """Return every marked_systems row as JSON (CORS enabled)."""
    connection = pymysql.connect( host='containers-us-west-32.railway.app', user='root', passwd='Jyfcd452Xe3tmMsFLYDY', port=5522, db='railway' )
    with connection.cursor() as cursor:
        cursor.execute("SELECT * FROM `marked_systems`")
        rows = cursor.fetchall()
        print(rows)
    response = jsonify({"data": rows})
    response.headers.add("Access-Control-Allow-Origin", "*")
    return response
if __name__ == '__main__':
    # PORT is injected by the hosting environment; defaults to 5000 locally.
    app.run(debug=True, port=os.getenv("PORT", default=5000))
| zpdunlap/flask | main.py | main.py | py | 2,425 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
... |
73662491386 | from multiprocessing import Pool
import time
from datetime import datetime
from typing import Any
from tqdm import tqdm
from tqdm.contrib.concurrent import process_map
import itertools
from classifiers import project_algorithms
from data_ingestion import ingestion_functions
from sklearn.model_selection import KFold, GridSearchCV
from sklearn import metrics
from dataclasses import dataclass
import joblib
import os
@dataclass
class Result:
    # Identifies this (algorithm, dataset, trial) combination.
    algo_name: str
    dataset_name: str
    trial_num: int
    # Train split plus the tuned classifier's predictions on it.
    X_train: Any
    Y_train: Any
    Y_train_pred: Any
    # Test split plus the tuned classifier's predictions on it.
    X_test: Any
    Y_test: Any
    Y_test_pred: Any
    # Runtime, in seconds
    runtime: float
    """A pandas DataFrame that can be imported
    """
    # GridSearchCV outputs: index/params of the winning candidate and the
    # full cross-validation results table (cv_results_).
    best_index_: int
    best_params: Any
    cv_results_: Any
def run_trial(with_params) -> Result:
    """Run one grid-search trial for a single (dataset, algorithm) pair.

    Args:
        with_params: tuple of (data_fetcher, algorithm, num_trial);
            data_fetcher() -> (X_train, X_test, Y_train, Y_test) and
            algorithm() -> (estimator, param_grid).

    Returns:
        Result: predictions on both splits, runtime, and the GridSearchCV
        cross-validation tables for the tuned classifier.
    """
    start = datetime.now()
    data_fetcher, algorithm, num_trial = with_params
    (algo, params) = algorithm()
    X_train, X_test, Y_train, Y_test = data_fetcher()
    # GridSearchCV automatically does 5 kfold splits.
    search_results = GridSearchCV(algo, params, scoring={
        'AUC': 'roc_auc',
        'Accuracy': metrics.make_scorer(metrics.accuracy_score),
        'F1': 'f1',
        'Precision': 'precision',
        'Recall': 'recall',
        'MCC': metrics.make_scorer(metrics.matthews_corrcoef)
    }, refit='Accuracy')

    search_results.fit(X_train, Y_train)

    # Refit the best hyper-parameters and predict on both splits.
    opt_classifier = search_results.best_estimator_
    opt_classifier.fit(X_train, Y_train)
    Y_train_pred = opt_classifier.predict(X_train)
    Y_test_pred = opt_classifier.predict(X_test)

    # Get metrics for the classifiers
    end = datetime.now()
    runtime = (end - start).total_seconds()
    return Result(
        algo_name=algorithm.__name__,
        dataset_name=data_fetcher.__name__,
        trial_num=num_trial,
        X_train = X_train,
        Y_train = Y_train,
        Y_train_pred = Y_train_pred,
        X_test=X_test,
        Y_test = Y_test,
        Y_test_pred = Y_test_pred,
        runtime=runtime,
        best_index_=search_results.best_index_,
        best_params=search_results.best_params_,
        cv_results_ = search_results.cv_results_
    )
def run_all_trials():
    """Run every (dataset, algorithm, trial#) combination concurrently and
    pickle each Result under ./classifier_cache/<timestamp>/...
    """
    trial_combinations = list(
        itertools.product(ingestion_functions, project_algorithms, list(range(5)))
    )

    # Runs all concurrently on different CPUs
    # My M1 Macbook Air has 8 cores, so 8 + 4 = 12
    YOUR_CPU_CORES = 8
    results = process_map(run_trial, trial_combinations, max_workers=YOUR_CPU_CORES + 4)
    # Single-threaded for easier debugging
    # results = [run_trial(tc) for tc in trial_combinations]

    timestamp = int(time.time())
    for result in tqdm(results, desc="Saving classifiers to disk..."):
        # Save the classifier to disk for use in a Jupyter Notebook
        folder_path = f"./classifier_cache/{timestamp}/{result.algo_name}/{result.dataset_name}"
        # exist_ok replaces the original try/except FileExistsError dance.
        os.makedirs(folder_path, exist_ok=True)
        result_filename = folder_path + f"/{result.trial_num}_cls.joblib.pkl"
        _ = joblib.dump(result, result_filename, compress=9)
if __name__ == "__main__":
    import warnings
    # Silence sklearn convergence/deprecation chatter for the full run.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        run_all_trials()
| lukebrb/final_project | runners.py | runners.py | py | 3,580 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.Any",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 26,
... |
33944686501 | # -*- coding: utf-8 -*-
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics.scorer import make_scorer
from sklearn import model_selection
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.linear_model import LogisticRegression
from sklearn.metrics.scorer import make_scorer
from sklearn import model_selection
from sklearn.ensemble import GradientBoostingRegressor #GBM algorithm
from sklearn import cross_validation, metrics #Additional scklearn functions
from sklearn.grid_search import GridSearchCV #Perforing grid search
"""
Created on Sat Nov 24 13:28:11 2018
@author: Muhammad Shahraiz Khan Niazi and Muhammad Daniyal Saqib
"""
'''
#Start:Kaggle.com
#Explanation: Plot a bar graph against SalePrice
#End '''
def barPlot(df, var):
    """Bar plot of SalePrice against column *var*, x labels rotated."""
    plt.xticks(rotation=90)
    sns.barplot(df.loc[:, 'SalePrice'], df.loc[:, var])
    plt.title('SalePrice vs ' + var)
#----------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
def colDrop(df, colName):
    """Return *df* without the column(s) *colName*; original is unchanged."""
    return df.drop(columns=colName)
#----------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
'''
#Start:Kaggle.com
#Explanation: to figure out the division/scattering of the data's values
#End '''
def checkRange(df, var):
    """Print each distinct value of *var* paired with a percentage share.

    NOTE(review): pairs .unique() (appearance order) with .value_counts()
    percentages (descending-count order), exactly as the original did.
    """
    counts = df.loc[:, var].value_counts().values
    percent = 100. * counts / counts.sum()
    labels = ['{0} - {1:1.1f} %'.format(v, p)
              for v, p in zip(df.loc[:, var].unique(), percent)]
    print(labels)
#----------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
'''
#Start:Kaggle.com
#Explanation: Plot a historgram graph
#End '''
def histogram(df, var):
    """Distribution plot of column *var*, followed by a fresh figure."""
    sns.distplot(df.loc[:, var])
    plt.figure()
#----------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
def join(df1, df2):
    """Concatenate two frames side by side (column-wise)."""
    return pd.concat([df1, df2], axis=1)
#----------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
'''
#Start:Kaggle.com
#Explanation: Plot a scatter graph against SalePrice
#End '''
def scatterPlot(df, var):
    """Scatter plot of SalePrice (y) against column *var* (x)."""
    pair = pd.concat([df.loc[:, 'SalePrice'], df.loc[:, var]], axis=1)
    pair.plot.scatter(x=var, y='SalePrice', ylim=(0, 800000))
#----------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
#This method implements binary encoding on particular columns in the dataframe and returns that dataframe
def encode(ser):
    """One-hot (dummy) encode a categorical Series into indicator columns."""
    return pd.get_dummies(ser)
#----------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
#This function checks percentage of the number of NA values in a coloumn
def checkMissingValues(df):
    """Percentage of missing (NA) values in each column of *df*."""
    return df.isnull().sum() / df.shape[0] * 100
#----------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
#This function reads the data from the file and returns the entire dataset, a list of input cols and outputCol
def readData_1(numRows = None):
    """Read the train+test CSVs and return (combined DF, inputCols, outputCol).

    BUG FIX: two missing commas made Python silently concatenate adjacent
    string literals, yielding the bogus column names 'BldgTypeHouseStyle'
    and 'YearBuiltYearRemodAdd'; they are now separate entries.  The
    accidental duplicate 'LotConfig' entry is also kept only once.
    """
    inputCols = ['MSSubClass', 'MSZoning', 'LotFrontage',
                 'LotArea', 'Street', 'Alley', 'LotShape', 'LandContour',
                 'Utilities', 'LotConfig',
                 'Neighborhood', 'Condition1', 'Condition2', 'BldgType',
                 'HouseStyle', 'OverallQual', 'OverallCond', 'YearBuilt',
                 'YearRemodAdd', 'RoofStyle', 'RoofMatl',
                 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'MasVnrArea',
                 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual', 'BsmtCond',
                 'BsmtExposure', 'BsmtFinType1', 'BsmtFinSF1', 'BsmtFinType2',
                 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'Heating', 'HeatingQC',
                 'CentralAir', 'Electrical', '1stFlrSF', '2ndFlrSF', 'LowQualFinSF',
                 'GrLivArea', 'BsmtFullBath', 'BsmtHalfBath', 'FullBath', 'HalfBath', 'Bedroom',
                 'Kitchen', 'KitchenQual', 'TotRmsAbvGrd', 'Functional',
                 'Fireplaces', 'FireplaceQu', 'GarageType', 'GarageYrBlt',
                 'GarageFinish', 'GarageCars', 'GarageArea',
                 'GarageQual', 'GarageCond', 'PavedDrive',
                 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch',
                 '3SsnPorch', 'ScreenPorch', 'PoolArea',
                 'PoolQC', 'Fence', 'MiscFeature', 'MiscVal',
                 'MoSold', 'YrSold', 'SaleType', 'SaleCondition']
    outputCol = ['SalePrice']
    trainHouseDF = pd.read_csv('Data/train.csv')
    testHouseDF = pd.read_csv('Data/test.csv')
    # Stack train on top of test; sort=True keeps the column order stable.
    houseDF = pd.concat([trainHouseDF, testHouseDF], sort=True)
    #houseDF = houseDF.sample(frac = 1, random_state = 99).reset_index(drop = True)
    return houseDF, inputCols, outputCol
#----------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
#This function reads the data from the file and returns the two datasets -
# training and testing, a list of input cols and outputCol
def readData(numRows = None):
    """Read the Kaggle train/test CSVs separately.

    Returns (trainDF, testDF, outputCol) where outputCol names the target.
    """
    train_frame = pd.read_csv('Data/train.csv')
    test_frame = pd.read_csv('Data/test.csv')
    target_cols = ['SalePrice']
    return train_frame, test_frame, target_cols
#----------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
def c2(df, colName):
    """Pearson correlation of column *colName* with SalePrice."""
    target = df.loc[:, 'SalePrice']
    return df.loc[:, colName].corr(target)
#----------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
#This function finds out the correlation between the numerical columns and the SalePrice
def corrRelationForNumerical():
    # Print every numeric column's correlation with SalePrice (descending)
    # and draw a heatmap for the hand-picked strongest candidates.
    df, b, c = readData_1()
    corr=df.corr()["SalePrice"]
    print(corr[np.argsort(corr, axis=0)[::-1]])
    # Columns judged strongly correlated with SalePrice (roughly |r| >= 0.5).
    inputCols = ['OverallQual', 'GrLivArea', 'GarageCars', 'GarageArea', 'TotalBsmtSF', '1stFlrSF',
                 'FullBath', 'TotRmsAbvGrd', 'YearBuilt', 'YearRemodAdd', 'GarageYrBlt', 'MasVnrArea',
                 'Fireplaces']
    corrMatrix= df[inputCols].corr()
    sns.set(font_scale=1.10)
    plt.figure(figsize=(10, 10))
    sns.heatmap(corrMatrix, vmax=.8, linewidths=0.01, square=True,annot=True,cmap='viridis',linecolor="white")
    plt.title('Correlation between features')
'''
According to the correlation that we found with respect to SalePrice, we think:
values that are above close to 0.5 or greater than 0.5 would affect SalePrice and values that are less than -0.1.
Hence, our predictors list till now is: OverallQual, GrLiveArea, GarageCars, GarageArea
TotalBsmtSF, 1stFlrSF, FullBath, TotRmsAbvGrd, YearBuilt, YearRemodAdd, GarageYrBl
'''
#----------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
#This function would preprocess the nonNumerical data and we would evaluate which column to keep as a parameter
#for the final algorithm
def dataProcessing():
df, b, c = readData()
#____________________________________________________________________________________________
#While going through the Utilities - there is only one entry that is other than AllPub (Id = 945)
#Hence dropping ID 945 and then the entire col would not affect the TargetPrice - Sale price
df = df.drop(labels = 944, axis = 0)
'''
From: Kaggle.com
Explanation: After trying different plotting techniques, such as boxPlot, scatterplot and few others,
We think this describes the neighborhood perfectly.
'''
#barPlot(df, 'Neighborhood')
df = colDrop(df, 'Neighborhood')
#End: Kaggle.com
'''Other than few extra spikes, the SalePrice is not really affected that much by Neighborhood, as it is all between
100000 - 200000. Shahraiz and I believe, the Neighborhood really doesn't matter. Hence, we would drop this column'''
#----------------------------------------------------------------------------------------------------------
#MSZoning Attribute
#plt.xticks(rotation =90)
#plt.scatter(df.loc[:,'MSZoning'], df.loc[:, 'SalePrice'])
#plt.title('SalePrice vs MSZoning')
'''This graph clearly shows that they majority of the data is in RL; however, to confirm it further we would use Classifcation graph
to plot it'''
#labels = df.loc[:,"MSZoning"].unique()
#sizes = df.loc[:,"MSZoning"].value_counts().values
#explode=[0.1,0,0,0,0]
#parcent = 100.*sizes/sizes.sum()
#labels = ['{0} - {1:1.1f} %'.format(i,j) for i,j in zip(labels, parcent)]
#colors = ['yellowgreen', 'gold', 'lightblue', 'lightcoral','blue']
#patches = plt.pie(sizes, colors=colors,explode=explode, shadow=True,startangle=90)
#plt.title("Zoning Classification")
#plt.show()
#print(labels)
#print()
'''Clearly a large part of the pie is yellow green which is RL - and the second most occuring
is gold which is RM. Therefore, we would keep the values that are RL and RM and remove the rows
that are any other value.'''
'''We would discreditize the data in MSZoning: 1 - RL, 0 - RM and then find the correlation'''
df = df.loc[(df.loc[:,'MSZoning'] == 'RL') | (df.loc[:,'MSZoning']=='RM') | (df.loc[:,'MSZoning']=='C')]
df.loc[:,'MSZoning'] = df.loc[:,'MSZoning'].map(lambda val: 0 if(val=='RL') else val)
df.loc[:,'MSZoning'] = df.loc[:,'MSZoning'].map(lambda val: 1 if(val=='RM') else val)
df.loc[:,'MSZoning'] = df.loc[:,'MSZoning'].map(lambda val: 2 if(val=='C') else val)
#print(corrRelation2Cols(df, ['MSZoning', 'SalePrice']))
'''
The correlation between SalePrice and MSZoning is 0.29556497792 - that is below the threshold of 0.5; hence, we
would not use this as well.
'''
df = colDrop(df, 'MSZoning')
#----------------------------------------------------------------------------------------------------------
#Street
#plt.xticks(rotation =90)
#plt.scatter(df.loc[:,'Street'], df.loc[:, 'SalePrice'])
#plt.title('SalePrice vs Street')
df = df.loc[(df.loc[:, 'Street'] == 'Pave')]
'''
The graph shows that majority of the values are Pave.
Therefore we would keep all the values that are Pave and get rid of the column.
'''
df = colDrop(df, 'Street')
#----------------------------------------------------------------------------------------------------------
#dropping 'Ally', MiscFeature, PoolQC because there high percentage of Na values, Uncomment the print line below to see that.
#print(checkMissingValues(df))
df = df.drop(['Alley', 'MiscFeature', 'PoolQC', 'Utilities', 'Fence', 'FireplaceQu', 'LotFrontage'], axis = 1)
#----------------------------------------------------------------------------------------------------------
# LotShape
#checkRange(df, 'LotShape')
'''['Reg - 62.2 %', 'IR1 - 34.4 %', 'IR2 - 2.6 %', 'IR3 - 0.7 %'] - the percentage of
the values show that it is all Reg and IR1. Hence we would keep all those values that
are Reg and IR1 otherwise, we would drop those rows.'''
#df = df.loc[(df.loc[:,'LotShape'] == 'Reg') | (df.loc[:,'LotShape']=='IR1') | (df.loc[:,'LotShape']=='IR2')]
#df.loc[:,'LotShape'] = df.loc[:,'LotShape'].map(lambda val: 0 if(val=='Reg') else val)
#df.loc[:,'LotShape'] = df.loc[:,'LotShape'].map(lambda val: 1 if(val=='IR1') else val)
#df.loc[:,'LotShape'] = df.loc[:,'LotShape'].map(lambda val: 2 if(val=='IR2') else val)
#scatterPlot(df, 'LotShape')
'''Now we would discreditize the data into 0,1 or 2 and find the correlation between Lot Shape
and SalePrice'''
#print(c2(df, 'LotShape'))
#Hence we would drop this too.
df = colDrop(df, 'LotShape')
#_____________________________________________________________________________________________
#This is LandContour
#checkRange(df, 'LandContour')
#df = df.loc[(df.loc[:,'LandContour'] == 'Lvl') | (df.loc[:,'LandContour'] == 'Bnk') | (df.loc[:,'LandContour'] == 'Low') | (df.loc[:,'LandContour'] == 'HLS')]
#df.loc[:,'LandContour'] = df.loc[:,'LandContour'].map(lambda val: 0 if(val=='Lvl') else val)
#df.loc[:,'LandContour'] = df.loc[:,'LandContour'].map(lambda val: 1 if(val=='Bnk') else val)
#df.loc[:,'LandContour'] = df.loc[:,'LandContour'].map(lambda val: 2 if(val=='Low') else val)
#df.loc[:,'LandContour'] = df.loc[:,'LandContour'].map(lambda val: 3 if(val=='HLS') else val)
#histogram(df, 'LandContour')
#print(c2(df, 'LandContour'))
'''Since, LandContour and LotShape basically is providing the same information - we would use only
one of it - the one with the higher correlation with SalePrice, if it exceeds 0.5'''
df = colDrop(df, 'LandContour')
#____________________________________________________________________________________________
#This is LotConfig
#checkRange(df, 'LotConfig')
df = df.loc[(df.loc[:,'LotConfig'] == 'Inside') | (df.loc[:,'LotConfig'] == 'FR2') |(df.loc[:,'LotConfig'] == 'Corner') | (df.loc[:,'LotConfig'] == 'CulDsac') ]
df.loc[:,'LotConfig'] = df.loc[:,'LotConfig'].map(lambda val: 0 if(val=='Inside') else val)
df.loc[:,'LotConfig'] = df.loc[:,'LotConfig'].map(lambda val: 1 if(val=='FR2') else val)
df.loc[:,'LotConfig'] = df.loc[:,'LotConfig'].map(lambda val: 2 if(val=='Corner') else val)
df.loc[:,'LotConfig'] = df.loc[:,'LotConfig'].map(lambda val: 3 if(val=='CulDsac') else val)
#print(c2(df, 'LotConfig'))
#Removed Landconfig as well because the correlation is very less
df = colDrop(df, 'LotConfig')
#__________________________________________________________________________________________________
#LandSlope
#checkRange(df, 'LandSlope')
df = df.loc[(df.loc[:,'LandSlope'] == 'Gtl') | (df.loc[:, 'LandSlope'] == 'Mod')]
df.loc[:, 'LandSlope'] = df.apply(lambda row: 1 if row.loc['LandSlope'] == 'Gtl' else 0, axis = 1)
#print(c2(df, 'LandSlope'))
#It shows a high percentage of Gtl values, therefore - we would just keep those and remove the others
#and this column - It also shows a very high -negative correlation - therewould we would keep this column
#_____________________________________________________________________________________________
#Condition1 Condition2
df.loc[:,'Condition1'] = df.loc[:,'Condition1'].map(lambda val: 0 if(val=='Artery') else val)
df.loc[:,'Condition1'] = df.loc[:,'Condition1'].map(lambda val: 1 if(val=='Feedr') else val)
df.loc[:,'Condition1'] = df.loc[:,'Condition1'].map(lambda val: 2 if(val=='Norm') else val)
df.loc[:,'Condition1'] = df.loc[:,'Condition1'].map(lambda val: 3 if(val=='RRNn') else val)
df.loc[:,'Condition1'] = df.loc[:,'Condition1'].map(lambda val: 4 if(val=='PosN') else val)
df.loc[:,'Condition1'] = df.loc[:,'Condition1'].map(lambda val: 5 if(val=='PosA') else val)
df.loc[:,'Condition1'] = df.loc[:,'Condition1'].map(lambda val: 6 if(val=='RRNe') else val)
df.loc[:,'Condition1'] = df.loc[:,'Condition1'].map(lambda val: 7 if(val=='RRAe') else val)
df.loc[:,'Condition1'] = df.loc[:,'Condition1'].map(lambda val: 8 if(val=='RRAn') else val)
#print(c2(df, 'Condition1'))
df.loc[:,'Condition2'] = df.loc[:,'Condition2'].map(lambda val: 0 if(val=='Artery') else val)
df.loc[:,'Condition2'] = df.loc[:,'Condition2'].map(lambda val: 1 if(val=='Feedr') else val)
df.loc[:,'Condition2'] = df.loc[:,'Condition2'].map(lambda val: 2 if(val=='Norm') else val)
df.loc[:,'Condition2'] = df.loc[:,'Condition2'].map(lambda val: 3 if(val=='RRNn') else val)
df.loc[:,'Condition2'] = df.loc[:,'Condition2'].map(lambda val: 4 if(val=='PosN') else val)
df.loc[:,'Condition2'] = df.loc[:,'Condition2'].map(lambda val: 5 if(val=='PosA') else val)
df.loc[:,'Condition2'] = df.loc[:,'Condition2'].map(lambda val: 6 if(val=='RRNe') else val)
df.loc[:,'Condition2'] = df.loc[:,'Condition2'].map(lambda val: 7 if(val=='RRAe') else val)
df.loc[:,'Condition2'] = df.loc[:,'Condition2'].map(lambda val: 8 if(val=='RRAn') else val)
#print(c2(df, 'Condition2'))
''' Clearly the correlation between these two coloumns and SalePrice is very low; therefore, we
would drop these two columns'''
df = colDrop(df, 'Condition1')
df = colDrop(df, 'Condition2')
#____________________________________________________________________________________________
#BldgType
#labels = df.loc[:,'BldgType'].unique()
#sizes = df.loc[:,"BldgType"].value_counts().values
#explode=[0.1,0,0,0,0]
#parcent = 100.*sizes/sizes.sum()
#labels = ['{0} - {1:1.1f} %'.format(i,j) for i,j in zip(labels, parcent)]
#print(labels)
'''1 Fam and Duplex are more than 90% of the values for this column therefore we would,
binary encode them and see there correlation with the SalePrice'''
a = encode(df.loc[:, 'BldgType'])
#df.loc[:, '1Fam'] = a.loc[:, '1Fam']
df.loc[:, 'Duplex'] = a.loc[:, 'Duplex']
#print(c2(df, '1Fam'))
#print(c2(df, 'Duplex'))
'''
1Fam is not highly correlated with the SalePrice; therefore, which can be confirmed
by there scatter plots, we won't keep that. However, we would keep the Duplex.
'''
#data = pd.concat([df.loc[:, 'SalePrice'], df.loc[:,'1Fam']], axis=1)
#data.plot.scatter(x='1Fam', y='SalePrice', ylim=(0,800000));
#data1 = pd.concat([df.loc[:, 'SalePrice'], df.loc[:,'Duplex']], axis=1)
#data1.plot.scatter(x='Duplex', y='SalePrice', ylim=(0,800000));
'''In both the cases - SalePrice, is barely affected by 1Fam or DuPlex as it is scattered all over the
place. Hence we would also not consider BldgType. '''
df = colDrop(df, 'BldgType')
#____________________________________________________________________________________________
#RoofMatl
#checkRange(df, 'RoofMatl')
''' 99.6 percent of the values are CompShg therefore we would ignore the other values and ignore
this column'''
df = df.loc[df.loc[:, 'RoofMatl'] == 'CompShg']
df = colDrop(df, 'RoofMatl')
#____________________________________________________________________________________________
#MasVnrType
#checkRange(df, 'MasVnrType')
#scatterPlot(df, 'MasVnrArea')
df.loc[:, 'MasVnrType'] == df.loc[:, 'MasVnrType'].fillna('None')
df.loc[:, 'MasVnrArea'] == df.loc[:, 'MasVnrArea'].fillna(0)
a = df.loc[(df.loc[:,'MasVnrType'] == 'None') & (df.loc[:, 'MasVnrArea'] == 0)]
#print(len(a))
#print(len(df[df.loc[:, 'MasVnrType'] == 'None']))
#print(len(df[df.loc[:, 'MasVnrArea'] == 0]))
'''This shows a relationship between the two and even from the names we can figure it out that we just need one of it;
therefore, we would keep the one that is more easier to use (the one that is numeric) and figure out it's corr'''
df = colDrop(df, 'MasVnrType')
#print(c2(df, 'MasVnrArea'))
'''We would keep this because it's correlation with SalePrice is 0.499 which is approximately 0.5 - among the threshold
we are considering values.'''
#____________________________________________________________________________________________
#ExteriorQuality
#checkRange(df, 'ExterQual')
'''From this we figure out that Gd, TA, and Ex makes the entire data therefore we would remove the rest'''
df = df.loc[(df.loc[:, 'ExterQual'] == 'Gd') | (df.loc[:, 'ExterQual'] == 'TA') | (df.loc[:, 'ExterQual'] == 'Ex')]
df.loc[:,'ExterQual'] = df.loc[:,'ExterQual'].map(lambda val: 1 if(val=='Gd') else val)
df.loc[:,'ExterQual'] = df.loc[:,'ExterQual'].map(lambda val: 2 if(val=='TA') else val)
df.loc[:,'ExterQual'] = df.loc[:,'ExterQual'].map(lambda val: 3 if(val=='Ex') else val)
#histogram(df, 'ExterQual')
#checkRange(df, 'ExterCond')
#print(c2(df, 'ExterQual'))
'''Since, it's corr relation is negative and far from zero, therefore, we would keep this. However, we would not
use exterior condition because both of them are almost the same thing because in both the cases - TA and Gd make up a large
portion of the data.'''
df = colDrop(df, 'ExterCond')
#____________________________________________________________________________________________
#Foundation
#checkRange(df, 'Foundation')
df = df.loc[(df.loc[:, 'Foundation'] == 'PConc') | (df.loc[:, 'Foundation'] == 'CBlock') | (df.loc[:, 'Foundation'] == 'CBlock')]
df.loc[:,'Foundation'] = df.loc[:,'Foundation'].map(lambda val: 1 if(val=='PConc') else val)
df.loc[:,'Foundation'] = df.loc[:,'Foundation'].map(lambda val: 2 if(val=='CBlock') else val)
df.loc[:,'Foundation'] = df.loc[:,'Foundation'].map(lambda val: 3 if(val=='CBlock') else val)
#histogram(df, 'Foundation')
#scatterPlot(df, 'Foundation')
#print(c2(df, 'Foundation'))
'''The corr is very high negative - therefore we will take this into consideration either.'''
#____________________________________________________________________________________________
#Basement Features
'''There is high correlation between basement features (BsmtFinType2, BsmtQual, BsmtCond, BsmtExposure, BsmtFinType1);
therefore, we would only use BsmtCond - correlation shown below'''
#checkRange(df, 'BsmtQual')
#checkRange(df, 'BsmtCond')
df.loc[:, 'BsmtQual'] = df.loc[:, 'BsmtQual'].fillna('None')
df.loc[:, 'BsmtCond'] = df.loc[:, 'BsmtCond'].fillna('None')
df.loc[:,'BsmtQual'] = df.loc[:,'BsmtQual'].map(lambda val: 1 if(val=='Gd') else val)
df.loc[:,'BsmtQual'] = df.loc[:,'BsmtQual'].map(lambda val: 2 if(val=='TA') else val)
df.loc[:,'BsmtQual'] = df.loc[:,'BsmtQual'].map(lambda val: 2 if(val== 'Ex') else val)
df.loc[:,'BsmtQual'] = df.loc[:,'BsmtQual'].map(lambda val: 4 if(val== 'Fa') else val)
df.loc[:,'BsmtQual'] = df.loc[:,'BsmtQual'].map(lambda val: 5 if(val== 'None') else val)
df.loc[:,'BsmtQual'] = df.loc[:,'BsmtQual'].map(lambda val: 6 if(val== 'Po') else val)
df.loc[:,'BsmtCond'] = df.loc[:,'BsmtCond'].map(lambda val: 1 if(val=='Gd') else val)
df.loc[:,'BsmtCond'] = df.loc[:,'BsmtCond'].map(lambda val: 2 if(val=='TA') else val)
df.loc[:,'BsmtCond'] = df.loc[:,'BsmtCond'].map(lambda val: 3 if(val== 'Ex') else val)
df.loc[:,'BsmtCond'] = df.loc[:,'BsmtCond'].map(lambda val: 4 if(val== 'Fa') else val)
df.loc[:,'BsmtCond'] = df.loc[:,'BsmtCond'].map(lambda val: 5 if(val== 'None') else val)
df.loc[:,'BsmtCond'] = df.loc[:,'BsmtCond'].map(lambda val: 6 if(val== 'Po') else val)
#print(df.loc[:, 'BsmtCond'].corr(df.loc[:,'BsmtQual']))
'''This shows a high correlation between BsmtCond and BsmtQual - therefore, we would keep BsmtCond'''
df = colDrop(df, 'BsmtQual')
#____________________________________________________________________________________________
#Gas
#checkRange(df, 'Heating') #Show 99.6% GasA - therefore keeping only data that is GasA
df = df.loc[df.loc[:, 'Heating'] == 'GasA']
df = colDrop(df, 'Heating')
#checkRange(df, 'HeatingQC')
df.loc[:,'HeatingQC'] = df.loc[:,'HeatingQC'].map(lambda val: 1 if(val=='Gd') else val)
df.loc[:,'HeatingQC'] = df.loc[:,'HeatingQC'].map(lambda val: 2 if(val=='TA') else val)
df.loc[:,'HeatingQC'] = df.loc[:,'HeatingQC'].map(lambda val: 3 if(val== 'Ex') else val)
df.loc[:,'HeatingQC'] = df.loc[:,'HeatingQC'].map(lambda val: 4 if(val== 'Fa') else val)
df.loc[:,'HeatingQC'] = df.loc[:,'HeatingQC'].map(lambda val: 5 if(val== 'Po') else val)
#print(c2(df, 'HeatingQC'))
#scatterPlot(df, 'HeatingQC')
'''The graph shows that Heating Quality definitely affects the price range towards the upper end.'''
#____________________________________________________________________________________________
#Central Air
#checkRange(df, 'CentralAir')
df.loc[:,'CentralAir'] = df.loc[:,'CentralAir'].map(lambda val: 0 if(val=='N') else val)
df.loc[:,'CentralAir'] = df.loc[:,'CentralAir'].map(lambda val: 1 if(val=='Y') else val)
#scatterPlot(df, 'CentralAir')
#histogram(df, 'CentralAir')
#print(c2(df, 'CentralAir'))
df = colDrop(df, 'CentralAir')
#checkRange(df, 'Electrical')
df = df.loc[(df.loc[:, 'Electrical'] == 'SBrkr') | (df.loc[:, 'Electrical'] == 'FuseF')] #98.8 of the total values make this up
df.loc[:,'Electrical'] = df.loc[:,'Electrical'].map(lambda val: 0 if(val=='SBrkr') else 1)
#print(df.loc[:, 'Electrical'])
#print(c2(df, 'Electrical'))
'''This is a negative number close to 0; hence, a low corr between Electricity and SalePrice.
therefore - we would not keep this.'''
df = colDrop(df, 'Electrical')
#KitchenQuality
#checkRange(df, 'KitchenQual')
''' 98% of the data is made up of Gd, TA and Ex hence we would only keep, discretize it
and then find the correlation between KitcenQual and SalePrice'''
df = df.loc[(df.loc[:, 'KitchenQual'] == 'Gd') | (df.loc[:, 'KitchenQual'] == 'Ex') | (df.loc[:, 'KitchenQual'] == 'TA')]
df.loc[:,'KitchenQual'] = df.loc[:,'KitchenQual'].map(lambda val: 0 if(val=='Ex') else val)
df.loc[:,'KitchenQual'] = df.loc[:,'KitchenQual'].map(lambda val: 1 if(val=='TA') else val)
df.loc[:,'KitchenQual'] = df.loc[:,'KitchenQual'].map(lambda val: 2 if(val=='Gd') else val)
#print(c2(df, 'KitchenQual'))
'''It really has a low negative corr with SalePrice - therefore we would not keep this.'''
df = colDrop(df, 'KitchenQual')
#function
#checkRange(df, 'Functional')
df = df.loc[(df.loc[:, 'Functional'] == 'Typ') | (df.loc[:, 'Functional'] == 'Min1') | (df.loc[:, 'Functional'] == 'Maj1')]
df.loc[:,'Functional'] = df.loc[:,'Functional'].map(lambda val: 0 if(val=='Typ') else val)
df.loc[:,'Functional'] = df.loc[:,'Functional'].map(lambda val: 1 if(val=='Min1') else val)
df.loc[:,'Functional'] = df.loc[:,'Functional'].map(lambda val: 2 if(val=='Maj1') else val)
#print(c2(df, 'Functional'))
'''This has negative corr very close to zero; therefore, we would drop it.'''
df = colDrop(df, 'Functional')
#----------------------------------------------------------------------------------------------------------
#HouseStyle
#checkRange(df, 'HouseStyle')
df = df.loc[(df.loc[:, 'HouseStyle'] == '2Story') | (df.loc[:, 'HouseStyle'] == '1Story') | (df.loc[:, 'HouseStyle'] == '1.5Unf')]
df.loc[:,'HouseStyle'] = df.loc[:,'HouseStyle'].map(lambda val: 0 if(val=='2Story') else val)
df.loc[:,'HouseStyle'] = df.loc[:,'HouseStyle'].map(lambda val: 1 if(val=='1Story') else val)
df.loc[:,'HouseStyle'] = df.loc[:,'HouseStyle'].map(lambda val: 2 if(val=='1.5Unf') else val)
#print( df.loc[:,'HouseStyle'].dtype)
'''It is a high negative correlation with SalePrice - therefore we would keep it.'''
#----------------------------------------------------------------------------------------------------------
#RoofStyle
#checkRange(df, 'RoofStyle')
'''Data represents a lot like HouseStyle; therefore, assuming a high correlation between
roofstyle and housestyle - we would drop this as well.'''
df = colDrop(df, 'RoofStyle')
#_____________________________________________________________
#Exterior1st
df = colDrop(df, 'Exterior1st')
df = colDrop(df, 'Exterior2nd')
df = colDrop(df, 'BsmtExposure')
df = colDrop(df, 'BsmtFinType1')
df = colDrop(df, 'BsmtFinType2')
df = colDrop(df, 'GarageType')
#checkRange(df, 'GarageFinish')
a = encode(df.loc[:, 'GarageFinish'])
df = join(df, a)
df = colDrop(df,'GarageFinish')
#checkRange(df, 'PavedDrive')
df = df.loc[(df.loc[:, 'PavedDrive'] == 'Y') | (df.loc[:, 'PavedDrive'] == 'N')]
df.loc[:,'PavedDrive'] = df.loc[:,'PavedDrive'].map(lambda val: 0 if(val=='N') else 1)
#print(c2(df, 'PavedDrive'))
'''Very low correlation - hence we would drop this.'''
df = colDrop(df, 'PavedDrive')
df = colDrop(df, 'GarageCond')
df = colDrop(df, 'GarageQual')
#checkRange(df, 'SaleType')
df = df.loc[(df.loc[:, 'SaleType'] == 'WD') | (df.loc[:, 'SaleType'] == 'New') | (df.loc[:, 'SaleType'] == 'COD')]
df.loc[:,'SaleType'] = df.loc[:,'SaleType'].map(lambda val: 0 if(val=='WD') else val)
df.loc[:,'SaleType'] = df.loc[:,'SaleType'].map(lambda val: 1 if(val=='New') else val)
df.loc[:,'SaleType'] = df.loc[:,'SaleType'].map(lambda val: 2 if(val=='COD') else val)
df = colDrop(df, 'SaleType')
#checkRange(df, 'SaleCondition')
df = df.loc[(df.loc[:, 'SaleCondition'] == 'Normal') | (df.loc[:, 'SaleCondition'] == 'Partial') | (df.loc[:, 'SaleCondition'] == 'Abnormal')]
df.loc[:,'SaleCondition'] = df.loc[:,'SaleCondition'].map(lambda val: 0 if(val=='Normal') else val)
df.loc[:,'SaleCondition'] = df.loc[:,'SaleCondition'].map(lambda val: 1 if(val=='Partial') else val)
df.loc[:,'SaleCondition'] = df.loc[:,'SaleCondition'].map(lambda val: 2 if(val=='Abnormal') else val)
#print(c2(df, 'SaleCondition'))
#_handling the NA values
df.loc[:,'MasVnrArea'] = df.loc[:,'MasVnrArea'].fillna(0)
#df.loc[:,'BsmtExposure'] = df.loc[:,'BsmtExposure'].fillna(df.loc[:,'BsmtExposure'].mode()[0])
#df.loc[:,'BsmtFinType1'] = df.loc[:,'BsmtFinType1'].fillna(df.loc[:,'BsmtFinType1'].mode()[0])
#df.loc[:,'BsmtFinType2'] = df.loc[:,'BsmtFinType2'].fillna(df.loc[:,'BsmtFinType2'].mode()[0])
#df.loc[:,'GarageType'] = df.loc[:,'GarageType'].fillna(df.loc[:,'GarageType'].mode()[0])
df.loc[:,'GarageYrBlt'] = df.loc[:,'GarageYrBlt'].fillna(df.loc[:,'GarageYrBlt'].mean())
#df.loc[:,'GarageFinish'] = df.loc[:,'GarageFinish'].fillna(df.loc[:,'GarageFinish'].mode()[0])
#df.loc[:,'GarageQual'] = df.loc[:,'GarageQual'].fillna(df.loc[:,'GarageQual'].mode()[0])
#df.loc[:,'GarageCond'] = df.loc[:,'GarageCond'].fillna(df.loc[:,'GarageCond'].mode()[0])
#corr=df.corr()["SalePrice"]
#corr[np.argsort(corr, axis=0)[::-1]]
#print(corr)
'''Cols that we would keep because the corr relation with SalePrice are relatively higher
'LotArea', 'HouseStyle', 'OverallQual', 'OverallCond', 'OverallCond',
'YearRemodAdd', 'MasVnrArea', 'ExterQual', 'Foundation', 'BsmtCond',
'TotalBsmtSF', '1stFlrSF', 'GrLivArea', 'FullBath', 'KitchenAbvGr',
'TotRmsAbvGrd', 'GarageYrBlt', 'GarageCars', 'Unf'
'''
df = colDrop(df, 'MSSubClass')
df = colDrop(df, 'Fireplaces')
df = colDrop(df, 'LandSlope')
df = colDrop(df, 'BsmtFinSF1')
df = colDrop(df, 'BsmtFinSF2')
df = colDrop(df, 'BsmtUnfSF')
df = colDrop(df, 'HeatingQC')
df = colDrop(df, '2ndFlrSF')
df = colDrop(df, 'LowQualFinSF')
df = colDrop(df, 'BsmtHalfBath')
df = colDrop(df, 'FullBath')
df = colDrop(df, 'HalfBath')
df = colDrop(df, 'BedroomAbvGr')
df = colDrop(df, 'KitchenAbvGr')
df = colDrop(df, 'TotRmsAbvGrd')
df = colDrop(df, 'GarageCars')
df = colDrop(df, 'WoodDeckSF')
df = colDrop(df, 'OpenPorchSF')
df = colDrop(df, '3SsnPorch')
df = colDrop(df, 'ScreenPorch')
df = colDrop(df, 'PoolArea')
df = colDrop(df, 'MiscVal')
df = colDrop(df, 'MoSold')
df = colDrop(df, 'YrSold')
df = colDrop(df, 'Fin')
df = colDrop(df, 'RFn')
df = colDrop(df, 'EnclosedPorch')
return df
def standardize(df, ls):
    """Standardize the columns `ls` of `df` in place to zero mean and unit variance.

    Uses pandas' default sample standard deviation (ddof=1). Mutates `df`
    and returns None.

    The original implementation subtracted the mean, then divided by the std
    of the already-centred data in a second pass; since the standard
    deviation is translation-invariant, a single pass is equivalent and
    avoids re-evaluating ``df.loc[:, ls]`` five times.
    """
    block = df.loc[:, ls]
    df.loc[:, ls] = (block - block.mean()) / block.std()
def test10():
    """Fit a GradientBoostingRegressor on the cleaned housing data and print
    the mean 10-fold cross-validated R^2 score.

    NOTE(review): 'Id' is included as a feature — presumably unintended,
    since a row identifier carries no predictive signal; confirm.
    """
    df = dataProcessing()
    # Feature columns that survived the correlation-based selection done
    # in dataProcessing().
    inputCols = ['Id', 'LotArea', 'HouseStyle', 'OverallQual', 'OverallCond', 'YearBuilt',
                 'YearRemodAdd', 'MasVnrArea', 'ExterQual', 'Foundation', 'BsmtCond',
                 'TotalBsmtSF', '1stFlrSF', 'GrLivArea', 'BsmtFullBath',
                 'GarageYrBlt', 'GarageArea', 'SaleCondition', 'Duplex', 'Unf']
    outputCol = ['SalePrice']
    #print(df.dtypes)
    # Standardize features and target in place (zero mean, unit variance).
    standardize(df, inputCols + outputCol)
    df1 = df.loc[:,inputCols]
    outputSeries = df.loc[:,outputCol]
    alg = GradientBoostingRegressor()
    alg.fit(df1, outputSeries)
    # 10-fold cross-validation scored by the coefficient of determination.
    cvScores = model_selection.cross_val_score(alg, df1, outputSeries ,cv=10, scoring='r2')
    print(cvScores.mean())
    # Dead code below: an earlier logistic-regression classification attempt,
    # kept commented out as a no-op string literal.
    '''
    alg = LogisticRegression()
    df1 = df.loc[:, inputCols]
    df2 = df.loc[:, outputCol]
    standardize(df1, inputCols)
    standardize(df2, outputCol)
    #FROM:https://stackoverflow.com/questions/34165731/a-column-vector-y-was-passed-when-a-1d-array-was-expected
    #Explanation: casts flaot to int types. As an error came forward
    df2=df2.astype('int')
    alg.fit(df1,df2.values.ravel())
    #END: https://stackoverflow.com/questions/34165731/a-column-vector-y-was-passed-when-a-1d-array-was-expected
    cvScores = model_selection.cross_val_score(alg, df1, df2 ,cv=10, scoring='accuracy')
    orginal_cvScore_mean = cvScores.mean()
    print(orginal_cvScore_mean)
    '''
| shahraizniazi/Regression-Simulation | Final_Final/Final.py | Final.py | py | 36,277 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "seaborn.barplot",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib... |
39259069256 | import logging
from django.db import transaction
from rest_framework import serializers
from rest_framework import status
from rest_framework.decorators import detail_route
from rest_framework.permissions import DjangoModelPermissions
from rest_framework.response import Response
from rest_framework.routers import DefaultRouter
from rest_framework.viewsets import ModelViewSet
from eums.api.standard_pagination import StandardResultsSetPagination
from eums.models import DistributionPlanNode as DeliveryNode, UserProfile
from eums.permissions.distribution_plan_node_permissions import DistributionPlanNodePermissions
logger = logging.getLogger(__name__)
class DistributionPlanNodeSerialiser(serializers.ModelSerializer):
    """Serialises DeliveryNode (DistributionPlanNode) instances for the API.

    Adds flattened read-only convenience fields pulled from related objects
    (consignee name, item description, order type) next to the writable
    model fields.
    """

    # Accepted on input only; not rendered back in responses.
    quantity = serializers.IntegerField(write_only=True, required=False)
    parents = serializers.ListField(required=False)
    # Read-only value sourced from the model's `balance`.
    balance = serializers.IntegerField(read_only=True, required=False)
    # Flattened read-only fields from related objects / model attributes.
    consignee_name = serializers.CharField(read_only=True, source='consignee.name')
    item_description = serializers.CharField(read_only=True, source='item.item.description')
    order_type = serializers.CharField(read_only=True, source='type')

    class Meta:
        model = DeliveryNode
        fields = ('id', 'distribution_plan', 'location', 'consignee', 'tree_position', 'parents', 'quantity_in',
                  'contact_person_id', 'item', 'delivery_date', 'remark', 'track', 'quantity', 'quantity_out',
                  'balance', 'has_children', 'consignee_name', 'item_description', 'order_number', 'order_type',
                  'time_limitation_on_distribution', 'additional_remarks', 'is_assigned_to_self')
class DistributionPlanNodeViewSet(ModelViewSet):
    """
    REST endpoint for delivery nodes (DistributionPlanNode).

    Querysets are scoped per requesting user: users attached to a consignee
    only see nodes relevant to that consignee; other users can request root
    nodes via ?is_root or the full set. Extra detail routes expose a node's
    lineage and allow reporting losses against it.
    """

    permission_classes = (DjangoModelPermissions, DistributionPlanNodePermissions)
    queryset = DeliveryNode.objects.all()
    serializer_class = DistributionPlanNodeSerialiser
    pagination_class = StandardResultsSetPagination
    search_fields = ('location', 'consignee__name', 'delivery_date')
    filter_fields = ('consignee', 'item', 'distribution_plan', 'contact_person_id', 'item__item')

    def get_queryset(self):
        """Return the queryset scoped to the requesting user.

        Consignee users get consignee-specific nodes; otherwise ?is_root
        restricts the result to root nodes, and all nodes are returned
        when neither applies.
        """
        user_profile = UserProfile.objects.filter(user_id=self.request.user.id).first()
        logger.info('user profile = %s' % user_profile)
        logger.info('user id = %s' % self.request.user.id)
        if user_profile and user_profile.consignee:
            logger.info('user consignee = %s' % user_profile.consignee)
            return self._get_consignee_queryset(user_profile)
        is_root = self.request.GET.get('is_root')
        if is_root:
            logger.info('root nodes = %s(%s)' % (DeliveryNode.objects.root_nodes(), DeliveryNode.objects.root_nodes()))
            return DeliveryNode.objects.root_nodes()
        logger.info('queryset clone node = %s(%s)' % (self.queryset._clone(), len(self.queryset._clone())))
        # _clone() hands back a fresh copy of the class-level queryset so
        # per-request filtering does not mutate the shared attribute.
        return self.queryset._clone()

    def _get_consignee_queryset(self, user_profile):
        """Queryset for a consignee user, honouring the
        ?consignee_deliveries_for_item and ?parent query parameters."""
        item_id = self.request.GET.get('consignee_deliveries_for_item')
        if item_id:
            # Deliveries of a specific item made to this consignee, newest first.
            return DeliveryNode.objects.delivered_by_consignee(user_profile.consignee, item_id).order_by('-id')
        parent_id = self.request.GET.get('parent')
        if parent_id:
            logger.info('parent_id = %s' % parent_id)
            parent = DeliveryNode.objects.get(pk=parent_id)
            return parent.children()
        return self._consignee_nodes(user_profile)

    def _consignee_nodes(self, user_profile):
        """Nodes whose implementing partner is the user's consignee.

        ?is_distributable further restricts to nodes with a positive
        balance on confirmed plans at the implementing-partner position.
        """
        queryset = DeliveryNode.objects.filter(ip=user_profile.consignee)
        logger.info('is_distributable = %s' % self.request.GET.get('is_distributable'))
        if self.request.GET.get('is_distributable'):
            queryset = queryset.filter(balance__gt=0, distribution_plan__confirmed=True,
                                       tree_position=DeliveryNode.IMPLEMENTING_PARTNER)
            logger.info('user consignee nodes after query = %s(%s)' % (queryset, len(queryset)))
            return queryset
        return queryset

    def list(self, request, *args, **kwargs):
        """List nodes; pagination is disabled unless ?paginate=true."""
        paginate = request.GET.get('paginate', None)
        if paginate != 'true':
            # page_size = 0 effectively switches pagination off for this request.
            self.paginator.page_size = 0
        return super(DistributionPlanNodeViewSet, self).list(request, *args, **kwargs)

    @transaction.atomic
    def perform_create(self, serializer):
        """Create a node inside a transaction.

        NOTE(review): presumably serializer.save() touches multiple rows
        (node plus related records) — confirm against the serializer.
        """
        serializer.save()

    @detail_route()
    def lineage(self, request, pk=None):
        """GET detail route: the node's lineage (as returned by
        node.lineage()), serialised with this viewset's serializer."""
        node = self.get_object()
        lineage = node.lineage()
        return Response(self.get_serializer(lineage, many=True).data)

    @detail_route(methods=['patch'])
    def report_loss(self, request, pk=None):
        """PATCH detail route: record a lost quantity against this node.

        Expects 'quantity' and 'justification' in the request body;
        responds with 204 No Content.
        """
        quantity_lost = request.data['quantity']
        justification = request.data['justification']
        node = self.get_object()
        node.losses.create(quantity=quantity_lost, remark=justification)
        node.save()  # for updating the balance on the node - DO NOT REMOVE
        return Response(status=status.HTTP_204_NO_CONTENT)
distributionPlanNodeRouter = DefaultRouter()
distributionPlanNodeRouter.register(r'distribution-plan-node', DistributionPlanNodeViewSet)
| unicefuganda/eums | eums/api/distribution_plan_node/distribution_plan_node_endpoint.py | distribution_plan_node_endpoint.py | py | 5,146 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 19,
"usage_type": "name"
... |
27673839201 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import html2text
class Html2TextPipeline(object):
    """Scrapy item pipeline converting HTML fields to Markdown text."""

    # Item fields whose HTML content should be converted.
    _MARKDOWN_FIELDS = ('description_md', 'abstract_md')

    def process_item(self, item, spider):
        """Replace each non-empty HTML field with its Markdown rendering."""
        for field in self._MARKDOWN_FIELDS:
            html = item.get(field)
            if html:
                item[field] = html2text.html2text(html)
        return item
| redapple/pyvideo-contrib | pyconfr2015/pyconfr2015/pyconfr2015/pipelines.py | pipelines.py | py | 459 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "html2text.html2text",
"line_number": 15,
"usage_type": "call"
}
] |
72490450747 | from polygon import *
import math
from functools import wraps,lru_cache
from collections import namedtuple
@validate_type(int)
@validate_params
class Polygon_sequence:
    """A sequence of regular polygons with 3..n vertices that all share the
    same circumradius.

    Indexing with a vertex count returns that polygon's properties as a
    named tuple; results are memoised with an LRU cache.
    """

    def __init__(self, n, circumradius):
        """Store the largest vertex count `n` and the common circumradius."""
        self.n = n
        self.circumradius = circumradius

    @staticmethod
    @lru_cache(maxsize=2**10)
    def get_polygon(vertex, circumradius):
        """Return the polygon's properties (vertex, circumradius,
        interiorAngle, edgeLength, apothem, area, perimeter) as a named tuple.

        Declared as a cached @staticmethod: the original cached the bound
        method, which made the lru_cache key on — and keep alive — every
        Polygon_sequence instance it was called through (flake8-bugbear B019).
        The cache is now shared across instances, which is safe because the
        result depends only on (vertex, circumradius).
        """
        polygon = Polygon(vertex, circumradius)
        prop_names = ('vertex', 'circumradius', 'interiorAngle', 'edgeLength', 'apothem', 'area', 'perimeter')
        properties = namedtuple('Polygon', prop_names)
        return properties(vertex, circumradius, polygon.interiorAngle, polygon.edgeLength,
                          polygon.apothem, polygon.area, polygon.perimeter)

    def max_efficiency(self):
        """Print the maximum efficiency polygon in the sequence.

        The maximum efficiency polygon is the one with the highest
        area-to-perimeter ratio.
        """
        ratios = []
        for vertex in range(3, self.n + 1):
            p = self.get_polygon(vertex, self.circumradius)
            ratios.append(p.area / p.perimeter)
        # Index of the best ratio; vertex count is offset by 3 (sequence start).
        max_index = max(range(len(ratios)), key=ratios.__getitem__)
        print(f'Polygon with {max_index+3} vertices has the Max Efficiency of {ratios[max_index]}')

    def __getitem__(self, vertex):
        """Return the properties of the polygon with `vertex` vertices.

        For backward compatibility this returns error-message strings for a
        non-int index or fewer than 3 vertices instead of raising
        TypeError/ValueError (which would be the more Pythonic contract).
        """
        if not isinstance(vertex, int):
            return 'Error: Incorrect type for parameter '
        if vertex < 3:
            return 'Error: This is not a polygon'
        return self.get_polygon(vertex, self.circumradius)

    def __repr__(self):
        """Describe the sequence: polygon count, circumradius and vertex range."""
        return f""" Contains {self.n} polygons with a circum radius of {self.circumradius} and vertices ranging from 3 to {self.n}"""

    def __len__(self):
        """Length of the sequence.

        NOTE(review): returns `n` even though only vertex counts 3..n exist
        (n - 2 polygons); kept as-is for backward compatibility.
        """
        return self.n
| m-shilpa/EPAI | Session_10_Sequence_Types/polygon_sequence.py | polygon_sequence.py | py | 2,781 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "polygon.interiorAngle",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "polygon.edgeLength",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "polygon.apothem",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name"... |
24021214626 | """
Allows for platform-specific configuration
options.
"""
import os
from enum import Enum
from typing import Any, Dict
class BaseOrigin(Enum):
    """
    Enum for the origin of the base path.
    """

    ENV = 1      # the base path was specified in the environment (D4V1D_DIR)
    CONF = 2     # the base path was specified in the config file
    DEFAULT = 3  # the base path is the built-in default


class PlatformsConfig:
    """
    Contains some configuration options for platforms.

    ``data_dir`` and ``conf_dir`` may contain the ``$D4V1D_DIR`` placeholder,
    which is expanded against ``base_dir`` on assignment; the raw values are
    kept so that assigning a new ``base_dir`` re-expands both directories.
    Directories are created on assignment if missing.
    """

    base_dir: str         # directory containing all platform-related content
    base_origin: BaseOrigin  # where base_dir came from
    data_dir: str         # expanded directory for platform data
    conf_dir: str         # expanded directory for platform config files

    def __init__(self, base_dir: str, base_origin: BaseOrigin, data_dir: str, conf_dir: str):
        """
        Creates a new PlatformsConfig object.

        Args:
            base_dir (str): Directory containing all platform-related content.
            base_origin (BaseOrigin): The origin of the base path.
            data_dir (str): Directory for platform data (may use $D4V1D_DIR).
            conf_dir (str): Directory for platform config files (may use $D4V1D_DIR).
        """
        # Write straight into __dict__ so the base_dir branch of __setattr__
        # (which would force base_origin to CONF) is not triggered here.
        self.__dict__['base_dir'] = base_dir
        self.__dict__['base_origin'] = base_origin
        if not os.path.isdir(base_dir):
            os.makedirs(base_dir)
        # These go through __setattr__, which expands $D4V1D_DIR and keeps
        # the raw values for later re-expansion.
        self.data_dir = data_dir
        self.conf_dir = conf_dir

    def dumpj(self) -> Dict[str, str]:
        """
        Returns a dictionary representation of this object.

        NOTE(review): the *expanded* data/conf dirs are dumped, so a config
        that used the $D4V1D_DIR placeholder does not round-trip the
        placeholder itself — kept as original behaviour.
        """
        dump: Dict[str, str] = {
            'data_dir': self.data_dir,
            'conf_dir': self.conf_dir
        }
        # base_dir is only persisted when it actually came from a config file.
        if self.base_origin == BaseOrigin.CONF:
            dump['base_dir'] = self.base_dir
        return dump

    def __setattr__(self, k: str, v: Any) -> None:
        if k in ('data_dir', 'conf_dir',):
            # Keep the raw value (may contain $D4V1D_DIR) under a '__'-prefixed
            # __dict__ key, and store the expanded path under the public name.
            self.__dict__[f'__{k}'] = v
            self.__dict__[k] = v.replace('$D4V1D_DIR', self.base_dir)
            if not os.path.isdir(self.__dict__[k]):
                os.makedirs(self.__dict__[k])
        elif k == 'base_dir':
            self.__dict__[k] = v
            self.__dict__['base_origin'] = BaseOrigin.CONF
            # Re-expand both directories from their raw values. Read them via
            # __dict__: the original used `self.__data_dir`, which Python
            # name-mangles to `_PlatformsConfig__data_dir` and therefore never
            # matched the '__data_dir' key stored above — every base_dir
            # reassignment raised AttributeError.
            self.data_dir = self.__dict__['__data_dir']
            self.conf_dir = self.__dict__['__conf_dir']
        elif k.startswith('_'):
            # platform-specific options should always be prefixed with an
            # underscore; they are stored directly.
            self.__dict__[k] = v
        # Any other attribute assignment is silently ignored (original
        # behaviour, preserved for compatibility).

    @classmethod
    def loadj(cls, j: Dict[str, str]) -> "PlatformsConfig":
        """
        Creates a new PlatformsConfig object from a dictionary.

        Precedence for the base path: config file > D4V1D_DIR environment
        variable > default ('_' directory next to the package).
        """
        base_dir: str
        base_origin: BaseOrigin
        env_base = os.getenv('D4V1D_DIR')  # read once instead of twice
        if 'base_dir' in j.keys():
            base_dir = j['base_dir']
            base_origin = BaseOrigin.CONF
        elif env_base:
            base_dir = env_base
            base_origin = BaseOrigin.ENV
        else:
            base_dir = os.path.join(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')), '_')
            base_origin = BaseOrigin.DEFAULT
        return PlatformsConfig(base_dir, base_origin, j['data_dir'], j['conf_dir'])
| MattMoony/d4v1d | d4v1d/config/platforms.py | platforms.py | py | 3,549 | python | en | code | 34 | github-code | 6 | [
{
"api_name": "enum.Enum",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "os.path.isdir",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number":... |
39131054965 | import os
import base64
from pathlib import Path
from sys import stdout
from Get_data import get_keys
from encrypt_setting import *
class colors:
    """ANSI escape sequences used to colorize terminal output."""

    def __init__(self):
        # Foreground color codes, plus the reset sequence.
        self.blue = "\033[94m"
        self.red = "\033[91m"
        self.green = "\033[92m"
        self.end = "\033[0m"
col = colors()
def print_hacked():
    """Print the red ASCII-art banner to stdout (uses the global `col`)."""
    print(col.red+"""
          /|   /|   ---------------------------
          ||__||   |                           |
         /   O O\__     Hacked  Hacked  Hacked |
        /          \    operating system       |
       /      \     \                          |
      /   _    \     \ ----------------------
     /    |\____\     \     ||
    /     | | | |\____/     ||
   /       \| | | |/ |     __||
  /  /  \   -------  |_____| ||
 /   |   | |   |     |     | ||
 |   |   | |   |     |     | ||
 |  |_|_|_| |  \_____/      ||
 /\                 || LLLL ||
/  ~~~~~~~~~~~~~~~~ ||
""" + col.end)
def encrypt_executeable():
# 경로 설정
p = Path('/Users/realsung/Desktop')
# base64 인코딩되어있는 key값
key = get_keys()
list_f = []
# 확장자들
extensions = ["*"] # ['jpg', 'png', 'jpeg', 'iso','exe', 'mp3', "mp4", 'zip', 'rar', 'txt', 'iso']
for extension in extensions:
try:
searche = list(p.glob('**/*.{}'.format(extension)))
for File in searche:
File = str(File)
if File.endswith(".unifox"):
pass
else:
#x = x.split("/")[-1]
list_f.append(File)
#print(File)
except OSError:
print("Permission Error")
for i in list_f:
file_name = i.split("/")[-1]
file_path = i.replace(file_name, "")
word = col.blue+"Encryption: "+col.end+str(i)
print(word)
os.chdir(file_path)
encrypt(getkey(base64.b64decode(key)), file_name)
try:
os.remove(file_name)
except OSError:
pass
print(col.green+"\n* Finish Encryption *\n") | realsung/Ransomeware | encrypt.py | encrypt.py | py | 2,052 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "Get_data.get_keys",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "base64.b64decode",
"line_... |
17273180201 | import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
from scipy.sparse.linalg.eigen.arpack import eigsh
import sys
from scipy.sparse.linalg import norm as sparsenorm
from scipy.linalg import qr
# from sklearn.metrics import f1_score
def parse_index_file(filename):
    """Parse an index file containing one integer per line.

    Args:
        filename: path to a text file with one integer per line.

    Returns:
        list[int]: the parsed integers, in file order.
    """
    # Use a context manager so the file handle is always closed
    # (the original `for line in open(filename)` leaked it).
    with open(filename) as index_file:
        return [int(line.strip()) for line in index_file]
def sample_mask(idx, l):
    """Create a boolean mask of length ``l`` that is True at ``idx``.

    Args:
        idx: index or sequence of indices to mark True.
        l: total length of the mask.

    Returns:
        np.ndarray: boolean mask of shape (l,).
    """
    mask = np.zeros(l)
    mask[idx] = 1
    # BUG FIX: the `np.bool` alias was deprecated in NumPy 1.20 and
    # removed in 1.24; the builtin `bool` is the supported, identical
    # spelling of the dtype.
    return np.array(mask, dtype=bool)
#
# def calc_f1(y_true, y_pred):
# y_true = np.argmax(y_true, axis=1)
# y_pred = np.argmax(y_pred, axis=1)
# return f1_score(y_true, y_pred, average="micro"), f1_score(y_true, y_pred, average="macro")
#
#
# def load_data(dataset_str):
# """Load data."""
# names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
# objects = []
# for i in range(len(names)):
# with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
# if sys.version_info > (3, 0):
# objects.append(pkl.load(f, encoding='latin1'))
# else:
# objects.append(pkl.load(f))
#
# x, y, tx, ty, allx, ally, graph = tuple(objects)
# test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
# test_idx_range = np.sort(test_idx_reorder)
#
# if dataset_str == 'citeseer':
# # Fix citeseer dataset (there are some isolated nodes in the graph)
# # Find isolated nodes, add them as zero-vecs into the right position
# test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
# tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
# tx_extended[test_idx_range-min(test_idx_range), :] = tx
# tx = tx_extended
# ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
# ty_extended[test_idx_range-min(test_idx_range), :] = ty
# ty = ty_extended
#
# features = sp.vstack((allx, tx)).tolil()
# features[test_idx_reorder, :] = features[test_idx_range, :]
# adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
#
# labels = np.vstack((ally, ty))
# labels[test_idx_reorder, :] = labels[test_idx_range, :]
#
# idx_test = test_idx_range.tolist()
# idx_train = range(len(y))
# idx_val = range(len(y), len(y)+500)
#
# train_mask = sample_mask(idx_train, labels.shape[0])
# val_mask = sample_mask(idx_val, labels.shape[0])
# test_mask = sample_mask(idx_test, labels.shape[0])
#
# y_train = np.zeros(labels.shape)
# y_val = np.zeros(labels.shape)
# y_test = np.zeros(labels.shape)
# y_train[train_mask, :] = labels[train_mask, :]
# y_val[val_mask, :] = labels[val_mask, :]
# y_test[test_mask, :] = labels[test_mask, :]
#
# return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask
#
def load_data(dataset_str):
    """Load a citation dataset from the pickled ``data/ind.<name>.*`` files.

    Unlike ``load_data_original`` (standard split), the training set here
    is all labeled nodes except the last 500, which form the validation set.

    Args:
        dataset_str: dataset name, e.g. 'cora' / 'citeseer' / 'pubmed'.

    Returns:
        (adj, features, y_train, y_val, y_test,
         train_mask, val_mask, test_mask)
    """
    names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
    objects = []
    for i in range(len(names)):
        with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
            # latin1 is needed on Python 3 to unpickle Python-2 pickles.
            if sys.version_info > (3, 0):
                objects.append(pkl.load(f, encoding='latin1'))
            else:
                objects.append(pkl.load(f))
    x, y, tx, ty, allx, ally, graph = tuple(objects)
    test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
    test_idx_range = np.sort(test_idx_reorder)
    if dataset_str == 'citeseer':
        # Fix citeseer dataset (there are some isolated nodes in the graph)
        # Find isolated nodes, add them as zero-vecs into the right position
        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range-min(test_idx_range), :] = tx
        tx = tx_extended
        ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
        ty_extended[test_idx_range-min(test_idx_range), :] = ty
        ty = ty_extended
    # Stack labeled + test features, then restore the original node order.
    features = sp.vstack((allx, tx)).tolil()
    features[test_idx_reorder, :] = features[test_idx_range, :]
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
    labels = np.vstack((ally, ty))
    labels[test_idx_reorder, :] = labels[test_idx_range, :]
    # Split: train = all labeled minus the last 500, val = those 500.
    idx_test = test_idx_range.tolist()
    idx_train = range(len(ally)-500)
    idx_val = range(len(ally)-500, len(ally))
    train_mask = sample_mask(idx_train, labels.shape[0])
    val_mask = sample_mask(idx_val, labels.shape[0])
    test_mask = sample_mask(idx_test, labels.shape[0])
    # One-hot label matrices, zeroed outside each split's mask.
    y_train = np.zeros(labels.shape)
    y_val = np.zeros(labels.shape)
    y_test = np.zeros(labels.shape)
    y_train[train_mask, :] = labels[train_mask, :]
    y_val[val_mask, :] = labels[val_mask, :]
    y_test[test_mask, :] = labels[test_mask, :]
    return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask
def load_data_original(dataset_str):
    """Load a citation dataset with the standard split: train = the
    ``x``/``y`` nodes, validation = the following 500 nodes.

    Args:
        dataset_str: dataset name, e.g. 'cora' / 'citeseer' / 'pubmed'.

    Returns:
        (adj, features, y_train, y_val, y_test,
         train_mask, val_mask, test_mask)
    """
    names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
    objects = []
    for i in range(len(names)):
        with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
            # latin1 is needed on Python 3 to unpickle Python-2 pickles.
            if sys.version_info > (3, 0):
                objects.append(pkl.load(f, encoding='latin1'))
            else:
                objects.append(pkl.load(f))
    x, y, tx, ty, allx, ally, graph = tuple(objects)
    test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
    test_idx_range = np.sort(test_idx_reorder)
    if dataset_str == 'citeseer':
        # Fix citeseer dataset (there are some isolated nodes in the graph)
        # Find isolated nodes, add them as zero-vecs into the right position
        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range-min(test_idx_range), :] = tx
        tx = tx_extended
        ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
        ty_extended[test_idx_range-min(test_idx_range), :] = ty
        ty = ty_extended
    # Stack labeled + test features, then restore the original node order.
    features = sp.vstack((allx, tx)).tolil()
    features[test_idx_reorder, :] = features[test_idx_range, :]
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
    labels = np.vstack((ally, ty))
    labels[test_idx_reorder, :] = labels[test_idx_range, :]
    # Standard split: train on the x/y nodes, validate on the next 500.
    idx_test = test_idx_range.tolist()
    idx_train = range(len(y))
    idx_val = range(len(y), len(y)+500)
    train_mask = sample_mask(idx_train, labels.shape[0])
    val_mask = sample_mask(idx_val, labels.shape[0])
    test_mask = sample_mask(idx_test, labels.shape[0])
    # One-hot label matrices, zeroed outside each split's mask.
    y_train = np.zeros(labels.shape)
    y_val = np.zeros(labels.shape)
    y_test = np.zeros(labels.shape)
    y_train[train_mask, :] = labels[train_mask, :]
    y_val[val_mask, :] = labels[val_mask, :]
    y_test[test_mask, :] = labels[test_mask, :]
    return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask
def sparse_to_tuple(sparse_mx):
    """Convert a scipy sparse matrix (or a list of them, converted
    in place) to the (coords, values, shape) tuple representation."""
    def to_tuple(mx):
        coo = mx if sp.isspmatrix_coo(mx) else mx.tocoo()
        coords = np.vstack((coo.row, coo.col)).transpose()
        return coords, coo.data, coo.shape

    if isinstance(sparse_mx, list):
        # Preserve the original in-place mutation of the caller's list.
        for idx, mx in enumerate(sparse_mx):
            sparse_mx[idx] = to_tuple(mx)
        return sparse_mx
    return to_tuple(sparse_mx)
def nontuple_preprocess_features(features):
    """Row-normalize a feature matrix so each row sums to 1.

    Rows summing to zero are left all-zero (their inverse is clamped
    to 0 instead of inf).
    """
    row_sums = np.array(features.sum(1)).flatten()
    inv = np.power(row_sums, -1)
    inv[np.isinf(inv)] = 0.
    return sp.diags(inv).dot(features)
def preprocess_features(features):
    """Row-normalize a feature matrix and return the tuple
    (coords, values, shape) representation."""
    row_sums = np.array(features.sum(1)).flatten()
    inv = np.power(row_sums, -1)
    inv[np.isinf(inv)] = 0.     # zero rows stay zero instead of inf
    normalized = sp.diags(inv).dot(features)
    return sparse_to_tuple(normalized)
def normalize_adj(adj):
    """Symmetrically normalize an adjacency matrix: D^-1/2 A D^-1/2."""
    adj = sp.coo_matrix(adj)
    degrees = np.array(adj.sum(1)).flatten()
    inv_sqrt = np.power(degrees, -0.5)
    inv_sqrt[np.isinf(inv_sqrt)] = 0.   # isolated nodes keep zero rows
    d_inv_sqrt = sp.diags(inv_sqrt)
    return adj.dot(d_inv_sqrt).transpose().dot(d_inv_sqrt).tocoo()
def nontuple_preprocess_adj(adj):
    """Add self-loops (I + A), symmetrically normalize, return CSR."""
    with_self_loops = sp.eye(adj.shape[0]) + adj
    return normalize_adj(with_self_loops).tocsr()
def column_prop(adj):
    """Probability distribution over columns, proportional to each
    column's L2 norm."""
    norms = sparsenorm(adj, axis=0)
    return norms / norms.sum()
def mix_prop(adj, features, sparseinputs=False):
    """Probability distribution proportional to (column L2 norm of adj)
    times (row L2 norm of features).

    Args:
        sparseinputs: True when `features` is a scipy sparse matrix.
    """
    adj_col_norms = sparsenorm(adj, axis=0)
    if sparseinputs:
        feat_row_norms = sparsenorm(features, axis=1)
    else:
        feat_row_norms = np.linalg.norm(features, axis=1)
    mixed = adj_col_norms * feat_row_norms
    return mixed / mixed.sum()
def preprocess_adj(adj):
    """Symmetrically normalize (I + A) and return the tuple
    (coords, values, shape) representation.

    (Dead commented-out Lanczos/SVD experiments from the original
    were removed; behavior is unchanged.)
    """
    with_self_loops = sp.eye(adj.shape[0]) + adj
    adj_normalized = normalize_adj(with_self_loops)
    return sparse_to_tuple(adj_normalized)
from lanczos import lanczos
def dense_lanczos(A,K):
    """Rank-K approximation of a dense matrix A via the external
    `lanczos` routine; returns the result as a CSR sparse matrix.

    NOTE(review): Lanczos assumes A is symmetric — confirm at call sites.
    """
    # Random start vector for the Krylov subspace.
    q = np.random.randn(A.shape[0], )
    Q, sigma = lanczos(A, K, q)
    # Reconstruct A2 = Q_K * Sigma_K * Q_K^T from the first K vectors.
    A2 = np.dot(Q[:,:K], np.dot(sigma[:K,:K], Q[:,:K].T))
    return sp.csr_matrix(A2)
def sparse_lanczos(A,k):
    """Rank-k Lanczos approximation of sparse A: builds an orthonormal
    Krylov basis Q (with full reorthogonalization) and returns
    Q * Sigma * Q^T where Sigma = Q^T A Q.

    NOTE(review): assumes A is symmetric (standard Lanczos requirement)
    — confirm at call sites.  The per-iteration print(i) looks like
    leftover debug output.
    """
    q = sp.random(A.shape[0],1)
    n = A.shape[0]
    Q = sp.lil_matrix(np.zeros((n,k+1)))
    A = sp.lil_matrix(A)
    # First basis vector: the normalized random start vector.
    Q[:,0] = q/sparsenorm(q)
    alpha = 0
    beta = 0
    for i in range(k):
        # Three-term Lanczos recurrence: q = A*q_i - beta*q_{i-1}.
        if i == 0:
            q = A*Q[:,i]
        else:
            q = A*Q[:,i] - beta*Q[:,i-1]
        alpha = q.T*Q[:,i]
        q = q - Q[:,i]*alpha
        q = q - Q[:,:i]*Q[:,:i].T*q # full reorthogonalization
        beta = sparsenorm(q)
        Q[:,i+1] = q/beta
        print(i)
    Q = Q[:,:k]
    Sigma = Q.T*A*Q
    A2 = Q[:,:k]*Sigma[:k,:k]*Q[:,:k].T
    return A2
    # return Q, Sigma
def dense_RandomSVD(A,K):
    """Randomized low-rank approximation: project A onto the range of
    A*G for a Gaussian sketch G with K columns, returning Q Q^T A as a
    CSR sparse matrix."""
    gaussian = np.random.randn(A.shape[0], K)
    sketch = np.dot(A, gaussian)
    # Economy QR gives an orthonormal basis of the sketch's range.
    Q, _ = qr(sketch, mode='economic')
    approx = np.dot(Q, np.dot(Q.T, A))
    return sp.csr_matrix(approx)
def construct_feed_dict(features, support, labels, labels_mask, placeholders):
    """Build the feed dictionary mapping TF placeholders to values."""
    feed_dict = {
        placeholders['labels']: labels,
        placeholders['labels_mask']: labels_mask,
        placeholders['features']: features,
        # Shape of the non-zero-values array of the sparse feature tuple.
        placeholders['num_features_nonzero']: features[1].shape,
    }
    for i, sup in enumerate(support):
        feed_dict[placeholders['support'][i]] = sup
    return feed_dict
def chebyshev_polynomials(adj, k):
    """Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation)."""
    print("Calculating Chebyshev polynomials up to order {}...".format(k))
    # Scaled Laplacian 2L/lambda_max - I maps the spectrum into [-1, 1],
    # the domain on which the Chebyshev basis is defined.
    adj_normalized = normalize_adj(adj)
    laplacian = sp.eye(adj.shape[0]) - adj_normalized
    largest_eigval, _ = eigsh(laplacian, 1, which='LM')
    scaled_laplacian = (2. / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])
    # T_0 = I, T_1 = L_scaled.
    t_k = list()
    t_k.append(sp.eye(adj.shape[0]))
    t_k.append(scaled_laplacian)
    # Chebyshev recurrence: T_k = 2 * L_scaled * T_{k-1} - T_{k-2}.
    def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):
        s_lap = sp.csr_matrix(scaled_lap, copy=True)
        return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two
    for i in range(2, k+1):
        t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian))
    return sparse_to_tuple(t_k)
| matenure/FastGCN | utils.py | utils.py | py | 12,670 | python | en | code | 514 | github-code | 6 | [
{
"api_name": "numpy.zeros",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.bool",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "sys.version_info",
"line_... |
8316768665 | from brownie import FundMe
from scripts.helpful_scripts import get_account
def fund():
    """Fund the most recently deployed FundMe contract with the minimum
    entrance fee, sent from the default account."""
    # Set variable fund_me to the latest deployment of the FundMe contract
    fund_me = FundMe[-1]
    account = get_account()
    entrance_fee = fund_me.getEntranceFee()
    # NOTE(review): the fee is printed twice (raw, then formatted).
    print(entrance_fee)
    print(f"The current entry fee is {entrance_fee}")
    print("Funding")
    fund_me.fund({"from": account, "value": entrance_fee})
def withdraw():
    """Withdraw the full balance of the latest FundMe deployment,
    signed by the default account."""
    fund_me = FundMe[-1]
    account = get_account()
    fund_me.withdraw({"from": account})
# 0.025000000000000000 ether (at 1 ETH = $2000)
def main():
    """Entry point for `brownie run`: fund the contract, then withdraw."""
    fund()
    withdraw()
| AgenP/brownie_fund_me | scripts/fund_and_withdraw.py | fund_and_withdraw.py | py | 618 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "brownie.FundMe",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "scripts.helpful_scripts.get_account",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "brownie.FundMe",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "scrip... |
27535979388 |
import os
import matplotlib.pyplot as plt
def get_project_path(project_name):
    """Return the absolute path of the project root directory.

    Args:
        project_name: name of the project directory, e.g. "pythonProject".

    Returns:
        str: this file's path truncated just after the first occurrence
        of ``project_name`` (i.e. ``******/project_name``).

    Raises:
        ValueError: if ``project_name`` does not occur in this file's
        path (the original silently returned a garbage path because
        ``str.find`` returns -1 in that case).
    """
    # Directory containing this source file.
    cur_path = os.path.abspath(os.path.dirname(__file__))
    idx = cur_path.find(project_name)
    if idx == -1:
        raise ValueError(
            f"project name {project_name!r} not found in path {cur_path!r}"
        )
    return cur_path[:idx] + project_name
def draw_img_groups(img_groups: list, imgs_every_row: int = 8, block: bool = True, show_time: int = 5):
    """Show several groups of grayscale images in a matplotlib grid,
    one row per group.

    Args:
        img_groups: list of image batches; each entry appears to be a
            torch tensor of shape (batch, 1, H, W) given the
            .cpu().squeeze(1).detach() chain — TODO confirm at callers.
        imgs_every_row: how many images of each group are drawn.
        block: if True, plt.show() blocks; otherwise the figure is shown
            for ``show_time`` seconds and then closed.
        show_time: display duration in seconds when ``block`` is False.
    """
    num_groups = len(img_groups)
    for i in range(num_groups):
        # Each group must provide at least one full row of images.
        assert img_groups[i].shape[0] >= imgs_every_row
        # Move to CPU, drop the channel dim, convert to numpy (mutates
        # the caller's list in place).
        img_groups[i] = img_groups[i].cpu().squeeze(1).detach().numpy()
    fig = plt.figure()
    gs = fig.add_gridspec(num_groups, imgs_every_row)
    for i in range(num_groups):
        for j in range(imgs_every_row):
            ax = fig.add_subplot(gs[i, j])
            ax.imshow(img_groups[i][j], cmap="gray")
            ax.axis("off")
    plt.tight_layout()
    plt.show(block=block)
    if not block:
        # Non-blocking mode: keep the window up briefly, then close.
        plt.pause(show_time)
        plt.close("all")
print(get_project_path(project_name="Defense"))
| fym1057726877/Defense | utils.py | utils.py | py | 1,115 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.abspath",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure... |
31280749903 | import numpy as np, glob, face_recognition, ntpath, pickle, os
from os.path import basename
from shutil import copyfile
def copy_face_image():
    """Copy every avatar JPEG that contains at least one detectable face
    into data/human_face/, keeping the original file name."""
    for i in glob.glob("data/avatars/*.jpg"):
        image = face_recognition.load_image_file(i)
        face_locations = face_recognition.face_locations(image)
        if face_locations:
            print(i)
            copyfile(i, "data/human_face/" + ntpath.basename(i))
def encoding():
    """Compute a face encoding for every image in data/human_face/ and
    pickle the encodings plus a parallel list of file-name stems.

    NOTE(review): ``face_encodings(image)[0]`` assumes each image still
    yields at least one encoding; an image without one raises IndexError
    — confirm the pre-filtering in copy_face_image suffices.
    """
    known_faces = []
    name_index = []
    for i in glob.glob("data/human_face/*.jpg"):
        print(i)
        image = face_recognition.load_image_file(i)
        face_encoding = face_recognition.face_encodings(image)[0]
        known_faces.append(face_encoding)
        # Remember which file each encoding came from (name sans extension).
        filename = os.path.splitext(basename(i))[0]
        name_index.append(filename)
    with open('data/encodings', 'wb') as fp:
        pickle.dump(known_faces, fp)
    with open('data/index', 'wb') as fp:
        pickle.dump(name_index, fp)
def test_encoding():
    """Sanity-check the pickled encodings: find the closest known face
    to a fixed sample avatar and print its name."""
    with open('data/encodings', 'rb') as fp:
        known_faces = pickle.load(fp)
    with open('data/index', 'rb') as fp:
        name_index = pickle.load(fp)
    test_file = "data/avatars/4557.jpg"
    image = face_recognition.load_image_file(test_file)
    face_encoding = face_recognition.face_encodings(image)[0]
    # Distance to every known encoding; the smallest is the best match.
    face_distances = face_recognition.face_distance(known_faces, face_encoding)
    min_index = np.argmin(face_distances)
    print(name_index[min_index])
#copy_face_image()
encoding()
test_encoding()
| chechiachang/scouter | face_recognition/encoding_file_generator.py | encoding_file_generator.py | py | 1,509 | python | en | code | 13 | github-code | 6 | [
{
"api_name": "glob.glob",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "face_recognition.load_image_file",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "face_recognition.face_locations",
"line_number": 8,
"usage_type": "call"
},
{
"api_name"... |
17034068791 | import logging, os, json
from rest_framework.response import Response
from rest_framework.decorators import api_view
from rest_framework import status
from app.worker.tasks import recommend_options_exe
logger = logging.getLogger(__name__)
# BUG FIX: the decorator only allowed GET, so the `request.method ==
# 'POST'` branch below was unreachable (DRF rejects POST with 405
# before the view body runs); POST is now accepted as the body intends.
@api_view(['GET', 'POST'])
def get_recommend_options(request, format=None):
    """Queue background option-recommendation tasks for the POSTed tickers.

    Requires the STOCK_API_KEY value in the Authorization header;
    expects a JSON body of the form {"tickers": [...]} on POST.
    GET (or a POST without 'tickers') returns 403.
    """
    key = os.environ.get('STOCK_API_KEY')
    # .get() avoids a KeyError (-> HTTP 500) when the header is missing;
    # a missing or mismatching key is rejected identically.
    auth = request.META.get('HTTP_AUTHORIZATION')
    if auth is None or key != auth:
        return Response('', status=status.HTTP_401_UNAUTHORIZED)
    if request.method == 'POST':
        data = request.data
        if 'tickers' in data:
            for ticker in data['tickers']:
                # Fire-and-forget Celery task per ticker.
                recommend_options_exe.delay(ticker)
        return Response(data, status=status.HTTP_200_OK)
    return Response('', status=status.HTTP_403_FORBIDDEN)
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.respon... |
72474001787 | import random
import numpy as np
from math import sqrt, log
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
x1_list = []
x2_list = []
y_list = []
counter = 0
def drawFunc(minX, minY, maxX, maxY):
    """Plot the objective f(x1, x2) as a 3-D surface over the rectangle
    [minX, maxX] x [minY, maxY] (0.1 grid step); blocks on plt.show()."""
    fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
    ax.set_xlabel('x1')
    ax.set_ylabel('x2')
    ax.set_zlabel('f(x1,x2)')
    x1_array = np.arange(minX, maxX, 0.1)
    x2_array = np.arange(minY, maxY, 0.1)
    x1_array, x2_array = np.meshgrid(x1_array, x2_array)
    R = f(x1_array, x2_array)
    ax.plot_surface(x1_array, x2_array, R, color='b', alpha=0.5)
    plt.show()
def drawBoder(ax, x1, g, z_min, z_max):
    """Draw the boundary curve x2 = g(x1) as a vertical surface on ax.

    NOTE(review): z_min/z_max are unused — the z range is hard-coded
    to arange(0, 300, 100); confirm whether they were meant to be used.
    """
    zs = np.arange(0, 300, 100)
    X, Z = np.meshgrid(x1, zs)
    Y = g(X)
    #fig = plt.figure()
    #ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(X, Y, Z)
def show(x1_list, x2_list):
    """Visualize the optimization trace: plot f over a sampled region
    and overlay the sequence of simplex centers — start point black,
    intermediate points red, final point green, joined by a line.

    Args:
        x1_list, x2_list: parallel coordinate lists of visited centers.
    """
    N = int(x1_list.__len__())
    if (N <= 0):
        return
    fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
    x1_array = []
    x2_array = []
    # Sample a 0..5 grid; keep points where barier() is True.
    # NOTE(review): barier() is True for INFEASIBLE points, so the
    # surface is built over infeasible grid points — confirm whether
    # `not barier(...)` was intended here.
    nums = np.arange(0, 5, 0.1)
    for i in range(len(nums)):
        for j in range(len(nums)):
            if(barier(nums[i], nums[j])):
                x1_array.append(nums[i])
                x2_array.append(nums[j])
    x1_array = np.array(x1_array)
    x2_array = np.array(x2_array)
    x1_array, x2_array = np.meshgrid(x1_array, x2_array)
    R = f(x1_array, x2_array)
    ax.set_xlabel('x1')
    ax.set_ylabel('x2')
    ax.set_zlabel('f(x1,x2)')
    ax.plot_surface(x1_array, x2_array, R, color='b', alpha=0.5)
    x1_list2 = []
    x2_list2 = []
    f_list = []
    # Start point in black.
    ax.scatter(x1_list[0], x2_list[0], f(x1_list[0], x2_list[0]), c='black')
    x1_list2.append(x1_list[0])
    x2_list2.append(x2_list[0])
    f_list.append(f(x1_list[0], x2_list[0]))
    # Intermediate points in red.
    for n in range(1, N - 1):
        ax.scatter(x1_list[n], x2_list[n], f(x1_list[n], x2_list[n]), c='red')
        x1_list2.append(x1_list[n])
        x2_list2.append(x2_list[n])
        f_list.append(f(x1_list[n], x2_list[n]))
    # Final point in green.
    ax.scatter(x1_list[N - 1], x2_list[N - 1], f(x1_list[N - 1], x2_list[N - 1]), c='green')
    x1_list2.append(x1_list[N - 1])
    x2_list2.append(x2_list[N - 1])
    # BUG FIX: the original appended f(x1_list[N-1], x2_list[n]) — a
    # stale loop variable (and a NameError when N == 2); the final
    # point's own x2 coordinate is what belongs here.
    f_list.append(f(x1_list[N - 1], x2_list[N - 1]))
    ax.plot(x1_list2, x2_list2, f_list, color="black")
    plt.show()
def f_1(x1, x2):
    """Objective restricted to the feasible region: returns
    f(x1, x2) = (x1-6)^2 + (x2-7)^2 when all four constraints hold,
    otherwise 0.

    BUG FIX: the original called the single-argument boundary helpers
    g1_1..g4_1 with two arguments, raising TypeError on every call;
    the two-argument constraint predicates g1_t..g4_t are what the
    feasibility check requires.
    """
    if g1_t(x1, x2) and g2_t(x1, x2) and g3_t(x1, x2) and g4_t(x1, x2):
        return (x1-6)**2 + (x2-7)**2
    return 0
def g1_1(x1):
    """x2 value on the boundary of constraint g1 for a given x1."""
    return (6 - 3 * x1) / 2


def g2_1(x1):
    """x2 value on the boundary of constraint g2 for a given x1."""
    return (x1 + 3) * 1.0


def g3_1(x1):
    """x2 value on the boundary of constraint g3 for a given x1."""
    return (7 - x1) * 1.0


def g4_1(x1):
    """x2 value on the boundary of constraint g4 for a given x1."""
    return (2 * x1 - 4) / 3
def f(x1, x2):
    """Objective: squared Euclidean distance from the point (6, 7)."""
    dx = x1 - 6
    dy = x2 - 7
    return dx ** 2 + dy ** 2
def g1(x1, x2):
    """Constraint 1 residual; feasible when <= 0."""
    return 6 - 3 * x1 - 2 * x2


def g2(x1, x2):
    """Constraint 2 residual; feasible when <= 0."""
    return x2 - x1 - 3


def g3(x1, x2):
    """Constraint 3 residual; feasible when <= 0."""
    return x1 + x2 - 7


def g4(x1, x2):
    """Constraint 4 residual; feasible when <= 0."""
    return 2 * x1 - 3 * x2 - 4


def g1_t(x, y):
    """True when constraint 1 is satisfied at (x, y)."""
    return g1(x, y) <= 0


def g2_t(x, y):
    """True when constraint 2 is satisfied at (x, y)."""
    return g2(x, y) <= 0


def g3_t(x, y):
    """True when constraint 3 is satisfied at (x, y)."""
    return g3(x, y) <= 0


def g4_t(x, y):
    """True when constraint 4 is satisfied at (x, y)."""
    return g4(x, y) <= 0
def F(x1, x2, r):
    """Penalized objective using the inverse-barrier penalty P."""
    return f(x1, x2) + P(x1, x2, r)


def F2(x1, x2, r):
    """Penalized objective with a logarithmic barrier: the barrier term
    grows without bound as any g_i approaches 0 from below."""
    barrier = (log(-g1(x1, x2)) + log(-g2(x1, x2))
               + log(-g3(x1, x2)) + log(-g4(x1, x2)))
    return f(x1, x2) - r * barrier


def P(x1, x2, r):
    """Inverse-barrier penalty term: -r * sum(1 / g_i)."""
    total = 1/g1(x1, x2) + 1/g2(x1, x2) + 1/g3(x1, x2) + 1/g4(x1, x2)
    return -r * total


def P2(x1, x2, r):
    """Log-barrier penalty term: -r * sum(log(-g_i))."""
    total = (log(-g1(x1, x2)) + log(-g2(x1, x2))
             + log(-g3(x1, x2)) + log(-g4(x1, x2)))
    return -r * total
# Sentinel value used to mark simplex vertices that were already rejected.
min_val = -1500000


def calc_r(a, n):
    """Simplex edge increment r for edge length a in n dimensions."""
    return a * (sqrt(n + 1) - 1 + n) / (n * sqrt(2))


def calc_s(a, n):
    """Simplex edge increment s for edge length a in n dimensions."""
    return a * (sqrt(n + 1) - 1) / (n * sqrt(2))


def dist(p, q):
    """Euclidean distance between the 2-D points p and q."""
    return sqrt((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2)


def max_f(values):
    """Index of the largest element in `values`."""
    return values.index(max(values))


def calc_x_next(x, index, n):
    """Reflect vertex `index` of the simplex `x` through the centroid
    of the remaining vertices."""
    reflected = np.array([0, 0])
    for j, vertex in enumerate(x):
        if j == index:
            continue
        reflected = reflected + vertex
    reflected *= (2 / (n - 1))
    reflected -= x[index]
    return reflected


def calc_centr(x):
    """Centroid of the three simplex vertices, as an (x1, x2) tuple."""
    cx = (x[0][0] + x[1][0] + x[2][0]) / 3
    cy = (x[0][1] + x[1][1] + x[2][1]) / 3
    return cx, cy


def barier(x1, x2):
    """True when (x1, x2) violates at least one constraint."""
    return not (g1_t(x1, x2) and g2_t(x1, x2) and g3_t(x1, x2) and g4_t(x1, x2))
def simplexnyi_method(x0, e, a, n, r):
    """Minimize the log-barrier objective F2(., ., r) with a simplex
    search started at x0, shrinking the simplex until all edges are <= e.

    Args:
        x0: strictly feasible start point [x1, x2].
        e: edge-length convergence tolerance.
        a: initial simplex edge length (halved when vertices leave
           the feasible region).
        n: number of simplex vertices (3 for the 2-D problem).
        r: barrier coefficient passed to F2.

    Returns:
        (x1, x2, a) — the final simplex centroid and remaining edge
        length; bare `return` (None) when x0 itself is infeasible.

    Side effects: increments the global `counter` and appends every
    visited centroid to the global x1_list/x2_list trace.

    NOTE(review): `center` is referenced at the top of the outer loop
    before its first assignment — if the feasibility check fires on the
    very first iteration this raises NameError; likewise the bare
    `return` for an infeasible x0 makes the caller's 3-tuple unpack
    raise TypeError. Confirm intended behavior.
    """
    global counter
    # Two extra vertices offset from x0 by the simplex increments r/s.
    x1 = np.array([x0[0] + calc_r(a, n), x0[1] + calc_s(a, n)])
    x2 = np.array([x0[0] + calc_s(a, n), x0[1] + calc_r(a, n)])
    if (barier(x0[0], x0[1])): return;
    # Shrink the simplex until all of its vertices are feasible.
    while (barier(x1[0], x1[1]) or barier(x2[0], x2[1])):
        a /= 2
        x1 = np.array([x0[0] + calc_r(a, n), x0[1] + calc_s(a, n)])
        x2 = np.array([x0[0] + calc_s(a, n), x0[1] + calc_r(a, n)])
    x = [x0, x1, x2]
    counter += 3
    # Iterate until every simplex edge is within tolerance e.
    while (dist(x[0], x[1]) > e or dist(x[1], x[2]) > e or dist(x[2], x[0]) > e):
        #print("center =", calc_centr(x), "f =", f(calc_centr(x)[0], calc_centr(x)[1]))
        if (barier(x[0][0], x[0][1]) or barier(x[1][0], x[1][1]) or barier(x[2][0], x[2][1])):
            return (center[0], center[1], a)
        center = calc_centr(x)
        f_list = []
        x1_list.append(center[0]); x2_list.append(center[1])
        counter += 1
        # Objective at each vertex under the current barrier weight.
        f_list.append(F2(x[0][0], x[0][1], r))
        f_list.append(F2(x[1][0], x[1][1], r))
        f_list.append(F2(x[2][0], x[2][1], r))
        counter += 1
        while(True):
            f_values = f_list
            # Reflect the worst (largest-F2) vertex through the others.
            i = max_f(f_values)
            xn = calc_x_next(x, i, n)
            if (not barier(xn[0], xn[1])):
                fn = F2(xn[0], xn[1], r); counter += 1
                # Accept the reflection only if it improves the vertex.
                if (f_values[i] > fn): x[i] = xn ; break
            f_values[i] = min_val
            # All reflections rejected: shrink the simplex around x[0].
            if (f_values[0] == min_val and f_values[1] == min_val and f_values[2] == min_val):
                a /= 2
                x[0] = x[0]
                x[1] = np.array([x[0][0] + calc_r(a, n), x[0][1] + calc_s(a, n)])
                x[2] = np.array([x[0][0] + calc_s(a, n), x[0][1] + calc_r(a, n)])
                break
        cur_center = calc_centr(x)
        #print(center)
        # Bail out with the last feasible center if we stepped outside.
        if barier(cur_center[0], cur_center[1]):
            return (center[0], center[1], a)
    point = calc_centr(x)
    return (point[0], point[1], a)
def barrier_function_method(x1, x2, r, C, e, a, n, k):
    """Log-barrier (interior-point) outer loop: minimize F2 with the
    simplex method, then shrink the barrier coefficient r by C and
    recurse until the penalty term P2 is within tolerance e.

    Args:
        x1, x2: strictly feasible start point.
        r: current barrier coefficient; C: its reduction factor.
        e: convergence tolerance on |P2|.
        a, n: simplex edge length and vertex count for the inner solver.
        k: outer-iteration counter (pass 0 initially).

    Returns:
        [(x1, x2, f(x1, x2)) rounded to the global round_num digits, k].

    Side effect: increments the global `counter`.
    """
    global counter
    counter += 1
    min_x1, min_x2, a = simplexnyi_method([x1, x2], e, a, n, r)
    # The barrier term's magnitude measures distance to convergence.
    fine = P2(min_x1, min_x2, r)
    if (abs(fine) <= e):
        return [(round(min_x1, round_num),
                 round(min_x2, round_num),
                 round(f(min_x1, min_x2), round_num)),
                k]
    k += 1
    # Weaken the barrier and continue from the new interior point.
    r = r/C
    return barrier_function_method(min_x1, min_x2, r, C, e, a, n, k)
round_num = 3  # decimal places used when reporting the solution
x1 = 2         # starting point (must be strictly feasible)
x2 = 2
e = 0.001      # convergence tolerance
#a = 0.001
a = 1; n = 3   # initial simplex edge length; number of simplex vertices
r = 1          # initial barrier coefficient
c = 14         # factor by which r shrinks each outer iteration
k = 0          # outer-iteration counter
result = barrier_function_method(x1, x2, r, c, e, a, n, k)
print(f"Barrier function method: {result[0]}; count of iteractions = {result[1]}")
print('Count of compute function =', counter + 1)
show(x1_list, x2_list)
drawFunc(-5, -5, 15, 15)
| AlexSmirno/Learning | 6 Семестр/Оптимизация/Lab_6_test.py | Lab_6_test.py | py | 7,638 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.arang... |
40243178863 | import cv2
from snooker_table import find_snooker_table
from balls import find_balls
from holes import find_holes
# Videó feldolgozás
def process_video(input_path, output_path):
    """Process a snooker video frame by frame: locate the table, balls
    and pockets, detect potted balls, draw aiming/extension lines from
    the white ball through each other ball, and write the annotated
    video to output_path (also previewed in a cv2 window; 'q' quits).

    Args:
        input_path: path of the source video.
        output_path: path of the annotated .mp4 to write.
    """
    # Open the video file
    video_capture = cv2.VideoCapture(input_path)

    # Get video properties
    frame_width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(video_capture.get(cv2.CAP_PROP_FPS))

    # Create VideoWriter object to save the processed video
    out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (frame_width, frame_height))

    frame_count = 0
    previous_result = {}
    # Per-ball pocket prediction and first-missing-frame bookkeeping.
    balls_expected_location = {}
    disappeared_balls = {}
    balls_in_pocket = {
        "top_left": [],
        "top_middle": [],
        "top_right": [],
        "bottom_left": [],
        "bottom_middle": [],
        "bottom_right": []
    }

    while video_capture.isOpened():
        frame_count += 1
        ret, frame = video_capture.read()
        if not ret:
            break

        # Find snooker table boundaries
        snooker_table, x, y, w, h = find_snooker_table(frame.copy())

        # Find balls present on table
        # RESULT CONTAINS LOCATION OF BALLS
        result = {
            "red": []
        }
        balls = find_balls(snooker_table.copy(), result)

        # Check for balls that existed in the previous frame but are
        # missing from the current one.
        for prev_key in previous_result:
            if prev_key != 'white' and prev_key not in result:
                disappeared_balls[prev_key] = frame_count
            if prev_key == 'red' and len(previous_result[prev_key]) != len(result[prev_key]):
                pass  # TODO: red balls would need individual labels; currently they cannot be told apart

        # Check whether each candidate has really disappeared (reappeared
        # balls are dropped; long-missing ones are credited to a pocket).
        for disappeared_ball in list(disappeared_balls.keys()):
            if disappeared_ball in result.keys():
                del disappeared_balls[disappeared_ball]
            else:
                # NOTE(review): the stored value is the frame number at
                # which the ball vanished, compared against a constant —
                # this only triggers after frame 10; an age (frame delta)
                # comparison may have been intended. Confirm.
                if disappeared_balls[disappeared_ball] > 10:  # disappearance frame threshold
                    del disappeared_balls[disappeared_ball]
                    balls_in_pocket[balls_expected_location[disappeared_ball]].append(disappeared_ball)

        previous_result = result

        # Find holes
        holes = find_holes(snooker_table.copy(), balls_in_pocket)

        # Final image
        final_image = cv2.addWeighted(balls, 0.5, holes, 0.5, 0)

        # Compute the possible shots for each ball.
        for ball in result:
            white = result.get("white")
            other = result.get(ball)
            golyok = []
            if ball == "red":
                golyok = other
            else:
                golyok.append(other)

            for golyo in golyok:
                if (white and golyo) and (white != golyo):
                    white_ball_position = (white.get("x"), white.get("y"))  # Example coordinates of the white ball (x, y)
                    other_ball_position = (golyo.get("x"), golyo.get("y"))  # Example coordinates of the other ball (x, y)

                    # Calculate the line between the two points
                    line_thickness = 2
                    cv2.line(final_image, white_ball_position, other_ball_position, (255, 0, 0), line_thickness)

                    # Calculate the extended line beyond the other ball's position
                    delta_x = other_ball_position[0] - white_ball_position[0]
                    delta_y = other_ball_position[1] - white_ball_position[1]

                    temp_x, temp_y = other_ball_position
                    while 0 < temp_x < final_image.shape[1] and 0 <= temp_y < final_image.shape[0]:
                        temp_x += delta_x
                        temp_y += delta_y
                    extended_position = (int(temp_x), int(temp_y))

                    # Draw the extended line
                    cv2.line(final_image, other_ball_position, extended_position, (0, 255, 0), line_thickness)

                    ########################################################################################################
                    # Where could the ball go if it were struck now?
                    # Initial position of the line starting from the other ball
                    current_x, current_y = other_ball_position

                    # Iterate and extend the line until reaching the edge of the image
                    while 0 <= current_x < final_image.shape[1] and 0 <= current_y < final_image.shape[0]:
                        current_x += delta_x
                        current_y += delta_y
                        if current_x > final_image.shape[1]:
                            current_x = final_image.shape[1]
                        # NOTE(review): this comparison/assignment pair looks
                        # inverted relative to the x-clamp above — confirm.
                        if current_y < final_image.shape[0]:
                            current_y = final_image.shape[0]

                    # Mark the final point where the line reaches the edge
                    final_position = (int(current_x), int(current_y))

                    # Find the intersection point with the image boundary
                    max_x, max_y = final_image.shape[1], final_image.shape[0]
                    if delta_x == 0:  # Vertical line
                        final_position = (other_ball_position[0], 0 if delta_y < 0 else max_y - 1)
                    else:
                        slope = delta_y / delta_x
                        if abs(slope) <= max_y / max_x:  # Intersects with left or right boundary
                            final_position = (
                            0 if delta_x < 0 else max_x - 1, int(other_ball_position[1] - slope * other_ball_position[0]))
                        else:  # Intersects with top or bottom boundary
                            final_position = (int(other_ball_position[0] - (1 / slope) * (
                                        other_ball_position[1] - (0 if delta_y < 0 else max_y - 1))),
                                              0 if delta_y < 0 else max_y - 1)

                    cv2.circle(final_image, final_position, 5, (0, 0, 255), -1)
                    ########################################################################################################
                    # Which pocket would the ball be closest to?
                    top_boundary = int(final_image.shape[0] * 0.2)  # 20% of the image height
                    bottom_boundary = int(final_image.shape[0] * 0.8)  # 80% of the image height
                    left_boundary = int(final_image.shape[1] * 0.333)  # 33.3% of the image width
                    right_boundary = int(final_image.shape[1] * 0.666)  # 66.6% of the image width

                    # Check the position of the marked point relative to the defined boundaries
                    if final_position[1] < top_boundary:
                        if final_position[0] < left_boundary:
                            balls_expected_location[ball] = "top_left"
                        elif left_boundary <= final_position[0] <= right_boundary:
                            balls_expected_location[ball] = "top_middle"
                        else:
                            balls_expected_location[ball] = "top_right"
                    elif final_position[1] > bottom_boundary:
                        if final_position[0] < left_boundary:
                            balls_expected_location[ball] = "bottom_left"
                        elif left_boundary <= final_position[0] <= right_boundary:
                            balls_expected_location[ball] = "bottom_middle"
                        else:
                            balls_expected_location[ball] = "bottom_right"

        # Write the processed frame to the output video
        frame[y:y + h, x:x + w] = final_image
        out.write(frame)

        # Display the processed frame (optional)
        cv2.imshow('Processed Frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release video capture and writer
    video_capture.release()
    out.release()
    cv2.destroyAllWindows()
| hirschabel/SZTE-snooker | snooker/process.py | process.py | py | 8,137 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_PROP_FRAME_WIDTH",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_FRAME_HEIGHT",
"line_number": 14,
"usage_type": "attribute"
},
{
"api... |
35935636198 | from gpt4all import GPT4All
import asyncio
import websockets
import datetime
# Startup banner (raw string: the backslashes in the ASCII art are literal).
print(r'''
 $$$$$$\  $$\   $$\  $$$$$$\  $$$$$$\ $$$$$$\
$$  __$$\ $$ | \__| $$  __$$\ \_$$  _|
$$ /  $$ |$$ | $$\ $$$$$$\ $$$$$$\ $$\ $$ /  $$ | $$ |
$$$$$$$$ |$$ | $$ |\____$$\ $$  __$$\ $$ | $$$$$$$$ | $$ |
$$  __$$ |$$$$$$  / $$$$$$$ |$$ | \__|$$ | $$  __$$ | $$ |
$$ | $$ |$$  _$$< $$  __$$ |$$ | $$ | $$ | $$ | $$ |
$$ | $$ |$$ | \$$\\$$$$$$$ |$$ | $$ | $$ | $$ |$$$$$$\
\__| \__|\__| \__|\_______|\__| \__| \__| \__|\______|
''')
print('Akari AI Server v1.2b\nhttps://github.com/76836/Akari')
print('[ok]Preparing AI...')
# Downloads the model on first run (allow_download=True), then loads it.
model = GPT4All("gpt4all-falcon-q4_0.gguf", allow_download=True)
# Template wrapping the user's message; {0} is replaced by the prompt text.
prompt_template = 'USER: {0}\nAKARI: '
# Module-level accumulator consumed by the generation stop-callback in
# handle_websocket; reset after every generation.
tokens_added = ""
print('[ok]Server running.')
async def handle_websocket(websocket, path):
    """Serve one websocket client: echo a ping for "test", otherwise run the
    LLM on the received message and send back the generated reply.

    Loops until the client disconnects (ConnectionClosed)."""
    try:
        while True:
            message = await websocket.recv()
            uprompt = message
            print(f"[in]Received message: {uprompt}")
            if (message == "test"):
                # Connectivity check requested by the client.
                print('\n[ok]Testing connection.\n')
                response = f"Akari AI v1.2b connected"
            else:
                prompts = [uprompt]
                # NOTE(review): datetime.UTC requires Python 3.11+ - confirm
                # the deployment interpreter version.
                current_time = datetime.datetime.now(datetime.UTC).strftime("%Y-%m-%d %H:%M:%S")
                print('[ok]Generating response... ('+current_time+')')
                # System prompt prepended to every request; embeds the clock.
                system_template = '''You are Akari Crimson AI, you have a kind, joking personality. Write detailed quick answers for any question. Give one answer at a time.
(Akari AI Server v1.2b, system time:'''+current_time+''')'''
                first_input = system_template + prompt_template.format(prompts[0])
                def stop_on_token_callback(token_id, token_string):
                    # Accumulates streamed tokens in the module-level buffer;
                    # returning False aborts generation once the model starts
                    # hallucinating a new "USER:" turn.
                    global tokens_added
                    tokens_added = tokens_added + token_string
                    if "USER:" in tokens_added:
                        print('[ok]Generation stopped.')
                        tokens_added =''
                        return False
                    # If the string is not found, continue generating tokens
                    return True
                response = model.generate(first_input, max_tokens=512, temp=0.7, top_k=40, top_p=0.4, repeat_penalty=1.99, repeat_last_n=512, n_batch=8, n_predict=None, callback=stop_on_token_callback)
                # Strip any residual role marker that slipped through.
                response = response.replace('USER:', '')
            await websocket.send(response)
            # Reset the shared buffer for the next request.
            tokens_added = ''
            print(f"[out]Sent message: {response}")
    except websockets.exceptions.ConnectionClosed:
        print("\n[Error]WebSocket connection closed\n")
# Bind the handler on localhost:8765 and serve forever.
start_server = websockets.serve(handle_websocket, "localhost", 8765)
# NOTE(review): asyncio.get_event_loop() is deprecated since Python 3.10;
# asyncio.run(...) is the modern entry point - confirm target version.
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
| 76836/Akari | experimental/server.py | server.py | py | 3,098 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "gpt4all.GPT4All",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "datetime... |
72013262909 |
import gym
from memory import ReplayBuff
from models import Network
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import clip_grad_norm_
class agent():
    '''Categorical (C51) distributional DQN agent.

    Attribute:
        memory: replay buffer to store transitions (train mode only)
        batch_size: batch size for sampling
        epsilon: parameter for epsilon greedy policy
        epsilon_decay: step size to decrease epsilon
        epsilon_train_start: initial(max) value of epsilon
        epsilon_train_end: end(min) value of epsilon
        target_update_period: period for target model's hard update
        gamma: discount factor
        net: RL model to train and select actions
        target_net: target model to hard update
        optimizer: optimizer for training dqn
        train_mode: train a new model or test a model
        support: fixed atom values z_i of the return distribution
        delta_z: spacing between adjacent atoms
    Arg:
        scenario: gym environment id (e.g. 'CartPole-v0')
        seed: environment random seed
        stack_size: kept for interface compatibility (unused here)
        replay_capacity: length of memory
        batch_size: batch size for sampling
        learning_rate: learning rate
        gamma: discount factor
        update_horizon/min_replay_history/update_period/target_update_period:
            training schedule parameters
        epsilon_train_start/epsilon_train_end/epsilon_eval/epsilon_decay:
            epsilon greedy schedule
        v_min/v_max/atom_size: support of the value distribution (C51 paper)
        train_mode: train a new model or test a model
    '''

    def __init__(
        self,
        scenario,
        seed=123,
        stack_size=1,
        replay_capacity=4096,
        batch_size=64,
        learning_rate=0.0001,
        gamma=0.99,
        update_horizon=1,
        min_replay_history=128,
        update_period=1,
        target_update_period=32,
        epsilon_train_start=1,
        epsilon_train_end=0.01,
        epsilon_eval=0.001,
        epsilon_decay=0.0001,
        # distributional param
        v_min = 0.0, # from preliminary experiments over the training games,see section5
        v_max = 200.0,
        atom_size=51, # see section5,footnote2,in original paper
        train_mode=True):

        self.env=gym.make(scenario)
        self.env.seed(seed)
        self.batch_size=batch_size
        self.update_period=update_period
        self.target_update_period=target_update_period
        self.gamma=gamma
        #-----dist
        self.v_min=v_min
        self.v_max=v_max
        self.atom_size=atom_size
        self.train_mode=train_mode
        # never require fewer warm-up transitions than one batch
        if min_replay_history<batch_size:
            self.min_replay_history=batch_size
        else:
            self.min_replay_history=min_replay_history
        self.action_num=self.env.action_space.n
        if self.train_mode:
            self.epsilon=epsilon_train_start
            self.epsilon_decay=epsilon_decay
            self.epsilon_train_start=epsilon_train_start
            self.epsilon_train_end=epsilon_train_end
        else:
            self.epsilon=epsilon_eval

        self.device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print(self.device)
        # distributional DQN, section4.1: support is the set of atoms
        self.support=torch.linspace(self.v_min,self.v_max,self.atom_size).to(self.device)
        self.delta_z=float(self.v_max-self.v_min)/(self.atom_size-1)
        self.net=Network(self.env.observation_space.shape[0],self.env.action_space.n,atom_size,self.support).to(self.device)
        if self.train_mode:
            self.memory=ReplayBuff(replay_capacity,self.env.observation_space.shape[0])
            self.target_net=Network(self.env.observation_space.shape[0],self.env.action_space.n,atom_size,self.support).to(self.device)
            self.target_net.load_state_dict(self.net.state_dict())
            self.target_net.eval()
            self.optimizer=torch.optim.RMSprop(self.net.parameters(),lr=learning_rate,alpha=0.9,eps=1e-10)
            self.loss_func=nn.MSELoss()
        else:
            self.net.eval()

    def select_action(self,state):
        """Epsilon-greedy action: greedy w.r.t. expected Q with prob 1-epsilon,
        otherwise uniform random. Returns the action index (int)."""
        if self.epsilon < np.random.random():
            with torch.no_grad():
                state=torch.from_numpy(state)
                action=self.net(state.to(self.device)).detach().cpu().numpy()
            #return the index of action
            return action.argmax()
        else:
            return np.random.randint(self.action_num)

    def store_transition(self,obs,action,reward,next_obs,done):
        """Append one (s, a, r, s', done) transition to the replay buffer."""
        self.memory.append(obs,action,reward,next_obs,done)

    def update(self):
        """Sample a batch and take one C51 projection/cross-entropy step.

        Returns the scalar loss value."""
        self.optimizer.zero_grad()
        samples=self.memory.sample(self.batch_size)
        state=torch.from_numpy(samples["obs"]).to(self.device)
        action=torch.from_numpy(samples["action"].reshape(-1,1)).to(self.device)
        reward=torch.from_numpy(samples["reward"].reshape(-1,1)).to(self.device)
        next_state=torch.from_numpy(samples["next_obs"]).to(self.device)
        done=torch.from_numpy(samples["done"].reshape(-1,1)).to(self.device)

        # ---------------distribution dqn--------------
        with torch.no_grad():
            # see algorithm 1 in original paper
            # next_action is index
            next_action=self.target_net(next_state).argmax(1) # line2-3
            next_dist=self.target_net.distributional(next_state) # network output
            next_dist=next_dist[range(self.batch_size),next_action] # get distribution by next_action(argmax policy)

            t_z=reward+(1-done)*self.gamma*self.support # line7
            # reward.shape is (batchsize,1);support.shape is (atomsize);t_z.shape is (batchsize,atomsize)
            t_z=t_z.clamp(min=self.v_min,max=self.v_max) # line7
            b=(t_z-self.v_min)/self.delta_z # line8
            l=b.floor().long() # line9
            u=b.ceil().long() # line9

            # flat-index offsets so index_add_ can scatter per batch row
            offset=(
                torch.linspace(
                    0,(self.batch_size-1)*self.atom_size,self.batch_size
                ).long()
                .unsqueeze(1)
                .expand(self.batch_size,self.atom_size)
                .to(self.device)
            )
            proj_dist = torch.zeros(next_dist.size(),device=self.device)
            proj_dist.view(-1).index_add_(0, (l + offset).view(-1), (next_dist * (u.float() - b)).view(-1)) # line11
            proj_dist.view(-1).index_add_(0, (u + offset).view(-1), (next_dist * (b - l.float())).view(-1)) # line12

        dist=self.net.distributional(state) # line14, p_i(x_t,a_t)
        # Index with a flat (batch,) action vector: the (batch,1) column tensor
        # would broadcast against range(batch_size) and select a
        # (batch,batch,atom) block instead of one distribution per sample.
        log_p=torch.log(dist[range(self.batch_size),action.reshape(-1)]) # line14
        loss = -(proj_dist*log_p).sum(1).mean() # line14

        loss.backward()
        # gradient clipping
        # https://pytorch.org/docs/stable/nn.html#torch.nn.utils.clip_grad_norm_
        clip_grad_norm_(self.net.parameters(),1.0,norm_type=1)
        self.optimizer.step()
        return loss.item()

    def target_update(self):
        """Hard-copy the online network's weights into the target network."""
        self.target_net.load_state_dict(self.net.state_dict())

    def train(self,num_episode):
        """Run num_episode training episodes, updating every update_period
        steps once min_replay_history transitions are stored."""
        if not self.train_mode:
            return None
        step=0
        for i in range(num_episode):
            r_batch=[]
            state=self.env.reset().astype(np.float32)
            done=False
            while not done:
                step+=1
                action=self.select_action(state)
                next_state,reward,done,_=self.env.step(action)
                next_state=next_state.astype(np.float32)
                self.store_transition(state,action,reward,next_state,done)
                r_batch.append(reward)
                state=next_state
                if self.memory.size>=self.min_replay_history and step%self.update_period==0:
                    # linear epsilon annealing down to epsilon_train_end
                    self.epsilon=max(self.epsilon_train_end,self.epsilon-(self.epsilon_train_start-self.epsilon_train_end)*self.epsilon_decay)
                    self.update()
                    if step % self.target_update_period==0:
                        self.target_update()
            print("episode: "+str(i)+" reward_sum: "+str(np.sum(r_batch)))
            del r_batch[:]

    def test(self,model_path=None,seedlist=None):
        """Evaluate a saved model over each seed in seedlist (eval mode only)."""
        if self.train_mode:
            return None
        if model_path is None:
            print("no model to test")
            return None
        if seedlist is None:
            seedlist=[111,123,1234]
        self._load(model_path)
        for s in seedlist:
            self.env.seed(s)
            r_batch=[]
            state=self.env.reset().astype(np.float32)
            done=False
            while not done:
                # Fixed: the old code incremented an uninitialized `step`
                # (NameError) and called store_transition although
                # self.memory only exists in train mode (AttributeError).
                action=self.select_action(state)
                next_state,reward,done,_=self.env.step(action)
                state=next_state.astype(np.float32)
                r_batch.append(reward)
            print("seed: "+str(s)+" reward_sum: "+str(np.sum(r_batch)))
            del r_batch[:]

    def _restore(self,path):
        """Save the online network's weights to `path` (train mode only)."""
        if self.train_mode:
            torch.save(self.net.state_dict(),path)
        else:
            print("testing model,cannot save models")

    def _load(self,path):
        """Load weights from `path` into the online network (eval mode only)."""
        if self.train_mode:
            print("training model,cannot load models")
        else:
            self.net.load_state_dict(torch.load(path))

    def reset(self):
        """Empty the replay buffer by resetting its write pointer and size."""
        self.memory.ptr=0
        self.memory.size=0
if __name__ =='__main__':
    # Limit intra-op CPU threads to keep the run reproducible/lightweight.
    torch.set_num_threads(3)
    seed=123
    # Seed numpy (action exploration) and torch (network init) identically.
    np.random.seed(seed)
    torch.manual_seed(seed)
    train_agent=agent('CartPole-v0',seed=seed)
    train_agent.train(15)
| linnaeushuang/RL-pytorch | value-based/distributionalDQN/distributionalDQN_learner.py | distributionalDQN_learner.py | py | 9,928 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "gym.make",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_... |
5123241016 | """
Requests モジュールによりリモートファイルを読み込むサンプル
事前にRequestsモジュールをインストールしましょう
# pip install requests
"""
import requests
url = 'https://it-engineer-lab.com/feed'
try:
r = requests.get(url, timeout=10.0)
print(r.text)
except requests.exceptions.RequestException as err:
print(err)
# ダウンロード(読み込み + ローカル保存)
# ダウンロードして rss.xml というファイル名で保存する例
try:
r = requests.get(url, timeout=10.0)
with open('rss.xml', mode='w') as f:
f.write(r.text)
except requests.exceptions.RequestException as err:
print(err) | toksan/python3_study | network/get_by_requests.py | get_by_requests.py | py | 690 | python | ja | code | 2 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "requests.exception... |
33644608975 | from django.shortcuts import render, redirect, get_object_or_404
from .models import Product, Category, Cart, Address, Order
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
import decimal
from django.contrib.auth.models import User
from django.contrib import messages
from django.db.models import Q
def home(request):
    """Render the landing page with up to four featured categories/products."""
    featured_categories = Category.objects.filter(is_active=True, is_featured=True)[:4]
    featured_products = Product.objects.filter(is_active=True, is_featured=True)[:4]
    return render(request, 'index.html',
                  {'products': featured_products, 'categories': featured_categories})
def categories(request):
    """List every active, featured category."""
    active_featured = Category.objects.filter(is_active=True, is_featured=True)
    return render(request, 'categories.html', {'categories': active_featured})
def product_detail(request, slug):
    """Show one product plus other active products from the same category."""
    item = Product.objects.get(slug=slug)
    related = Product.objects.filter(is_active=True, category=item.category).exclude(id=item.id)
    return render(request, 'product_detail.html',
                  {'item': item, 'related_products': related})
@login_required(login_url='signin')
def add_to_cart(request):
    """Add the product given by ?product_id= to the user's cart.

    If the product is already in the cart its quantity is incremented,
    otherwise a fresh Cart row with the default quantity is created.
    Redirects back to the referring page.
    """
    user = request.user
    product_id = request.GET.get('product_id')
    product = get_object_or_404(Product, id=product_id)

    # Single query instead of the old filter-then-get_object_or_404 pair.
    cart_entry = Cart.objects.filter(product=product_id, user=user).first()
    if cart_entry is not None:
        cart_entry.quantity += 1
        cart_entry.save()
    else:
        Cart(user=user, product=product).save()
    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
@login_required(login_url='signin')
def cart(request):
    """Render the user's cart with per-item totals, shipping and grand total."""
    user = request.user
    cart_products = Cart.objects.filter(user=user)

    amount = decimal.Decimal(0)
    shipping_charges = decimal.Decimal(100)
    # Sum only this user's rows; the old code fetched Cart.objects.all()
    # and filtered by user in Python, scanning every customer's cart.
    for item in cart_products:
        amount += item.quantity * item.product.price

    # Saved shipping addresses for the checkout shortcut.
    address = Address.objects.filter(user=user)
    context = {
        'cart_products': cart_products,
        'amount': amount,
        'shipping_charges': shipping_charges,
        'address': address,
        'total': amount + shipping_charges,
    }
    return render(request, 'cart.html', context)
@login_required(login_url='signin')
def remove_cart(request, id):
    """Delete one cart row by primary key, then bounce back to the referrer."""
    Cart.objects.get(id=id).delete()
    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
@login_required(login_url='signin')
def profile(request):
    """Render the profile page: account info, saved addresses, past orders."""
    context = {
        'user': User.objects.get(username=request.user),
        'useraddress': Address.objects.filter(user=request.user),
        'orders': Order.objects.filter(user=request.user),
    }
    return render(request, 'profile.html', context)
@login_required(login_url='signin')
def address(request):
    """Create a shipping address for the logged-in user.

    The two previously duplicated POST branches are collapsed: the form is
    saved once, and the ?q= query flag only decides where to redirect
    afterwards ('checkout' when the user came from checkout, 'profile'
    otherwise). GET requests just render the form.
    """
    if request.method == 'POST':
        Address(
            user=request.user,
            locality=request.POST['locality'],
            city=request.POST['city'],
            state=request.POST['state'],
        ).save()
        messages.success(request, 'Address has been Added.')
        return redirect('checkout' if request.GET.get('q') else 'profile')
    return render(request, 'address.html')
def trash_address(request, id):
    """Remove a saved address and return to the page the user came from."""
    Address.objects.get(id=id).delete()
    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
def plus_cart(request, cart_id):
    """Increment the quantity of one cart row (404 on unknown id)."""
    entry = get_object_or_404(Cart, id=cart_id)
    entry.quantity = entry.quantity + 1
    entry.save()
    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
def minus_cart(request, cart_id):
    """Decrement a cart row's quantity; delete the row once it reaches one."""
    entry = get_object_or_404(Cart, id=cart_id)
    if entry.quantity == 1:
        entry.delete()
    else:
        entry.quantity = entry.quantity - 1
        entry.save()
    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
@login_required(login_url='signin')
def checkout(request):
    """Place an order from the cart.

    POST: reads the selected address radio button, turns every cart row into
    an Order row, empties the cart and redirects to the orders page.
    GET (or a failed POST): renders the checkout page with address choices
    and cart totals.
    """
    if request.method == 'POST':
        try:
            radioAddress = request.POST['radioAddress']
            order_address = Address.objects.get(user=request.user, id=radioAddress)
            cart_items = Cart.objects.filter(user=request.user)
            for cart in cart_items:
                price = cart.quantity * cart.product.price
                orders = Order(user=request.user, address=order_address, product=cart.product, quantity=cart.quantity, total_price=price)
                orders.save()
                cart.delete()
            return redirect('orders')
        except Exception as error:
            # Broad catch is deliberate best-effort: any failure (no radio
            # button selected, deleted address, ...) falls through and
            # re-renders the page with an error banner.
            # NOTE(review): narrowing to KeyError/Address.DoesNotExist
            # would avoid masking unrelated bugs - confirm.
            print(error)
            messages.error(request, 'Add Shipping Address.')
    check_address = Address.objects.filter(user=request.user)
    total_cart_amount = Cart.objects.filter(user=request.user)
    total_amount = decimal.Decimal(0)
    shipping_charges = decimal.Decimal(100)
    for cart in total_cart_amount:
        carts = cart.quantity * cart.product.price
        total_amount += carts
    context = {
        'address':check_address,
        'price_amount':total_amount,
        'shipping_charges':shipping_charges,
        'total_amount':total_amount + shipping_charges
    }
    return render(request, 'checkout.html', context)
@login_required(login_url='signin')
def orders(request):
    """List the user's orders, newest first."""
    user_orders = Order.objects.filter(user=request.user).order_by('-ordered_date')
    return render(request, 'orders.html', {'all_orders': user_orders})
# Categories
def category_product(request, slug):
    """List active products of one category (404 on unknown slug)."""
    category = get_object_or_404(Category, slug=slug)
    context = {
        'products': Product.objects.filter(is_active=True, category=category),
        'category': category,
    }
    return render(request, 'search.html', context)
def search(request):
    """Case-insensitive title search over active products.

    A missing ?q= parameter used to crash on len(None); it now defaults to
    the empty string. Queries longer than 80 characters return no results
    to bound the LIKE pattern.
    """
    search_query = request.GET.get('q') or ''
    if len(search_query) > 80:
        products = Product.objects.none()
    else:
        products = Product.objects.filter(is_active=True, title__icontains=search_query)
    return render(request, 'search.html', {'products': products, 'query': search_query})
{
"api_name": "models.Category.objects.filter",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "models.Category.objects",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "models.Category",
"line_number": 13,
"usage_type": "name"
},
{
"api_n... |
36572760050 | import random
import colorgram
from turtle import Turtle, Screen
# Extract a 10-colour palette from the reference image (file must exist).
colors = colorgram.extract("./example.jpg", 10)
list_colors = []
for color in colors:
    # colorgram returns Rgb objects; keep plain (r, g, b) int tuples.
    current_color = color.rgb
    color_tuple = (current_color[0], current_color[1], current_color[2])
    list_colors.append(color_tuple)
porto = Turtle()
porto.penup()
porto.hideturtle()
porto.speed('fast')
screen = Screen()
# 0-255 integer colour components instead of 0.0-1.0 floats.
screen.colormode(255)
number_dots = 100
# Walk to the bottom-left corner so the 10x10 grid is roughly centred.
porto.seth(225)
porto.fd(300)
porto.seth(0)
for dot in range(1, number_dots + 1):
    porto.dot(20, random.choice(list_colors))
    porto.fd(50)
    if dot % 10 == 0:
        # End of a row: move up one row and carriage-return to the left.
        porto.seth(90)
        porto.fd(50)
        porto.seth(180)
        porto.fd(500)
        porto.seth(0)
screen.exitonclick()
| porto-o/Python_projects | 18. hirst-painting/main.py | main.py | py | 725 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "colorgram.extract",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "turtle.Turtle",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "turtle.Screen",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "random.choice",
"lin... |
17640174097 | """
Use blender to convert FBX (T-pose) to BVH file
"""
import os
import bpy
import numpy as np
def get_bvh_name(filename):
    """Map an FBX file name to its BVH counterpart.

    Uses os.path.splitext so names containing extra dots keep their full
    stem; the old split(".")[-2] kept only the last dotted component
    (e.g. "walk.cycle.fbx" became "cycle.bvh").
    """
    stem, _ = os.path.splitext(filename)
    return stem + ".bvh"
def main():
    """Batch-convert every FBX in ./mixamo/fbx/ to a BVH in ./mixamo/bvh/.

    Must run inside Blender (uses bpy importers/exporters)."""
    fbx_dir = "./mixamo/fbx/"
    bvh_dir = "./mixamo/bvh/"
    for filename in os.listdir(fbx_dir):
        fbx_path = os.path.join(fbx_dir, filename)
        bvh_path = os.path.join(bvh_dir, get_bvh_name(filename))
        bpy.ops.import_scene.fbx(filepath=fbx_path)
        # The freshly imported action lands at the end of bpy.data.actions.
        action = bpy.data.actions[-1]
        frame_start = action.frame_range[0]
        # Export at least 60 frames even for shorter clips.
        frame_end = max(60, action.frame_range[1])
        bpy.ops.export_anim.bvh(filepath=bvh_path,
                                frame_start=int(frame_start),
                                frame_end=int(frame_end),
                                root_transform_only=True)
        # Drop the action so the next import is again at index -1.
        bpy.data.actions.remove(bpy.data.actions[-1])
if __name__ == "__main__":
main()
| awkrail/mixamo_preprocessor | fbx2bvh.py | fbx2bvh.py | py | 935 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.listdir",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number"... |
# Origin of the regular square grid, shifted to a local coordinate frame.
sqmesh_min_coord = [359919.189 - 360600.0, 3972158.559 - 3973000.0]
# Edge length of one square grid cell.
sqmesh_step = 2.0
import h5py
import math
tmesh_data = h5py.File("visdump_surface_mesh_jaramillo_384.h5",'r')
# NOTE(review): key appears to be a timestep/cycle id in the dump - confirm.
tmesh_key = '6234'
# MixedElements is flat: [4, n0, n1, n2, 4, n0, n1, n2, ...] where the
# leading 4 is the element-type tag for a triangle.
ME_len = len(tmesh_data[tmesh_key]['Mesh']['MixedElements'])
ntris = ME_len // 4;
tricells_inodes = [[0 for x in range(3)] for x in range(ntris)]
for i in range(ME_len):
    if i % 4 == 0:
        if tmesh_data[tmesh_key]['Mesh']['MixedElements'][i] != 4:
            raise RuntimeError("Mesh should only contain triangular cells!")
    else:
        tricells_inodes[i // 4][i % 4 - 1] = tmesh_data[tmesh_key]['Mesh']['MixedElements'][i]
# Node coordinate array of the triangular mesh.
tnodes = tmesh_data[tmesh_key]['Mesh']['Nodes']
def cohensutherland(left, top, right, bottom, x1, y1, x2, y2):
    """Clip segment (x1, y1)-(x2, y2) to the rectangle [left, right] x [bottom, top].

    Cohen-Sutherland algorithm: each endpoint gets a 4-bit outcode telling
    which side(s) of the rectangle it lies beyond. The segment is repeatedly
    shortened at a violated boundary until both codes are zero (fully
    inside) or the codes share a bit (fully outside, in which case four
    None values are returned as a tuple). Otherwise the clipped endpoints
    are returned as (cx1, cy1, cx2, cy2).
    """
    LEFT_, RIGHT_, BOTTOM_, TOP_ = 1, 2, 4, 8

    def _outcode(px, py):
        code = 0
        if px < left:
            code |= LEFT_
        elif px > right:
            code |= RIGHT_
        if py < bottom:
            code |= BOTTOM_
        elif py > top:
            code |= TOP_
        return code

    code1 = _outcode(x1, y1)
    code2 = _outcode(x2, y2)
    while code1 | code2:
        if code1 & code2:
            # Both endpoints beyond the same boundary: no intersection.
            return None, None, None, None
        # Work on whichever endpoint is still outside.
        outside = code1 if code1 else code2
        if outside & TOP_:
            x = x1 + (x2 - x1) * (1.0 * (top - y1)) / (y2 - y1)
            y = top
        elif outside & BOTTOM_:
            x = x1 + (x2 - x1) * (1.0 * (bottom - y1)) / (y2 - y1)
            y = bottom
        elif outside & RIGHT_:
            y = y1 + (y2 - y1) * (1.0 * (right - x1)) / (x2 - x1)
            x = right
        else:  # LEFT_ is the only remaining possibility
            y = y1 + (y2 - y1) * (1.0 * (left - x1)) / (x2 - x1)
            x = left
        if outside == code1:
            x1, y1 = x, y
            code1 = _outcode(x1, y1)
        else:
            x2, y2 = x, y
            code2 = _outcode(x2, y2)
    return x1, y1, x2, y2
def get_intersect_poly(sq_i, sq_j, itri):
    """Return the polygon (list of [x, y] nodes) where triangle `itri`
    overlaps grid cell (sq_i, sq_j); returns [[]] when no triangle edge
    crosses the cell.

    Orders the triangle's nodes clockwise, clips each edge against the cell
    with cohensutherland(), then walks the clipped segments and the cell's
    corners to close the polygon. Relies on module globals sqmesh_min_coord,
    sqmesh_step, tnodes and tricells_inodes.
    """
    left = sqmesh_min_coord[0] + sq_j*sqmesh_step
    right = sqmesh_min_coord[0] + (sq_j + 1)*sqmesh_step
    bottom = sqmesh_min_coord[1] + sq_i*sqmesh_step
    top = sqmesh_min_coord[1] + (sq_i + 1)*sqmesh_step
    #Triangle's nodes clockwise ordering: first node is the one with the min x coordinate
    ifir = 0
    for inode in range(1, 3):
        if tnodes[int(tricells_inodes[itri][inode])][0] < tnodes[int(tricells_inodes[itri][ifir])][0]:
            ifir = inode
    # Second node: the remaining node seen under the largest polar angle
    # from the first one (gives clockwise order for this mesh orientation).
    isec = -1
    clkw_ang = -math.pi
    for inode in range(3):
        if (inode != ifir):
            cur_ang = math.atan2(tnodes[int(tricells_inodes[itri][inode])][1] - tnodes[int(tricells_inodes[itri][ifir])][1], tnodes[int(tricells_inodes[itri][inode])][0] - tnodes[int(tricells_inodes[itri][ifir])][0])
            if cur_ang > clkw_ang:
                clkw_ang = cur_ang
                isec = inode
    inodes_clkw = [ifir, isec, 0]
    for inode in range(3):
        if (inode != ifir) and (inode != isec):
            inodes_clkw[2] = inode
            break
    for inode in range(3):
        inodes_clkw[inode] = int(tricells_inodes[itri][inodes_clkw[inode]])
    # Clip the three (clockwise) triangle edges against the cell.
    nclipped = 0
    seg_pts = [[[0.0 for x in range(2)] for x in range(2)] for x in range(3)]
    for iseg in range(3):
        inode1 = inodes_clkw[iseg]
        inode2 = inodes_clkw[(iseg + 1)%3]
        x1, y1, x2, y2 = cohensutherland(left, top, right, bottom, tnodes[inode1][0], tnodes[inode1][1], tnodes[inode2][0], tnodes[inode2][1])
        if x1 != None:
            seg_pts[nclipped][0][0] = x1
            seg_pts[nclipped][0][1] = y1
            seg_pts[nclipped][1][0] = x2
            seg_pts[nclipped][1][1] = y2
            nclipped += 1
    if nclipped == 0:
        # No edge intersects the cell: disjoint, or cell fully inside the
        # triangle (the caller tests that case with is_inside_tri).
        return [[]]
    # At most 7 nodes: clipped segment endpoints plus inserted cell corners.
    poly_nodes = [[0.0 for x in range(2)] for x in range(7)]
    poly_nodes[0] = seg_pts[0][0]
    inext_seg = 0
    npolynodes = 1;
    sides_cmp = [left, top, right, bottom]
    sq_nodes = [[left, top], [right, top], [right, bottom], [left, bottom]]
    # If the clipped chain is already closed, no cell side needs walking;
    # otherwise find which cell side the start/stop points lie on.
    if seg_pts[0][0] == seg_pts[nclipped - 1][1]:
        isq_side_start = -1
        isq_side_stop = -1
    else:
        node_cmp = [poly_nodes[0][0], poly_nodes[0][1], poly_nodes[0][0], poly_nodes[0][1]]
        for iside in range(4):
            if node_cmp[iside] == sides_cmp[iside]:
                isq_side_start = iside
                break
        for iside in range(4):
            if node_cmp[iside] == sides_cmp[iside]:
                isq_side_stop = iside
    # Walk: follow clipped segments when they continue from the current
    # point, otherwise insert cell corners until the polygon closes.
    while(1):
        if (inext_seg != nclipped) and (poly_nodes[npolynodes - 1] == seg_pts[inext_seg][0]):
            if (isq_side_stop == -1) and (inext_seg == nclipped - 1):
                break
            else:
                poly_nodes[npolynodes] = seg_pts[inext_seg][1]
                npolynodes += 1
                inext_seg += 1
                continue
        node_cmp = [poly_nodes[npolynodes - 1][0], poly_nodes[npolynodes - 1][1], poly_nodes[npolynodes - 1][0], poly_nodes[npolynodes - 1][1]]
        icurside = -1
        if isq_side_start != -1:
            for i in range(4):
                iside = (isq_side_start + i)%4
                if node_cmp[iside] == sides_cmp[iside]:
                    icurside = iside
        else:
            for iside in range(4):
                if node_cmp[iside] == sides_cmp[iside]:
                    icurside = iside
        isq_side_start = icurside
        if icurside == isq_side_stop:
            if inext_seg < nclipped:
                raise RuntimeError("Completed the intersection polygon before tracing all the clipped segments!")
            break
        if inext_seg < nclipped:
            next_node_cmp = [seg_pts[inext_seg][0][0], seg_pts[inext_seg][0][1], seg_pts[inext_seg][0][0], seg_pts[inext_seg][0][1]]
            if next_node_cmp[icurside] == sides_cmp[icurside]:
                poly_nodes[npolynodes] = seg_pts[inext_seg][0]
                npolynodes += 1
                continue
        poly_nodes[npolynodes] = sq_nodes[icurside]
        npolynodes += 1
    poly_nodes = poly_nodes[0:npolynodes]
    return poly_nodes
def get_poly_area(polynodes):
    """Area of a (convex) polygon given as a list of [x, y] nodes.

    Fan-triangulates from the first node and sums the absolute cross-product
    area of each triangle.
    """
    total = 0.0
    for k in range(1, len(polynodes) - 1):
        a = polynodes[0]
        b = polynodes[k]
        c = polynodes[k + 1]
        total += 0.5*abs((a[0] - c[0])*(b[1] - a[1]) - (a[0] - b[0])*(c[1] - a[1]))
    return total
def is_inside_tri(itri, sq_i, sq_j):
    """True when all four corners of grid cell (sq_i, sq_j) lie inside
    triangle `itri` (barycentric-coordinate test per corner).

    Relies on module globals tnodes, tricells_inodes, sqmesh_min_coord and
    sqmesh_step.
    """
    tri_nodes = [[0.0 for x in range(2)] for x in range(3)]
    for inode in range(3):
        tri_nodes[inode] = tnodes[int(tricells_inodes[itri][inode])]
    sq_nodes = [[0.0 for x in range(2)] for x in range(4)]
    for i in range(2):
        for j in range(2):
            sq_nodes[2*i + j] = [sqmesh_min_coord[0] + (sq_j + j)*sqmesh_step, sqmesh_min_coord[1] + (sq_i + i)*sqmesh_step]
    is_inside = True
    for inode in range(4):
        # Barycentric coordinates (s, t) of the corner w.r.t. the triangle;
        # the corner is inside iff s >= 0, t >= 0 and s + t <= 1.
        det = (tri_nodes[1][1] - tri_nodes[2][1])*(tri_nodes[0][0] - tri_nodes[2][0]) + (tri_nodes[2][0] - tri_nodes[1][0])*(tri_nodes[0][1] - tri_nodes[2][1])
        s = ((tri_nodes[1][1] - tri_nodes[2][1])*(sq_nodes[inode][0] - tri_nodes[2][0]) + (tri_nodes[2][0] - tri_nodes[1][0])*(sq_nodes[inode][1] - tri_nodes[2][1])) / det
        t = ((tri_nodes[2][1] - tri_nodes[0][1])*(sq_nodes[inode][0] - tri_nodes[2][0]) + (tri_nodes[0][0] - tri_nodes[2][0])*(sq_nodes[inode][1] - tri_nodes[2][1])) / det
        if (s < 0) or (t < 0) or (s + t > 1):
            is_inside = False
            break
    return is_inside
# For every triangle, write the grid cells it overlaps together with the
# overlap-area fraction (weight in [0, 1]) to area_weights.dat.
fid = open('area_weights.dat', 'w')
fid.write(repr(ntris) + '\n')
fid.write(repr(sqmesh_min_coord[0]) + ' ' + repr(sqmesh_min_coord[1]) + ' ' + repr(sqmesh_step) + '\n')
for itri in range(ntris):
    # NOTE(review): the trailing comma is a Python 2 "suppress newline"
    # idiom; under Python 3 it just builds a throwaway tuple - confirm
    # target interpreter before changing to print(..., end='\r').
    print('Processing triangle ' + repr(itri + 1) + '/' + repr(ntris) + '\r'),
    # Bounding box of the triangle in local coordinates.
    xmin = tnodes[int(tricells_inodes[itri][0])][0]
    xmax = tnodes[int(tricells_inodes[itri][0])][0]
    ymin = tnodes[int(tricells_inodes[itri][0])][1]
    ymax = tnodes[int(tricells_inodes[itri][0])][1]
    for inode in range(1, 3):
        if tnodes[int(tricells_inodes[itri][inode])][0] < xmin:
            xmin = tnodes[int(tricells_inodes[itri][inode])][0]
        elif tnodes[int(tricells_inodes[itri][inode])][0] > xmax:
            xmax = tnodes[int(tricells_inodes[itri][inode])][0]
        if tnodes[int(tricells_inodes[itri][inode])][1] < ymin:
            ymin = tnodes[int(tricells_inodes[itri][inode])][1]
        elif tnodes[int(tricells_inodes[itri][inode])][1] > ymax:
            ymax = tnodes[int(tricells_inodes[itri][inode])][1]
    # Range of grid cells the bounding box can touch.
    imin = int(math.floor((ymin - sqmesh_min_coord[1]) / sqmesh_step))
    imax = int(math.floor((ymax - sqmesh_min_coord[1]) / sqmesh_step))
    jmin = int(math.floor((xmin - sqmesh_min_coord[0]) / sqmesh_step))
    jmax = int(math.floor((xmax - sqmesh_min_coord[0]) / sqmesh_step))
    fid.write(repr(itri) + '\t')
    for i in range(imin, imax + 1):
        for j in range(jmin, jmax + 1):
            area_weight = 0.0
            if is_inside_tri(itri, i, j):
                # Cell fully covered by the triangle.
                area_weight = 1.0
            else:
                polygon = get_intersect_poly(i, j, itri)
                if polygon != [[]]:
                    cur_poly_area = get_poly_area(polygon)
                    area_weight = cur_poly_area / pow(sqmesh_step, 2)
            if area_weight > 0.0:
                fid.write(repr(i) + '\t' + repr(j) + '\t')
                fid.write(repr(area_weight) + '\t')
    fid.write('\n')
# Fixed: the original said `fid.close` (no parentheses), which evaluates the
# bound method without calling it, so the file was never explicitly closed.
fid.close()
| amanzi/ats | tools/square_to_tri_mesh_data_parser/tri_square_overlap_weights.py | tri_square_overlap_weights.py | py | 9,284 | python | en | code | 35 | github-code | 6 | [
{
"api_name": "h5py.File",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "math.atan2",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 238,... |
21053864803 | # Import the necessary libraries
import PySimpleGUI as sg
import qrcode
# Set the theme for the UI
sg.theme('GreenMono')
# Define the layout for the app: text input, Create/Exit buttons, image area
layout = [ [sg.Text('Enter Text: ', font=('Helvetica', 12, 'bold')), sg.InputText(font=('Helvetica', 12), size=(30,1))],
           [sg.Button('Create', font=('Helvetica', 12), button_color=('white', '#007F00')), sg.Button('Exit', font=('Helvetica', 12), button_color=('white', 'firebrick'))],
           [sg.Image(key='-IMAGE-', size=(200, 150))]
         ]
# Create the window
window = sg.Window('QR Code Generator', layout)
# Event loop for the app
while True:
    # Read events and values from the window (blocks until an event fires)
    event, values = window.read()
    # If the Exit button or window is closed, exit the app
    if event in (sg.WIN_CLOSED, 'Exit'):
        break
    # If the Create button is clicked, generate the QR code image
    if event == 'Create':
        # Get the text input from the user (values[0] = first input element)
        data = values[0]
        # If the text input is not empty, generate the QR code
        if data:
            # Generate the QR code image
            img = qrcode.make(data)
            # Save the QR code image to a file (overwritten on every click)
            img.save('qrcode.png')
            # Update the image in the UI from the saved file
            window['-IMAGE-'].update(filename='qrcode.png')
# Close the window and exit the app
window.close()
{
"api_name": "PySimpleGUI.theme",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "PySimpleGUI.Text",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "PySimpleGUI.InputText",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "PySimpleGUI.But... |
1499727625 | from bs4 import BeautifulSoup
import json
import logging
import requests
from Mongo import client as MongoClient
from Redis import client as RedisClient
WEB = 'https://www.gensh.in/events/promotion-codes'
class RequestErrorException(Exception):
    """HTTP request failure carrying the offending response.

    Attributes:
        message: human-readable error description.
        resp: the requests.Response that triggered the failure.
    """
    def __init__(self, message, resp):
        super().__init__(message)
        self.message = message
        self.resp = resp
def scrapper():
    """Scrape promotion codes and broadcast the ones not seen before.

    For each scraped code: skip it if Mongo already has it, otherwise push a
    notification through Redis and persist it in Mongo. Request failures are
    logged and abort the run.
    """
    logging.info("Running scrapper")
    try:
        codes = _scrapCodes()
    except RequestErrorException as e:
        logging.warning("%s\nStatus code %s", e.message, e.resp.status_code)
        return
    for code in codes:
        if MongoClient.existCode(code["id"]):
            continue
        else:
            logging.info("[%s] - New code detected",code["id"])
            RedisClient.sendCode(json.dumps(code))
            logging.info("[%s] - Notification sent",code["id"])
            MongoClient.insertCode(code)
            logging.info("[%s] - Saved on Mongo",code["id"])
    logging.info("Scrapper finished")
def _scrapCodes():
    """Download the promo-codes table from WEB and return a list of code dicts.

    Raises RequestErrorException when the request comes back with an error
    status (requests.Response evaluates falsy for 4xx/5xx).
    """
    resp = requests.get(WEB)
    if not resp:
        raise RequestErrorException(resp=resp, message="There was an error on the request")
    soup = BeautifulSoup(resp.text, 'lxml')
    table = soup.find('table')
    # Column names come from the <th> cells of the table.
    headers = [heading.text for heading in table.find_all('th')]
    rows = [
        {headers[i]: cell.text for i, cell in enumerate(row.find_all('td'))}
        for row in table.find_all('tr')
    ]
    # Header rows have no <td> cells and therefore yield empty dicts; drop them,
    # then strip whitespace from every key and value.
    rows = [_formatter(row) for row in rows if row]
    return [
        {
            "id": row["NA"],
            "date_added": row["Date Added"],
            "rewards": row["Rewards"],
            "expired": row["Expired"],
            "eu": row["EU"],
            "na": row["NA"],
            "sea": row["SEA"],
        }
        for row in rows
    ]
def _formatter(result):
result = {x.strip(): v.strip()
for x, v in result.items()}
return result | BRAVO68WEB/genshin-notify | scrapperCodes/scrapper.py | scrapper.py | py | 2,107 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "logging.info",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "Mongo.client.existCode",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "Mongo.client",
... |
18997085490 | import torch.nn as nn
from efficientnet_pytorch import EfficientNet
class EfficientNetCustom(nn.Module):
    """EfficientNet backbone with its classifier head resized to num_classes.

    Optionally loads pretrained weights and/or freezes the whole backbone so
    that only the replacement fully-connected head is trained.
    """

    def __init__(self, model_name, in_channels, num_classes,
                 load_pretrained_weights=True, train_only_last_layer=False):
        super(EfficientNetCustom, self).__init__()
        self.model_name = model_name
        self.in_channels = in_channels
        self.num_classes = num_classes
        self.load_pretrained_weights = load_pretrained_weights
        self.train_only_last_layer = train_only_last_layer
        # Build the backbone, with or without pretrained weights.
        if self.load_pretrained_weights:
            self.features = EfficientNet.from_pretrained(self.model_name, in_channels=self.in_channels)
        else:
            self.features = EfficientNet.from_name(self.model_name, in_channels=self.in_channels)
        if self.train_only_last_layer:
            print('Training only last layer...')
            # Freeze every backbone parameter; the fresh head added below is
            # a new nn.Linear, so it stays trainable.
            for param in self.features.parameters():
                param.requires_grad = False
        # Replace the stock classifier with one matching our class count.
        in_ftrs = self.features._fc.in_features
        self.features._fc = nn.Linear(in_ftrs, self.num_classes)

    def forward(self, inputs):
        # Delegate straight to the (modified) EfficientNet.
        return self.features(inputs)
{
"api_name": "torch.nn.Module",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "efficientnet_pytorch.EfficientNet.from_pretrained",
"line_number": 16,
"usage_type": "call"
},
{
"api... |
25294948506 | from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse
from .forms import *
from .models import *
import psycopg2
from mysite.settings import DATABASES
from psycopg2.extras import RealDictCursor
def db_answer(query):
    """Execute *query* against the default Django database via psycopg2.

    Returns a list of dict-like rows (RealDictCursor) on success, or None
    when anything fails — the error is printed, not re-raised.
    """
    # Pre-bind so the finally block is safe even when connect() itself fails.
    # The original code raised NameError from `finally` in that case, masking
    # the real connection error.
    connection = None
    cursor = None
    try:
        connection = psycopg2.connect(
            database=DATABASES['default']['NAME'],
            user=DATABASES['default']['USER'],
            host=DATABASES['default']['HOST'],
            port=DATABASES['default']['PORT'],
            password=DATABASES['default']['PASSWORD']
        )
        # RealDictCursor makes fetchall() return rows keyed by column name.
        cursor = connection.cursor(cursor_factory=RealDictCursor)
        cursor.execute(query)
        result = cursor.fetchall()
        return result
    except Exception as err:
        print(err)
        print('ЧТо то пошло не так!')
    finally:
        if cursor is not None:
            cursor.close()
        if connection is not None:
            connection.close()
def index(request):
    """Home page: every worker plus all categories and positions."""
    workers = db_answer("SELECT * FROM employees_worker;")
    categories = db_answer("SELECT * FROM employees_category;")
    positions = db_answer("SELECT * FROM employees_position;")
    context = {
        'workers': workers,
        'cats': categories,
        'positoin': positions,  # NOTE(review): templates expect this misspelled key
        'title': 'Главная страница',
    }
    return render(request, 'employees/index.html', context=context)
def adduser(request):
    """Create a new worker from the POSTed form.

    On success redirects to the home page; on failure re-renders the bound
    form with an error attached.
    """
    if request.method == 'POST':
        form = AddWorkerForm(request.POST)
        if form.is_valid():
            try:
                Worker.objects.create(**form.cleaned_data)
                return redirect('home')
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception.
            except Exception:
                form.add_error(None, "Ошибка добавления поста!")
    else:
        form = AddWorkerForm()
    all_cat = db_answer("""SELECT * FROM employees_category;""")
    context = {
        'cats': all_cat,
        'form': form,
        'title': 'Добавление сотрудника',
    }
    return render(request, 'employees/adduser.html', context=context)
def create_user(request, w_id):
    """Edit the existing worker identified by *w_id*."""
    categories = db_answer("SELECT * FROM employees_category;")
    worker = Worker.objects.get(pk=w_id)
    if request.method == 'POST':
        # Bound form: validate and persist the edit.
        form = AddWorkerForm(request.POST, instance=worker)
        if form.is_valid():
            form.save()
            return redirect('home')
    else:
        form = AddWorkerForm(instance=worker)
    context = {
        'cats': categories,
        'form': form,
        'title': 'Главная страница',
    }
    return render(request, 'employees/create_user.html', context=context)
def add_position(request):
    """Create a new position from the POSTed form."""
    if request.method == 'POST':
        # Bound form: validate and persist, then go home.
        form = AddPositionForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('home')
    else:
        form = AddPositionForm()
    categories = db_answer("SELECT * FROM employees_category;")
    context = {
        'cats': categories,
        'form': form,
        'title': 'Добавление должности',
    }
    return render(request, 'employees/add_position.html', context=context)
def show_worker(request, pos_id):
    """List the workers assigned to the position *pos_id*."""
    # NOTE(review): pos_id is interpolated straight into SQL; this is safe only
    # if the URLconf uses an int converter — consider a parameterized query.
    workers = db_answer(f"SELECT * FROM employees_worker WHERE pos_id = {pos_id};")
    categories = db_answer("SELECT * FROM employees_category;")
    positions = db_answer("SELECT * FROM employees_position")
    context = {
        'workers': workers,
        'cats': categories,
        'positoin': positions,
        'title': 'Главная страница',
    }
    return render(request, 'employees/show_worker.html', context=context)
def show_cats(request, cat_id):
    """List all positions belonging to the category *cat_id*."""
    form = AddPositionForm()
    categories = db_answer("SELECT * FROM employees_category;")
    # NOTE(review): cat_id is interpolated straight into SQL; safe only with an
    # int URL converter — consider a parameterized query.
    positions = db_answer(f"SELECT * FROM employees_position WHERE cat_id = {cat_id};")
    context = {
        'cats': categories,
        'positoin': positions,
        'form': form,
        'title': 'Главная страница',
    }
    return render(request, 'employees/show_cats.html', context=context)
def create_position(request, pos_id):
    """Edit the existing position identified by *pos_id*."""
    categories = db_answer("SELECT * FROM employees_category;")
    # NOTE(review): pos_id is interpolated straight into SQL; safe only with an
    # int URL converter — consider a parameterized query.
    position_rows = db_answer(f"SELECT * FROM employees_position WHERE id = {pos_id};")
    position = Position.objects.get(pk=pos_id)
    if request.method == 'POST':
        # Bound form: validate and persist the edit.
        form = AddPositionForm(request.POST, instance=position)
        if form.is_valid():
            form.save()
            return redirect('home')
    else:
        form = AddPositionForm(instance=position)
    context = {
        'cats': categories,
        'positoin': position_rows,
        'form': form,
        'title': 'Главная страница',
    }
    return render(request, 'employees/create_position.html', context=context)
def del_worker(request, w_id):
    """Delete the worker *w_id* and return to the home page."""
    Worker.objects.get(pk=w_id).delete()
    # `reverse` was never imported in this module (django.shortcuts does not
    # export it), so `redirect(reverse('home'))` would raise NameError unless a
    # wildcard import happened to provide it. redirect() accepts the URL
    # pattern name directly, matching every other view in this file.
    return redirect('home')
def del_position(request, pos_id):
    """Delete the position *pos_id* and return to the home page."""
    Position.objects.get(pk=pos_id).delete()
    # `reverse` was never imported in this module; redirect() accepts the URL
    # pattern name directly, matching every other view in this file.
    return redirect('home')
| Fastsnai1/Employee_log | mysite/employees/views.py | views.py | py | 6,205 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "psycopg2.connect",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "mysite.settings.DATABASES",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "mysite.settings.DATABASES",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.