| index (int64) | repo_name (string) | branch_name (string) | path (string) | content (string) | import_graph (string) |
|---|---|---|---|---|---|
17,234
|
hybby/sreport
|
refs/heads/main
|
/tests/test_summary.py
|
"""
Unit tests for the sreport.py utility relating to summary report generation
"""
import pytest
from sreport import generate_summary
def test_summary_report_output():
"""
Tests that given a summary dict of response codes and response counts,
we produce a report in the format that we expect
"""
sample_summary = {
200: 50,
403: 2,
404: 10,
500: 1
}
expected_output = [
{
"Status_code": 200,
"Number_of_responses": 50
},
{
"Status_code": 403,
"Number_of_responses": 2
},
{
"Status_code": 404,
"Number_of_responses": 10
},
{
"Status_code": 500,
"Number_of_responses": 1
},
{
"Number_of_responses": 63
}
]
assert generate_summary(sample_summary) == expected_output
def test_summary_bad_input_error():
"""
Tests that we raise TypeError if a dictionary hasn't been provided
"""
sample_object = ""
with pytest.raises(TypeError, match="input must be dict"):
generate_summary(sample_object)
def test_summary_bad_count_error():
"""
Tests that we throw a ValueError if a non-integer response count provided
"""
sample_object = {
404: "sixty-one"
}
error = "bad input; response counts must be integers"
with pytest.raises(ValueError, match=error):
generate_summary(sample_object)
def test_summary_bad_code_error():
"""
Tests that we throw a ValueError if a non-integer response code provided
"""
sample_object = {
"four-oh-four": 100
}
error = "bad input; response codes must be integers"
with pytest.raises(ValueError, match=error):
generate_summary(sample_object)
|
{"/tests/test_urls.py": ["/sreport.py"], "/tests/test_io.py": ["/sreport.py"], "/tests/test_summary.py": ["/sreport.py"]}
|
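The `generate_summary` implementation under test is not included in this row; a minimal sketch consistent with the assertions above (validation order, error messages, sorted per-code rows plus a trailing total) might look like this — the real sreport.py may differ:

```python
# Hypothetical sketch of sreport.generate_summary, reconstructed from the
# tests above; the actual implementation is not part of this row.
def generate_summary(summary):
    if not isinstance(summary, dict):
        raise TypeError("input must be dict")
    for code, count in summary.items():
        if not isinstance(code, int):
            raise ValueError("bad input; response codes must be integers")
        if not isinstance(count, int):
            raise ValueError("bad input; response counts must be integers")
    report = [
        {"Status_code": code, "Number_of_responses": summary[code]}
        for code in sorted(summary)
    ]
    # trailing row carries the total response count across all codes
    report.append({"Number_of_responses": sum(summary.values())})
    return report
```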
17,235
|
GreyZmeem/habr-proxy
|
refs/heads/master
|
/main.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from argparse import ArgumentParser
from typing import Final
from aiohttp import web
from app.application import AppConfig, create_app
from app.proxy import const
_host: Final[str] = "0.0.0.0"
_port: Final[int] = 8080
def get_parser() -> ArgumentParser:
"""Build argument parser."""
parser = ArgumentParser(description="iVelum proxy")
parser.add_argument(
"-l",
"--len",
nargs="?",
type=int,
const=const.word_length,
default=const.word_length,
help="Word length",
dest="word_length",
)
parser.add_argument(
"-t",
"--text",
nargs="?",
type=str,
const=const.word_append,
default=const.word_append,
help="Text appended to words",
dest="word_append",
)
parser.add_argument(
"-u",
"--up",
"--upstream",
nargs="?",
type=str,
const=const.upstream,
default=const.upstream,
help="Upstream URL (e.g. 'https://example.com')",
dest="upstream",
)
parser.add_argument(
"--host",
nargs="?",
type=str,
const=_host,
default=_host,
help="HTTP server listen address",
dest="host",
)
parser.add_argument(
"--port",
nargs="?",
type=int,
const=_port,
default=_port,
help="HTTP server listen port",
dest="port",
)
return parser
def run():
"""Run main application server."""
parser = get_parser()
args = parser.parse_args()
app_config = AppConfig(
upstream=args.upstream,
length=args.word_length,
append=args.word_append,
)
app = create_app(app_config)
web.run_app(app, host=args.host, port=args.port)
if __name__ == "__main__":
run() # type: ignore
|
{"/main.py": ["/app/application.py"], "/app/application.py": ["/app/proxy/config.py", "/app/proxy/patcher_ivelum.py", "/app/proxy/proxy.py", "/app/proxy/req_resp.py"], "/app/proxy/patcher_ivelum.py": ["/app/proxy/config.py", "/app/proxy/patcher_default.py"], "/app/proxy/patcher_default.py": ["/app/proxy/config.py", "/app/proxy/req_resp.py"], "/app/proxy/config.py": ["/app/proxy/req_resp.py"], "/app/proxy/proxy.py": ["/app/proxy/config.py", "/app/proxy/req_resp.py"]}
|
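The parser can be sanity-checked without starting the server; a small sketch (flag values here are illustrative):

```python
# Exercises get_parser() from main.py in isolation; no server is started.
from main import get_parser

args = get_parser().parse_args(["--len", "7", "--port", "9090"])
assert args.word_length == 7
assert args.port == 9090
assert args.upstream == "https://habr.com"  # default from app.proxy.const
```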
17,236
|
GreyZmeem/habr-proxy
|
refs/heads/master
|
/app/application.py
|
# -*- coding: utf-8 -*-
import dataclasses
from aiohttp import web
from app.proxy.config import Config, Patcher
from app.proxy.patcher_ivelum import IVelumPatcher, IVelumPatcherConfig
from app.proxy.proxy import Proxy
from app.proxy.req_resp import Request, Response
@dataclasses.dataclass(frozen=True)
class AppConfig(object):
"""AioHTTP application configuration."""
#: Proxied resource (e.g. `https://habr.com`).
upstream: str
#: Length of words to patch.
length: int
#: String which must be appended to the matched words.
append: str
def create_app(config: AppConfig) -> web.Application:
"""Create AioHTTP application."""
proxy_patcher_config = IVelumPatcherConfig(config.length, config.append)
proxy_patcher = IVelumPatcher(proxy_patcher_config)
proxy_config = Config("", config.upstream, proxy_patcher)
proxy_handler = ProxyHandler(proxy_patcher, proxy_config)
app = web.Application()
app.add_routes([
web.get("/{tail:.*}", proxy_handler.dispatch),
])
return app
class ProxyHandler(object):
"""Wildcard handler which sends all requests through proxy."""
def __init__(self, proxy_patcher: Patcher, proxy_config: Config):
self.proxy_patcher = proxy_patcher
self.proxy_config = proxy_config
async def dispatch(self, request: web.Request) -> web.Response:
"""Handle HTTP request."""
proxy = self._proxy(request)
proxy_request = await _aio_to_proxy(request)
proxy_response = await proxy.dispatch(proxy_request)
return await _proxy_to_aio(proxy_response)
def _proxy(self, request: web.Request) -> Proxy:
"""Create proxy instance for the given request."""
host = f"{request.scheme}://{request.host}"
config = dataclasses.replace(self.proxy_config, host=host)
return Proxy(config)
async def _aio_to_proxy(request: web.Request) -> Request:
"""Build proxy request instance from aiohttp request."""
url = str(request.url)
body = await request.read()
headers = tuple(request.headers.items())
return Request(url, request.method, headers=headers, body=body)
async def _proxy_to_aio(response: Response) -> web.Response:
"""Build aiohttp response instance from proxy response."""
headers = dict(response.headers)
return web.Response(
body=response.body,
status=response.status,
headers=headers,
)
|
{"/main.py": ["/app/application.py"], "/app/application.py": ["/app/proxy/config.py", "/app/proxy/patcher_ivelum.py", "/app/proxy/proxy.py", "/app/proxy/req_resp.py"], "/app/proxy/patcher_ivelum.py": ["/app/proxy/config.py", "/app/proxy/patcher_default.py"], "/app/proxy/patcher_default.py": ["/app/proxy/config.py", "/app/proxy/req_resp.py"], "/app/proxy/config.py": ["/app/proxy/req_resp.py"], "/app/proxy/proxy.py": ["/app/proxy/config.py", "/app/proxy/req_resp.py"]}
|
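For reference, the wiring that main.run() performs can be reproduced directly; a sketch using the defaults from app/proxy/const.py:

```python
# Builds the aiohttp application the same way main.run() does.
from app.application import AppConfig, create_app

config = AppConfig(upstream="https://habr.com", length=6, append="™")
app = create_app(config)  # one wildcard GET route -> ProxyHandler.dispatch
```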
17,237
|
GreyZmeem/habr-proxy
|
refs/heads/master
|
/app/proxy/patcher_ivelum.py
|
# -*- coding: utf-8 -*-
import dataclasses
import itertools
import unicodedata
from typing import Iterable, Optional, Set
from app.proxy.config import ReqResp, SrcDst
from app.proxy.patcher_default import DefaultPatcher
from bs4 import BeautifulSoup
from bs4.element import NavigableString, Tag
from nltk import wordpunct_tokenize
@dataclasses.dataclass(frozen=True)
class IVelumPatcherConfig(object):
"""Configuration for the `IVelumPatcher` service."""
#: Length of words to patch.
words_length: int
#: String which must be appended to the matched words.
append: str
class IVelumPatcher(DefaultPatcher):
"""Patcher implementation for iVelum code challenge."""
#: Form of unicode normalization.
unicode_form: str = "NFKC"
#: Skip patching these tags.
skip_tags: Set[str] = {"script", "style"}
def __init__(self, config: IVelumPatcherConfig):
self.config = config
def patch_body(self, req_resp: ReqResp, hosts: SrcDst) -> Optional[bytes]:
"""Iterate over all tags changing its text."""
if not self.must_patch_body(req_resp):
return req_resp.body
body = super().patch_body(req_resp, hosts)
text = body.decode("utf-8") # type: ignore
html = BeautifulSoup(text, "lxml")
for tag in html.find_all(name=True):
if tag.name not in self.skip_tags:
self.patch_tag(tag)
return html.encode("utf-8")
def patch_tag(self, tag: Tag):
"""Patch single tag's text."""
for child_tag in tag.children:
if not isinstance(child_tag, NavigableString):
continue
if child_tag.parent != tag:
continue
self.patch_navigable_string(child_tag)
def patch_navigable_string(self, nav_string: NavigableString):
"""Replace navigable string tag with a patched text."""
text = unicodedata.normalize(self.unicode_form, nav_string)
split = ((wordpunct_tokenize(word), " ") for word in text.split())
words: Iterable[str] = itertools.chain(*list(
itertools.chain(*split),
))
words = (
(
f"{word}{self.config.append}"
if len(word) == self.config.words_length
else word
)
for word in words
)
nav_string.replace_with(NavigableString("".join(words)))
|
{"/main.py": ["/app/application.py"], "/app/application.py": ["/app/proxy/config.py", "/app/proxy/patcher_ivelum.py", "/app/proxy/proxy.py", "/app/proxy/req_resp.py"], "/app/proxy/patcher_ivelum.py": ["/app/proxy/config.py", "/app/proxy/patcher_default.py"], "/app/proxy/patcher_default.py": ["/app/proxy/config.py", "/app/proxy/req_resp.py"], "/app/proxy/config.py": ["/app/proxy/req_resp.py"], "/app/proxy/proxy.py": ["/app/proxy/config.py", "/app/proxy/req_resp.py"]}
|
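The core rule the patcher applies — append a suffix to every word of exactly `words_length` characters — can be shown without bs4/nltk; this simplified sketch splits on whitespace only, unlike wordpunct_tokenize:

```python
# Simplified illustration of the IVelumPatcher word rule; the real patcher
# tokenizes with nltk.wordpunct_tokenize and walks the HTML tree with bs4.
def append_to_matching_words(text, length=6, suffix="™"):
    return " ".join(
        word + suffix if len(word) == length else word
        for word in text.split()
    )

print(append_to_matching_words("Python proxies rewrite pages"))
# -> Python™ proxies rewrite pages
```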
17,238
|
GreyZmeem/habr-proxy
|
refs/heads/master
|
/app/proxy/patcher_default.py
|
# -*- coding: utf-8 -*-
import dataclasses
from http import HTTPStatus
from typing import List, Optional, overload
from urllib.parse import ParseResult, urlparse
from app.proxy import const
from app.proxy.config import Patcher, ReqResp, SrcDst
from app.proxy.req_resp import Header, Headers, Request, Response
from multidict import CIMultiDict
# noinspection PyMethodMayBeStatic
class DefaultPatcher(Patcher): # noqa: WPS214
"""Default patcher implementation."""
remove_headers = const.remove_headers
@overload
def __call__(
self,
req_resp: Request,
hosts: SrcDst,
) -> Request:
"""Patch HTTP request."""
@overload
def __call__( # noqa: F811
self,
req_resp: Response,
hosts: SrcDst,
) -> Response:
"""Patch HTTP response."""
def __call__( # noqa: F811
self,
req_resp: ReqResp,
hosts: SrcDst,
) -> ReqResp:
"""Patch HTTP request / response objects."""
kwargs = {
"headers": self.patch_headers(req_resp, hosts),
"body": self.patch_body(req_resp, hosts),
}
if isinstance(req_resp, Request):
kwargs["url"] = self.patch_url(req_resp, hosts) # type: ignore
if isinstance(req_resp, Response):
if req_resp.status == HTTPStatus.PERMANENT_REDIRECT.value:
kwargs["status"] = HTTPStatus.FOUND.value # type: ignore
return dataclasses.replace(req_resp, **kwargs)
def patch_headers(self, req_resp: ReqResp, hosts: SrcDst) -> Headers:
"""Patch HTTP request / response headers."""
new_headers: List[Header] = []
for header_name, header_value in req_resp.headers:
header_name = header_name.lower()
if header_name in self.remove_headers:
continue
new_header = self.patch_header((header_name, header_value), hosts)
new_headers.append(new_header)
return tuple(new_headers)
def patch_header(self, header: Header, hosts: SrcDst) -> Header:
"""Patch single HTTP header."""
src: ParseResult = urlparse(hosts[0])
dst: ParseResult = urlparse(hosts[1])
header_name, header_value = header
header_value = header_value.replace(
src.scheme or "http",
dst.scheme or "http",
)
header_value = header_value.replace(
src.netloc,
dst.netloc,
)
return header_name, header_value
def patch_url(self, req: Request, hosts: SrcDst) -> str:
"""Patch HTTP request url."""
dst: ParseResult = urlparse(hosts[1])
url: ParseResult = urlparse(req.url)
url = url._replace(scheme=dst.scheme, netloc=dst.netloc) # noqa: WPS437
return url.geturl()
def patch_body(self, req_resp: ReqResp, hosts: SrcDst) -> Optional[bytes]:
"""Patch HTTP request / response body."""
if not self.must_patch_body(req_resp):
return req_resp.body
body = req_resp.body.decode("utf-8") # type: ignore
body = body.replace(hosts[0], hosts[1])
return body.encode("utf-8")
def must_patch_body(self, req_resp: ReqResp) -> bool:
"""Check if request or response body must be patched."""
if not req_resp.body:
return False
content_type = CIMultiDict(req_resp.headers).get("content-type")
if not content_type or not content_type.lower().startswith("text/html"):
return False
return True
|
{"/main.py": ["/app/application.py"], "/app/application.py": ["/app/proxy/config.py", "/app/proxy/patcher_ivelum.py", "/app/proxy/proxy.py", "/app/proxy/req_resp.py"], "/app/proxy/patcher_ivelum.py": ["/app/proxy/config.py", "/app/proxy/patcher_default.py"], "/app/proxy/patcher_default.py": ["/app/proxy/config.py", "/app/proxy/req_resp.py"], "/app/proxy/config.py": ["/app/proxy/req_resp.py"], "/app/proxy/proxy.py": ["/app/proxy/config.py", "/app/proxy/req_resp.py"]}
|
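The scheme/netloc swap in patch_header can be seen in isolation; a standalone sketch (URLs are illustrative):

```python
# Standalone illustration of DefaultPatcher.patch_header's rewrite: the
# source scheme and netloc in a header value become the destination's.
from urllib.parse import urlparse

def rewrite_value(value, src_url, dst_url):
    src, dst = urlparse(src_url), urlparse(dst_url)
    value = value.replace(src.scheme or "http", dst.scheme or "http")
    return value.replace(src.netloc, dst.netloc)

location = rewrite_value("https://habr.com/ru/all/",
                         "https://habr.com", "http://127.0.0.1:8080")
print(location)  # http://127.0.0.1:8080/ru/all/
```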
17,239
|
GreyZmeem/habr-proxy
|
refs/heads/master
|
/app/proxy/req_resp.py
|
# -*- coding: utf-8 -*-
from dataclasses import dataclass, field
from http import HTTPStatus
from typing import Optional, Set, Tuple
Header = Tuple[str, str]
Headers = Tuple[Header, ...]
HeaderNames = Set[str]
@dataclass(frozen=True)
class Request(object):
"""HTTP request."""
#: Requested URL.
url: str
#: HTTP request method.
method: str
#: HTTP request headers.
headers: Headers = field(default_factory=tuple)
#: HTTP request body.
body: Optional[bytes] = None
@dataclass(frozen=True)
class Response(object):
"""HTTP response."""
#: HTTP response headers.
headers: Headers = field(default_factory=tuple)
#: HTTP response status code.
status: int = HTTPStatus.OK.value
#: HTTP response body.
body: Optional[bytes] = None
|
{"/main.py": ["/app/application.py"], "/app/application.py": ["/app/proxy/config.py", "/app/proxy/patcher_ivelum.py", "/app/proxy/proxy.py", "/app/proxy/req_resp.py"], "/app/proxy/patcher_ivelum.py": ["/app/proxy/config.py", "/app/proxy/patcher_default.py"], "/app/proxy/patcher_default.py": ["/app/proxy/config.py", "/app/proxy/req_resp.py"], "/app/proxy/config.py": ["/app/proxy/req_resp.py"], "/app/proxy/proxy.py": ["/app/proxy/config.py", "/app/proxy/req_resp.py"]}
|
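Because both dataclasses are frozen, patchers build modified copies instead of mutating; for example, the 308-to-302 downgrade in DefaultPatcher amounts to:

```python
# Frozen dataclasses are copied, never mutated; dataclasses.replace is how
# DefaultPatcher.__call__ swaps a 308 status for a 302.
import dataclasses
from app.proxy.req_resp import Response

resp = Response(headers=(("content-type", "text/html"),), status=308)
patched = dataclasses.replace(resp, status=302)
assert patched.status == 302 and resp.status == 308
```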
17,240
|
GreyZmeem/habr-proxy
|
refs/heads/master
|
/app/proxy/config.py
|
# -*- coding: utf-8 -*-
import abc
from dataclasses import dataclass
from typing import Tuple, Union, overload
from app.proxy import const
from app.proxy.req_resp import Request, Response
SrcDst = Tuple[str, str]
ReqResp = Union[Request, Response]
class Patcher(abc.ABC):
"""Protocol describes methods for patching HTTP requests and responses."""
@overload
def __call__(
self,
req_resp: Request,
hosts: SrcDst,
) -> Request:
"""Patch HTTP request."""
@overload
def __call__( # noqa: F811
self,
req_resp: Response,
hosts: SrcDst,
) -> Response:
"""Patch HTTP response."""
@abc.abstractmethod
def __call__( # noqa: F811
self,
req_resp: ReqResp,
hosts: SrcDst,
) -> ReqResp:
"""Patch either HTTP request or response."""
raise NotImplementedError
@dataclass(frozen=True)
class Config(object):
"""Configuration for `Proxy` service."""
#: Local resource (e.g. `http://127.0.0.1:8080`).
host: str
#: Proxied resource (e.g. `https://habr.com`).
upstream: str
#: Callable for patching HTTP request / response objects.
patcher: Patcher
#: Upstream HTTP request timeout.
timeout: int = const.timeout
|
{"/main.py": ["/app/application.py"], "/app/application.py": ["/app/proxy/config.py", "/app/proxy/patcher_ivelum.py", "/app/proxy/proxy.py", "/app/proxy/req_resp.py"], "/app/proxy/patcher_ivelum.py": ["/app/proxy/config.py", "/app/proxy/patcher_default.py"], "/app/proxy/patcher_default.py": ["/app/proxy/config.py", "/app/proxy/req_resp.py"], "/app/proxy/config.py": ["/app/proxy/req_resp.py"], "/app/proxy/proxy.py": ["/app/proxy/config.py", "/app/proxy/req_resp.py"]}
|
17,241
|
GreyZmeem/habr-proxy
|
refs/heads/master
|
/app/proxy/const.py
|
# -*- coding: utf-8 -*-
from typing import Final, Set
# =======
# General
# =======
#: Default upstream.
upstream: Final[str] = "https://habr.com"
#: Headers to remove from HTTP requests / responses.
remove_headers: Final[Set[str]] = {
"accept-encoding",
"content-encoding",
"keep-alive",
"p3p",
"public-key-pins",
"strict-transport-security",
"te",
"transfer-encoding",
"upgrade-insecure-requests",
}
#: Upstream request timeout.
timeout: Final[int] = 30
# ==============
# iVelum patcher
# ==============
#: Default word length.
word_length: Final[int] = 6
#: Default text to append.
word_append: Final[str] = "™"
|
{"/main.py": ["/app/application.py"], "/app/application.py": ["/app/proxy/config.py", "/app/proxy/patcher_ivelum.py", "/app/proxy/proxy.py", "/app/proxy/req_resp.py"], "/app/proxy/patcher_ivelum.py": ["/app/proxy/config.py", "/app/proxy/patcher_default.py"], "/app/proxy/patcher_default.py": ["/app/proxy/config.py", "/app/proxy/req_resp.py"], "/app/proxy/config.py": ["/app/proxy/req_resp.py"], "/app/proxy/proxy.py": ["/app/proxy/config.py", "/app/proxy/req_resp.py"]}
|
17,242
|
GreyZmeem/habr-proxy
|
refs/heads/master
|
/app/proxy/proxy.py
|
# -*- coding: utf-8 -*-
import aiohttp
from app.proxy.config import Config
from app.proxy.req_resp import Headers, Request, Response
class Proxy(object):
"""Asd."""
def __init__(self, config: Config):
self.config: Config = config
async def dispatch(self, request: Request) -> Response:
"""Asd."""
request = self.patch_request(request)
response = await self.make_request(request)
return self.patch_response(response)
def patch_request(self, request: Request) -> Request:
"""Patch HTTP request."""
hosts = self.config.host, self.config.upstream
return self.config.patcher(request, hosts)
def patch_response(self, response: Response) -> Response:
"""Patch HTTP response."""
hosts = self.config.upstream, self.config.host
return self.config.patcher(response, hosts)
async def make_request(self, request: Request) -> Response:
"""Perform HTTP request to the upstream."""
        # TODO: Move this code to a separate module and define it as a dependency.
async with aiohttp.ClientSession() as session:
req_headers = dict(request.headers)
response = await session.request(
url=request.url,
method=request.method,
headers=req_headers,
allow_redirects=False,
raise_for_status=False,
verify_ssl=False,
)
headers: Headers = tuple(response.headers.items())
body = await response.read()
return Response(headers, response.status, body)
|
{"/main.py": ["/app/application.py"], "/app/application.py": ["/app/proxy/config.py", "/app/proxy/patcher_ivelum.py", "/app/proxy/proxy.py", "/app/proxy/req_resp.py"], "/app/proxy/patcher_ivelum.py": ["/app/proxy/config.py", "/app/proxy/patcher_default.py"], "/app/proxy/patcher_default.py": ["/app/proxy/config.py", "/app/proxy/req_resp.py"], "/app/proxy/config.py": ["/app/proxy/req_resp.py"], "/app/proxy/proxy.py": ["/app/proxy/config.py", "/app/proxy/req_resp.py"]}
|
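Proxy can also be driven outside aiohttp; a minimal sketch using DefaultPatcher (note this performs a real HTTP request to the upstream):

```python
# Minimal sketch driving Proxy directly; dispatch() really contacts the upstream.
import asyncio
from app.proxy.config import Config
from app.proxy.patcher_default import DefaultPatcher
from app.proxy.proxy import Proxy
from app.proxy.req_resp import Request

config = Config("http://127.0.0.1:8080", "https://habr.com", DefaultPatcher())
request = Request(url="http://127.0.0.1:8080/", method="GET")
response = asyncio.run(Proxy(config).dispatch(request))
print(response.status)
```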
17,245
|
redwingsdan/python-tests
|
refs/heads/master
|
/diceroll.py
|
import random
def start_dice_roll():
    min_roll = 1
    max_roll = 6
    roll_again = 'yes'
    while roll_again == 'y' or roll_again == 'yes':
        diceroll = random.randint(min_roll, max_roll)
        print('Roll 1: ' + str(diceroll))
        diceroll = random.randint(min_roll, max_roll)
        print('Roll 2: ' + str(diceroll))
roll_again = input('Roll the dice again? ').lower()
|
{"/menu.py": ["/numbergame.py", "/rockpaperscissors.py", "/passwordgenerator.py", "/diceroll.py", "/ciphers.py"]}
|
17,246
|
redwingsdan/python-tests
|
refs/heads/master
|
/filemanipulation.py
|
import os
import shutil
#w = write permissions (create if not exists)
#r = read permissions
#a = append to end of file
myFile = open('filemanipulationtest.txt', 'w+')
myFile.write('This is a test string')
myFile.close()
os.rename('D:/Python/filemanipulationtest.txt', 'D:/Python/filedir1/filemanipulationtest.txt')
shutil.move('D:/Python/filedir1/filemanipulationtest.txt', 'D:/Python/filedir2/filemanipulationtest.txt')
os.remove('D:/Python/filedir2/filemanipulationtest.txt')
|
{"/menu.py": ["/numbergame.py", "/rockpaperscissors.py", "/passwordgenerator.py", "/diceroll.py", "/ciphers.py"]}
|
17,247
|
redwingsdan/python-tests
|
refs/heads/master
|
/passwordgenerator.py
|
import random
def shuffle(string):
tempPass = list(string)
random.shuffle(tempPass)
return ''.join(tempPass)
def generate_char():
number = random.randint(0,3)
if number == 0:
return chr(random.randint(65,90)) #uppercase
elif number == 1:
return chr(random.randint(97,122)) #lowercase
elif number == 2:
return chr(random.randint(48,57)) #digit
else:
        return chr(random.randint(33,64)) #punctuation and symbols (ASCII 33-64; this range also includes digits)
def start_password_generation(total_characters):
password = ''
if not total_characters.isdigit():
total_characters = 8
for num in range(int(total_characters)):
password += generate_char()
password = shuffle(password)
print('Generated password is: ' + password)
|
{"/menu.py": ["/numbergame.py", "/rockpaperscissors.py", "/passwordgenerator.py", "/diceroll.py", "/ciphers.py"]}
|
17,248
|
redwingsdan/python-tests
|
refs/heads/master
|
/cryptochallenge.py
|
import codecs
from binascii import hexlify, unhexlify
class InvalidMessageException(Exception):
pass
#read in a file name and return the contents as a string
def read_file_data(file_name):
file = open(file_name, 'r')
return file.read()
#decode the hex string data into bytes (equivalent to unhexlify)
def convert_hex_str_to_bytes(hex_str):
return codecs.decode(hex_str, 'hex')
#decode the hex data, then encode it into the base64 format, and then decode into a string
def hex_to_base_64(hex_data):
return codecs.encode(convert_hex_str_to_bytes(hex_data), 'base64').decode()
#iterate through each character in the 2 byte arrays and xor them with each other
def xor_bytes(input_bytes1, input_bytes2):
return bytes([byte1 ^ byte2 for (byte1, byte2) in zip(input_bytes1, input_bytes2)])
#return the matched xor data for a given encrypted hex value
def find_best_xor_match(data_hex):
best_match = None
    ascii_text_chars = list(range(97, 123)) + [32] #lowercase a-z plus the space character, used to score candidate plaintexts
    #iterate through all 254 possible single-byte keys (1-254)
    for i in range(1, 255):
xor_key = bytes([i]) #create a key from the character
xor_key_str = xor_key * len(data_hex) #match the key length with the hex text
xor_byte_data = xor_bytes(data_hex, xor_key_str) #xor the key with the hex text
number_letters = sum([x in ascii_text_chars for x in xor_byte_data]) #score the resulting data by totaling the matching ascii characters (more ascii characters means likely a more readable string of text)
#Use the best scored data for the guess
        if best_match is None or number_letters > best_match['number_letters']:
best_match = {'message': xor_byte_data, 'number_letters': number_letters, 'key': xor_key}
#Ensure the best answer is at least 70% readable, otherwise throw an exception
if best_match['number_letters'] > 0.7*len(data_hex):
return best_match
else:
raise InvalidMessageException('Best candidate message is: %s' % best_match['message'])
def challenge_1():
file_data = read_file_data('hextext.txt')
base_64_data = hex_to_base_64(file_data)
print(base_64_data)
def challenge_2():
file_data_1 = read_file_data('hextext2a.txt')
file_data_2 = read_file_data('hextext2b.txt')
data_1_hex = convert_hex_str_to_bytes(file_data_1)
data_2_hex = convert_hex_str_to_bytes(file_data_2)
xor_byte_data = xor_bytes(data_1_hex, data_2_hex)
print(xor_byte_data.hex())
def challenge_3():
file_data = read_file_data('hextext3.txt')
data_hex = convert_hex_str_to_bytes(file_data)
best_match = find_best_xor_match(data_hex)
print('message: ' + str(best_match['message']))
print('letters: ' + str(best_match['number_letters']))
print('key: ' + str(best_match['key']))
def challenge_4():
data_list = []
#Iterate through each line of a file and convert the values from hex and add them to the list
with open('hextext4.txt') as file_content:
for line in file_content:
data_list.append(convert_hex_str_to_bytes(line.strip()))
possible_matches = list()
#Iterate through each data line and try to find a valid xor match
#Non-valid matches will throw an exception (since they weren't encrypted)
for (line_number, text) in enumerate(data_list):
try:
best_match_message = find_best_xor_match(text)['message']
except InvalidMessageException:
pass
else:
possible_matches.append({
'line_number': line_number,
'original_text': text,
'message': best_match_message
})
if len(possible_matches) > 1:
print("Error: more than one match")
else:
#Print the object formatted as key: value
for (key, value) in possible_matches[0].items():
print(f'{key}: {value}')
def challenge_5():
file_data = read_file_data('hextext5.txt')
#Convert file data and key to bytes
byte_data = bytes(file_data, 'utf8')
xor_key = bytes('ICE', 'utf8')
#get the xor to match at least the length of the message
xor_key_str = xor_key*(len(byte_data)//len(xor_key) + 1) #message length / key length without remainder + 1
xor_byte_data = xor_bytes(byte_data, xor_key_str) #xor the key with the hex text
print(xor_byte_data.hex())
challenge_5()
|
{"/menu.py": ["/numbergame.py", "/rockpaperscissors.py", "/passwordgenerator.py", "/diceroll.py", "/ciphers.py"]}
|
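A quick round-trip check of the single-byte-XOR scorer (the key and plaintext below are arbitrary; lowercase text scores well because the scorer counts a-z and spaces):

```python
# Round-trip check for find_best_xor_match: encrypt with a known one-byte
# key and let the scorer recover it. Assumes cryptochallenge.py is importable.
from cryptochallenge import find_best_xor_match, xor_bytes

plaintext = b"the quick brown fox jumps over the lazy dog"
key = bytes([0x37])
ciphertext = xor_bytes(plaintext, key * len(plaintext))
match = find_best_xor_match(ciphertext)
print(match["key"], match["message"])  # expected: b'7' and the plaintext
```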
17,249
|
redwingsdan/python-tests
|
refs/heads/master
|
/fixedvsfloatingdecimals.py
|
from decimal import Decimal

def rec(y, z):
return 108 - ((815-1500/z)/y)
def floatpt(N):
x = [4, 4.25]
for i in range(2, N+1):
x.append(rec(x[i-1], x[i-2]))
return x
def fixedpt(N):
x = [Decimal(4), Decimal(17)/Decimal(4)]
for i in range(2, N+1):
x.append(rec(x[i-1], x[i-2]))
return x
N = 20
flt = floatpt(N)
fxd = fixedpt(N)
for i in range(N):
    print(str(i) + ' | ' + str(flt[i]) + ' | ' + str(fxd[i]))
|
{"/menu.py": ["/numbergame.py", "/rockpaperscissors.py", "/passwordgenerator.py", "/diceroll.py", "/ciphers.py"]}
|
17,250
|
redwingsdan/python-tests
|
refs/heads/master
|
/menu.py
|
import numbergame
import rockpaperscissors
import passwordgenerator
import diceroll
import ciphers
selection = input("Select Program:\n" +
"1. Number Guessing\n" +
"2. Rock, Paper, Scissors\n" +
"3. Password Generator\n" +
"4. Dice Rolling\n" +
"5. Ciphers\n" +
"")
if selection == '1':
    numbergame.start_number_game()
elif selection == '2':
    rockpaperscissors.start_rock_paper_scissors_game()
elif selection == '3':
    passwordgenerator.start_password_generation(input('Password Length: '))
elif selection == '4':
    diceroll.start_dice_roll()
elif selection == '5':
    ciphers.convert_cipher()
else:
print('Invalid input')
|
{"/menu.py": ["/numbergame.py", "/rockpaperscissors.py", "/passwordgenerator.py", "/diceroll.py", "/ciphers.py"]}
|
17,251
|
redwingsdan/python-tests
|
refs/heads/master
|
/numbergame.py
|
import random
def start_number_game():
number = random.randint(1, 10)
user_name = input('What is your name?')
    number_guesses = 0
print('Alright, ' + user_name + ' I guessed a number: ')
while number_guesses < 5:
guess = int(input())
number_guesses += 1
if guess < number:
print('Too low!')
if guess > number:
print('Too high!')
if guess == number:
break
if guess == number:
print('YOU GOT IT! Took ' + str(number_guesses) + ' tries')
else:
print('YOU FAILED! The number was ' + str(number))
|
{"/menu.py": ["/numbergame.py", "/rockpaperscissors.py", "/passwordgenerator.py", "/diceroll.py", "/ciphers.py"]}
|
17,252
|
redwingsdan/python-tests
|
refs/heads/master
|
/ciphers.py
|
def convert_cipher():
    cipher_type = input('What type of cipher? ')
    if cipher_type == 'b':
        convert_binary_cipher()
    elif cipher_type == 'u':
        convert_unicode_cipher()
    else:
        print('Cipher type not supported')
def convert_binary_cipher():
file_content = open('cipher1.txt', 'r')
result_text = ''
ascii_value = 0
multVal = 128
while 1:
binary_character = file_content.read(1)
if not binary_character:
break
ascii_value += int(binary_character) * int(multVal)
if multVal == 1:
result_text += chr(ascii_value)
multVal = 128
ascii_value = 0
else:
            multVal //= 2
print('Decoded Text: ' + result_text)
file_content.close()
def convert_unicode_cipher():
file_content = open('cipher2.txt', 'r')
result_text = file_content.read().encode('utf-8').decode('unicode-escape')
print('Decoded Text: ' + result_text)
file_content.close()
|
{"/menu.py": ["/numbergame.py", "/rockpaperscissors.py", "/passwordgenerator.py", "/diceroll.py", "/ciphers.py"]}
|
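The bit-accumulation loop in convert_binary_cipher is equivalent to slicing the stream into 8-character chunks; a compact sketch (sample bits are illustrative):

```python
# Compact equivalent of the decoding loop: every 8 '0'/'1' characters form
# one ASCII code, most significant bit first.
bits = "0100100001101001"  # spells "Hi"
decoded = "".join(chr(int(bits[i:i + 8], 2)) for i in range(0, len(bits), 8))
print(decoded)  # Hi
```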
17,253
|
redwingsdan/python-tests
|
refs/heads/master
|
/rockpaperscissors.py
|
from random import randint
def compare_selections(choices, choice_names, player_selection, ai_selection, total_wins, total_losses):
player_index = choices.index(player_selection)
ai_index = choices.index(ai_selection)
is_player_win = (player_index > ai_index or (player_index == 0 and ai_index == 2)) and not (player_index == 2 and ai_index == 0)
if is_player_win:
print('You win! ' + choice_names[player_index] + ' beats ' + choice_names[ai_index])
else:
print('You lose! ' + choice_names[ai_index] + ' beats ' + choice_names[player_index])
return is_player_win
def start_rock_paper_scissors_game():
choices = ['r', 'p', 's']
choice_names = ['Rock', 'Paper', 'Scissors']
ai_selection = choices[randint(0,2)]
player_selection = None
total_wins = 0
total_losses = 0
    while player_selection is None:
player_selection = input("Rock, Paper, Scissors? ").lower()
if player_selection == ai_selection:
player_index = choices.index(player_selection)
print('Both played ' + choice_names[player_index])
elif player_selection in choices:
is_win = compare_selections(choices, choice_names, player_selection, ai_selection, total_wins, total_losses)
if is_win:
total_wins += 1
else:
total_losses += 1
else:
print('Invalid choice, try again')
print('Current Record: Wins = ' + str(total_wins) + ' Losses = ' + str(total_losses))
player_selection = None
ai_selection = choices[randint(0,2)]
|
{"/menu.py": ["/numbergame.py", "/rockpaperscissors.py", "/passwordgenerator.py", "/diceroll.py", "/ciphers.py"]}
|
17,254
|
redwingsdan/python-tests
|
refs/heads/master
|
/csvmanipulation.py
|
import csv
def writer(header, data, filename, option):
    with open(filename, 'w', newline='') as csvfile:
if option == 'write':
pizza_places = csv.writer(csvfile)
pizza_places.writerow(header)
for data_row in data:
pizza_places.writerow(data_row)
elif option == 'update':
            dict_writer = csv.DictWriter(csvfile, fieldnames=header)
            dict_writer.writeheader()
            dict_writer.writerows(data)
else:
print('Unknown option')
def updater(filename):
    with open(filename, 'r', newline='') as csvfile:
readData = [row for row in csv.DictReader(csvfile)]
readData[0]['Rating'] = '9.2'
readHeader = readData[0].keys()
writer(readHeader, readData, filename, 'update')
csvFileName = 'csv_pizza_ratings.csv'
csvHeader = ('Rank', 'Rating', 'Pizza Place')
csvData = [
(1, 9.4, 'Di Faras'),
(2, 8.8, 'Frank Pepes'),
(3, 7.3, 'Pizza District'),
(4, 6.9, 'Poppa Ginos')
]
writer(csvHeader, csvData, csvFileName, 'write')
updater(csvFileName)
|
{"/menu.py": ["/numbergame.py", "/rockpaperscissors.py", "/passwordgenerator.py", "/diceroll.py", "/ciphers.py"]}
|
17,255
|
trunkboy/django-multi-storage
|
refs/heads/main
|
/storage/schema/mutation.py
|
import logging
from django.core.exceptions import ValidationError
from django.contrib.auth import get_user_model
from django.contrib.gis.db import models
from django.db.models import Q
import django_filters
import graphene
from graphene_django import DjangoObjectType
# model definitions
from core.models import (
BusinessDetailsAndCategoryGrouping,
Category,
FollowerFollowedMapping,
)
# node definitions
from core.schema.node import (
FollowerFollowedMappingFilter, FollowerFollowedMappingNode
)
# Get an instance of a logger
logger = logging.getLogger(__name__)
# class CreateOrUpdateFollowerFollowedMapping(graphene.relay.ClientIDMutation):
# follower_followed_mapping = graphene.Field(FollowerFollowedMappingNode)
# class Input:
# followed = graphene.Int(required=True)
# follow = graphene.Int(default=1)
# def mutate_and_get_payload(root, info, **input):
# user = info.context.user
# followerFollowedMapping, _ = FollowerFollowedMapping.objects.update_or_create(
# follower = user,
# followed = get_user_model().objects.get(id=input.get('followed')),
# defaults = {
# 'follow': bool(int(input.get('follow')))
# }
# )
# return CreateOrUpdateFollowerFollowedMapping(follower_followed_mapping=followerFollowedMapping)
class Mutation(graphene.AbstractType):
# create_or_update_follower = CreateOrUpdateFollowerFollowedMapping.Field()
pass
# update_user = UpdateUser.Field()
|
{"/storage/helper.py": ["/storage/models.py"], "/storage/schema/query.py": ["/storage/schema/node.py"], "/storage/urls.py": ["/storage/views.py"], "/storage/schema/node.py": ["/storage/models.py", "/storage/helper.py"], "/storage/views.py": ["/storage/helper.py"]}
|
17,256
|
trunkboy/django-multi-storage
|
refs/heads/main
|
/storage/google_storage.py
|
# GCP storage functions used across the site
# Imports the Google Cloud client library
from google.cloud import storage
import datetime
# Instantiates a client
storage_client = storage.Client.from_service_account_json('config/gcp_storage.json')
def upload_blob(bucket_name, source_file, destination_blob_name):
"""Uploads a file to the bucket."""
# bucket_name = "your-bucket-name"
# source_file_name = "local/path/to/file"
# destination_blob_name = "storage-object-name"
# bucket to upload
bucket = storage_client.bucket(bucket_name)
# final name of the blob
blob = bucket.blob(destination_blob_name)
    # upload the blob
# result = blob.upload_from_filename(source_file_name)
result = blob.upload_from_file(
source_file,
content_type=str(source_file.content_type))
print(result)
print(
"File {} uploaded to {}".format(
source_file, destination_blob_name
)
)
return True
def generate_download_signed_url_v4(bucket_name, blob_name):
"""
Generates a v4 signed URL for downloading a blob.
"""
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(blob_name)
url = blob.generate_signed_url(
version="v4",
        # This URL is valid for 30 minutes
expiration=datetime.timedelta(minutes=30),
# Allow GET requests using this URL.
method="GET",
)
return url
|
{"/storage/helper.py": ["/storage/models.py"], "/storage/schema/query.py": ["/storage/schema/node.py"], "/storage/urls.py": ["/storage/views.py"], "/storage/schema/node.py": ["/storage/models.py", "/storage/helper.py"], "/storage/views.py": ["/storage/helper.py"]}
|
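A hedged usage sketch for the signed-URL helper; the bucket and blob names are placeholders, and config/gcp_storage.json must exist for the module-level client to initialize:

```python
# Illustrative call; "my-bucket" and the blob path are placeholders.
from storage.google_storage import generate_download_signed_url_v4

url = generate_download_signed_url_v4("my-bucket", "reports/2020-04.pdf")
print(url)  # v4 signed URL, valid for 30 minutes
```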
17,257
|
trunkboy/django-multi-storage
|
refs/heads/main
|
/storage/migrations/0005_auto_20200416_1355.py
|
# Generated by Django 3.0.5 on 2020-04-16 08:25
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('storage', '0004_auto_20200416_1353'),
]
operations = [
migrations.RemoveField(
model_name='filestorage',
name='connection_name',
),
migrations.RemoveField(
model_name='filestorage',
name='driver_name',
),
]
|
{"/storage/helper.py": ["/storage/models.py"], "/storage/schema/query.py": ["/storage/schema/node.py"], "/storage/urls.py": ["/storage/views.py"], "/storage/schema/node.py": ["/storage/models.py", "/storage/helper.py"], "/storage/views.py": ["/storage/helper.py"]}
|
17,258
|
trunkboy/django-multi-storage
|
refs/heads/main
|
/storage/helper.py
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
import hashlib
import json
try:
import libcloud
except ImportError:
raise ImproperlyConfigured("Could not load libcloud")
try:
from django.utils.six.moves.urllib.parse import urljoin
except ImportError:
string_types = str
from urllib.parse import urljoin
from storage.models import (
FileStorage,
FileTransactionLogs
)
provider_mapper = {
'google_storage' : libcloud.DriverType.STORAGE.GOOGLE_STORAGE,
}
class Storage:
    '''
    Storage class to abstract the storage functions across multiple service providers
    '''
    # holds the driver, the main connection to the provider
driver = None
# hold the provider data
provider = settings.STORAGE_PROVIDERS['default']
# hold the bucket data
bucket = None
    # initialize the class
    def __init__(self, provider_name=None):
        """Establish the connection.
        __init__ establishes the connection with the provider and connects to the bucket.
Parameters
----------
provider_name : string
it should be the configuration key which is defined in the settings file
Returns
-------
None
            It updates the object's connection parameters
Raises
------
ImproperlyConfigured
When the configuration is not proper
Examples
--------
>>> s=Storage('default')
"""
# check if the provider name is in the list or none
if provider_name is not None:
# build the provider dict
self.provider = settings.STORAGE_PROVIDERS[provider_name]
try:
# load the connection driver
cls = libcloud.get_driver(libcloud.DriverType.STORAGE, provider_mapper[self.provider['type']])
except Exception as e:
            # if the connection driver is not found, raise an exception
raise ImproperlyConfigured("Unable to find libcloud driver type %s: %s" %(provider_mapper[self.provider['type']], e))
try:
# make the connection with the platform, pass the username and secret
self.driver = cls(
key=self.provider['user'],
secret=self.provider['secret']
)
# connect to the bucket, only one bucket connection is allowed per connection
self.bucket = self.driver.get_container(self.provider['bucket'])
except Exception as e:
            # if the connection to the bucket is not established, raise an exception
raise ImproperlyConfigured("Unable to create libcloud driver type %s: %s" %(provider_mapper[self.provider['type']], e))
def hash_file(self, file_data, hash_algo=None):
"""Hash the file
Funtion to generate the hash sha3_512 hash of the given file
Parameters
----------
file_data : File
The File which need to be hashed, it should be inMemory file
hash_algo : string
            Name of the hashing algorithm to use
(supported algorithms : md5,)
Returns
-------
        string
Hash digest of the file
Raises
------
AttributeError
            If ``file_data`` does not provide a ``chunks()`` method
"""
# Create the hash object
file_hash = hashlib.sha3_512()
        # Read the file's bytes in chunks
for f in file_data.chunks():
# Update the hash
file_hash.update(f)
        # Return the hexadecimal digest of the hash
        return file_hash.hexdigest()
def list_container_objects(self):
'''
Return a list of objects for the given (in setting) container.
'''
return self.driver.list_container_objects(self.bucket)
def list_containers(self):
'''
Return a list of containers.
'''
return self.driver.list_containers()
def save_file(self, request, file):
# create the return object
reply = {
"status" : False,
"error" : 'None',
"file" : None,
}
# create hash of the file
file_hash = self.hash_file(file)
# upload the file
try:
server_reply = self.upload_object(file=file, file_name=file_hash)
except Exception as e:
# update the error and return
reply['error'] = "error Uploading the file :" + str(e)
return reply
# try saving the file related data to DB
try:
file_data = FileStorage.objects.create(
hashed_name = self.hash_file(file),
original_name = file.name,
original_meta_data = file.content_type,
original_size = file.size,
original_charset = file.charset,
bucket_raw = self.bucket,
bucket_name = self.bucket.name,
server_reply = server_reply,
)
except Exception as e:
# update the error and return
reply['error'] = "error Saving file data in DB :" + str(e)
return reply
# try saving the file transaction log
try:
FileTransactionLogs.objects.create(
file = file_data,
user = request.user,
remark = 'File uploaded',
)
except Exception as e:
# update the error and return
reply['error'] = "error Saving file transaction data :" + str(e)
return reply
reply['file'] = file_data
reply['status'] = True
return reply
def upload_object(self, file, file_name):
'''
upload the file to bucket
'''
return self.driver.upload_object_via_stream(
iterator=file.chunks(),
container=self.bucket,
object_name=file_name,
extra={
'meta_data': {
'content_type': file.content_type,
}
}
)
def get_object_url(self, object_data=None, object_id=None, object_hash=None):
'''
Return the object URL based on the id of the object from the storage table
'''
object_details = None
# get the object from the database
try:
if object_data is not None:
object_details = object_data
elif object_id is not None:
object_details = FileStorage.objects.get(id=object_id)
elif object_hash is not None:
object_details = FileStorage.objects.get(hashed_name=object_hash)
except Exception as e:
error = "No object found" + str(e)
print(error)
# connect to the object bucket
if self.mount_driver_from_bucket_name(bucket_name=object_details.bucket_name):
print('bucket connected')
else:
error = "No bucket found"
print(error)
# get the object
object_blob = self.driver.get_object(container_name=self.bucket.name, object_name=object_details.hashed_name)
# get the object URL
try:
url = self.driver.get_object_cdn_url(object_blob)
except NotImplementedError as e:
object_path = '{}/{}'.format(self.bucket.name, object_blob.name)
if 's3' in self.provider['type']:
base_url = 'https://%s' % self.driver.connection.host
url = urljoin(base_url, object_path)
elif 'google' in self.provider['type']:
url = urljoin('https://storage.googleapis.com', object_path)
else:
raise e
# return the URL
return url
def mount_driver_from_bucket_name(self, bucket_name):
for provider in settings.STORAGE_PROVIDERS:
if settings.STORAGE_PROVIDERS[provider]['bucket'] == bucket_name:
self.__init__(provider_name=provider)
return True
return False
|
{"/storage/helper.py": ["/storage/models.py"], "/storage/schema/query.py": ["/storage/schema/node.py"], "/storage/urls.py": ["/storage/views.py"], "/storage/schema/node.py": ["/storage/models.py", "/storage/helper.py"], "/storage/views.py": ["/storage/helper.py"]}
|
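Storage reads its connection details from settings.STORAGE_PROVIDERS; the shape below is inferred from the attribute accesses in __init__ and from provider_mapper (all values are placeholders):

```python
# Inferred shape of settings.STORAGE_PROVIDERS; the keys follow the accesses
# in Storage.__init__ ('type', 'user', 'secret', 'bucket').
STORAGE_PROVIDERS = {
    "default": {
        "type": "google_storage",  # must be a key of provider_mapper
        "user": "sa@project.iam.gserviceaccount.com",
        "secret": "<service-account-secret>",
        "bucket": "my-default-bucket",
    },
}
```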
17,259
|
trunkboy/django-multi-storage
|
refs/heads/main
|
/storage/migrations/0004_auto_20200416_1353.py
|
# Generated by Django 3.0.5 on 2020-04-16 08:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('storage', '0003_filestorage_server_reply'),
]
operations = [
migrations.RenameField(
model_name='filestorage',
old_name='bucket',
new_name='bucket_name',
),
migrations.AddField(
model_name='filestorage',
name='bucket_raw',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='filestorage',
name='connection_name',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='filestorage',
name='driver_name',
field=models.CharField(max_length=255, null=True),
),
]
|
{"/storage/helper.py": ["/storage/models.py"], "/storage/schema/query.py": ["/storage/schema/node.py"], "/storage/urls.py": ["/storage/views.py"], "/storage/schema/node.py": ["/storage/models.py", "/storage/helper.py"], "/storage/views.py": ["/storage/helper.py"]}
|
17,260
|
trunkboy/django-multi-storage
|
refs/heads/main
|
/storage/schema/query.py
|
import graphene
from graphene_django.filter import DjangoFilterConnectionField
from storage.schema.node import (
ImageNode
)
class Query(graphene.ObjectType):
pass
|
{"/storage/helper.py": ["/storage/models.py"], "/storage/schema/query.py": ["/storage/schema/node.py"], "/storage/urls.py": ["/storage/views.py"], "/storage/schema/node.py": ["/storage/models.py", "/storage/helper.py"], "/storage/views.py": ["/storage/helper.py"]}
|
17,261
|
trunkboy/django-multi-storage
|
refs/heads/main
|
/storage/urls.py
|
'''
storage module URL Configuration
'''
from django.urls import include, path
from storage.views import (
FileDownload,
FileUpload
)
urlpatterns = [
path('upload', FileUpload.as_view(), name='file_upload'),
    path('url/<int:fid>', FileDownload.as_view(), name='file_download'),
]
|
{"/storage/helper.py": ["/storage/models.py"], "/storage/schema/query.py": ["/storage/schema/node.py"], "/storage/urls.py": ["/storage/views.py"], "/storage/schema/node.py": ["/storage/models.py", "/storage/helper.py"], "/storage/views.py": ["/storage/helper.py"]}
|
17,262
|
trunkboy/django-multi-storage
|
refs/heads/main
|
/storage/migrations/0002_auto_20200415_1845.py
|
# Generated by Django 3.0.5 on 2020-04-15 13:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('storage', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='filestorage',
name='hashed_id',
),
migrations.AlterField(
model_name='filestorage',
name='hashed_name',
field=models.CharField(max_length=255, unique=True),
),
]
|
{"/storage/helper.py": ["/storage/models.py"], "/storage/schema/query.py": ["/storage/schema/node.py"], "/storage/urls.py": ["/storage/views.py"], "/storage/schema/node.py": ["/storage/models.py", "/storage/helper.py"], "/storage/views.py": ["/storage/helper.py"]}
|
17,263
|
trunkboy/django-multi-storage
|
refs/heads/main
|
/storage/migrations/0001_initial.py
|
# Generated by Django 3.0.5 on 2020-04-14 15:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='FileStorage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hashed_id', models.CharField(max_length=255)),
('hashed_name', models.CharField(max_length=255)),
('original_name', models.CharField(max_length=255)),
('original_meta_data', models.TextField(null=True)),
('original_size', models.IntegerField(help_text='The size, in bytes, of the uploaded file.', null=True)),
('original_charset', models.CharField(max_length=255, null=True)),
('bucket', models.CharField(max_length=255, null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='FileTransactionLogs',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('remark', models.TextField(null=True)),
('file', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='storage.FileStorage')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
{"/storage/helper.py": ["/storage/models.py"], "/storage/schema/query.py": ["/storage/schema/node.py"], "/storage/urls.py": ["/storage/views.py"], "/storage/schema/node.py": ["/storage/models.py", "/storage/helper.py"], "/storage/views.py": ["/storage/helper.py"]}
|
17,264
|
trunkboy/django-multi-storage
|
refs/heads/main
|
/storage/storage.py
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import Storage
class MultiStorage(Storage):
    '''
    Base Storage class to abstract the storage functions across multiple service providers
    '''
    # holds the driver, the main connection to the provider
driver = None
# hold the provider data
provider = settings.STORAGE_PROVIDERS['default']
# hold the bucket data
bucket = None
def __init__(self, provider_name=None):
"""Establish the connection.
__init__ establish the connection with the provider and connect to the bucket.
Parameters
----------
provider_name : string
it should be the configuration key which is defined in the settings file
Returns
-------
None
            It updates the object's connection parameters
Raises
------
ImproperlyConfigured
When the configuration is not proper
Examples
--------
>>> s=Storage('default')
"""
# check if the provider name is in the list or none
if provider_name is not None:
# build the provider dict
self.provider = settings.STORAGE_PROVIDERS[provider_name]
|
{"/storage/helper.py": ["/storage/models.py"], "/storage/schema/query.py": ["/storage/schema/node.py"], "/storage/urls.py": ["/storage/views.py"], "/storage/schema/node.py": ["/storage/models.py", "/storage/helper.py"], "/storage/views.py": ["/storage/helper.py"]}
|
17,265
|
trunkboy/django-multi-storage
|
refs/heads/main
|
/storage/schema/node.py
|
import json
from django.contrib.auth import get_user_model
from django.contrib.gis.db import models
import django_filters
import graphene
from graphene_django import DjangoObjectType
from graphene_django.converter import convert_django_field
from storage.models import (
FileStorage,
FileTransactionLogs
)
from storage.helper import (
Storage
)
# File Node
class FileNode(graphene.Interface):
'''
Defines the custom file node
it accepts the file id to view the file and its related details
'''
id = graphene.ID(required=True)
url = graphene.String()
def resolve_id(self, info):
return self.hashed_name
def resolve_url(self, info):
storage = Storage()
return storage.get_object_url(object_id=self.id)
# Image node
class ImageNode(graphene.ObjectType):
'''
Defines the custom Image node, it extends the File Node
it accepts the file id to view the Image and its related details
'''
class Meta:
interfaces = (graphene.relay.Node, FileNode)
url_200 = graphene.String()
def resolve_url_200(self, info):
'''
        url of the 200 byte implementation of the image
'''
return "URL comming soon"
# Video node
class VideoNode(graphene.ObjectType):
'''
Defines the custom Video node, it extends the File Node
it accepts the file id to view the Video and its related details
'''
class Meta:
interfaces = (graphene.relay.Node, FileNode)
stream_url = graphene.String()
def resolve_stream_url(self, info):
'''
Streaming Url
'''
return "URL comming soon"
|
{"/storage/helper.py": ["/storage/models.py"], "/storage/schema/query.py": ["/storage/schema/node.py"], "/storage/urls.py": ["/storage/views.py"], "/storage/schema/node.py": ["/storage/models.py", "/storage/helper.py"], "/storage/views.py": ["/storage/helper.py"]}
|
17,266
|
trunkboy/django-multi-storage
|
refs/heads/main
|
/storage/s3_storage.py
|
# AWS S3 storage functions used across the site
# Imports the AWS Boto3 library
import boto3
from botocore.exceptions import ClientError
import datetime
# Create a client for s3
s3_client = boto3.client('s3')
def upload_to_s3(bucket_name, source_file, destination_name=None):
"""Upload a file to an S3 bucket
:param source_file: File to upload
:param bucket_name: Bucket to upload to
:param object_name: S3 object name. If not specified then file_name is used
:return: True if file was uploaded, else False
"""
    # If the S3 destination name was not specified, use the source file name
    if destination_name is None:
        destination_name = source_file
try:
response = s3_client.upload_file(source_file, bucket_name, destination_name)
print(
"File {} uploaded to bucket {} at {}".format(
source_file, bucket_name, destination_name
)
)
except ClientError as e:
print(e)
return False
return True
def create_presigned_url_s3(bucket_name, object_name, expiration=3600):
"""Generate a presigned URL to share an S3 object
bucket_name: string
object_name: string
expiration: Time in seconds for the presigned URL to remain valid
:return: Presigned URL as string. If error, returns None.
"""
# Generate a presigned URL for the S3 object
try:
response = s3_client.generate_presigned_url('get_object',
Params={'Bucket': bucket_name,
'Key': object_name},
ExpiresIn=expiration)
except ClientError as e:
print(e)
return None
# The response contains the presigned URL
return response
## Listing all objects in the Bucket
def list_files_in_bucket(bucket_name):
"""
List all files in bucket
"""
try:
        response = s3_client.list_objects_v2(
            Bucket=bucket_name
        )
except ClientError as e:
print(e)
return None
for key in response['Contents']:
print('-> {}'.format(key['Key']))
def download_from_s3(bucket_name, object_name, file_name):
try:
s3_client.download_file(bucket_name, object_name, file_name)
except ClientError as e:
print(e)
return None
|
{"/storage/helper.py": ["/storage/models.py"], "/storage/schema/query.py": ["/storage/schema/node.py"], "/storage/urls.py": ["/storage/views.py"], "/storage/schema/node.py": ["/storage/models.py", "/storage/helper.py"], "/storage/views.py": ["/storage/helper.py"]}
|
17,267
|
trunkboy/django-multi-storage
|
refs/heads/main
|
/storage/views.py
|
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from storage.helper import Storage
class FileUpload(APIView):
permission_classes = [IsAuthenticated]
def post(self, request):
        '''
        basic function to upload the file
        the upload location (mainly the bucket name) must be passed too
        '''
storage = None
# check name data is provided or not
if request.POST:
# mount the bucket specified in request
storage = Storage(request.POST['name'])
else:
# mount the default bucket
storage = Storage()
# check file data is provided
if request.FILES:
            # file data is provided, proceed to upload the file
file_data = request.FILES['file']
# upload the file
file_details = storage.save_file(request=request, file=file_data)
# return the file hash
if file_details['status']:
return Response({"file": file_details['file'].hashed_name}, status=200)
else:
return Response({"error": file_details['error']}, status=400)
else:
# file is not provided so return the error code
return Response({"error": "Unsupported Media Type"}, status=415)
        # Fallback: return method not allowed, as something went wrong
return Response({"error": "Method Not allowed"}, status=405)
class FileDownload(APIView):
'''
Download the file from the server based on the fid which is provided
'''
def get(self, request, fid):
storage = Storage()
storage.get_object_url(object_id=fid)
return Response({"file": 'ok'}, status=200)
|
{"/storage/helper.py": ["/storage/models.py"], "/storage/schema/query.py": ["/storage/schema/node.py"], "/storage/urls.py": ["/storage/views.py"], "/storage/schema/node.py": ["/storage/models.py", "/storage/helper.py"], "/storage/views.py": ["/storage/helper.py"]}
|
17,268
|
trunkboy/django-multi-storage
|
refs/heads/main
|
/storage/models.py
|
from django.contrib.auth import get_user_model
from django.contrib.gis.db import models
class FileStorage(models.Model):
    '''
    Stores the data and location related to the files uploaded to the buckets
    '''
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
hashed_name = models.CharField(max_length=255, unique=True) # unique name for the file after hashing use for identification
original_name = models.CharField(max_length=255)
original_meta_data = models.TextField(null=True)
original_size = models.IntegerField(null=True, help_text='The size, in bytes, of the uploaded file.')
original_charset = models.CharField(max_length=255, null=True)
bucket_raw = models.CharField(max_length=255, null=True)
bucket_name = models.CharField(max_length=255, null=True)
server_reply = models.TextField(null=True) # server reply after storing the object
class FileTransactionLogs(models.Model):
    '''
    Stores the transaction logs for stored files
    '''
file = models.ForeignKey(FileStorage, on_delete=models.CASCADE)
user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
remark = models.TextField(null=True)
|
{"/storage/helper.py": ["/storage/models.py"], "/storage/schema/query.py": ["/storage/schema/node.py"], "/storage/urls.py": ["/storage/views.py"], "/storage/schema/node.py": ["/storage/models.py", "/storage/helper.py"], "/storage/views.py": ["/storage/helper.py"]}
|
17,309
|
spking/p-skitakall
|
refs/heads/master
|
/main.py
|
# Sölvi Scheving Pálsson #
# 04/02/2020 #
import botprofile
import classes
|
{"/main.py": ["/botprofile.py", "/classes.py"]}
|
17,310
|
spking/p-skitakall
|
refs/heads/master
|
/botprofile.py
|
# Solvi Scheving Pálsson#
# Bot logic file #
# 03/02/2019 #
class Bot:
def __init__(self, d):
self.deck = d
|
{"/main.py": ["/botprofile.py", "/classes.py"]}
|
17,311
|
spking/p-skitakall
|
refs/heads/master
|
/classes.py
|
# Skítakall - classes file #
# Sölvi Scheving Pálsson #
# February 2, 2020 #
import random
import time
from termcolor import cprint
class Deck:
def __init__(self, t, n):
self.tegund = t
self.numer = n
def __str__(self):
return self.tegund + "," + str(self.numer)
def createdeck(t, listi):
for x in range(1, 14):
t1 = Deck(t, x)
listi.append(t1)
return listi
|
{"/main.py": ["/botprofile.py", "/classes.py"]}
|
17,317
|
mdberkey/Discord_Convo_Bots
|
refs/heads/master
|
/MarkovGeneration.py
|
import random
import ast
# reads dictionary.txt
dictionaryFile = open("dictionary.txt", "r")
contents = dictionaryFile.read()
dictionary = ast.literal_eval(contents)
dictionaryFile.close()
# generates a unique string using the markov chain values
class generation:
def main(self):
lastWord = '3u9fh27d31r' # random string unlikely to appear in dictionary
finalAnswer = ''
counter = 0
randNumA = random.randint(1, 4) # set to about 1-4 sentences to output
for i in range(1, 10000):
            newWord = generation.getNextWord(lastWord, dictionary, self)
if newWord.endswith('.') or newWord.endswith('!') or newWord.endswith('?'):
counter += 1
finalAnswer = finalAnswer + " " + newWord
lastWord = newWord
if counter >= randNumA:
break
return finalAnswer
def getNextWord(lastWord, dict, self):
        if lastWord not in dict:
            # selects a new starting word for a new sentence
            newWord = generation.randomSelect(dict)
            return newWord
        else:
            # selects next word from the dictionary's list
            newWord = generation.weightedSelect(lastWord, dict, self)
return newWord
# returns a random word from the dictionary
def randomSelect(dict):
randInt = random.randint(0, len(dict) - 1)
newWord = list(dict.keys())[randInt]
return newWord
# returns the next random word in the markov chain using weighted values
def weightedSelect(lastWord, dict, self):
weightedList = []
for word in dict[lastWord]:
weightInt = dict[lastWord][word]
# adds more weight to words that appear in self
if word in self:
weightInt += (weightInt * 10)
for i in range(0, weightInt):
weightedList.append(word)
randInt = random.randint(0, len(weightedList) - 1)
return weightedList[randInt]
|
{"/DiscordConnection.py": ["/GetRandomEmoji.py", "/GoogleSearch.py", "/MarkovGeneration.py", "/TextToSpeech.py"]}
|
17,318
|
mdberkey/Discord_Convo_Bots
|
refs/heads/master
|
/TextToSpeech.py
|
from gtts import gTTS
# saves a TTS audio file of the given text
def TTSus(text):
tts = gTTS(text, lang='en-us') # female english American accent
tts.save("audio.mp3")
def TTSau(text):
tts = gTTS(text, lang='en-au') # female english Australian accent
tts.save("audio.mp3")
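# Illustrative usage (a sketch): TTSau("hello there") writes audio.mp3 to the working directory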
|
{"/DiscordConnection.py": ["/GetRandomEmoji.py", "/GoogleSearch.py", "/MarkovGeneration.py", "/TextToSpeech.py"]}
|
17,319
|
mdberkey/Discord_Convo_Bots
|
refs/heads/master
|
/GoogleSearch.py
|
# returns the first link in a google search
def googleSearch(query):
try:
from googlesearch import search
    except ImportError:
        print("No module named 'google' found")
        return None
for result in search(query, tld="com", num=1, stop=1, pause=2):
return result
|
{"/DiscordConnection.py": ["/GetRandomEmoji.py", "/GoogleSearch.py", "/MarkovGeneration.py", "/TextToSpeech.py"]}
|
17,320
|
mdberkey/Discord_Convo_Bots
|
refs/heads/master
|
/GetRandomEmoji.py
|
import ast
import random
# reads emoticons.txt
emoticonsFile = open("emoticons.txt", "r")
contentsB = emoticonsFile.read()
emoticons = ast.literal_eval(contentsB)
emoticonsFile.close()
# returns a random emoji from the emoticons list
def getRandomEmoji():
    return random.choice(emoticons)
|
{"/DiscordConnection.py": ["/GetRandomEmoji.py", "/GoogleSearch.py", "/MarkovGeneration.py", "/TextToSpeech.py"]}
|
17,321
|
mdberkey/Discord_Convo_Bots
|
refs/heads/master
|
/DiscordConnection.py
|
import asyncio
import random
import discord
from discord.ext import commands
from GetRandomEmoji import getRandomEmoji
from GoogleSearch import googleSearch
from MarkovGeneration import generation
from TextToSpeech import TTSau
# Gibs the kangaroo bot
TOKEN = 'NzEzODYyODQxNjY4NzMwOTIw.XsmSlA.WMH33p-HEDf_SiR5ifW2krT1SjE'
client = commands.Bot(command_prefix='$')
channel = client.get_channel(339964184223809547) # text channel 716458739074465813; None until the client is ready, handlers fetch it again
@client.event
async def on_ready():
await client.change_presence(status=discord.Status.online, activity=discord.Game(name='the didgeridoo'))
print('client ready')
@client.command(pass_context=True)
async def start(ctx):
vChannel = client.get_channel(339964184223809548) # voice channel 376441029752258562
channel = client.get_channel(339964184223809547) # text channel 716458739074465813
await vChannel.connect()
await asyncio.sleep(3)
await channel.send("Hello")
@client.command(pass_context=True)
async def end(ctx):
vGuild = ctx.guild
voice_client = vGuild.voice_client
await voice_client.disconnect()
@client.event
async def on_message(message):
if message.author == client.user: # ignores its own messages
return
    elif message.content.startswith(('$', 'http')): # ignores commands and links
await client.process_commands(message)
return
elif message.author.id == 716121158348439634: # user that bot responds to
response = generation.main(message.content)
channel = client.get_channel(716458739074465813) # 716458739074465813
# random numbers for reactions and link probabilities
randNumA = random.randint(1, 10)
randNumB = random.randint(1, 10)
# converts text response to voice that is played in the voice channel
TTSau(response)
vGuild = message.guild
voice_client = vGuild.voice_client
voice_client.play(discord.FFmpegPCMAudio('audio.mp3'), after=None)
# adds typing appearance
while voice_client.is_playing():
async with channel.typing():
await asyncio.sleep(1)
await channel.send(response)
# sends a related link about 20% of the time
if randNumA > 7:
link = googleSearch(response)
if link:
await channel.send(link)
# reacts to the previous message with emoticons 40%
if randNumB > 6:
for i in range(random.randint(1, 5)):
await message.add_reaction(getRandomEmoji())
# allows other commands to be used
await client.process_commands(message)
client.run(TOKEN)
|
{"/DiscordConnection.py": ["/GetRandomEmoji.py", "/GoogleSearch.py", "/MarkovGeneration.py", "/TextToSpeech.py"]}
|
17,322
|
mdberkey/Discord_Convo_Bots
|
refs/heads/master
|
/SetupFiles/LearningDictionary.py
|
file = open("Source_Words.txt")
string = file.read()
# creates dictionary of words in Markov Chain fashion
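# e.g. learn({}, "a b a c") returns {'a': {'b': 1, 'c': 1}, 'b': {'a': 1}}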
def learn(dict, input):
tokens = input.split(" ")
for i in range(0, len(tokens) - 1):
currentWord = tokens[i]
nextWord = tokens[i + 1]
if currentWord not in dict:
# adds new word to dictionary
dict[currentWord] = {nextWord: 1}
else:
# word is already in dictionary
allNextWords = dict[currentWord]
if nextWord not in allNextWords:
# adds new future state word to dictionary
dict[currentWord][nextWord] = 1
else:
# increases frequency to state word already in dictionary
dict[currentWord][nextWord] = dict[currentWord][nextWord] + 1
return dict
dictionary = {}
dictionary = learn(dictionary, string)
print(dictionary)
|
{"/DiscordConnection.py": ["/GetRandomEmoji.py", "/GoogleSearch.py", "/MarkovGeneration.py", "/TextToSpeech.py"]}
|
17,325
|
sjquant/sanic-redis
|
refs/heads/master
|
/sanic_redis/__init__.py
|
from .core import SanicRedis
__all__ = ['SanicRedis']
__version__ = '0.1.0'
|
{"/sanic_redis/__init__.py": ["/sanic_redis/core.py"]}
|
17,326
|
sjquant/sanic-redis
|
refs/heads/master
|
/setup.py
|
from setuptools import setup
setup(
name='sanic-redis',
version='0.1.1',
    description='Adds redis support to sanic.',
    long_description='sanic-redis is a sanic framework extension which adds support for redis.',
url='https://github.com/strahe/sanic-redis',
author='strahe',
license='MIT',
packages=['sanic_redis'],
install_requires=('sanic', 'aioredis'),
zip_safe=False,
keywords=['sanic', 'redis', 'aioredis'],
classifiers=[
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Internet :: WWW/HTTP :: Session',
],
)
|
{"/sanic_redis/__init__.py": ["/sanic_redis/core.py"]}
|
17,327
|
sjquant/sanic-redis
|
refs/heads/master
|
/sanic_redis/core.py
|
from sanic import Sanic
from aioredis import create_redis_pool
class SanicRedis:
def __init__(self, app: Sanic=None, redis_config: dict=None):
self.app = app
self.config = redis_config
self.conn = None
if app:
            self.init_app(app=app, redis_config=redis_config)
def init_app(self, app: Sanic, redis_config: dict=None):
self.app = app
self.config = redis_config
@app.listener('before_server_start')
async def aio_redis_configure(_app, loop):
_c = dict(loop=loop)
if self.config:
config = self.config
else:
config = _app.config.get('REDIS')
for key in ['address', 'db', 'password', 'ssl', 'encoding', 'minsize',
'maxsize', 'timeout']:
if key in config:
_c.update({key: config.get(key)})
_redis = await create_redis_pool(**_c)
_app.redis = _redis
self.conn = _redis
@app.listener('after_server_stop')
async def close_redis(_app, _loop):
_app.redis.close()
await _app.redis.wait_closed()
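# Illustrative usage (a sketch; the REDIS config values are assumed examples):
# app = Sanic(__name__)
# app.config['REDIS'] = {'address': 'redis://localhost:6379'}
# SanicRedis(app)
# # request handlers can then reach the connection pool as app.redis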
|
{"/sanic_redis/__init__.py": ["/sanic_redis/core.py"]}
|
17,335
|
lukaskubis/darkskylib
|
refs/heads/master
|
/test/__init__.py
|
import os
import pickle
import unittest
import darksky
import requests
class TestPickle(unittest.TestCase):
""" Forecast pickling """
@classmethod
def setUpClass(cls):
def mock_request_get(*args, **kwargs):
response = type('Response', (object,), {})
response.headers = {}
response.status_code = 200
with open('./test/response.json', 'r') as fixture:
response.text = fixture.read()
return response
cls.request_get = requests.get
requests.get = mock_request_get
@classmethod
def tearDownClass(cls):
        os.system(r'find . -name "*.pickle" -exec rm {} \;')
requests.get = cls.request_get
def test_pickle(self):
location = -77.843906, 166.686520 # McMurdo station, antarctica
# This doesn't actually hit the API since we mocked out the request lib
forecast = darksky.forecast('test_key', *location)
# Make sure we got the right data, via our mock
self.assertEqual(forecast.currently.temperature, -23.58)
# Ensure pickling by actually pickling
with open('./forecast.pickle', 'wb') as outfile:
pickle.dump(forecast, outfile)
# Check that the file exists
self.assertTrue(os.path.exists('./forecast.pickle'))
def test_unpickle(self):
# Check that the previous test, which writes out the pickle, succeeded
self.assertTrue(os.path.exists('./forecast.pickle'))
# Load the pickle file
with open('./forecast.pickle', 'rb') as infile:
forecast = pickle.load(infile)
# Make sure it loaded right
self.assertTrue(forecast)
self.assertEqual(forecast.currently.temperature, -23.58)
if __name__ == '__main__':
unittest.main()
|
{"/test/__init__.py": ["/darksky/__init__.py"], "/darksky/__init__.py": ["/darksky/forecast.py"], "/darksky/forecast.py": ["/darksky/data.py"]}
|
17,336
|
lukaskubis/darkskylib
|
refs/heads/master
|
/darksky/__init__.py
|
# __init__.py
from .forecast import Forecast
def forecast(key, latitude, longitude, time=None, timeout=None, **queries):
return Forecast(key, latitude, longitude, time, timeout, **queries)
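# Illustrative usage (a sketch; 'your-api-key' is a placeholder):
# fc = forecast('your-api-key', -77.843906, 166.686520)
# print(fc.currently.temperature)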
|
{"/test/__init__.py": ["/darksky/__init__.py"], "/darksky/__init__.py": ["/darksky/forecast.py"], "/darksky/forecast.py": ["/darksky/data.py"]}
|
17,337
|
lukaskubis/darkskylib
|
refs/heads/master
|
/setup.py
|
import os
from setuptools import setup
# README.rst (converted with pandoc) supplies the long description
with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.rst')) as f:
README = f.read()
setup(name='darkskylib',
version='0.3.91',
description='The Dark Sky API wrapper',
long_description=README,
url='https://github.com/lukaskubis/darkskylib',
author='Lukas Kubis',
author_email='contact@lukaskubis.com',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering :: Atmospheric Science',
'Topic :: Home Automation',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Operating System :: OS Independent',
],
keywords='darksky dark-sky dark sky forecast home weather home-weather weather-station',
packages=['darksky'],
install_requires=[
'future',
'requests',
],
test_suite='test',
zip_safe=True
)
|
{"/test/__init__.py": ["/darksky/__init__.py"], "/darksky/__init__.py": ["/darksky/forecast.py"], "/darksky/forecast.py": ["/darksky/data.py"]}
|
17,338
|
lukaskubis/darkskylib
|
refs/heads/master
|
/darksky/forecast.py
|
# forecast.py
from __future__ import print_function
from builtins import super
import json
import sys
import requests
from .data import DataPoint
_API_URL = 'https://api.darksky.net/forecast'
class Forecast(DataPoint):
def __init__(self, key, latitude, longitude, time=None, timeout=None, **queries):
self._parameters = dict(key=key, latitude=latitude, longitude=longitude, time=time)
self.refresh(timeout, **queries)
def __setattr__(self, key, value):
if key in ('_queries', '_parameters', '_data'):
return object.__setattr__(self, key, value)
return super().__setattr__(key, value)
def __getattr__(self, key):
currently = object.__getattribute__(self, 'currently')
_data = object.__getattribute__(currently, '_data')
if key in _data.keys():
return _data[key]
return object.__getattribute__(self, key)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
del self
@property
def url(self):
time = self._parameters['time']
timestr = ',{}'.format(time) if time else ''
uri_format = '{url}/{key}/{latitude},{longitude}{timestr}'
return uri_format.format(url=_API_URL, timestr=timestr, **self._parameters)
def refresh(self, timeout=None, **queries):
self._queries = queries
self.timeout = timeout
request_params = {
'params': self._queries,
'headers': {'Accept-Encoding': 'gzip'},
'timeout': timeout
}
response = requests.get(self.url, **request_params)
self.response_headers = response.headers
        if response.status_code != 200:
raise requests.exceptions.HTTPError('Bad response')
return super().__init__(json.loads(response.text))
|
{"/test/__init__.py": ["/darksky/__init__.py"], "/darksky/__init__.py": ["/darksky/forecast.py"], "/darksky/forecast.py": ["/darksky/data.py"]}
|
17,339
|
lukaskubis/darkskylib
|
refs/heads/master
|
/darksky/data.py
|
# data.py
class DataPoint(object):
def __init__(self, data):
self._data = data
if isinstance(self._data, dict):
for name, val in self._data.items():
setattr(self, name, val)
if isinstance(self._data, list):
setattr(self, 'data', self._data)
def __setattr__(self, name, val):
def setval(new_val=None):
return object.__setattr__(self, name, new_val if new_val else val)
# regular value
if not isinstance(val, (list, dict)) or name == '_data':
return setval()
# set specific data handlers
if name in ('alerts', 'flags'):
return setval(eval(name.capitalize())(val))
# data
if isinstance(val, list):
val = [DataPoint(v) if isinstance(v, dict) else v for v in val]
return setval(val)
# set general data handlers
setval(DataBlock(val) if 'data' in val.keys() else DataPoint(val))
def __getitem__(self, key):
return self._data[key]
def __len__(self):
return len(self._data)
class DataBlock(DataPoint):
def __iter__(self):
return self.data.__iter__()
def __getitem__(self, index):
# keys in darksky API datablocks are always str
if isinstance(index, str):
return self._data[index]
return self.data.__getitem__(index)
def __len__(self):
return self.data.__len__()
class Flags(DataPoint):
def __setattr__(self, name, value):
return object.__setattr__(self, name.replace('-', '_'), value)
class Alerts(DataBlock):
pass
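# Illustrative sketch of the wrapping behaviour: DataPoint({'a': {'b': 1}}).a.b == 1,
# and a dict that carries a 'data' key is wrapped as an iterable DataBlock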
|
{"/test/__init__.py": ["/darksky/__init__.py"], "/darksky/__init__.py": ["/darksky/forecast.py"], "/darksky/forecast.py": ["/darksky/data.py"]}
|
17,342
|
jvadebossan/EVA-Editable-Virtual-Assistant
|
refs/heads/master
|
/configs.py
|
import datetime
# defaults ==============================================
version = ('0.1')
creator = str('@jvadebossan')
creator_to_talk = str('@ j v a debossan')
# date and time variables
dia1 = datetime.datetime.now()
dia = ('hoje é dia {} do {} de {}'.format(dia1.day, dia1.month, dia1.year))
hr1 = datetime.datetime.now()
hr = hr1.strftime("%H:%M")
# editable settings ========================================
name_to_talk = ('éva') # phonetic spelling
name = ('eva') # correct name
playlist = ('https://open.spotify.com/')
user_name = ('joão vitor')
# lists
l_boas_vindas = [
'o que você precisa, {}'.format(user_name),
'seja bem vindo de volta,{}'.format(user_name),
'olá {}'.format(user_name),
    'sim',
    'pode falar'
]
l_piadas = [
'o que é um pontinho vermelho no castelo, , , é uma pimenta do reino ',
'o que é um pontinho amarelo na africa, , , é um ieloufante',
'porque a plantinha nao foi atendida no hospital, , , porque só tinha médico de plantão',
'o que o pagodeiro foi fazer na igreja, , , ele foi cantar pa god',
'o que acontece quando chove na inglaterra, , , ela vira inglalama',
]
|
{"/main.py": ["/configs.py"]}
|
17,343
|
jvadebossan/EVA-Editable-Virtual-Assistant
|
refs/heads/master
|
/main.py
|
from configs import *
import speech_recognition as sr
from random import choice
import pyttsx3
import datetime
import sys
from time import sleep as wait
import webbrowser as wb
def intro():
print('=============================================================================================')
print('= version: ' + version, ' ' + name, 'assistant' ' made by' + creator, ' =')
print('=============================================================================================')
frase_intro = ('{} assistente, versão {} feito por {}'.format(name_to_talk, version, creator_to_talk) )
say(frase_intro)
start()
def restart():
print('.')
wait(0.2)
start()
def desligar():
sys.exit()
def reboot():
wait(0.2)
intro()
def say(tosay):
engine = pyttsx3.init()
engine.say(tosay)
engine.runAndWait()
def start():
while True:
r = sr.Recognizer()
with sr.Microphone() as fonte:
print('ouvindo...')
audio = r.listen(fonte)
textc = r.recognize_google(audio, language='pt-br')
text = textc.lower()
print(text)
try:
engine = pyttsx3.init()
            # greeting function
            if text == str(name): # when it hears its own name, reply with a greeting
msg_boas_vindas = choice(l_boas_vindas)
say(msg_boas_vindas)
            # play music function
            elif 'playlist' in text: # opens the user's music playlist, predefined in "configs"
wb.open(playlist, new=2)
            # date function
            elif 'dia' in text: # says the date
print(dia)
say(dia)
            # time function
            elif 'horas' in text: # says the time
print(hr)
say(hr)
            # joke function
            elif 'piada' in text: # tells a random joke
                joke = choice(l_piadas)
print (joke)
say(joke)
            # shutdown function
            elif 'desligar' in text: # shuts the system down
desligando = str('desligando em 3, 2, 1')
print (desligando)
engine.say(desligando)
engine.runAndWait()
desligar()
            # restart function
            elif 'reiniciar' in text: # restarts the system
reiniciando = str('reiniciando em 3, 2, 1')
print (reiniciando)
engine.say(reiniciando)
engine.runAndWait()
reboot()
elif 'fale' in text:
texto_falar = text.replace('fale', '')
say(texto_falar)
elif 'pesquis' in text:
site_pesquisar = text.replace('pesquis', '')
say('pesquisando ' + site_pesquisar)
wb.open('https://www.google.com/search?client=opera-gx&hs=5GZ&sxsrf=ALeKk02LWQxX_lhfnlTF6lCi_LYm0x5kqg%3A1601686367378&ei=X8t3X_LeFpPA5OUP0e6-WA&q={}&oq={}&gs_lcp=CgZwc3ktYWIQAzIHCAAQChDLATIECAAQHjoHCCMQ6gIQJzoECCMQJzoFCAAQsQM6CAguELEDEIMBOgIIADoFCC4QsQM6BAgAEAo6BggAEAoQHlD_EVjVH2COImgBcAB4AIABsgKIAdMJkgEHMC41LjAuMZgBAKABAaoBB2d3cy13aXqwAQrAAQE&sclient=psy-ab&ved=0ahUKEwiyiuDXmpfsAhUTILkGHVG3DwsQ4dUDCAw&uact=5'.format(site_pesquisar, site_pesquisar), new=2)
            else:
                restart()
except:
restart()
intro()
|
{"/main.py": ["/configs.py"]}
|
17,344
|
jvadebossan/EVA-Editable-Virtual-Assistant
|
refs/heads/master
|
/testes.py
|
import webbrowser
a = input('say something: ')
if ('open') in a:
if ('google') in a:
webbrowser.open('www.google.com.br', new=2)
elif ('youtube') in a:
webbrowser.open('www.youtube.com', new=2)
elif ('kahoot') in a:
webbrowser.open('kahoot.it', new=2)
else:
print('aplicativo não encontrado')
elif ('pesquisar') in a:
print(a)
webbrowser.open('https://www.google.com/search?q={}&oq={}&aqs=chrome.0.0j46j0l6.1656j1j15&sourceid=chrome&ie=UTF-8'.format(a, a), new=2)
else:
print('comando ainda não suportado')
|
{"/main.py": ["/configs.py"]}
|
17,345
|
DiegoAscanio/hostnames_provider
|
refs/heads/master
|
/hostnames_provider/hostnames/models.py
|
'''
@file models.py
@lastmod 9/11/2016
'''
# Imports
from django.db import models
import django
from django import forms
# Class definitions
# Host class
class Host(models.Model):
'''
    Host class: stores a hostname and its MAC and IP addresses
'''
hostname = models.CharField(max_length=15)
mac_address = models.CharField(max_length=17)
ip_address = models.CharField(primary_key=True, unique=True, max_length=15)
def __str__(self):
return self.hostname+'|'+self.mac_address+'|'+self.ip_address
|
{"/hostnames_provider/hostnames/serializers.py": ["/hostnames_provider/hostnames/models.py"]}
|
17,346
|
DiegoAscanio/hostnames_provider
|
refs/heads/master
|
/hostnames_provider/hostnames/HostList.py
|
'''
@file HostList.py
@lastmod 9/11/2016
'''
from django.http import Http404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from .models import Host
from .serializers import HostSerializer
class HostList(APIView):
"""
List all hosts, or create a new host.
"""
def get(self, request, format=None):
hosts = Host.objects.all()
serializer = HostSerializer(hosts, many=True)
return Response(serializer.data)
def post(self, request, format=None):
serializer = HostSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
{"/hostnames_provider/hostnames/serializers.py": ["/hostnames_provider/hostnames/models.py"]}
|
17,347
|
DiegoAscanio/hostnames_provider
|
refs/heads/master
|
/hostnames_provider/hostnames/apps.py
|
from django.apps import AppConfig
class HostnamesConfig(AppConfig):
name = 'hostnames'
|
{"/hostnames_provider/hostnames/serializers.py": ["/hostnames_provider/hostnames/models.py"]}
|
17,348
|
DiegoAscanio/hostnames_provider
|
refs/heads/master
|
/hostnames_provider/hostnames/urls.py
|
'''
@file urls.py
@lastmod 8/11/2016
'''
# Imports
from django.conf.urls import include, url
from django.contrib import admin
import django.contrib.auth.views
from .views import HostDetail
from . import views
from . import models
# URL patterns
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^ajuda$', views.help, name='help'),
url(r'^sobre$', views.sobre, name='sobre'),
url(r'^contato$', views.contato, name='contato'),
url(r'^perfil$', views.profile, name='profile'),
url(r'^opcoes$', views.options, name='options'),
url(r'^rest/search__ip__address/(?P<ip>[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})$', views.host_detail_ipaddress),
url(r'^rest/search__mac__address/(?P<mac>[0-9a-f]{2}\:[0-9a-f]{2}\:[0-9a-f]{2}\:[0-9a-f]{2}\:[0-9a-f]{2}\:[0-9a-f]{2})$', views.host_detail_macaddress),
url(r'^rest/list/$',views.HostList.as_view(), name='restlist'),
url(r'^rest/detail/(?P<pk>[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})$',views.HostDetail.as_view(), name='restdetail'),
url(r'^error/$', views.error, name='error'),
url(r'^accounts/login/$', django.contrib.auth.views.login, name='login'),
    url(r'^logout/$', django.contrib.auth.views.logout, {'next_page': '/'}, name='logout'),
url(r'^admin/', include(admin.site.urls)),
url(r'^hostname/$', views.hostname, name='hostname'),
url(r'^create/$', views.create, name='create'),
url(r'^upload/$', views.upload, name='upload'),
url(r'^download/$', views.download, name='download'),
url(r'^update/$', views.update, name='update'),
url(r'^retrieve/$', views.retrieve, name='retrieve'),
url(r'^list/$', views.list, name='list'),
url(r'^pesquisar/$', views.pesquisar, name='pesquisar'),
url(r'^delete/$', views.delete, name='delete'),
]
|
{"/hostnames_provider/hostnames/serializers.py": ["/hostnames_provider/hostnames/models.py"]}
|
17,349
|
DiegoAscanio/hostnames_provider
|
refs/heads/master
|
/hostnames_provider/convert.py
|
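# standalone snippet; assumes a Django `request` with an uploaded file named 'filename' in scope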
csv_file = request.FILES['filename'].name
txt_file = 'dados.txt'
text_list = []
with open(csv_file, "r") as my_input_file:
for line in my_input_file:
line = line.split(",", 2)
text_list.append(" ".join(line))
with open(txt_file, "w") as my_output_file:
for line in text_list:
my_output_file.write(line)
print('File Successfully written.')
|
{"/hostnames_provider/hostnames/serializers.py": ["/hostnames_provider/hostnames/models.py"]}
|
17,350
|
DiegoAscanio/hostnames_provider
|
refs/heads/master
|
/hostnames_provider/feeder.py
|
import sys, getopt, csv
import django
def feed(file_name):
django.setup()
from hostnames.models import Host
with open(file_name, 'r') as csvfile:
spamreader = csv.reader(csvfile, delimiter='|')
for h,m,i in spamreader:
#print(hostname,mac_address,ip_address)
h = Host(hostname=h,mac_address=m,ip_address=i)
h.save()
if __name__ == "__main__":
    if len(sys.argv) == 2:
        feed(sys.argv[1])
    else:
        print('Usage: python feeder.py <csv_file>')
|
{"/hostnames_provider/hostnames/serializers.py": ["/hostnames_provider/hostnames/models.py"]}
|
17,351
|
DiegoAscanio/hostnames_provider
|
refs/heads/master
|
/hostnames_provider/hostnames/views.py
|
'''
@file views.py
@lastmod 9/11/2016
'''
#Importacoes
from django.db.models import Q
from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader, RequestContext
from django.template.context_processors import csrf
from django.http import HttpResponseRedirect
from django.contrib.auth import authenticate
from django.contrib.auth.decorators import login_required
from django.utils.encoding import smart_str
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import generics
from hostnames.forms import ContactForm
from django.core.mail import EmailMessage
from django.shortcuts import redirect
from django.template import Context
from django.template.loader import get_template
from django.core.mail import EmailMultiAlternatives
import csv, pdb, json
from .models import Host
from .forms import *
from .serializers import HostSerializer
from django.views.decorators.csrf import csrf_exempt
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from django.utils.six import BytesIO
# Views
# Default error handler
def showError(request,mensagem):
'''
    Returns an error page containing the data passed in the message
'''
c = RequestContext (request, {
'mensagem' : mensagem
})
templateError = loader.get_template('hostnames/error.html')
return HttpResponse(templateError.render(c))
# Renders HttpResponse content in JSON format
class JSONResponse(HttpResponse):
'''
    HttpResponse that renders its content in JSON format
'''
def __init__(self, data, **kwargs):
content = JSONRenderer().render(data)
kwargs['content_type'] = 'application/json'
super(JSONResponse, self).__init__(content, **kwargs)
@csrf_exempt
def host_list(request, format=None):
'''
    Lists all hosts, or returns a specific host
'''
if request.method == 'GET':
hosts = Host.objects.all()
serializer = HostSerializer(hosts, many=True)
return JSONResponse(serializer.data)
@csrf_exempt
def host_detail_ipaddress(request, ip, format=None):
'''
    Returns JSON for the requested IP
'''
if request.method == 'GET':
try:
host = Host.objects.filter(ip_address__icontains = ip)
except Host.DoesNotExist:
            return showError(request, 'ERRO: IP INEXISTENTE!')
serializer = HostSerializer(data = host,many=True)
serializer.is_valid()
content = JSONRenderer().render(serializer.data)
return JSONResponse(serializer.data) if (len(serializer.data) != 0) else showError(request,'ERRO: IP INEXISTENTE!')
@csrf_exempt
def host_detail_macaddress(request, mac, format=None):
'''
    Returns JSON for the requested MAC
'''
if request.method == 'GET':
try:
host = Host.objects.filter(mac_address__icontains = mac)
except Host.DoesNotExist:
            return showError(request, 'ERRO: MAC INEXISTENTE!')
serializer = HostSerializer(data = host,many=True)
serializer.is_valid()
content = JSONRenderer().render(serializer.data)
return JSONResponse(serializer.data) if (len(serializer.data) != 0) else showError(request,'ERRO: MAC INEXISTENTE!')
@csrf_exempt
def host_detail_pesquisar(request, pesquisa, ordem):
'''
    Returns JSON with the search result (general or specific)
'''
if request.method == 'GET':
if('0' in ordem):
if('h' in ordem):
ordem = 'hostname'
elif('m' in ordem):
ordem = 'mac_address'
elif('i' in ordem):
ordem = 'ip_address'
else:
if('h' in ordem):
ordem = '-hostname'
elif('m' in ordem):
ordem = '-mac_address'
elif('i' in ordem):
ordem = '-ip_address'
try:
if (pesquisa != None):
if(len(pesquisa) == 1):
host = Host.objects.filter(Q(hostname__icontains = pesquisa[0]) | Q(mac_address__icontains = pesquisa[0]) | Q(ip_address__icontains = pesquisa[0])).order_by(ordem)
for h in host:
h.hostname = h.hostname.replace(str(pesquisa[0]), "<b style='color:red'>"+str(pesquisa[0])+"</b>")
h.mac_address = h.mac_address.replace(str(pesquisa[0]), "<b style='color:red'>"+str(pesquisa[0])+"</b>")
h.ip_address = h.ip_address.replace(str(pesquisa[0]), "<b style='color:red'>"+str(pesquisa[0])+"</b>")
else:
host = Host.objects.filter(hostname__icontains = pesquisa[0],mac_address__icontains = pesquisa[1],ip_address__icontains = pesquisa[2]).order_by(ordem)
for h in host:
h.hostname = h.hostname.replace(str(pesquisa[0]), "<b style='color:red'>"+str(pesquisa[0])+"</b>")
h.mac_address = h.mac_address.replace(str(pesquisa[1]), "<b style='color:red'>"+str(pesquisa[1])+"</b>")
h.ip_address = h.ip_address.replace(str(pesquisa[2]), "<b style='color:red'>"+str(pesquisa[2])+"</b>")
else:
host = Host.objects.all()
except Host.DoesNotExist:
            return JSONRenderer().render([]) # no matching hosts
serializer = HostSerializer(data = host,many=True)
serializer.is_valid()
content = JSONRenderer().render(serializer.data)
return content
def hostname(request):
if request.method == 'GET':
        # instantiate a HostForm with the data received via GET
form = HostForm(request.GET)
        # check whether the form is valid
if form.is_valid():
            # get the data submitted in the form
mac = form.cleaned_data['mac']
ip = form.cleaned_data['ip']
            # look up the matching Host to return via GET
h = Host.objects.get(ip_address=ip, mac_address=mac)
            # send it
return HttpResponse(h.hostname)
else:
            # send back the form itself
return HttpResponse(form)
# Delete view
def delete(request):
'''
    Host deletion view: deletes one or more Hosts by ID
'''
    # security formalities (CSRF)
c = {}
c.update(csrf(request))
if request.method == 'POST':
        # instantiate a DeleteForm with the request passed via POST
form = DeleteForm(request.POST)
        # check whether the form is valid
if form.is_valid():
            # get the id(s) of the host(s) to be deleted
id = form.cleaned_data['ip']
            # split the ids on commas into an array (for two or more hosts)
idSplit = id.split(',')
            # iterate over the array of hosts
for i in idSplit:
                # delete them host by host
h = Host.objects.filter(ip_address=i).delete()
            # return a redirect to the listing page
return HttpResponseRedirect("/list")
else:
return HttpResponseRedirect("/create")
# Error page view
def error(request):
'''
    Error page view: renders the error page when a redirect to it is requested
'''
    if (request.method == 'POST'): # default redirect
template = loader.get_template('hostnames/error.html')
c = RequestContext (request, {})
return HttpResponse(template.render(c))
if (request.method == 'GET'):
template = loader.get_template('hostnames/error.html')
c = RequestContext (request, {})
return HttpResponse(template.render(c))
# Index view
def index(request):
'''
    Index view: redirects to the Host creation page if the user is logged in.
    Otherwise, loads the index template
'''
if (isUserAutenticated(request)):
return HttpResponseRedirect("/create")
else:
template = loader.get_template('hostnames/index.html')
c = RequestContext (request, {})
return HttpResponse(template.render(c))
# Contact view
def contato(request):
# new logic!
if request.method == 'POST':
form = ContactForm(request.POST)
if form.is_valid():
first_name = request.POST.get('first_name', '')
last_name = request.POST.get('last_name', '')
email = request.POST.get('email', '')
phone = request.POST.get('phone', '')
comment = request.POST.get('comment', '')
# Email the profile with the
# contact information
template = get_template('contact_template.txt')
context = Context({
'first_name': first_name,
'last_name': last_name,
'email': email,
'phone': phone,
'comment': comment,
})
content = template.render(context)
email = EmailMessage(
"New contact form submission",
content,
"Your website" +'',
['formularioscefetcontagem@gmail.com'],
headers = {'Reply-To': email }
)
#pdb.set_trace()
email.send()
return HttpResponseRedirect("/create")
else:
return HttpResponseRedirect("/list")
if request.method == 'GET':
return render(request, 'hostnames/contato.html', {
#'form': form_class,
})
# Options view
def options(request):
template = loader.get_template('hostnames/opcoes.html')
c = RequestContext (request, {})
return HttpResponse(template.render(c))
# Profile view
def profile(request):
template = loader.get_template('hostnames/perfil.html')
c = RequestContext (request, {})
return HttpResponse(template.render(c))
# Help view
def help(request):
template = loader.get_template('hostnames/ajuda.html')
c = RequestContext (request, {})
return HttpResponse(template.render(c))
# About view
def sobre(request):
template = loader.get_template('hostnames/sobre.html')
c = RequestContext (request, {})
return HttpResponse(template.render(c))
# Login page view (no longer used)
def login(request):
if (request.method == 'GET'): # redirecionamento padrao
        # load the login page template
template = loader.get_template('registration/login.html')
        # load the context
c = RequestContext (request, {})
        # return the template to be rendered
return HttpResponse(template.render(c))
if (request.method == 'POST'):
        # security formalities (CSRF)
c = {}
c.update(csrf(request))
        # receive the form
form = LoginForm(request.POST)
        # redirect if the user is authenticated
if isUserAutenticated(form) is not None:
            # redirect to the list template
return HttpResponseRedirect("/list")
else:
            # redirect to the index template
return HttpResponseRedirect("/")
# helper to check whether the user is authenticated
def isUserAutenticated(request):
    # test whether the user is logged in or not
    answer = request.user.is_authenticated()
return answer
# handle the logout operation
def logout(request):
"""
    Logs the user out and sends a message
"""
    from django.contrib.auth import logout as auth_logout # avoid recursing into this view
    auth_logout(request)
    # get a GET redirect back to the index
redirect_to = request.REQUEST.get('/', '')
if redirect_to:
        import urlparse
        netloc = urlparse.urlparse(redirect_to)[1]
        # Security check -- do not allow redirecting to a different host.
if not (netloc and netloc != request.get_host()):
return HttpResponseRedirect(redirect_to)
# Create page view
@login_required
def create(request):
    if (request.method == 'GET'): # default rendering
        # load the template
template = loader.get_template('hostnames/create.html')
        # request the context
c = RequestContext (request, {})
        # respond
return HttpResponse(template.render(c))
if (request.method == 'POST'):
        # security formalities (CSRF)
c = RequestContext (request, {})
c.update(csrf(request))
        # receive the form data
form = CreateForm(request.POST)
        # check whether the form is valid
if (form.is_valid()):
            # capture each form field
hostForm = form.cleaned_data['hostname']
macForm = form.cleaned_data['mac']
ipForm = form.cleaned_data['ip']
            # check for a duplicate ip
hosts = Host.objects.all().filter(ip_address__icontains = ipForm)
if(len(hosts) > 0):
c = RequestContext (request, {
'mensagem' : 'ERRO: IP JA EXISTENTE NO BANCO DE DADOS!'
})
templateError = loader.get_template('hostnames/error.html')
return showError(request,'ERRO: IP JA EXISTENTE NO BANCO DE DADOS!')
else:
                # build a Host object from the form data
h = Host(hostname=hostForm,mac_address=macForm,ip_address=ipForm)
                # save the object to the database
h.save()
                # return a redirect to the listing page
return HttpResponseRedirect("/list")
else:
c = RequestContext (request, {
'mensagem' : 'ERRO: IP FORA DO PADRAO!'
})
templateError = loader.get_template('hostnames/error.html')
return showError(request,'ERRO: IP FORA DO PADRAO!')
# CSV upload view
def upload(request):
    # security formalities (CSRF)
c = {}
c.update(csrf(request))
    # instantiate an UploadFileForm with the POSTed request and the file
form = UploadFileForm(request.POST, request.FILES)
    # check whether the form is valid
if form.is_valid():
        # get the file passed in the request
filename = request.FILES['file']
        # iterate over the csv file
with open(filename.name, 'r') as csvfile:
            # split the data on the | delimiter and collect it in spamreader
spamreader = csv.reader(csvfile, delimiter='|')
            # control variable counting the loop iterations
n = 0
            # store each spamreader element in row
for row in spamreader:
                # increment n until it holds the total number of lines
n = n + 1
            # rewind the file pointer to the start of the file
csvfile.seek(0)
            # walk spamreader collecting the desired data
for (h,m,i) in spamreader:
                # build a Host object from the parsed data
h = Host(hostname = h, mac_address = m, ip_address = i)
                # save the object to the database
h.save()
                # decrement the iteration counter
n = n - 1
                # stop once every row has been processed
if(n == 1):
break
        # redirect to the listing page
return HttpResponseRedirect("/list")
else:
        # redirect to the listing page
return HttpResponseRedirect("/list")
# Download view
def download(request):
    # Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="hostnames.csv"'
    # Fetch all hosts from the database
hosts = Host.objects.all()
writer = csv.writer(response)
    # Iterate over all hosts
for h in hosts:
        # Write one line to the .csv file following the pattern
        # <<hostname|mac|ip>>
writer.writerow([
smart_str(h.hostname+"|"+h.mac_address+"|"+h.ip_address),
])
    # Return the response as a file, enabling the download
return response
# Update page view
@login_required
def update(request):
    if (request.method == 'GET'): # default rendering
        # fetch the ip passed via GET
ip = request.GET['ip']
        # load the update page template
template = loader.get_template('hostnames/update.html')
        # pass the requested host's ip in the context
c = RequestContext(request, {
'host' : Host.objects.get(ip_address=ip),
})
        # respond with the rendered template
return HttpResponse(template.render(c))
if (request.method == 'POST'):
        # security formalities (CSRF)
c = {}
c.update(csrf(request))
        # instantiate an EditForm with the update data
form = EditForm(request.POST)
        # check whether the form is valid
if form.is_valid():
            # fetch the Host with the id passed in the form (the model's pk is ip_address)
            h = Host.objects.get(pk=form.cleaned_data['id'])
            # set hostname from the form value
h.hostname = form.cleaned_data['hostname']
            # set mac_address from the form value
h.mac_address = form.cleaned_data['mac']
            # set ip_address from the form value
h.ip_address = form.cleaned_data['ip']
            # save the updated object to the database
h.save()
            # redirect to the listing page
return HttpResponseRedirect("/list")
else:
return HttpResponseRedirect("/list")
# Retrieve page view
@login_required
def retrieve(request):
    # capture the ip passed via GET
ip = request.GET['ip']
    # load the retrieve template
template = loader.get_template('hostnames/retrieve.html')
    # request the context, passing the host matching the ip
c = RequestContext(request, {
'host' : Host.objects.get(ip_address=ip),
})
    # respond with the rendered template
return HttpResponse(template.render(c))
# Listing page view
@login_required
def list(request):
page = request.GET.get('page', 1)
try:
pagination = request.GET.get('pagination', 10)
except:
pagination = 10
try:
ordem = request.GET['ordem']
if('0' in ordem):
if('h' in ordem):
host_list = Host.objects.filter().order_by('hostname')
if('m' in ordem):
host_list = Host.objects.filter().order_by('mac_address')
if('i' in ordem):
host_list = Host.objects.filter().order_by('ip_address')
else:
if('h' in ordem):
host_list = Host.objects.filter().order_by('-hostname')
if('m' in ordem):
host_list = Host.objects.filter().order_by('-mac_address')
if('i' in ordem):
host_list = Host.objects.filter().order_by('-ip_address')
except:
ordem = '0h'
host_list = Host.objects.filter().order_by('hostname')
if(pagination == 'todos'):
paginator = Paginator(host_list, len(host_list))
else:
paginator = Paginator(host_list, pagination)
try:
hosts = paginator.page(page)
except PageNotAnInteger:
hosts = paginator.page(1)
except EmptyPage:
hosts = paginator.page(paginator.num_pages)
    # load the list template
template = loader.get_template('hostnames/list.html')
if('0' in ordem):
proxOrdem = '1'
else:
proxOrdem = '0'
print(pagination)
    # request a context containing all Host objects
c = RequestContext(request, {
'hosts' : hosts,
'ordem': ordem,
'proxOrdem': proxOrdem,
'paginacao': pagination
})
    # respond with the rendered template
return HttpResponse(template.render(c))
# Sorts the search fields according to the order - Hostname, Mac, Ip
def ordenaPesquisa(pesquisa):
pesquisaOrdenada = ['','','']
if (pesquisa != None):
for i in pesquisa:
if ("hostname" in i):
pesquisaOrdenada[0] = i
if ("mac" in i):
pesquisaOrdenada[1] = i
if ("ip" in i):
pesquisaOrdenada[2] = i
pesquisaOrdenada = separaDados(pesquisaOrdenada)
else:
pesquisaOrdenada = None
return pesquisaOrdenada
# Builds an array with the search data
def separaDados(pesquisa):
listaDados = ['','','']
    if(pesquisa[0] != ''):
        listaDados[0] = pesquisa[0][9:]
    if(pesquisa[1] != ''):
        listaDados[1] = pesquisa[1][4:]
    if(pesquisa[2] != ''):
        listaDados[2] = pesquisa[2][3:]
return listaDados
import operator
# Search result view
def pesquisar(request):
if request.method == 'GET':
try:
pagination = request.GET.get('pagination', 10)
except:
pagination = 10
        # read the search query
pesquisa = request.GET['pesquisa']
pesquisa = pesquisa.replace(' ', '')
try:
ordem = request.GET['ordem']
except:
ordem = '0h'
pesquisaSplit = pesquisa.split("|")
if('|' not in pesquisa and '=' not in pesquisa):
if(len(pesquisa) == 1):
pesquisa2 = [pesquisa]
else:
pesquisa2 = [pesquisa]
else:
pesquisa2 = ordenaPesquisa(pesquisaSplit)
content = host_detail_pesquisar(request, pesquisa2, ordem)
stream = BytesIO(content)
data = JSONParser().parse(stream)
#data = data.order_by('+hostname')
serializer = HostSerializer(data=data)
serializer.is_valid()
template = loader.get_template('hostnames/pesquisar.html')
page = request.GET.get('page', 1)
if(pagination == 'todos'):
paginator = Paginator(data, len(data))
else:
paginator = Paginator(data, pagination)
try:
hosts = paginator.page(page)
except PageNotAnInteger:
hosts = paginator.page(1)
except EmptyPage:
hosts = paginator.page(paginator.num_pages)
if('0' in ordem):
proxOrdem= '1'
else:
proxOrdem = '0'
c = RequestContext (request, {
'hosts' : hosts,
'pesquisa': pesquisa,
'ordem': ordem,
'proxOrdem': proxOrdem,
'paginacao': pagination
})
        # respond with the rendered template
return HttpResponse(template.render(c))
class HostList(generics.ListCreateAPIView):
queryset = Host.objects.all()
serializer_class = HostSerializer
class HostDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Host.objects.all()
serializer_class = HostSerializer
|
{"/hostnames_provider/hostnames/serializers.py": ["/hostnames_provider/hostnames/models.py"]}
|
17,352
|
DiegoAscanio/hostnames_provider
|
refs/heads/master
|
/hostnames_provider/hostnames/forms.py
|
'''
@file forms.py
@lastmod 8/11/2016
'''
# Imports
from django import forms
# Form definitions
# Login form (not currently used)
class LoginForm(forms.Form):
user = forms.CharField(label='user')
password = forms.CharField(label='password')
# .csv file upload form
class UploadFileForm(forms.Form):
'''
    File upload class: used to validate an upload form
'''
file = forms.FileField(label='file')
# Host creation form
class CreateForm(forms.Form):
'''
    Host creation class: used to validate a Host to be added
'''
hostname = forms.CharField(label='hostname')
mac = forms.CharField(label='mac', max_length=17)
ip = forms.CharField(label='ip', max_length=15)
# Host retrieval form
class RetrieveForm(forms.Form):
hostname = forms.CharField(label='hostname')
mac = forms.CharField(label='mac', max_length=17)
ip = forms.CharField(label='ip', max_length=15)
# Host edit form
class EditForm(forms.Form):
'''
    Host edit class: used to validate a Host to be updated
'''
id = forms.CharField(label='id')
hostname = forms.CharField(label='hostname')
mac = forms.CharField(label='mac', max_length=17)
ip = forms.CharField(label='ip', max_length=15)
# Host deletion form
class DeleteForm(forms.Form):
'''
    Host deletion class: used to validate a Host to be deleted
'''
ip = forms.CharField(label='ip_address')
# Contact form
class ContactForm(forms.Form):
first_name = forms.CharField(label='first_name')
last_name = forms.CharField(label='last_name')
email = forms.EmailField(label='email')
phone = forms.CharField(label='phone')
comment = forms.CharField(label='comment')
|
{"/hostnames_provider/hostnames/serializers.py": ["/hostnames_provider/hostnames/models.py"]}
|
17,353
|
DiegoAscanio/hostnames_provider
|
refs/heads/master
|
/hostnames_provider/extract.py
|
# MODULE FOR EXTRACTING ANY |-SEPARATED CSV
lines = [line.rstrip('\n') for line in open('dados.txt')]
print(lines)
n = len(lines)
hostname = ""
mac = ""
ip = ""
i = 0
while(i < n-1) :
    array = lines[i].split("|")
hostname = array[0]
mac = array[1]
ip = array[2]
print("HOSTNAME = ", hostname, "MAC = ", mac,"IP = ", ip)
i = i + 1
|
{"/hostnames_provider/hostnames/serializers.py": ["/hostnames_provider/hostnames/models.py"]}
|
17,354
|
DiegoAscanio/hostnames_provider
|
refs/heads/master
|
/hostnames_provider/hostnames/serializers.py
|
'''
@file serializers.py
@lastmod 8/11/2016
'''
from rest_framework import serializers
from .models import Host
from django.db import models
# Host class serializer
class HostSerializer(serializers.ModelSerializer):
'''
    Host serializer class: specifies the model class used and its fields
'''
class Meta:
model = Host
fields = ('hostname','mac_address','ip_address')
|
{"/hostnames_provider/hostnames/serializers.py": ["/hostnames_provider/hostnames/models.py"]}
|
17,358
|
iakirago/AiLearning-Theory-Applying
|
refs/heads/master
|
/机器学习算法原理及推导/其它/第二章——手写线性回归算法/LinearRegression/UnivariateLinearRegression.py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from linear_regression import LinearRegression
data = pd.read_csv('../data/world-happiness-report-2017.csv') # load the data
# split into training and test data at an 8:2 ratio
train_data = data.sample(frac=0.8)
test_data = data.drop(train_data.index)
input_param_name = 'Economy..GDP.per.Capita.' # input feature
output_param_name = 'Happiness.Score' # label
x_train = train_data[[input_param_name]].values # build the data arrays
y_train = train_data[[output_param_name]].values
x_test = test_data[[input_param_name]].values
y_test = test_data[[output_param_name]].values
# visualize: shows the distribution of the training and test data
plt.scatter(x_train, y_train, label='Train data')
plt.scatter(x_test, y_test, label='Test data')
plt.xlabel(input_param_name)
plt.ylabel(output_param_name)
plt.title('Happy')
plt.legend()
plt.show()
# train the linear regression model
num_iterations = 500 # number of iterations
learning_rate = 0.01 # learning rate
linear_regression = LinearRegression(x_train, y_train) # initialize the model
(theta, cost_history) = linear_regression.train(learning_rate, num_iterations)
print('initial loss:', cost_history[0])
print('loss after training:', cost_history[-1])
plt.plot(range(num_iterations), cost_history)
plt.xlabel('Iteration')
plt.ylabel('Cost')
plt.title('GD')
plt.show()
# test the linear regression model
predictions_num = 100 # predict 100 points
# draw a line from the min to the max value
x_predictions = np.linspace(x_train.min(), x_train.max(), predictions_num).reshape(predictions_num, 1)
y_predictions = linear_regression.predict(x_predictions)
plt.scatter(x_train, y_train, label='Train data')
plt.scatter(x_test, y_test, label='Test data')
plt.plot(x_predictions, y_predictions, 'r', label='Prediction')
plt.xlabel(input_param_name)
plt.ylabel(output_param_name)
plt.title('Happy')
plt.legend()
plt.show()
|
{"/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/prepare_for_training.py": ["/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/normalize.py", "/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/generate_sinusoids.py"]}
|
17,359
|
iakirago/AiLearning-Theory-Applying
|
refs/heads/master
|
/机器学习算法原理及推导/其它/第二章——手写线性回归算法/util/features/prepare_for_training.py
|
"""Prepares the dataset for training"""
import numpy as np
from .normalize import normalize
from .generate_polynomials import generate_polynomials
from .generate_sinusoids import generate_sinusoids
def prepare_for_training(data, polynomial_degree=0, sinusoid_degree=0, normalize_data=True):
    # total number of samples
num_examples = data.shape[0]
data_processed = np.copy(data)
    # preprocessing
features_mean = 0
features_deviation = 0
data_normalized = data_processed
if normalize_data:
(
data_normalized,
features_mean,
features_deviation
) = normalize(data_processed)
data_processed = data_normalized
    # sinusoidal feature transform
if sinusoid_degree > 0:
sinusoids = generate_sinusoids(data_normalized, sinusoid_degree)
data_processed = np.concatenate((data_processed, sinusoids), axis=1)
    # polynomial feature transform
if polynomial_degree > 0:
polynomials = generate_polynomials(data_normalized, polynomial_degree)
data_processed = np.concatenate((data_processed, polynomials), axis=1)
    # add a column of ones (bias term)
data_processed = np.hstack((np.ones((num_examples, 1)), data_processed))
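    # e.g. with the defaults, a (100, 2) input comes back as (100, 3): a ones column plus the normalized features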
return data_processed, features_mean, features_deviation
|
{"/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/prepare_for_training.py": ["/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/normalize.py", "/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/generate_sinusoids.py"]}
|
17,360
|
iakirago/AiLearning-Theory-Applying
|
refs/heads/master
|
/机器学习竞赛实战_优胜解决方案/ACM SIGSPATIAL 2021 GISCUP/LGB_13700/2_cross_fea_order_id_level.py
|
#coding=utf-8
"""
Author: Aigege
Code: https://github.com/AiIsBetter
"""
# date 2021.08.01
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.feature_extraction.text import CountVectorizer
import networkx as nx
import os
import gc
import warnings
from utils import parallel_apply_fea,add_features_in_group
from functools import partial
warnings.filterwarnings("ignore")
def last_k_cross_time_interval(gr, periods):
gr_ = gr.copy()
gr_ = gr_.iloc[::-1]
    gr_['t_i_v'] = gr_['cross_time'].diff()
    gr_['t_i_v'] = gr_['t_i_v'].fillna(0)
gr_ = gr_.drop_duplicates().reset_index(drop = True)
    # cross_time deltas
features = {}
for period in periods:
if period > 10e5:
period_name = 'zsl_cross_time_interval_all'
gr_period = gr_.copy()
else:
period_name = 'zsl_cross_time_interval_last_{}_'.format(period)
gr_period = gr_.iloc[:period]
features = add_features_in_group(features, gr_period, 't_i_v',
['mean','max', 'min', 'std','sum'],
period_name)
return features
# last k cross time statistics
def last_cross_time_features(gr,periods):
gr_ = gr.copy()
gr_ = gr_.iloc[::-1]
features = {}
for period in periods:
if period > 10e5:
period_name = 'zsl_all_'
gr_period = gr_.copy()
else:
period_name = 'zsl_last_{}_'.format(period)
gr_period = gr_.iloc[:period]
features = add_features_in_group(features, gr_period, 'cross_time',
['max', 'sum', 'mean','min','std'],
period_name)
return features
# last k cross id time trend
def trend_in_last_k_cross_id_time(gr, periods):
gr_ = gr.copy()
gr_ = gr_.iloc[::-1]
features = {}
for period in periods:
gr_period = gr_.iloc[:period]
features = add_trend_feature(features, gr_period,
'cross_time', 'zsl_{}_period_trend_'.format(period)
)
return features
# trend feature
def add_trend_feature(features, gr, feature_name, prefix):
y = gr[feature_name].values
try:
x = np.arange(0, len(y)).reshape(-1, 1)
lr = LinearRegression()
lr.fit(x, y)
trend = lr.coef_[0]
except:
trend = np.nan
features['{}{}'.format(prefix, feature_name)] = trend
return features
def slice_id_change(x):
hour = x * 5 / 60
hour = np.floor(hour)
hour += 8
if hour >= 24:
hour = hour - 24
return hour
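# e.g. slice_id_change(0) -> 8.0 (08:00) and slice_id_change(192) -> 0.0 (midnight)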
if __name__ == '__main__':
nrows = None
root_path = '../data/giscup_2021/'
read_idkey = np.load(root_path + 'id_key_to_connected_allday.npy', allow_pickle=True).item()
read_grapheb = np.load(root_path + 'graph_embeddings_retp1_directed.npy', allow_pickle=True).item()
read_grapheb_retp = np.load(root_path + 'graph_embeddings_retp05_directed.npy', allow_pickle=True).item()
for i in read_grapheb:
read_grapheb[i] = list(read_grapheb[i]) + list(read_grapheb_retp[i])
del read_grapheb_retp
head_columns = ['order_id', 'ata', 'distance', 'simple_eta', 'driver_id','slice_id']
embedding_k = 256
fill_list = [0] * embedding_k
df = []
#######################################nextlinks #######################################
nextlinks = pd.read_csv(root_path+'nextlinks.txt', sep=' ', header=None)
nextlinks.columns=['from_id', 'to_id']
nextlinks['to_id'] = nextlinks['to_id'].astype('str')
nextlinks['to_id'] = nextlinks['to_id'].apply(lambda x: x.split(","))
nextlinks = pd.DataFrame({'from_id':nextlinks.from_id.repeat(nextlinks.to_id.str.len()),
'to_id':np.concatenate(nextlinks.to_id.values)})
from_id_weight = nextlinks['from_id'].value_counts()
from_id_weight = from_id_weight.to_frame()
from_id_weight['index'] = from_id_weight.index
from_id_weight.columns=['weight', 'from_id']
nextlinks = pd.merge(nextlinks,from_id_weight, 'left', on=['from_id'])
nextlinks = nextlinks.sort_values(by='weight',ascending=False)
G = nx.DiGraph()
from_id = nextlinks['from_id'].astype(str).to_list()
to_id = nextlinks['to_id'].to_list()
weight = nextlinks['weight'].to_list()
edge_tuple = list(zip(from_id, to_id,weight))
print('adding')
G.add_weighted_edges_from(edge_tuple)
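    # keep the ids of the 50000 most-central nodes (by degree centrality) as a lookup list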
dc = nx.algorithms.centrality.degree_centrality(G)
dc = sorted(dc.items(), key=lambda d: d[1],reverse=True)
dc = dc[:50000]
dc = [str(i[0]) for i in dc ]
#######################################cross #######################################
for name in os.listdir(root_path+'train/'):
data_time = name.split('.')[0]
if data_time=='20200803':
continue
train = pd.read_csv(root_path+'train/{}'.format(name),sep= ';;',header=None,nrows=nrows)
print("开始处理", data_time)
train_head = pd.DataFrame(train[0].str.split(' ').tolist(),columns = ['order_id', 'ata', 'distance','simple_eta', 'driver_id', 'slice_id'])
train_head['order_id'] = train_head['order_id'].astype(str)
train_head['ata'] = train_head['ata'].astype(float)
train_head['distance'] = train_head['distance'].astype(float)
train_head['simple_eta'] = train_head['simple_eta'].astype(float)
train_head['driver_id'] = train_head['driver_id'].astype(int)
train_head['slice_id'] = train_head['slice_id'].astype(int)
        # process the cross data
data_cross = train[[2]]
data_cross['index'] = train_head.index
data_cross['order_id'] = train_head['order_id']
data_cross_split = data_cross[2].str.split(' ', expand=True).stack().to_frame()
data_cross_split = data_cross_split.reset_index(level=1, drop=True).rename(columns={0: 'cross_info'})
data_cross_split = data_cross[['index', 'order_id']].join(data_cross_split)
data_cross_split[['cross_id', 'cross_time']] = data_cross_split['cross_info'].str.split(':', 2, expand=True)
data_cross_split['cross_time'] = data_cross_split['cross_time'].astype(float)
tmp_cross_id = data_cross_split['cross_id'].str.split('_', expand=True)
tmp_cross_id.columns=['cross_id_in','cross_id_out']
data_cross_split = pd.concat([data_cross_split,tmp_cross_id],axis=1).drop(['cross_id','cross_info'],axis=1)
data_cross_split['date_time'] = data_time
data_cross_split = data_cross_split.drop('index',axis=1).reset_index(drop=True)
print('preprocess finish!')
print('start feature engineering')
feature = train_head[['order_id', 'distance']]
###################static fea#############################################
data_cross_split['zsl_cross_id_isnull'] =0
data_cross_split.loc[data_cross_split['cross_id_in'].isnull(),'zsl_cross_id_isnull'] = 1
data_cross_split.loc[data_cross_split['cross_id_in'].isnull(),'cross_id_in'] = '-1'
data_cross_split.loc[data_cross_split['cross_id_out'].isnull(),'cross_id_out'] = '-1'
#######################order cross_id count###############################
df = data_cross_split.groupby('order_id', as_index=False)
tmp_crossid_agg = df['cross_id_in'].agg({'zsl_order_cross_id_in_count': 'count'})
tmp_crossid_agg['zsl_order_cross_id_in_count_bins'] = 0
tmp_crossid_agg.loc[(tmp_crossid_agg['zsl_order_cross_id_in_count']>=5)&(tmp_crossid_agg['zsl_order_cross_id_in_count']<10),'zsl_order_cross_id_in_count_bins']=1
tmp_crossid_agg.loc[(tmp_crossid_agg['zsl_order_cross_id_in_count']>=10)&(tmp_crossid_agg['zsl_order_cross_id_in_count']<20),'zsl_order_cross_id_in_count_bins']=2
tmp_crossid_agg.loc[(tmp_crossid_agg['zsl_order_cross_id_in_count']>=20),'zsl_order_cross_id_in_count_bins']=3
feature = feature.merge(tmp_crossid_agg,on='order_id',how='left')
print('order cross_id count finish!')
#######################order cross id & distance###############################
feature['zsl_order_cross_is_highspeed'] = 0
feature.loc[(feature['distance']>90000)&(feature['zsl_order_cross_id_in_count']<30),'zsl_order_cross_is_highspeed'] = 1
print('order cross id & distance finish!')
#######################order cross id & nextlinks centry###############################
tmp = data_cross_split[data_cross_split['cross_id_in'].isin(dc)]
tmp = tmp.groupby('order_id', as_index=False)
tmp_linkid_centry_count = tmp['cross_id_in'].agg({'zsl_order_cross_id_in_centry_count': 'count'})
feature = feature.merge(tmp_linkid_centry_count,on='order_id',how='left')
feature['zsl_order_cross_id_in_centry_count'] = feature['zsl_order_cross_id_in_centry_count'].fillna(0)
tmp = data_cross_split[data_cross_split['cross_id_out'].isin(dc)]
tmp = tmp.groupby('order_id', as_index=False)
tmp_linkid_centry_count = tmp['cross_id_out'].agg({'zsl_order_cross_id_out_centry_count': 'count'})
feature = feature.merge(tmp_linkid_centry_count, on='order_id', how='left')
feature['zsl_order_cross_id_out_centry_count'] = feature['zsl_order_cross_id_out_centry_count'].fillna(0)
print('order cross_id & nextlinks centry finish!')
#######################order cross_time sum mean max min var std###############################
tmp_linktime_agg = df['cross_time'].agg({'zsl_order_cross_time_sum': 'sum','zsl_order_cross_time_mean': 'mean',
'zsl_order_cross_time_max': 'max','zsl_order_cross_time_min': 'min',
'zsl_order_cross_time_var': 'var'})
feature = feature.merge(tmp_linktime_agg,on='order_id',how='left')
print('order cross_time sum mean max min var std finish!')
#######################order distance/cross_id_count###############################
feature['zsl_distance_div_cross_id_count'] = feature['distance']*10/feature['zsl_order_cross_id_in_count']
feature = feature.drop('distance', axis=1)
print('order distance div cross_id_count finish!')
###################trend fea#############################################
###################trend cross time#####################################
groupby = data_cross_split.groupby(['order_id'])
func = partial(trend_in_last_k_cross_id_time, periods=[2, 5, 10, 20,100000000])
g = parallel_apply_fea(groupby, func, index_name='order_id', num_workers=5, chunk_size=10000)
feature = feature.merge(g, on='order_id', how='left')
func = partial(last_cross_time_features, periods=[2, 5, 10, 20,100000000])
g = parallel_apply_fea(groupby, func, index_name='order_id', num_workers=5, chunk_size=10000)
feature = feature.merge(g, on='order_id', how='left')
func = partial(last_k_cross_time_interval, periods=[2, 5, 10, 20, 100000000])
g = parallel_apply_fea(groupby, func, index_name='order_id', num_workers=5, chunk_size=10000)
feature = feature.merge(g, on='order_id', how='left')
print('trend cross time finish!')
####################nextlinks graph embedding#######################
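# The two .map() lookups below turn each raw cross link id into an embedding
# vector: raw id -> embedding-table index (read_idkey) -> embedding list
# (read_grapheb); ids missing from either mapping fall through as the string '0'
# and are replaced by the constant fill_list vector in replace_list().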
data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].astype(int)
data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].map(read_idkey)
data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].fillna(0)
data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].astype(int)
data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].map(read_grapheb)
data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].fillna('0')
def replace_list(x):
if isinstance(x, str):
x = fill_list
return x
data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].apply(replace_list)
cross_id_in_col = ['zsl_cross_id_in_eb{}'.format(i) for i in range(embedding_k)]
agg_col = dict(zip(cross_id_in_col, ['mean'] * len(cross_id_in_col)))
cross_id_in_array = np.array(data_cross_split.pop('cross_id_in').to_list())
cross_id_in_array = pd.DataFrame(cross_id_in_array, columns=agg_col, dtype=np.float16)
data_cross_split = pd.concat([data_cross_split, cross_id_in_array], axis=1)
tmp = data_cross_split.groupby('order_id', as_index=False)
tmp_crossidin_agg = tmp.agg(agg_col)
feature = feature.merge(tmp_crossidin_agg, on='order_id', how='left')
print('trend cross_id_in eb finish!')
data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].astype(int)
data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].map(read_idkey)
data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].fillna(0)
data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].astype(int)
data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].map(read_grapheb)
data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].fillna('0')
def replace_list(x):
if isinstance(x, str):
x = fill_list
return x
data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].apply(replace_list)
cross_id_out_col = ['zsl_cross_id_out_eb{}'.format(i) for i in range(embedding_k)]
agg_col = dict(zip(cross_id_out_col, ['mean'] * len(cross_id_out_col)))
cross_id_out_array = np.array(data_cross_split.pop('cross_id_out').to_list())
cross_id_out_array = pd.DataFrame(cross_id_out_array, columns=agg_col, dtype=np.float16)
data_cross_split = pd.concat([data_cross_split, cross_id_out_array], axis=1)
tmp = data_cross_split.groupby('order_id', as_index=False)
tmp_crossidout_agg = tmp.agg(agg_col)
feature = feature.merge(tmp_crossidout_agg, on='order_id', how='left')
print('trend cross_id_out eb finish!')
multipy_df = []
multipy_col = []
for col1, col2 in zip(cross_id_in_col, cross_id_out_col):
tmp = feature[col1] * feature[col2]
multipy_df.append(tmp)
multipy_col.append(col1 + '_mul_' + col2)
multipy_df = pd.concat(multipy_df, axis=1)
multipy_df.columns = multipy_col
feature = pd.concat([feature, multipy_df], axis=1)
print('trend cross_id_out eb multiply finish!')
feature.to_csv(root_path + 'feature/train/cross_fea_order_id_level_{}.csv'.format(data_time), index=False)
del train
gc.collect()
test = pd.read_csv(root_path+'20200901_test.txt',sep= ';;',header=None,nrows=nrows)
test_head = pd.DataFrame(test[0].str.split(' ').tolist(),columns = ['order_id', 'ata', 'distance','simple_eta', 'driver_id', 'slice_id'])
test_head['order_id'] = test_head['order_id'].astype(str)
test_head['ata'] = test_head['ata'].astype(float)
test_head['distance'] = test_head['distance'].astype(float)
test_head['simple_eta'] = test_head['simple_eta'].astype(float)
test_head['driver_id'] = test_head['driver_id'].astype(int)
test_head['slice_id'] = test_head['slice_id'].astype(int)
# process the cross data
data_cross = test[[2]]
data_cross['index'] = test_head.index
data_cross['order_id'] = test_head['order_id']
data_cross_split = data_cross[2].str.split(' ', expand=True).stack().to_frame()
data_cross_split = data_cross_split.reset_index(level=1, drop=True).rename(columns={0: 'cross_info'})
data_cross_split = data_cross[['index', 'order_id']].join(data_cross_split)
data_cross_split[['cross_id', 'cross_time']] = data_cross_split['cross_info'].str.split(':', n=2, expand=True)
data_cross_split['cross_time'] = data_cross_split['cross_time'].astype(float)
tmp_cross_id = data_cross_split['cross_id'].str.split('_', expand=True)
tmp_cross_id.columns = ['cross_id_in', 'cross_id_out']
data_cross_split = pd.concat([data_cross_split, tmp_cross_id], axis=1).drop(['cross_id', 'cross_info'], axis=1)
data_cross_split['date_time'] = '20200901'
data_cross_split = data_cross_split.drop('index', axis=1).reset_index(drop=True)
print('preprocess finish!')
print('start feature engineering')
feature = test_head[['order_id', 'distance']]
###################static fea#############################################
data_cross_split['zsl_cross_id_isnull'] = 0
data_cross_split.loc[data_cross_split['cross_id_in'].isnull(), 'zsl_cross_id_isnull'] = 1
data_cross_split.loc[data_cross_split['cross_id_in'].isnull(), 'cross_id_in'] = '-1'
data_cross_split.loc[data_cross_split['cross_id_out'].isnull(), 'cross_id_out'] = '-1'
#######################order cross_id count###############################
df = data_cross_split.groupby('order_id', as_index=False)
tmp_crossid_agg = df['cross_id_in'].agg({'zsl_order_cross_id_in_count': 'count'})
tmp_crossid_agg['zsl_order_cross_id_in_count_bins'] = 0
tmp_crossid_agg.loc[(tmp_crossid_agg['zsl_order_cross_id_in_count'] >= 5) & (
tmp_crossid_agg['zsl_order_cross_id_in_count'] < 10), 'zsl_order_cross_id_in_count_bins'] = 1
tmp_crossid_agg.loc[(tmp_crossid_agg['zsl_order_cross_id_in_count'] >= 10) & (
tmp_crossid_agg['zsl_order_cross_id_in_count'] < 20), 'zsl_order_cross_id_in_count_bins'] = 2
tmp_crossid_agg.loc[(tmp_crossid_agg['zsl_order_cross_id_in_count'] >= 20), 'zsl_order_cross_id_in_count_bins'] = 3
feature = feature.merge(tmp_crossid_agg, on='order_id', how='left')
print('order cross_id count finish!')
#######################order cross id & distance###############################
feature['zsl_order_cross_is_highspeed'] = 0
feature.loc[(feature['distance'] > 90000) & (
feature['zsl_order_cross_id_in_count'] < 30), 'zsl_order_cross_is_highspeed'] = 1
print('order cross id & distance finish!')
#######################order cross id & nextlinks centry###############################
tmp = data_cross_split[data_cross_split['cross_id_in'].isin(dc)]
tmp = tmp.groupby('order_id', as_index=False)
tmp_linkid_centry_count = tmp['cross_id_in'].agg({'zsl_order_cross_id_in_centry_count': 'count'})
feature = feature.merge(tmp_linkid_centry_count, on='order_id', how='left')
feature['zsl_order_cross_id_in_centry_count'] = feature['zsl_order_cross_id_in_centry_count'].fillna(0)
tmp = data_cross_split[data_cross_split['cross_id_out'].isin(dc)]
tmp = tmp.groupby('order_id', as_index=False)
tmp_linkid_centry_count = tmp['cross_id_out'].agg({'zsl_order_cross_id_out_centry_count': 'count'})
feature = feature.merge(tmp_linkid_centry_count, on='order_id', how='left')
feature['zsl_order_cross_id_out_centry_count'] = feature['zsl_order_cross_id_out_centry_count'].fillna(0)
print('order cross_id & nextlinks centry finish!')
#######################order cross_time sum mean max min var std###############################
tmp_linktime_agg = df['cross_time'].agg({'zsl_order_cross_time_sum': 'sum', 'zsl_order_cross_time_mean': 'mean',
'zsl_order_cross_time_max': 'max', 'zsl_order_cross_time_min': 'min',
'zsl_order_cross_time_var': 'var'})
feature = feature.merge(tmp_linktime_agg, on='order_id', how='left')
print('order cross_time sum mean max min var std finish!')
#######################order distance/cross_id_count###############################
feature['zsl_distance_div_cross_id_count'] = feature['distance'] * 10 / feature['zsl_order_cross_id_in_count']
feature = feature.drop('distance', axis=1)
print('order distance div cross_id_count finish!')
###################trend fea#############################################
###################trend cross time#####################################
groupby = data_cross_split.groupby(['order_id'])
func = partial(trend_in_last_k_cross_id_time, periods=[2, 5, 10, 20, 100000000])
g = parallel_apply_fea(groupby, func, index_name='order_id', num_workers=5, chunk_size=10000)
feature = feature.merge(g, on='order_id', how='left')
func = partial(last_cross_time_features, periods=[2, 5, 10, 20, 100000000])
g = parallel_apply_fea(groupby, func, index_name='order_id', num_workers=5, chunk_size=10000)
feature = feature.merge(g, on='order_id', how='left')
func = partial(last_k_cross_time_interval, periods=[2, 5, 10, 20, 100000000])
g = parallel_apply_fea(groupby, func, index_name='order_id', num_workers=5, chunk_size=10000)
feature = feature.merge(g, on='order_id', how='left')
print('trend cross time finish!')
####################nextlinks graph embedding#######################
data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].astype(int)
data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].map(read_idkey)
data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].fillna(0)
data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].astype(int)
data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].map(read_grapheb)
data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].fillna('0')
def replace_list(x):
if isinstance(x, str):
x = fill_list
return x
data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].apply(replace_list)
cross_id_in_col = ['zsl_cross_id_in_eb{}'.format(i) for i in range(embedding_k)]
agg_col = dict(zip(cross_id_in_col, ['mean'] * len(cross_id_in_col)))
cross_id_in_array = np.array(data_cross_split.pop('cross_id_in').to_list())
cross_id_in_array = pd.DataFrame(cross_id_in_array, columns=agg_col, dtype=np.float16)
data_cross_split = pd.concat([data_cross_split, cross_id_in_array], axis=1)
tmp = data_cross_split.groupby('order_id', as_index=False)
tmp_crossidin_agg = tmp.agg(agg_col)
feature = feature.merge(tmp_crossidin_agg, on='order_id', how='left')
print('trend cross_id_in eb finish!')
data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].astype(int)
data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].map(read_idkey)
data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].fillna(0)
data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].astype(int)
data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].map(read_grapheb)
data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].fillna('0')
def replace_list(x):
if isinstance(x, str):
x = fill_list
return x
data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].apply(replace_list)
cross_id_out_col = ['zsl_cross_id_out_eb{}'.format(i) for i in range(embedding_k)]
agg_col = dict(zip(cross_id_out_col, ['mean'] * len(cross_id_out_col)))
cross_id_out_array = np.array(data_cross_split.pop('cross_id_out').to_list())
cross_id_out_array = pd.DataFrame(cross_id_out_array, columns=agg_col, dtype=np.float16)
data_cross_split = pd.concat([data_cross_split, cross_id_out_array], axis=1)
tmp = data_cross_split.groupby('order_id', as_index=False)
tmp_crossidout_agg = tmp.agg(agg_col)
feature = feature.merge(tmp_crossidout_agg, on='order_id', how='left')
print('trend cross_id_out eb finish!')
multipy_df = []
multipy_col = []
for col1, col2 in zip(cross_id_in_col, cross_id_out_col):
tmp = feature[col1] * feature[col2]
multipy_df.append(tmp)
multipy_col.append(col1 + '_mul_' + col2)
multipy_df = pd.concat(multipy_df, axis=1)
multipy_df.columns = multipy_col
feature = pd.concat([feature, multipy_df], axis=1)
print('trend cross_id_out eb multiply finish!')
feature.to_csv(root_path + 'feature/test/cross_fea_order_id_level_20200901.csv', index=False)
|
{"/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/prepare_for_training.py": ["/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/normalize.py", "/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/generate_sinusoids.py"]}
|
17,361
|
iakirago/AiLearning-Theory-Applying
|
refs/heads/master
|
/机器学习算法原理及推导/其它/第二章——手写线性回归算法/util/features/normalize.py
|
"""Normalize features"""
"""数据标准化"""
import numpy as np
def normalize(features):
features_normalized = np.copy(features).astype(float)
# compute the mean
features_mean = np.mean(features, 0)
# compute the standard deviation
features_deviation = np.std(features, 0)
# apply the standardization
if features.shape[0] > 1:
features_normalized -= features_mean
# avoid division by zero
features_deviation[features_deviation == 0] = 1
features_normalized /= features_deviation
return features_normalized, features_mean, features_deviation
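# A minimal usage sketch (illustrative, with made-up toy data): normalize()
# returns the z-scored matrix plus the mean/std it used, so the same transform
# can be re-applied to unseen samples.
#
#   >>> X = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
#   >>> X_norm, mu, sigma = normalize(X)
#   >>> X_norm.mean(axis=0)                      # ~[0. 0.]
#   >>> np.allclose((X - mu) / sigma, X_norm)    # True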
|
{"/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/prepare_for_training.py": ["/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/normalize.py", "/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/generate_sinusoids.py"]}
|
17,362
|
iakirago/AiLearning-Theory-Applying
|
refs/heads/master
|
/机器学习竞赛实战_优胜解决方案/ACM SIGSPATIAL 2021 GISCUP/DCN蒸馏_12953/dcn_model/process.py
|
import pandas as pd
import numpy as np
import joblib
from sklearn.preprocessing import StandardScaler, LabelEncoder
from tqdm import tqdm
from pandarallel import pandarallel
from sklearn.model_selection import train_test_split
# import random
import gc
import ast
import os
import sys
import warnings
os.environ["TF_CPP_MIN_LOG_LEVEL"]='3'
warnings.filterwarnings('ignore')
pd.options.mode.chained_assignment = None
#pandarallel.initialize(nb_workers=16)
pandarallel.initialize()
def pandas_list_to_array(df):
"""
    Input: DataFrame of shape (x, y), where each cell holds a list of length l
Return: np.array of shape (x, l, y)
"""
return np.transpose(
np.array(df.values.tolist()),
(0, 2, 1)
)
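# A minimal shape sketch (illustrative, with made-up toy data): two rows, two
# list-valued columns, each list of length 3 -> output of shape (2, 3, 2).
#
#   >>> demo = pd.DataFrame({'a': [[1, 2, 3], [4, 5, 6]],
#   ...                      'b': [[7, 8, 9], [0, 1, 2]]})
#   >>> pandas_list_to_array(demo).shape
#   (2, 3, 2)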
def preprocess_inputs(df, cols: list):
return pandas_list_to_array(
df[cols]
)
def append_all_data(files_list, file_head_path):
"""
concat all the data
:param files_list: the name of data
:param file_head_path: the path of data
:return: DataFrame of data for all
"""
data_all_path = file_head_path + files_list[0]
data_all = pd.read_csv(data_all_path)
data_all = data_all.head(0)
try:
del data_all['Unnamed: 0']
except KeyError as e:
pass
# loop over every file and append it
for i in files_list:
data_path = file_head_path + i
print("当前文件为:", data_path)
data = pd.read_csv(data_path)
try:
del data['Unnamed: 0']
except KeyError as e:
pass
data_all = data_all.append(data)
return data_all
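# NOTE (pandas-version assumption): DataFrame.append was removed in pandas 2.0.
# An equivalent sketch collects the frames first and concatenates once, which is
# also faster than appending inside the loop:
#
#   frames = [pd.read_csv(file_head_path + i) for i in files_list]
#   data_all = pd.concat(frames, ignore_index=True)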
def file_name(file_dir):
files_list = []
for root, dirs, files in os.walk(file_dir):
# print("success")
for name in files:
files_list.append(name)
return files_list
def load_data(making_data_dir, link_data_dir, cross_data_dir, link_data_other_dir, head_data_dir,
win_order_data_dir, pre_arrival_sqe_dir,zsl_link_data_dir, arrival_data_dir=None, zsl_arrival_data_dir=None, arrival_sqe_data_dir=None):
"""
load the data from each input directory, then merge everything to order level
:return: all data by order_level
"""
print('-------------LOAD DATA for mk_data----------------')
mk_list = file_name(making_data_dir)
mk_list.sort()
mk_data = append_all_data(mk_list, making_data_dir)
#mk_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_order_xt/join_20200825.csv') # for test running
mk_data['date_time'] = mk_data['date_time'].astype(str)
# print(mk_data['date_time'].head())
mk_data['dayofweek'] = pd.to_datetime(mk_data['date_time'])
mk_data['dayofweek'] = mk_data['dayofweek'].dt.dayofweek + 1
weather_le = LabelEncoder()
mk_data['weather_le'] = weather_le.fit_transform(mk_data['weather'])
print('Remove the wk2_, m1_ and ratio columns')
del_cols = []
mk_cols = mk_data.columns.tolist()
for i in range(len(mk_cols)):
if 'wk2_' in mk_cols[i]:
del_cols.append(mk_cols[i])
if 'm1_' in mk_cols[i]:
del_cols.append(mk_cols[i])
if 'ratio' in mk_cols[i]:
del_cols.append(mk_cols[i])
del_cols = del_cols + ['weather', 'driver_id', 'date_time_dt', 'link_time_sum','date_time_sum']
print('*-' * 40, 'Will drop the columns:', del_cols)
mk_data.drop(columns=del_cols, axis=1, inplace=True)
print('The init shape of mk_data:', mk_data.shape)
#if arrival_data_dir:
# mk_data, _ = train_test_split(mk_data, test_size=0.4, random_state=42)
#print('*-'*40)
#print('The train_test_split shape of mk_data:', mk_data.shape)
print('-------------LOAD WIN DATA----------------')
win_order_list = file_name(win_order_data_dir)
win_order_list.sort()
win_order_data = append_all_data(win_order_list, win_order_data_dir)
#win_order_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/win_order_xw/win_for_slice_20200825.csv') # for test running
del_win_order_cols = []
win_order_cols = win_order_data.columns.tolist()
for i in range(len(win_order_cols)):
if 'last_wk_lk_current' in win_order_cols[i]:
del_win_order_cols.append(win_order_cols[i])
#if 'distance' in win_order_cols[i]:
# del_win_order_cols.append(win_order_cols[i])
#if '1_percent' in win_order_cols[i]:
# del_win_order_cols.append(win_order_cols[i])
#if '0_percent' in win_order_cols[i]:
# del_win_order_cols.append(win_order_cols[i])
del_win_order_cols = del_win_order_cols + ['slice_id', 'date_time']
win_order_data.drop(columns=del_win_order_cols, axis=1, inplace=True)
print('win_order_data.shape',win_order_data.shape)
mk_data = pd.merge(mk_data, win_order_data, how='left', on='order_id')
print('mk_data.shape',mk_data.shape)
del win_order_data
gc.collect()
"""
print('-------------LOAD ZSL DATA----------------')
zsl_link_list = file_name(zsl_link_data_dir)
zsl_link_list.sort()
zsl_link_data = append_all_data(zsl_link_list, zsl_link_data_dir)
#zsl_link_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/zsl_train_link/link_fea_order_id_level_20200825.csv') # for test running
get_zsl_link_cols = []
zsl_link_cols = zsl_link_data.columns.tolist()
for i in range(len(zsl_link_cols)):
if 'eb' in zsl_link_cols[i]:
get_zsl_link_cols.append(zsl_link_cols[i])
#print(get_zsl_link_cols)
get_zsl_link_cols.insert(0, 'order_id')
print(zsl_link_data.shape)
zsl_link_data = zsl_link_data[get_zsl_link_cols]
print('mk_data.shape',mk_data.shape)
mk_data = pd.merge(mk_data, zsl_link_data, on='order_id')
print('mk_data.shape',mk_data.shape)
del zsl_link_data
gc.collect()
"""
"""
#zsl_cross_list = file_name(zsl_cross_data_dir)
#zsl_cross_list.sort()
#zsl_cross_data = append_all_data(zsl_cross_list, zsl_cross_data_dir)
zsl_cross_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/zsl_train_cross_0703/cross_fea_order_id_level_20200825.csv') # for test running
get_zsl_cross_cols = []
zsl_cross_cols = zsl_cross_data.columns.tolist()
for i in range(len(zsl_cross_cols)):
    if any(key in zsl_cross_cols[i] for key in ('last', 'div', 'interval', 'period')):
get_zsl_cross_cols.append(zsl_cross_cols[i])
get_zsl_cross_cols.append('order_id')
print(zsl_cross_data.shape)
zsl_cross_data = zsl_cross_data[get_zsl_cross_cols]
print('mk_data.shape',mk_data.shape)
mk_data = pd.merge(mk_data, zsl_cross_data, on='order_id')
print('mk_data.shape',mk_data.shape)
del zsl_cross_data
gc.collect()
"""
print('-------------LOAD HEAD DATA----------------')
head_list = file_name(head_data_dir)
head_list.sort()
head_data = append_all_data(head_list, head_data_dir)
#head_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_head_link_data_clear/head_link_20200825.csv') # for test running
get_head_cols = ['len_tmp','status_0','status_1','status_2','status_3','status_4','rate_0','rate_1','rate_2','rate_3','rate_4']
get_head_cols.insert(0, 'order_id')
print('head_data.shape:',head_data.shape)
head_data = head_data[get_head_cols]
print('mk_data.shape',mk_data.shape)
mk_data = pd.merge(mk_data, head_data, how='left', on='order_id')
print('mk_data.shape',mk_data.shape)
del head_data
gc.collect()
print('-------------LOAD DATA for link_data----------------')
link_list = file_name(link_data_dir)
link_list.sort()
link_data = append_all_data(link_list, link_data_dir)
# for test running
#link_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_170_link_sqe_for_order/sqe_20200825_link.txt')
print('The init shape of link_data:', link_data.shape)
print('-------------LOAD DATA for arrival_sqe_data----------------')
arrival_sqe_list = file_name(pre_arrival_sqe_dir)
arrival_sqe_list.sort()
arrival_sqe_data = append_all_data(arrival_sqe_list, pre_arrival_sqe_dir)
#arrival_sqe_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/sqe_arrival_for_link/20200825.csv') # for test running
del arrival_sqe_data['slice_id']
arrival_cols = arrival_sqe_data.columns.tolist()
new_arrival_cols = ['future_'+i for i in arrival_cols if i != 'order_id']
new_arrival_cols.insert(0, 'order_id')
arrival_sqe_data.columns = new_arrival_cols
print('The init shape of arrival_sqe_data:', arrival_sqe_data.shape)
link_data = pd.merge(link_data, arrival_sqe_data, how='left', on='order_id')
del arrival_sqe_data
gc.collect()
"""
print('-------------LOAD DATA for arrival_link_data----------------')
arrival_link_list = file_name(pre_arrival_data_dir)
arrival_link_list.sort()
arrival_link_data = append_all_data(arrival_link_list, pre_arrival_data_dir)
#arrival_link_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/final_pre_arrival_data/sqe_20200825_link.txt') # for test running
print('The init shape of arrival_link_data:', arrival_link_data.shape)
link_data = pd.merge(link_data, arrival_link_data, how='left', on='order_id')
del arrival_link_data
gc.collect()
"""
"""
print('-------------LOAD DATA for h_s_link_data----------------')
h_s_link_list = file_name(h_s_for_link_dir)
h_s_link_list.sort()
h_s_link_data = append_all_data(h_s_link_list,h_s_for_link_dir)
#h_s_link_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_hightmp_slice_for_link_eb/20200825_link.txt') # for test running
h_s_link_data = h_s_link_data[['order_id', 'sqe_slice_id', 'sqe_hightemp', 'sqe_weather_le']]
print('The init shape of h_s_link_data:', h_s_link_data.shape)
link_data = pd.merge(link_data, h_s_link_data, how='left', on='order_id')
del h_s_link_data
gc.collect()
"""
print('-------------LOAD DATA for link_data_other----------------')
link_list_other = file_name(link_data_other_dir)
link_list_other.sort()
link_data_other = append_all_data(link_list_other, link_data_other_dir)
#link_data_other = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/for_0714_link_sqe_for_order_other/sqe_20200825_link.txt') # for test running
print('The init shape of link_data_other:', link_data_other.shape)
link_data = pd.merge(link_data, link_data_other, on='order_id')
# print(link_data.head(0))
# del link_data['lk_t_sub_by_min']
del_link_cols = ['lk_t_sub_by_min', 'lk_t_sub_by_q50', 'total_linktime_std']
# 'future_pre_arrival_status', 'future_arrive_slice_id'] # 'future_arrive_slice_id'
link_data.drop(columns=del_link_cols, axis=1, inplace=True)
print('The merge shape of link_data:', link_data.shape)
del link_data_other
gc.collect()
print('-------------LOAD DATA for link_data_arrival----------------')
if arrival_sqe_data_dir is not None:
link_list_arrival = file_name(arrival_sqe_data_dir)
link_list_arrival.sort()
link_data_arrival = append_all_data(link_list_arrival, arrival_sqe_data_dir)
#link_data_arrival = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_170_lk_arrival_sqe_for_order/sqe_20200825_link.txt') # for test running
print('The init shape of link_data_arrival:', link_data_arrival.shape)
link_data = pd.merge(link_data, link_data_arrival, on='order_id')
print('The merge shape of link_data:', link_data.shape)
del link_data_arrival
gc.collect()
link_cols_list = ['link_id', 'link_time', 'link_current_status', 'pr',
'dc', 'link_arrival_status', 'future_pre_arrival_status', 'future_arrive_slice_id']
data = pd.merge(mk_data, link_data, how='left', on='order_id')
del mk_data
del link_data
gc.collect()
print('-------------LOAD DATA for arrival_data----------------')
if arrival_data_dir is not None:
arrival_list = file_name(arrival_data_dir)
arrival_list.sort()
arrival_data = append_all_data(arrival_list, arrival_data_dir)
#arrival_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_link_sqe_for_order_arrival/sqe_20200825_link.txt')
arrival_cols = ['order_id', 'lk_arrival_0_percent', 'lk_arrival_1_percent','lk_arrival_2_percent', 'lk_arrival_3_percent', 'lk_arrival_4_percent']
#print(arrival_data.head(2))
data = pd.merge(data, arrival_data, how='left', on='order_id')
del arrival_data
gc.collect()
print('-------------LOAD DATA for zsl_arrival_data----------------')
if zsl_arrival_data_dir is not None:
zsl_arrival_list = file_name(zsl_arrival_data_dir)
zsl_arrival_list.sort()
zsl_arrival_data = append_all_data(zsl_arrival_list, zsl_arrival_data_dir)
#zsl_arrival_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/zsl_arrival/link_fea_arrive_order_id_level_20200818.csv')
zsl_arrival_cols = zsl_arrival_data.columns.tolist()
zsl_arrival_cols.remove('order_id')
#print(zsl_arrival_data.head(2))
data = pd.merge(data, zsl_arrival_data, how='left', on='order_id')
del zsl_arrival_data
gc.collect()
print('-------------LOAD DATA for cross_data----------------')
cross_list = file_name(cross_data_dir)
cross_list.sort()
cross_data = append_all_data(cross_list, cross_data_dir)
# for test running
#cross_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/for_0714_cross_sqe_for_order/sqe_20200825_cross.txt')
del_cross_cols = ['cr_t_sub_by_min', 'cr_t_sub_by_q50', 'total_crosstime_std']
cross_data.drop(columns=del_cross_cols, axis=1, inplace=True)
cross_cols_list = ['cross_id', 'cross_time']
print('The init shape of cross_data:', cross_data.shape)
data = pd.merge(data, cross_data, how='left', on='order_id')
del cross_data
gc.collect()
# data['cross_id'] = data['cross_id'].str.replace('nan','0')
# print('working..............................')
mk_cols_list = data.columns.tolist()
remove_mk_cols = ['order_id', 'slice_id', 'hightemp', 'lowtemp', 'weather_le', 'dayofweek', 'date_time', 'ata', 'link_arrival_status']
mk_cols_list = list(set(mk_cols_list) - set(remove_mk_cols))
mk_cols_list = list(set(mk_cols_list) - set(link_cols_list))
mk_cols_list = list(set(mk_cols_list) - set(cross_cols_list))
if arrival_data_dir is not None:
mk_cols_list = list(set(mk_cols_list) - set(arrival_cols))
mk_cols_list = list(set(mk_cols_list) - set(zsl_arrival_cols))
print('length of mk_cols_list', len(mk_cols_list))
print('*-' * 40)
print('The finish shape of data is:', data.shape)
return data, mk_cols_list, link_cols_list, cross_cols_list
def processing_data(data, link_cols_list, cross_cols_list, mk_cols_list, WIDE_COLS, is_test=False):
"""
parse stringified list columns with ast.literal_eval, then scale with StandardScaler
:return: the processed DataFrame
"""
#print('Now, Starting parallel_apply the arrival_status..................')
#for i in tqdm(['link_arrival_status']):
# data[i] = data[i].parallel_apply(ast.literal_eval)
print('Now, Starting parallel_apply the link..................')
for i in tqdm(link_cols_list):
data[i] = data[i].parallel_apply(ast.literal_eval)
gc.collect()
print('Now, Starting parallel_apply the cross..................')
for i in tqdm(cross_cols_list):
data[i] = data[i].parallel_apply(ast.literal_eval)
data = data.fillna(0)
# train, val
if is_test is True:
print('is_test is True')
ss = joblib.load('../model_h5/ss_scaler')
ss_cols = mk_cols_list + WIDE_COLS
data[ss_cols] = ss.transform(data[ss_cols])
return data
else:
ss_cols = mk_cols_list + WIDE_COLS
ss = StandardScaler()
ss.fit(data[ss_cols])
data[ss_cols] = ss.transform(data[ss_cols])
joblib.dump(ss, '../model_h5/ss_scaler')
print('is_test is False')
return data
def processing_inputs(data, mk_cols_list, link_cols_list, cross_cols_list, WIDE_COLS, arrival=True):
"""
reshape the data into the model's input tensors
:return:
"""
print('*-' * 40, 'processing_inputs')
if arrival:
mk_cols_list = mk_cols_list + ['lk_arrival_0_percent', 'lk_arrival_1_percent','lk_arrival_2_percent', 'lk_arrival_3_percent', 'lk_arrival_4_percent']
mk_cols_list = mk_cols_list + ['zsl_link_arrival_status_mean','zsl_link_arrival_status_nunique','zsl_link_arrival_status0','zsl_link_arrival_status1','zsl_link_arrival_status2','zsl_link_arrival_status3']
if 'lk_arrival_0_percent' in mk_cols_list:
print('The lk_arrival_0_percent in the mk_cols_list')
#print('*-' * 40, 'EXIT')
#sys.exit(0)
print('111'*40, 'HAVE FEATURES OF ARRIVAL')
else:
print('222'*40, 'HAVE NO FEATURES OF ARRIVAL')
if 'ata' in mk_cols_list:
print('The ata in the mk_cols_list')
print('*-' * 40, 'EXIT')
sys.exit(0)
if 'ata' in link_cols_list:
print('The ata in the link_cols_list')
if 'ata' in cross_cols_list:
print('The ata in the cross_cols_list')
if 'ata' in WIDE_COLS:
print('The ata in the WIDE_COLS')
print('*-' * 40, 'EXIT')
sys.exit(0)
data_link_inputs = preprocess_inputs(data, cols=link_cols_list)
data.drop(columns=link_cols_list, axis=1, inplace=True)
gc.collect()
print('drop the link_cols_list')
# print(data_link_inputs[:, :, :1])
# data['cross_id'] = data['cross_id'].str.replace('nan','0')
data_cross_inputs = preprocess_inputs(data, cols=cross_cols_list)
data.drop(columns=cross_cols_list, axis=1, inplace=True)
gc.collect()
print('drop the cross_cols_list')
data_deep_input = data[mk_cols_list]
data_wide_input = data[WIDE_COLS].values
data_inputs_slice = data['slice_id'].values
data_labels = data['ata']
if arrival:
arrival_col = ['lk_arrival_0_percent', 'lk_arrival_1_percent',
'lk_arrival_2_percent', 'lk_arrival_3_percent', 'lk_arrival_4_percent']
data_arrival = data[arrival_col]
print('*-'*40, 'data_arrival', data_arrival.shape)
return data_link_inputs, data_cross_inputs, data_deep_input, data_wide_input, data_inputs_slice, data_labels, data_arrival
else:
return data_link_inputs, data_cross_inputs, data_deep_input, data_wide_input, data_inputs_slice, data_labels
def split_col(data, columns, fillna=None):
'''Split list-valued columns into separate numbered columns
:param data: the original data
:param columns: names of the columns to split
:type data: pandas.core.frame.DataFrame
:type columns: list
'''
for c in columns:
new_col = data.pop(c)
new_col = new_col.apply(lambda x: x+[fillna]*(max_len - len(x)) if isinstance(x, list) else [x]+[fillna]*(max_len - 1))  # pad missing entries; None can be swapped for np.nan
new_col = np.array(new_col.tolist()).T  # transpose
for i, j in enumerate(new_col):
data[c + str(i)] = j
return data
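# A minimal usage sketch (illustrative, with made-up toy data): a column of
# variable-length lists is padded to the longest list and fanned out into
# numbered columns c0, c1, ...
#
#   >>> demo = pd.DataFrame({'c': [[1, 2], [3]]})
#   >>> split_col(demo, ['c'], fillna=0)
#      c0  c1
#   0   1   2
#   1   3   0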
def list_to_np(x):
return np.array(x)
|
{"/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/prepare_for_training.py": ["/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/normalize.py", "/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/generate_sinusoids.py"]}
|
17,363
|
iakirago/AiLearning-Theory-Applying
|
refs/heads/master
|
/机器学习竞赛实战_优胜解决方案/ACM SIGSPATIAL 2021 GISCUP/WD_128544/wd_model/main.py
|
import pandas as pd
import numpy as np
import gc
import process
import wd_model
import time
RANDOM_SEED = 42
# types of columns of the data_set DataFrame
WIDE_COLS = [
'weather_le', 'hightemp', 'lowtemp', 'dayofweek'
]
if __name__ == '__main__':
t1 = time.time()
print(wd_model.get_available_gpus())  # returns e.g. ['/device:GPU:0', '/device:GPU:1']
# LOAD DATA
print('*-' * 40, 'LOAD DATA')
making_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_order_xt/'
link_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_170_link_sqe_for_order/'
cross_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/for_0714_cross_sqe_for_order/'
head_link_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_head_link_data_clear/'
win_order_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/win_order_xw/'
pre_arrival_sqe_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/sqe_arrival_for_link/'
data_for_driver_xw = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/data_for_driver_xw/'
downstream_status_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/downstream_status_for_order/'
data, mk_cols_list, link_cols_list, cross_cols_list = process.load_data(making_data_dir,
link_data_dir,
cross_data_dir,
head_link_dir,
win_order_data_dir,
pre_arrival_sqe_dir,
data_for_driver_xw,
downstream_status_dir)
# PROCESSING DATA
print('*-' * 40, 'PROCESSING DATA')
train_data, val_data = process.processing_data(data, mk_cols_list, link_cols_list, cross_cols_list,
WIDE_COLS)
del data
gc.collect()
# print(train_data.columns.tolist())
# PROCESSING INPUTS
print('*-' * 40, 'PROCESSING INPUTS')
# SAVE LIST
a = np.array(mk_cols_list)
np.save('../model_h5/wd_mk_cols_list_0730_5.npy', a)
a = np.array(link_cols_list)
np.save('../model_h5/wd_link_cols_list_0730_5.npy', a)
a = np.array(cross_cols_list)
np.save('../model_h5/wd_cross_cols_list_0730_5.npy', a)
pred_cols = ['ata']
print('*-' * 40, 'PROCESSING INPUTS FOR TRAIN_DATA', train_data.shape)
train_link_inputs, train_cross_inputs, train_deep_input, train_wide_input, \
train_inputs_slice, train_labels = process.processing_inputs(
train_data, mk_cols_list, link_cols_list, cross_cols_list, WIDE_COLS)
del train_data
gc.collect()
print('*-' * 40, 'PROCESSING INPUTS FOR VAL_DATA', val_data.shape)
val_link_inputs, val_cross_inputs, val_deep_input, val_wide_input, \
val_inputs_slice, val_labels = process.processing_inputs(
val_data, mk_cols_list, link_cols_list, cross_cols_list, WIDE_COLS)
del val_data
gc.collect()
# MODEL_INIT
print('*-' * 40, 'MODEL_INIT')
deep_col_len, wide_col_len = train_deep_input.shape[1], train_wide_input.shape[1]
link_nf_size, cross_nf_size = train_link_inputs.shape[2], train_cross_inputs.shape[2]
link_size = 639877 + 2
cross_size = 44313 + 2
slice_size = 288
# link_seqlen, cross_seqlen = 170, 12  # already the model defaults
print("link_size:{},link_nf_size:{},cross_size:{},cross_nf_size:{},slice_size:{}".format(link_size, link_nf_size,
cross_size, cross_nf_size,
slice_size))
print("deep_col_len:{}, wide_col_len:{}".format(deep_col_len, wide_col_len))
model = wd_model.wd_model(link_size, cross_size, slice_size, deep_col_len, wide_col_len,
link_nf_size, cross_nf_size, conv='conv')
mc, es, lr = wd_model.get_mc_es_lr('0730_5', patience=4, min_delta=1e-4)
print('*-' * 40, 'MODEL_INIT END')
# MODEL_FIT
print('*-' * 40, 'MODEL_FIT_PREDICT')
history = model.fit(
[train_link_inputs, train_cross_inputs, train_deep_input, train_wide_input, train_inputs_slice], train_labels,
validation_data=(
[val_link_inputs, val_cross_inputs, val_deep_input, val_wide_input, val_inputs_slice], val_labels),
batch_size=2048, # 2048,256
epochs=100,
verbose=1,
callbacks=[es])
np.save('../model_h5/history_0730_5.npy', history.history)
model.save_weights("../model_h5/wd_model_0730_5.h5")
del train_link_inputs, train_cross_inputs, train_deep_input, \
train_wide_input, train_inputs_slice, train_labels
del val_link_inputs, val_cross_inputs, val_deep_input, val_wide_input, val_inputs_slice, val_labels
gc.collect()
print('*-' * 40, 'LOAD TEST DATA')
making_data_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/order_xt/'
link_data_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/max_170_link_sqe_for_order/'
cross_data_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/cross_sqe_for_order/'
head_link_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/head_link_data_clear/'
win_order_test_data_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/win_order_xw/'
pre_arrival_sqe_test_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/sqe_arrival_for_link/'
data_test_for_driver_xw = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/data_for_driver_xw/'
downstream_status_test_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/downstream_status_for_order/'
test_data, _, _, _ = process.load_data(making_data_dir,
link_data_dir,
cross_data_dir,
head_link_dir,
win_order_test_data_dir,
pre_arrival_sqe_test_dir,
data_test_for_driver_xw,
downstream_status_test_dir)
# PROCESSING DATA
print('*-' * 40, 'PROCESSING DATA')
test_data = process.processing_data(test_data, mk_cols_list, link_cols_list, cross_cols_list,
WIDE_COLS, is_test=True)
print('*-' * 40, 'PROCESSING INPUTS FOR TEST_DATA', test_data.shape)
test_link_inputs, test_cross_inputs, test_deep_input, test_wide_input, \
test_inputs_slice, test_labels = process.processing_inputs(
test_data, mk_cols_list, link_cols_list, cross_cols_list, WIDE_COLS)
test_pre = test_data[['order_id']]
del test_data
gc.collect()
# MODEL_PREDICT
print('*-' * 40, 'MODEL_PREDICT')
test_pre = test_pre.rename(columns={'order_id': 'id'})
test_pred = model.predict(
[test_link_inputs, test_cross_inputs, test_deep_input, test_wide_input, test_inputs_slice],
batch_size=2048)
test_pre['test_predict'] = test_pred
# test_pre['test_predict'] = test_pre['test_predict'].round(0)
test_pre = test_pre.rename(columns={'test_predict': 'result'})  # rename the column
test_pre = test_pre[['id', 'result']]
print(test_pre.head())
result_save_path = '../result_csv/submit_w_0730_5.csv'
print('*-' * 40, 'CSV_SAVE_PATH:', result_save_path)
test_pre.to_csv(result_save_path, index=0)  # save
print('..........Finish')
t2 = time.time()
print("Total time spent: {:.4f}".format((t2-t1)/3600))
|
{"/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/prepare_for_training.py": ["/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/normalize.py", "/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/generate_sinusoids.py"]}
|
17,364
|
iakirago/AiLearning-Theory-Applying
|
refs/heads/master
|
/机器学习竞赛实战_优胜解决方案/ACM SIGSPATIAL 2021 GISCUP/WD_128544/wd_model/process.py
|
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler, LabelEncoder
from tqdm import tqdm
from pandarallel import pandarallel
from sklearn.model_selection import train_test_split
# import random
import gc
import ast
import os
import warnings
import joblib
warnings.filterwarnings('ignore')
pd.options.mode.chained_assignment = None
pandarallel.initialize()
def pandas_list_to_array(df):
"""
    Input: DataFrame of shape (x, y), where each cell holds a list of length l
Return: np.array of shape (x, l, y)
"""
return np.transpose(
np.array(df.values.tolist()),
(0, 2, 1)
)
def preprocess_inputs(df, cols: list):
return pandas_list_to_array(
df[cols]
)
def append_all_data(files_list, file_head_path):
"""
concat all the data
:param files_list: the name of data
:param file_head_path: the path of data
:return: DataFrame of data for all
"""
data_all_path = file_head_path + files_list[0]
data_all = pd.read_csv(data_all_path)
data_all = data_all.head(0)
try:
del data_all['Unnamed: 0']
except KeyError as e:
pass
# loop over every file and append it
for i in files_list:
data_path = file_head_path + i
print("当前文件为:", data_path)
data = pd.read_csv(data_path)
try:
del data['Unnamed: 0']
except KeyError as e:
pass
data_all = data_all.append(data)
return data_all
def file_name(file_dir):
files_list = []
for root, dirs, files in os.walk(file_dir):
# print("success")
for name in files:
files_list.append(name)
return files_list
def load_data(making_data_dir, link_data_dir, cross_data_dir, head_link_dir,
win_order_data_dir, pre_arrival_sqe_dir, data_for_driver_xw, downstream_status_dir):
"""
load the data from each input directory, then merge everything to order level
:return: all data by order_level
"""
print('-------------LOAD DATA for mk_data----------------')
mk_list = file_name(making_data_dir)
mk_list.sort()
mk_data = append_all_data(mk_list, making_data_dir)
#mk_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_order_xt/join_20200825.csv') # for test running
mk_data['date_time'] = mk_data['date_time'].astype(str)
mk_data['dayofweek'] = pd.to_datetime(mk_data['date_time'])
mk_data['dayofweek'] = mk_data['dayofweek'].dt.dayofweek+1
weather_le = LabelEncoder()
mk_data['weather_le'] = weather_le.fit_transform(mk_data['weather'])
mk_data['driver_id'] = mk_data['driver_id'].astype(str)
"""
print('-------------LOAD DATA for driver_data----------------')
driver_list = file_name(data_for_driver_xw)
driver_list.sort()
driver_data = append_all_data(driver_list, data_for_driver_xw)
#driver_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/data_for_driver_xw/driver_20200825_head.txt')
driver_data = driver_data[['driver_id','date_time','entropy','hour_mean','workday_order','weekend_order']]
driver_data['date_time'] = driver_data['date_time'].astype(str)
driver_data['driver_id'] = driver_data['driver_id'].astype(str)
mk_data = mk_data.merge(driver_data, on=['driver_id', 'date_time'], how='left')
del driver_data
"""
"""
print('-------------LOAD DATA for downstream_status_for_order----------------')
ds_data_list = file_name(downstream_status_dir)
ds_data_list.sort()
ds_link_data = append_all_data(ds_data_list, downstream_status_dir)
#ds_link_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/downstream_status_for_order/ds_for_order_20200825.csv')
mk_data = mk_data.merge(ds_link_data, on=['order_id'], how='left')
del ds_link_data
"""
"""
print('-------------LOAD DATA for rate_status_for_order----------------')
#rate_data_list = file_name(rate_status_for_order)
#rate_data_list.sort()
#rate_data = append_all_data(rate_data_list, rate_status_for_order)
rate_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/rate_status_for_order/rate_for_order_20200825.csv')
mk_data = mk_data.merge(rate_data, on=['order_id'], how='left')
del rate_data
"""
print('Remove the wk2_ and m1_ and ratio')
del_cols = []
mk_cols = mk_data.columns.tolist()
for i in range(len(mk_cols)):
if 'wk2_' in mk_cols[i]:
del_cols.append(mk_cols[i])
if 'm1_' in mk_cols[i]:
del_cols.append(mk_cols[i])
if 'ratio' in mk_cols[i]:
del_cols.append(mk_cols[i])
del_cols = del_cols + ['date_time_mean','weather', 'driver_id', 'date_time_dt', 'link_time_sum','date_time_sum']
print('*-' * 40, 'Will drop the columns:', del_cols)
mk_data.drop(columns=del_cols, axis=1, inplace=True)
print('The init shape of mk_data:', mk_data.shape)
print('-------------LOAD WIN DATA----------------')
win_order_list = file_name(win_order_data_dir)
win_order_list.sort()
win_order_data = append_all_data(win_order_list, win_order_data_dir)
#win_order_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/win_order_xw/win_for_slice_20200825.csv') # for test running
del_win_order_cols = []
win_order_cols = win_order_data.columns.tolist()
for i in range(len(win_order_cols)):
if 'last_wk_lk_current' in win_order_cols[i]:
del_win_order_cols.append(win_order_cols[i])
#if 'distance' in win_order_cols[i]:
# del_win_order_cols.append(win_order_cols[i])
#if '1_percent' in win_order_cols[i]:
# del_win_order_cols.append(win_order_cols[i])
#if '0_percent' in win_order_cols[i]:
# del_win_order_cols.append(win_order_cols[i])
del_win_order_cols = del_win_order_cols + ['slice_id', 'date_time']
win_order_data.drop(columns=del_win_order_cols, axis=1, inplace=True)
print('win_order_data.shape',win_order_data.shape)
mk_data = pd.merge(mk_data, win_order_data, how='left', on='order_id')
print('mk_data.shape',mk_data.shape)
del win_order_data
gc.collect()
print('-------------LOAD HEAD DATA----------------')
head_list = file_name(head_link_dir)
head_list.sort()
head_data = append_all_data(head_list, head_link_dir)
#head_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/head_link_data_clear/head_link_20200825.csv') # for test running
get_head_cols = ['len_tmp','status_0','status_1','status_2','status_3','status_4','rate_0','rate_1','rate_2','rate_3','rate_4']
get_head_cols.insert(0, 'order_id')
print('head_data.shape:',head_data.shape)
head_data = head_data[get_head_cols]
print('mk_data.shape',mk_data.shape)
mk_data = pd.merge(mk_data, head_data, how='left', on='order_id')
print('mk_data.shape',mk_data.shape)
del head_data
gc.collect()
print('-------------LOAD DATA for link_data----------------')
link_list = file_name(link_data_dir)
link_list.sort()
link_data = append_all_data(link_list, link_data_dir)
#link_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_170_link_sqe_for_order/sqe_20200825_link.txt') # for test running
#del_link_cols = ['link_time_sub','link_time_sub_sum','link_time_sub_mean', 'link_time_sub_std','link_time_sub_skew']
#link_data.drop(del_link_cols, axis=1, inplace=True)
print('The init shape of link_data:', link_data.shape)
gc.collect()
print('-------------LOAD DATA for arrival_sqe_data----------------')
arrival_sqe_list = file_name(pre_arrival_sqe_dir)
arrival_sqe_list.sort()
arrival_sqe_data = append_all_data(arrival_sqe_list, pre_arrival_sqe_dir)
#arrival_sqe_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/sqe_arrival_for_link/20200825.csv') # for test running
del arrival_sqe_data['slice_id']
del arrival_sqe_data['pre_arrival_status']
del arrival_sqe_data['arrive_slice_id']
arrival_cols = arrival_sqe_data.columns.tolist()
new_arrival_cols = ['future_'+i for i in arrival_cols if i != 'order_id']
new_arrival_cols.insert(0, 'order_id')
arrival_sqe_data.columns = new_arrival_cols
print('The init shape of arrival_sqe_data:', arrival_sqe_data.shape)
link_data = pd.merge(link_data, arrival_sqe_data, how='left', on='order_id')
del arrival_sqe_data
gc.collect()
link_cols_list = ['link_id', 'link_time', 'link_current_status', 'pr','dc']
print('-------------LOAD DATA for cross_data----------------')
cross_list = file_name(cross_data_dir)
cross_list.sort()
cross_data = append_all_data(cross_list, cross_data_dir)
#cross_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/for_0714_cross_sqe_for_order/sqe_20200825_cross.txt') # for test running
del_cross_cols = ['cr_t_sub_by_min', 'cr_t_sub_by_q50', 'total_crosstime_std']
cross_data.drop(columns=del_cross_cols, axis=1, inplace=True)
print('The init shape of cross_data:', cross_data.shape)
cross_cols_list = ['cross_id', 'cross_time']
data = pd.merge(mk_data, link_data, how='left', on='order_id')
del mk_data
del link_data
gc.collect()
data = pd.merge(data, cross_data, how='left', on='order_id')
del cross_data
gc.collect()
# remove the class type and id and label, for deep inputs
mk_cols_list = data.columns.tolist()
remove_mk_cols = ['order_id', 'slice_id', 'hightemp', 'lowtemp', 'weather_le', 'dayofweek', 'date_time', 'ata']
mk_cols_list = list(set(mk_cols_list) - set(remove_mk_cols))
mk_cols_list = list(set(mk_cols_list) - set(link_cols_list))
mk_cols_list = list(set(mk_cols_list) - set(cross_cols_list))
print('length of mk_cols_list', len(mk_cols_list))
print('*-' * 40)
print('The finish shape of data is:', data.shape)
return data, mk_cols_list, link_cols_list, cross_cols_list
def processing_data(data, mk_cols_list, link_cols_list, cross_cols_list, WIDE_COLS, is_test=False):
"""
parse stringified list columns with ast.literal_eval, scale with StandardScaler, then train_test_split
:return: train_data, val_data (or the processed data when is_test=True)
"""
print('Now, Starting parallel_apply the link..................')
for i in tqdm(link_cols_list):
data[i] = data[i].parallel_apply(ast.literal_eval)
print('Now, Starting parallel_apply the cross..................')
for i in tqdm(cross_cols_list):
data[i] = data[i].parallel_apply(ast.literal_eval)
# data = data.fillna(0)
data.fillna(data.median(),inplace=True)
ss_cols = mk_cols_list + WIDE_COLS
# train, val
if is_test is True:
print('is_test is True')
ss = joblib.load('../model_h5/ss_scaler')
data[ss_cols] = ss.transform(data[ss_cols])
return data
else:
ss = StandardScaler()
ss.fit(data[ss_cols])
data[ss_cols] = ss.transform(data[ss_cols])
joblib.dump(ss, '../model_h5/ss_scaler')
print('is_test is False')
data['date_time'] = data['date_time'].astype(int)
print("type(data['date_time']):", data['date_time'].dtype)
# print('Here train_test_split..................')
# all_train_data, _ = train_test_split(all_train_data, test_size=0.9, random_state=42)
print('*-' * 40, 'The data.shape:', data.shape)
train_data, val_data = train_test_split(data, test_size=0.15, random_state=42)
train_data = train_data.reset_index()
val_data = val_data.reset_index()
del train_data['index']
del val_data['index']
return train_data, val_data
def processing_inputs(data, mk_cols_list, link_cols_list, cross_cols_list, WIDE_COLS):
"""
reshape the data into the model's input tensors
:return:
"""
if 'ata' in mk_cols_list:
print('The ata in the mk_cols_list')
if 'ata' in link_cols_list:
print('The ata in the link_cols_list')
if 'ata' in cross_cols_list:
print('The ata in the cross_cols_list')
if 'ata' in WIDE_COLS:
print('The ata in the WIDE_COLS')
#link_cols_list = ['link_id', 'link_time','link_id_count','pr','dc',
# 'top_a','link_current_status','link_ratio']
#cross_cols_list = ['cross_id', 'cross_time']
data_link_inputs = preprocess_inputs(data, cols=link_cols_list)
data_cross_inputs = preprocess_inputs(data, cols=cross_cols_list)
data_deep_input = data[mk_cols_list].values
data_wide_input = data[WIDE_COLS].values
data_inputs_slice = data['slice_id'].values
# print('--------------------------------test, ', min(data['slice_id'].values.tolist()))
data_labels = data['ata'].values
return data_link_inputs, data_cross_inputs, data_deep_input, data_wide_input, data_inputs_slice, data_labels
|
{"/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/prepare_for_training.py": ["/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/normalize.py", "/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/generate_sinusoids.py"]}
|
17,365
|
iakirago/AiLearning-Theory-Applying
|
refs/heads/master
|
/机器学习竞赛实战_优胜解决方案/ACM SIGSPATIAL 2021 GISCUP/DCN蒸馏_12953/dcn_model/main.py
|
import pandas as pd
import numpy as np
import gc
import tensorflow as tf
import process
import dcn_model
import sys
import random
import os
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
tf.random.set_seed(42)
os.environ["TF_CPP_MIN_LOG_LEVEL"]='3'
RANDOM_SEED = 42
# types of columns of the data_set DataFrame
CATEGORICAL_COLS = [
'weather_le', 'hightemp', 'lowtemp', 'dayofweek',
'slice_id', 'link_current_status_4'
]
NUMERIC_COLS = [
'distance', 'simple_eta', 'link_time_sum', 'link_count',
'cr_t_sum', 'link_current_status_4_percent', 'link_current_status_mean',
'pr_mean', 'dc_mean','lk_arrival_0_percent', 'lk_arrival_1_percent',
'lk_arrival_2_percent', 'lk_arrival_3_percent', 'lk_arrival_4_percent'
]
WIDE_COLS = [
'weather_le', 'hightemp', 'lowtemp', 'dayofweek'
]
IGNORE_COLS = [
'order_id', 'ata'
]
TRAINING = True
VAL_TO_TEST = False
def set_seed(seed=42):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
if __name__ == '__main__':
set_seed(RANDOM_SEED)
print(dcn_model.get_available_gpus())  # returns e.g. ['/device:GPU:0', '/device:GPU:1']
# LOAD DATA
print('*-' * 40, 'LOAD DATA')
making_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_order_xt/'
link_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_170_link_sqe_for_order/'
cross_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/for_0714_cross_sqe_for_order/'
link_data_other_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/for_0714_link_sqe_for_order_other/'
head_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_head_link_data_clear/'
win_order_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/win_order_xw/'
#pre_arrival_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/final_pre_arrival_data/'
arrival_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_link_sqe_for_order_arrival/'
zsl_arrival_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/zsl_arrival/'
arrival_sqe_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_170_lk_arrival_sqe_for_order/'
#h_s_for_link_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_hightmp_slice_for_link_eb/'
pre_arrival_sqe_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/sqe_arrival_for_link/'
zsl_link_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/zsl_train_link/'
data, mk_cols_list, link_cols_list, cross_cols_list = process.load_data(making_data_dir,
link_data_dir,
cross_data_dir,
link_data_other_dir,
head_data_dir,
win_order_data_dir,
pre_arrival_sqe_dir,
zsl_link_data_dir,
#pre_arrival_data_dir,
#h_s_for_link_dir,
arrival_data_dir,
zsl_arrival_data_dir,
arrival_sqe_data_dir)
#fd = dcn_model.FeatureDictionary(data, numeric_cols=NUMERIC_COLS, ignore_cols=IGNORE_COLS,
# cate_cols=CATEGORICAL_COLS)
# PROCESSING DATA
data['date_time'] = data['date_time'].astype(int)
print("type(data['date_time']):", data['date_time'].dtype)
data = data[data['date_time'] != 20200901]
print('Here train_test_split..................')
# all_train_data, _ = train_test_split(all_train_data, test_size=0.9, random_state=42)
data = data.reset_index()
del data['index']
print('*-' * 40, 'The data.shape:', data.shape)
train_data, val_data = train_test_split(data, test_size=0.15, random_state=RANDOM_SEED)
train_data = train_data.reset_index()
val_data = val_data.reset_index()
del train_data['index']
del val_data['index']
print('Save End.................')
fb_list = CATEGORICAL_COLS+NUMERIC_COLS+IGNORE_COLS
data_bak = data[fb_list]
del data
data = data_bak.copy()
del data_bak
gc.collect()
print('*-' * 40, 'PROCESSING DATA FOR TRAIN')
train_data = process.processing_data(train_data, link_cols_list, cross_cols_list, mk_cols_list, WIDE_COLS)
#del data
#fb_list = CATEGORICAL_COLS+NUMERIC_COLS+IGNORE_COLS
#data = data[fb_list]
#gc.collect()
# print(train_data.columns.tolist())
# PROCESSING INPUTS
print('*-' * 40, 'PROCESSING INPUTS')
# SAVE LIST
a = np.array(mk_cols_list)
np.save('../model_h5/mk_cols_list_0720_2.npy', a)
a = np.array(link_cols_list)
np.save('../model_h5/link_cols_list_0720_2.npy', a)
a = np.array(cross_cols_list)
    np.save('../model_h5/cross_cols_list_0720_2.npy', a)
a = np.array(CATEGORICAL_COLS)
np.save('../model_h5/CATEGORICAL_COLS_0720_2.npy', a)
del a
pred_cols = ['ata']
print('*-' * 40, 'PROCESSING INPUTS FOR TRAIN_DATA', train_data.shape)
train_link_inputs, train_cross_inputs, train_deep_input, train_wide_input, \
train_inputs_slice, train_labels, train_arrival = process.processing_inputs(
train_data, mk_cols_list, link_cols_list, cross_cols_list, WIDE_COLS)
X_train = dcn_model.preprocess(train_data, CATEGORICAL_COLS, NUMERIC_COLS)
train_pre = train_data[['order_id']]
del train_data
gc.collect()
    print('*-' * 40, 'PROCESSING DATA FOR VAL')
val_data = process.processing_data(val_data, link_cols_list, cross_cols_list, mk_cols_list, WIDE_COLS, is_test=True)
print('*-' * 40, 'PROCESSING INPUTS FOR VAL_DATA', val_data.shape)
val_link_inputs, val_cross_inputs, val_deep_input, val_wide_input, \
val_inputs_slice, val_labels, val_arrival = process.processing_inputs(
val_data, mk_cols_list, link_cols_list, cross_cols_list, WIDE_COLS)
X_val = dcn_model.preprocess(val_data, CATEGORICAL_COLS, NUMERIC_COLS)
# val_data.to_csv('../model_h5/val_data.csv', index=0) # saving csv for test running
val_pre = val_data[['order_id']]
del val_data
gc.collect()
# MODEL_INIT
print('*-' * 40, 'T_MODEL_INIT')
deep_col_len, wide_col_len = train_deep_input.values.shape[1], train_wide_input.shape[1]
link_size = 639877 + 2
cross_size = 44313 + 2
link_nf_size, cross_nf_size = train_link_inputs.shape[2], train_cross_inputs.shape[2]
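    # 288 slice ids = one day of 5-minute time slices (24 h x 12 per hour)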
slice_size = 288
    # link_seqlen, cross_seqlen = 170, 12  # already the defaults
print("link_size:{},link_nf_size:{},cross_size:{},cross_nf_size:{},slice_size:{}".format(link_size, link_nf_size,
cross_size, cross_nf_size,
slice_size))
print("deep_col_len:{}, wide_col_len:{}".format(deep_col_len, wide_col_len))
fd = dcn_model.FeatureDictionary(data, numeric_cols=NUMERIC_COLS, ignore_cols=IGNORE_COLS,
cate_cols=CATEGORICAL_COLS)
inp_layer, inp_embed = dcn_model.embedding_layers(fd)
autoencoder, encoder = dcn_model.create_autoencoder(train_deep_input.values.shape[-1], 1, noise=0.1)
if TRAINING:
autoencoder.fit(train_deep_input.values, (train_deep_input.values, train_labels.values),
epochs=1000, # 1000
batch_size=2048, # 1024
validation_split=0.1,
callbacks=[tf.keras.callbacks.EarlyStopping('val_ata_output_loss', patience=10, restore_best_weights=True)])
encoder.save_weights('../model_h5/t_encoder.hdf5')
else:
encoder.load_weights('../model_h5/t_encoder.hdf5')
encoder.trainable = False
del autoencoder
t_model = dcn_model.DCN_model(inp_layer, inp_embed, link_size, cross_size, slice_size, deep_col_len, wide_col_len,
link_nf_size, cross_nf_size, encoder, conv=True, have_knowledge=False)
#del encoder
gc.collect()
mc, es, lr = dcn_model.get_mc_es_lr('0720_2', patience=5, min_delta=1e-4)
print('*-' * 40, 'MODEL_INIT END')
print('*-' * 40, 'ARRIVAL_MODEL_FIT')
t_history = t_model.fit(
[
X_train['weather_le'], X_train['hightemp'], X_train['lowtemp'], X_train['dayofweek'],
X_train['slice_id'], X_train['link_current_status_4'],
X_train['distance'], X_train['simple_eta'], X_train['link_time_sum'], X_train['link_count'],
X_train['cr_t_sum'], X_train['link_current_status_4_percent'], X_train['link_current_status_mean'],
X_train['pr_mean'], X_train['dc_mean'],
X_train['lk_arrival_0_percent'], X_train['lk_arrival_1_percent'],X_train['lk_arrival_2_percent'],
X_train['lk_arrival_3_percent'],X_train['lk_arrival_4_percent'],
train_link_inputs, train_cross_inputs, train_deep_input.values, train_wide_input, train_inputs_slice],
train_labels.values,
validation_data=(
[
X_val['weather_le'], X_val['hightemp'], X_val['lowtemp'], X_val['dayofweek'],
X_val['slice_id'], X_val['link_current_status_4'],
X_val['distance'], X_val['simple_eta'], X_val['link_time_sum'], X_val['link_count'],
X_val['cr_t_sum'], X_val['link_current_status_4_percent'], X_val['link_current_status_mean'],
X_val['pr_mean'], X_val['dc_mean'],
X_val['lk_arrival_0_percent'], X_val['lk_arrival_1_percent'],X_val['lk_arrival_2_percent'],
X_val['lk_arrival_3_percent'],X_val['lk_arrival_4_percent'],
val_link_inputs, val_cross_inputs, val_deep_input.values, val_wide_input, val_inputs_slice],
(val_labels.values),),
batch_size=2048, # 2048,1024
epochs=100, # 100
verbose=1,
# )
callbacks=[es]) # lr
np.save('../model_h5/t_model_0720_2.npy', t_history.history)
t_model.save_weights("../model_h5/t_model_0720_2.h5")
print('*-' * 40, 't_MODEL_PREDICT')
y_knowledge_train = t_model.predict(
[X_train['weather_le'], X_train['hightemp'], X_train['lowtemp'], X_train['dayofweek'],
X_train['slice_id'], X_train['link_current_status_4'],
X_train['distance'], X_train['simple_eta'], X_train['link_time_sum'], X_train['link_count'],
X_train['cr_t_sum'], X_train['link_current_status_4_percent'], X_train['link_current_status_mean'],
X_train['pr_mean'], X_train['dc_mean'],
X_train['lk_arrival_0_percent'], X_train['lk_arrival_1_percent'],X_train['lk_arrival_2_percent'],
X_train['lk_arrival_3_percent'],X_train['lk_arrival_4_percent'],
train_link_inputs, train_cross_inputs, train_deep_input.values, train_wide_input, train_inputs_slice],
batch_size=2048)
y_knowledge_val = t_model.predict(
[
X_val['weather_le'], X_val['hightemp'], X_val['lowtemp'], X_val['dayofweek'],
X_val['slice_id'], X_val['link_current_status_4'],
X_val['distance'], X_val['simple_eta'], X_val['link_time_sum'], X_val['link_count'],
X_val['cr_t_sum'], X_val['link_current_status_4_percent'], X_val['link_current_status_mean'],
X_val['pr_mean'], X_val['dc_mean'],
X_val['lk_arrival_0_percent'], X_val['lk_arrival_1_percent'],X_val['lk_arrival_2_percent'],
X_val['lk_arrival_3_percent'],X_val['lk_arrival_4_percent'],
val_link_inputs, val_cross_inputs, val_deep_input.values, val_wide_input, val_inputs_slice],
batch_size=2048)
    print('*-'*40, 'TRANSFORM')
train_labels = pd.DataFrame(train_labels)
train_labels['y_knowledge_train'] = np.squeeze(y_knowledge_train)
print(np.squeeze(y_knowledge_train)[:2])
print(train_labels['y_knowledge_train'].head(2))
val_labels = pd.DataFrame(val_labels)
val_labels['y_knowledge_val'] = np.squeeze(y_knowledge_val)
print('*-' * 40, 't_MODEL_END')
zsl_arrival_cols = ['zsl_link_arrival_status_mean','zsl_link_arrival_status_nunique','zsl_link_arrival_status0','zsl_link_arrival_status1','zsl_link_arrival_status2','zsl_link_arrival_status3']
train_deep_input = train_deep_input.drop(['lk_arrival_0_percent','lk_arrival_1_percent','lk_arrival_2_percent','lk_arrival_3_percent','lk_arrival_4_percent'],axis=1)
train_deep_input = train_deep_input.drop(zsl_arrival_cols, axis=1)
val_deep_input = val_deep_input.drop(['lk_arrival_0_percent','lk_arrival_1_percent','lk_arrival_2_percent','lk_arrival_3_percent','lk_arrival_4_percent'],axis=1)
val_deep_input = val_deep_input.drop(zsl_arrival_cols, axis=1)
if 'ata' in train_deep_input.columns.tolist():
print('The ata in the train_deep_input')
print('*-' * 40, 'EXIT')
sys.exit(0)
if 'lk_arrival_0_percent' in train_deep_input.columns.tolist():
print('The lk_arrival_0_percent in the train_deep_input')
print('*-' * 40, 'EXIT')
sys.exit(0)
if 'lk_arrival_0_percent' in val_deep_input.columns.tolist():
print('The lk_arrival_0_percent in the val_deep_input')
print('*-' * 40, 'EXIT')
sys.exit(0)
if 'zsl_link_arrival_status_mean' in train_deep_input.columns.tolist():
print('The zsl_link_arrival_status_mean in the train_deep_input')
print('*-' * 40, 'EXIT')
sys.exit(0)
mk_cols_list = train_deep_input.columns.tolist()
print('*-' * 40, 'MODEL_FIT')
deep_col_len, wide_col_len = train_deep_input.values.shape[1], train_wide_input.shape[1]
print("deep_col_len:{}, wide_col_len:{}".format(deep_col_len, wide_col_len))
NUMERIC_COLS = list(set(NUMERIC_COLS)-set(['lk_arrival_0_percent','lk_arrival_1_percent','lk_arrival_2_percent',
'lk_arrival_3_percent','lk_arrival_4_percent']))
fb_list = CATEGORICAL_COLS+NUMERIC_COLS+IGNORE_COLS
if 'lk_arrival_0_percent' in fb_list:
print('The lk_arrival_0_percent in the fb_list')
print('*-' * 40, 'EXIT')
sys.exit(0)
data = data[fb_list]
fd = dcn_model.FeatureDictionary(data, numeric_cols=NUMERIC_COLS, ignore_cols=IGNORE_COLS,
cate_cols=CATEGORICAL_COLS)
inp_layer, inp_embed = dcn_model.embedding_layers(fd)
autoencoder, encoder = dcn_model.create_autoencoder(train_deep_input.values.shape[-1], 1, noise=0.1)
if TRAINING:
autoencoder.fit(train_deep_input.values, (train_deep_input.values, train_labels['ata'].values),
epochs=1000, # 1000
batch_size=2048, # 1024
validation_split=0.1,
callbacks=[tf.keras.callbacks.EarlyStopping('val_ata_output_loss', patience=10, restore_best_weights=True)])
encoder.save_weights('../model_h5/main_encoder.hdf5')
else:
encoder.load_weights('../model_h5/main_encoder.hdf5')
encoder.trainable = False
del autoencoder
#print(type(train_labels['y_knowledge_train']))
#print(type(train_labels))
#y_train = np.vstack((train_labels, train_pre['y_knowledge_train'])).T
#y_valid = np.vstack((val_labels, val_pre['y_knowledge_val'])).T
#print(train_labels.shape)
print(train_labels.head(1))
print(train_labels.values[0])
print('*-'*40, 'The shape of train_link_inputs before', train_link_inputs.shape)
train_link_inputs = np.concatenate((train_link_inputs[:, :, :5], train_link_inputs[:, :, 6:]), axis=2)
print('*-'*40, 'The shape of train_link_inputs after', train_link_inputs.shape)
val_link_inputs = np.concatenate((val_link_inputs[:, :, :5], val_link_inputs[:, :, 6:]), axis=2)
link_nf_size, cross_nf_size = train_link_inputs.shape[2], train_cross_inputs.shape[2]
mc, es, lr = dcn_model.get_mc_es_lr_for_student('0720_2', patience=5, min_delta=1e-4)
model = dcn_model.DCN_model(inp_layer, inp_embed, link_size, cross_size, slice_size, deep_col_len, wide_col_len,
link_nf_size, cross_nf_size, encoder, conv=True)
history = model.fit(
[
X_train['weather_le'], X_train['hightemp'], X_train['lowtemp'], X_train['dayofweek'],
X_train['slice_id'], X_train['link_current_status_4'],
X_train['distance'], X_train['simple_eta'], X_train['link_time_sum'], X_train['link_count'],
X_train['cr_t_sum'], X_train['link_current_status_4_percent'], X_train['link_current_status_mean'],
X_train['pr_mean'], X_train['dc_mean'],
train_link_inputs, train_cross_inputs, train_deep_input.values, train_wide_input, train_inputs_slice],
train_labels.values,
validation_data=(
[
X_val['weather_le'], X_val['hightemp'], X_val['lowtemp'], X_val['dayofweek'],
X_val['slice_id'], X_val['link_current_status_4'],
X_val['distance'], X_val['simple_eta'], X_val['link_time_sum'], X_val['link_count'],
X_val['cr_t_sum'], X_val['link_current_status_4_percent'], X_val['link_current_status_mean'],
X_val['pr_mean'], X_val['dc_mean'],
val_link_inputs, val_cross_inputs, val_deep_input.values, val_wide_input, val_inputs_slice],
(val_labels.values),),
batch_size=2048, # 2048,1024
epochs=100, # 100
verbose=1,
# )
callbacks=[es]) # lr
np.save('../model_h5/history_0720_2.npy', history.history)
model.save_weights("../model_h5/dcn_model_0720_2.h5")
    # MODEL_PREDICT
if VAL_TO_TEST:
print('*-'*40,'val_to_test')
val_pre = val_pre.rename(columns={'order_id': 'id'})
print(val_link_inputs.shape, val_cross_inputs.shape, X_val.shape)
        print('*-' * 40, 'MODEL_PREDICT')
val_pred = model.predict(
[
X_val['weather_le'], X_val['hightemp'], X_val['lowtemp'], X_val['dayofweek'],
X_val['slice_id'], X_val['link_current_status_4'],
X_val['distance'], X_val['simple_eta'], X_val['link_time_sum'], X_val['link_count'],
X_val['cr_t_sum'], X_val['link_current_status_4_percent'], X_val['link_current_status_mean'],
X_val['pr_mean'], X_val['dc_mean'],
val_link_inputs, val_cross_inputs, val_deep_input.values, val_wide_input, val_inputs_slice],
batch_size=2048)
val_pre['val_predict'] = np.squeeze(val_pred[:, 1])
val_pre['other_predict'] = np.squeeze(val_pred[:, 0])
# val_pre['val_predict'] = val_pre['val_predict'].round(0)
        val_pre = val_pre.rename(columns={'val_predict': 'result'})  # rename column
val_pre = val_pre[['id', 'result', 'other_predict']]
val_pre['ata'] = val_labels['ata'].values
print(val_pre.head())
result_save_path = '../result_csv/val_0720_2.csv'
        print('*-' * 40, 'CSV_SAVE_PATH:', result_save_path)
        val_pre.to_csv(result_save_path, index=0)
print('..........Finish')
del X_train, train_link_inputs, train_cross_inputs, train_deep_input, \
train_wide_input, train_inputs_slice, train_labels
del X_val, val_link_inputs, val_cross_inputs, val_deep_input, val_wide_input, val_inputs_slice, val_labels
gc.collect()
#print('*-' * 40, 'EXIT')
#sys.exit(0)
print('*-' * 40, 'LOAD TEST DATA')
making_test_data_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/order_xt/'
link_test_data_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/max_170_link_sqe_for_order/'
cross_test_data_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/cross_sqe_for_order/'
link_test_data_other_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/link_sqe_for_order_other/'
head_test_data_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/head_link_data_clear/'
win_order_test_data_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/win_order_xw/'
pre_arrival_sqe_test_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/sqe_arrival_for_link/'
#h_s_for_test_link_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/max_hightmp_slice_for_link_eb/'
#pre_arrival_test_data_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/final_pre_arrival_data/'
zsl_link_test_data_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/zsl_test_link/'
#zsl_cross_test_data_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/zsl_test_cross_0703/'
test_data, _, _, _ = process.load_data(making_test_data_dir,
link_test_data_dir,
cross_test_data_dir,
link_test_data_other_dir,
head_test_data_dir,
win_order_test_data_dir,
pre_arrival_sqe_test_dir,
zsl_link_test_data_dir) #,
#h_s_for_test_link_dir)
#pre_arrival_test_data_dir)
print('*-' * 40, 'PROCESSING DATA')
link_cols_list.remove('link_arrival_status')
test_data = process.processing_data(test_data, link_cols_list, cross_cols_list, mk_cols_list, WIDE_COLS, is_test=True)
gc.collect()
print('*-' * 40, 'PROCESSING INPUTS FOR TEST_DATA', test_data.shape)
test_link_inputs, test_cross_inputs, test_deep_input, test_wide_input, \
test_inputs_slice, _ = process.processing_inputs(
test_data, mk_cols_list, link_cols_list, cross_cols_list, WIDE_COLS, arrival=False)
X_test = dcn_model.preprocess(test_data, CATEGORICAL_COLS, NUMERIC_COLS)
test_pre = test_data[['order_id']]
test_arrival_pre = test_data[['order_id']]
gc.collect()
test_pre = test_pre.rename(columns={'order_id': 'id'})
print(test_link_inputs.shape, test_cross_inputs.shape, X_test.shape, test_deep_input.shape)
    print('*-' * 40, 'MODEL_PREDICT')
test_pred = model.predict(
[
X_test['weather_le'], X_test['hightemp'], X_test['lowtemp'], X_test['dayofweek'],
X_test['slice_id'], X_test['link_current_status_4'],
X_test['distance'], X_test['simple_eta'], X_test['link_time_sum'], X_test['link_count'],
X_test['cr_t_sum'], X_test['link_current_status_4_percent'], X_test['link_current_status_mean'],
X_test['pr_mean'], X_test['dc_mean'],
test_link_inputs, test_cross_inputs, test_deep_input.values, test_wide_input, test_inputs_slice],
batch_size=2048)
test_pre['test_predict'] = np.squeeze(test_pred[:, 1])
test_pre['other_predict'] = np.squeeze(test_pred[:, 0])
# test_pre['test_predict'] = test_pre['test_predict'].round(0)
    test_pre = test_pre.rename(columns={'test_predict': 'result'})  # rename column
test_pre = test_pre[['id', 'result','other_predict']]
print(test_pre.head())
result_save_path = '../result_csv/submit_0720_2.csv'
print('*-' * 40, 'CSV_SAVE_PATH:', result_save_path)
    test_pre.to_csv(result_save_path, index=0)  # save
print('..........Finish')
|
{"/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/prepare_for_training.py": ["/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/normalize.py", "/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/generate_sinusoids.py"]}
|
17,366
|
iakirago/AiLearning-Theory-Applying
|
refs/heads/master
|
/机器学习竞赛实战_优胜解决方案/ACM SIGSPATIAL 2021 GISCUP/LGB_13700/utils.py
|
#coding=utf-8
"""
Author: Aigege
Code: https://github.com/AiIsBetter
"""
# date 2021.08.01
import multiprocessing as mp
import numpy as np
import pandas as pd
from tqdm import tqdm
from scipy.stats import kurtosis, iqr, skew
import gc
from sklearn.model_selection import KFold
from sklearn.linear_model import Ridge
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
def chunk_groups(groupby_object, chunk_size):
n_groups = groupby_object.ngroups
group_chunk, index_chunk = [], []
for i, (index, df) in enumerate(groupby_object):
group_chunk.append(df)
index_chunk.append(index)
if (i + 1) % chunk_size == 0 or i + 1 == n_groups:
group_chunk_, index_chunk_ = group_chunk.copy(), index_chunk.copy()
group_chunk, index_chunk = [], []
yield index_chunk_, group_chunk_
def parallel_apply(groups, func, index_name='Index', num_workers=1, chunk_size=100000):
n_chunks = np.ceil(1.0 * groups.ngroups / chunk_size)
indeces, features = [], []
for index_chunk, groups_chunk in tqdm(chunk_groups(groups, chunk_size), total=n_chunks):
with mp.pool.Pool(num_workers) as executor:
features_chunk = executor.map(func, groups_chunk)
for i in features_chunk:
features.append(i)
return features
def parallel_apply_fea(groups, func, index_name='Index', num_workers=1, chunk_size=100000):
n_chunks = np.ceil(1.0 * groups.ngroups / chunk_size)
indeces, features = [], []
for index_chunk, groups_chunk in chunk_groups(groups, chunk_size):
with mp.pool.Pool(num_workers) as executor:
features_chunk = executor.map(func, groups_chunk)
features.extend(features_chunk)
indeces.extend(index_chunk)
features = pd.DataFrame(features)
features.index = indeces
features.index.name = index_name
return features
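# Usage sketch for parallel_apply_fea, mirroring the calls made in the
# feature scripts of this repo (the frame and periods here are illustrative):
#
#   from functools import partial
#   groups = data_link_split.groupby('order_id')
#   func = partial(last_link_time_features, periods=[2, 5, 10])
#   feats = parallel_apply_fea(groups, func, index_name='order_id',
#                              num_workers=4, chunk_size=10000)
#
# Note: func must be picklable (a module-level function, not a lambda),
# since the group chunks are mapped across a multiprocessing pool.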
def add_features_in_group(features, gr_, feature_name, aggs, prefix):
for agg in aggs:
if agg == 'sum':
features['{}{}_sum'.format(prefix, feature_name)] = gr_[feature_name].sum()
elif agg == 'mean':
features['{}{}_mean'.format(prefix, feature_name)] = gr_[feature_name].mean()
elif agg == 'max':
features['{}{}_max'.format(prefix, feature_name)] = gr_[feature_name].max()
elif agg == 'min':
features['{}{}_min'.format(prefix, feature_name)] = gr_[feature_name].min()
elif agg == 'std':
features['{}{}_std'.format(prefix, feature_name)] = gr_[feature_name].std()
elif agg == 'count':
features['{}{}_count'.format(prefix, feature_name)] = gr_[feature_name].count()
elif agg == 'skew':
features['{}{}_skew'.format(prefix, feature_name)] = skew(gr_[feature_name])
elif agg == 'kurt':
features['{}{}_kurt'.format(prefix, feature_name)] = kurtosis(gr_[feature_name])
elif agg == 'iqr':
features['{}{}_iqr'.format(prefix, feature_name)] = iqr(gr_[feature_name])
elif agg == 'median':
features['{}{}_median'.format(prefix, feature_name)] = gr_[feature_name].median()
elif agg == 'nunique':
features['{}{}_nunique'.format(prefix, feature_name)] = gr_[feature_name].nunique()
return features
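# Usage sketch: accumulate named aggregates for one group into a dict, e.g.
#   feats = add_features_in_group({}, gr_period, 'link_time',
#                                 ['mean', 'max', 'sum'], 'zsl_last_5_')
# produces keys such as 'zsl_last_5_link_time_mean'. Unrecognized agg names
# are silently skipped.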
def reduce_mem_usage(df):
# print('reduce_mem_usage_parallel start!')
# chunk_size = df.columns.shape[0]
# start_mem = df.memory_usage().sum() / 1024 ** 2
# print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
for col in df.columns:
col_type = df[col].dtype
if col_type != object:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
df[col] = df[col].astype('category')
# end_mem = df.memory_usage().sum() / 1024 ** 2
# print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
# print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
return df
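# Note: downcasting floats as far as float16 keeps only ~3 significant
# decimal digits, which can lose precision in feature columns; a more
# conservative variant would stop at float32.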
def reduce_mem_usage_parallel(df_original,num_worker):
print('reduce_mem_usage_parallel start!')
# chunk_size = df_original.columns.shape[0]
start_mem = df_original.memory_usage().sum() / 1024 ** 2
print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
if df_original.columns.shape[0]>500:
group_chunk = []
for name in df_original.columns:
group_chunk.append(df_original[[name]])
with mp.Pool(num_worker) as executor:
df_temp = executor.map(reduce_mem_usage,group_chunk)
del group_chunk
gc.collect()
df_original = pd.concat(df_temp,axis = 1)
end_mem = df_original.memory_usage().sum() / 1024 ** 2
print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
del df_temp
gc.collect()
else:
df_original = reduce_mem_usage(df_original)
end_mem = df_original.memory_usage().sum() / 1024 ** 2
print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
return df_original
# Evaluation metric
def MAPE(true, pred):
diff = np.abs(np.array(pred) - np.array(true))
return np.mean(diff / true)
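# Worked example: MAPE(true=[100, 200], pred=[110, 180])
#   = mean(|110-100|/100, |180-200|/200) = mean(0.10, 0.10) = 0.10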
# Custom LightGBM evaluation metric
def lgb_score_mape(train_data,preds):
labels = train_data
diff = np.abs(np.array(preds) - np.array(labels))
result = np.mean(diff / labels)
return 'mape',result, False
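# Returns the LightGBM custom-metric triple (name, value, is_higher_better);
# is_higher_better is False because a lower MAPE is better.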
def ridge_feature_select(X_train, y_train, num_folds):
print("Starting feature select. Train shape: {}".format(X_train.shape))
skf = KFold(n_splits=num_folds, shuffle=True, random_state=2021)
feature_importance_df = pd.DataFrame()
oof_preds = np.zeros(X_train.shape[0])
k_fold_mape = []
for i, (trn_idx, val_idx) in enumerate(skf.split(X_train, y_train)):
clf = Ridge(alpha=1)
clf.fit(X_train.iloc[trn_idx].fillna(0), y_train.iloc[trn_idx])
oof_preds[val_idx] = clf.predict(X_train.iloc[val_idx].fillna(0))
k_fold_mape.append(MAPE(y_train.iloc[val_idx], oof_preds[val_idx]))
# print("kfold_{}_mape_score:{} ".format(i, k_fold_mape[i]))
full_mape = MAPE(y_train, oof_preds)
print("full_mape_score:{} ".format(full_mape))
return k_fold_mape,full_mape
def feature_select(X_train,y_train):
feature_importance_df_ = pd.read_csv('feature_importances.csv')
cols = feature_importance_df_[["feature", "importance"]].groupby("feature").mean().sort_values(by="importance", ascending=False).index
best_features = feature_importance_df_.loc[feature_importance_df_.feature.isin(cols)]
best_features = best_features.groupby('feature',as_index = False)['importance'].mean()
best_features = best_features.sort_values(by = 'importance',ascending=False)
data=best_features.sort_values(by="importance", ascending=False)
feature_select = list(data['feature'].values)
feature_array = []
full_mape_all = 0
count = 0
for fea in feature_select:
print(count)
count = count + 1
feature_array.append(fea)
df_select = X_train[feature_array]
k_fold_mape, full_mape = ridge_feature_select(df_select,y_train, num_folds=5)
if count == 1:
full_mape_all = full_mape
file = open('feature_select_name.txt', 'a')
file.write(fea + '\n')
file.close()
file = open('feature_select_fullauc.txt', 'a')
file.write(str(full_mape_all) + '\n')
file.close()
file = open('feature_select_kfoldauc.txt', 'a')
file.write(str(k_fold_mape) + '\n')
file.close()
del df_select
gc.collect()
continue
if full_mape_all <= full_mape:
feature_array.remove(fea)
else:
full_mape_all = full_mape
file = open('feature_select_name.txt', 'a')
file.write(fea + '\n')
file.close()
file = open('feature_select_fullauc.txt', 'a')
file.write(str(full_mape_all) + '\n')
file.close()
file = open('feature_select_kfoldauc.txt', 'a')
file.write(str(k_fold_mape) + '\n')
file.close()
del df_select
gc.collect()
|
{"/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/prepare_for_training.py": ["/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/normalize.py", "/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/generate_sinusoids.py"]}
|
17,367
|
iakirago/AiLearning-Theory-Applying
|
refs/heads/master
|
/机器学习竞赛实战_优胜解决方案/ACM SIGSPATIAL 2021 GISCUP/LGB_13700/3_link_fea_order_id_level.py
|
#coding=utf-8
"""
Author: Aigege
Code: https://github.com/AiIsBetter
"""
# date 2021.08.01
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.feature_extraction.text import CountVectorizer
import networkx as nx
import os
import gc
import warnings
from utils import parallel_apply_fea,add_features_in_group
from functools import partial
warnings.filterwarnings("ignore")
def last_k_link_time_interval(gr, periods):
gr_ = gr.copy()
gr_ = gr_.iloc[::-1]
    gr_['t_i_v'] = gr_['link_time'].diff().fillna(0)
    gr_['c_s_v'] = gr_['link_current_status'].diff().fillna(0)
gr_ = gr_.drop_duplicates().reset_index(drop = True)
    # link time changes
features = {}
for period in periods:
if period > 10e5:
period_name = 'zsl_link_time_interval_all'
gr_period = gr_.copy()
else:
period_name = 'zsl_link_time_interval_last_{}_'.format(period)
gr_period = gr_.iloc[:period]
features = add_features_in_group(features, gr_period, 't_i_v',
['mean','max', 'min', 'std','skew','sum'],
# ['diff'],
period_name)
    # current status changes
for period in periods:
if period > 10e5:
period_name = 'zsl_link_current_status_interval_all'
gr_period = gr_.copy()
else:
period_name = 'zsl_link_current_status_interval_last_{}_'.format(period)
gr_period = gr_.iloc[:period]
features = add_features_in_group(features, gr_period, 'c_s_v',
['mean', 'std', 'skew'],
# ['diff'],
period_name)
return features
# aggregates over the last k links (link_time stats, current-status stats)
def last_link_time_features(gr,periods):
gr_ = gr.copy()
gr_ = gr_.iloc[::-1]
features = {}
for period in periods:
if period > 10e5:
period_name = 'zsl_all_'
gr_period = gr_.copy()
else:
period_name = 'zsl_last_{}_'.format(period)
gr_period = gr_.iloc[:period]
features = add_features_in_group(features, gr_period, 'link_time',
['max', 'sum', 'mean','min','skew','std'],
period_name)
features = add_features_in_group(features, gr_period, 'link_current_status',
['mean', 'nunique'],
period_name)
return features
# last k link id time trend
def trend_in_last_k_link_id_time(gr, periods):
gr_ = gr.copy()
gr_ = gr_.iloc[::-1]
features = {}
for period in periods:
gr_period = gr_.iloc[:period]
features = add_trend_feature(features, gr_period,
'link_time', 'zsl_{}_period_trend_'.format(period)
)
return features
# trend feature
def add_trend_feature(features, gr, feature_name, prefix):
y = gr[feature_name].values
try:
x = np.arange(0, len(y)).reshape(-1, 1)
lr = LinearRegression()
lr.fit(x, y)
trend = lr.coef_[0]
except:
trend = np.nan
features['{}{}'.format(prefix, feature_name)] = trend
return features
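# The "trend" is the OLS slope of the feature against its position index;
# a toy check: add_trend_feature({}, pd.DataFrame({'link_time':
# [1.0, 2.0, 3.0]}), 'link_time', 'demo_') -> {'demo_link_time': 1.0}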
def slice_id_change(x):
hour = x * 5 / 60
hour = np.floor(hour)
hour += 8
if hour >= 24:
hour = hour - 24
return hour
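# Worked examples (slice ids index 5-minute windows starting at 08:00):
#   slice_id_change(0)   -> 8.0   (08:00)
#   slice_id_change(144) -> 20.0  (12 h after 08:00)
#   slice_id_change(200) -> 0.0   (16 h after 08:00 wraps past midnight)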
if __name__ == '__main__':
nrows = None
root_path = '../data/giscup_2021/'
read_idkey = np.load(root_path + 'id_key_to_connected_allday.npy', allow_pickle=True).item()
read_grapheb = np.load(root_path + 'graph_embeddings_retp1_directed.npy', allow_pickle=True).item()
read_grapheb_retp = np.load(root_path + 'graph_embeddings_retp05_directed.npy', allow_pickle=True).item()
for i in read_grapheb:
read_grapheb[i] = list(read_grapheb[i]) + list(read_grapheb_retp[i])
del read_grapheb_retp
head_columns = ['order_id', 'ata', 'distance', 'simple_eta', 'driver_id','slice_id']
embedding_k = 256
fill_list = [0] * embedding_k
#######################################nextlinks #######################################
nextlinks = pd.read_csv(root_path+'nextlinks.txt', sep=' ', header=None)
nextlinks.columns=['from_id', 'to_id']
nextlinks['to_id'] = nextlinks['to_id'].astype('str')
nextlinks['to_id'] = nextlinks['to_id'].apply(lambda x: x.split(","))
nextlinks = pd.DataFrame({'from_id':nextlinks.from_id.repeat(nextlinks.to_id.str.len()),
'to_id':np.concatenate(nextlinks.to_id.values)})
from_id_weight = nextlinks['from_id'].value_counts()
from_id_weight = from_id_weight.to_frame()
from_id_weight['index'] = from_id_weight.index
from_id_weight.columns=['weight', 'from_id']
nextlinks = pd.merge(nextlinks,from_id_weight, 'left', on=['from_id'])
nextlinks = nextlinks.sort_values(by='weight',ascending=False)
G = nx.DiGraph()
from_id = nextlinks['from_id'].astype(str).to_list()
to_id = nextlinks['to_id'].to_list()
weight = nextlinks['weight'].to_list()
edge_tuple = list(zip(from_id, to_id,weight))
print('adding')
G.add_weighted_edges_from(edge_tuple)
dc = nx.algorithms.centrality.degree_centrality(G)
dc = sorted(dc.items(), key=lambda d: d[1],reverse=True)
dc = dc[:50000]
dc = [str(i[0]) for i in dc ]
#######################################link #######################################
for name in os.listdir(root_path+'train/'):
data_time = name.split('.')[0]
if data_time=='20200803':
continue
train = pd.read_csv(root_path+'train/{}'.format(name),sep= ';;',header=None,nrows=nrows)
print("开始处理", data_time)
train_head = pd.DataFrame(train[0].str.split(' ').tolist(),columns = ['order_id', 'ata', 'distance','simple_eta', 'driver_id', 'slice_id'])
train_head['order_id'] = train_head['order_id'].astype(str)
train_head['ata'] = train_head['ata'].astype(float)
train_head['distance'] = train_head['distance'].astype(float)
train_head['simple_eta'] = train_head['simple_eta'].astype(float)
train_head['driver_id'] = train_head['driver_id'].astype(int)
train_head['slice_id'] = train_head['slice_id'].astype(int)
#link preprocess
data_link = train[[1]]
data_link['index'] = train_head.index
data_link['order_id'] = train_head['order_id']
data_link['ata'] = train_head['ata']
data_link['distance'] = train_head['distance']
data_link['simple_eta'] = train_head['simple_eta']
data_link['slice_id'] = train_head['slice_id']
# data_link['slice_id'] = data_link['slice_id'].apply(slice_id_change)
gc.collect()
data_link_split = data_link[1].str.split(' ', expand=True).stack().to_frame()
data_link_split = data_link_split.reset_index(level=1, drop=True).rename(columns={0: 'link_info'})
# data_link_split = data_link_split.reset_index(drop=True)
data_link_split = data_link[['order_id', 'index', 'ata', 'distance', 'simple_eta', 'slice_id']].join(
data_link_split)
data_link_split = data_link_split.reset_index(drop=True)
data_link_split[['link_id',
'link_time',
'link_ratio',
'link_current_status',
'link_arrival_status']] = data_link_split['link_info'].str.split(':|,', 5, expand=True)
data_link_split = data_link_split.drop(['link_info'], axis=1)
data_link_split['link_ratio'] = data_link_split['link_ratio'].astype(float)
data_link_split['link_time'] = data_link_split['link_time'].astype(float)
data_link_split['link_current_status'] = data_link_split['link_current_status'].astype(int)
print('preprocess finish!')
print('start feature engineering')
feature = train_head[['order_id', 'distance']]
###################static fea#############################################
#######################order link id count###############################
df = data_link_split.groupby('order_id', as_index=False)
tmp_linkid_agg = df['link_id'].agg({'zsl_order_link_id_count': 'count'})
tmp_linkid_agg['zsl_order_link_id_count_bins'] = 0
tmp_linkid_agg.loc[(tmp_linkid_agg['zsl_order_link_id_count']>=75)&(tmp_linkid_agg['zsl_order_link_id_count']<100),'zsl_order_link_id_count_bins']=1
tmp_linkid_agg.loc[(tmp_linkid_agg['zsl_order_link_id_count']>=100)&(tmp_linkid_agg['zsl_order_link_id_count']<120),'zsl_order_link_id_count_bins']=2
tmp_linkid_agg.loc[(tmp_linkid_agg['zsl_order_link_id_count']>=120),'zsl_order_link_id_count_bins']=3
feature = feature.merge(tmp_linkid_agg,on='order_id',how='left')
print('order link id count finish!')
#######################order link id & distance###############################
feature['zsl_order_is_highspeed'] = 0
feature.loc[(feature['distance']>90000)&(feature['zsl_order_link_id_count']<300),'zsl_order_is_highspeed'] = 1
print('order link id & distance finish!')
#######################order link id & nextlinks centry###############################
tmp = data_link_split[data_link_split['link_id'].isin(dc)]
tmp = tmp.groupby('order_id', as_index=False)
tmp_linkid_centry_count = tmp['link_id'].agg({'zsl_order_link_id_centry_count': 'count'})
feature = feature.merge(tmp_linkid_centry_count,on='order_id',how='left')
feature['zsl_order_link_id_centry_count'] = feature['zsl_order_link_id_centry_count'].fillna(0)
print('order link id & nextlinks centry finish!')
#######################order link time sum mean max min var std###############################
tmp_linktime_agg = df['link_time'].agg({'zsl_order_link_time_sum': 'sum','zsl_order_link_time_mean': 'mean',
'zsl_order_link_time_max': 'max','zsl_order_link_time_min': 'min',
'zsl_order_link_time_var': 'var','zsl_order_link_time_skew': 'skew'})
feature = feature.merge(tmp_linktime_agg,on='order_id',how='left')
print('order link time sum mean max min var std finish!')
#######################order link current status mean nunique###############################
tmp_linktime_agg = df['link_current_status'].agg({'zsl_link_current_status_mean': 'mean', 'zsl_link_current_status_nunique': 'nunique'})
feature = feature.merge(tmp_linktime_agg, on='order_id', how='left')
print('order link current status mean nunique finish!')
#######################order link current status count vector###############################
data_link_split['link_current_status'] = data_link_split['link_current_status'].astype(str)
data_link_split.loc[data_link_split['link_current_status'].astype(int)<0,'link_current_status'] = '0'
data_link_split.loc[data_link_split['link_current_status'].astype(int)>3,'link_current_status'] = '3'
data = data_link_split.groupby('order_id')['link_current_status'].apply(lambda x: x.str.cat(sep=',')).reset_index()
cv_encode = CountVectorizer(token_pattern=u'(?u)\\b\\w+\\b')
train_x = cv_encode.fit_transform(data['link_current_status'])
train_x = train_x.toarray()
link_current_status = pd.DataFrame(train_x, columns=['zsl_link_current_status0', 'zsl_link_current_status1', 'zsl_link_current_status2',
'zsl_link_current_status3'])
data = pd.concat([data[['order_id']],link_current_status],axis=1)
feature = feature.merge(data, on='order_id', how='left')
print('order link current status count vector finish!')
#######################order distance/link_id_count###############################
feature['zsl_distance_div_link_id_count'] = feature['distance']*10/feature['zsl_order_link_id_count']
feature = feature.drop('distance', axis=1)
print('order distance div link_id_count finish!')
#######################order link ratio sum mean max min var std###############################
tmp_linkratio_agg = df['link_ratio'].agg({'zsl_order_link_ratio_sum': 'sum', 'zsl_order_link_ratio_mean': 'mean',
'zsl_order_link_ratio_min': 'min',
'zsl_order_link_ratio_var': 'var', 'zsl_order_link_ratio_skew': 'skew'})
feature = feature.merge(tmp_linkratio_agg, on='order_id', how='left')
print('order link ratio sum mean max min var std finish!')
#######################weather###################################################################
weather = pd.read_csv(root_path+'weather.csv')
weather_dict={'rainstorm':0,'heavy rain':1,'moderate rain':2,'cloudy':3,
'showers':4}
weather['weather'] = weather['weather'].map(weather_dict)
weather['date'] = weather['date'].astype(str)
weather=weather[weather['date']==data_time]
feature['weather'] = weather['weather'].values[0]
feature['hightemp'] = weather['hightemp'].values[0]
feature['lowtemp'] = weather['lowtemp'].values[0]
print('weather finish!')
###################trend fea#############################################
###################trend link time#####################################
data_link_split['link_current_status'] = data_link_split['link_current_status'].astype(int)
groupby = data_link_split.groupby(['order_id'])
func = partial(trend_in_last_k_link_id_time, periods=[2, 5, 7, 10, 15, 20, 30, 50, 80, 100, 100000000])
g = parallel_apply_fea(groupby, func, index_name='order_id', num_workers=20, chunk_size=10000)
feature = feature.merge(g, on='order_id', how='left')
func = partial(last_link_time_features, periods=[2, 5, 7, 10, 15, 20, 30, 50, 80, 100, 100000000])
g = parallel_apply_fea(groupby, func, index_name='order_id', num_workers=20, chunk_size=10000)
feature = feature.merge(g, on='order_id', how='left')
func = partial(last_k_link_time_interval, periods=[2, 5, 7, 10, 15, 20, 30, 50, 80, 100, 100000000])
g = parallel_apply_fea(groupby, func, index_name='order_id', num_workers=20, chunk_size=10000)
feature = feature.merge(g, on='order_id', how='left')
print('trend link time finish!')
####################nextlinks graph embedding#######################
data_link_split['link_id'] = data_link_split['link_id'].astype(int)
data_link_split['link_id'] = data_link_split['link_id'].map(read_idkey)
data_link_split['link_id'] = data_link_split['link_id'].fillna(0)
data_link_split['link_id'] = data_link_split['link_id'].astype(int)
data_link_split['link_id'] = data_link_split['link_id'].map(read_grapheb)
data_link_split['link_id'] = data_link_split['link_id'].fillna('0')
def replace_list(x):
if isinstance(x, str):
x = fill_list
return x
data_link_split['link_id'] = data_link_split['link_id'].apply(replace_list)
link_id_col = ['zsl_link_id_eb{}'.format(i) for i in range(embedding_k)]
agg_col = dict(zip(link_id_col, ['mean'] * len(link_id_col)))
link_id_array = np.array(data_link_split.pop('link_id').to_list())
link_id_array = pd.DataFrame(link_id_array, columns=agg_col, dtype=np.float16)
data_link_split = pd.concat([data_link_split, link_id_array], axis=1)
tmp = data_link_split.groupby('order_id', as_index=False)
tmp_linkid_agg = tmp.agg(agg_col)
feature = feature.merge(tmp_linkid_agg, on='order_id', how='left')
feature.to_csv(root_path + 'feature/train/link_fea_order_id_level_{}.csv'.format(data_time), index=False)
del train
gc.collect()
test = pd.read_csv(root_path+'20200901_test.txt',sep= ';;',header=None,nrows=nrows)
test_head = pd.DataFrame(test[0].str.split(' ').tolist(),columns = ['order_id', 'ata', 'distance','simple_eta', 'driver_id', 'slice_id'])
test_head['order_id'] = test_head['order_id'].astype(str)
test_head['ata'] = test_head['ata'].astype(float)
test_head['distance'] = test_head['distance'].astype(float)
test_head['simple_eta'] = test_head['simple_eta'].astype(float)
test_head['driver_id'] = test_head['driver_id'].astype(int)
test_head['slice_id'] = test_head['slice_id'].astype(int)
# link preprocess
data_link = test[[1]]
data_link['index'] = test_head.index
data_link['order_id'] = test_head['order_id']
data_link['ata'] = test_head['ata']
data_link['distance'] = test_head['distance']
data_link['simple_eta'] = test_head['simple_eta']
data_link['slice_id'] = test_head['slice_id']
# data_link['slice_id'] = data_link['slice_id'].apply(slice_id_change)
gc.collect()
data_link_split = data_link[1].str.split(' ', expand=True).stack().to_frame()
data_link_split = data_link_split.reset_index(level=1, drop=True).rename(columns={0: 'link_info'})
# data_link_split = data_link_split.reset_index(drop=True)
data_link_split = data_link[['order_id', 'index', 'ata', 'distance', 'simple_eta', 'slice_id']].join(
data_link_split)
data_link_split = data_link_split.reset_index(drop=True)
data_link_split[['link_id',
'link_time',
'link_ratio',
'link_current_status',
'link_arrival_status']] = data_link_split['link_info'].str.split(':|,', 5, expand=True)
data_link_split = data_link_split.drop(['link_info'], axis=1)
data_link_split['link_ratio'] = data_link_split['link_ratio'].astype(float)
data_link_split['link_time'] = data_link_split['link_time'].astype(float)
data_link_split['link_current_status'] = data_link_split['link_current_status'].astype(int)
print('preprocess finish!')
print('start feature engineering')
feature = test_head[['order_id', 'distance']]
###################static fea#############################################
#######################order link id count###############################
df = data_link_split.groupby('order_id', as_index=False)
tmp_linkid_agg = df['link_id'].agg({'zsl_order_link_id_count': 'count'})
tmp_linkid_agg['zsl_order_link_id_count_bins'] = 0
tmp_linkid_agg.loc[(tmp_linkid_agg['zsl_order_link_id_count'] >= 75) & (
tmp_linkid_agg['zsl_order_link_id_count'] < 100), 'zsl_order_link_id_count_bins'] = 1
tmp_linkid_agg.loc[(tmp_linkid_agg['zsl_order_link_id_count'] >= 100) & (
tmp_linkid_agg['zsl_order_link_id_count'] < 120), 'zsl_order_link_id_count_bins'] = 2
tmp_linkid_agg.loc[(tmp_linkid_agg['zsl_order_link_id_count'] >= 120), 'zsl_order_link_id_count_bins'] = 3
feature = feature.merge(tmp_linkid_agg, on='order_id', how='left')
print('order link id count finish!')
#######################order link id & distance###############################
feature['zsl_order_is_highspeed'] = 0
feature.loc[
(feature['distance'] > 90000) & (feature['zsl_order_link_id_count'] < 300), 'zsl_order_is_highspeed'] = 1
print('order link id & distance finish!')
#######################order link id & nextlinks centry###############################
tmp = data_link_split[data_link_split['link_id'].isin(dc)]
tmp = tmp.groupby('order_id', as_index=False)
tmp_linkid_centry_count = tmp['link_id'].agg({'zsl_order_link_id_centry_count': 'count'})
feature = feature.merge(tmp_linkid_centry_count, on='order_id', how='left')
feature['zsl_order_link_id_centry_count'] = feature['zsl_order_link_id_centry_count'].fillna(0)
print('order link id & nextlinks centry finish!')
#######################order link time sum mean max min var std###############################
tmp_linktime_agg = df['link_time'].agg({'zsl_order_link_time_sum': 'sum', 'zsl_order_link_time_mean': 'mean',
'zsl_order_link_time_max': 'max', 'zsl_order_link_time_min': 'min',
'zsl_order_link_time_var': 'var', 'zsl_order_link_time_skew': 'skew'})
feature = feature.merge(tmp_linktime_agg, on='order_id', how='left')
print('order link time sum mean max min var std finish!')
#######################order link current status mean nunique###############################
tmp_linktime_agg = df['link_current_status'].agg(
{'zsl_link_current_status_mean': 'mean', 'zsl_link_current_status_nunique': 'nunique'})
feature = feature.merge(tmp_linktime_agg, on='order_id', how='left')
print('order link current status mean nunique finish!')
#######################order link current status count vector###############################
data_link_split['link_current_status'] = data_link_split['link_current_status'].astype(str)
data_link_split.loc[data_link_split['link_current_status'].astype(int) < 0, 'link_current_status'] = '0'
data_link_split.loc[data_link_split['link_current_status'].astype(int) > 3, 'link_current_status'] = '3'
data = data_link_split.groupby('order_id')['link_current_status'].apply(lambda x: x.str.cat(sep=',')).reset_index()
cv_encode = CountVectorizer(token_pattern=u'(?u)\\b\\w+\\b')
test_x = cv_encode.fit_transform(data['link_current_status'])
test_x = test_x.toarray()
link_current_status = pd.DataFrame(test_x, columns=['zsl_link_current_status0', 'zsl_link_current_status1',
'zsl_link_current_status2',
'zsl_link_current_status3'])
data = pd.concat([data[['order_id']], link_current_status], axis=1)
feature = feature.merge(data, on='order_id', how='left')
print('order link current status count vector finish!')
#######################order distance/link_id_count###############################
feature['zsl_distance_div_link_id_count'] = feature['distance'] * 10 / feature['zsl_order_link_id_count']
feature = feature.drop('distance', axis=1)
print('order distance div link_id_count finish!')
#######################order link ratio sum mean max min var std###############################
tmp_linkratio_agg = df['link_ratio'].agg({'zsl_order_link_ratio_sum': 'sum', 'zsl_order_link_ratio_mean': 'mean',
'zsl_order_link_ratio_min': 'min',
'zsl_order_link_ratio_var': 'var', 'zsl_order_link_ratio_skew': 'skew'})
feature = feature.merge(tmp_linkratio_agg, on='order_id', how='left')
print('order link ratio sum mean max min var std finish!')
#######################weather###################################################################
weather = pd.read_csv(root_path + 'weather.csv')
weather_dict = {'rainstorm': 0, 'heavy rain': 1, 'moderate rain': 2, 'cloudy': 3,
'showers': 4}
weather['weather'] = weather['weather'].map(weather_dict)
weather['date'] = weather['date'].astype(str)
weather = weather[weather['date'] == data_time]
feature['weather'] = weather['weather'].values[0]
feature['hightemp'] = weather['hightemp'].values[0]
feature['lowtemp'] = weather['lowtemp'].values[0]
print('weather finish!')
###################trend fea#############################################
###################trend link time#####################################
data_link_split['link_current_status'] = data_link_split['link_current_status'].astype(int)
groupby = data_link_split.groupby(['order_id'])
func = partial(trend_in_last_k_link_id_time, periods=[2, 5, 7, 10, 15, 20, 30, 50, 80, 100, 100000000])
g = parallel_apply_fea(groupby, func, index_name='order_id', num_workers=20, chunk_size=10000)
feature = feature.merge(g, on='order_id', how='left')
func = partial(last_link_time_features, periods=[2, 5, 7, 10, 15, 20, 30, 50, 80, 100, 100000000])
g = parallel_apply_fea(groupby, func, index_name='order_id', num_workers=20, chunk_size=10000)
feature = feature.merge(g, on='order_id', how='left')
func = partial(last_k_link_time_interval, periods=[2, 5, 7, 10, 15, 20, 30, 50, 80, 100, 100000000])
g = parallel_apply_fea(groupby, func, index_name='order_id', num_workers=20, chunk_size=10000)
feature = feature.merge(g, on='order_id', how='left')
print('trend link time finish!')
####################nextlinks graph embedding#######################
data_link_split['link_id'] = data_link_split['link_id'].astype(int)
data_link_split['link_id'] = data_link_split['link_id'].map(read_idkey)
data_link_split['link_id'] = data_link_split['link_id'].fillna(0)
data_link_split['link_id'] = data_link_split['link_id'].astype(int)
data_link_split['link_id'] = data_link_split['link_id'].map(read_grapheb)
data_link_split['link_id'] = data_link_split['link_id'].fillna('0')
def replace_list(x):
if isinstance(x, str):
x = fill_list
return x
data_link_split['link_id'] = data_link_split['link_id'].apply(replace_list)
link_id_col = ['zsl_link_id_eb{}'.format(i) for i in range(embedding_k)]
agg_col = dict(zip(link_id_col, ['mean'] * len(link_id_col)))
link_id_array = np.array(data_link_split.pop('link_id').to_list())
link_id_array = pd.DataFrame(link_id_array, columns=agg_col, dtype=np.float16)
data_link_split = pd.concat([data_link_split, link_id_array], axis=1)
tmp = data_link_split.groupby('order_id', as_index=False)
tmp_linkid_agg = tmp.agg(agg_col)
feature = feature.merge(tmp_linkid_agg, on='order_id', how='left')
feature.to_csv(root_path+'feature/test/link_fea_order_id_level_20200901.csv',index=False)
|
{"/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/prepare_for_training.py": ["/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/normalize.py", "/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/generate_sinusoids.py"]}
|
17,368
|
iakirago/AiLearning-Theory-Applying
|
refs/heads/master
|
/机器学习算法原理及推导/其它/第二章——手写线性回归算法/LinearRegression/MultivariateLinearRegression.py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import plotly
import plotly.graph_objs as go
from linear_regression import LinearRegression
plotly.offline.init_notebook_mode()  # render charts inline in the notebook
data = pd.read_csv('../data/world-happiness-report-2017.csv')
train_data = data.sample(frac=0.8)
test_data = data.drop(train_data.index)
# Compared with the single-feature model, this just adds one more feature column
input_param_name_1 = 'Economy..GDP.per.Capita.'
input_param_name_2 = 'Freedom'
output_param_name = 'Happiness.Score'
# Loss with two features: 0.08517538069974877
x_train = train_data[[input_param_name_1, input_param_name_2]].values
# Loss with all features: 0.0019415807477718364
# feat_list = list(train_data.columns.drop(['Happiness.Score','Country']))
# x_train = train_data[feat_list].values
y_train = train_data[[output_param_name]].values
x_test = test_data[[input_param_name_1, input_param_name_2]].values
y_test = test_data[[output_param_name]].values
# Configure the plot with training dataset.
plot_training_trace = go.Scatter3d(
x=x_train[:, 0].flatten(),
y=x_train[:, 1].flatten(),
z=y_train.flatten(),
name='Training Set',
mode='markers',
marker={
'size': 10,
'opacity': 1,
'line': {
'color': 'rgb(255, 255, 255)',
'width': 1
},
}
)
plot_test_trace = go.Scatter3d(
x=x_test[:, 0].flatten(),
y=x_test[:, 1].flatten(),
z=y_test.flatten(),
name='Test Set',
mode='markers',
marker={
'size': 10,
'opacity': 1,
'line': {
'color': 'rgb(255, 255, 255)',
'width': 1
},
}
)
plot_layout = go.Layout(
    title='Data Sets',
scene={
'xaxis': {'title': input_param_name_1},
'yaxis': {'title': input_param_name_2},
'zaxis': {'title': output_param_name}
},
margin={'l': 0, 'r': 0, 'b': 0, 't': 0}
)
plot_data = [plot_training_trace, plot_test_trace]
plot_figure = go.Figure(data=plot_data, layout=plot_layout)
plotly.offline.plot(plot_figure)
num_iterations = 500
learning_rate = 0.01
polynomial_degree = 0
sinusoid_degree = 0
linear_regression = LinearRegression(x_train, y_train, polynomial_degree, sinusoid_degree)
(theta, cost_history) = linear_regression.train(learning_rate, num_iterations)
print('Initial loss:', cost_history[0])
print('Loss after training:', cost_history[-1])
plt.plot(range(num_iterations), cost_history)
plt.xlabel('Iterations')
plt.ylabel('Cost')
plt.title('Gradient Descent')
plt.show()
predictions_num = 10
x_min = x_train[:, 0].min()
x_max = x_train[:, 0].max()
y_min = x_train[:, 1].min()
y_max = x_train[:, 1].max()
x_axis = np.linspace(x_min, x_max, predictions_num)
y_axis = np.linspace(y_min, y_max, predictions_num)
x_predictions = np.zeros((predictions_num * predictions_num, 1))
y_predictions = np.zeros((predictions_num * predictions_num, 1))
x_y_index = 0
for x_index, x_value in enumerate(x_axis):
for y_index, y_value in enumerate(y_axis):
x_predictions[x_y_index] = x_value
y_predictions[x_y_index] = y_value
x_y_index += 1
z_predictions = linear_regression.predict(np.hstack((x_predictions, y_predictions)))
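# Equivalent vectorized construction of the same prediction grid, kept as a
# consistency check against the nested loop above:
grid = np.hstack((np.repeat(x_axis, predictions_num).reshape(-1, 1),
                  np.tile(y_axis, predictions_num).reshape(-1, 1)))
assert np.allclose(grid, np.hstack((x_predictions, y_predictions)))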
plot_predictions_trace = go.Scatter3d(
x=x_predictions.flatten(),
y=y_predictions.flatten(),
z=z_predictions.flatten(),
name='Prediction Plane',
mode='markers',
marker={
'size': 1,
},
opacity=0.8,
surfaceaxis=2,
)
plot_data = [plot_training_trace, plot_test_trace, plot_predictions_trace]
plot_figure = go.Figure(data=plot_data, layout=plot_layout)
plotly.offline.plot(plot_figure)
|
{"/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/prepare_for_training.py": ["/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/normalize.py", "/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/generate_sinusoids.py"]}
|
17,369
|
iakirago/AiLearning-Theory-Applying
|
refs/heads/master
|
/机器学习竞赛实战_优胜解决方案/ACM SIGSPATIAL 2021 GISCUP/WD_128544/wd_model/wd_model.py
|
import pandas as pd
import numpy as np
from tensorflow import keras
import tensorflow as tf
from sklearn.preprocessing import StandardScaler, LabelEncoder
import tensorflow.keras.layers as L
# import tensorflow.keras.models as M
import tensorflow.keras.backend as K
from tensorflow.python.client import device_lib
from tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping
from keras_radam.training import RAdamOptimizer
from tensorflow.keras import initializers, regularizers, constraints, optimizers, layers, callbacks
from tensorflow.keras.layers import GlobalAveragePooling1D, GlobalMaxPooling1D, concatenate, SpatialDropout1D, Conv1D
from tensorflow.keras.layers import Input, Dense, Lambda, Layer
from tensorflow.keras.initializers import Constant
from tensorflow.keras.models import Model
from tensorflow.keras.applications import ResNet50
def get_available_gpus():
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
def gru_layer(hidden_dim, dropout):
return L.Bidirectional(L.GRU(
hidden_dim, dropout=dropout, return_sequences=True, kernel_initializer='orthogonal'))
def lstm_layer(hidden_dim, dropout):
return L.Bidirectional(L.LSTM(
hidden_dim, dropout=dropout, return_sequences=True, kernel_initializer='orthogonal'))
def preprocess(df, cate_cols, numeric_cols):
for cl in cate_cols:
le = LabelEncoder()
df[cl] = le.fit_transform(df[cl])
cols = cate_cols + numeric_cols
X_train = df[cols]
return X_train
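# Usage sketch (hypothetical column names):
#   X = preprocess(df, cate_cols=['weather_le'], numeric_cols=['distance'])
# Caveat: LabelEncoder is fit per call, so encoding train and test in
# separate calls can map the same category to different integers.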
def wd_model(link_size, cross_size, slice_size, input_deep_col, input_wide_col,
link_nf_size, cross_nf_size, link_seqlen=170, cross_seqlen=12, pred_len=1,
dropout=0.25, sp_dropout=0.1, embed_dim=64, hidden_dim=128, n_layers=3, lr=0.001,
kernel_size1=3, kernel_size2=2, conv_size=128, conv='conv'):
link_inputs = L.Input(shape=(link_seqlen, link_nf_size))
cross_inputs = L.Input(shape=(cross_seqlen, cross_nf_size))
deep_inputs = L.Input(shape=(input_deep_col,), name='deep_input')
slice_input = L.Input(shape=(1,))
wide_inputs = keras.layers.Input(shape=(input_wide_col,), name='wide_input')
# link----------------------------
categorical_fea1 = link_inputs[:, :, :1]
numerical_fea1 = link_inputs[:, :, 1:5]
embed = L.Embedding(input_dim=link_size, output_dim=embed_dim)(categorical_fea1)
reshaped = tf.reshape(embed, shape=(-1, embed.shape[1], embed.shape[2] * embed.shape[3]))
#reshaped = L.SpatialDropout1D(sp_dropout)(reshaped)
hidden = L.concatenate([reshaped, numerical_fea1], axis=2)
hidden = L.SpatialDropout1D(sp_dropout)(hidden)
"""
categorical_ar_st = link_inputs[:, :, 5:6]
categorical_ar_st = L.Masking(mask_value=-1, name='categorical_ar_st')(categorical_ar_st)
embed_ar_st = L.Embedding(input_dim=(-1,289), output_dim=8)(categorical_ar_st)
reshaped_ar_st = tf.reshape(embed_ar_st, shape=(-1, embed_ar_st.shape[1], embed_ar_st.shape[2] * embed_ar_st.shape[3]))
reshaped_ar_st = L.SpatialDropout1D(sp_dropout)(reshaped_ar_st)
categorical_ar_sl = link_inputs[:, :, 6:7]
categorical_ar_sl = L.Masking(mask_value=-1, name='categorical_ar_sl')(categorical_ar_sl)
embed_ar_sl = L.Embedding(input_dim=(-1, 289), output_dim=8)(categorical_ar_sl)
reshaped_ar_sl = tf.reshape(embed_ar_sl, shape=(-1, embed_ar_sl.shape[1], embed_ar_sl.shape[2] * embed_ar_sl.shape[3]))
reshaped_ar_sl = L.SpatialDropout1D(sp_dropout)(reshaped_ar_sl)
hidden = L.concatenate([reshaped, reshaped_ar_st, reshaped_ar_sl, numerical_fea1],axis=2)
"""
for x in range(n_layers):
hidden = lstm_layer(hidden_dim, dropout)(hidden)
if conv=='conv':
#x_conv1 = Conv1D(conv_size, kernel_size=kernel_size1, padding='valid', kernel_initializer='he_uniform')(hidden)
avg_pool1_gru = GlobalAveragePooling1D()(hidden)
max_pool1_gru = GlobalMaxPooling1D()(hidden)
truncated_link = concatenate([avg_pool1_gru, max_pool1_gru])
elif conv=='resnet50':
truncated_link = ResNet50(include_top=False, pooling='max', weights=None)(hidden)
else:
truncated_link = hidden[:, :pred_len]
truncated_link = L.Flatten()(truncated_link)
# cross----------------------------
categorical_fea2 = cross_inputs[:, :, :1]
numerical_fea2 = cross_inputs[:, :, 1:]
embed2 = L.Embedding(input_dim=cross_size, output_dim=embed_dim)(categorical_fea2)
reshaped2 = tf.reshape(embed2, shape=(-1, embed2.shape[1], embed2.shape[2] * embed2.shape[3]))
#reshaped2 = L.SpatialDropout1D(sp_dropout)(reshaped2)
hidden2 = L.concatenate([reshaped2, numerical_fea2], axis=2)
hidden2 = L.SpatialDropout1D(sp_dropout)(hidden2)
for x in range(n_layers):
hidden2 = lstm_layer(hidden_dim, dropout)(hidden2)
if conv=='conv':
#x_conv3 = Conv1D(conv_size, kernel_size=kernel_size1, padding='valid', kernel_initializer='he_uniform')(hidden2)
avg_pool3_gru = GlobalAveragePooling1D()(hidden2)
max_pool3_gru = GlobalMaxPooling1D()(hidden2)
truncated_cross = concatenate([avg_pool3_gru, max_pool3_gru])
elif conv=='resnet50':
truncated_cross = ResNet50(include_top=False, pooling='max', weights=None)(hidden2)
else:
truncated_cross = hidden2[:, :pred_len]
truncated_cross = L.Flatten()(truncated_cross)
# slice----------------------------
embed_slice = L.Embedding(input_dim=slice_size, output_dim=1)(slice_input)
embed_slice = L.Flatten()(embed_slice)
# deep_inputs
"""
dense_hidden1 = L.Dense(256, activation="relu")(deep_inputs)
dense_hidden1 = L.Dropout(dropout)(dense_hidden1)
dense_hidden2 = L.Dense(256, activation="relu")(dense_hidden1)
dense_hidden2 = L.Dropout(dropout)(dense_hidden2)
dense_hidden3 = L.Dense(128, activation="relu")(dense_hidden2)
"""
x = L.Dense(512, activation="relu")(deep_inputs)
x = L.BatchNormalization()(x)
x = L.Lambda(tf.keras.activations.swish)(x)
x = L.Dropout(0.25)(x)
for i in range(2):
x = L.Dense(256)(x)
x = L.BatchNormalization()(x)
x = L.Lambda(tf.keras.activations.swish)(x)
x = L.Dropout(0.25)(x)
dense_hidden3 = L.Dense(64,activation='linear')(x)
# main-------------------------------
truncated = L.concatenate([truncated_link, truncated_cross, dense_hidden3, wide_inputs, embed_slice]) # WD
"""
truncated = L.BatchNormalization()(truncated)
truncated = L.Dropout(dropout)(L.Dense(512, activation='relu') (truncated))
truncated = L.BatchNormalization()(truncated)
truncated = L.Dropout(dropout)(L.Dense(256, activation='relu') (truncated))
"""
truncated = L.BatchNormalization()(truncated)
truncated = L.Dropout(dropout)(L.Dense(1024, activation='relu') (truncated))
truncated = L.Dropout(dropout)(truncated)
for i in range(2):
truncated = L.Dense(512)(truncated)
truncated = L.BatchNormalization()(truncated)
truncated = L.Lambda(tf.keras.activations.swish)(truncated)
truncated = L.Dropout(dropout)(truncated)
out = L.Dense(1, activation='linear')(truncated)
model = tf.keras.Model(inputs=[link_inputs, cross_inputs, deep_inputs, wide_inputs, slice_input],
outputs=out) # WD
    model.summary()  # summary() prints the table itself; wrapping it in print() would also emit 'None'
model.compile(loss='mape',
optimizer=RAdamOptimizer(learning_rate=1e-3),
metrics=['mape'])
return model
def get_mc_es_lr(model_name: str, patience=5, min_delta=1e-4):
    # no trailing comma after ModelCheckpoint: a trailing comma would turn mc into a 1-tuple
    mc = tf.keras.callbacks.ModelCheckpoint('../model_h5/model_{}.h5'.format(model_name))
    es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', mode='min',
                                          restore_best_weights=True, patience=patience)
    lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.8, patience=patience, mode='min',
                                              min_delta=min_delta)
    return mc, es, lr
class Mish(tf.keras.layers.Layer):
def __init__(self, **kwargs):
super(Mish, self).__init__(**kwargs)
self.supports_masking = True
def call(self, inputs):
return inputs * K.tanh(K.softplus(inputs))
    def get_config(self):
        # Mish adds no extra constructor arguments, so the base config suffices
        # (the original version referenced an undefined `config` variable)
        base_config = super(Mish, self).get_config()
        return dict(base_config.items())
def compute_output_shape(self, input_shape):
return input_shape
def mish(x):
return tf.keras.layers.Lambda(lambda x: x*K.tanh(K.softplus(x)))(x)
tf.keras.utils.get_custom_objects().update({'mish': tf.keras.layers.Activation(mish)})
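# Minimal smoke-test sketch (not part of the original pipeline): builds the model on
# tiny random inputs to check that the graph wires up end to end. Every size below is
# an illustrative assumption, not a value taken from the competition data.
if __name__ == '__main__':
    demo_model = wd_model(link_size=100, cross_size=50, slice_size=288,
                          input_deep_col=16, input_wide_col=8,
                          link_nf_size=5, cross_nf_size=3,
                          link_seqlen=20, cross_seqlen=4, n_layers=1)
    demo_inputs = [np.random.rand(2, 20, 5), np.random.rand(2, 4, 3),
                   np.random.rand(2, 16), np.random.rand(2, 8),
                   np.random.randint(0, 288, size=(2, 1))]
    print('prediction shape:', demo_model.predict(demo_inputs).shape)  # expected: (2, 1)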
|
{"/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/prepare_for_training.py": ["/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/normalize.py", "/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/generate_sinusoids.py"]}
|
17,370
|
iakirago/AiLearning-Theory-Applying
|
refs/heads/master
|
/机器学习算法原理及推导/其它/第二章——手写线性回归算法/LinearRegression/linear_regression.py
|
import numpy as np
from util.features import prepare_for_training
class LinearRegression:
    def __init__(self, data, labels, polynomial_degree=0, sinusoid_degree=0, normalize_data=True):
        """
        1. Preprocess the data
        2. Get the final number of features
        3. Initialise the parameter matrix
        data: training data
        polynomial_degree: whether to add extra polynomial features
        sinusoid_degree: whether to add extra sinusoid features
        normalize_data: whether to normalize the data
        """
(data_processed,
features_mean,
features_deviation) = prepare_for_training.prepare_for_training(data, polynomial_degree, sinusoid_degree,
normalize_data)
self.data = data_processed
self.labels = labels
self.features_mean = features_mean
self.features_deviation = features_deviation
self.polynomial_degree = polynomial_degree
self.sinusoid_degree = sinusoid_degree
self.normalize_data = normalize_data
num_features = self.data.shape[1]
self.theta = np.zeros((num_features, 1))
    def train(self, alpha, num_iterations=500):
        """
        Training module: run gradient descent to obtain theta and the loss history
        alpha: learning rate
        num_iterations: number of iterations
        """
        cost_history = self.gradient_descent(alpha, num_iterations)
        return self.theta, cost_history
    def gradient_descent(self, alpha, num_iterations):
        """
        The actual iteration module
        alpha: learning rate
        num_iterations: number of iterations
        :return: the loss history
        """
        cost_history = []  # collect the loss of each iteration
        for _ in range(num_iterations):  # start iterating
            self.gradient_step(alpha)  # update theta on every step
            cost_history.append(self.cost_function(self.data, self.labels))
        return cost_history
    def gradient_step(self, alpha):
        """
        Gradient-descent parameter update; note these are matrix operations
        alpha: learning rate
        """
        num_examples = self.data.shape[0]  # number of training examples
        # predictions from the current data and theta
        prediction = LinearRegression.hypothesis(self.data, self.theta)
        delta = prediction - self.labels  # residual: prediction minus ground truth
        theta = self.theta
        # batch gradient-descent update written in matrix form
        theta = theta - alpha * (1 / num_examples) * (np.dot(delta.T, self.data)).T
        self.theta = theta  # store the updated theta
    def cost_function(self, data, labels):
        """
        Loss computation: returns the mean loss rather than a per-sample loss
        """
        num_examples = data.shape[0]
        delta = LinearRegression.hypothesis(data, self.theta) - labels  # residual: prediction minus ground truth
        # mean squared-error loss; the original np.dot(delta, delta.T) built an
        # (m, m) matrix and never used num_examples
        cost = (1 / 2) * np.dot(delta.T, delta) / num_examples
        return cost[0][0]
    @staticmethod
    def hypothesis(data, theta):
        """
        Compute the predictions
        :param data: data matrix
        :param theta: weight vector theta
        :return: predictions
        """
        predictions = np.dot(data, theta)
        return predictions
    def get_cost(self, data, labels):
        """
        Get the loss on the given data
        """
        data_processed = prepare_for_training.prepare_for_training(data,
                                                                   self.polynomial_degree,
                                                                   self.sinusoid_degree,
                                                                   self.normalize_data)[0]
        return self.cost_function(data_processed, labels)
    def predict(self, data):
        """
        Predict regression values with the trained parameter model
        """
data_processed = prepare_for_training.prepare_for_training(data,
self.polynomial_degree,
self.sinusoid_degree,
self.normalize_data)[0]
predictions = LinearRegression.hypothesis(data_processed, self.theta)
return predictions
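# Minimal usage sketch (illustrative, not part of the original lesson code): fits the
# model on a toy 1-D dataset. Assumes util.features.prepare_for_training is importable,
# exactly as the import at the top of this file already requires.
if __name__ == '__main__':
    demo_data = np.linspace(0, 10, 50).reshape(-1, 1)
    demo_labels = 3 * demo_data + 2 + np.random.randn(50, 1) * 0.5
    demo_model = LinearRegression(demo_data, demo_labels)
    _, demo_cost_history = demo_model.train(alpha=0.01, num_iterations=500)
    print('final cost:', demo_cost_history[-1])
    print('predictions on the first 5 samples:', demo_model.predict(demo_data[:5]).ravel())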
|
{"/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/prepare_for_training.py": ["/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/normalize.py", "/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/generate_sinusoids.py"]}
|
17,371
|
iakirago/AiLearning-Theory-Applying
|
refs/heads/master
|
/机器学习算法原理及推导/其它/第二章——手写线性回归算法/util/features/generate_sinusoids.py
|
import numpy as np
def generate_sinusoids(dataset, sinusoid_degree):
"""
sin(x).
"""
num_examples = dataset.shape[0]
sinusoids = np.empty((num_examples, 0))
for degree in range(1, sinusoid_degree+1):
sinusoid_features = np.sin(degree * dataset)
sinusoids = np.concatenate((sinusoids, sinusoid_features), axis=1)
return sinusoids
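# Example (illustrative): with sinusoid_degree=2 every input column x expands into
# [sin(x), sin(2x)], so a (3, 1) dataset becomes a (3, 2) feature matrix.
if __name__ == '__main__':
    demo = np.array([[0.0], [np.pi / 2], [np.pi]])
    print(generate_sinusoids(demo, sinusoid_degree=2))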
|
{"/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/prepare_for_training.py": ["/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/normalize.py", "/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/generate_sinusoids.py"]}
|
17,372
|
iakirago/AiLearning-Theory-Applying
|
refs/heads/master
|
/机器学习竞赛实战_优胜解决方案/ACM SIGSPATIAL 2021 GISCUP/LGB_13700/4_single_model.py
|
#coding=utf-8
"""
Author: Aigege
Code: https://github.com/AiIsBetter
"""
# date 2021.08.01
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
import lightgbm as lgb
from utils import reduce_mem_usage,reduce_mem_usage_parallel
import os
import gc
import warnings
import time
warnings.filterwarnings("ignore")
def slice_id_change(x):
hour = x * 5 / 60
hour = np.floor(hour)
hour += 8
if hour >= 24:
hour = hour - 24
return hour
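# Reading of the transform above (illustrative): slice_id appears to count 5-minute
# intervals starting at 08:00, so slice_id_change(0) == 8.0 (08:00) and
# slice_id_change(200) == 0.0 (16h40m later, wrapping past midnight).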
# evaluation metric: mean absolute percentage error
def MAPE(true, pred):
    diff = np.abs(np.array(pred) - np.array(true))
    return np.mean(diff / true)
# custom LightGBM eval metric: returns (name, value, is_higher_better)
def lgb_score_mape(labels, preds):
    diff = np.abs(np.array(preds) - np.array(labels))
    result = np.mean(diff / labels)
    return 'mape', result, False
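# Example (illustrative): lgb_score_mape(np.array([100, 200]), np.array([110, 180]))
# returns ('mape', 0.1, False); the trailing False tells LightGBM that lower is better.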
head_columns = ['order_id', 'ata', 'distance', 'simple_eta', 'driver_id','slice_id']
result = []
result_time_weight = []
result_dis_weight = []
count = 0
df = []
nrows=None
root_path = '../data/giscup_2021/'
data_list = ['20200818', '20200819', '20200820', '20200821', '20200822', '20200823', '20200824',
'20200825', '20200826', '20200827', '20200828', '20200829', '20200830', '20200831']
#######################################本地验证#######################################
for name in os.listdir(root_path+'train/'):
data_time = name.split('.')[0]
if data_time not in data_list:
continue
train = pd.read_csv(root_path+'train/{}'.format(name),sep= ';;',header=None,nrows=nrows)
feature_cross = pd.read_csv(root_path+'feature/train/cross_fea_order_id_level_{}.csv'.format(data_time),nrows=nrows)
feature_link = pd.read_csv(root_path+'feature/train/link_fea_order_id_level_{}.csv'.format(data_time),nrows=nrows)
feature_head = pd.read_csv(root_path+'feature/train/head_link_{}.csv'.format(data_time),nrows=nrows)
feature_sqe = pd.read_csv(root_path + 'feature/train/{}.csv'.format(data_time),nrows=nrows)
feature_cross['order_id'] = feature_cross['order_id'].astype(str)
feature_link['order_id'] = feature_link['order_id'].astype(str)
feature_head['order_id'] = feature_head['order_id'].astype(str)
feature_sqe['order_id'] = feature_sqe['order_id'].astype(str)
print("开始处理", data_time)
# train.columns = ['head','link','cross']
# train['head'] = train['head'].apply(lambda x:x.split(' '))
train_head = pd.DataFrame(train[0].str.split(' ').tolist(),columns = ['order_id', 'ata', 'distance','simple_eta', 'driver_id', 'slice_id'])
train_head['order_id'] = train_head['order_id'].astype(str)
train_head['ata'] = train_head['ata'].astype(float)
train_head['distance'] = train_head['distance'].astype(float)
train_head['simple_eta'] = train_head['simple_eta'].astype(float)
train_head['driver_id'] = train_head['driver_id'].astype(int)
train_head['slice_id'] = train_head['slice_id'].astype(int)
train_head['date_time'] = int(data_time)
train_head = train_head.merge(feature_cross,on='order_id',how='left')
train_head = train_head.merge(feature_link,on='order_id',how='left')
feature_head = feature_head.drop(['ata', 'distance', 'simple_eta', 'driver_id', 'slice_id', 'index',
'date_time', 'link_count', 'link_time_sum', 'link_ratio_sum',
'date_time_dt', 'weekday', 'hour', 'weather', 'hightemp', 'lowtemp',
'len_tmp',
'link_time_mean', 'link_time_std'],
axis=1)
feature_sqe = feature_sqe.drop(['pre_arrival_status', 'arrive_slice_id', 'slice_id'], axis=1)
train_head = train_head.merge(feature_sqe, on='order_id', how='left')
train_head = train_head.merge(feature_head, on='order_id', how='left')
print('merge finish!')
train_head = reduce_mem_usage_parallel(train_head,28)
df.append(train_head.drop('order_id',axis=1))
del train
gc.collect()
count +=1
df = pd.concat(df,axis=0)
test = pd.read_csv(root_path+'20200901_test.txt',sep= ';;',header=None,nrows=nrows)
test_head = pd.DataFrame(test[0].str.split(' ').tolist(),columns = ['order_id', 'ata', 'distance','simple_eta', 'driver_id', 'slice_id'])
test_head['order_id'] = test_head['order_id'].astype(str)
test_head['ata'] = test_head['ata'].astype(float)
test_head['distance'] = test_head['distance'].astype(float)
test_head['simple_eta'] = test_head['simple_eta'].astype(float)
test_head['driver_id'] = test_head['driver_id'].astype(int)
test_head['slice_id'] = test_head['slice_id'].astype(int)
feature_cross = pd.read_csv(root_path + 'feature/test/cross_fea_order_id_level_{}.csv'.format('20200901'),nrows=nrows)
feature_link = pd.read_csv(root_path + 'feature/test/link_fea_order_id_level_{}.csv'.format('20200901'), nrows=nrows)
feature_head = pd.read_csv(root_path + 'feature/test/head_link_{}.csv'.format('20200901'),nrows=nrows)
feature_sqe = pd.read_csv(root_path + 'feature/test/{}.csv'.format('20200901'),nrows=nrows)
test_head['date_time'] = 20200901
feature_cross['order_id'] = feature_cross['order_id'].astype(str)
feature_link['order_id'] = feature_link['order_id'].astype(str)
feature_head['order_id'] = feature_head['order_id'].astype(str)
feature_sqe['order_id'] = feature_sqe['order_id'].astype(str)
test_head = test_head.merge(feature_cross, on='order_id', how='left')
test_head = test_head.merge(feature_link,on='order_id',how='left')
feature_head = feature_head.drop(['ata', 'distance', 'simple_eta', 'driver_id', 'slice_id', 'index',
'date_time', 'link_count', 'link_time_sum', 'link_ratio_sum',
'date_time_dt', 'weekday', 'hour', 'weather', 'hightemp', 'lowtemp',
'len_tmp',
'link_time_mean', 'link_time_std'],
axis=1)
feature_sqe = feature_sqe.drop(['pre_arrival_status', 'arrive_slice_id', 'slice_id'], axis=1)
test_head = test_head.merge(feature_sqe, on='order_id', how='left')
test_head = test_head.merge(feature_head, on='order_id', how='left')
test_head = reduce_mem_usage_parallel(test_head,28)
del feature_cross,feature_link
gc.collect()
X_train = df.drop('ata',axis=1)
y_train = df['ata']
X_test = test_head.drop(['order_id','ata'],axis=1)
folds = 5
skf = KFold(n_splits=folds, shuffle=True, random_state=2021)
train_mean = np.zeros(shape=[1,folds])
test_predict = np.zeros(shape=[X_test.shape[0], folds],dtype=float)
k_fold_mape = []
feature_importance_df = pd.DataFrame()
# Display/plot feature importance
def display_importances(feature_importance_df_):
feature_importance_df_.to_csv('feature_importances.csv',index=False)
cols = feature_importance_df_[["feature", "importance"]].groupby("feature").mean().sort_values(by="importance", ascending=False)[:100].index
best_features = feature_importance_df_.loc[feature_importance_df_.feature.isin(cols)]
best_features = best_features.groupby('feature',as_index = False)['importance'].mean()
best_features = best_features.sort_values(by = 'importance',ascending=False)
plt.figure(figsize=(8, 10))
sns.barplot(x="importance", y="feature", data=best_features.sort_values(by="importance", ascending=False))
plt.title('LightGBM Features (avg over folds)')
plt.tight_layout()
plt.savefig('feature_importances.jpg')
# plt.show()
scores = 0
threshold = 0
print('start training......')
print('Train set shape:', X_train.shape)
print('Test set shape:', X_test.shape)
for i, (trn_idx, val_idx) in enumerate(skf.split(X_train, y_train)):
clf = lgb.LGBMRegressor(
boosting_type='gbdt',
objective='regression',
n_estimators=10000,
learning_rate=0.1,
num_leaves=170,
max_bin=63,
max_depth=-1,
random_state = 2021,
subsample_for_bin=200000,
feature_fraction=0.84,
bagging_fraction=0.86,
bagging_freq=7,
min_child_samples=89,
lambda_l1=0.006237830242067111,
lambda_l2=2.016472023736186e-05,
metric=None,
n_jobs = 30,
# device='gpu'
)
clf.fit(X_train.iloc[trn_idx], y_train.iloc[trn_idx], eval_set=[(X_train.iloc[trn_idx], y_train.iloc[trn_idx])
, (X_train.iloc[val_idx], y_train.iloc[val_idx])],
eval_metric=lambda y_true, y_pred:[lgb_score_mape(y_true, y_pred)],
verbose=100, early_stopping_rounds=100)
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = X_train.columns
fold_importance_df["importance"] = clf.feature_importances_
fold_importance_df["fold"] = i + 1
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
print('predicting')
val_predict = clf.predict(X_train.iloc[val_idx], num_iteration=clf.best_iteration_)
test_predict[:,i] = clf.predict(X_test, num_iteration=clf.best_iteration_)
k_fold_mape.append(MAPE(y_train.iloc[val_idx],val_predict))
print("kfold_{}_mape_score:{} ".format(i, k_fold_mape[i]))
print('Train set kfold {} mean mape:'.format(i), np.mean(k_fold_mape))
display_importances(feature_importance_df)
test_head['result'] = np.mean(test_predict,axis=1)
test_head['id'] = test_head['order_id']
test_head[['id','result']].to_csv('submission.csv',index=False)
|
{"/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/prepare_for_training.py": ["/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/normalize.py", "/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/generate_sinusoids.py"]}
|
17,373
|
iakirago/AiLearning-Theory-Applying
|
refs/heads/master
|
/机器学习竞赛实战_优胜解决方案/ACM SIGSPATIAL 2021 GISCUP/DCN蒸馏_12953/dcn_model/dcn_model.py
|
import pandas as pd
import numpy as np
from tensorflow import keras
import tensorflow as tf
from sklearn.preprocessing import StandardScaler, LabelEncoder
import tensorflow.keras.layers as L
# import tensorflow.keras.models as M
import tensorflow.keras.backend as K
from tensorflow.python.client import device_lib
from tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping
#from keras_radam import RAdam
from keras_radam.training import RAdamOptimizer
from tensorflow.keras import initializers, regularizers, constraints, optimizers, layers, callbacks
from tensorflow.keras.layers import GlobalAveragePooling1D, GlobalMaxPooling1D, concatenate, SpatialDropout1D, Conv1D
from tensorflow.keras.layers import Input, Dense, Lambda, Layer
from tensorflow.keras.initializers import Constant
from tensorflow.keras.models import Model
import os
from tensorflow.keras.losses import mean_absolute_percentage_error
#from tensorflow.contrib.opt import AdamWOptimizer
os.environ["TF_CPP_MIN_LOG_LEVEL"]='3'
gamma = 2.0
alpha = 0.25
epsilon = K.epsilon()
def mape_2(y_true, y_pred):
y_true = y_true[:, :1]
y_pred = y_pred[:, :1]
return tf.py_function(mean_absolute_percentage_error, (y_true, y_pred), tf.float32)
def mape_3(y_true, y_pred):
y_true = y_true[:, :1]
y_pred = y_pred[:, 1:]
return tf.py_function(mean_absolute_percentage_error, (y_true, y_pred), tf.float32)
def knowledge_distillation_loss_withFL(y_true, y_pred, beta=0.1):
# Extract the groundtruth from dataset and the prediction from teacher model
y_true, y_pred_teacher = y_true[: , :1], y_true[: , 1:]
# Extract the prediction from student model
y_pred, y_pred_stu = y_pred[: , :1], y_pred[: , 1:]
loss = beta*focal_loss(y_true,y_pred) + (1-beta)*mean_absolute_percentage_error(y_pred_teacher, y_pred_stu)
return loss
def focal_loss(y_true, y_pred):
pt_1 = y_pred * y_true
pt_1 = K.clip(pt_1, epsilon, 1-epsilon)
CE_1 = -K.log(pt_1)
FL_1 = alpha* K.pow(1-pt_1, gamma) * CE_1
pt_0 = (1-y_pred) * (1-y_true)
pt_0 = K.clip(pt_0, epsilon, 1-epsilon)
CE_0 = -K.log(pt_0)
FL_0 = (1-alpha)* K.pow(1-pt_0, gamma) * CE_0
loss = K.sum(FL_1, axis=1) + K.sum(FL_0, axis=1)
return loss
def knowledge_distillation_loss_withBE(y_true, y_pred, beta=0.6):
# Extract the groundtruth from dataset and the prediction from teacher model
y_true, y_pred_teacher = y_true[: , :1], y_true[: , 1:]
# Extract the prediction from student model
y_pred, y_pred_stu = y_pred[: , :1], y_pred[: , 1:]
loss = beta*mean_absolute_percentage_error(y_true,y_pred) + (1-beta)*mean_absolute_percentage_error(y_pred_teacher, y_pred_stu)
return loss
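def pack_distillation_targets(ata_labels, teacher_preds):
    # Helper sketch (assumed wiring, not in the original code): the distillation
    # losses above expect y_true with the ground truth in column 0 and the teacher
    # model's prediction in column 1; this stacks the two into that 2-column target,
    # e.g. model.fit(inputs, pack_distillation_targets(y, teacher_pred), ...).
    return np.column_stack([ata_labels, teacher_preds])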
def get_available_gpus():
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
def gru_layer(hidden_dim, dropout):
return L.Bidirectional(L.GRU(
hidden_dim, dropout=dropout, return_sequences=True, kernel_initializer='orthogonal'))
def lstm_layer(hidden_dim, dropout):
return L.Bidirectional(L.LSTM(
hidden_dim, dropout=dropout, return_sequences=True, kernel_initializer='orthogonal'))
class FeatureDictionary(object):
def __init__(self, df=None, numeric_cols=[], ignore_cols=[], cate_cols=[]):
self.df = df
self.cate_cols = cate_cols
self.numeric_cols = numeric_cols
self.ignore_cols = ignore_cols
        self.gen_feat_dict()  # feat_cate_len: vocabulary size of every categorical column
    def gen_feat_dict(self):
        self.feat_cate_len = {}
        for col in self.cate_cols:
            # unique categories in this column
            us = self.df[col].unique()
            us_len = len(us)
            # record the column's cardinality (embedding vocabulary size)
            self.feat_cate_len[col] = us_len
def embedding_layers(fd):
    # Defines the raw input layers and the embedding tensors fed by them
    embeddings_tensors = []
    continus_tensors = []
    cate_feature = fd.feat_cate_len
    numeric_feature = fd.numeric_cols
    for ec in cate_feature:
        layer_name = ec + '_inp'
        # for categorical features, keep the embedding dimension around
        # 6 * (category cardinality) ** (1/4), capped at the cardinality itself
        embed_dim = min(cate_feature[ec], int(6 * np.power(cate_feature[ec], 1 / 4)))
        t_inp, t_embedding = embedding_input(layer_name, cate_feature[ec], embed_dim)
        embeddings_tensors.append((t_inp, t_embedding))
        del (t_inp, t_embedding)
    for cc in numeric_feature:
        layer_name = cc + '_in'
        t_inp, t_build = continus_input(layer_name)
        continus_tensors.append((t_inp, t_build))
        del (t_inp, t_build)
    # inputs for the categorical features; the order here must match the data fed at fit time
    inp_layer = [et[0] for et in embeddings_tensors]
    inp_embed = [et[1] for et in embeddings_tensors]
    # inputs for the numeric features
    inp_layer += [ct[0] for ct in continus_tensors]
    inp_embed += [ct[1] for ct in continus_tensors]
    return inp_layer, inp_embed
def embedding_input(name, input_dim, output_dim):
inp = L.Input(shape=(1,), dtype='int64', name=name)
embeddings = L.Embedding(input_dim, output_dim, input_length=1)(inp)
return inp, embeddings
def continus_input(name):
inp = L.Input(shape=(1,), dtype='float32', name=name)
return inp, L.Reshape((1, 1))(inp)
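def _embedding_layers_demo():
    # Illustrative sketch (not in the original code): builds the input/embedding
    # layers for a toy frame. The column names here are made up for the example.
    demo_df = pd.DataFrame({'driver_id': [0, 1, 1, 2], 'distance': [1.0, 2.5, 0.7, 3.2]})
    fd = FeatureDictionary(df=demo_df, numeric_cols=['distance'], cate_cols=['driver_id'])
    inp_layer, inp_embed = embedding_layers(fd)
    return inp_layer, inp_embed  # one Input per column, plus its embedding/reshaped tensor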
class CrossLayer(L.Layer):
def __init__(self, output_dim, num_layer, **kwargs):
self.output_dim = output_dim
self.num_layer = num_layer
super(CrossLayer, self).__init__(**kwargs)
    def get_config(self):
        # serialise this layer's own constructor arguments (the original version
        # copied fields from an unrelated layer and would raise AttributeError)
        config = super().get_config().copy()
        config.update({
            'output_dim': self.output_dim,
            'num_layer': self.num_layer,
        })
        return config
def build(self, input_shape):
self.input_dim = input_shape[2]
self.W = []
self.bias = []
for i in range(self.num_layer):
self.W.append(
self.add_weight(shape=[1, self.input_dim], initializer='glorot_uniform', name='w_{}'.format(i),
trainable=True))
self.bias.append(
self.add_weight(shape=[1, self.input_dim], initializer='zeros', name='b_{}'.format(i), trainable=True))
self.built = True
def call(self, input):
for i in range(self.num_layer):
if i == 0:
cross = L.Lambda(lambda x: K.batch_dot(K.dot(x, K.transpose(self.W[i])), x) + self.bias[i] + x)(input)
else:
cross = L.Lambda(lambda x: K.batch_dot(K.dot(x, K.transpose(self.W[i])), input) + self.bias[i] + x)(
cross)
return L.Flatten()(cross)
def compute_output_shape(self, input_shape):
return None, self.output_dim
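# Illustrative note (not in the original code): CrossLayer expects the 3-D
# (batch, 1, features) tensor produced by concatenating inp_embed, e.g.
#   crossed = CrossLayer(output_dim=8, num_layer=2)(L.Input(shape=(1, 8)))
# and returns the flattened (batch, features) cross-interaction tensor.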
def preprocess(df, cate_cols, numeric_cols):
for cl in cate_cols:
le = LabelEncoder()
df[cl] = le.fit_transform(df[cl])
cols = cate_cols + numeric_cols
X_train = df[cols]
return X_train
def DCN_model(inp_layer, inp_embed, link_size, cross_size, slice_size, input_deep_col, input_wide_col,
link_nf_size, cross_nf_size, encoder, link_seqlen=170, cross_seqlen=12, pred_len=1,
dropout=0.25, sp_dropout=0.1, embed_dim=64, hidden_dim=128, n_layers=3, lr=0.001,
kernel_size1=3, kernel_size2=2, conv_size=128, conv=False, have_knowledge=True):
inp = L.concatenate(inp_embed, axis=-1)
link_inputs = L.Input(shape=(link_seqlen, link_nf_size), name='link_inputs')
cross_inputs = L.Input(shape=(cross_seqlen, cross_nf_size), name='cross_inputs')
deep_inputs = L.Input(shape=(input_deep_col,), name='deep_input')
slice_input = L.Input(shape=(1,), name='slice_input')
wide_inputs = keras.layers.Input(shape=(input_wide_col,), name='wide_inputs')
# link----------------------------
categorical_link = link_inputs[:, :, :1]
embed_link = L.Embedding(input_dim=link_size, output_dim=embed_dim, mask_zero=True)(categorical_link)
reshaped_link = tf.reshape(embed_link, shape=(-1, embed_link.shape[1], embed_link.shape[2] * embed_link.shape[3]))
reshaped_link = L.SpatialDropout1D(sp_dropout)(reshaped_link)
"""
categorical_slice = link_inputs[:, :, 5:6]
embed_slice = L.Embedding(input_dim=289, output_dim=16, mask_zero=True)(categorical_slice)
reshaped_slice = tf.reshape(embed_slice, shape=(-1, embed_slice.shape[1], embed_slice.shape[2] * embed_slice.shape[3]))
reshaped_slice = L.SpatialDropout1D(sp_dropout)(reshaped_slice)
categorical_hightemp = link_inputs[:, :, 6:7]
embed_hightemp = L.Embedding(input_dim=33, output_dim=8, mask_zero=True)(categorical_hightemp)
reshaped_hightemp = tf.reshape(embed_hightemp, shape=(-1, embed_hightemp.shape[1], embed_hightemp.shape[2] * embed_hightemp.shape[3]))
reshaped_hightemp = L.SpatialDropout1D(sp_dropout)(reshaped_hightemp)
categorical_weather = link_inputs[:, :, 7:8]
embed_weather = L.Embedding(input_dim=7, output_dim=8, mask_zero=True)(categorical_weather)
reshaped_weather = tf.reshape(embed_weather, shape=(-1, embed_weather.shape[1], embed_weather.shape[2] * embed_weather.shape[3]))
reshaped_weather = L.SpatialDropout1D(sp_dropout)(reshaped_weather)
numerical_fea1 = link_inputs[:, :, 1:5]
numerical_fea1 = L.Masking(mask_value=0, name='numerical_fea1')(numerical_fea1)
hidden = L.concatenate([reshaped_link, numerical_fea1, reshaped_slice, reshaped_hightemp, reshaped_weather], axis=2)
"""
if have_knowledge:
numerical_fea1 = link_inputs[:, :, 1:5]
numerical_fea1 = L.Masking(mask_value=0, name='numerical_fea1')(numerical_fea1)
categorical_ar_st = link_inputs[:, :, 5:6]
categorical_ar_st = L.Masking(mask_value=-1, name='categorical_ar_st')(categorical_ar_st)
embed_ar_st = L.Embedding(input_dim=289, output_dim=8)(categorical_ar_st)
reshaped_ar_st = tf.reshape(embed_ar_st, shape=(-1, embed_ar_st.shape[1], embed_ar_st.shape[2] * embed_ar_st.shape[3]))
reshaped_ar_st = L.SpatialDropout1D(sp_dropout)(reshaped_ar_st)
categorical_ar_sl = link_inputs[:, :, 6:7]
categorical_ar_sl = L.Masking(mask_value=-1, name='categorical_ar_sl')(categorical_ar_sl)
embed_ar_sl = L.Embedding(input_dim=289, output_dim=8)(categorical_ar_sl)
reshaped_ar_sl = tf.reshape(embed_ar_sl, shape=(-1, embed_ar_sl.shape[1], embed_ar_sl.shape[2] * embed_ar_sl.shape[3]))
reshaped_ar_sl = L.SpatialDropout1D(sp_dropout)(reshaped_ar_sl)
hidden = L.concatenate([reshaped_link, reshaped_ar_st, reshaped_ar_sl, numerical_fea1],axis=2)
#hidden = L.concatenate([reshaped_link, numerical_fea1],axis=2)
else:
numerical_fea1 = link_inputs[:, :, 1:5]
numerical_fea1 = L.Masking(mask_value=0, name='numerical_fea1')(numerical_fea1)
categorical_arrival = link_inputs[:, :, 5:6]
categorical_arrival = L.Masking(mask_value=-1, name='categorical_arrival')(categorical_arrival)
embed_ar = L.Embedding(input_dim=5, output_dim=16)(categorical_arrival)
reshaped_ar = tf.reshape(embed_ar, shape=(-1, embed_ar.shape[1], embed_ar.shape[2] * embed_ar.shape[3]))
reshaped_ar = L.SpatialDropout1D(sp_dropout)(reshaped_ar)
categorical_ar_st = link_inputs[:, :, 6:7]
categorical_ar_st = L.Masking(mask_value=-1, name='categorical_ar_st')(categorical_ar_st)
embed_ar_st = L.Embedding(input_dim=289, output_dim=8)(categorical_ar_st)
reshaped_ar_st = tf.reshape(embed_ar_st, shape=(-1, embed_ar_st.shape[1], embed_ar_st.shape[2] * embed_ar_st.shape[3]))
reshaped_ar_st = L.SpatialDropout1D(sp_dropout)(reshaped_ar_st)
categorical_ar_sl = link_inputs[:, :, 7:8]
categorical_ar_sl = L.Masking(mask_value=-1, name='categorical_ar_sl')(categorical_ar_sl)
embed_ar_sl = L.Embedding(input_dim=289, output_dim=8)(categorical_ar_sl)
reshaped_ar_sl = tf.reshape(embed_ar_sl, shape=(-1, embed_ar_sl.shape[1], embed_ar_sl.shape[2] * embed_ar_sl.shape[3]))
reshaped_ar_sl = L.SpatialDropout1D(sp_dropout)(reshaped_ar_sl)
hidden = L.concatenate([reshaped_link, reshaped_ar, reshaped_ar_st, reshaped_ar_sl, numerical_fea1],axis=2)
#hidden = L.concatenate([reshaped_link, reshaped_ar, numerical_fea1],axis=2)
#hidden = L.Masking(mask_value=0)(hidden)
for x in range(n_layers):
hidden = gru_layer(hidden_dim, dropout)(hidden)
if conv:
x_conv1 = Conv1D(conv_size, kernel_size=kernel_size1, padding='valid', kernel_initializer='he_uniform')(hidden)
avg_pool1_gru = GlobalAveragePooling1D()(x_conv1)
max_pool1_gru = GlobalMaxPooling1D()(x_conv1)
#x_conv2 = Conv1D(conv_size, kernel_size=kernel_size2, padding='valid', kernel_initializer='he_uniform')(hidden)
#avg_pool2_gru = GlobalAveragePooling1D()(x_conv2)
#max_pool2_gru = GlobalMaxPooling1D()(x_conv2)
truncated_link = concatenate([avg_pool1_gru, max_pool1_gru])
else:
truncated_link = hidden[:, :pred_len]
truncated_link = L.Flatten()(truncated_link)
# truncated_link = Attention(256)(hidden)
# CROSS----------------------------
categorical_fea2 = cross_inputs[:, :, :1]
embed2 = L.Embedding(input_dim=cross_size, output_dim=16, mask_zero=True)(categorical_fea2)
reshaped2 = tf.reshape(embed2, shape=(-1, embed2.shape[1], embed2.shape[2] * embed2.shape[3]))
reshaped2 = L.SpatialDropout1D(sp_dropout)(reshaped2)
numerical_fea2 = cross_inputs[:, :, 1:]
numerical_fea2 = L.Masking(mask_value=0, name='numerical_fea2')(numerical_fea2)
hidden2 = L.concatenate([reshaped2, numerical_fea2], axis=2)
# hidden2 = L.Masking(mask_value=0)(hidden2)
for x in range(n_layers):
hidden2 = gru_layer(hidden_dim, dropout)(hidden2)
if conv:
x_conv3 = Conv1D(conv_size, kernel_size=kernel_size1, padding='valid', kernel_initializer='he_uniform')(hidden2)
avg_pool3_gru = GlobalAveragePooling1D()(x_conv3)
max_pool3_gru = GlobalMaxPooling1D()(x_conv3)
#x_conv4 = Conv1D(conv_size, kernel_size=kernel_size2, padding='valid', kernel_initializer='he_uniform')(hidden2)
#avg_pool4_gru = GlobalAveragePooling1D()(x_conv4)
#max_pool4_gru = GlobalMaxPooling1D()(x_conv4)
truncated_cross = concatenate([avg_pool3_gru, max_pool3_gru])
else:
truncated_cross = hidden2[:, :pred_len]
truncated_cross = L.Flatten()(truncated_cross)
# truncated_cross = Attention(256)(hidden2)
# SLICE----------------------------
embed_slice = L.Embedding(input_dim=slice_size, output_dim=1)(slice_input)
embed_slice = L.Flatten()(embed_slice)
# DEEP_INPUS
x = encoder(deep_inputs)
x = L.Concatenate()([x, deep_inputs]) # use both raw and encoded features
x = L.BatchNormalization()(x)
x = L.Dropout(0.25)(x)
for i in range(3):
x = L.Dense(256)(x)
x = L.BatchNormalization()(x)
x = L.Lambda(tf.keras.activations.swish)(x)
x = L.Dropout(0.25)(x)
dense_hidden3 = L.Dense(64,activation='linear')(x)
# DCN
cross = CrossLayer(output_dim=inp.shape[2], num_layer=8, name="cross_layer")(inp)
# MAIN-------------------------------
truncated = L.concatenate([truncated_link, truncated_cross, cross, dense_hidden3, wide_inputs, embed_slice])
truncated = L.BatchNormalization()(truncated)
truncated = L.Dropout(dropout)(L.Dense(512, activation='relu') (truncated))
truncated = L.BatchNormalization()(truncated)
truncated = L.Dropout(dropout)(L.Dense(256, activation='relu') (truncated))
if have_knowledge:
out = L.Dense(2, activation='linear', name='out')(truncated)
model = tf.keras.Model(inputs=[inp_layer, link_inputs, cross_inputs, deep_inputs, wide_inputs, slice_input],
outputs=out)
        model.summary()
model.compile(loss=knowledge_distillation_loss_withBE,
optimizer=RAdamOptimizer(learning_rate=1e-3), # 'adam' RAdam(warmup_proportion=0.1, min_lr=1e-7)
#metrics={'out':'mape'} # AdamWOptimizer(weight_decay=1e-4)
metrics=[mape_2,mape_3]
)
else:
out = L.Dense(1, activation='linear', name='out')(truncated)
model = tf.keras.Model(inputs=[inp_layer, link_inputs, cross_inputs, deep_inputs, wide_inputs, slice_input],
outputs=out)
        model.summary()
model.compile(loss=['mape'],
optimizer=RAdamOptimizer(learning_rate=1e-3), # 'adam' RAdam(warmup_proportion=0.1, min_lr=1e-7)
#metrics={'out':'mape'}
metrics=['mape']
)
return model
def arrival_model(inp_layer, inp_embed, link_size, cross_size, slice_size, input_deep_col, input_wide_col,
link_nf_size, cross_nf_size, link_seqlen=170, cross_seqlen=12, pred_len=1,
dropout=0.25, sp_dropout=0.1, embed_dim=64, hidden_dim=128, n_layers=3, lr=0.001,
kernel_size1=3, kernel_size2=2, conv_size=128, conv=False):
inp = L.concatenate(inp_embed, axis=-1)
link_inputs = L.Input(shape=(link_seqlen, link_nf_size), name='link_inputs')
cross_inputs = L.Input(shape=(cross_seqlen, cross_nf_size), name='cross_inputs')
deep_inputs = L.Input(shape=(input_deep_col,), name='deep_input')
slice_input = L.Input(shape=(1,), name='slice_input')
wide_inputs = keras.layers.Input(shape=(input_wide_col,), name='wide_inputs')
# link----------------------------
categorical_link = link_inputs[:, :, :1]
embed_link = L.Embedding(input_dim=link_size, output_dim=embed_dim, mask_zero=True)(categorical_link)
reshaped_link = tf.reshape(embed_link, shape=(-1, embed_link.shape[1], embed_link.shape[2] * embed_link.shape[3]))
reshaped_link = L.SpatialDropout1D(sp_dropout)(reshaped_link)
"""
categorical_slice = link_inputs[:, :, 5:6]
embed_slice = L.Embedding(input_dim=289, output_dim=16, mask_zero=True)(categorical_slice)
reshaped_slice = tf.reshape(embed_slice, shape=(-1, embed_slice.shape[1], embed_slice.shape[2] * embed_slice.shape[3]))
reshaped_slice = L.SpatialDropout1D(sp_dropout)(reshaped_slice)
categorical_hightemp = link_inputs[:, :, 6:7]
embed_hightemp = L.Embedding(input_dim=33, output_dim=8, mask_zero=True)(categorical_hightemp)
reshaped_hightemp = tf.reshape(embed_hightemp, shape=(-1, embed_hightemp.shape[1], embed_hightemp.shape[2] * embed_hightemp.shape[3]))
reshaped_hightemp = L.SpatialDropout1D(sp_dropout)(reshaped_hightemp)
categorical_weather = link_inputs[:, :, 7:8]
embed_weather = L.Embedding(input_dim=7, output_dim=8, mask_zero=True)(categorical_weather)
reshaped_weather = tf.reshape(embed_weather, shape=(-1, embed_weather.shape[1], embed_weather.shape[2] * embed_weather.shape[3]))
reshaped_weather = L.SpatialDropout1D(sp_dropout)(reshaped_weather)
numerical_fea1 = link_inputs[:, :, 1:5]
numerical_fea1 = L.Masking(mask_value=0, name='numerical_fea1')(numerical_fea1)
hidden = L.concatenate([reshaped_link, numerical_fea1, reshaped_slice, reshaped_hightemp, reshaped_weather], axis=2)
"""
numerical_fea1 = link_inputs[:, :, 1:]
numerical_fea1 = L.Masking(mask_value=0, name='numerical_fea1')(numerical_fea1)
hidden = L.concatenate([reshaped_link, numerical_fea1],axis=2)
#hidden = L.Masking(mask_value=0)(hidden)
for x in range(n_layers):
hidden = gru_layer(hidden_dim, dropout)(hidden)
if conv:
x_conv1 = Conv1D(conv_size, kernel_size=kernel_size1, padding='valid', kernel_initializer='he_uniform')(hidden)
avg_pool1_gru = GlobalAveragePooling1D()(x_conv1)
max_pool1_gru = GlobalMaxPooling1D()(x_conv1)
#x_conv2 = Conv1D(conv_size, kernel_size=kernel_size2, padding='valid', kernel_initializer='he_uniform')(hidden)
#avg_pool2_gru = GlobalAveragePooling1D()(x_conv2)
#max_pool2_gru = GlobalMaxPooling1D()(x_conv2)
truncated_link = concatenate([avg_pool1_gru, max_pool1_gru])
else:
truncated_link = hidden[:, :pred_len]
truncated_link = L.Flatten()(truncated_link)
# truncated_link = Attention(256)(hidden)
# CROSS----------------------------
categorical_fea2 = cross_inputs[:, :, :1]
embed2 = L.Embedding(input_dim=cross_size, output_dim=16, mask_zero=True)(categorical_fea2)
reshaped2 = tf.reshape(embed2, shape=(-1, embed2.shape[1], embed2.shape[2] * embed2.shape[3]))
reshaped2 = L.SpatialDropout1D(sp_dropout)(reshaped2)
numerical_fea2 = cross_inputs[:, :, 1:]
numerical_fea2 = L.Masking(mask_value=0, name='numerical_fea2')(numerical_fea2)
hidden2 = L.concatenate([reshaped2, numerical_fea2], axis=2)
# hidden2 = L.Masking(mask_value=0)(hidden2)
for x in range(n_layers):
hidden2 = gru_layer(hidden_dim, dropout)(hidden2)
if conv:
x_conv3 = Conv1D(conv_size, kernel_size=kernel_size1, padding='valid', kernel_initializer='he_uniform')(hidden2)
avg_pool3_gru = GlobalAveragePooling1D()(x_conv3)
max_pool3_gru = GlobalMaxPooling1D()(x_conv3)
#x_conv4 = Conv1D(conv_size, kernel_size=kernel_size2, padding='valid', kernel_initializer='he_uniform')(hidden2)
#avg_pool4_gru = GlobalAveragePooling1D()(x_conv4)
#max_pool4_gru = GlobalMaxPooling1D()(x_conv4)
truncated_cross = concatenate([avg_pool3_gru, max_pool3_gru])
else:
truncated_cross = hidden2[:, :pred_len]
truncated_cross = L.Flatten()(truncated_cross)
# truncated_cross = Attention(256)(hidden2)
# SLICE----------------------------
embed_slice = L.Embedding(input_dim=slice_size, output_dim=1)(slice_input)
embed_slice = L.Flatten()(embed_slice)
# DEEP_INPUS
x = L.BatchNormalization()(deep_inputs)
x = L.Dropout(0.25)(x)
for i in range(3):
x = L.Dense(256)(x)
x = L.BatchNormalization()(x)
x = L.Lambda(tf.keras.activations.swish)(x)
x = L.Dropout(0.25)(x)
dense_hidden3 = L.Dense(64,activation='linear')(x)
# DCN
cross = CrossLayer(output_dim=inp.shape[2], num_layer=8, name="cross_layer")(inp)
truncated = L.concatenate([truncated_link, truncated_cross, cross, dense_hidden3, wide_inputs, embed_slice])
truncated = L.BatchNormalization()(truncated)
truncated = L.Dropout(dropout)(L.Dense(512, activation='relu') (truncated))
truncated = L.BatchNormalization()(truncated)
truncated = L.Dropout(dropout)(L.Dense(256, activation='relu') (truncated))
arrival_0 = L.Dense(1, activation='linear', name='arrival_0')(truncated)
arrival_1 = L.Dense(1, activation='linear', name='arrival_1')(truncated)
arrival_2 = L.Dense(1, activation='linear', name='arrival_2')(truncated)
arrival_3 = L.Dense(1, activation='linear', name='arrival_3')(truncated)
arrival_4 = L.Dense(1, activation='linear', name='arrival_4')(truncated)
model = tf.keras.Model(inputs=[inp_layer,link_inputs, cross_inputs, deep_inputs, wide_inputs, slice_input],
outputs=[arrival_0,arrival_1,arrival_2,arrival_3,arrival_4])
    model.summary()
model.compile(loss='mse',
optimizer=RAdamOptimizer(learning_rate=1e-3) # 'adam' RAdam(warmup_proportion=0.1, min_lr=1e-7)
)
return model
def get_mc_es_lr(model_name: str, patience=5, min_delta=1e-4):
    # no trailing comma after ModelCheckpoint, which would turn mc into a 1-tuple
    mc = tf.keras.callbacks.ModelCheckpoint('../model_h5/model_{}.h5'.format(model_name))
    es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', mode='min',
                                          restore_best_weights=True, patience=patience)
    lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.8, patience=patience-1, mode='min',
                                              min_delta=min_delta)
    return mc, es, lr
def get_mc_es_lr_for_student(model_name: str, patience=5, min_delta=1e-4):
    # same fix as above: the trailing comma made mc a 1-tuple instead of a callback
    mc = tf.keras.callbacks.ModelCheckpoint('../model_h5/model_{}.h5'.format(model_name))
    es = tf.keras.callbacks.EarlyStopping(monitor='val_mape_2', mode='min',
                                          restore_best_weights=True, patience=patience)
    lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_mape_2', factor=0.8, patience=patience, mode='min',
                                              min_delta=min_delta)
    return mc, es, lr
def create_autoencoder(input_dim, output_dim, noise=0.05):
i = L.Input(input_dim)
encoded = L.BatchNormalization()(i)
encoded = L.GaussianNoise(noise)(encoded)
encoded = L.Dense(128, activation='relu')(encoded)
decoded = L.Dropout(0.2)(encoded)
decoded = L.Dense(input_dim,name='decoded')(decoded)
x = L.Dense(64, activation='relu')(decoded)
x = L.BatchNormalization()(x)
x = L.Dropout(0.2)(x)
x = L.Dense(output_dim, activation='linear', name='ata_output')(x)
encoder = keras.models.Model(inputs=i, outputs=decoded)
autoencoder = keras.models.Model(inputs=i, outputs=[decoded, x])
autoencoder.compile(optimizer=RAdamOptimizer(learning_rate=1e-3), loss={'decoded':'mse', 'ata_output': 'mape'})
return autoencoder, encoder
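def pretrain_encoder_sketch(X_deep, y_ata):
    # Sketch of the assumed usage (inferred from the `encoder(deep_inputs)` call in
    # DCN_model above, not confirmed by the original repo): pretrain the autoencoder
    # on the deep features, then pass the fitted encoder into DCN_model. The epoch
    # count and batch size below are placeholders.
    autoencoder, encoder = create_autoencoder(X_deep.shape[1], output_dim=1)
    autoencoder.fit(X_deep, {'decoded': X_deep, 'ata_output': y_ata},
                    epochs=1, batch_size=256, verbose=0)
    return encoder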
class Attention(L.Layer):
def __init__(self, step_dim,
W_regularizer=None, b_regularizer=None,
W_constraint=None, b_constraint=None,
bias=True, **kwargs):
self.supports_masking = True
self.init = initializers.get('glorot_uniform')
self.W_regularizer = regularizers.get(W_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.b_constraint = constraints.get(b_constraint)
self.bias = bias
self.step_dim = step_dim
self.features_dim = 0
super(Attention, self).__init__(**kwargs)
def build(self, input_shape):
assert len(input_shape) == 3
self.W = self.add_weight((input_shape[-1],),
initializer=self.init,
name='{}_W'.format(self.name),
regularizer=self.W_regularizer,
constraint=self.W_constraint)
self.features_dim = input_shape[-1]
if self.bias:
self.b = self.add_weight((input_shape[1],),
initializer='zero',
name='{}_b'.format(self.name),
regularizer=self.b_regularizer,
constraint=self.b_constraint)
else:
self.b = None
self.built = True
def compute_mask(self, input, input_mask=None):
return None
def call(self, x, mask=None):
features_dim = self.features_dim
step_dim = self.step_dim
eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)),
K.reshape(self.W, (features_dim, 1))), (-1, step_dim))
if self.bias:
eij += self.b
eij = K.tanh(eij)
a = K.exp(eij)
if mask is not None:
a *= K.cast(mask, K.floatx())
a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
a = K.expand_dims(a)
weighted_input = x * a
return K.sum(weighted_input, axis=1)
def compute_output_shape(self, input_shape):
return input_shape[0], self.features_dim
class CustomMultiLossLayer(Layer):
def __init__(self, nb_outputs=2, **kwargs):
self.nb_outputs = nb_outputs
self.is_placeholder = True
super(CustomMultiLossLayer, self).__init__(**kwargs)
def build(self, input_shape=None):
# initialise log_vars
self.log_vars = []
for i in range(self.nb_outputs):
self.log_vars += [self.add_weight(name='log_var' + str(i), shape=(1,),
initializer=Constant(0.), trainable=True)]
super(CustomMultiLossLayer, self).build(input_shape)
def multi_loss(self, ys_true, ys_pred):
assert len(ys_true) == self.nb_outputs and len(ys_pred) == self.nb_outputs
loss = 0
for y_true, y_pred, log_var in zip(ys_true, ys_pred, self.log_vars):
precision = K.exp(-log_var[0])
loss += K.sum(precision * (y_true - y_pred)**2. + log_var[0], -1)
return K.mean(loss)
def call(self, inputs):
ys_true = inputs[:self.nb_outputs]
ys_pred = inputs[self.nb_outputs:]
loss = self.multi_loss(ys_true, ys_pred)
self.add_loss(loss, inputs=inputs)
# We won't actually use the output.
return K.concatenate(inputs, -1)
|
{"/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/prepare_for_training.py": ["/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/normalize.py", "/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/generate_sinusoids.py"]}
|
17,374
|
iakirago/AiLearning-Theory-Applying
|
refs/heads/master
|
/机器学习竞赛实战_优胜解决方案/ACM SIGSPATIAL 2021 GISCUP/LGB_13700/1_sdne_embedding_allnext.py
|
#coding=utf-8
"""
Author: Aigege
Code: https://github.com/AiIsBetter
"""
# date 2021.08.01
import numpy as np
import networkx as nx
import pandas as pd
from gem.embedding.node2vec import node2vec
import os
from utils import parallel_apply
from functools import partial
import gc
def link_id_find(gr):
gr_ = gr.copy()
tmp = list(gr_['link_id'])
link_id_tuple = []
for i in range(len(tmp)-1):
link_id_tuple.append([tmp[i],tmp[i+1]])
return link_id_tuple
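# Example (illustrative): for an order whose link_id column is [5, 9, 2],
# link_id_find yields the consecutive directed edges [[5, 9], [9, 2]].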
if __name__ == '__main__':
root_path = '../data/giscup_2021/'
nrows = None
######################################nextlinks #######################################
nextlinks = pd.read_csv(root_path + 'nextlinks.txt', sep=' ', header=None)
nextlinks.columns = ['from_id', 'to_id']
nextlinks['to_id'] = nextlinks['to_id'].astype('str')
nextlinks['to_id'] = nextlinks['to_id'].apply(lambda x: x.split(","))
nextlinks = pd.DataFrame({'from_id': nextlinks.from_id.repeat(nextlinks.to_id.str.len()),
'to_id': np.concatenate(nextlinks.to_id.values)})
nextlinks['from_id'] = nextlinks['from_id'].astype(int)
nextlinks['to_id'] = nextlinks['to_id'].astype(int)
from_id = nextlinks['from_id'].unique()
# nextlinks.to_csv('../data/giscup_2021/nextlink_all.csv',index=False)
# nextlinks = pd.read_csv('../data/giscup_2021/nextlink_all.csv')
######################################nextlinks #######################################
if 'nextlinks_allday.csv' in os.listdir(root_path):
nextlinks = pd.read_csv(root_path + 'nextlinks_allday.csv')
else:
nextlinks_new = []
for name in os.listdir(root_path + 'train/'):
data_time = name.split('.')[0]
if data_time == '20200803':
continue
train = pd.read_csv(root_path + 'train/{}'.format(name),sep= ';;',header=None,nrows=nrows)
train_head = pd.DataFrame(train[0].str.split(' ').tolist(),
columns=['order_id', 'ata', 'distance', 'simple_eta', 'driver_id', 'slice_id'])
train_head['order_id'] = train_head['order_id'].astype(str)
train_head['ata'] = train_head['ata'].astype(float)
train_head['distance'] = train_head['distance'].astype(float)
train_head['simple_eta'] = train_head['simple_eta'].astype(float)
train_head['driver_id'] = train_head['driver_id'].astype(int)
train_head['slice_id'] = train_head['slice_id'].astype(int)
data_link = train[[1]]
print("flag:", 1)
data_link['index'] = train_head.index
data_link['order_id'] = train_head['order_id']
print("flag:", 2)
data_link['ata'] = train_head['ata']
data_link['distance'] = train_head['distance']
data_link['simple_eta'] = train_head['simple_eta']
print("flag:", 3)
data_link['slice_id'] = train_head['slice_id']
print("flag:", 4)
data_link_split = data_link[1].str.split(' ', expand=True).stack().to_frame()
print("flag:", 5)
data_link_split = data_link_split.reset_index(level=1, drop=True).rename(columns={0: 'link_info'})
print("flag:", 6)
data_link_split = data_link[['order_id', 'index', 'ata', 'distance', 'simple_eta', 'slice_id']].join(
data_link_split)
print("flag:", 7)
data_link_split = data_link_split.reset_index(drop=True)
data_link_split[['link_id',
'link_time',
'link_ratio',
'link_current_status',
'link_arrival_status']] = data_link_split['link_info'].str.split(':|,', 5, expand=True)
print("flag:", 8)
data_link_split = data_link_split[['order_id','link_id']]
data_link_split['link_id'] = data_link_split['link_id'].astype(int)
features = pd.DataFrame({'order_id': data_link_split['order_id'].unique()})
groupby = data_link_split.groupby(['order_id'])
func = partial(link_id_find)
g = parallel_apply(groupby, func, index_name='order_id', num_workers=5, chunk_size=10000)
g = pd.DataFrame(g,columns=['from_id','to_id'])
g = g.drop_duplicates()
nextlinks_new.append(g)
nextlinks_new = pd.concat(nextlinks_new, axis=0)
nextlinks_new = nextlinks_new.drop_duplicates()
nextlinks_new = nextlinks_new.sort_values(by='from_id').reset_index(drop=True)
nextlinks = pd.concat([nextlinks,nextlinks_new],axis=0)
nextlinks = nextlinks.drop_duplicates()
nextlinks = nextlinks.sort_values(by='from_id').reset_index(drop=True)
print('save all csv')
nextlinks.to_csv(root_path+'nextlinks_allday.csv',index=False)
    print('calculate weight')
nextlinks = nextlinks.sort_values(by='from_id').reset_index(drop=True)
nextlinks = nextlinks.drop_duplicates()
from_id_weight = nextlinks['from_id'].value_counts()
from_id_weight = from_id_weight.to_frame()
from_id_weight['index'] = from_id_weight.index
from_id_weight.columns = ['weight', 'from_id']
nextlinks = pd.merge(nextlinks, from_id_weight, 'left', on=['from_id'])
    print('calculate weight finish!')
nextlinks['to_id'] = nextlinks['to_id'].astype(int)
nextlinks['from_id'] = nextlinks['from_id'].astype(int)
id_key = list(set(nextlinks['from_id'].unique().tolist() + nextlinks['to_id'].unique().tolist()))
id_key_to_connected = dict(zip(id_key, range(len(id_key))))
nextlinks['from_id'] = nextlinks['from_id'].map(id_key_to_connected)
nextlinks['to_id'] = nextlinks['to_id'].map(id_key_to_connected)
np.save(root_path + 'id_key_to_connected_allday.npy', id_key_to_connected)
print('id key save finish!')
print('start creating graph')
G = nx.DiGraph()
from_id = nextlinks['from_id'].to_list()
to_id = nextlinks['to_id'].to_list()
weight = nextlinks['weight'].to_list()
edge_tuple = list(zip(from_id, to_id,weight))
# edge_tuple = tuple(from_id,to_id,weight)
print('adding')
G.add_weighted_edges_from(edge_tuple)
G = G.to_directed()
print('finish create graph!')
print('start train n2v')
look_back = list(G.nodes())
embeddings = {}
models = []
models.append(node2vec(d=128, max_iter=10, walk_len=80, num_walks=10, con_size=10, ret_p=1, inout_p=1))
for embedding in models:
Y, t = embedding.learn_embedding(graph=G, edge_f=None,
is_weighted=True, no_python=True)
for i, embedding in enumerate(embedding.get_embedding()):
embeddings[look_back[i]] = embedding
np.save(root_path+'graph_embeddings_retp1.npy', embeddings)
    print('nextlink graph embedding retp 1 finish!')
del models
gc.collect()
look_back = list(G.nodes())
embeddings = {}
models = []
models.append(node2vec(d=128, max_iter=10, walk_len=80, num_walks=10, con_size=10, ret_p=0.5, inout_p=1))
for embedding in models:
Y, t = embedding.learn_embedding(graph=G, edge_f=None,
is_weighted=True, no_python=True)
for i, embedding in enumerate(embedding.get_embedding()):
embeddings[look_back[i]] = embedding
np.save(root_path + 'graph_embeddings_retp05.npy', embeddings)
print('nextlink graph embedding retp 0.5 finish!')
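    # Loading sketch (illustrative, not in the original script): both saved dicts map
    # remapped node ids to 128-d vectors, keyed through the saved id map, e.g.
    #   id_map = np.load(root_path + 'id_key_to_connected_allday.npy', allow_pickle=True).item()
    #   emb = np.load(root_path + 'graph_embeddings_retp05.npy', allow_pickle=True).item()
    #   vector = emb[id_map[some_link_id]]  # some_link_id is a placeholder name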
|
{"/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/prepare_for_training.py": ["/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/normalize.py", "/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/generate_sinusoids.py"]}
|
17,375
|
iakirago/AiLearning-Theory-Applying
|
refs/heads/master
|
/机器学习竞赛实战_优胜解决方案/ACM SIGSPATIAL 2021 GISCUP/LGB_13700/5_model_final.py
|
#coding=utf-8
"""
Author: Aigege
Code: https://github.com/AiIsBetter
"""
# date 2021.08.01
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
import lightgbm as lgb
from utils import reduce_mem_usage,reduce_mem_usage_parallel,lgb_score_mape,MAPE
import gc
import warnings
import os,random,pickle
import optuna
warnings.filterwarnings("ignore")
def slice_id_change(x):
hour = x * 5 / 60
hour = np.floor(hour)
hour += 8
if hour >= 24:
hour = hour - 24
return hour
def optuna_print(tr_x, tr_y, te_x,te_y):
def objective(trial,tr_x, tr_y, te_x,te_y):
dtrain = lgb.Dataset(tr_x, label=tr_y)
dvalid = lgb.Dataset(te_x, label=te_y)
param = {
"objective": "regression",
"metric": "mape",
"verbosity": -1,
"boosting_type": "gbdt",
'min_split_gain': 0,
'random_state':2021,
'max_bin':trial.suggest_int('max_bin',63,250),
'subsample_for_bin': trial.suggest_int('subsample_for_bin', 40000, 300000),
"lambda_l1": trial.suggest_loguniform("lambda_l1", 1e-8, 10.0),
"lambda_l2": trial.suggest_loguniform("lambda_l2", 1e-8, 10.0),
"num_leaves": trial.suggest_int("num_leaves", 2, 256),
"feature_fraction": trial.suggest_uniform("feature_fraction", 0.4, 1.0),
"bagging_fraction": trial.suggest_uniform("bagging_fraction", 0.4, 1.0),
"bagging_freq": trial.suggest_int("bagging_freq", 1, 7),
"min_child_samples": trial.suggest_int("min_child_samples", 5, 100),
}
# Add a callback for pruning.
pruning_callback = optuna.integration.LightGBMPruningCallback(trial, "mape")
gbm = lgb.train(
param, dtrain, valid_sets=[dvalid], verbose_eval=False, callbacks=[pruning_callback]
)
preds = gbm.predict(te_x)
pred_labels = np.rint(preds)
mape = MAPE(te_y, pred_labels)
return mape
study = optuna.create_study(
pruner=optuna.pruners.MedianPruner(n_warmup_steps=10), direction="minimize"
)
study.optimize(lambda trial: objective(trial, tr_x, tr_y, te_x, te_y),
n_trials=100)
print("Number of finished trials: {}".format(len(study.trials)))
print("Best trial:")
trial = study.best_trial
print(" Value: {}".format(trial.value))
print(" Params: ")
for key, value in trial.params.items():
print(" {}: {}".format(key, value))
head_columns = ['order_id', 'ata', 'distance', 'simple_eta', 'driver_id','slice_id']
result = []
result_time_weight = []
result_dis_weight = []
count = 0
df = []
nrows=None
root_path = '../data/giscup_2021/'
data_list = ['20200818', '20200819', '20200820', '20200821', '20200822', '20200823', '20200824',
'20200825', '20200826', '20200827', '20200828', '20200829', '20200830', '20200831']
for name in os.listdir(root_path+'train/'):
data_time = name.split('.')[0]
if data_time not in data_list:
continue
train = pd.read_csv(root_path+'train/{}'.format(name),sep= ';;',header=None,nrows=nrows)
feature_cross = pd.read_csv(root_path+'feature/train/cross_fea_order_id_level_{}.csv'.format(data_time),nrows=nrows)
feature_link = pd.read_csv(root_path+'feature/train/link_fea_order_id_level_{}.csv'.format(data_time),nrows=nrows)
feature_head = pd.read_csv(root_path+'feature/train/head_link_{}.csv'.format(data_time),nrows=nrows)
feature_sqe = pd.read_csv(root_path + 'feature/train/{}.csv'.format(data_time),nrows=nrows)
feature_cross['order_id'] = feature_cross['order_id'].astype(str)
feature_link['order_id'] = feature_link['order_id'].astype(str)
feature_head['order_id'] = feature_head['order_id'].astype(str)
feature_sqe['order_id'] = feature_sqe['order_id'].astype(str)
print("开始处理", data_time)
# train.columns = ['head','link','cross']
# train['head'] = train['head'].apply(lambda x:x.split(' '))
train_head = pd.DataFrame(train[0].str.split(' ').tolist(),columns = ['order_id', 'ata', 'distance','simple_eta', 'driver_id', 'slice_id'])
train_head['order_id'] = train_head['order_id'].astype(str)
train_head['ata'] = train_head['ata'].astype(float)
train_head['distance'] = train_head['distance'].astype(float)
train_head['simple_eta'] = train_head['simple_eta'].astype(float)
train_head['driver_id'] = train_head['driver_id'].astype(int)
train_head['slice_id'] = train_head['slice_id'].astype(int)
train_head['date_time'] = int(data_time)
train_head = train_head.merge(feature_cross,on='order_id',how='left')
train_head = train_head.merge(feature_link,on='order_id',how='left')
feature_head = feature_head.drop(['ata', 'distance', 'simple_eta', 'driver_id', 'slice_id', 'index',
'date_time', 'link_count', 'link_time_sum', 'link_ratio_sum',
'date_time_dt', 'weekday', 'hour', 'weather', 'hightemp', 'lowtemp',
'len_tmp',
'link_time_mean', 'link_time_std'],
axis=1)
feature_sqe = feature_sqe.drop(['pre_arrival_status', 'arrive_slice_id', 'slice_id'], axis=1)
train_head = train_head.merge(feature_sqe, on='order_id', how='left')
train_head = train_head.merge(feature_head, on='order_id', how='left')
    print('merge finished!')
train_head = reduce_mem_usage_parallel(train_head,28)
df.append(train_head.drop('order_id',axis=1))
del train
gc.collect()
count +=1
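# Stack the per-day frames into a single training table.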
df = pd.concat(df,axis=0)
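# Build the test set (20200901) through the same merge pipeline as the training days.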
test = pd.read_csv(root_path+'20200901_test.txt',sep= ';;',header=None,nrows=nrows)
test_head = pd.DataFrame(test[0].str.split(' ').tolist(),columns = ['order_id', 'ata', 'distance','simple_eta', 'driver_id', 'slice_id'])
test_head['order_id'] = test_head['order_id'].astype(str)
test_head['ata'] = test_head['ata'].astype(float)
test_head['distance'] = test_head['distance'].astype(float)
test_head['simple_eta'] = test_head['simple_eta'].astype(float)
test_head['driver_id'] = test_head['driver_id'].astype(int)
test_head['slice_id'] = test_head['slice_id'].astype(int)
feature_cross = pd.read_csv(root_path + 'feature/test/cross_fea_order_id_level_{}.csv'.format('20200901'),nrows=nrows)
feature_link = pd.read_csv(root_path + 'feature/test/link_fea_order_id_level_{}.csv'.format('20200901'), nrows=nrows)
feature_head = pd.read_csv(root_path + 'feature/test/head_link_{}.csv'.format('20200901'),nrows=nrows)
feature_sqe = pd.read_csv(root_path + 'feature/test/{}.csv'.format('20200901'),nrows=nrows)
test_head['date_time'] = 20200901
feature_cross['order_id'] = feature_cross['order_id'].astype(str)
feature_link['order_id'] = feature_link['order_id'].astype(str)
feature_head['order_id'] = feature_head['order_id'].astype(str)
feature_sqe['order_id'] = feature_sqe['order_id'].astype(str)
test_head = test_head.merge(feature_cross, on='order_id', how='left')
test_head = test_head.merge(feature_link,on='order_id',how='left')
feature_head = feature_head.drop(['ata', 'distance', 'simple_eta', 'driver_id', 'slice_id', 'index',
'date_time', 'link_count', 'link_time_sum', 'link_ratio_sum',
'date_time_dt', 'weekday', 'hour', 'weather', 'hightemp', 'lowtemp',
'len_tmp',
'link_time_mean', 'link_time_std'],
axis=1)
feature_sqe = feature_sqe.drop(['pre_arrival_status', 'arrive_slice_id', 'slice_id'], axis=1)
test_head = test_head.merge(feature_sqe, on='order_id', how='left')
test_head = test_head.merge(feature_head, on='order_id', how='left')
test_head = reduce_mem_usage_parallel(test_head,28)
del feature_cross,feature_link
gc.collect()
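# Final design matrices: 'ata' is the regression target; order_id stays on test_head
# so the submission file can be written later.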
X_train = df.drop('ata',axis=1)
y_train = df['ata']
X_test = test_head.drop(['order_id','ata'],axis=1)
# Hyperparameter tuning (optional): uncomment to run the Optuna search on a 20% hold-out split.
#tr_x, te_x,tr_y,te_y = train_test_split(X_train,y_train,test_size=0.2,random_state=2021)
#optuna_print(tr_x, tr_y, te_x,te_y)
#del tr_x, te_x,tr_y,te_y
#gc.collect()
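# Out-of-fold training: 5-fold KFold with a fixed seed; each fold's test predictions
# are averaged into the per-model submission.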
folds = 5
skf = KFold(n_splits=folds, shuffle=True, random_state=2021)
train_mean = np.zeros(shape=[1,folds])
test_predict = np.zeros(shape=[X_test.shape[0], folds],dtype=float)
k_fold_mape = []
feature_importance_df = pd.DataFrame()
# Display/plot feature importance
def display_importances(feature_importance_df_):
feature_importance_df_.to_csv('feature_importances.csv',index=False)
cols = feature_importance_df_[["feature", "importance"]].groupby("feature").mean().sort_values(by="importance", ascending=False)[:100].index
best_features = feature_importance_df_.loc[feature_importance_df_.feature.isin(cols)]
best_features = best_features.groupby('feature',as_index = False)['importance'].mean()
best_features = best_features.sort_values(by = 'importance',ascending=False)
plt.figure(figsize=(8, 10))
sns.barplot(x="importance", y="feature", data=best_features.sort_values(by="importance", ascending=False))
plt.title('LightGBM Features (avg over folds)')
plt.tight_layout()
plt.savefig('feature_importances.jpg')
# plt.show()
# Use the single-model feature importances saved in best_feature_importances.csv to rank features.
feature_importance_df_ = pd.read_csv('best_feature_importances.csv')
cols = feature_importance_df_[["feature", "importance"]].groupby("feature").mean().sort_values(by="importance", ascending=False).index
best_features = feature_importance_df_.loc[feature_importance_df_.feature.isin(cols)]
best_features = best_features.groupby('feature',as_index = False)['importance'].mean()
best_features = best_features.sort_values(by = 'importance',ascending=False)
data=best_features.sort_values(by="importance", ascending=False)
feature_select = list(data['feature'].values)
feature_cols = feature_select
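# Randomized hyperparameter pools: each candidate list is shuffled, and run `iter`
# takes the iter-th element of every pool, giving 15 independent configurations.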
random_seed = list(range(2021))
max_depth = [4,4,4,4,5,5,5,5,6,6,6,6,7,7,7]
lambd1 = np.arange(0, 1, 0.0001)
lambd2 = np.arange(0, 1, 0.0001)
bagging_fraction = [i / 1000.0 for i in range(700, 800)]
feature_fraction = [i / 1000.0 for i in range(700, 800)]
min_child_weight = [i / 100.0 for i in range(150, 250)]
n_feature = [i / 100.0 for i in range(1, 32,2)]
max_bin = list(range(130, 240))
subsample_for_bin = list(range(50000, 220000,10000))
bagging_freq = [1,2,3,4,5,6,7,8,9,10,1,2,3,4,5]
num_leaves = list(range(130, 250))
random.shuffle(random_seed)
random.shuffle(max_depth)
random.shuffle(lambd1)
random.shuffle(lambd2)
random.shuffle(bagging_fraction)
random.shuffle(feature_fraction)
random.shuffle(min_child_weight)
random.shuffle(max_bin)
random.shuffle(subsample_for_bin)
random.shuffle(bagging_freq)
random.shuffle(num_leaves)
random.shuffle(n_feature)
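# Persist the shuffled pools so the 15 sampled configurations can be reproduced later.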
with open('params.pkl', 'wb') as f:
pickle.dump((random_seed, max_depth, lambd1,lambd2, bagging_fraction, feature_fraction, min_child_weight, max_bin,subsample_for_bin,bagging_freq,num_leaves,n_feature), f)
for iter in range(15):
print('max_depth:',max_depth[iter],'random_seed:',random_seed[iter],'feature_fraction:',feature_fraction[iter],
'bagging_fraction:',bagging_fraction[iter],'min_child_weight:',min_child_weight[iter],
'lambd1:',lambd1[iter],'lambd2:',lambd2[iter],'max_bin:',max_bin[iter],'num_leaves:',num_leaves[iter]
,'subsample_for_bin:',subsample_for_bin[iter],'bagging_freq:',bagging_freq[iter],'n_feature:',n_feature[iter])
nround = 5000
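# Train 15 randomized LightGBM models; shallower trees get more boosting rounds to
# compensate (early stopping still decides the effective count).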
for iter in range(15):
if max_depth[iter]==4:
nround = 10000
elif max_depth[iter]==5:
nround = 8000
elif max_depth[iter]==6:
nround = 6000
elif max_depth[iter] == 7:
nround = 5000
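    # Feature subset for this run: always keep the top 70% of ranked features, then
    # append a random-sized slice of the remainder controlled by n_feature[iter].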
X_train_r = X_train[feature_cols[:int(len(feature_cols)*0.7)]+
feature_cols[int(len(feature_cols)*0.7):int(len(feature_cols)*0.7)+int(len(feature_cols)*n_feature[iter])]]
X_test_r = X_test[feature_cols[:int(len(feature_cols) * 0.7)] +
feature_cols[int(len(feature_cols) * 0.7):int(len(feature_cols) * 0.7) + int(
len(feature_cols) * n_feature[iter])]]
scores = 0
threshold = 0
print('start training......')
    print('Training set shape:', X_train_r.shape)
    print('Test set shape:', X_test_r.shape)
for i, (trn_idx, val_idx) in enumerate(skf.split(X_train_r, y_train)):
clf = lgb.LGBMRegressor(
boosting_type='gbdt',
objective='regression',
n_estimators=nround,
learning_rate=0.08,
num_leaves=num_leaves[iter],
max_bin=max_bin[iter],
max_depth=max_depth[iter],
random_state=random_seed[iter],
subsample_for_bin=subsample_for_bin[iter],
feature_fraction=feature_fraction[iter],
bagging_fraction=bagging_fraction[iter],
bagging_freq=bagging_freq[iter],
min_child_weight=min_child_weight[iter],
lambda_l1=lambd1[iter],
lambda_l2=lambd2[iter],
metric=None,
n_jobs=30,
device='gpu'
)
clf.fit(X_train_r.iloc[trn_idx], y_train.iloc[trn_idx], eval_set=[(X_train_r.iloc[trn_idx], y_train.iloc[trn_idx]), (X_train_r.iloc[val_idx], y_train.iloc[val_idx])],eval_metric='mape',verbose=100, early_stopping_rounds=200)
print('predicting')
val_predict = clf.predict(X_train_r.iloc[val_idx], num_iteration=clf.best_iteration_)
test_predict[:,i] = clf.predict(X_test_r, num_iteration=clf.best_iteration_)
k_fold_mape.append(MAPE(y_train.iloc[val_idx],val_predict))
print("kfold_{}_mape_score:{} ".format(i, k_fold_mape[i]))
print('Train set kfold {} mean mape:'.format(i), np.mean(k_fold_mape))
#display_importances(feature_importance_df)
test_head['result'] = np.mean(test_predict,axis=1)
test_head['id'] = test_head['order_id']
test_head[['id','result']].to_csv('random_result/submission_{}.csv'.format(iter),index=False)
del X_train_r,X_test_r
gc.collect()
# Merge: average the per-model submissions into the final blended prediction.
count = 0
result = None  # placeholder; replaced by the first submission's id column below
for name in os.listdir('random_result/'):
tmp = pd.read_csv('random_result/'+name)
if count == 0:
result = tmp[['id']]
tmp = tmp.rename(columns={'result':'result{}'.format(count)})
result = result.merge(tmp,on='id',how='left')
count += 1
result['result'] = result.drop('id',axis=1).sum(axis=1)
result['result'] = result['result']/count
result[['id','result']].to_csv('submission_merge.csv',index=False)
|
{"/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/prepare_for_training.py": ["/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/normalize.py", "/\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u539f\u7406\u53ca\u63a8\u5bfc/\u5176\u5b83/\u7b2c\u4e8c\u7ae0\u2014\u2014\u624b\u5199\u7ebf\u6027\u56de\u5f52\u7b97\u6cd5/util/features/generate_sinusoids.py"]}
|
17,427
|
seizans/sandbox-django
|
HEAD
|
/sandbox/settings/_stg.py
|
# coding=utf8
# Settings shared across staging environments
# Imported with * after the base settings, so these values override the base ones
ALLOWED_HOSTS = ['<hostname used in the staging environment>']
SECRET_KEY = 'n+i_fly3y8v%(hgp#n(9h3@brw6qjiae)$gauqd)mee1t3dp1u'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'dbname',
'USER': 'dbuser',
'PASSWORD': 'password',
'HOST': 'hostname',
'PORT': '',
}
}
|
{"/sandbox/core/search_indexes.py": ["/sandbox/core/models.py"], "/sandbox/settings/store_stg.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/store_dev.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_dev.py"], "/sandbox/settings/_store_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/_back_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/back_stg.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/back_dev.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_dev.py"]}
|
17,428
|
seizans/sandbox-django
|
HEAD
|
/sandbox/api/jsonschemas.py
|
# coding=utf8
notes_requests = {
}
notes_response = {
'type': 'object',
'required': ['hoge'],
'properties': {
'hoge': {'type': 'string'},
'mado': {
'type': 'array',
'minItems': 1,
'items': {'type': 'integer'},
'uniqueItems': True,
},
'three': {'type': 'integer'},
},
}
notes2_response = notes_response
|
{"/sandbox/core/search_indexes.py": ["/sandbox/core/models.py"], "/sandbox/settings/store_stg.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/store_dev.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_dev.py"], "/sandbox/settings/_store_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/_back_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/back_stg.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/back_dev.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_dev.py"]}
|
17,429
|
seizans/sandbox-django
|
HEAD
|
/sandbox/core/search_indexes.py
|
# coding=utf8
from celery_haystack.indexes import CelerySearchIndex
from django.utils import timezone
from haystack import indexes
from .models import Note
class NoteIndex(CelerySearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=False)
title = indexes.CharField(model_attr='title')
author = indexes.CharField(model_attr='author')
created = indexes.DateTimeField(model_attr='created')
updated = indexes.DateTimeField(model_attr='updated')
def get_model(self):
return Note
def index_queryset(self, using=None):
return self.get_model().objects.filter(updated__lte=timezone.now())
|
{"/sandbox/core/search_indexes.py": ["/sandbox/core/models.py"], "/sandbox/settings/store_stg.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/store_dev.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_dev.py"], "/sandbox/settings/_store_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/_back_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/back_stg.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/back_dev.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_dev.py"]}
|
17,430
|
seizans/sandbox-django
|
HEAD
|
/sandbox/store/views.py
|
# coding=utf8
from django.core.urlresolvers import reverse_lazy
from django.shortcuts import render
from django.views.generic.edit import CreateView
from haystack.query import SearchQuerySet
from core.models import Note
from core.search_indexes import NoteIndex
from .tasks import add
def hello(request):
result = add.delay(3, 8)
while not result.ready():
print 'hoge'
print result.get()
d = {'from_hello_view': 'From Hello View'}
return render(request, 'store/hello.html', d)
def insert_note(request):
note1 = Note.objects.create(title=u'タイトル', author=u'著者', content=u'内容')
NoteIndex().update_object(note1)
note2 = Note.objects.create(title='title1', author='author1', content='content1')
NoteIndex().update_object(note2)
d = {'from_hello_view': 'From Hello View'}
return render(request, 'store/hello.html', d)
class NoteView(CreateView):
model = Note
template_name = 'store/note.html'
success_url = reverse_lazy('note')
def get_context_data(self, **kwargs):
context = super(NoteView, self).get_context_data(**kwargs)
context['notes'] = Note.objects.all()
context['query'] = SearchQuerySet().models(Note).filter(title=u'定食')
return context
note = NoteView.as_view()
|
{"/sandbox/core/search_indexes.py": ["/sandbox/core/models.py"], "/sandbox/settings/store_stg.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/store_dev.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_dev.py"], "/sandbox/settings/_store_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/_back_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/back_stg.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/back_dev.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_dev.py"]}
|
17,431
|
seizans/sandbox-django
|
HEAD
|
/sandbox/core/middleware.py
|
# coding=utf8
from django.core.urlresolvers import resolve
class JsonSchemaValidateMiddleware(object):
def process_request(self, request):
print request.path_info
resolver_match = resolve(request.path_info)
print resolver_match.url_name
print resolver_match.func
print resolver_match.view_name
print resolver_match.namespace
print resolver_match.namespaces
def process_view(self, request, view_func, view_args, view_kwargs):
pass
def process_response(self, request, response):
return response
|
{"/sandbox/core/search_indexes.py": ["/sandbox/core/models.py"], "/sandbox/settings/store_stg.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/store_dev.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_dev.py"], "/sandbox/settings/_store_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/_back_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/back_stg.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/back_dev.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_dev.py"]}
|
17,432
|
seizans/sandbox-django
|
HEAD
|
/sandbox/api/tests.py
|
# coding=utf8
from django.test import TestCase
import jsonschema
import simplejson as json
from . import jsonschemas
class JsonSchemaTestCase(TestCase):
def assertSchema(self, schema, content):
try:
jsonschema.validate(json.loads(content), schema)
except json.JSONDecodeError as e:
            # Raised when the body is not valid JSON
self.fail(e.message)
except jsonschema.ValidationError as e:
if e.validator == 'required':
self.fail(e.message)
else:
self.fail(e.message)
class NotesTest(JsonSchemaTestCase):
def test_schema(self):
response = self.client.get('/api/notes')
self.assertEqual(response.status_code, 200)
self.assertSchema(jsonschemas.notes_response, response.content)
self.assertEqual(response['Content-Type'], 'application/json')
class Notes2Test(JsonSchemaTestCase):
def test_schema(self):
response = self.client.get('/api/notes2')
self.assertEqual(response.status_code, 200)
self.assertSchema(jsonschemas.notes2_response, response.content)
self.assertEqual(response['Content-Type'], 'application/json')
# print response.content
# data = json.loads(response.content)
# jsonschema.validate(data, schema)
|
{"/sandbox/core/search_indexes.py": ["/sandbox/core/models.py"], "/sandbox/settings/store_stg.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/store_dev.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_dev.py"], "/sandbox/settings/_store_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/_back_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/back_stg.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/back_dev.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_dev.py"]}
|
17,433
|
seizans/sandbox-django
|
HEAD
|
/sandbox/store/urls.py
|
from django.conf.urls import patterns, url
urlpatterns = patterns(
'store.views',
url(r'^hello$', 'hello'),
url(r'^note$', 'note', name='note'),
url(r'^insert-note$', 'insert_note'),
)
|
{"/sandbox/core/search_indexes.py": ["/sandbox/core/models.py"], "/sandbox/settings/store_stg.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/store_dev.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_dev.py"], "/sandbox/settings/_store_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/_back_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/back_stg.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/back_dev.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_dev.py"]}
|
17,434
|
seizans/sandbox-django
|
HEAD
|
/sandbox/api/urls.py
|
from django.conf.urls import include, patterns, url
from rest_framework import viewsets, routers
from core.models import Note
class NoteViewSet(viewsets.ModelViewSet):
model = Note
router = routers.DefaultRouter()
router.register(r'notes', NoteViewSet)
urlpatterns = patterns(
'api.views',
url('^notes$', 'notes'),
url('^notes2$', 'notes2'),
url('^rest/', include(router.urls)),
url('^auth/', include('rest_framework.urls', namespace='rest_framework')),
)
|
{"/sandbox/core/search_indexes.py": ["/sandbox/core/models.py"], "/sandbox/settings/store_stg.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/store_dev.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_dev.py"], "/sandbox/settings/_store_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/_back_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/back_stg.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/back_dev.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_dev.py"]}
|
17,435
|
seizans/sandbox-django
|
HEAD
|
/sandbox/settings/_base.py
|
# coding=utf8
# 全アプリ、全環境に共通の設定
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'haystack',
'elasticstack',
'celery_haystack',
'core',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'core.middleware.JsonSchemaValidateMiddleware',
)
REST_FRAMEWORK = {
# Use hyperlinked styles by default.
# Only used if the `serializer_class` attribute is not set on a view.
'DEFAULT_MODEL_SERIALIZER_CLASS':
'rest_framework.serializers.HyperlinkedModelSerializer',
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
]
}
WSGI_APPLICATION = 'core.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'ja'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# haystack
HAYSTACK_SIGNAL_PROCESSOR = 'celery_haystack.signals.CelerySignalProcessor'
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'elasticstack.backends.ConfigurableElasticSearchEngine',
'URL': '127.0.0.1:9200',
'INDEX_NAME': 'sandbox',
},
}
ELASTICSEARCH_DEFAULT_ANALYZER = 'kuromoji_analyzer'
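# Custom analyzers: kuromoji for Japanese tokenization (kuromoji_tokenizer + lowercase
# filter), plus ngram/edge-ngram analyzers backed by the filters defined below.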
ELASTICSEARCH_INDEX_SETTINGS = {
'settings': {
"analysis": {
"analyzer": {
"kuromoji_analyzer": {
"type": "custom",
"tokenizer": "kuromoji_tokenizer",
"filter": "lowercase",
},
"ngram_analyzer": {
"type": "custom",
"tokenizer": "lowercase",
"filter": ["haystack_ngram"],
},
"edgengram_analyzer": {
"type": "custom",
"tokenizer": "lowercase",
"filter": ["haystack_edgengram"],
}
},
"tokenizer": {
"haystack_ngram_tokenizer": {
"type": "nGram",
"min_gram": 3,
"max_gram": 15,
},
"haystack_edgengram_tokenizer": {
"type": "edgeNGram",
"min_gram": 2,
"max_gram": 15,
"side": "front",
},
},
"filter": {
"haystack_ngram": {
"type": "nGram",
"min_gram": 3,
"max_gram": 15,
},
"haystack_edgengram": {
"type": "edgeNGram",
"min_gram": 5,
"max_gram": 15,
},
},
},
},
}
# For tests
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = [
'--cover-html',
'--with-coverage',
'--cover-package=core,store',
]
|
{"/sandbox/core/search_indexes.py": ["/sandbox/core/models.py"], "/sandbox/settings/store_stg.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/store_dev.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_dev.py"], "/sandbox/settings/_store_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/_back_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/back_stg.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/back_dev.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_dev.py"]}
|
17,436
|
seizans/sandbox-django
|
HEAD
|
/sandbox/back/views.py
|
# coding=utf8
from django.shortcuts import render
def hello(request):
d = {'back_hello_string': 'HELLO BACK APPLICATION'}
return render(request, 'back/hello.html', d)
|
{"/sandbox/core/search_indexes.py": ["/sandbox/core/models.py"], "/sandbox/settings/store_stg.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/store_dev.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_dev.py"], "/sandbox/settings/_store_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/_back_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/back_stg.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/back_dev.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_dev.py"]}
|
17,437
|
seizans/sandbox-django
|
HEAD
|
/sandbox/core/models.py
|
# coding=utf8
from django.db import models
class Note(models.Model):
title = models.CharField(max_length=255, null=False, blank=False)
author = models.CharField(max_length=255, null=False, blank=False)
content = models.TextField(null=False, blank=False)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Company(models.Model):
class Meta:
db_table = 'company'
name = models.CharField(max_length=100)
class Staff(models.Model):
class Meta:
db_table = 'staff'
name = models.CharField(max_length=100)
belong = models.OneToOneField('Company')
# belong = models.ForeignKey('Company')
company_name = models.CharField(max_length=100)
|
{"/sandbox/core/search_indexes.py": ["/sandbox/core/models.py"], "/sandbox/settings/store_stg.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/store_dev.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_dev.py"], "/sandbox/settings/_store_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/_back_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/back_stg.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/back_dev.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_dev.py"]}
|
17,438
|
seizans/sandbox-django
|
HEAD
|
/sandbox/store/tests/test_01.py
|
# coding=utf8
from django.test import TestCase
from core.factories import StaffFactory
class FactoryBoyTest(TestCase):
def setUp(self):
pass
def test_factory(self):
staff1 = StaffFactory()
print staff1.name
print staff1.belong.name
print staff1.company_name
staff2 = StaffFactory()
print staff2.name
print staff2.belong.name
print staff2.company_name
|
{"/sandbox/core/search_indexes.py": ["/sandbox/core/models.py"], "/sandbox/settings/store_stg.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/store_dev.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_dev.py"], "/sandbox/settings/_store_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/_back_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/back_stg.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/back_dev.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_dev.py"]}
|
17,439
|
seizans/sandbox-django
|
HEAD
|
/sandbox/settings/store_stg.py
|
# coding=utf8
# Staging settings for the store (front-facing) application
from ._store_base import * # NOQA
from ._stg import * # NOQA
|
{"/sandbox/core/search_indexes.py": ["/sandbox/core/models.py"], "/sandbox/settings/store_stg.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/store_dev.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_dev.py"], "/sandbox/settings/_store_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/_back_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/back_stg.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/back_dev.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_dev.py"]}
|
17,440
|
seizans/sandbox-django
|
HEAD
|
/sandbox/settings/store_dev.py
|
# coding=utf8
# Development settings for the store (front-facing) application
from ._store_base import * # NOQA
from ._dev import * # NOQA
# Append the extra apps defined in ._dev
INSTALLED_APPS += INSTALLED_APPS_PLUS
|
{"/sandbox/core/search_indexes.py": ["/sandbox/core/models.py"], "/sandbox/settings/store_stg.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/store_dev.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_dev.py"], "/sandbox/settings/_store_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/_back_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/back_stg.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/back_dev.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_dev.py"]}
|
17,441
|
seizans/sandbox-django
|
HEAD
|
/sandbox/back/urls.py
|
# coding=utf8
from django.conf.urls import patterns, url
urlpatterns = patterns(
'back.views',
url(r'^hello$', 'hello'),
)
|
{"/sandbox/core/search_indexes.py": ["/sandbox/core/models.py"], "/sandbox/settings/store_stg.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/store_dev.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_dev.py"], "/sandbox/settings/_store_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/_back_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/back_stg.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/back_dev.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_dev.py"]}
|
17,442
|
seizans/sandbox-django
|
HEAD
|
/sandbox/settings/_store_base.py
|
# coding=utf8
# Settings shared by the store (front-facing) application
from ._base import * # NOQA
ROOT_URLCONF = 'core.store_urls'
SESSION_COOKIE_AGE = 60 * 60 * 24 * 30  # 30 days
INSTALLED_APPS += (
'store',
)
|
{"/sandbox/core/search_indexes.py": ["/sandbox/core/models.py"], "/sandbox/settings/store_stg.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/store_dev.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_dev.py"], "/sandbox/settings/_store_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/_back_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/back_stg.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/back_dev.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_dev.py"]}
|
17,443
|
seizans/sandbox-django
|
HEAD
|
/sandbox/api/views.py
|
# coding=utf8
from django.http import JsonResponse
def notes(request):
content = {
'hoge': 'fuga',
'mado': [1, 3, 5],
}
return JsonResponse(content)
from django.http import HttpResponse
import simplejson as json
def notes2(request):
content = {
'hoge': 'fuga',
'mado': [1, 3, 5],
}
body = json.dumps(content)
return HttpResponse(body, content_type='application/json')
|
{"/sandbox/core/search_indexes.py": ["/sandbox/core/models.py"], "/sandbox/settings/store_stg.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/store_dev.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_dev.py"], "/sandbox/settings/_store_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/_back_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/back_stg.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/back_dev.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_dev.py"]}
|
17,444
|
seizans/sandbox-django
|
HEAD
|
/sandbox/settings/_dev.py
|
# coding=utf8
# Settings shared across development environments
# Imported with * after the base settings, so these values override the base ones
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'n+i_fly3y8v%(hgp#n(9h3@brw6qjiae)$gauqd)mee1t3dp1u'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
INSTALLED_APPS_PLUS = (
'debug_toolbar',
)
# Celery
BROKER_URL = 'redis://localhost'
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_BACKEND = 'redis'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']
|
{"/sandbox/core/search_indexes.py": ["/sandbox/core/models.py"], "/sandbox/settings/store_stg.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/store_dev.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_dev.py"], "/sandbox/settings/_store_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/_back_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/back_stg.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/back_dev.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_dev.py"]}
|
17,445
|
seizans/sandbox-django
|
HEAD
|
/sandbox/settings/_back_base.py
|
# coding=utf8
# Settings shared by the back (admin) application
from ._base import * # NOQA
ROOT_URLCONF = 'core.back_urls'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True  # In the admin app, expire the session when the browser closes
INSTALLED_APPS += (
'django.contrib.admin',
'back',
)
|
{"/sandbox/core/search_indexes.py": ["/sandbox/core/models.py"], "/sandbox/settings/store_stg.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/store_dev.py": ["/sandbox/settings/_store_base.py", "/sandbox/settings/_dev.py"], "/sandbox/settings/_store_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/_back_base.py": ["/sandbox/settings/_base.py"], "/sandbox/settings/back_stg.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_stg.py"], "/sandbox/settings/back_dev.py": ["/sandbox/settings/_back_base.py", "/sandbox/settings/_dev.py"]}
|