content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from abc import ABC, abstractmethod
from enum import Enum, auto
from functools import reduce, singledispatch
from typing import Any, Generic, TypeVar
from iceberg.files import StructProtocol
from iceberg.schema import Accessor, Schema
from iceberg.types import NestedField
from iceberg.utils.singleton import Singleton
T = TypeVar("T")
class Operation(Enum):
"""Operations to be used as components in expressions
Operations can be negated by calling the negate method.
>>> Operation.TRUE.negate()
<Operation.FALSE: 2>
>>> Operation.IS_NULL.negate()
<Operation.NOT_NULL: 4>
The above example uses the OPERATION_NEGATIONS map which maps each enum
to it's opposite enum.
Raises:
ValueError: This is raised when attempting to negate an operation
that cannot be negated.
"""
TRUE = auto()
FALSE = auto()
IS_NULL = auto()
NOT_NULL = auto()
IS_NAN = auto()
NOT_NAN = auto()
LT = auto()
LT_EQ = auto()
GT = auto()
GT_EQ = auto()
EQ = auto()
NOT_EQ = auto()
IN = auto()
NOT_IN = auto()
NOT = auto()
AND = auto()
OR = auto()
def negate(self) -> "Operation":
"""Returns the operation used when this is negated."""
try:
return OPERATION_NEGATIONS[self]
except KeyError as e:
raise ValueError(f"No negation defined for operation {self}") from e
OPERATION_NEGATIONS = {
Operation.TRUE: Operation.FALSE,
Operation.FALSE: Operation.TRUE,
Operation.IS_NULL: Operation.NOT_NULL,
Operation.NOT_NULL: Operation.IS_NULL,
Operation.IS_NAN: Operation.NOT_NAN,
Operation.NOT_NAN: Operation.IS_NAN,
Operation.LT: Operation.GT_EQ,
Operation.LT_EQ: Operation.GT,
Operation.GT: Operation.LT_EQ,
Operation.GT_EQ: Operation.LT,
Operation.EQ: Operation.NOT_EQ,
Operation.NOT_EQ: Operation.EQ,
Operation.IN: Operation.NOT_IN,
Operation.NOT_IN: Operation.IN,
}
class Literal(Generic[T], ABC):
"""Literal which has a value and can be converted between types"""
@property
@abstractmethod
class BooleanExpression(ABC):
"""base class for all boolean expressions"""
@abstractmethod
class And(BooleanExpression):
"""AND operation expression - logical conjunction"""
@property
@property
class Or(BooleanExpression):
"""OR operation expression - logical disjunction"""
@property
@property
class Not(BooleanExpression):
"""NOT operation expression - logical negation"""
class AlwaysTrue(BooleanExpression, ABC, Singleton):
"""TRUE expression"""
class AlwaysFalse(BooleanExpression, ABC, Singleton):
"""FALSE expression"""
class BoundReference:
"""A reference bound to a field in a schema
Args:
field (NestedField): A referenced field in an Iceberg schema
accessor (Accessor): An Accessor object to access the value at the field's position
"""
@property
def field(self) -> NestedField:
"""The referenced field"""
return self._field
def eval(self, struct: StructProtocol) -> Any:
"""Returns the value at the referenced field's position in an object that abides by the StructProtocol
Args:
struct (StructProtocol): A row object that abides by the StructProtocol and returns values given a position
Returns:
Any: The value at the referenced field's position in `struct`
"""
return self._accessor.get(struct)
class UnboundReference:
"""A reference not yet bound to a field in a schema
Args:
name (str): The name of the field
Note:
An unbound reference is sometimes referred to as a "named" reference
"""
@property
def bind(self, schema: Schema, case_sensitive: bool) -> BoundReference:
"""Bind the reference to an Iceberg schema
Args:
schema (Schema): An Iceberg schema
case_sensitive (bool): Whether to consider case when binding the reference to the field
Raises:
ValueError: If an empty name is provided
Returns:
BoundReference: A reference bound to the specific field in the Iceberg schema
"""
field = schema.find_field(name_or_id=self.name, case_sensitive=case_sensitive)
if not field:
raise ValueError(f"Cannot find field '{self.name}' in schema: {schema}")
return BoundReference(field=field, accessor=schema.accessor_for_field(field.field_id))
@singledispatch
def visit(obj, visitor: BooleanExpressionVisitor[T]) -> T:
"""A generic function for applying a boolean expression visitor to any point within an expression
The function traverses the expression in post-order fashion
Args:
obj(BooleanExpression): An instance of a BooleanExpression
visitor(BooleanExpressionVisitor[T]): An instance of an implementation of the generic BooleanExpressionVisitor base class
Raises:
NotImplementedError: If attempting to visit an unsupported expression
"""
raise NotImplementedError(f"Cannot visit unsupported expression: {obj}")
@visit.register(AlwaysTrue)
def _(obj: AlwaysTrue, visitor: BooleanExpressionVisitor[T]) -> T:
"""Visit an AlwaysTrue boolean expression with a concrete BooleanExpressionVisitor"""
return visitor.visit_true()
@visit.register(AlwaysFalse)
def _(obj: AlwaysFalse, visitor: BooleanExpressionVisitor[T]) -> T:
"""Visit an AlwaysFalse boolean expression with a concrete BooleanExpressionVisitor"""
return visitor.visit_false()
@visit.register(Not)
def _(obj: Not, visitor: BooleanExpressionVisitor[T]) -> T:
"""Visit a Not boolean expression with a concrete BooleanExpressionVisitor"""
child_result: T = visit(obj.child, visitor=visitor)
return visitor.visit_not(child_result=child_result)
@visit.register(And)
def _(obj: And, visitor: BooleanExpressionVisitor[T]) -> T:
"""Visit an And boolean expression with a concrete BooleanExpressionVisitor"""
left_result: T = visit(obj.left, visitor=visitor)
right_result: T = visit(obj.right, visitor=visitor)
return visitor.visit_and(left_result=left_result, right_result=right_result)
@visit.register(Or)
def _(obj: Or, visitor: BooleanExpressionVisitor[T]) -> T:
"""Visit an Or boolean expression with a concrete BooleanExpressionVisitor"""
left_result: T = visit(obj.left, visitor=visitor)
right_result: T = visit(obj.right, visitor=visitor)
return visitor.visit_or(left_result=left_result, right_result=right_result)
| [
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
670,
329,
3224,
1321,
198,
2,
5115,
6634,
9238,
13,
220,
383,
7054,... | 2.985425 | 2,470 |
from osrsmath.general.skills import *
import unittest
| [
6738,
28686,
3808,
11018,
13,
24622,
13,
8135,
2171,
1330,
1635,
201,
198,
11748,
555,
715,
395,
201,
198,
201,
198
] | 2.761905 | 21 |
import socket
import serial
import time
import sys
import glob
import signal
from sys import exit
address = '127.0.0.1'
port = 8080
def serial_ports():
""" Lists serial port names
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of the serial ports available on the system
"""
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
serialCom = serial.Serial(port)
serialCom.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
signal.signal(signal.SIGINT, handler) # ctlr + c
signal.signal(signal.SIGTSTP, handler) # ctlr + z
global server
# next create a socket object
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Socket successfully created.")
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind((address, port))
print("Socket binded to %s." %(port))
# put the socket into listening mode
server.listen(5)
print("Socket is listening.")
openSerial()
while True:
# Establish connection with client.
try:
c, addr = server.accept()
except:
# server has been closed
break
with c:
print('Connected by', addr)
while True:
try:
x = ser.read(1) # read one byte
# print(type(x))
print(int.from_bytes(x, "big"))
except Exception as e:
print("Serial communication lost.")
print(e)
openSerial()
break
try:
c.send(x)
# pass
except:
break
#x = b'1'
# read serial
# if not data: break
#sleep(1)
print("Client disconnected.")
| [
11748,
17802,
198,
11748,
11389,
198,
11748,
640,
220,
220,
220,
198,
11748,
25064,
198,
11748,
15095,
198,
11748,
6737,
198,
6738,
25064,
1330,
8420,
198,
198,
21975,
796,
705,
16799,
13,
15,
13,
15,
13,
16,
6,
198,
634,
796,
4019,
... | 2.097865 | 1,124 |
import socket
import thread
import time
__author__ = "Sushant Raikar"
__email__ = "sushantraikar123@yahoo.com"
class SocketClient:
"""
=================
Pub Sub Generic Client
=================
Description: This is a generic client implementation. All interaction
with the broker is done through this class. It continuously listens
for published messages in a thread, provides api for publishing mess-
ages. A client can subscribe to more than one channels at a time.
API:
publish(channel_name, message)
uses broker's PUB API.
subscribe(channel_name)
uses broker's SUB API.
exiter()
uses broker's EXIT API.
set_callback(function)
function will be triggered with the message, ie. function(message)
,when a message is received from subscribed channel.
"""
def __init__(self, host, port):
"""
Initializes client with host and port. Starts a new thread for li-
stening to incoming messages.
"""
self.host = host
self.port = port
self.callback = None
self.sock = socket.socket()
self.sock.connect((host, port))
thread.start_new_thread(SocketClient.clientthread,(self.sock, self.__message_received_callback))
@staticmethod
def clientthread(conn, callback):
"""
Listens for incoming message.
Raises RuntimeError, if server connection breaks abruptly.
"""
while True:
try:
data = conn.recv(1024)
callback(data)
except:
raise RuntimeError("Server crashed")
conn.close()
def __message_received_callback(self, msg):
"""
Triggers callback function if its set.
"""
if self.callback:
self.callback(msg)
def __send(self, data):
"""
Send function, sleep after sending to avoid socket combining con-
secutive messages.
"""
self.sock.send(data)
time.sleep(0.01)
def set_callback(self, fn):
"""
Api for setting callback function.
"""
self.callback = fn
def publish(self, channel, msg):
"""
Api for publishing message.
"""
send_data = "PUB %s %s"%(channel, msg)
self.__send(send_data)
def subscribe(self, channel):
"""
Api for subscribing to a channel.
"""
send_data = "SUB %s"%(channel)
self.__send(send_data)
def exiter(self):
"""
Api for closing connection.
"""
send_data = "EXIT "
self.__send(send_data)
class Publisher:
"""
=================
Pub Sub Publisher
=================
Description: This is a wrapper over client implementation, for publisher
specific events. Publisher is initialized with a channel name. All mess-
ages are published only on this channel.
API:
send(message)
publishes message on the channel.
stop()
stop connection.
"""
class Subscriber:
"""
=================
Pub Sub Subscriber
=================
Description: This is a wrapper over client implementation, for subscrib-
er specific events. Subscriber is initialized with a channel name. All
messages received will only be from this channel. This class also provi-
des api for setting callback. If callback is not set, messages received
are stored in a message queue. Subsequent calls to recv(), will dequeue
messages one at a time. It is recommended to use recv() and set_callback
exclusively.
API:
recv()
Checks if there are any messages in message queue. If callback is s-
et this api will return None.
set_callback(function)
triggers `function(message)`.
stop()
disconnect and stop receiving messages.
"""
| [
11748,
17802,
198,
11748,
4704,
198,
11748,
640,
198,
198,
834,
9800,
834,
796,
366,
50,
1530,
415,
7567,
1134,
283,
1,
198,
834,
12888,
834,
796,
366,
82,
1530,
415,
430,
1134,
283,
10163,
31,
40774,
13,
785,
1,
198,
198,
4871,
4... | 2.565977 | 1,546 |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Accelerating.
Provide auto accelerating for network, such as Less BN, Gradient Freeze.
"""
from .acc import *
from .base import *
from .less_batch_normalization import *
from .grad_freeze import *
__all__ = ['AutoAcc',
'OptimizerProcess', 'ParameterProcess',
'LessBN',
'GradientFreeze', 'FreezeOpt', 'freeze_cell',
'GradientAccumulation']
| [
2,
15069,
33448,
43208,
21852,
1766,
1539,
12052,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198... | 3.476974 | 304 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, re
from frappe.website.website_generator import WebsiteGenerator
from frappe.website.render import clear_cache
from frappe import _
from frappe.utils import today
| [
2,
15069,
357,
66,
8,
2211,
11,
5313,
11822,
21852,
18367,
83,
13,
12052,
13,
290,
25767,
669,
198,
2,
17168,
13789,
13,
4091,
5964,
13,
14116,
198,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
5... | 3.521739 | 92 |
#Siege
import bs
import bsUtils
import random
| [
2,
50,
14566,
198,
11748,
275,
82,
198,
11748,
275,
82,
18274,
4487,
198,
11748,
4738,
198
] | 2.705882 | 17 |
import serial | [
11748,
11389
] | 6.5 | 2 |
# encoding: utf-8
from __future__ import print_function
from functools import wraps
import numpy as np
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
import matplotlib.gridspec as gridspec
import seaborn as sns
from . import performance as pfm
import jaqs.util as jutil
DECIMAL_TO_BPS = 10000
DECIMAL_TO_PCT = 100
COLOR_MAP = cm.get_cmap('rainbow') # cm.get_cmap('RdBu')
MPL_RCPARAMS = {'figure.facecolor': '#F6F6F6',
'axes.facecolor': '#F6F6F6',
'axes.edgecolor': '#D3D3D3',
'text.color': '#555555',
'grid.color': '#B1B1B1',
'grid.alpha': 0.3,
# scale
'axes.linewidth': 2.0,
'axes.titlepad': 12,
'grid.linewidth': 1.0,
'grid.linestyle': '-',
# font size
'font.size': 13,
'axes.titlesize': 18,
'axes.labelsize': 14,
'legend.fontsize': 'small',
'lines.linewidth': 2.5,
}
mpl.rcParams.update(MPL_RCPARAMS)
# -----------------------------------------------------------------------------------
# plotting settings
def customize(func):
"""
Decorator to set plotting context and axes style during function call.
"""
@wraps(func)
return call_w_context
def plotting_context(context='notebook', font_scale=1.5, rc=None):
"""
Create signaldigger default plotting style context.
Under the hood, calls and returns seaborn.plotting_context() with
some custom settings. Usually you would use in a with-context.
Parameters
----------
context : str, optional
Name of seaborn context.
font_scale : float, optional
Scale font by signal font_scale.
rc : dict, optional
Config flags.
By default, {'lines.linewidth': 1.5}
is being used and will be added to any
rc passed in, unless explicitly overriden.
Returns
-------
seaborn plotting context
Example
-------
with signaldigger.plotting.plotting_context(font_scale=2):
signaldigger.create_full_report(..., set_context=False)
See also
--------
For more information, see seaborn.plotting_context().
"""
if rc is None:
rc = {}
rc_default = {'lines.linewidth': 1.5}
# Add defaults if they do not exist
for name, val in rc_default.items():
rc.setdefault(name, val)
return sns.plotting_context(context=context, font_scale=font_scale, rc=rc)
def axes_style(style='darkgrid', rc=None):
"""Create signaldigger default axes style context.
Under the hood, calls and returns seaborn.axes_style() with
some custom settings. Usually you would use in a with-context.
Parameters
----------
style : str, optional
Name of seaborn style.
rc : dict, optional
Config flags.
Returns
-------
seaborn plotting context
Example
-------
with signaldigger.plotting.axes_style(style='whitegrid'):
signaldigger.create_full_report(..., set_context=False)
See also
--------
For more information, see seaborn.plotting_context().
"""
if rc is None:
rc = {}
rc_default = {}
# Add defaults if they do not exist
for name, val in rc_default.items():
rc.setdefault(name, val)
return sns.axes_style(style=style, rc=rc)
# -----------------------------------------------------------------------------------
# Functions to Plot Tables
def plot_table(table, name=None, fmt=None):
"""
Pretty print a pandas DataFrame.
Uses HTML output if running inside Jupyter Notebook, otherwise
formatted text output.
Parameters
----------
table : pd.Series or pd.DataFrame
Table to pretty-print.
name : str, optional
Table name to display in upper left corner.
fmt : str, optional
Formatter to use for displaying table elements.
E.g. '{0:.2f}%' for displaying 100 as '100.00%'.
Restores original setting after displaying.
"""
if isinstance(table, pd.Series):
table = pd.DataFrame(table)
if isinstance(table, pd.DataFrame):
table.columns.name = name
prev_option = pd.get_option('display.float_format')
if fmt is not None:
pd.set_option('display.float_format', lambda x: fmt.format(x))
print(table)
if fmt is not None:
pd.set_option('display.float_format', prev_option)
# -----------------------------------------------------------------------------------
# Functions to Plot Returns
'''
def plot_quantile_returns_bar(mean_ret_by_q,
# ylim_percentiles=None,
ax=None):
"""
Plots mean period wise returns for signal quantiles.
Parameters
----------
mean_ret_by_q : pd.DataFrame
DataFrame with quantile, (group) and mean period wise return values.
ax : matplotlib.Axes, optional
Axes upon which to plot.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
mean_ret_by_q = mean_ret_by_q.copy().loc[:, ['mean']]
ymin = None
ymax = None
if ax is None:
f, ax = plt.subplots(1, 1, figsize=(18, 6))
mean_ret_by_q.multiply(DECIMAL_TO_BPS) \
.plot(kind='bar',
title="Mean Return (on symbol, time) By signal Quantile", ax=ax)
ax.set(xlabel='Quantile', ylabel='Mean Return (bps)',
ylim=(ymin, ymax))
return ax
'''
def plot_quantile_returns_ts(mean_ret_by_q, ax=None):
"""
Plots mean period wise returns for signal quantiles.
Parameters
----------
mean_ret_by_q : pd.DataFrame
DataFrame with quantile, (group) and mean period wise return values.
ax : matplotlib.Axes, optional
Axes upon which to plot.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
f, ax = plt.subplots(1, 1, figsize=(18, 6))
ret_wide = pd.concat({k: v['mean'] for k, v in mean_ret_by_q.items()}, axis=1)
ret_wide.index = pd.to_datetime(ret_wide.index, format="%Y%m%d")
ret_wide = ret_wide.mul(DECIMAL_TO_PCT)
# ret_wide = ret_wide.rolling(window=22).mean()
ret_wide.plot(lw=1.2, ax=ax, cmap=COLOR_MAP)
df = pd.DataFrame()
ax.legend(loc='upper left')
ymin, ymax = ret_wide.min().min(), ret_wide.max().max()
ax.set(ylabel='Return (%)',
title="Daily Quantile Return (equal weight within quantile)",
xlabel='Date',
# yscale='symlog',
# yticks=np.linspace(ymin, ymax, 5),
ylim=(ymin, ymax))
ax.yaxis.set_major_formatter(ScalarFormatter())
ax.axhline(1.0, linestyle='-', color='black', lw=1)
return ax
def plot_mean_quantile_returns_spread_time_series(mean_returns_spread, period,
std_err=None,
bandwidth=1,
ax=None):
"""
Plots mean period wise returns for signal quantiles.
Parameters
----------
mean_returns_spread : pd.Series
Series with difference between quantile mean returns by period.
std_err : pd.Series
Series with standard error of difference between quantile
mean returns each period.
bandwidth : float
Width of displayed error bands in standard deviations.
ax : matplotlib.Axes, optional
Axes upon which to plot.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if False: # isinstance(mean_returns_spread, pd.DataFrame):
if ax is None:
ax = [None for a in mean_returns_spread.columns]
ymin, ymax = (None, None)
for (i, a), (name, fr_column) in zip(enumerate(ax),
mean_returns_spread.items()):
stdn = None if std_err is None else std_err[name]
stdn = mean_returns_spread.loc
a = plot_mean_quantile_returns_spread_time_series(fr_column,
std_err=stdn,
ax=a)
ax[i] = a
curr_ymin, curr_ymax = a.get_ylim()
ymin = curr_ymin if ymin is None else min(ymin, curr_ymin)
ymax = curr_ymax if ymax is None else max(ymax, curr_ymax)
for a in ax:
a.set_ylim([ymin, ymax])
return ax
periods = period
title = ('Top Minus Bottom Quantile Return'
.format(periods if periods is not None else ""))
if ax is None:
f, ax = plt.subplots(figsize=(18, 6))
mean_returns_spread.index = pd.to_datetime(mean_returns_spread.index, format="%Y%m%d")
mean_returns_spread_bps = mean_returns_spread['mean_diff'] * DECIMAL_TO_PCT
std_err_bps = mean_returns_spread['std'] * DECIMAL_TO_PCT
upper = mean_returns_spread_bps.values + (std_err_bps * bandwidth)
lower = mean_returns_spread_bps.values - (std_err_bps * bandwidth)
mean_returns_spread_bps.plot(alpha=0.4, ax=ax, lw=0.7, color='navy')
mean_returns_spread_bps.rolling(22).mean().plot(color='green',
alpha=0.7,
ax=ax)
# ax.fill_between(mean_returns_spread.index, lower, upper,
# alpha=0.3, color='indianred')
ax.axhline(0.0, linestyle='-', color='black', lw=1, alpha=0.8)
ax.legend(['mean returns spread', '1 month moving avg'], loc='upper right')
ylim = np.nanpercentile(abs(mean_returns_spread_bps.values), 95)
ax.set(ylabel='Difference In Quantile Mean Return (%)',
xlabel='',
title=title,
ylim=(-ylim, ylim))
return ax
def plot_cumulative_return(ret, ax=None, title=None):
"""
Plots the cumulative returns of the returns series passed in.
Parameters
----------
ret : pd.Series
Period wise returns of dollar neutral portfolio weighted by signal
value.
ax : matplotlib.Axes, optional
Axes upon which to plot.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
f, ax = plt.subplots(1, 1, figsize=(18, 6))
ret = ret.copy()
cum = ret # pfm.daily_ret_to_cum(ret)
cum.index = pd.to_datetime(cum.index, format="%Y%m%d")
cum = cum.mul(DECIMAL_TO_PCT)
cum.plot(ax=ax, lw=3, color='indianred', alpha=1.0)
ax.axhline(0.0, linestyle='-', color='black', lw=1)
metrics = pfm.calc_performance_metrics(cum, cum_return=True, compound=False)
ax.text(.85, .30,
"Ann.Ret. = {:.1f}%\nAnn.Vol. = {:.1f}%\nSharpe = {:.2f}".format(metrics['ann_ret'],
metrics['ann_vol'],
metrics['sharpe']),
fontsize=12,
bbox={'facecolor': 'white', 'alpha': 1, 'pad': 5},
transform=ax.transAxes,
verticalalignment='top')
if title is None:
title = "Cumulative Return"
ax.set(ylabel='Cumulative Return (%)',
title=title,
xlabel='Date')
return ax
def plot_cumulative_returns_by_quantile(quantile_ret, ax=None):
"""
Plots the cumulative returns of various signal quantiles.
Parameters
----------
quantile_ret : int: pd.DataFrame
Cumulative returns by signal quantile.
ax : matplotlib.Axes, optional
Axes upon which to plot.
Returns
-------
ax : matplotlib.Axes
"""
if ax is None:
f, ax = plt.subplots(1, 1, figsize=(18, 6))
cum_ret = quantile_ret
cum_ret.index = pd.to_datetime(cum_ret.index, format="%Y%m%d")
cum_ret = cum_ret.mul(DECIMAL_TO_PCT)
cum_ret.plot(lw=2, ax=ax, cmap=COLOR_MAP)
ax.axhline(0.0, linestyle='-', color='black', lw=1)
ax.legend(loc='upper left')
ymin, ymax = cum_ret.min().min(), cum_ret.max().max()
ax.set(ylabel='Cumulative Returns (%)',
title='Cumulative Return of Each Quantile (equal weight within quantile)',
xlabel='Date',
# yscale='symlog',
# yticks=np.linspace(ymin, ymax, 5),
ylim=(ymin, ymax))
sharpes = ["sharpe_{:d} = {:.2f}".format(col, pfm.calc_performance_metrics(ser, cum_return=True,
compound=False)['sharpe'])
for col, ser in cum_ret.iteritems()]
ax.text(.02, .30,
'\n'.join(sharpes),
fontsize=12,
bbox={'facecolor': 'white', 'alpha': 1, 'pad': 5},
transform=ax.transAxes,
verticalalignment='top')
ax.yaxis.set_major_formatter(ScalarFormatter())
return ax
# -----------------------------------------------------------------------------------
# Functions to Plot IC
def plot_ic_ts(ic, period, ax=None):
"""
Plots Spearman Rank Information Coefficient and IC moving
average for a given signal.
Parameters
----------
ic : pd.DataFrame
DataFrame indexed by date, with IC for each forward return.
ax : matplotlib.Axes, optional
Axes upon which to plot.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
ic = ic.copy()
if isinstance(ic, pd.DataFrame):
ic = ic.iloc[:, 0]
mean, std = ic.mean(), ic.std()
if ax is None:
num_plots = 1
f, ax = plt.subplots(num_plots, 1, figsize=(18, num_plots * 7))
ax = np.asarray([ax]).flatten()
ic.plot(ax=ax, lw=0.6, color='navy', label='daily IC', alpha=0.8)
ic.rolling(22).mean().plot(ax=ax, color='royalblue', lw=2, alpha=0.6, label='1 month MA')
ax.axhline(0.0, linestyle='-', color='black', linewidth=1, alpha=0.8)
ax.text(.05, .95,
"Mean {:.3f} \n Std. {:.3f}".format(mean, std),
fontsize=16,
bbox={'facecolor': 'white', 'alpha': 1, 'pad': 5},
transform=ax.transAxes,
verticalalignment='top',
)
ymin, ymax = (None, None)
curr_ymin, curr_ymax = ax.get_ylim()
ymin = curr_ymin if ymin is None else min(ymin, curr_ymin)
ymax = curr_ymax if ymax is None else max(ymax, curr_ymax)
ax.legend(loc='upper right')
ax.set(ylabel='IC', xlabel="", ylim=[ymin, ymax],
title="Daily IC and Moving Average".format(period))
return ax
def plot_ic_hist(ic, period, ax=None):
"""
Plots Spearman Rank Information Coefficient histogram for a given signal.
Parameters
----------
ic : pd.DataFrame
DataFrame indexed by date, with IC for each forward return.
ax : matplotlib.Axes, optional
Axes upon which to plot.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
ic = ic.copy()
if isinstance(ic, pd.DataFrame):
ic = ic.iloc[:, 0]
mean, std = ic.mean(), ic.std()
if ax is None:
v_spaces = 1
f, ax = plt.subplots(v_spaces, 3, figsize=(18, v_spaces * 6))
ax = ax.flatten()
sns.distplot(ic.replace(np.nan, 0.), ax=ax,
hist_kws={'color': 'royalblue'},
kde_kws={'color': 'navy', 'alpha': 0.5},
# hist_kws={'weights':},
)
ax.axvline(mean, color='indianred', linestyle='dashed', linewidth=1.0, label='Mean')
ax.text(.05, .95,
"Mean {:.3f} \n Std. {:.3f}".format(mean, std),
fontsize=16,
bbox={'facecolor': 'white', 'alpha': 1, 'pad': 5},
transform=ax.transAxes,
verticalalignment='top')
ax.set(title="Distribution of Daily IC",
xlabel='IC',
xlim=[-1, 1])
ax.legend(loc='upper right')
return ax
def plot_monthly_ic_heatmap(mean_monthly_ic, period, ax=None):
"""
Plots a heatmap of the information coefficient or returns by month.
Parameters
----------
mean_monthly_ic : pd.DataFrame
The mean monthly IC for N periods forward.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
MONTH_MAP = {1: 'Jan',
2: 'Feb',
3: 'Mar',
4: 'Apr',
5: 'May',
6: 'Jun',
7: 'Jul',
8: 'Aug',
9: 'Sep',
10: 'Oct',
11: 'Nov',
12: 'Dec'}
mean_monthly_ic = mean_monthly_ic.copy()
num_plots = 1.0
v_spaces = ((num_plots - 1) // 3) + 1
if ax is None:
f, ax = plt.subplots(v_spaces, 3, figsize=(18, v_spaces * 6))
ax = ax.flatten()
new_index_year = []
new_index_month = []
for date in mean_monthly_ic.index:
new_index_year.append(date.year)
new_index_month.append(MONTH_MAP[date.month])
mean_monthly_ic.index = pd.MultiIndex.from_arrays(
[new_index_year, new_index_month],
names=["year", "month"])
ic_year_month = mean_monthly_ic['ic'].unstack()
sns.heatmap(
ic_year_month,
annot=True,
alpha=1.0,
center=0.0,
annot_kws={"size": 7},
linewidths=0.01,
linecolor='white',
cmap=cm.get_cmap('RdBu'),
cbar=False,
ax=ax)
ax.set(ylabel='', xlabel='')
ax.set_title("IC Monthly Mean".format(period))
return ax
# -----------------------------------------------------------------------------------
# Functions to Plot Others
'''
def plot_event_dist_NEW(df_events, axs, grouper=None):
i = 0
def _plot(ser):
ax = axs[i]
sns.distplot(ser, ax=ax)
ax.axvline(ser.mean(), lw=1, ls='--', label='Average', color='red')
ax.legend(loc='upper left')
ax.set(xlabel='Return (%)', ylabel='',
title="Distribution of return after {:d} trade dats".format(period))
if grouper is None:
for (date, period), row in df_events.iterrows():
ax = axs[i]
sns.distplot(ser, ax=ax)
ax.axvline(ser.mean(), lw=1, ls='--', label='Average', color='red')
ax.legend(loc='upper left')
ax.set(xlabel='Return (%)', ylabel='',
title="Distribution of return after {:d} trade dats".format(period))
# self.show_fig(fig, 'event_return_{:d}days.png'.format(my_period))
i += 1
# print(mean)
'''
def plot_batch_backtest(df, ax):
"""
Parameters
----------
df : pd.DataFrame
ax : axes
"""
df = df.copy()
df.index = jutil.convert_int_to_datetime(df.index)
df.mul(DECIMAL_TO_PCT).plot(# marker='x',
lw=1.2, ax=ax, cmap=COLOR_MAP)
ax.axhline(0.0, color='k', ls='--', lw=0.7, alpha=.5)
ax.set(xlabel="Date", ylabel="Cumulative Return (%)",
title="Cumulative Return for Different Buy Condition", )
| [
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
11748,
2603,
... | 2.088223 | 9,374 |
# Copyright (c) 2014-2015, Doug Kelly
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django import forms
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.forms.extras.widgets import SelectDateWidget
from django.utils import timezone
from register.models import Convention, Registration, PaymentMethod, RegistrationLevel, DealerRegistrationLevel, ShirtSize, CouponCode, CouponUse
from datetime import date, datetime
import re
import os
import codecs
BIRTH_YEAR_CHOICES = list(range(date.today().year, 1900, -1))
| [
2,
15069,
357,
66,
8,
1946,
12,
4626,
11,
15115,
9077,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
1231,
198,
2,
17613,
11,
389,
10431,
2810,
326,
262,
170... | 3.540117 | 511 |
# from .constants import *
from rcosautomation.discord.constants import MATTERMOST_USERNAME, MATTERMOST_PASSWORD, VOICE_CHANNEL
from rcosautomation.discord.channels import add_channel_if_not_exists
import requests
from mattermostdriver import Driver
# mattermost = Driver({
# 'url': '54.197.25.170',
# 'login_id': MATTERMOST_USERNAME,
# 'password': MATTERMOST_PASSWORD
# })
# mattermost.login()
# The ID of the Project Pairing category
project_pairing_category_id = '748650123092820140'
# You can copy-paste project names here on each line and it will split and trim them
project_text = '''The Hotbox
Padlock News
Sage
Submitty
Insomnia Dialogue System
Exalendar
DormDesign
RPI Housing Finder
Spiral Football Stats
Lavender Programming Language
useCloudFS
Used Car Data Playground
OpenCircuits
TutorBase
Smartrider
ShuttleTracker
Poll Buddy
Telescope
AIPS
Pipeline
YACS
Venue
Taper'''
projects = list(map(str.strip, project_text.splitlines()))
| [
2,
422,
764,
9979,
1187,
1330,
1635,
198,
6738,
374,
6966,
2306,
296,
341,
13,
15410,
585,
13,
9979,
1187,
1330,
36775,
5781,
44,
10892,
62,
29904,
20608,
11,
36775,
5781,
44,
10892,
62,
47924,
54,
12532,
11,
30578,
8476,
62,
3398,
... | 2.948012 | 327 |
import chess_diagrams
# setup for all tests. See https://docs.pytest.org/en/2.7.3/xunit_setup.html
#
# Test for a single response. See http://flask.pocoo.org/docs/1.0/testing/
#
| [
11748,
19780,
62,
10989,
6713,
82,
628,
198,
2,
9058,
329,
477,
5254,
13,
4091,
3740,
1378,
31628,
13,
9078,
9288,
13,
2398,
14,
268,
14,
17,
13,
22,
13,
18,
14,
87,
20850,
62,
40406,
13,
6494,
198,
2,
628,
198,
2,
6208,
329,
... | 2.520548 | 73 |
import base64
import hashlib
from Crypto import Random
from Crypto.Cipher import DES3
class TDESCipher(object):
"""
Triple DES (Data Encryption Standard)
Enchaine 3 applications successives de l'algorithme DES sur le meme bloc de donnees de 64 bits, avec 2 ou 3 clef DES differentes.
Le TDES est cryptographiquement securise, il n'est ni aussi sur ni aussi rapide que AES.
Taille(s) du bloc : 64 bits (8 octets)
Longueur(s) de la cle : 168(21)ou 112(14) bits
Nombre de tours 3x16 tours du DES
"""
@staticmethod
#padding permettant d'utiliser n'importe quelle taille de message
@staticmethod
| [
11748,
2779,
2414,
198,
11748,
12234,
8019,
198,
198,
6738,
36579,
1330,
14534,
198,
6738,
36579,
13,
34,
10803,
1330,
22196,
18,
198,
198,
4871,
13320,
1546,
34,
10803,
7,
15252,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1... | 2.773504 | 234 |
import os
from pymodm import fields, MongoModel, connect
from pymodm.errors import DoesNotExist
from passlib.hash import pbkdf2_sha256
connect("mongodb://localhost:27017/database")
def add_user(username, password):
"""Creates new user if user does not exist in the mongo database
:param username: user email as string type which serves as user id
:param password: user password as string type
:returns: updates user information in mongo database
"""
try:
user = User.objects.raw({'_id': username}).first()
except DoesNotExist:
user = User(username, password=pbkdf2_sha256.hash(password))
user.save()
def get_user(username):
"""Gets user by unique username
:param username: user email as string type which serves as user id
:returns: user information
"""
try:
user = User.objects.raw({'_id': username}).first()
return user
except DoesNotExist:
return None
def delete_user(username):
"""Deletes user from mongo database
:param username: user email as string type which serves as user id
"""
try:
user = User.objects.raw({'_id': username}).first()
user.delete()
except DoesNotExist:
pass
return False
def login_user(username, password):
"""Returns true if user exists and has the correct password
:param username: user email as string type which serves as user id
:param password: user password as string type
:returns: True if password is correct, False if incorrect
"""
try:
user = User.objects.raw({'_id': username}).first()
if user.password and pbkdf2_sha256.verify(password, user.password):
return True
except DoesNotExist:
pass
return False
def save_original_image_uuid(username, uuid):
"""Updates existing user by adding the uuid of a user-uploaded image
:param username: user email as string type which serves as user id
:param uuid: UUID4 of user-uploaded image
:returns: adds uuid of user-uploaded image to mongo database
"""
try:
user = User.objects.raw({'_id': username}).first()
user.original_image = uuid
user.save()
except DoesNotExist:
return None
def save_processed_image_uuid(username, uuid):
"""Updates existing user by adding the uuid of the processed image
:param username: user email as string type which serves as user id
:param uuid: UUID4 of processed image
:returns: adds uuid of processed image to mongo database
"""
try:
user = User.objects.raw({'_id': username}).first()
user.processed_image = uuid
user.save()
except DoesNotExist:
return None
def get_original_image(username):
"""Gets the original image uuid for a user
:param username: user email as string type which serves as user id
:returns: uuid of user's original image as a string
"""
try:
user = User.objects.raw({'_id': username}).first()
return user.original_image
except DoesNotExist:
return None
def get_processed_image(username):
"""Gets the processed image uuid for a user
:param username: user email as string type which serves as user id
:returns: uuid (UUID4) of user's processed image as a string
"""
try:
user = User.objects.raw({'_id': username}).first()
return user.processed_image
except DoesNotExist:
return None
def delete_image(name):
"""Deletes image stored in server
:param name: name (uuid) of an image stored in the VM server
"""
for f in os.listdir('images/'):
if f.startswith(name):
os.remove('images/' + f)
return
def remove_images(username):
"""Removes all images associated with a user
:param username: user email as string type which serves as user id
"""
try:
user = User.objects.raw({'_id': username}).first()
if user.original_image is not None:
delete_image(user.original_image)
if user.processed_image is not None:
delete_image(user.processed_image)
return True
except DoesNotExist:
return False
| [
11748,
28686,
198,
6738,
12972,
4666,
76,
1330,
7032,
11,
42591,
17633,
11,
2018,
198,
6738,
12972,
4666,
76,
13,
48277,
1330,
8314,
3673,
3109,
396,
198,
6738,
1208,
8019,
13,
17831,
1330,
279,
65,
74,
7568,
17,
62,
26270,
11645,
198... | 2.623011 | 1,634 |
from flask import Flask, request, send_from_directory, jsonify
import nltk
nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
app = Flask(__name__, static_url_path='/static')
@app.route('/js/<path:path>')
@app.route("/")
@app.route("/get_sentiment", methods=['GET', 'POST'])
if __name__ == '__main__':
app.run()
| [
6738,
42903,
1330,
46947,
11,
2581,
11,
3758,
62,
6738,
62,
34945,
11,
33918,
1958,
198,
11748,
299,
2528,
74,
198,
77,
2528,
74,
13,
15002,
10786,
85,
5067,
62,
2588,
4749,
11537,
198,
6738,
299,
2528,
74,
13,
34086,
3681,
13,
85,
... | 2.644928 | 138 |
from __future__ import unicode_literals
import pytest
@pytest.fixture(autouse=True)
@pytest.fixture
@pytest.fixture(autouse=True)
@pytest.fixture(autouse=True)
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
12972,
9288,
628,
198,
31,
9078,
9288,
13,
69,
9602,
7,
2306,
1076,
28,
17821,
8,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
198,
31,
9078,
9288,
13,
6... | 2.560606 | 66 |
from model.group import Group
testdata = [
Group(name='Name1', header='header1', footer='footer1'),
Group(name='Name2', header='header2', footer='footer2')
] | [
6738,
2746,
13,
8094,
1330,
4912,
198,
198,
9288,
7890,
796,
685,
198,
220,
220,
220,
4912,
7,
3672,
11639,
5376,
16,
3256,
13639,
11639,
25677,
16,
3256,
2366,
263,
11639,
5898,
263,
16,
33809,
198,
220,
220,
220,
4912,
7,
3672,
11... | 2.766667 | 60 |
##### This script splits of the assembly in subcontigs wherever there is a "N" stretch longer than 30N
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
import glob
Assemblies = glob.glob("/media/avneesh/AneeshHDDfat/AssembledScaffolds/*")
N_stretch_length = 100
for file in Assemblies:
NewFILEPath = str(file) + str("_splitted")
newAssembly = open(NewFILEPath, "a")
for seq in SeqIO.parse(file, "fasta"):
base = -1
seq_end = "no"
new_sub_number = 0
while base < len(seq.seq)-1:
base += 1
N_count = 0
if seq.seq[base] != "N":
N_count = 0
start = base
for a in range(start, len(seq.seq),1):
if seq.seq[a] != "N":
if a+1 == len(seq.seq):
seq_end = "yes"
else:
for b in range(a, len(seq.seq)+1,1):
if seq.seq[b] == "N":
N_count += 1
else:
base = b-1
break
if N_count > N_stretch_length:
new_sub_number += 1
stop = a
old_split_ID = seq.id.split("_cov_")
old_split_ID[1] = "%s%s%s" % (str(old_split_ID[1]), str("_"), str(new_sub_number))
new_sequence = SeqRecord(Seq(str(seq.seq[start:stop])), id = "_cov_".join(old_split_ID),description="") ### create new SeqRecord object
SeqIO.write(new_sequence, newAssembly, "fasta") ### and write it to the new file
break
elif seq_end == "yes":
new_sub_number += 1
stop = a + 1
base = len(seq.seq) ## stops while loop
old_split_ID = seq.id.split("_cov_")
old_split_ID[1] = "%s%s%s" % (str(old_split_ID[1]), str("_"), str(new_sub_number))
new_sequence = SeqRecord(Seq(str(seq.seq[start:stop])), id = "_cov_".join(old_split_ID),description="") ### create new SeqRecord object
SeqIO.write(new_sequence, newAssembly, "fasta") ### and write it to the new file
break
else:
pass
else:
pass
print "%s%s" % (str(file.split("/")[-1]), " - done!")
| [
198,
4242,
2,
770,
4226,
30778,
286,
262,
10474,
287,
850,
3642,
9235,
14530,
612,
318,
257,
366,
45,
1,
7539,
2392,
621,
1542,
45,
198,
198,
6738,
16024,
1330,
1001,
80,
9399,
198,
6738,
16024,
13,
4653,
80,
1330,
1001,
80,
198,
... | 2.047236 | 995 |
from flask import Blueprint, render_template, request, jsonify
from helpers.database import db
from model.models import Project, Component
comp = Blueprint('component', __name__)
@comp.route('/component', methods=['GET'])
@comp.route('/component', methods=['POST'])
@comp.route('/component', methods=['PUT'])
@comp.route('/component', methods=['DELETE'])
| [
6738,
42903,
1330,
39932,
11,
8543,
62,
28243,
11,
2581,
11,
33918,
1958,
198,
6738,
49385,
13,
48806,
1330,
20613,
198,
6738,
2746,
13,
27530,
1330,
4935,
11,
35100,
198,
198,
5589,
796,
39932,
10786,
42895,
3256,
11593,
3672,
834,
8,
... | 3.37963 | 108 |
from cloudferry.lib.base.action import action
| [
6738,
6279,
2232,
563,
13,
8019,
13,
8692,
13,
2673,
1330,
2223,
628
] | 3.615385 | 13 |
import ice
import torch
from ice.core.loss import LossNode
from ice.core.metric import MetricNode
from torch import autocast, nn
from torch.nn import functional as F
from torch.optim import Adam
from torchvision.datasets import CIFAR10
from torchvision.transforms import Compose, Normalize, ToTensor
# arguments
ice.args.setdefault("lr", 0.0001, float, hparam=True)
# initialization
ice.init_autocast()
ice.make_configurable(Adam)
ice.set_gradient_accumulate(2)
# node
@ice.configurable
# define VGG 16
# hypergraph
ice.add("cifar10", make_cifar10(train=True, batch_size=200), tags="train")
ice.add("cifar10", make_cifar10(train=False, batch_size=200), tags="val")
ice.add("net", ice.ModuleNode(
module=Net(),
forward=lambda n, x: n.module(x['cifar10'][0]),
optimizers=ice.Optimizer(Adam(lr=ice.args.lr))
))
ice.add("nll_loss", LossNode(forward=lambda n, x: F.nll_loss(x["net"], x["cifar10"][1])))
ice.add("avg_nll_loss",
ice.MetricNode(
ice.AverageMeter(),
forward=lambda n, x: (x['nll_loss'], x['cifar10'][1].size(0)),
epoch_end=report,
))
ice.print_forward_output("nll_loss", every=200)
# training shedule
ice.run(
[
ice.Repeat([
ice.Task(train=True, epochs=5, tags="train"),
ice.SaveCheckpointTask(),
ice.Task(train=False, epochs=5, tags="val"),
], times=5)
],
devices="cuda:1",
omp_num_threads=6,
monitor_interval=1,
tee="3"
) | [
11748,
4771,
198,
11748,
28034,
198,
6738,
4771,
13,
7295,
13,
22462,
1330,
22014,
19667,
198,
6738,
4771,
13,
7295,
13,
4164,
1173,
1330,
3395,
1173,
19667,
198,
6738,
28034,
1330,
1960,
420,
459,
11,
299,
77,
198,
6738,
28034,
13,
2... | 2.33122 | 631 |
import requests
| [
11748,
7007,
628
] | 5.666667 | 3 |
"""Contract test cases for main."""
from typing import Any
import pytest
import requests
@pytest.mark.contract
def test_main(http_service: Any) -> None:
"""Should return 200 and html."""
url = f"{http_service}"
response = requests.get(url)
assert response.status_code == 200
assert response.headers["content-type"] == "text/html; charset=utf-8"
assert len(response.text) > 0
| [
37811,
45845,
1332,
2663,
329,
1388,
526,
15931,
198,
6738,
19720,
1330,
4377,
198,
198,
11748,
12972,
9288,
198,
11748,
7007,
628,
198,
31,
9078,
9288,
13,
4102,
13,
28484,
198,
4299,
1332,
62,
12417,
7,
4023,
62,
15271,
25,
4377,
8,... | 3.014925 | 134 |
"""ESI slack bot for tweetfleet."""
import os
import time
from slackclient import SlackClient
from esi_bot import ESI
from esi_bot import ESI_CHINA
from esi_bot import LOG
from esi_bot import request
from esi_bot.processor import Processor
from esi_bot.commands import ( # noqa: F401; # pylint: disable=unused-import
get_help, issue_details, issue_new, links, misc, status_esi, status_server, type_info)
def main():
"""Connect to the slack RTM API and pull messages forever."""
LOG.info("ESI bot launched")
request.do_refresh(ESI)
request.do_refresh(ESI_CHINA)
LOG.info("Loaded ESI specs")
slack = SlackClient(os.environ["SLACK_TOKEN"])
processor = Processor(slack)
while True:
if slack.rtm_connect(auto_reconnect=True):
if not processor.on_server_connect():
raise SystemExit("Could not join channels")
LOG.info("Connected to Slack")
cycle = 0
while slack.server.connected is True:
cycle += 1
for msg in slack.rtm_read():
processor.process_event(msg)
if cycle > 10:
processor.garbage_collect()
cycle = 0
time.sleep(1) # rtm_read should block, but it doesn't :/
else:
raise SystemExit("Connection to slack failed :(")
if __name__ == '__main__':
main()
| [
37811,
1546,
40,
30740,
10214,
329,
6126,
33559,
526,
15931,
198,
198,
11748,
28686,
198,
11748,
640,
198,
198,
6738,
30740,
16366,
1330,
36256,
11792,
198,
198,
6738,
1658,
72,
62,
13645,
1330,
412,
11584,
198,
6738,
1658,
72,
62,
1364... | 2.313821 | 615 |
# Copyright (c) 2017 The Regents of the University of Michigan
# All rights reserved.
# This software is licensed under the BSD 3-Clause License.
import itertools
from . import scheduler
from signac.common.six import with_metaclass
import uuid
# def _fn_bundle(self, bundle_id):
# return os.path.join(self.root_directory(), '.bundles', bundle_id)
#
# def _store_bundled(self, operations):
# """Store all job session ids part of one bundle.
#
# The job session ids are stored in a text file in the project's
# root directory. This is necessary to be able to identify each
# job's individual status from the bundle id."""
# if len(operations) == 1:
# return operations[0].get_id()
# else:
# h = '.'.join(op.get_id() for op in operations)
# bid = '{}-bundle-{}'.format(self, sha1(h.encode('utf-8')).hexdigest())
# fn_bundle = self._fn_bundle(bid)
# _mkdir_p(os.path.dirname(fn_bundle))
# with open(fn_bundle, 'w') as file:
# for operation in operations:
# file.write(operation.get_id() + '\n')
# return bid
#
# def _expand_bundled_jobs(self, scheduler_jobs):
# "Expand jobs which were submitted as part of a bundle."
# for job in scheduler_jobs:
# if job.name().startswith('{}-bundle-'.format(self)):
# with open(self._fn_bundle(job.name())) as file:
# for line in file:
# yield manage.ClusterJob(line.strip(), job.status())
# else:
# yield job
| [
2,
15069,
357,
66,
8,
2177,
383,
3310,
658,
286,
262,
2059,
286,
7055,
198,
2,
1439,
2489,
10395,
13,
198,
2,
770,
3788,
318,
11971,
739,
262,
347,
10305,
513,
12,
2601,
682,
13789,
13,
198,
11748,
340,
861,
10141,
198,
6738,
764,... | 2.329803 | 661 |
import cv2 as cv
img = cv.imread("testeOpenCV.jpg")
cinza = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
print(cinza.shape)
cv.imshow("Joelma Cinza", cinza)
cv.waitKey(0)
| [
11748,
269,
85,
17,
355,
269,
85,
198,
9600,
796,
269,
85,
13,
320,
961,
7203,
9288,
68,
11505,
33538,
13,
9479,
4943,
198,
17879,
4496,
796,
269,
85,
13,
33967,
83,
10258,
7,
9600,
11,
269,
85,
13,
46786,
62,
33,
10761,
17,
38,... | 2 | 81 |
import pandas as pd
import matplotlib.pyplot as plt
plt.switch_backend('Qt4Agg')
import os
data_folder = "C:\\Users\\jeroe\\PycharmProjects\\PythonDataScienceWorkshops\\data"
os.chdir(data_folder)
temp = pd.read_csv("mean_temperature.csv", delimiter="\t", header=None)
print(temp.head()) | [
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
489,
83,
13,
31943,
62,
1891,
437,
10786,
48,
83,
19,
46384,
11537,
198,
198,
11748,
28686,
198,
7890,
62,
43551,
796,
366,
34,
25,
... | 2.675926 | 108 |
from .stats_influx import StatsInflux
from pymongo import MongoClient, database, collection
from urllib.parse import quote_plus
| [
6738,
764,
34242,
62,
10745,
22564,
1330,
20595,
18943,
22564,
198,
6738,
279,
4948,
25162,
1330,
42591,
11792,
11,
6831,
11,
4947,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
9577,
62,
9541,
628,
628,
628,
628,
628,
628,
628,
628,
62... | 3.395349 | 43 |
from torchvision.models.resnet import ResNet, Bottleneck, model_urls
| [
6738,
28034,
10178,
13,
27530,
13,
411,
3262,
1330,
1874,
7934,
11,
14835,
43163,
11,
2746,
62,
6371,
82,
628
] | 3.5 | 20 |
s = raw_input()
n = len(s)
global dp
dp = [[False]*n for x in range(n)]
count = 0
for i in range(n-1):
if s[i:i+2] in ["()","??","(?","?)"]:
# print "NEtered"
dp[i][i+1] = True
#for i in range(n):
# for j in range(n):
# if dp[i][j]:count+=1;print i,j,s[i:j+1]
if n%2==0:
recur(s,n,0,n-1)
for i in range(4,n+1,2):
for j in range(n-i+1):
recur(s[j:j+i],i,j,j+i-1)
else:
recur(s[1:],n-1,1,n-1)
recur(s[:n-1],n-1,0,n-2)
k = s
s = k[1:]
n = len(s)
for i in range(4,n+1,2):
for j in range(n-i+1):
recur(s[j:j+i],i,j+1,j+i)
s = k[0:n-1]
n = len(k)
for i in range(4,n+1,2):
for j in range(n-i+1):
#print "recur",k[j:j+i]
recur(s[j:j+i],i,j,j+i-1)
s = k
for i in range(n):
for j in range(n):
if dp[i][j]==1:count+=1#;print i,j,s[i:j+1]
print count
| [
198,
82,
796,
8246,
62,
15414,
3419,
198,
77,
796,
18896,
7,
82,
8,
198,
20541,
288,
79,
198,
26059,
796,
16410,
25101,
60,
9,
77,
329,
2124,
287,
2837,
7,
77,
15437,
198,
9127,
796,
657,
198,
1640,
1312,
287,
2837,
7,
77,
12,
... | 1.571429 | 553 |
from .element import Element
from .mixin import ReqInjectScriptMixin
from .menu import Menu, MenuItem
from .icon import Icon
class SideBar(Element, ReqInjectScriptMixin):
"""Sidebar widget (sidebar_menu, nav_menu, content)
Example: append sidebar_menu::
sidebar = uio.SideBar()
sidebar.sidebar_menu.append(
uio.Image(url_for('static', filename='vlogo.png'), _class='ui small centered image'),
uio.MenuHeaderItem('Brand Name'),
uio.MenuItem('Admin', url='admin'),
uio.MenuItem('CRM', url='crm'),
uio.MenuItem('CUS', url='cus'),
)
Example: append nav_menu::
sidebar.nav_menu.append(
uio.MenuHeaderItem('Example'),
uio.MenuItem('System'),
uio.MenuItem('Resource'),
uio.RightMenu(
uio.MenuItem('User Name', 'account', uio.Icon('user icon')),
uio.MenuItem('Logout', 'logout', uio.Icon('sign out alternate icon'))
),
)
""" | [
6738,
764,
30854,
1330,
11703,
198,
6738,
764,
19816,
259,
1330,
797,
80,
818,
752,
7391,
35608,
259,
198,
6738,
764,
26272,
1330,
21860,
11,
21860,
7449,
198,
6738,
764,
4749,
1330,
26544,
198,
198,
4871,
12075,
10374,
7,
20180,
11,
... | 2.092157 | 510 |
from datetime import datetime
from freezegun import freeze_time
import doccron
def foo() -> None:
"""
This function prints "foo"
/etc/crontab::
* * * * * 2021
* * * * * 2020
:returns: None
"""
print("foo")
def bar() -> None:
"""
/etc/crontab::
* * * * * 2021
* * * * * 2020
This should not be added
"""
print("bar")
def baz() -> None:
"""
* * * * * 2021
* * * * * 2020
"""
print("baz")
@freeze_time("2020-01-01")
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
6738,
1479,
89,
1533,
403,
1330,
16611,
62,
2435,
198,
198,
11748,
2205,
66,
1313,
628,
198,
4299,
22944,
3419,
4613,
6045,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
770,
2163,
... | 2.125 | 248 |
# Authors: Gavin Niendorf <gavinniendorf@gmail.com>
#
# Classes and methods for defining rays and their propagation rules.
#
# License: MIT
import numpy as np
from .transforms import *
from .exceptions import NormalizationError, NotOnSurfaceError
class ray:
"""Class for rays and their propagation through surfaces.
Note
----
Also checks whether the direction cosines are normalized.
Attributes
----------
P : np.array of 3 floats/ints
Position of ray in the lab frame.
D : np.array of 3 floats/ints
Direction cosines for the ray in the lab frame.
P_hist : list of P np.arrays
Previous P np.arrays in a list.
D_hist : list of D np.arrays
Previous D np.arrays in a list.
N : float/int
Index of refraction of current material.
wvl: float/int
Wavelength of the ray in microns 550nm --> 0.55.
"""
def transform(self, surface):
""" Updates position and direction of a ray to obj coordinate system. """
self.P, self.D = transform(surface.R, surface, np.array([self.P]), np.array([self.D]))
def find_intersection(self, surface):
"""Finds the intersection point of a ray with a surface.
Note
----
Directly changes the self.P (position) attribute of the ray
that corresponds to the intersection point. Also be aware
that my error definition is different from Spencer's paper.
I found that the more direct error equation of abs(F) allows
me to tune my max error values to get better accuracy.
Parameters
----------
surface : geometry object
Surface to find intersection of ray with.
"""
#Initial guesses, see Spencer, Murty for explanation.
s_0 = -self.P[2]/self.D[2]
X_1 = self.P[0]+self.D[0]*s_0
Y_1 = self.P[1]+self.D[1]*s_0
s_j = [0., 0.]
#Initial error.
error = 1.
n_iter = 0
#Max iterations allowed.
n_max = 1e4
while error > 1e-6 and n_iter < n_max:
X, Y, Z = [X_1, Y_1, 0.]+np.dot(self.D, s_j[0])
try:
#'normal' is the surface direction numbers.
func, normal= surface.get_surface([X, Y, Z])
deriv = np.dot(normal, self.D)
#Newton-raphson method
s_j = s_j[1], s_j[1]-func/deriv
except NotOnSurfaceError:
self.P = None
return None
#Error is how far f(X, Y, Z) is from 0.
error = abs(func)
n_iter += 1
if n_iter == n_max or s_0+s_j[0] < 0 or np.dot(([X, Y, Z]-self.P), self.D) < 0.:
self.P = None
else:
self.normal = normal
self.P = np.array([X, Y, Z])
def interact(self, surface, typeof):
"""Updates new direction of a ray for a given interaction type.
Note
----
High level method that calls the appropriate method for a given
interaction.
Parameters
----------
surface : geometry object
Surface to find intersection of ray with.
typeof : str
Type of interaction
reflection -> Reflect the ray off the surface.
refraction -> Refract the ray into the surface.
stop -> Don't change ray direction.
"""
if hasattr(surface,'glass'):
mu = self.N / surface.glass(self.wvl)
else:
mu = self.N / surface.N
a = mu*np.dot(self.D, self.normal)/pow(np.linalg.norm(self.normal), 2)
b = (pow(mu,2)-1)/pow(np.linalg.norm(self.normal), 2)
if typeof == 'stop':
pass
#Needed for total internal reflection even if typeof is refraction.
elif b > pow(a, 2) or typeof == 'reflection':
self.reflection(surface, a/mu)
elif typeof == 'refraction':
self.refraction(surface, mu, a, b)
def reflection(self, surface, a):
"""Reflects the ray off a surface and updates the ray's direction.
Note
----
This method computes D exactly rather than numerically like in the
refraction method.
Parameters
----------
surface : geometry object
Surface to reflect from.
a : float/int
Constant defined in the interact method.
"""
k, l, m = self.D
K, L, M = self.normal
self.D = np.array([k-2.*a*K, l-2.*a*L, m-2.*a*M])
def refraction(self, surface, mu, a, b):
"""Simulates refraction of a ray into a surface and updates the ray's direction.
Note
----
My error definition is not in Spencer and Murty's paper but is inspired by my
unique intersection error definition. We are solving for roots of a quadratic and
I am defining my error by how far the quadtratic is from 0. See Spencer, Murty for
derivation of the quadratic.
Parameters
----------
surface : geometry object
Surface to refract into.
mu, a, b : float/int
Constants defined in the interact method.
Returns
-------
0
Returns 0 if the number of iterations exceeds the max allowed to converge.
"""
k, l, m = self.D
K, L, M = self.normal
G = [-b/(2*a), -b/(2*a)]
#Initial error.
error = 1.
niter = 0
#Max iterations allowed.
nmax = 1e5
while error > 1e-15 and niter < nmax:
#Newton-raphson method
G = G[1], (pow(G[1],2)-b)/(2*(G[1]+a))
#See Spencer, Murty for where this is inspired by.
error = abs(pow(G[1],2)+2*a*G[1]+b)
niter += 1
if niter==nmax:
self.P = None
return 0.
#Update direction and index of refraction of the current material.
self.D = np.array([mu*k+G[1]*K,mu*l+G[1]*L,mu*m+G[1]*M])
if hasattr(surface,'glass'):
self.N = surface.glass(self.wvl)
else:
self.N = surface.N
def ray_lab_frame(self, surface):
""" Updates position and direction of a ray in the lab frame. """
self.P, self.D = lab_frame(surface.R, surface, np.array([self.P]), np.array([self.D]))
def update(self):
""" Updates the P_hist and D_hist arrays from current P and D arrays. """
self.P_hist.append(self.P)
self.D_hist.append(self.D)
def propagate(self, surfaces):
"""Propagates a ray through a given surfaces list.
Note
----
If self.P is None then the ray failed to converge or
took too many iterations to meet the required accuracy.
Note that this is used (self.P is None) as a flag in
many other functions in TracePy.
Parameters
----------
surfaces : list of geometry objects
Surfaces to propagate through in order of propagation.
"""
for surface in surfaces:
self.transform(surface)
self.find_intersection(surface)
#Results from failure to converge.
if self.P is None:
break
self.interact(surface, surface.action)
#Results from too many iterations.
if self.P is None:
break
self.ray_lab_frame(surface)
#Update current to history arrays.
self.update()
| [
2,
46665,
25,
30857,
11556,
18738,
69,
1279,
70,
615,
3732,
72,
18738,
69,
31,
14816,
13,
785,
29,
198,
2,
198,
2,
38884,
290,
5050,
329,
16215,
24823,
290,
511,
43594,
3173,
13,
198,
2,
198,
2,
13789,
25,
17168,
198,
198,
11748,
... | 2.213173 | 3,401 |
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
'''
@author: yuejl
@application:
@contact: lewyuejian@163.com
@file: strutil.py
@time: 2021/7/3 0003 22:19
@desc:
'''
import ujson
import re
import random
import string
import uuid | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
7061,
6,
198,
31,
9800,
25,
331,
518,
20362,
198,
31,
31438,
25,
198,
31,
32057,
25,
443,
21768,
518,
73,
666,
... | 2.385417 | 96 |
import numpy as np
from numba import jitclass
from numba import int32, float32
spec = [
('value', int32),
('array', float32[:]),
]
@jitclass(spec)
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
997,
7012,
1330,
474,
270,
4871,
198,
6738,
997,
7012,
1330,
493,
2624,
11,
12178,
2624,
628,
198,
16684,
796,
685,
198,
220,
220,
220,
19203,
8367,
3256,
493,
2624,
828,
198,
220,
220,
... | 2.580645 | 62 |
import random
| [
11748,
4738,
628,
198,
220,
220,
220,
220,
220,
220,
220,
220,
198,
220,
220,
220,
220,
198
] | 1.666667 | 18 |
"""
Patrons file incoming from IS&T in a version 1 schema to a version 2 schema
written by J Ammerman [jwacooks] (2015-10-09)
edited by A Sawyer [atla5] (2019-09-04)
"""
# coding: utf-8
# requires python 3.x
# load required modules
import codecs
import os
import xml.etree.ElementTree as ET
import glob
from zipfile import ZipFile
from xml.dom import minidom
import csv
# variables
DEFAULT_XML_ENCODING = "Windows-1252" # should be encoded in the first line of the xml
EXTRANEOUS_XML_LINE = 'xmlns:use="http://com/exlibris/digitool/repository/extsystem/xmlbeans" xsi:schemaLocation="http://com/exlibris/digitool/repository/extsystem/xmlbeans user_012513.xsd" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
SYM_BEL = '\u0007' # https://unicode.org/cldr/utility/character.jsp?a=0007
SYM_SYN = '\u0016' # https://unicode.org/cldr/utility/character.jsp?a=0016
SYM_SUB = '\u001a' # https://unicode.org/cldr/utility/character.jsp?a=001a
def prettify(elem):
"""Return a pretty-printed XML string for the Element.
"""
rough_string = ET.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
if __name__ == "__main__":
#os.chdir('/Volumes/jwa_drive1/git/patrons')
file_list = glob.glob('patrons*.xml')
"""get the list of user group codes and descriptions to read into a to enhance the records with the description"""
reader = csv.DictReader(open('user_groups.csv'))
user_groups = {}
for row in reader:
key = row.pop('Code')
if key in user_groups:
# implement your duplicate row handling here
pass
user_groups[key] = row['Description']
for f in file_list:
# create an empty file to write to
out_file = codecs.open('prep_' + f[len("patrons_"):], 'w', 'utf-8')
users = ET.Element('users')
xml_str = codecs.open(f, 'rb', DEFAULT_XML_ENCODING).read()
xml_str = xml_str.replace(SYM_BEL, '').replace(SYM_SUB, '').replace(SYM_SYN, '')
xml_str = xml_str.replace('use:', '').replace(EXTRANEOUS_XML_LINE, '')
root = ET.fromstring(xml_str)
for child in root:
user = ET.SubElement(users, 'user')
add_user_details(child, user)
#add_notes(child,user)
add_identifiers(child, user)
add_contacts(child, user)
out_file.write(prettify(users))
out_file.close()
file_list = glob.glob('prep*.xml')
with ZipFile('patrons.zip', 'a') as myzip:
for f in file_list:
myzip.write(f)
myzip.close()
| [
37811,
198,
12130,
12212,
2393,
15619,
422,
3180,
5,
51,
287,
257,
2196,
352,
32815,
284,
257,
2196,
362,
32815,
198,
220,
3194,
416,
449,
1703,
647,
805,
685,
73,
86,
330,
31085,
60,
357,
4626,
12,
940,
12,
2931,
8,
198,
220,
130... | 2.305677 | 1,145 |
from extract_image_features.video_utils import *
import numpy as np
from extract_image_features.keras_pretrained_models.imagenet_utils import preprocess_input
from keras.models import Model
from keras.preprocessing import image
from extract_image_features.keras_pretrained_models.vgg19 import VGG19
# file saving and loading destinations change whether you are working on laptop or desktop
USE_TITANX = True
### CHANGE THE FILE TO BE READ HERE!!!!
######## LOADING VIDEO FILENAMES
print ("--- Loading video and audio filenames...")
if USE_TITANX:
    video_dir = '/home/zanoi/ZANOI/auditory_hallucination_videos'
else: # Working on MacBook Pro
    video_dir = "/Volumes/SAMSUNG_SSD_256GB/ADV_CV/2-25_VIDAUD/EXPORTS"
# Collect every .mp4 file in the chosen video directory.
video_files = [os.path.join(video_dir, file_i)
             for file_i in os.listdir(video_dir)
             if file_i.endswith('.mp4')]
num_videos = len(video_files)
print("num_videos: ", num_videos)
######## LOADING AUDIO FILENAMES
# Audio feature vectors are stored as MATLAB .mat files next to the repo.
audio_feature_dir = "../audio_vectors"
audio_f_files = [os.path.join(audio_feature_dir, file_i)
               for file_i in os.listdir(audio_feature_dir)
               if file_i.endswith('.mat')]
num_audio_f = len(audio_f_files)
print (audio_f_files)
print("num_audio_f: ", num_audio_f)
# NOTE(review): os, tqdm and h5py are used below but not imported in this
# file's visible import block -- verify the imports.
for audio_idx in range(num_audio_f): # Loop over all audio files
    audio_prefix, audio_vector_length, audio_features = returnAudioVectors(audio_idx, audio_f_files)
    # Find all the linked videos for the given audio vector
    linked_video_f = findMatchingVideos(audio_prefix, video_files)
    print(audio_f_files[audio_idx])
    print(linked_video_f)
    for video_filename in linked_video_f:
        # Return the angle_name to name the file correctly
        angle_name = returnAngleName(video_filename)
        print ("angle_name:", angle_name)
        # Process the videos linked to a particular audio vector
        ######## PROCESS VIDEO TO BLACK AND WHITE
        print("--- Processing video to greyscale...")
        processed_video = processOneVideo(audio_vector_length, video_filename, normalize=False)
        print("processed_video.shape:", processed_video.shape)
        ######### CONCATENATE INTO SPACETIME IMAGE
        print ("--- Concatenating into Spacetime image...")
        window = 3
        space_time_image = createSpaceTimeImagesforOneVideo(processed_video,window) # (1, 8377, 224, 224, 3)
        print ("space_time_image.shape:", space_time_image.shape)
        ########## RUN THE SPACETIME IMAGES THROUGH VGG19
        print ("--- Running through VGG19 FC2 layer...")
        # Build the model
        # NOTE(review): the surrounding comments say "FC2" but the layer
        # fetched is 'fc1' -- confirm which layer is actually intended.
        # NOTE(review): input=/output= are legacy Keras 1 kwargs; newer Keras
        # expects inputs=/outputs=.
        base_model = VGG19(weights='imagenet')
        model = Model(input=base_model.input, output=base_model.get_layer('fc1').output) # Only take the FC2 layer output
        # Preallocate matrix output
        (num_frames, frame_h, frame_w, channels) = space_time_image.shape
        CNN_FC_output = np.zeros((num_frames,1,4096)) # (1,8377,1,4096) -> FC2 outputs dimensions (1,4096)
        # Run every spacetime frame through the network one at a time.
        for frame_num in tqdm(range(num_frames)):
            img = space_time_image[frame_num]
            x = np.expand_dims(img, axis=0)
            x = preprocess_input(x)
            fc2_features = model.predict(x) # Predict the FC2 features from VGG19, output shape is (1,4096)
            CNN_FC_output[frame_num] = fc2_features # Save the FC2 features to a matrix
        print("CNN_FC_output.shape:", CNN_FC_output.shape) # (1,8377,1,4096)
        ########### CREATE FINAL DATASET, concatenate FC output with audio vectors
        # Normalization of the audio_vectors occurs in this function -> Hanoi forgot to normalize in MATLAB!!!!
        final_audio_vector = createAudioVectorDatasetForOneVid(audio_features, space_time_image.shape) #(8377, 18)
        print ("final_audio_vector.shape:", final_audio_vector.shape)
        ############ PACKAGE AND SAVE THE DATASET
        if USE_TITANX:
            data_extern_dest = '/home/zanoi/ZANOI/auditory_hallucinations_data/FC_2_data/'
        else: # Working on MacBook Pro
            data_extern_dest = '/Volumes/SAMSUNG_SSD_256GB/ADV_CV/data/'
        # One HDF5 file per (audio, camera angle) pair holding X and Y arrays.
        file_name = data_extern_dest + audio_prefix + angle_name + '_dataX_dataY.h5'
        with h5py.File(file_name, 'w') as hf:
            print ("Writing data to file...")
            hf.create_dataset('dataX', data=CNN_FC_output)
            hf.create_dataset('dataY', data=final_audio_vector)
print ("--- {EVERYTHING COMPLETE HOMIEEEEEEEEE} ---")
6738,
7925,
62,
9060,
62,
40890,
13,
15588,
62,
26791,
1330,
1635,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
7925,
62,
9060,
62,
40890,
13,
6122,
292,
62,
5310,
13363,
62,
27530,
13,
320,
11286,
316,
62,
26791,
1330,
662,
14681,
... | 2.488764 | 1,780 |
import sqlite3 | [
11748,
44161,
578,
18
] | 3.5 | 4 |
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next | [
2,
30396,
329,
1702,
306,
12,
25614,
1351,
13,
198,
2,
1398,
7343,
19667,
25,
198,
2,
220,
220,
220,
220,
825,
11593,
15003,
834,
7,
944,
11,
1188,
28,
15,
11,
1306,
28,
14202,
2599,
198,
2,
220,
220,
220,
220,
220,
220,
220,
... | 2.272727 | 66 |
"""Allows to configure custom shell commands to turn a value for a sensor."""
CONF_COMMAND_TIMEOUT = "command_timeout"
DEFAULT_TIMEOUT = 15
DOMAIN = "command_line"
PLATFORMS = ["binary_sensor", "cover", "sensor", "switch"]
| [
37811,
34934,
284,
17425,
2183,
7582,
9729,
284,
1210,
257,
1988,
329,
257,
12694,
526,
15931,
198,
198,
10943,
37,
62,
9858,
44,
6981,
62,
34694,
12425,
796,
366,
21812,
62,
48678,
1,
198,
7206,
38865,
62,
34694,
12425,
796,
1315,
19... | 3.068493 | 73 |
import os
import sys
import hashlib
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
12234,
8019,
628,
628,
628,
628,
198
] | 3.142857 | 14 |
# Generated by Django 2.2.10 on 2020-05-29 12:30
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
940,
319,
12131,
12,
2713,
12,
1959,
1105,
25,
1270,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.875 | 32 |
from transformers import ElectraTokenizer, ElectraForSequenceClassification, pipeline
from pprint import pprint
# Load the tokenizer and classifier; the checkpoint name indicates a
# KoELECTRA-small model fine-tuned on the NSMC (Korean movie review) dataset.
tokenizer = ElectraTokenizer.from_pretrained("monologg/koelectra-small-finetuned-nsmc")
model = ElectraForSequenceClassification.from_pretrained("monologg/koelectra-small-finetuned-nsmc")
# Wrap tokenizer + model in a ready-to-use sentiment-analysis pipeline.
nsmc = pipeline("sentiment-analysis", tokenizer=tokenizer, model=model)
# Sample sentences (Korean movie reviews) to classify.
texts = [
    "이 영화는 미쳤다. 넷플릭스가 일상화된 시대에 극장이 존재해야하는 이유를 증명해준다.",
    "촬영감독의 영혼까지 갈아넣은 마스터피스",
    "보면서 화가날수있습니다.",
    "아니 그래서 무슨말이 하고싶은거야 ㅋㅋㅋ",
]
pprint(nsmc(texts))
| [
6738,
6121,
364,
1330,
5903,
430,
30642,
7509,
11,
5903,
430,
1890,
44015,
594,
9487,
2649,
11,
11523,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
198,
30001,
7509,
796,
5903,
430,
30642,
7509,
13,
6738,
62,
5310,
13363,
7203,
2144,
9... | 1.371212 | 396 |
from __future__ import division
import numpy as np
import tensorflow as tf
from SIDLoader import SIDLoader
from ModelBuilder import ModelBuilder
from Experiment import Experiment
import time,datetime,os,glob
# Paths are resolved relative to the current working directory.
path_prefix = '.'
checkpoint_dir = path_prefix+'/chk'
dataset_dir = path_prefix+'/dataset'
# Sensor black level; not referenced later in this script -- TODO confirm use.
black_level = 512
seed = 1337
tensorboard_dir = path_prefix+'/tensorboard/'
#Set initial seed
np.random.seed(seed)
#Load flat matrix
# Test split only; ground truth is kept, raw frames are dropped.
dataset = SIDLoader(dataset_dir, patch_fn=None,keep_raw=False,keep_gt=True, set_id='test')
#Set up experiments
expList = []
expList.append(Experiment(name='Sony',model_fn={'fn':ModelBuilder.build_loadable_cchen},device="/device:GPU:0",tensorboard_dir=tensorboard_dir,checkpoint_dir='../checkpoint',dataset=dataset))
#expList.append(Experiment(name='cchen_sony_noflip',model_fn={'fn':ModelBuilder.build_cchen_sony_exp},device="/device:GPU:0",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_s_sony_noflip',model_fn={'fn':ModelBuilder.build_unet_s_sony_exp},device="/device:GPU:1",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='deep_isp_noflip',model_fn={'fn':ModelBuilder.build_deep_isp_exp},device="/device:GPU:2",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='cchen_resize_sony_noflip',model_fn={'fn':ModelBuilder.build_cchen_sony_exp_resize},device="/device:GPU:3",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_s_resize_sony_noflip',model_fn={'fn':ModelBuilder.build_unet_s_sony_exp_resize},device="/device:GPU:4",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='cchen_sony_flip',model_fn={'fn':ModelBuilder.build_cchen_sony_exp},device="/device:GPU:0",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_s_sony_flip',model_fn={'fn':ModelBuilder.build_unet_s_sony_exp},device="/device:GPU:1",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='deep_isp_flip',model_fn={'fn':ModelBuilder.build_deep_isp_exp},device="/device:GPU:2",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='cchen_resize_sony_flip',model_fn={'fn':ModelBuilder.build_cchen_sony_exp_resize},device="/device:GPU:3",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_s_resize_sony_flip',model_fn={'fn':ModelBuilder.build_unet_s_sony_exp_resize},device="/device:GPU:4",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_self_amp2',model_fn={'fn':ModelBuilder.build_unet_self_scale},device="/device:GPU:0",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_amp_infer2',model_fn={'fn':ModelBuilder.build_unet_amp_infer},device="/device:GPU:1",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
# Run exactly one pass (epoch) over the test set for every experiment.
epoch = 0
dataset.start()
try:
    #test loop
    for exp in expList:
        exp.create_test_writer()
    while(epoch < 1):
        #Get batch from batchloader
        (x,y,r) = dataset.get_batch()
        #start running the test step on each GPU
        for exp in expList:
            exp.test_action(x,y,r)
        #Wait for all to finish
        for exp in expList:
            exp.finish_test_action()
        epoch = dataset.readEpoch
        if(dataset.readC == 0): #It is the end of the epoch
            for exp in expList:
                exp.end_of_epoch_test()
except KeyboardInterrupt:
    print('Keyboard interrupt accepted')
finally:
    # Always stop the loader and close each experiment's TF session.
    print("Stopping dataset")
    dataset.stop()
    for exp in expList:
        exp.model['sess'].close()
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
311,
2389,
17401,
1330,
311,
2389,
17401,
198,
6738,
9104,
32875,
1330,
9104,
32875,
198,
6738,
29544,
1330,
29... | 2.49938 | 1,614 |
def count_jump_steps(offsets):
    """Return the number of jumps needed to escape the offset list.

    Starting at index 0, each visited offset is a relative jump; offsets
    >= 3 are decremented after use, all others incremented (Advent of Code
    2017, day 5, part 2 rules).  *offsets* itself is not modified.
    """
    jumps = list(offsets)  # work on a copy so the caller's list is untouched
    steps = 0
    idx = 0
    while idx < len(jumps):
        step = jumps[idx]
        if step >= 3:
            jumps[idx] -= 1
        else:
            jumps[idx] += 1
        idx += step
        steps += 1
    return steps


if __name__ == "__main__":
    # Guarding the I/O lets the helper be imported and tested in isolation.
    with open('./input.txt') as infile:
        offsets = [int(line.rstrip('\n')) for line in infile.readlines()]
    print(count_jump_steps(offsets))
| [
4480,
1280,
7,
4458,
14,
15414,
13,
14116,
11537,
355,
1167,
576,
25,
198,
220,
220,
220,
18045,
796,
685,
600,
7,
72,
13,
81,
36311,
10786,
59,
77,
6,
4008,
329,
1312,
287,
1167,
576,
13,
961,
6615,
3419,
60,
198,
220,
220,
220... | 1.849162 | 179 |
from geneal.genetic_algorithms import ContinuousGenAlgSolver, BinaryGenAlgSolver
| [
6738,
9779,
282,
13,
5235,
5139,
62,
282,
7727,
907,
1330,
45012,
13746,
2348,
70,
50,
14375,
11,
45755,
13746,
2348,
70,
50,
14375,
628
] | 3.28 | 25 |
# from KK
import matplotlib
matplotlib.use('Agg')
from rnn import RNN
from copy import deepcopy
import time
import os
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.clip_grad import clip_grad_norm
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
import torch.nn.init as init
from IPython import embed
import shutil
from datasets import EpisodicFroggerDataset, EpisodicDiffFroggerDataset
from collections import OrderedDict
from imageio import imread, imwrite
from glob import glob
from vq_vae_small import AutoEncoder
from conv_vae import Encoder, Decoder, VAE
from utils import discretized_mix_logistic_loss
from utils import sample_from_discretized_mix_logistic
# Indices to exclude; best_inds keeps the remaining indices out of 800 --
# presumably VAE latent dimensions, TODO confirm.
worst_inds = np.load('worst_inds.npz')['arr_0']
all_inds = range(800)
best_inds = np.array([w for w in all_inds if w not in list(worst_inds)])
# Fixed seed for reproducible sampling.
torch.manual_seed(139)
# Precomputed PCA of the VAE mu vectors (components plus normalization stats).
pcad = np.load('pca_components_vae.npz')
V = pcad['V']
vae_mu_mean = pcad['Xmean']
vae_mu_std = pcad['Xstd']
Xpca_std = pcad['Xpca_std']
# Per-dimension statistics of the VAE differences, restricted to best_inds.
dparams = np.load('vae_diff_params.npz')
mu_diff_mean = dparams['mu_diff_mean'][best_inds]
mu_diff_std = dparams['mu_diff_std'][best_inds]
sig_diff_mean = dparams['sig_diff_mean'][best_inds]
sig_diff_std = dparams['sig_diff_std'][best_inds]
if __name__ == '__main__':
    import argparse
    # Default data/model locations on the training machine.
    default_base_datadir = '/localdata/jhansen/trajectories_frames/dataset/'
    default_base_savedir = '/localdata/jhansen/trajectories_frames/saved/'
    default_vae_model_loadpath = os.path.join(default_base_savedir, 'conv_vae.pkl')
    #default_rnn_model_loadpath = os.path.join(default_base_savedir, 'rnn_vae.pkl')
    default_rnn_model_loadpath = os.path.join(default_base_savedir, 'rnn_model_epoch_000152_loss0.000166.pkl')
    parser = argparse.ArgumentParser(description='train vq-vae for frogger images')
    parser.add_argument('-c', '--cuda', action='store_true', default=False)
    parser.add_argument('-d', '--datadir', default=default_base_datadir)
    parser.add_argument('-v', '--vae_model_loadpath', default=default_vae_model_loadpath)
    parser.add_argument('-t', '--transform', default='std')
    parser.add_argument('-r', '--rnn_model_loadpath', default=default_rnn_model_loadpath)
    parser.add_argument('-dt', '--data_type', default='diff')
    parser.add_argument('-hs', '--hidden_size', default=512, type=int)
    parser.add_argument('-n', '--num_train_limit', default=-1, help='debug flag for limiting number of training images to use. defaults to using all images', type=int)
    parser.add_argument('-g', '--generate_results', action='store_true', default=False, help='generate dataset of codes')
    args = parser.parse_args()
    use_cuda = args.cuda

    # VAE output is modeled with a discretized logistic mixture:
    # mean and scale for each component plus the mixture weights (10+2*10).
    dsize = 40
    nr_mix = nr_logistic_mix = 10
    probs_size = (2*nr_mix)+nr_mix
    latent_size = 32

    # Build the conv-VAE and optionally move it to the GPU.
    encoder = Encoder(latent_size)
    decoder = Decoder(latent_size, probs_size)
    vae = VAE(encoder, decoder, use_cuda)
    if use_cuda:
        print("using gpu")
        vae = vae.cuda()
        vae.encoder = vae.encoder.cuda()
        vae.decoder = vae.decoder.cuda()
    # Restore VAE weights from the checkpoint, if available.
    vae_epoch = 0
    if args.vae_model_loadpath is not None:
        if os.path.exists(args.vae_model_loadpath):
            vae_model_dict = torch.load(args.vae_model_loadpath)
            vae.load_state_dict(vae_model_dict['state_dict'])
            vae_epoch = vae_model_dict['epoch']
            print('loaded vae checkpoint at epoch: {} from {}'.format(vae_epoch, args.vae_model_loadpath))
        else:
            print('could not find checkpoint at {}'.format(args.vae_model_loadpath))
            embed()
    else:
        print("no VAE path provided")

    # setup rnn
    hidden_size = args.hidden_size
    # input after only good parts of vae taken
    input_size = 50
    seq_length = 168
    lr = 1e-4
    rnn = RNN(input_size,hidden_size)
    # Renamed from `optim`: the original assignment shadowed the imported
    # torch.optim module.
    optimizer = optim.Adam(rnn.parameters(), lr=lr, weight_decay=1e-6)
    if use_cuda:
        rnn.cuda()
    # Restore RNN weights from the checkpoint, if available.
    rnn_epoch = 0
    if args.rnn_model_loadpath is not None:
        if os.path.exists(args.rnn_model_loadpath):
            rnn_model_dict = torch.load(args.rnn_model_loadpath)
            rnn.load_state_dict(rnn_model_dict['state_dict'])
            rnn_epoch = rnn_model_dict['epoch']
            print('loaded rnn checkpoint at epoch: {} from {}'.format(rnn_epoch, args.rnn_model_loadpath))
        else:
            print('could not find rnn checkpoint at {}'.format(args.rnn_model_loadpath))
            embed()
    else:
        print("no RNN path provided")

    # Derive the episode directories; generated outputs mirror the inputs
    # with an 'episodic_rnn_' prefix.
    #test_dir = 'episodic_vae_test_results'
    #test_dir = 'episodic_vae_test_tiny/'
    test_dir = 'episodic_vae_test_tiny/'
    train_dir = test_dir.replace('test', 'train')
    gen_test_dir = test_dir.replace('episodic_', 'episodic_rnn_')
    gen_train_dir = train_dir.replace('episodic_', 'episodic_rnn_')
    test_data_path = os.path.join(args.datadir,test_dir)
    train_data_path = os.path.join(args.datadir,train_dir)
    # Choose the dataset flavor: frame differences or raw frames.
    if args.data_type == 'diff':
        test_data_loader = DataLoader(EpisodicDiffFroggerDataset(test_data_path, transform=args.transform), batch_size=32, shuffle=True)
        #train_data_loader = DataLoader(EpisodicDiffFroggerDataset(train_data_path, transform=args.transform, limit=args.num_train_limit), shuffle=True)
    else:
        test_data_loader = DataLoader(EpisodicFroggerDataset(test_data_path, transform=args.transform), batch_size=32, shuffle=True)
        #train_data_loader = DataLoader(EpisodicFroggerDataset(train_data_path, transform=args.transform, limit=args.num_train_limit), shuffle=True)
    test_true_data_path = os.path.join(args.datadir, 'imgs_test')
    #train_true_data_path = os.path.join(args.datadir, 'imgs_train')
    generate_imgs(test_data_loader,os.path.join(args.datadir, gen_test_dir), test_true_data_path, args.data_type, args.transform)
    #generate_imgs(train_data_loader,os.path.join(args.datadir, gen_train_dir), train_true_data_path)
    embed()
| [
2,
422,
509,
42,
198,
11748,
2603,
29487,
8019,
198,
6759,
29487,
8019,
13,
1904,
10786,
46384,
11537,
198,
6738,
374,
20471,
1330,
371,
6144,
198,
6738,
4866,
1330,
2769,
30073,
198,
11748,
640,
198,
11748,
28686,
198,
11748,
28034,
19... | 2.430898 | 2,518 |
# climatology test adpated from Patrick Halsall's
# ftp://ftp.aoml.noaa.gov/phod/pub/bringas/XBT/AQC/AOML_AQC_2018/codes/qc_checks/clima_checker.py
import sys, numpy
import util.AOMLinterpolation as interp_helper
import util.AOMLnetcdf as read_netcdf
def climatology_check(temperature, interpMNTemp, interpSDTemp, sigmaFactor=5.0):
    """Flag a temperature against interpolated climatology statistics.

    temperature: float, the measured temperature.
    interpMNTemp: interpolated climatological mean temperature.
    interpSDTemp: interpolated climatological standard deviation.
    sigmaFactor: tolerated deviation, in standard deviations.

    Returns a QC flag: 0 when the climatology statistics are unusable
    (missing value 99999.99 or non-positive std), 1 when the temperature
    is within sigmaFactor standard deviations of the mean, 4 otherwise.
    """
    MISSING = 99999.99
    # Without valid statistics the check cannot be performed at all.
    if MISSING in (interpMNTemp, interpSDTemp) or interpSDTemp <= 0.0:
        return 0
    deviation = abs(temperature - interpMNTemp) / interpSDTemp
    return 1 if deviation <= sigmaFactor else 4
def subset_climatology_data(longitude, latitude, statType, coordRange=1, filePathName='data/woa13_00_025.nc'):
    """Extract a climatology subset centred on a coordinate.

    longitude, latitude: floats, centre of the subset.
    statType: either 'analyzed mean' or 'standard deviations'.
    coordRange: degrees plus/minus around the centre to include.
    filePathName: relative path to the climatology netCDF file.

    Returns (latLonDepthTempList, depthColumns, latLonList) as produced by
    read_netcdf.subset_data; returns three empty lists (and writes a
    message to stderr) when statType is not recognised.
    """
    # Translate the human-readable statistic name to the netCDF field name.
    field_by_stat = {"analyzed mean": "t_an", "standard deviations": "t_sd"}
    fieldType = field_by_stat.get(statType)
    if fieldType is None:
        sys.stderr.write("Cannot process climatology file with a statistical "
                         "field as " + statType + "\n")
        return [], [], []
    latLonDepthTempList, depthColumns, latLonList, time = read_netcdf.subset_data(longitude, latitude, filePathName, coordRange, True, fieldType)
    return latLonDepthTempList, depthColumns, latLonList
| [
2,
5424,
265,
1435,
1332,
512,
79,
515,
422,
9925,
367,
874,
439,
338,
220,
198,
2,
10117,
79,
1378,
701,
79,
13,
64,
296,
75,
13,
3919,
7252,
13,
9567,
14,
746,
375,
14,
12984,
14,
48580,
292,
14,
55,
19313,
14,
32,
48,
34,
... | 2.991202 | 682 |
from django.contrib import admin
from .models import *
# Expose the app's models in the Django admin site.
admin.site.register(Scientist)
admin.site.register(Employer)
admin.site.register(DataPool)
admin.site.register(DataEntry)
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
27530,
1330,
1635,
198,
198,
28482,
13,
15654,
13,
30238,
7,
23010,
396,
8,
198,
28482,
13,
15654,
13,
30238,
7,
29733,
263,
8,
198,
28482,
13,
15654,
13,
30238,
7,
660... | 3.218182 | 55 |
"""
Module containing different distance functions.
"""
import numpy as np
from scipy import stats
def linear_distance(data, synth_data):
    """ Compute the linear distance between autocorrelations.

    Parameters
    -----------
    data : 1d array
        Autocorrelation of the real data.
    synth_data : 1d array
        Autocorrelation of the synthetic data.

    Returns
    -------
    d : float
        Mean squared difference between the autocorrelations
        (NaN entries are ignored by the mean).
    """
    residual = data - synth_data
    return np.nanmean(np.square(residual))
def logarithmic_distance(data, synth_data):
    """ Compute the logarithmic distance between autocorrelations.

    Parameters
    -----------
    data : 1d array
        Autocorrelation of the real data.
    synth_data : 1d array
        Autocorrelation of the synthetic data.

    Returns
    -------
    d : float
        Mean squared difference of the element-wise natural logs
        (NaN entries are ignored by the mean).
    """
    residual = np.log(data) - np.log(synth_data)
    return np.nanmean(np.square(residual))
37811,
198,
26796,
7268,
1180,
5253,
5499,
13,
220,
198,
37811,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
1330,
9756,
628,
198,
4299,
14174,
62,
30246,
7,
7890,
11,
33549,
62,
7890,
2599,
198,
220,
220,
220,
37227,
... | 2.589005 | 382 |
# Copyright 2013-2014 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from cassandra.util import sortedset
from cassandra.cqltypes import EMPTY
| [
2,
15069,
2211,
12,
4967,
6060,
1273,
897,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789... | 3.619512 | 205 |
""" This module contains a class to represent multiple Tichu Cards. """
BOMBS = ['four_bomb', 'straight_bomb']
class Cards():
    """
    A class to represent multiple Tichu Cards.
    Can either be a hand (i.e. no specific combination)
    or a combination (e.g. pair, straight, ...).
    The type is determined automatically when adding or removing cards.
    Inspired by the following sources:
      - https://github.com/hundredblocks/ticher
      - https://github.com/sylee421/TichuRL
    Attributes
    ----------
    cards: list of Card
        A list containing all Card objects in this Cards instance.
    phoenix_flag: bool
        Whether this Cards instance contains a Phoenix.
    size: int
        The number of Cards in this instance.
    type: str
        The type of this Cards instance (e.g. hand, pair, straight)
    power: float
        The power of this Cards instance. It depends on the type
        and the highest Card.
        For example: A hand has 0 power, a pair of 10s has power 10.
    points: int
        The aggregated Card points in this instance.
        In Tichu, only 5, 10, K, Phoenix and Dragon give points.
    Methods
    -------
    show:
        Prints all the Cards using the Card.image attribute.
    get_available_combinations:
        Outputs a list of all possible combinations.
    contains(other):
        Checks whether other (list of Card objects) are contained
        in this Cards instance.
    remove(card):
        Removes a Card from this Cards instance.
    """
    # Class-level defaults; real values are assigned per instance in __init__.
    size = None
    cards = None
    phoenix_flag = None
    def __init__(self, card_list):
        """
        Constructs a Cards instance.
        Parameter
        ---------
        card_list: A list of Card objects.
        """
        # Dispatch table for the type-checking functions, keyed by
        # min(card count, 5); key 6 (pair sequence) is invoked separately
        # by _set_type_and_power for longer sets.
        self.dispatch_type = {0: self._typecheck_pass,
                              1: self._typecheck_solo,
                              2: self._typecheck_pair,
                              3: self._typecheck_triple,
                              4: self._typecheck_four_bomb,
                              5: self._typecheck_full_straight,
                              6: self._typecheck_pair_seq}
        # set attributes; the Phoenix is tracked with a flag since several
        # combination checks treat it specially
        self.phoenix_flag = False
        self.cards = list()
        for i in card_list:
            self.cards.append(i)
            if i.name == 'Phoenix':
                self.phoenix_flag = True
        self.cards.sort()
        self.size = len(self.cards)
        self.type = None
        self.power = 0
        # run init functions to populate type, power and points
        self._set_type_and_power()
        self._set_points()
def show(self):
""" A nice visualization of all cards in the set. """
if self.size == 0:
print(' PASS')
else:
for i in range(5):
for crd in range(self.size):
print(self.cards[crd].image[i], end='')
print()
def _set_points(self):
""" Set number of game points of this card set. """
if self.type != 'pass':
self.points = sum([crd.points for crd in self.cards])
else:
self.points = 0
    def _set_type_and_power(self):
        """ Determines which combination (if any) is this card set. """
        self.type = 'unk'
        # check for all but pair sequence depending on card length
        # (lengths above 5 all dispatch to the full/straight checker)
        self.dispatch_type[min(len(self.cards),5)]()
        # if type is still unknown, check for pair sequence
        if self.type == 'unk':
            self.dispatch_type[6]()
        # if type is still unknown, it must be a plain hand with no
        # combination value
        if self.type == 'unk':
            self.type = 'hand'
            self.power = 0
def get_available_combinations(self):
""" Get all available combinations form this card set. """
solo = self._get_available_solo()
pair = self._get_available_pair()
triple = self._get_available_triple()
four_bomb = self._get_available_four_bomb()
full = self._get_available_full()
straight, straight_bomb = self._get_available_straight()
pair_seq = self._get_available_pair_seq()
return [solo, pair, triple, four_bomb,
full, straight, straight_bomb, pair_seq]
def contains(self, other):
""" Checks if this instance contains all cards from other. """
this_cards = [(crd.name, crd.suit) for crd in self.cards]
other_cards = [(crd.name, crd.suit) for crd in other.cards]
return all([elem in this_cards for elem in other_cards])
def remove(self, card):
""" Remove a single Card and update this Cards instance. """
try:
self.cards.remove(card)
except ValueError: # if card is not in cards, return False
return False
self.cards.sort()
if card.name == 'Phoenix':
self.phoenix_flag = False
self.size = self.size - 1
self._set_type_and_power()
self._set_points()
return True
def _typecheck_pass(self):
""" Checks whether Cards is of type pass. """
if len(self.cards)==0:
self.type = 'pass'
self.power = 0
def _typecheck_solo(self):
""" Checks whether Cards is of type solo. """
if len(self.cards)==1:
self.type = 'solo'
self.power = self.cards[0].power
def _typecheck_pair(self):
""" Checks whether Cards is of type pair. """
if len(self.cards)==2:
# regular pair
if self.cards[0].power == self.cards[1].power:
self.type = 'pair'
self.power = self.cards[0].power
return
# phoenix pair
elif (self.phoenix_flag and
not (self.cards[1].name == 'Dragon' or
self.cards[1].name == 'Dog')):
self.type = 'pair'
self.power = self.cards[1].power
def _typecheck_triple(self):
""" Checks whether Cards is of type triple. """
if len(self.cards)==3:
# regular triple
if (self.cards[0].power == self.cards[1].power and
self.cards[1].power == self.cards[2].power):
self.type = 'triple'
self.power = self.cards[0].power
# phoenix triple
elif self.phoenix_flag and self.cards[1].power == self.cards[2].power:
self.type = 'triple'
self.power = self.cards[1].power
def _typecheck_four_bomb(self):
""" Checks whether Cards is of type four bomb. """
if (len(self.cards)==4 and self.cards[0].power == self.cards[1].power and
self.cards[1].power == self.cards[2].power and
self.cards[2].power == self.cards[3].power):
self.type = 'four_bomb'
self.power = 50 + self.cards[0].power
def _typecheck_full_straight(self):
""" Checks whether Cards is of type full house or straight. """
self._typecheck_full()
self._typecheck_straight()
    def _typecheck_full(self):
        """ Checks whether Cards is of type full house.

        Covers four layouts of the sorted five cards: triple at the low
        end, triple at the high end, and the two Phoenix-completed
        variants.
        """
        # NOTE(review): the branch comments about "higher"/"lower" depend on
        # the Card sort order -- verify against Card's comparison methods.
        if len(self.cards)==5:
            # regular full house with triple higher than pair
            if (self.cards[0].power == self.cards[1].power and
                    self.cards[1].power == self.cards[2].power and
                    self.cards[3].power == self.cards[4].power):
                self.type = 'full'
                self.power = self.cards[0].power
            # regular full house with pair higher than triple
            elif (self.cards[0].power == self.cards[1].power and
                  self.cards[2].power == self.cards[3].power and
                  self.cards[3].power == self.cards[4].power):
                self.type = 'full'
                self.power = self.cards[2].power
            # phoenix full house with phoenix triple
            elif (self.phoenix_flag and
                  self.cards[1].power == self.cards[2].power and
                  self.cards[3].power == self.cards[4].power):
                self.type = 'full'
                self.power = self.cards[3].power
            # phoenix full house with phoenix pair
            elif self.phoenix_flag:
                if (self.cards[1].power == self.cards[2].power and
                        self.cards[2].power == self.cards[3].power):
                    self.type = 'full'
                    self.power = self.cards[1].power
                elif (self.cards[2].power == self.cards[3].power and
                      self.cards[3].power == self.cards[4].power):
                    self.type = 'full'
                    self.power = self.cards[2].power
def _typecheck_straight(self):
"""
Checks whether Cards is of type straight.
Can be a straight with regular cards, straight with Phoenix,
or straight bomb.
"""
self._typecheck_regular_straight()
self._typecheck_phoenix_straight()
    def _typecheck_regular_straight(self):
        """ Checks whether Cards is of type straight (w/o Phoenix).

        Scans adjacent sorted cards: each power must increase by exactly 1.
        If additionally every adjacent pair shares a suit, the straight is
        a straight bomb.
        """
        if len(self.cards)>=5:
            is_straight = True
            is_flush = True
            for i in range(len(self.cards)-1):
                if self.cards[i].power + 1 == self.cards[i+1].power:
                    # consecutive powers; track whether the suits match too
                    if self.cards[i].suit == self.cards[i+1].suit:
                        pass
                    else:
                        is_flush = False
                else:
                    is_straight = False
                    break
            # if it is a straight and all suits are equal, it is a bomb
            # (bomb powers are offset by 100 so they outrank everything)
            if is_straight and is_flush:
                self.type = 'straight_bomb'
                self.power = 100 + self.cards[-1].power
                return
            if is_straight:
                self.type = 'straight'
                self.power = self.cards[-1].power
    def _typecheck_phoenix_straight(self):
        """ Checks whether Cards is of type straight (with Phoenix).

        Scans the cards from index 1 upward (index 0 is skipped --
        presumably the Phoenix after sorting, TODO confirm the Card
        ordering) and allows exactly one single-card gap, which the
        Phoenix fills.
        """
        if len(self.cards)>=5 and self.phoenix_flag:
            phoenix_used = False
            phoenix_idx = -1
            is_straight = True
            for i in range(len(self.cards)-2):
                if self.cards[i+1].power+1 == self.cards[i+2].power:
                    pass
                elif (not(phoenix_used) and
                      (self.cards[i+1].power+2 == self.cards[i+2].power)):
                    # a gap of 2 is bridged by the Phoenix, once only
                    phoenix_used = True
                    phoenix_idx = i+1
                else:
                    is_straight = False
            if is_straight:
                self.type = 'straight'
                # phoenix is last card of straight: power is last card + 1
                # NOTE(review): phoenix_idx can be at most len(self.cards)-2
                # here, so the second comparison looks unreachable -- confirm.
                if not(phoenix_used) or (phoenix_idx == len(self.cards)):
                    self.power = self.cards[-1].power+1
                # phoenix is not last card of straight: power is last card
                else:
                    self.power = self.cards[-1].power
def _typecheck_pair_seq(self):
""" Checks whether Cards is of type pair sequence. """
self._typecheck_regular_pair_seq()
self._typecheck_phoenix_pair_seq()
    def _typecheck_regular_pair_seq(self):
        """ Checks whether Cards is of type pair_seq (w/o Phoenix).

        In the sorted card list a valid pair sequence alternates: cards at
        even index equal their successor (forming a pair), cards at odd
        index are one power below their successor (the next pair up).
        Dragon and Dog can never be part of a pair sequence.
        """
        if (len(self.cards)>=4 and len(self.cards)%2==0 and
                not(any((crd.name == 'Dog' or crd.name == 'Dragon')
                        for crd in self.cards))):
            is_pair_regular = True
            for i in range(len(self.cards)-1):
                if i%2 == 0 and self.cards[i].power == self.cards[i+1].power:
                    pass
                elif i%2 == 1 and self.cards[i].power+1 == self.cards[i+1].power:
                    pass
                else:
                    is_pair_regular = False
                    break
            if is_pair_regular:
                self.type = 'pair_seq'
                self.power = self.cards[-1].power
    def _typecheck_phoenix_pair_seq(self):
        """
        Checks whether Cards is of type pair_seq (with Phoenix).
        For a phoenix pair sequence, the algorithm is quite complicated,
        because there are a lot of possible combinations.
        Phoenix can be used in the first pair, in any middle pair, or in
        the last pair.
        Depending on where the Phoenix is used, either all equal or all
        unequal indices are increments of 1 in a valid pair sequence.
        If the Phoenix is used as a replacement for an equal indexed card,
        then the logic turns around ("toggles") and all subsequent cards
        need to be increments of the previous card in unequal indices.
        """
        # return if pair sequence is not possible
        if not (len(self.cards)>=4 and len(self.cards)%2==0 and
                not(any((crd.name == 'Dog' or crd.name == 'Dragon')
                        for crd in self.cards)) and
                self.phoenix_flag):
            return
        # return if card sequence (excluding Phoenix) does not increase by 1
        # (pop(0) drops the lowest power -- presumably the Phoenix, TODO
        # confirm the Card power ordering)
        unique_power = sorted({crd.power for crd in self.cards})
        unique_power.pop(0) # remove phoenix from set
        if not (all(x+1==y for x, y in zip(unique_power, unique_power[1:])
                ) and len(unique_power)>1):
            return
        # continue and prepare local variables if preconditions are met
        phoenix_used = False
        is_pair_equal = True
        is_pair_unequal = True
        # check for phoenix use in equal card list index
        toggle = 1
        antitoggle = 0
        for i in range(1,len(self.cards)-1):
            if (i%2 == toggle and
                    self.cards[i].power == self.cards[i+1].power):
                pass
            elif (i%2 == antitoggle and
                  self.cards[i].power + 1 == self.cards[i+1].power):
                if i+1 >= len(self.cards)-1 and not phoenix_used:
                    # phoenix used as the highest pair of sequence
                    phoenix_used = True
                elif phoenix_used: # phoenix cannot be used twice
                    is_pair_unequal = False
                    break
                else:
                    # if phoenix is used in the middle of the sequence,
                    # change matching behavior of toggle/antitoggle
                    # so that i%2 matches next element
                    phoenix_used = True
                    toggle = 0
                    antitoggle = 1
        # check for phoenix use in equal card list index
        if not is_pair_unequal:
            phoenix_used = False
            for i in range(1,len(self.cards)-1):
                if (i%2 == 0 and
                        self.cards[i].power == self.cards[i+1].power):
                    pass
                elif (i%2 == 1 and
                      self.cards[i].power+1 == self.cards[i+1].power):
                    # check if phoenix is first card in sequence
                    if i == 1:
                        phoenix_used = True
                    elif phoenix_used: # phoenix cannot be used twice
                        is_pair_equal = False
                        break
                    else:
                        phoenix_used = True
        # either scan succeeding means the set is a valid pair sequence
        if is_pair_unequal or is_pair_equal:
            self.type = 'pair_seq'
            self.power = self.cards[-1].power
def _get_available_solo(self):
""" Returns a list with all possible solo combinations. """
solo = list()
for i in range(len(self.cards)):
solo_list = self.cards[i]
solo_cards = Cards([solo_list])
if solo_cards.type == 'solo':
solo.append(solo_cards)
return solo
    def _get_available_pair(self):
        """ Returns a list with all possible pair combinations. """
        # Assumes self.cards is sorted by power and that the Phoenix, when
        # self.phoenix_flag is set, sits at index 0 (it is taken from
        # self.cards[0] below) -- TODO confirm against the constructor.
        pair = list()
        for i in range(len(self.cards)-1):
            # regular pairs: two adjacent cards of equal power
            if self.cards[i].power == self.cards[i+1].power:
                pair_list = [self.cards[i], self.cards[i+1]]
                pair_cards = Cards(pair_list)
                if pair_cards.type == 'pair':
                    pair.append(pair_cards)
            # phoenix pairs: the Phoenix paired with any non-special card
            if self.phoenix_flag and self.cards[i+1].suit != 'Special':
                pair_list = [self.cards[0], self.cards[i+1]]
                pair_cards = Cards(pair_list)
                if pair_cards.type == 'pair':
                    pair.append(pair_cards)
            # multiple pairs: also look two and three positions ahead so
            # pairs hidden inside triples/quads are found as well
            try:
                if self.cards[i].power == self.cards[i+2].power:
                    pair_list = [self.cards[i], self.cards[i+2]]
                    pair_cards = Cards(pair_list)
                    if pair_cards.type == 'pair':
                        pair.append(pair_cards)
                if self.cards[i].power == self.cards[i+3].power:
                    pair_list = [self.cards[i], self.cards[i+3]]
                    pair_cards = Cards(pair_list)
                    if pair_cards.type == 'pair':
                        pair.append(pair_cards)
            except IndexError:
                # lookahead ran past the end of the hand
                pass
        return pair
    def _get_available_triple(self):
        """ Returns a list with all possible triple combinations. """
        # `check_and_append_triple` (defined elsewhere in this module)
        # validates a candidate and appends it to the accumulator list.
        # Assumes the Phoenix, when present, sits at index 0 -- the phoenix
        # branches below always take self.cards[0]; TODO confirm.
        triple = list()
        for i in range(len(self.cards)-2):
            # regular triple: three adjacent cards of equal power
            if (self.cards[i].power == self.cards[i+1].power and
                    self.cards[i+1].power == self.cards[i+2].power):
                triple_candidate = [self.cards[i], self.cards[i+1],
                                    self.cards[i+2]]
                triple = check_and_append_triple(triple_candidate, triple)
            # phoenix triple: Phoenix completes an adjacent pair
            if (self.phoenix_flag and
                    self.cards[i+1].power == self.cards[i+2].power):
                triple_candidate = [self.cards[0], self.cards[i+1],
                                    self.cards[i+2]]
                triple = check_and_append_triple(triple_candidate, triple)
            # multiple triples: skip-ahead combinations so triples hidden
            # inside four-of-a-kinds are found as well
            try:
                if (self.cards[i].power == self.cards[i+1].power and
                        self.cards[i+1].power == self.cards[i+3].power):
                    triple_candidate = [self.cards[i], self.cards[i+1],
                                        self.cards[i+3]]
                    triple = check_and_append_triple(triple_candidate, triple)
                if (self.cards[i].power == self.cards[i+2].power and
                        self.cards[i+2].power == self.cards[i+3].power):
                    triple_candidate = [self.cards[i], self.cards[i+2],
                                        self.cards[i+3]]
                    triple = check_and_append_triple(triple_candidate, triple)
                if (self.phoenix_flag and
                        self.cards[i+1].power == self.cards[i+3].power):
                    triple_candidate = [self.cards[0], self.cards[i+1],
                                        self.cards[i+3]]
                    triple = check_and_append_triple(triple_candidate, triple)
                if (self.phoenix_flag and
                        self.cards[i+1].power == self.cards[i+4].power):
                    triple_candidate = [self.cards[0], self.cards[i+1],
                                        self.cards[i+4]]
                    triple = check_and_append_triple(triple_candidate, triple)
            except IndexError:
                # lookahead ran past the end of the hand
                pass
        return triple
def _get_available_four_bomb(self):
""" Returns a list with all possible four bomb combinations. """
four_bomb = list()
for i in range(len(self.cards)-3):
if (self.cards[i].power == self.cards[i+1].power and
self.cards[i+1].power == self.cards[i+2].power and
self.cards[i+2].power == self.cards[i+3].power):
four_list = [self.cards[i], self.cards[i+1],
self.cards[i+2], self.cards[i+3]]
four_cards = Cards(four_list)
if four_cards.type == 'four_bomb':
four_bomb.append(four_cards)
return four_bomb
def _get_available_full(self):
""" Returns a list with all possible full house combinations. """
full = list()
pair = self._get_available_pair()
triple = self._get_available_triple()
for i in pair:
for j in triple:
if i.power != j.power:
full_list = list()
full_list.extend(i.cards)
full_list.extend(j.cards)
full_cards = Cards(full_list)
if full_cards.type == 'full':
full.append(full_cards)
return full
    def _get_available_straight(self):
        """ Returns a list with all possible straight combinations.

        Returns a tuple (straight, straight_bomb); `check_candidate`
        (defined elsewhere in this module) validates the running candidate
        and appends it to the appropriate list.
        """
        straight = list()
        straight_bomb = list()
        # try every possible starting position (straights need >= 5 cards,
        # hence the -4); assumes the Phoenix, when present, is at index 0
        for i in range(len(self.cards)-4):
            candidate_list = list()
            phoenix_available = self.phoenix_flag
            for j in range(i,len(self.cards)):
                # add first card of possible straight
                if len(candidate_list)==0:
                    candidate_list.append(self.cards[j])
                    # starting on the Phoenix itself consumes it
                    if self.cards[j].name == 'Phoenix':
                        phoenix_available = False
                # no check if Phoenix is last entry
                elif candidate_list[-1].name == 'Phoenix':
                    candidate_list.append(self.cards[j])
                    straight, straight_bomb = check_candidate(candidate_list,
                                                straight, straight_bomb)
                # add subsequent cards
                elif candidate_list[-1].power+1 == self.cards[j].power:
                    candidate_list.append(self.cards[j])
                    straight, straight_bomb = check_candidate(candidate_list,
                                                straight, straight_bomb)
                # skip pairs
                elif candidate_list[-1].power == self.cards[j].power:
                    pass
                # use phoenix mid straight if available (fills a 1-card gap)
                elif (phoenix_available and
                        candidate_list[-1].power+2 == self.cards[j].power):
                    candidate_list.append(self.cards[0])
                    candidate_list.append(self.cards[j])
                    straight, straight_bomb = check_candidate(candidate_list,
                                                straight, straight_bomb)
                    phoenix_available = False
                # use phoenix as first/last card if available
                elif phoenix_available:
                    candidate_list.append(self.cards[0])
                    straight, straight_bomb = check_candidate(candidate_list,
                                                straight, straight_bomb)
                    phoenix_available = False
                # no straight possible
                else:
                    break
        return straight, straight_bomb
    def _get_available_pair_seq(self):
        """ Returns a list with all possible pair sequence combinations. """
        pair_seq = list()
        pair = self._get_available_pair()
        # grow candidate sequences starting from every available pair
        for i in range(len(pair)-1):
            candidate_list = list()
            for j in range(i,len(pair)):
                # add first element to candidate list
                if len(candidate_list) == 0:
                    candidate_list.extend(pair[j].cards)
                # add subsequent pairs whose power is one higher
                elif candidate_list[-1].power+1 == pair[j].power:
                    candidate_list.extend(pair[j].cards)
                    # NOTE(review): at this point the list always holds at
                    # least 4 cards, so this check can never be False.
                    if len(candidate_list) > 1:
                        pair_seq_cards = Cards(candidate_list)
                        if pair_seq_cards.type == 'pair_seq':
                            pair_seq.append(pair_seq_cards)
                # skip double pairs (same power seen again)
                elif candidate_list[-1].power == pair[j].power:
                    pass
                # break if no pair_seq possible
                else:
                    break
        return pair_seq
| [
37811,
770,
8265,
4909,
257,
1398,
284,
2380,
3294,
309,
488,
84,
15824,
13,
37227,
198,
198,
33,
2662,
4462,
796,
37250,
14337,
62,
27657,
3256,
705,
42729,
62,
27657,
20520,
198,
198,
4871,
15824,
33529,
198,
220,
220,
220,
37227,
1... | 1.982524 | 12,131 |
# Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
from typing import Any, Callable, List
from materialize.mzcompose import Composition
| [
2,
15069,
14633,
1096,
11,
3457,
13,
290,
20420,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
5765,
286,
428,
3788,
318,
21825,
416,
262,
7320,
8090,
13789,
198,
2,
3017,
287,
262,
38559,
24290,
2393,
379,
262,
6808,
286,
428,
16099,
... | 4.115044 | 113 |
import itertools
import Utterance
import PossibleWorld
#this table contains all the possible worlds
#this adds up all of the possible world probabilities in the rows and columns of a table
#re-adds up all of the columns and rows so that normalization is accurate
#important function for normalizing so that we can look at probability distributions
| [
11748,
340,
861,
10141,
198,
11748,
7273,
353,
590,
198,
11748,
33671,
10603,
198,
2,
5661,
3084,
4909,
477,
262,
1744,
11621,
628,
197,
2,
5661,
6673,
510,
477,
286,
262,
1744,
995,
39522,
287,
262,
15274,
290,
15180,
286,
257,
3084,... | 4.395062 | 81 |
# coding=utf-8
"""
dataloader for PASCAL VOC 2012 dataset
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from PIL import Image
from torchvision import transforms
from torch.utils.data import Dataset
from RMI.dataloaders import custom_transforms as tr
# PASCAL VOC 2012 dataset statistics: per-channel RGB mean and standard
# deviation on the raw 0-255 pixel scale (computed by the __main__ block
# at the bottom of this file).
_PASCAL_R_MEAN = 116
_PASCAL_G_MEAN = 113
_PASCAL_B_MEAN = 104
_PASCAL_R_STD = 69.58
_PASCAL_G_STD = 68.68
_PASCAL_B_STD = 72.67
class VOCSegmentation(Dataset):
    """PASCAL VOC 2012 dataset for semantic segmentation.

    Yields dict samples containing an 'image' entry (and, for non-test
    splits, a 'label' entry), normalized with the module-level PASCAL
    mean/std statistics.
    """
    NUM_CLASSES = 21

    def __init__(self,
                    data_dir,
                    crop_size=513,
                    split='train',
                    min_scale=0.5,
                    max_scale=2.0,
                    step_size=0.25):
        """
        Args:
            data_dir: path to VOC dataset directory.
            crop_size: the crop size.
            split: ["trainaug", "train", "trainval", "val", "test"].
            min_scale/max_scale/step_size: scale-augmentation parameters
                (stored for use by the training transform).
        """
        super().__init__()
        # dataset dir
        self.data_dir = data_dir
        self.image_dir = os.path.join(self.data_dir, 'JPEGImages')
        # backward-compatible alias for the original (misspelled) attribute
        self.iamge_dir = self.image_dir
        self.label_dir = os.path.join(self.data_dir, 'SegmentationClassAug')
        assert split in ["trainaug", "train", "trainval", "val", "test"]
        self.split = split
        # txt lists of images
        list_file_dir = os.path.join(self.data_dir, 'ImageSets/Segmentation')
        # crop size and scales
        self.crop_size = crop_size
        self.min_scale = min_scale
        self.max_scale = max_scale
        self.step_size = step_size
        # dataset info
        self.mean = (_PASCAL_R_MEAN, _PASCAL_G_MEAN, _PASCAL_B_MEAN)
        self.std = (_PASCAL_R_STD, _PASCAL_G_STD, _PASCAL_B_STD)
        self.ignore_label = 255
        self.image_ids = []
        self.image_lists = []
        self.label_lists = []
        # read the dataset split file (original wrapped the join in a
        # redundant second os.path.join)
        with open(os.path.join(list_file_dir, self.split + '.txt'), "r") as f:
            lines = f.read().splitlines()
        for line in lines:
            image_filename = os.path.join(self.image_dir, line + ".jpg")
            label_filename = os.path.join(self.label_dir, line + ".png")
            assert os.path.isfile(image_filename)
            # the test split ships without ground-truth labels
            if 'test' not in self.split:
                assert os.path.isfile(label_filename)
            self.image_ids.append(line)
            self.image_lists.append(image_filename)
            self.label_lists.append(label_filename)
        assert (len(self.image_lists) == len(self.label_lists))
        # print the dataset info
        print('Number of image_lists in {}: {:d}'.format(split, len(self.image_lists)))

    def __len__(self):
        """Number of samples in this split."""
        return len(self.image_lists)

    def __getitem__(self, index):
        """Load sample `index` and apply the split-specific transform.

        Note: 'trainval' contains the substring 'train', so it takes the
        training branch, as in the original implementation.
        """
        _image, _label = self._make_img_gt_point_pair(index)
        # different transforms for different splits
        if 'train' in self.split:
            sample = {'image': _image, 'label': _label}
            return self.transform_train(sample)
        elif 'val' in self.split:
            sample = {'image': _image, 'label': _label}
            return self.transform_val(sample)
        elif 'test' in self.split:
            sample = {'image': _image}
            return self.transform_test(sample)
        else:
            raise NotImplementedError

    def _make_img_gt_point_pair(self, index):
        """Open the image and the ground truth label for sample `index`."""
        _image = Image.open(self.image_lists[index]).convert('RGB')
        if 'test' not in self.split:
            _label = Image.open(self.label_lists[index])
        else:
            _label = None
        return _image, _label

    def transform_train(self, sample):
        """Transform for training.

        BUG FIX: __getitem__ referenced transform_train but the original
        class never defined it, so every access on a train split raised
        AttributeError.  This applies the same normalize/to-tensor pipeline
        as validation; the stored augmentation parameters (crop_size,
        min_scale, max_scale, step_size) are not used yet -- TODO: add the
        intended random scale/crop/flip ops from `tr` once confirmed.
        """
        composed_transforms = transforms.Compose([
            tr.Normalize(mean=self.mean, std=self.std),
            tr.ToTensor()])
        return composed_transforms(sample)

    def transform_val(self, sample):
        """Transform for validation: normalize, then convert to tensors."""
        composed_transforms = transforms.Compose([
            tr.Normalize(mean=self.mean, std=self.std),
            tr.ToTensor()])
        return composed_transforms(sample)

    def transform_test(self, sample):
        """Transform for testing (image only, no label)."""
        composed_transforms = transforms.Compose([
            tr.Normalize_Image(mean=self.mean, std=self.std),
            tr.ToTensor_Image()])
        return composed_transforms(sample)
if __name__ == '__main__':
    # Compute the per-channel mean and std of the whole dataset in two
    # passes; the results are the statistics hard-coded at the top of
    # this module.
    # data dir
    data_dir = os.path.join("/home/zhaoshuai/dataset/VOCdevkit/VOC2012")
    print(data_dir)
    dataset = VOCSegmentation(data_dir)
    #print(dataset.image_lists)
    image_mean = np.array([0.0, 0.0, 0.0])
    cov_sum = np.array([0.0, 0.0, 0.0])
    pixel_nums = 0.0
    # mean: first pass accumulates per-channel pixel sums
    for filename in dataset.image_lists:
        image = Image.open(filename).convert('RGB')
        image = np.array(image).astype(np.float32)
        pixel_nums += image.shape[0] * image.shape[1]
        image_mean += np.sum(image, axis=(0, 1))
    image_mean = image_mean / pixel_nums
    print(image_mean)
    # std dev: second pass accumulates squared deviations from the mean
    # (sample standard deviation: divides by N - 1)
    for filename in dataset.image_lists:
        image = Image.open(filename).convert('RGB')
        image = np.array(image).astype(np.float32)
        cov_sum += np.sum(np.square(image - image_mean), axis=(0, 1))
    image_cov = np.sqrt(cov_sum / (pixel_nums - 1))
    print(image_cov)
| [
2,
19617,
28,
40477,
12,
23,
198,
198,
37811,
198,
67,
10254,
1170,
263,
329,
350,
42643,
1847,
569,
4503,
2321,
27039,
198,
37811,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
19... | 2.474289 | 1,828 |
'''
Dataloader.py
'''
import cv2
import sys,os
import xml.etree.ElementTree as ET
import numpy as np
print(os.listdir())  # debug: show working-directory contents at import time

# NOTE(review): the function bodies these docstrings describe are missing
# from this excerpt; `proc` below is undefined here -- confirm they exist
# in the full source file.
'''
Gets the coordinates of the bounding box of the object
returns the bounding box
'''

'''
Returns the one hot encoded label list as a numpy array
'''

'''
This is the function that should be called to extract the data
Returns bounding box coordinates, labels, and actual images of all
data points in that order
'''

if __name__ == '__main__':
    proc()
| [
7061,
6,
201,
198,
35,
10254,
1170,
263,
13,
9078,
201,
198,
7061,
6,
201,
198,
11748,
269,
85,
17,
201,
198,
11748,
25064,
11,
418,
201,
198,
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
12152,
201,
198,
11748,
299,
32152,
... | 2.862857 | 175 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-04-10 22:32
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
24,
319,
1584,
12,
3023,
12,
940,
2534,
25,
2624,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
... | 2.781818 | 55 |
# # 1 uzdevums
# Ask the user's name and age, then report the calendar year in which
# they will turn 100.
import datetime

name = input("Enter your name: ")
age = int(input(name + ", how old are you?"))

current_year = datetime.datetime.now().year
years_until_100 = 100 - age
# Same message and values as before, with the repeated expression hoisted
# into a named variable; the dead commented-out duplicate implementation
# has been removed.
print("You will be 100 in", years_until_100, "years and that will be year",
      current_year + years_until_100)
2,
1303,
352,
334,
89,
7959,
5700,
198,
3672,
796,
5128,
7203,
17469,
534,
1438,
25,
366,
8,
198,
496,
796,
493,
7,
15414,
7,
3672,
1343,
33172,
703,
1468,
389,
345,
1701,
4008,
198,
11748,
4818,
8079,
198,
14421,
17688,
796,
4818,
... | 2.788732 | 213 |
# coding: utf-8
# Neural style transfer driver script (comments translated to English).
# NOTE(review): get_contents / get_styles / train / preprocess / postprocess
# are not defined in this excerpt -- presumably defined elsewhere in the
# original script; confirm before running.

import time
import torch
import torch.nn.functional as F
import torchvision
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import sys

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # all tested
print(device, torch.__version__)

# Read the content image and the style image
content_img = Image.open('data/rainier.jpg')
plt.imshow(content_img);
plt.show()

style_img = Image.open('data/autumn_oak.jpg')
plt.imshow(style_img);
plt.show()

# Pre-process and post-process images
rgb_mean = np.array([0.485, 0.456, 0.406])
rgb_std = np.array([0.229, 0.224, 0.225])

# Extract features: keep VGG-19 layers up to the deepest one needed
pretrained_net = torchvision.models.vgg19(pretrained=True, progress=True)
style_layers, content_layers = [0, 5, 10, 19, 28], [25]
net_list = []
for i in range(max(content_layers + style_layers) + 1):
    net_list.append(pretrained_net.features[i])
net = torch.nn.Sequential(*net_list)

# Define the loss functions
# content loss
# style loss
# total variation loss
# combined loss function
content_weight, style_weight, tv_weight = 1, 1e3, 10

# Create and initialize the composite image
# Training
image_shape = (150, 225)
# image_shape = (50, 75)
net = net.to(device)
content_X, contents_Y = get_contents(image_shape, device)
style_X, styles_Y = get_styles(image_shape, device)
output = train(content_X, contents_Y, styles_Y, device, 0.01, 500, 200)
plt.imshow(postprocess(output))
plt.show()

# (optional) second pass at a higher resolution, seeded from the output
# image_shape = (300, 450)
# _, content_Y = get_contents(image_shape, device)
# _, style_Y = get_styles(image_shape, device)
# X = preprocess(postprocess(output), image_shape).to(device)
# big_output = train(X, content_Y, style_Y, device, 0.01, 500, 200)
# d2l.set_figsize((7, 5))
# d2l.plt.imshow(postprocess(big_output));
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
640,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
28034,
10178,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
26... | 2.102094 | 764 |
"""Scraping reviews and ratings from goodreads.com
DESCRIPTION:
Scraping the newest reviews from a given goodreads book url. Script works as follows:
1. Get the given url and open with webdriver of selenium.
2. Sort the reviews by newest.
3. Parse the returned web page using BeautifulSoup4 to isolate reviews.
4. Append the reviews to global mutable list object `reviews`.
5. Move to the next page until none is left.
DEPENDENCIES:
- selenium==3.11.0
- beautifulsoup4==4.10.0
- geckodriver-v0.30.0-linux64
SCRAPING ELEMENTS MAPPING:
- rating stars `<span class=" staticStars notranslate" title="liked it">`
- 5: "it was amazing"
- 4: "really liked it"
- 3: "liked it"
- 2: "it was ok"
- 1: "did not like it"
"""
| [
37811,
3351,
2416,
278,
8088,
290,
10109,
422,
922,
40779,
13,
785,
198,
198,
30910,
40165,
25,
628,
220,
220,
220,
1446,
2416,
278,
262,
15530,
8088,
422,
257,
1813,
922,
40779,
1492,
19016,
13,
12327,
2499,
355,
5679,
25,
198,
220,
... | 2.527778 | 324 |
"""Format base class"""
import abc
from typing import Any, BinaryIO, Iterable, Iterator
from wingline.types import Payload
class Format(metaclass=abc.ABCMeta):
    """Base class for a file format.

    Concrete subclasses declare their MIME type and filename suffixes and
    implement `read`/`write` against a binary file handle.
    """

    # MIME type identifying this format (set by subclasses).
    mime_type: str
    # Filename suffixes recognized as this format; empty by default.
    suffixes: Iterable[str] = set()

    @property
    def reader(self) -> Iterator[dict[str, Any]]:
        """Iterator of dicts read from this instance's handle.

        NOTE(review): relies on `self._handle` being set elsewhere (not
        visible in this excerpt) -- confirm.
        """
        return self.read(self._handle)

    def writer(self, payload: Payload) -> None:
        """Write `payload` to this instance's handle.

        (The original docstring called this a property; it is a plain
        method.)
        """
        self.write(self._handle, payload)

    @abc.abstractmethod
    def read(self, handle: BinaryIO) -> Iterator[dict[str, Any]]:
        """Yields dicts from a file handle."""
        raise NotImplementedError

    @abc.abstractmethod
    def write(self, handle: BinaryIO, payload: Payload) -> None:
        """Writes a payload dict to a file handle."""
        raise NotImplementedError
37811,
26227,
2779,
1398,
37811,
198,
198,
11748,
450,
66,
198,
6738,
19720,
1330,
4377,
11,
45755,
9399,
11,
40806,
540,
11,
40806,
1352,
198,
198,
6738,
8539,
1370,
13,
19199,
1330,
7119,
2220,
628,
198,
4871,
18980,
7,
4164,
330,
3... | 2.682099 | 324 |
# Generated by Django 3.0.2 on 2020-03-20 11:48
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
17,
319,
12131,
12,
3070,
12,
1238,
1367,
25,
2780,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
from flask import jsonify, make_response
from api.v1.models.office_model import OfficesModel
from api.v1.models.party_model import PartiesModel
| [
6738,
42903,
1330,
33918,
1958,
11,
787,
62,
26209,
198,
6738,
40391,
13,
85,
16,
13,
27530,
13,
31810,
62,
19849,
1330,
3242,
1063,
17633,
198,
6738,
40391,
13,
85,
16,
13,
27530,
13,
10608,
62,
19849,
1330,
32024,
17633,
628,
198
] | 3.47619 | 42 |
from functools import reduce
from operator import mul
from AoC20.day_16 import data as data, parse
# Part 2: keep only valid tickets, deduce field positions, and multiply
# the values of all "departure" fields on my ticket.
rules, my_ticket, other_tickets = parse(data)
valid_tickets = [ticket for ticket in other_tickets
                 if rules.ticket_violation(ticket) is None]
field_positions = rules.field_deduction(valid_tickets)
departure_values = [my_ticket[idx] for name, idx in field_positions.items()
                    if name.startswith("departure")]
print(reduce(mul, departure_values))
| [
6738,
1257,
310,
10141,
1330,
4646,
198,
6738,
10088,
1330,
35971,
198,
198,
6738,
27378,
34,
1238,
13,
820,
62,
1433,
1330,
1366,
355,
1366,
11,
21136,
628,
198,
38785,
11,
616,
62,
43350,
11,
584,
62,
83,
15970,
796,
21136,
7,
789... | 3.046875 | 128 |
from django import template
from django.core.urlresolvers import reverse
register = template.Library()
@register.tag
| [
6738,
42625,
14208,
1330,
11055,
198,
6738,
42625,
14208,
13,
7295,
13,
6371,
411,
349,
690,
1330,
9575,
628,
198,
30238,
796,
11055,
13,
23377,
3419,
198,
198,
31,
30238,
13,
12985,
628,
198
] | 3.588235 | 34 |
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""OS information for testing."""
from coverage import env
if env.WINDOWS:
    # Windows implementation
    def process_ram():
        """How much RAM is this process using? (Windows)"""
        import ctypes
        # From: http://lists.ubuntu.com/archives/bazaar-commits/2009-February/011990.html
        class PROCESS_MEMORY_COUNTERS_EX(ctypes.Structure):
            """Used by GetProcessMemoryInfo"""
            # Field layout mirrors the Win32 PROCESS_MEMORY_COUNTERS_EX struct.
            _fields_ = [
                ('cb', ctypes.c_ulong),
                ('PageFaultCount', ctypes.c_ulong),
                ('PeakWorkingSetSize', ctypes.c_size_t),
                ('WorkingSetSize', ctypes.c_size_t),
                ('QuotaPeakPagedPoolUsage', ctypes.c_size_t),
                ('QuotaPagedPoolUsage', ctypes.c_size_t),
                ('QuotaPeakNonPagedPoolUsage', ctypes.c_size_t),
                ('QuotaNonPagedPoolUsage', ctypes.c_size_t),
                ('PagefileUsage', ctypes.c_size_t),
                ('PeakPagefileUsage', ctypes.c_size_t),
                ('PrivateUsage', ctypes.c_size_t),
            ]

        mem_struct = PROCESS_MEMORY_COUNTERS_EX()
        ret = ctypes.windll.psapi.GetProcessMemoryInfo(
            ctypes.windll.kernel32.GetCurrentProcess(),
            ctypes.byref(mem_struct),
            ctypes.sizeof(mem_struct)
        )
        # GetProcessMemoryInfo returns 0 on failure; report 0 RAM then.
        if not ret:
            return 0
        return mem_struct.PrivateUsage

elif env.LINUX:
    # Linux implementation
    import os

    _scale = {'kb': 1024, 'mb': 1024*1024}

    def _VmB(key):
        """Read the /proc/PID/status file to find memory use."""
        try:
            # Get pseudo file /proc/<pid>/status
            with open('/proc/%d/status' % os.getpid()) as t:
                v = t.read()
        except IOError:
            return 0            # non-Linux?
        # Get VmKey line e.g. 'VmRSS:  9999  kB\n ...'
        i = v.index(key)
        v = v[i:].split(None, 3)
        if len(v) < 3:
            return 0            # Invalid format?
        # Convert Vm value to bytes.
        return int(float(v[1]) * _scale[v[2].lower()])

    def process_ram():
        """How much RAM is this process using? (Linux implementation)"""
        return _VmB('VmRSS')

else:
    # Generic implementation.
    def process_ram():
        """How much RAM is this process using? (stdlib implementation)"""
        # NOTE(review): ru_maxrss is the *peak* RSS and its unit is
        # platform-dependent, unlike the byte values returned by the
        # branches above -- confirm callers only need a rough figure.
        import resource
        return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
| [
2,
49962,
739,
262,
24843,
13789,
25,
2638,
1378,
2503,
13,
43073,
13,
2398,
14,
677,
4541,
14,
43,
2149,
24290,
12,
17,
13,
15,
198,
2,
1114,
3307,
25,
3740,
1378,
2545,
27041,
316,
13,
2398,
14,
2817,
14,
1073,
1857,
9078,
14,
... | 2.069523 | 1,237 |
import random

# define functions with `def`
# NOTE(review): test_function, test_function_parameter and Person are not
# defined in this excerpt -- presumably defined elsewhere in the original.
test_function()
test_function_parameter("teste parameter")

# type() returns the type of a variable
# NOTE(review): this name shadows the builtin `list`
list = ["ade"]
print(type(list))

# int() converts a string to an integer
string = "10"
print(int(string))

# input() reads a value typed by the user (returns str in Python 3.x)
age = input("Whats is your age?")
print(int(age))

# range() returns an iterable of numbers, commonly used with `for`
print(range(5))

# function help
# help() then the function name you want help with

# format examples
# float format:
# 7 is the minimum total field width
# 2 is the number of digits after the decimal point
# f means fixed-point (float) formatting
print("R$ {:7.2f}".format(1234.50))

# integer using d, zero-padded to width 7
print("R$ {:07d}".format(4))

# format date (zero-padded day/month)
print("Data {:02d}/{:02d}".format(9, 4))

# random number in [0, 100)
print(int(random.random() * 100))

# using randrange: random integer in [1, 100]
print(random.randrange(1, 101))

# absolute value: abs()
print(abs(10))
print(abs(-10))

# variable __name__
# equals "__main__" when the file is run directly (not imported)
if __name__ == "__main__":
    print("file run directly not imported !!")

# bool testing (truthiness of common values)
bool(0)
bool("")
bool(None)
bool(1)
bool(-100)
bool(13.5)
bool("test")
bool(True)

# find() on a string returns the index, or -1 when not found
string = "test"
print(string.find("t"))

# iterating over a string with `for`
for letter in string:
    print(letter)

# lower and upper
print(string.lower())
print(string.upper())

# first letter upper
print(string.title())

# split() breaks the string on whitespace (leading spaces dropped)
string = " test"
print(string.split())

# __file__ holds this module's path
import os
print(__file__)

# dir of actual file
print(os.path.dirname(__file__))

# hasattr checks whether an object has a given attribute
person = Person()
print('Person has age?:', hasattr(person, 'age'))

# ternary (conditional) expression
print('True' if bool(1) else 'False')
| [
11748,
4738,
628,
198,
2,
329,
18684,
2163,
1262,
825,
628,
198,
9288,
62,
8818,
3419,
628,
198,
198,
9288,
62,
8818,
62,
17143,
2357,
7203,
9288,
68,
11507,
4943,
198,
198,
2,
2163,
2099,
651,
2099,
7885,
198,
4868,
796,
14631,
671... | 3.043624 | 596 |
#
# --------------------------------------------------------------------------------------------------------------------
# <copyright company="Aspose" file="base_test_context.py">
# Copyright (c) 2020 Aspose.Tasks Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# --------------------------------------------------------------------------------------------------------------------
#
import os
import json
import unittest
import warnings
import six
from asposetaskscloud import ApiClient, TasksApi, UploadFileRequest, DeleteFileRequest, DeleteFolderRequest
| [
2,
198,
2,
16529,
3880,
19351,
198,
2,
1279,
22163,
4766,
1664,
2625,
1722,
3455,
1,
2393,
2625,
8692,
62,
9288,
62,
22866,
13,
9078,
5320,
198,
2,
220,
220,
15069,
357,
66,
8,
12131,
1081,
3455,
13,
51,
6791,
10130,
198,
2,
7359,... | 4.095477 | 398 |
"""Tests for :py:mod:`katsdpdisp.data`."""
import numpy as np
from numpy.testing import assert_array_equal
from katsdpdisp.data import SparseArray
def test_sparsearray(fullslots=100,fullbls=10,fullchan=5,nslots=10,maxbaselines=6,islot_new_bls=6):
    """Simulates the assignment and retrieval of data as it happens in the signal displays when
    it receives different sets of baseline data at different timestamps, with some time continuity.
    (fullslots,fullbls,fullchan) is the dimensions of the full/complete dataset
    (nslots,maxbaselines,fullchan) is the true size of the sparse array, representing a size of (nslots,fullbls,fullchan)
    where maxbaselines<fullbls
    islot_new_bls is the number of time stamps that passes before there is a new baseline product selected/chosen in the test sequence"""
    mx=SparseArray(nslots,fullbls,fullchan,maxbaselines,dtype=np.int32)
    # fixed seed keeps the simulated data stream reproducible
    # NOTE(review): RandomState.random_integers is deprecated in favour of
    # randint -- kept as-is here.
    rs = np.random.RandomState(seed=0)
    fulldata=rs.random_integers(0,10,[fullslots,fullbls,fullchan])
    histbaselines=[]
    for it in range(fullslots):
        if it%islot_new_bls==0:#add a new baseline, remove old, every so often
            # pick a baseline index not already in the current selection
            while True:
                newbaseline=rs.random_integers(0,fullbls-1,[1])
                if len(histbaselines)==0 or (newbaseline not in histbaselines[-1]):
                    break
            if (len(histbaselines)==0):
                newbaselines=np.r_[newbaseline]
            elif (len(histbaselines[-1])<islot_new_bls):
                # selection not yet full: just append the new baseline
                newbaselines=np.r_[histbaselines[-1],newbaseline]
            else:
                # selection full: drop the oldest baseline, append the new
                newbaselines=np.r_[histbaselines[-1][1:],newbaseline]
            histbaselines.append(newbaselines)
        mx[it%nslots,histbaselines[-1],:]=fulldata[it,histbaselines[-1],:]
        # verify the most recent islot_new_bls time slots can be read back:
        # selected baselines must round-trip, deselected ones must read zero
        for cit in range(islot_new_bls):
            if (cit>=len(histbaselines)):
                break
            hasthesebaselines=list(set(histbaselines[-1-cit]) & set(histbaselines[-1]))
            missingbaselines=list(set(histbaselines[-1-cit]) - set(histbaselines[-1]))
            retrieved=mx[(it-cit)%nslots,hasthesebaselines,:]
            assert_array_equal(retrieved, fulldata[it-cit,hasthesebaselines,:], 'SparseArray getitem test failed')
            missingretrieved=mx[(it-cit)%nslots,missingbaselines,:]
            assert_array_equal(missingretrieved,np.zeros(missingretrieved.shape,dtype=np.int32), 'SparseArray missing baseline test failed')
| [
37811,
51,
3558,
329,
1058,
9078,
25,
4666,
25,
63,
74,
1381,
26059,
6381,
79,
13,
7890,
63,
526,
15931,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
13,
33407,
1330,
6818,
62,
18747,
62,
40496,
198,
6738,
479,
13... | 2.310642 | 1,043 |
import requests
from urllib.parse import urlencode
from_mate = "http://172.16.0.69:3000"
to_mate = "http://mete.cloud.cccfr"
# Copy every user and drink from the source instance to the target one.
for category in ("users", "drinks"):
    for item in get_items(category):
        set_item(item, category)
| [
11748,
7007,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
2956,
11925,
8189,
198,
198,
6738,
62,
9830,
796,
366,
4023,
1378,
23628,
13,
1433,
13,
15,
13,
3388,
25,
23924,
1,
198,
1462,
62,
9830,
796,
366,
4023,
1378,
4164,
68,
13,
... | 2.52 | 100 |
#!/usr/bin/python
import fire
import os
import re
import requests
from configparser import ConfigParser
from datetime import datetime
# HTTP status codes used by the API helpers below.
HTTP_OK_200 = 200
HTTP_CREATED_201 = 201
HTTP_AUTHORIZATION_401 = 401
HTTP_NOT_FOUND_404 = 404
class Github(object):
    '''Base class to interface with Github.com.

    Credentials are read from the GITHUB_USERNAME / GITHUB_TOKEN
    environment variables when the class is defined.
    '''
    username = os.environ.get('GITHUB_USERNAME')
    token = os.environ.get('GITHUB_TOKEN')
class Checks(object):
'''Abstraction of PR checks.
'''
def _request(self, method, path, payload=None, expected_status=None):
'''RFC2617 defined Basic Authentication via HTTP/token.
'''
client = Github()
url = client.info()['url']
response = method(
'%s%s' % (url, path),
headers={
'Accept': 'application/vnd.github.antiope-preview+json',
'Authorization': '%s:%s' % (client.username, client.token)
}
)
# Validate potential responses
if response.status_code in (HTTP_AUTHORIZATION_401, HTTP_NOT_FOUND_404):
raise Exception('Invalid credentials provided for auth')
# Validate expected status codes for a give action
if expected_status is None:
expected_status = (HTTP_OK_200, )
elif isinstance(expected_status, int):
expected_status = (expected_status, )
if response.status_code not in expected_status:
raise Exception('Unexpected response [%s] for `%s`' % (response.status_code, path))
return response
def create(self, name, branch, sha):
'''Create new checks for a given commit.
'''
response = self._request(
requests.post,
'/check-runs',
payload={
'name': name,
'branch': branch,
'head_sha': sha,
'status': 'completed',
'conclusion': 'success',
'completed_at': datetime.now().isoformat()
},
expected_status=(HTTP_CREATED_201, )
)
return response.json
def list(self, commit_hash):
'''Lists the checks for a given commit.
'''
response = self._request(
requests.get,
'/commits/%s/check-runs' % commit_hash
)
return response.json
@staticmethod
def info():
'''Returns info about the current repository.
'''
info = {}
config = ConfigParser()
config.read('.git/config')
# Validate that this is hosted on remote
try:
remote_url = config['remote "origin"']['url']
except KeyError:
raise ValueError('Git repository does not have remote origin')
# Retrieve the information we need
m = re.match(
r'git@(?P<host>github\.com):(?P<username>[a-zA-Z0-9]+)/(?P<repo_name>[a-zA-Z0-9_-]+)\.git',
remote_url
)
# Validate that the repo is on Github
if m.group('host') is None:
raise ValueError('Git repository origin is not Github.com')
# Build the URL
info['url'] = 'https://api.github.com/repos/%(owner)s/%(repo)s' % {
'owner': m.group('username'),
'repo': m.group('repo_name'),
}
# Determine where is the HEAD
with open('.git/HEAD') as file:
m = re.match(r'ref: ref/heads/(?P<branch>[a-zA-Z0-9_-]+)', f.read())
if m.group('branch') is None:
raise ValueError('Unable to find current branch name')
info['branch'] = m.group('branch')
return info
if __name__ == '__main__':
    # Expose the Github wrapper as a command-line interface via python-fire.
    fire.Fire(Github)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
2046,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
7007,
198,
6738,
4566,
48610,
1330,
17056,
46677,
198,
6738,
4818,
8079,
1330,
4818,
8079,
628,
198,
40717,
62,
11380,
62,
2167,
796... | 2.035565 | 1,912 |
"""Helper to check if path is safe to remove."""
from pathlib import Path
from custom_components.racelandshop.share import get_racelandshop
def is_safe_to_remove(path: str) -> bool:
    """Return True unless *path* is one of the protected racelandshop paths."""
    racelandshop = get_racelandshop()
    root = racelandshop.core.config_path
    conf = racelandshop.configuration
    # Paths that must never be deleted: every configured plugin/script
    # directory plus the custom_components folder itself.
    protected = {
        Path(f"{root}/{conf.appdaemon_path}"),
        Path(f"{root}/{conf.netdaemon_path}"),
        Path(f"{root}/{conf.plugin_path}"),
        Path(f"{root}/{conf.python_script_path}"),
        Path(f"{root}/{conf.theme_path}"),
        Path(f"{root}/custom_components/"),
    }
    return Path(path) not in protected
| [
37811,
47429,
284,
2198,
611,
3108,
318,
3338,
284,
4781,
526,
15931,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
2183,
62,
5589,
3906,
13,
11510,
8822,
24643,
13,
20077,
1330,
651,
62,
11510,
8822,
24643,
628,
198,
4299,
318,
... | 2.585014 | 347 |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
# Ref:
# https://www.reddit.com/r/learnpython/comments/9oc0mu/just_an_interesting_thing_i_found/
# https://docs.python-guide.org/writing/gotchas/#mutable-default-arguments
def f(accumulator=[]):
    """Deliberately buggy: the default list is created once at definition
    time and shared by every call (the gotcha this script demonstrates)."""
    return accumulator


a = f()
b = f()
a.append(3)
b.append(4)
print(b)  # [3, 4] -- both names alias the single shared default list


def append_to(element, to=None):
    """Fixed version: use None as sentinel and build a fresh list per call."""
    if to is None:
        to = []
    to.append(element)
    return to


# Solution
# Ref: https://docs.python-guide.org/writing/gotchas/#mutable-default-arguments
print('\nSolving mutable argument to function gotchas')
a = append_to(3)
b = append_to(4)
print(b)  # [4] -- each call now gets its own list
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
12,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
2,
6524,
25,
220,
198,
2,
3740,
1378,
2503,
13,
10748,
13,
785,
14,
81,
14,
35720,
29412,
14,
15944,
14,
24,
420,
... | 2.431579 | 190 |
"""
Created on Thu Oct 26 14:19:44 2017
@author: Utku Ozbulak - github.com/utkuozbulak
"""
import os
import numpy as np
import torch
from torch.optim import SGD
from cnn_visualization.misc_functions import preprocess_image, recreate_image, save_image
import argparse
import torch.nn as nn
class ClassSpecificImageGeneration():
    """
    Produces an image that maximizes a certain class with gradient ascent.

    NOTE(review): the constructor is not shown in this file section; the
    methods below read `self.model`, `self.created_image`, `self.device` and
    `self.target_class`, which are presumably set by `__init__` -- confirm.
    """
    def generate(self, iterations=150):
        """Generates class specific image
        Keyword Arguments:
            iterations {int} -- Total iterations for gradient ascent (default: {150})
        Returns:
            np.ndarray -- Final maximally activated class image
        """
        print("bat dau generate xong ... ")
        initial_learning_rate = 200
        for i in range(1, iterations):
            print(i)
            # Process image and return variable
            self.processed_image = preprocess_image(self.created_image, False)
            # Define optimizer for the image; a fresh SGD is built each
            # iteration because the optimized tensor is recreated above.
            optimizer = SGD([self.processed_image], lr=initial_learning_rate)
            # Forward
            output = self.model(self.processed_image.to(self.device))
            # Target specific class: minimizing -logit maximizes the class.
            print(output)
            class_loss = -output[0, self.target_class]
            # NOTE(review): `i % 1 == 0` is always true, so this logs (and
            # below, saves) on every iteration.
            if i % 1 == 0 or i == iterations-1:
                print('Iteration:', str(i), 'Loss',
                      "{0:.2f}".format(class_loss.cpu().data.numpy()))
            # Zero grads
            self.model.zero_grad()
            # Backward
            class_loss.backward()
            # Update image
            optimizer.step()
            # Recreate image from the optimized tensor for the next round.
            self.created_image = recreate_image(self.processed_image)
            print(self.created_image.size)
            if i % 1 == 0 or i == iterations-1:
                # Save image; the learning rate is halved every save,
                # giving an aggressive step-decay schedule.
                initial_learning_rate /=2
                im_path = 'generated/class_'+str(self.target_class)+'/c_'+str(self.target_class)+'_'+'iter_'+str(i)+'.png'
                save_image(self.created_image, im_path)
        return self.processed_image
# Entry point: load the requested deepfake-detector backbone, restore its
# weights, and run class-specific image generation against it.
if __name__ == '__main__':
    target_class = 0  # Flamingo
    # pretrained_model = models.alexnet(pretrained=True)
    # NOTE(review): `parse_args` is not defined in this file section;
    # presumably an argparse helper defined elsewhere -- confirm.
    args = parse_args()
    print(args)
    model = args.model
    # Restrict visible GPUs before torch initializes CUDA.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_id)
    gpu_id = 0 if int(args.gpu_id) >=0 else -1
    image_size = args.image_size
    iterations= args.iterations
    # Dispatch on the backbone name; each branch imports lazily so only
    # the selected model's dependencies are loaded.
    if model== "capsule":
        # Capsule backbone is unsupported here; bail out immediately.
        exit(0)
        pass
    elif model == "drn" :
        from pytorch_model.drn.drn_seg import DRNSub
        model = DRNSub(1)
        pass
    elif model == "local_nn" :
        from pytorch_model.local_nn import local_nn
        model = local_nn()
    elif model == "self_attention":
        from pytorch_model.self_attention import self_attention
        model = self_attention()
    elif model == "resnext50":
        from pytorch_model.model_cnn_pytorch import resnext50
        model = resnext50(False)
    elif model == "resnext101":
        from pytorch_model.model_cnn_pytorch import resnext101
        model = resnext101(False)
    elif model == "myresnext":
        from pytorch_model.model_cnn_pytorch import MyResNetX
        model = MyResNetX()
    elif model == "mnasnet":
        from pytorch_model.model_cnn_pytorch import mnasnet
        model = mnasnet(False)
    elif model == "xception_torch":
        from pytorch_model.xception import xception
        model = xception(pretrained=False)
    elif model == "xception2_torch":
        from pytorch_model.xception import xception2
        model = xception2(pretrained=False)
    elif model == "dsp_fwa":
        from pytorch_model.DSP_FWA.models.classifier import SPPNet
        model = SPPNet(backbone=50, num_class=1)
    elif model == "siamese_torch":
        from pytorch_model.siamese import SiameseNetworkResnet
        model = SiameseNetworkResnet(length_embed = args.length_embed,pretrained=True)
    elif model == "efficient":
        from pytorch_model.efficientnet import EfficientNet
        model = EfficientNet.from_pretrained('efficientnet-b'+args.type,num_classes=1)
        model = nn.Sequential(model,nn.Sigmoid())
    elif model == "efft":
        from pytorch_model.efficientnet import EfficientNet
        model = EfficientNet.from_pretrained('efficientnet-b' + args.type, num_classes=1,in_channels=1)
        model = nn.Sequential(model, nn.Sigmoid())
    elif model == "e4dfft":
        from pytorch_model.efficientnet import EfficientNet
        model = EfficientNet.from_pretrained('efficientnet-b' + args.type, num_classes=1,in_channels=4)
        model = nn.Sequential(model, nn.Sigmoid())
    elif model == "efficientdual":
        # NOTE(review): this branch falls back to a plain xception model;
        # the dual-efficientnet path appears unimplemented -- confirm.
        pass
        from pytorch_model.xception import xception
        model = xception(pretrained=False)
    device = torch.device("cuda" if torch.cuda.is_available()
                          else "cpu")
    model = model.to(device)
    # Weights are loaded onto CPU first, then the module lives on `device`.
    model.load_state_dict(torch.load(args.model_path,map_location=torch.device('cpu')))
    print("Load xong ... ")
    model.eval()
    csig = ClassSpecificImageGeneration(model, target_class,image_size)
    csig.generate(iterations = iterations)
| [
37811,
198,
41972,
319,
26223,
2556,
2608,
1478,
25,
1129,
25,
2598,
2177,
198,
198,
31,
9800,
25,
7273,
23063,
440,
14969,
377,
461,
532,
33084,
13,
785,
14,
315,
23063,
8590,
15065,
461,
198,
37811,
198,
11748,
28686,
198,
11748,
29... | 2.277802 | 2,293 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Has metadata about the cell libraries in the PDK.
This is used by the Bazel rules to set up the proper workspaces and targets."""
# The following is a list of cell libraries in the PDK. Each cell library has the
# git commit to use and a list of process corners.
#
# This list is manually curated and needs to be updated when upgrading to newer
# cell library versions.
# Keys per library:
#   commit          -- pinned git revision of the library repository
#   shallow_since   -- timestamp hint for shallow git fetches
#   corners         -- process corners and which characterizations exist
#                      for each ("basic", "ccsnoise", "leakage")
#   default_corner  -- corner used when the build does not request one
#   library_type    -- set for IP (non-standard-cell) libraries
CELL_LIBRARIES = {
    "sky130_fd_io": {
        "commit": "7ec511f1a4689e174c63b3964d1ba8da9a3565e5",  # v0.2.1, 2020-12-09
        "shallow_since": "1606239275 -0800",
        "library_type": "ip_library",
    },
    "sky130_fd_pr": {
        "commit": "f62031a1be9aefe902d6d54cddd6f59b57627436",  # v0.20.1, 2020-12-09
        "shallow_since": "1605038979 -0800",
        "library_type": "ip_library",
    },
    "sky130_fd_sc_hd": {
        "commit": "ac7fb61f06e6470b94e8afdf7c25268f62fbd7b1",  # v0.0.2, 2020-12-04
        "shallow_since": "1605028103 -0800",
        "corners": {
            "ff_100C_1v65": ["basic"],
            "ff_100C_1v95": ["basic"],
            "ff_n40C_1v56": ["basic"],
            "ff_n40C_1v65": ["basic"],
            "ff_n40C_1v76": ["basic"],
            "ff_n40C_1v95": ["basic", "ccsnoise"],
            "ss_100C_1v40": ["basic"],
            "ss_100C_1v60": ["basic"],
            "ss_n40C_1v28": ["basic"],
            "ss_n40C_1v35": ["basic"],
            "ss_n40C_1v40": ["basic"],
            "ss_n40C_1v44": ["basic"],
            "ss_n40C_1v60": ["basic", "ccsnoise"],
            "ss_n40C_1v76": ["basic"],
            "tt_025C_1v80": ["basic"],
            "tt_100C_1v80": ["basic"],
        },
        "default_corner": "ff_100C_1v95",
        # Only the high-density library ships an OpenROAD flow config and
        # local patches.
        "open_road_configuration": Label("//dependency_support/com_google_skywater_pdk/sky130_fd_sc_hd:open_road_sky130_fd_sc_hd"),
        "patches": [
            Label("//dependency_support/com_google_skywater_pdk/sky130_fd_sc_hd:pdk.patch"),
        ],
    },
    "sky130_fd_sc_hdll": {
        "commit": "0694bd23893de20f5233ef024acf6cca1e750ac6",  # v0.1.1, 2020-12-04
        "shallow_since": "1604475910 -0800",
        "corners": {
            "ff_100C_1v65": ["basic"],
            "ff_100C_1v95": ["basic"],
            "ff_n40C_1v56": ["basic"],
            "ff_n40C_1v65": ["basic"],
            "ff_n40C_1v95": ["basic", "ccsnoise"],
            "ss_100C_1v60": ["basic"],
            "ss_n40C_1v28": ["basic"],
            "ss_n40C_1v44": ["basic"],
            "ss_n40C_1v60": ["basic", "ccsnoise"],
            "ss_n40C_1v76": ["basic"],
            "tt_025C_1v80": ["basic"],
        },
        "default_corner": "ff_100C_1v95",
    },
    "sky130_fd_sc_hs": {
        "commit": "1d051f49bfe4e2fe9108d702a8bc2e9c081005a4",  # v0.0.2, 2020-12-04
        "shallow_since": "1605574092 -0800",
        "corners": {
            "ff_100C_1v95": ["basic"],
            "ff_150C_1v95": ["basic"],
            "ff_n40C_1v56": ["basic"],
            "ff_n40C_1v76": ["basic"],
            "ff_n40C_1v95": ["basic", "ccsnoise"],
            "ss_100C_1v60": ["basic"],
            "ss_150C_1v60": ["basic"],
            "ss_n40C_1v28": ["basic"],
            "ss_n40C_1v44": ["basic"],
            "ss_n40C_1v60": ["basic", "ccsnoise"],
            "tt_025C_1v20": ["basic"],
            "tt_025C_1v35": ["basic"],
            "tt_025C_1v44": ["basic"],
            "tt_025C_1v50": ["basic"],
            "tt_025C_1v62": ["basic"],
            "tt_025C_1v68": ["basic"],
            "tt_025C_1v80": ["basic", "ccsnoise"],
            "tt_025C_1v89": ["basic"],
            "tt_025C_2v10": ["basic"],
            "tt_100C_1v80": ["basic"],
            "tt_150C_1v80": ["basic"],
        },
        "default_corner": "ff_100C_1v95",
    },
    "sky130_fd_sc_hvl": {
        "commit": "4fd4f858d16c558a6a488b200649e909bb4dd800",  # v0.0.3, 2020-12-04
        "shallow_since": "1604476031 -0800",
        # High-voltage library: corners additionally encode the low-voltage
        # supply pairings (lv/lowhv/hvff/hvss prefixes).
        "corners": {
            "ff_085C_5v50": ["basic"],
            "ff_085C_5v50_lv1v95": ["basic"],
            "ff_100C_5v50": ["basic"],
            "ff_100C_5v50_lowhv1v65_lv1v95": ["basic"],
            "ff_100C_5v50_lv1v95": ["basic"],
            "ff_150C_5v50": ["basic"],
            "ff_150C_5v50_lv1v95": ["basic"],
            "ff_n40C_4v40": ["basic"],
            "ff_n40C_4v40_lv1v95": ["basic"],
            "ff_n40C_4v95": ["basic"],
            "ff_n40C_4v95_lv1v95": ["basic"],
            "ff_n40C_5v50": ["basic", "ccsnoise"],
            "ff_n40C_5v50_lowhv1v65_lv1v95": ["basic"],
            "ff_n40C_5v50_lv1v95": ["basic", "ccsnoise"],
            "hvff_lvss_100C_5v50_lowhv1v65_lv1v60": ["basic"],
            "hvff_lvss_100C_5v50_lv1v40": ["basic"],
            "hvff_lvss_100C_5v50_lv1v60": ["basic"],
            "hvff_lvss_n40C_5v50_lowhv1v65_lv1v60": ["basic"],
            "hvff_lvss_n40C_5v50_lv1v35": ["basic"],
            "hvff_lvss_n40C_5v50_lv1v60": ["basic"],
            "hvss_lvff_100C_1v65": ["basic"],
            "hvss_lvff_100C_1v95": ["basic"],
            "hvss_lvff_100C_1v95_lowhv1v65": ["basic"],
            "hvss_lvff_100C_5v50_lowhv1v65_lv1v95": ["basic"],
            "hvss_lvff_n40C_1v65": ["basic"],
            "hvss_lvff_n40C_1v95": ["basic"],
            "hvss_lvff_n40C_1v95_lowhv1v65": ["basic"],
            "hvss_lvff_n40C_5v50_lowhv1v65_lv1v95": ["basic"],
            "ss_100C_1v65": ["basic"],
            "ss_100C_1v65_lv1v40": ["basic"],
            "ss_100C_1v65_lv1v60": ["basic"],
            "ss_100C_1v95": ["basic"],
            "ss_100C_2v40_lowhv1v65_lv1v60": ["basic"],
            "ss_100C_2v70_lowhv1v65_lv1v60": ["basic"],
            "ss_100C_3v00": ["basic"],
            "ss_100C_3v00_lowhv1v65_lv1v60": ["basic"],
            "ss_100C_5v50_lowhv1v65_lv1v60": ["basic"],
            "ss_150C_1v65": ["basic"],
            "ss_150C_1v65_lv1v60": ["basic"],
            "ss_150C_3v00_lowhv1v65_lv1v60": ["basic"],
            "ss_n40C_1v32": ["basic"],
            "ss_n40C_1v32_lv1v28": ["basic"],
            "ss_n40C_1v49": ["basic"],
            "ss_n40C_1v49_lv1v44": ["basic"],
            "ss_n40C_1v65": ["basic", "ccsnoise"],
            "ss_n40C_1v65_lv1v35": ["basic"],
            "ss_n40C_1v65_lv1v40": ["basic"],
            "ss_n40C_1v65_lv1v60": ["basic", "ccsnoise"],
            "ss_n40C_1v95": ["basic"],
            "ss_n40C_5v50_lowhv1v65_lv1v60": ["basic"],
            "tt_025C_2v64_lv1v80": ["basic"],
            "tt_025C_2v97_lv1v80": ["basic"],
            "tt_025C_3v30": ["basic"],
            "tt_025C_3v30_lv1v80": ["basic"],
            "tt_100C_3v30": ["basic"],
            "tt_100C_3v30_lv1v80": ["basic"],
            "tt_150C_3v30_lv1v80": ["basic"],
        },
        "default_corner": "ss_100C_1v95",
    },
    "sky130_fd_sc_lp": {
        "commit": "e2c1e0646999163d35ea7b2521c3ec5c28633e63",  # v0.0.2, 2020-12-04
        "shallow_since": "1604476084 -0800",
        "corners": {
            "ff_100C_1v95": ["basic"],
            "ff_125C_3v15": ["basic"],
            "ff_140C_1v95": ["basic"],
            "ff_150C_2v05": ["basic"],
            "ff_n40C_1v56": ["basic"],
            "ff_n40C_1v76": ["basic"],
            "ff_n40C_1v95": ["basic"],
            "ff_n40C_2v05": ["basic"],
            "ss_100C_1v60": ["basic"],
            "ss_140C_1v65": ["basic"],
            "ss_150C_1v65": ["basic"],
            "ss_n40C_1v55": ["basic"],
            "ss_n40C_1v60": ["basic"],
            "ss_n40C_1v65": ["basic"],
        },
        "default_corner": "ff_100C_1v95",
    },
    "sky130_fd_sc_ls": {
        "commit": "4f549e30dd91a1c264f8895e07b2872fe410a8c2",  # v0.1.1, 2020-12-04
        "shallow_since": "1604476021 -0800",
        "corners": {
            "ff_085C_1v95": ["basic"],
            "ff_100C_1v65_dest1v76_destvpb1v76_ka1v76": ["basic"],
            "ff_100C_1v95": ["basic"],
            "ff_150C_1v95": ["basic"],
            "ff_n40C_1v56": ["basic"],
            "ff_n40C_1v65_dest1v76_destvpb1v76_ka1v76": ["basic"],
            "ff_n40C_1v76": ["basic"],
            "ff_n40C_1v95": ["basic", "ccsnoise"],
            "ss_100C_1v40": ["basic"],
            "ss_100C_1v60": ["basic"],
            "ss_150C_1v60": ["basic"],
            "ss_n40C_1v28": ["basic"],
            "ss_n40C_1v35": ["basic"],
            "ss_n40C_1v40": ["basic"],
            "ss_n40C_1v44": ["basic"],
            "ss_n40C_1v60": ["basic", "ccsnoise"],
            "ss_n40C_1v76": ["basic"],
            "tt_025C_1v80": ["basic", "ccsnoise"],
            "tt_100C_1v80": ["basic"],
        },
        "default_corner": "ff_100C_1v95",
    },
    "sky130_fd_sc_ms": {
        "commit": "ae1b7f68821505cf2d93d9d44cce5ece22710fad",  # v0.0.2, 2020-12-04
        "shallow_since": "1605631186 -0800",
        "corners": {
            "ff_085C_1v95": ["leakage"],
            "ff_100C_1v65": ["basic"],
            "ff_100C_1v95": ["basic", "leakage"],
            "ff_150C_1v95": ["basic"],
            "ff_n40C_1v56": ["basic"],
            "ff_n40C_1v65_ka1v76": ["basic"],
            "ff_n40C_1v76": ["basic"],
            "ff_n40C_1v95": ["basic", "ccsnoise", "leakage"],
            "ss_100C_1v60": ["basic"],
            "ss_150C_1v60": ["basic"],
            "ss_n40C_1v28": ["basic"],
            "ss_n40C_1v44": ["basic"],
            "ss_n40C_1v60": ["basic", "ccsnoise"],
            "tt_025C_1v80": ["basic", "ccsnoise"],
            "tt_100C_1v80": ["basic"],
        },
        "default_corner": "ff_100C_1v95",
    },
}
| [
2,
15069,
12131,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 1.672961 | 5,984 |
"""
A Python module containing various utility functions, classes, decorators or
whatever.
"""
from collections import namedtuple, Iterable
import sys
import functools
import inspect
from bs4 import BeautifulSoup
import logging
import time
import random
import os
import errno
# Constants
# =========
# Static pool of browser User-Agent headers; callers pick one (e.g. at
# random) so scripted HTTP requests resemble ordinary browser traffic.
USER_AGENTS = [
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.73 Safari/537.36 OPR/34.0.2036.25',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; FSL 7.0.6.01001)',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36',
    'Mozilla/5.0 (Windows NT 5.1; rv:13.0) Gecko/20100101 Firefox/13.0.1',
    'Opera/9.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.01',
]
"""
A bunch of random User-Agent strings.
"""
# Decorators
# ==========
class Hook:
    """
    A special Hook decorator that will call something after a method has
    completed.

    When decorating your method, make sure to only use keyword arguments in
    the hook.

    The idea is for a developer to implement a specific class which has various
    methods, and on some methods he will add a Hook decorator. Then the user
    can create a subclass of this class and implement the hooks themselves.

    The user is given access to the return value of the decorated function
    through the `self._hook_return_value` variable. The return value is None
    if the hook is called before the decorated function.

    NOTE(review): `__init__` (setting `hook_name`, `hook_kwargs` and
    `skip_exceptions`) is not visible in this file section -- presumably
    defined elsewhere; confirm before relying on construction behavior.

    Example
    -------
    Developer::

        class MyClass:
            @Hook('on_do_stuff', arg1='something', arg2=7)
            def do_stuff(self):
                pass

    User::

        class MyNewClass(MyClass):
            def on_do_stuff(self, **kwargs):
                # Do something useful
                pass

    Parameters
    ----------
    hook_name: str
        The name of the hook function to be called.
    call_after: bool
        Whether to call the hook after or before the decorated function runs.
        (default: True)

    Raises
    ------
    ValueError
        When a normal function is decorated instead of a method.
    """

    def call_hook(self, func, args, return_value=None):
        """
        Get the "self" argument (i.e. the instance of a class that is implicitly
        passed to a method when you call something like "some_class.method()")
        then call our hook.

        Uses inspect to check that a function has this "self" variable passed
        in first. This is a sanity check to ensure that the hook decorator is
        only used on methods.

        By default any exceptions encountered while running the hook will be
        silently ignored.
        """
        # Bug fix: `inspect.getargspec` was removed in Python 3.11;
        # `getfullargspec` is the drop-in replacement for the `.args` use here.
        func_args = inspect.getfullargspec(func).args
        if len(func_args) < 1 or 'self' not in func_args:
            raise TypeError('Only methods can be decorated with "Hook"')

        instance = args[0]
        hook = getattr(instance, self.hook_name, None)
        if hook:
            # Expose the decorated function's result to the hook body.
            instance._hook_return_value = return_value
            try:
                hook(**self.hook_kwargs)
            except Exception:
                if not self.skip_exceptions:
                    raise
raise
class Timed:
    """
    Time a function call and save its duration (in seconds) to
    `function.duration`.

    NOTE(review): only this docstring is present in this file section --
    the decorator implementation (`__init__`/`__call__`) is not visible
    here; confirm whether it exists elsewhere or is unimplemented.

    Parameters
    ----------
    output_stream: Stream-like object
        A stream to write the timing message to, set to None to disable it
        (default: stderr)
    decimals: int
        The number of decimal places to print the duration to in the output
        stream
    """
# Functions
# =========
def get_logger(name, log_file, log_level=None):
    """
    Get a logger object which is set up properly with the correct formatting,
    logfile, etc.

    Parameters
    ----------
    name: str
        The __name__ of the module calling this function.
    log_file: str
        The filename of the file to log to, or 'stdout'/'stderr' for a
        stream handler.
    log_level: int, optional
        Logging level to set on the logger (default: logging.INFO).

    Returns
    -------
    logging.Logger
        A logging.Logger object that can be used to log to a common file.
    """
    logger = logging.getLogger(name)
    logger.setLevel(log_level or logging.INFO)

    # Bug fix: only build and attach a handler the first time this logger is
    # requested. The original constructed a handler (opening the log file!)
    # on every call and leaked it whenever the logger was already configured.
    if not logger.handlers:
        if log_file == 'stdout':
            handler = logging.StreamHandler(sys.stdout)
        elif log_file == 'stderr':
            handler = logging.StreamHandler(sys.stderr)
        else:
            handler = logging.FileHandler(log_file)

        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s: %(message)s',
            datefmt='%Y/%m/%d %I:%M:%S %p'
        )
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    return logger
def flatten(items, ignore_types=(str, bytes)):
    """
    Turn a nested structure (usually a list of lists... of lists of lists of
    lists) into one flat stream of leaf elements.

    Parameters
    ----------
    items: list(list(...))
        A nested list structure.
    ignore_types: list(types)
        Types that are iterable but should be treated as atoms (e.g. don't
        flatten a string into a list of characters).

    Returns
    -------
    generator
        Yields each leaf element of the nested structure in turn.
    """
    # Atomic input (e.g. a bare string): emit it unchanged and stop.
    if isinstance(items, ignore_types):
        yield items
        return

    for element in items:
        is_expandable = isinstance(element, Iterable) and not isinstance(element, ignore_types)
        if is_expandable:
            yield from flatten(element)
        else:
            yield element
def hidden_fields(soup):
    """
    Retrieve all the hidden fields from a html form.

    Parameters
    ----------
    soup: BeautifulSoup or str
        The form to search. A plain string is assumed to be html source and
        is parsed into BeautifulSoup first.

    Returns
    -------
    dict
        A dictionary mapping each hidden field's name to its value.
    """
    if not isinstance(soup, BeautifulSoup):
        soup = BeautifulSoup(soup, 'html.parser')
    return {tag['name']: tag['value'] for tag in soup.find_all('input', type='hidden')}
# Size suffixes indexed by the number of divisions by 1024.
_suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']

def humansize(nbytes, decimals=2):
    """
    Convert a number of bytes into its human readable string using SI
    suffixes.

    Note
    ----
    1 KB = 1024 bytes

    Parameters
    ----------
    nbytes: int
        The total number of bytes
    decimals: int
        The number of decimal places to round to

    Returns
    -------
    string
        The human readable size.
    """
    if nbytes == 0:
        return '0 B'
    i = 0
    while nbytes >= 1024 and i < len(_suffixes) - 1:
        nbytes /= 1024.
        i += 1
    # Bug fix: the original applied rstrip('0') to the whole number, so
    # integer values were mangled ('1000' -> '1'). Format with a fixed
    # number of decimals first, then only strip the fractional part.
    formatted = '{0:.{1}f}'.format(nbytes, decimals)
    if '.' in formatted:
        formatted = formatted.rstrip('0').rstrip('.')
    return '%s %s' % (formatted, _suffixes[i])
def innerHTML(element):
    """Return the inner HTML markup of a BeautifulSoup tag as a string."""
    markup = element.decode_contents(formatter="html")
    return markup
| [
37811,
198,
32,
11361,
8265,
7268,
2972,
10361,
5499,
11,
6097,
11,
11705,
2024,
393,
198,
39664,
13,
198,
37811,
198,
198,
6738,
17268,
1330,
3706,
83,
29291,
11,
40806,
540,
198,
11748,
25064,
198,
11748,
1257,
310,
10141,
198,
11748,... | 2.517676 | 2,857 |
import argparse
import subprocess
import random
import os
import tensorflow as tf
import sys
#os.environ["CUDA_VISIBLE_DEVICES"]="0,1,2,3,4,5,6,7"
from tensorflow.python.client import device_lib
# Script entry point; `main` is presumably defined elsewhere in this
# module (not visible in this section) -- confirm before running.
if __name__ == '__main__':
    main()
| [
11748,
1822,
29572,
198,
11748,
850,
14681,
198,
11748,
4738,
198,
11748,
28686,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
25064,
198,
2,
418,
13,
268,
2268,
14692,
43633,
5631,
62,
29817,
34563,
62,
39345,
34444,
8973,
2625,... | 2.651685 | 89 |
from typing import Any
import numpy as np
| [
6738,
19720,
1330,
4377,
198,
198,
11748,
299,
32152,
355,
45941,
628
] | 3.666667 | 12 |
import requests
from bs4 import BeautifulSoup
# Request headers: the User-Agent makes the request look like a browser;
# the Access-Control-* entries are CORS *response* headers and are inert
# when sent by a client, but are kept for backward compatibility.
headers = {
    'Access-Control-Allow-Origin': '*',
    'Access-Control-Allow-Methods': 'GET',
    'Access-Control-Allow-Headers': 'Content-Type',
    'Access-Control-Max-Age': '3600',
    'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'
}

url = "https://gomechanic.in/hyderabad"
# Bug fix: the second positional argument of requests.get is `params`, not
# `headers` -- pass the header dict by keyword so it is actually sent.
req = requests.get(url, headers=headers)
soup = BeautifulSoup(req.content, 'html.parser')
print(soup.prettify())
11748,
7007,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
628,
198,
50145,
796,
1391,
198,
220,
220,
220,
705,
15457,
12,
15988,
12,
35265,
12,
39688,
10354,
705,
9,
3256,
198,
220,
220,
220,
705,
15457,
12,
15988,
12,
35265,
12,... | 2.5 | 192 |
"""
Modified example from:
https://github.com/pytorch/examples
"""
from __future__ import print_function
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
| [
37811,
198,
5841,
1431,
1672,
422,
25,
198,
5450,
1378,
12567,
13,
785,
14,
9078,
13165,
354,
14,
1069,
12629,
198,
37811,
198,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
14601,
198,
198,
11748,
28034,
198,
1... | 3.516854 | 89 |
from utils.primes import is_prime
# By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.
#
# What is the 10 001st prime number?
#
# Answer: 104743
| [
6738,
3384,
4487,
13,
1050,
999,
1330,
318,
62,
35505,
628,
198,
2,
2750,
13487,
262,
717,
2237,
6994,
3146,
25,
362,
11,
513,
11,
642,
11,
767,
11,
1367,
11,
290,
1511,
11,
356,
460,
766,
326,
262,
718,
400,
6994,
318,
1511,
13... | 2.855072 | 69 |
# -*- coding:utf-8 -*-
__author__ = 'Leo.Z'
'''
image_name.jpg x y x2 y2 c x y x2 y2 c xy为左上角坐标,x2y2为右下角坐标
'''
import os
import os.path
import random
import numpy as np
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
import cv2
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
834,
9800,
834,
796,
705,
3123,
78,
13,
57,
6,
198,
198,
7061,
6,
198,
9060,
62,
3672,
13,
9479,
2124,
331,
2124,
17,
331,
17,
269,
2124,
331,
2124,
17,
331,
17,
269,... | 2.044444 | 135 |
# Package version string (PEP 440 development release).
__version__ = '0.0.dev5'
| [
834,
9641,
834,
796,
705,
15,
13,
15,
13,
7959,
20,
6,
198
] | 1.923077 | 13 |
# from config import conf
#import telegram
#
# tg_token=conf['telegram_token']
# bot = telegram.Bot(token=tg_token)
# print(tg_token)
#
# #proxy list: https://50na50.net/ru/proxy/socks5list
#
# proxy_url='socks5://66.33.210.203:24475'
#
# pp = telegram.utils.request.Request(proxy_url=proxy_url)
# bot = telegram.Bot(token=tg_token, request=pp)
# print(bot.get_me())
#
# REQUEST_KWARGS={'proxy_url'=proxy_url}
from telegram.ext import Updater
from telegram.ext import CommandHandler
from telegram.ext import MessageHandler, Filters
from config import conf
import logging
# SOCKS5 proxy used to reach the Telegram API.
proxy_url = 'socks5://104.248.63.49:30588'
REQUEST_KWARGS = {'proxy_url': proxy_url}
tg_token = conf['telegram_token']

logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    level=logging.INFO)
import os

server_url = 'https://hello-world-delete-234.nw.r.appspot.com/'
# App Engine injects the port to listen on via the environment.
PORT = int(os.environ.get('PORT', '8443'))

updater = Updater(tg_token, use_context=True, request_kwargs=REQUEST_KWARGS)
dispatcher = updater.dispatcher

# Bug fix: register the handlers *before* starting the webhook. In the
# original these registrations sat after updater.idle(), which blocks until
# shutdown, so the bot never answered any command or message.
# NOTE(review): the callbacks `start`, `echo` and `caps` are presumably
# defined elsewhere in this module -- confirm.
start_handler = CommandHandler('start', start)
dispatcher.add_handler(start_handler)

echo_handler = MessageHandler(Filters.text & (~Filters.command), echo)
dispatcher.add_handler(echo_handler)

caps_handler = CommandHandler('caps', caps)
dispatcher.add_handler(caps_handler)

updater.start_webhook(listen="0.0.0.0",
                      port=PORT,
                      url_path=tg_token)
# Bug fix: use the server_url variable; the original concatenated the
# literal string "server_url", producing an invalid webhook URL.
updater.bot.set_webhook(server_url + tg_token)
updater.idle()
| [
198,
2,
422,
4566,
1330,
1013,
198,
2,
11748,
573,
30536,
198,
2,
198,
2,
256,
70,
62,
30001,
28,
10414,
17816,
660,
30536,
62,
30001,
20520,
198,
2,
10214,
796,
573,
30536,
13,
20630,
7,
30001,
28,
25297,
62,
30001,
8,
198,
2,
... | 2.466667 | 675 |
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-Forwarding
GUID : 699e309c-e782-4400-98c8-e21d162d7b7b
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("699e309c-e782-4400-98c8-e21d162d7b7b"), event_id=100, version=0)
@declare(guid=guid("699e309c-e782-4400-98c8-e21d162d7b7b"), event_id=101, version=0)
@declare(guid=guid("699e309c-e782-4400-98c8-e21d162d7b7b"), event_id=102, version=0)
@declare(guid=guid("699e309c-e782-4400-98c8-e21d162d7b7b"), event_id=102, version=1)
@declare(guid=guid("699e309c-e782-4400-98c8-e21d162d7b7b"), event_id=103, version=0)
@declare(guid=guid("699e309c-e782-4400-98c8-e21d162d7b7b"), event_id=104, version=0)
@declare(guid=guid("699e309c-e782-4400-98c8-e21d162d7b7b"), event_id=104, version=1)
@declare(guid=guid("699e309c-e782-4400-98c8-e21d162d7b7b"), event_id=105, version=0)
@declare(guid=guid("699e309c-e782-4400-98c8-e21d162d7b7b"), event_id=107, version=0)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
15905,
12,
11209,
12,
39746,
278,
198,
38,
27586,
1058,
718,
2079,
68,
26895,
66,
12,
68,
46519,
12,
2598,
405,
12,
4089,
66,
23,
12,
68,
2481,
67,
25061... | 2.015817 | 569 |
import numpy as np
from tqdm.auto import tqdm
# Column counts for each feature group of the encoded game state (see the
# breakdown in the format_data docstring).
COLS_GROUP1 = 24
COLS_GROUP2 = 47
# 24 cards x 13 location/status columns per card.
COLS_GROUP3 = 24*13
COLS_GROUP4 = 55
COLS_TOTAL = COLS_GROUP1 + COLS_GROUP2 + COLS_GROUP3 + COLS_GROUP4
# Maps each suit to the other suit of the same colour (used to detect the
# left bower, i.e. the jack of the same-colour suit as trump).
same_color_suit = {'C':'S', 'D':'H', 'H':'D', 'S':'C'}
# Width of the target (label) vector: one column per card.
COLS_TARGET = 24
def format_data(data, usetqdm=True, start=0, stop=None, count=None):
    """
    Here is all the data that needs to be fed to the ML algorithm, grouped by phase of the game.
    I have also tried to include an estimate of how many columns each will need to take up.
    If a categorical feature has N options, I will OHE it as N columns, instead of using N-1.
    A card will be OHEncoded as [9-A] + [C/D/H/S] (6+4), and possibly tagged as Y/N trump.

    #######
    DATA GROUP 1: Calling trump
    #######
    (4)  1.) Who dealt (position relative to "me")
    (4)  2.) Who called trump (position relative to "me")
    (1)  3.) Which round was trump called in
    (1)  4.) Going alone?
    (4)  5.) Which suit is trump (not sure about this one)
    (10) 6.) What is the turn card
    Total: 24 columns

    #######
    DATA GROUP 2: Other misc. information
    #######
    (4)  1.) Who is leading right now
    (4)  2.) Who is winning right now
    (11) 3.) What card was led (is it trump)
    (11) 4.) What card is winning (is it trump)
    (5)  5.) Which team won each trick so far (+1 for "me", 0 for no one (yet), -1 for opponents)
    (12) 6.) Any players confirmed short in any suits
    Total: 47 columns

    #######
    DATA GROUP 3: All card locations (constant order: 9C, 10C, ..., (D), (H), ..., KS, AS)
    #######
    For each card (24):
    (4) 1.) Confirmed in anyone's hand (my hand + ordered up turn card?)
    (4) 2.) Played in a previous trick by someone (maybe later expand this to which prev trick?)
    (3) 3.) Played in CURRENT trick by someone
    (1) 4.) Confirmed buried
    (1) 5.) Is trump
    Total: 312 columns

    #######
    DATA GROUP 4: My remaining hand, again
    #######
    (11) 1.) Card #1 (is it trump)
    (11) 2.) Card #2 (is it trump)
    (11) 3.) Card #3 (is it trump)
    (11) 4.) Card #4 (is it trump)
    (11) 5.) Card #5 (is it trump)
    Total: 55 columns

    SUPER-TOTAL: 414 columns. Yeesh.

    NOTE(review): `data` is presumably a pandas DataFrame of games (uses
    .index/.iloc), and `format_game`/`get_target` are defined elsewhere;
    each game contributes 20 rows (5 tricks x 4 plays) -- confirm.
    """
    counter = 0
    stop = len(data) if stop is None else stop
    count = len(data) if count is None else count
    # Pre-allocate 20 feature/target rows per game in the requested window.
    formatted = np.zeros((20*(stop-start), COLS_TOTAL), dtype=np.int8)
    target = np.zeros((20*(stop-start), COLS_TARGET), dtype=np.int8)
    for i in tqdm(data.index) if usetqdm else data.index:
        i = int(i)
        if i < start: continue
        elif i >= stop: break
        elif counter >= count: break
        game = data.iloc[i]
        formatted[20*counter:20*(counter+1)] = format_game(game)
        target[20*counter:20*(counter+1)] = get_target(game)
        counter += 1
    # Drop rows whose target vector is all zeros (unused / padding rows).
    mask = ~np.all(target==0, axis=1)
    return formatted[mask], target[mask]
def get_group1_info(game, tricknum, playernum):
    """
    #######
    DATA GROUP 1: Calling trump
    #######
    (4)  1.) Who dealt (position relative to "me")
    (4)  2.) Who called trump (position relative to "me")
    (1)  3.) Which round was trump called in
    (1)  4.) Going alone?
    (4)  5.) Which suit is trump (not sure if this one needs to be here)
    (10) 6.) What is the turn card
    Total: 24 columns
    """
    group1_info = np.zeros(COLS_GROUP1, dtype=np.int8)
    # NOTE(review): `current_player` is assigned but never used below; the
    # call is kept in case get_current_player has side effects -- confirm.
    current_player = get_current_player(game, tricknum, playernum)
    # who dealt (player '3' is the dealer by convention in this data)
    group1_info[get_relative_position(game, tricknum, playernum, '3')] = 1
    # who called
    group1_info[4+get_relative_position(game, tricknum, playernum, game['caller'])] = 1
    # was it called first round (1 if round 1, 0 if round 2)
    group1_info[8] = 2-int(game['round'])
    # did they go alone
    group1_info[9] = int(game['alone'])
    # which suit is trump
    group1_info[10+{'C':0, 'D':1, 'H':2, 'S':3}[get_trump_suit(game)]] = 1
    # what is the turn card: rank one-hot (6) then suit one-hot (4)
    turn_card = get_turn_card(game)
    group1_info[14+{n:i for n,i in zip(list('9TJQKA'), range(6))}[turn_card[0]]] = 1
    group1_info[20+{s:i for s,i in zip(list('CDHS'), range(4))}[turn_card[1]]] = 1
    return group1_info
def get_group2_info(game, tricknum, playernum):
    """
    #######
    DATA GROUP 2: Other misc. information
    #######
    (4)  1.) Who is leading right now
    (4)  2.) Who is winning right now
    (11) 3.) What card was led (is it trump)
    (11) 4.) What card is winning (is it trump)
    (5)  5.) Which team won each trick so far (+1 for "me", 0 for no one (yet), -1 for opponents)
    (12) 6.) Any players confirmed short in any suits
    Total: 47 columns

    NOTE(review): cards in `game` appear encoded as rank+suit+player, with
    the final character identifying the player -- confirm against the data.
    """
    group2_info = np.zeros(COLS_GROUP2, dtype=np.int8)
    # Cards already played in the trick in progress.
    current_trick = game[['played'+str(i+1) for i in range(4*tricknum, 4*tricknum+playernum)]]
    trump_suit = get_trump_suit(game)
    # who leads (position 3 = "me" when no card has been played yet)
    group2_info[get_relative_position(game, tricknum, playernum, current_trick[0][-1]) if len(current_trick) > 0 else 3] = 1
    # who's winning
    if len(current_trick) > 0:
        winner, winning_card = get_winner(current_trick, trump_suit)
        group2_info[4+get_relative_position(game, tricknum, playernum, winner)] = 1
    # what card was led: rank one-hot, suit one-hot, then a trump flag that
    # also counts the left bower (jack of the same-colour suit).
    if len(current_trick) > 0:
        group2_info[8+{n:i for n,i in zip(list('9TJQKA'), range(6))}[current_trick[0][0]]] = 1
        group2_info[14+{s:i for s,i in zip(list('CDHS'), range(4))}[current_trick[0][1]]] = 1
        group2_info[18] = (current_trick[0][1]==trump_suit) or (current_trick[0][0]=='J' and current_trick[0][1]==same_color_suit[trump_suit])
    # what card is winning (same encoding as the led card)
    if len(current_trick) > 0:
        group2_info[19+{n:i for n,i in zip(list('9TJQKA'), range(6))}[winning_card[0]]] = 1
        group2_info[25+{s:i for s,i in zip(list('CDHS'), range(4))}[winning_card[1]]] = 1
        group2_info[29] = (winning_card[1]==trump_suit) or (winning_card[0]=='J' and winning_card[1]==same_color_suit[trump_suit])
    # what team won each trick so far
    for tnum in range(5):
        if tnum >= tricknum:
            continue
        # return +1 if relative_position % 2 == 1, return -1 if relative_position % 2 == 0 (self is always 3)
        group2_info[30+tnum] = -1+2*(get_relative_position(game, tricknum, playernum, game['winner'+str(tnum+1)])%2)
    # any players confirmed short in suits
    # list it like [opp1 short in clubs, opp1 short in diamonds, ..., opp2 short in spades]
    for opp_pos in range(3):
        for i, s in enumerate(list('CDHS')):
            group2_info[35+4*opp_pos + i] = get_short_suitedness(game, tricknum, playernum, opp_pos, s)
    return group2_info
# Flat lookup table: ranks 9..A map to 0-5 and suits C/D/H/S map to
# 0/6/12/18, so card_ix[rank] + card_ix[suit] yields a unique 0-23 index.
card_ix = {**{n:i for n,i in zip(list('9TJQKA'), range(6))},\
           **{s:6*i for s,i in zip(list('CDHS'), range(4))}}
def get_group3_info(game, tricknum, playernum):
"""
#######
DATA GROUP 3: All card locations (constant order: 9C, 10C, ..., (D), (H), ..., KS, AS)
#######
For each card (24):
(4) 1.) Confirmed in anyone's hand (my hand + ordered up turn card?)
(4) 2.) Played in a previous trick by someone (maybe later expand this to which prev trick?)
(3) 3.) Played in CURRENT trick by someone
(1) 4.) Confirmed buried
(1) 5.) Is trump
Total: 312 columns
"""
COLS_PER_CARD = 13
group3_info = np.zeros(24*COLS_PER_CARD, dtype=np.int8)
trump_suit = get_trump_suit(game)
# cards played in a previous trick
if tricknum > 0:
prev_played_cards = game[['played'+str(i+1) for i in range(4*tricknum)]]
for c in prev_played_cards:
if '-' in c:
continue
group3_info[COLS_PER_CARD*(card_ix[c[0]] + card_ix[c[1]]) + 4 + get_relative_position(game, tricknum, playernum, c[-1])] = 1
# cards played THIS trick
if playernum > 0:
current_played_cards = game[['played'+str(i+1) for i in range(4*tricknum, 4*tricknum+playernum)]]
for c in current_played_cards:
if c.startswith('-'):
continue
group3_info[COLS_PER_CARD*(card_ix[c[0]] + card_ix[c[1]]) + 8 + get_relative_position(game, tricknum, playernum, c[-1])] = 1
# cards in my hand
my_remaining_cards = [c[:-1] for c in game[['played'+str(i+1) for i in range(4*tricknum+playernum, 20)]]\
if c[-1] == get_current_player(game, tricknum, playernum)]
for c in my_remaining_cards:
# position of self wrt self is always 3
group3_info[COLS_PER_CARD*(card_ix[c[0]] + card_ix[c[1]]) + 3] = 1
# confirmed turn card location
if game['round']==2:
turn_card = get_turn_card(game)
group3_info[COLS_PER_CARD*(card_ix[turn_card[0]] + card_ix[turn_card[1]]) + COLS_PER_CARD-2] = 1
elif get_current_player(game, tricknum, playernum) == '3':
original_cards = get_original_hand(game, tricknum, playernum)
played_cards = [c[:-1] for c in game[['played'+str(i+1) for i in range(20)]] if c[-1]=='3']
buried_card = [c for c in original_cards if c not in played_cards][0]
group3_info[COLS_PER_CARD*(card_ix[buried_card[0]]+card_ix[buried_card[1]]) + COLS_PER_CARD-2] = 1
else:
turn_card = get_turn_card(game)
all_played_cards = game[['played'+str(i+1) for i in range(4*tricknum+playernum)]]
if turn_card+'3' not in list(all_played_cards):
group3_info[COLS_PER_CARD*(card_ix[turn_card[0]]+card_ix[turn_card[1]]) + get_relative_position(game, tricknum, playernum, 3)] = 1
# Mark trump
for s in list('CDHS'):
if s == trump_suit:
for name in list('9TJQKA'):
group3_info[COLS_PER_CARD*(card_ix[name]+card_ix[s]) + COLS_PER_CARD-1] = 1
group3_info[COLS_PER_CARD*(card_ix['J']+card_ix[same_color_suit[s]]) + COLS_PER_CARD-1] = 1
return group3_info
def get_group4_info(game, tricknum, playernum):
"""
#######
DATA GROUP 4: My remaining hand, again
#######
(11) 1.) Card #1 (is it trump)
(11) 2.) Card #2 (is it trump)
(11) 3.) Card #3 (is it trump)
(11) 4.) Card #4 (is it trump)
(11) 5.) Card #5 (is it trump)
Total: 55 columns
"""
"""
my_cards = [c for c in game[['played'+str(i) for i in range(1,21)]] if c[-1] == str(playernum)]
trump_suit = get_trump_suit(game)
np.random.shuffle(my_cards)
my_cards = [c[:-1] if c not in game[['played'+str(i) for i in range(1,4*tricknum+playernum+1)]] else '00' for c in my_cards]
"""
# slightly more efficient
trump_suit = get_trump_suit(game)
my_cards = [c[:-1] for c in game[['played'+str(i+1) for i in range(4*tricknum+playernum, 20)]]\
if c[-1] == get_current_player(game, tricknum, playernum)]
my_cards += ['00']*(5-len(my_cards))
np.random.shuffle(my_cards)
group4_info = []
for c in my_cards:
group4_info += card_to_ohe(c[0], c[1], trump_suit==c[1] or (c[0]=='J' and c[1]==same_color_suit[trump_suit]))
return group4_info
power_to_name = {power:n for power,n in zip([1,2,3,4,5,10,12,15,20,25,30,31,35], list('9TJQKA9TQKAJJ'))}
oldstyle=False
card_ix = {**{n:i for n,i in zip(list('9TJQKA'), range(6))},\
**{s:6*i for s,i in zip(list('CDHS'), range(4))}} | [
11748,
299,
32152,
355,
45941,
198,
6738,
256,
80,
36020,
13,
23736,
1330,
256,
80,
36020,
198,
198,
25154,
50,
62,
46846,
16,
796,
1987,
198,
25154,
50,
62,
46846,
17,
796,
6298,
198,
25154,
50,
62,
46846,
18,
796,
1987,
9,
1485,
... | 2.237612 | 5,025 |
"""Vectordump configuration information.
"""
#: MONGO URI
MONGO_URI = 'mongodb://localhost:27017/'
| [
37811,
53,
478,
585,
931,
8398,
1321,
13,
198,
37811,
198,
198,
2,
25,
25000,
11230,
43975,
198,
27857,
11230,
62,
47269,
796,
705,
31059,
375,
65,
1378,
36750,
25,
1983,
29326,
14,
6,
198
] | 2.857143 | 35 |
import math
#TODO: WRITEME sciNum | [
11748,
10688,
198,
198,
2,
51,
3727,
46,
25,
11342,
2043,
3620,
36,
20681,
33111
] | 2.266667 | 15 |
import os
from special_math.common_utilities import SpecialMathCalc, RequestUtils
from special_math import MAX_SPECIAL_NUMBER_ENTRY
import logging
from flask import Blueprint
bp = Blueprint('specialmath', __name__, url_prefix='/specialmath')
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("LOG_LEVEL", logging.DEBUG))
special_calculator = SpecialMathCalc()
@bp.route('/<int:n>')
def special_math(n: int):
"""
Takes an integer input and computes the special value for that number
:param n: The path value given to calculate the special value from
:return: a dict with context and response and a status code
"""
request_context = RequestUtils().get_request_context()
logger.debug(f'Received request for {n}, request_id: {request_context["request-id"]}')
if n > MAX_SPECIAL_NUMBER_ENTRY:
return {'context': request_context, 'error': {'message': f'Invalid special math request: request '
f'{n} exceeds maximum value of '
f'{MAX_SPECIAL_NUMBER_ENTRY}',
'name': 'InvalidRequestParameter'}}, 400
try:
special_number = special_calculator.calculate_special_value(n)
except Exception as e:
logger.error("Experienced error attempting to calculate special number")
logger.critical(e)
return {'context': request_context, 'error': {'message': 'Unexpected error encountered. '
'Please retry your request. If this error persists '
'reach out to John because he did something wrong.',
'name': 'InternalServerError'}}, 500
logger.debug(f'Calculated special number: {special_number}')
response = {"context": request_context,
"response": {
"special-calculation": special_number
}
}
logger.info(f"Successfully processed request {n}: {response}")
return response
| [
11748,
28686,
198,
6738,
2041,
62,
11018,
13,
11321,
62,
315,
2410,
1330,
6093,
37372,
9771,
66,
11,
19390,
18274,
4487,
198,
6738,
2041,
62,
11018,
1330,
25882,
62,
48451,
12576,
62,
41359,
13246,
62,
3525,
18276,
198,
11748,
18931,
19... | 2.207831 | 996 |
import Qt as Qt
import Qt.QtGui as QtGui
import Qt.QtCore as QtCore
from qtLearn.nodes import Node
import qtLearn.uiUtils as uiUtils
############################################################################
############################################################################ | [
11748,
33734,
355,
33734,
198,
11748,
33734,
13,
48,
83,
8205,
72,
355,
33734,
8205,
72,
198,
11748,
33734,
13,
48,
83,
14055,
355,
33734,
14055,
198,
198,
6738,
10662,
83,
20238,
13,
77,
4147,
1330,
19081,
198,
11748,
10662,
83,
2023... | 4.271429 | 70 |
# Полуавтоматические тесты
#
# list_temp = [1,2,3,'abc']
#
# print(test_function(list_temp))
# теперь пишем полуавтоматическую фун-ю
function_test()
list_temp = [1, 2, 3,'5', 'abc', 4]
list_out = test_function(list_temp)
print(list_out)
| [
2,
12466,
253,
25443,
119,
35072,
16142,
38857,
20375,
25443,
120,
16142,
20375,
18849,
141,
229,
16843,
21727,
31583,
18849,
16843,
220,
20375,
16843,
21727,
20375,
45035,
198,
2,
198,
2,
1351,
62,
29510,
796,
685,
16,
11,
17,
11,
18,
... | 1.596026 | 151 |
# -*- coding: utf-8 -*-
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198
] | 1.714286 | 14 |
import certifi
import ftplib
import hatanaka
import os
import urllib.request
import pycurl
import time
import tempfile
from datetime import datetime
from urllib.parse import urlparse
from io import BytesIO
from .constants import SECS_IN_HR, SECS_IN_DAY, SECS_IN_WEEK
from .gps_time import GPSTime
dir_path = os.path.dirname(os.path.realpath(__file__))
def retryable(f):
"""
Decorator to allow us to pass multiple URLs from which to download.
Automatically retry the request with the next URL on failure
"""
return wrapped
@retryable
def ftp_download_files(url_base, folder_path, cacheDir, filenames, compression='', overwrite=False):
"""
Like download file, but more of them. Keeps a persistent FTP connection open
to be more efficient.
"""
folder_path_abs = os.path.join(cacheDir, folder_path)
ftp = ftp_connect(url_base + folder_path)
filepaths = []
for filename in filenames:
filename_zipped = filename + compression
filepath = str(hatanaka.get_decompressed_path(os.path.join(folder_path_abs, filename)))
filepath_zipped = os.path.join(folder_path_abs, filename_zipped)
print("pulling from", url_base, "to", filepath)
if not os.path.isfile(filepath) or overwrite:
if not os.path.exists(folder_path_abs):
os.makedirs(folder_path_abs)
try:
ftp.retrbinary('RETR ' + filename_zipped, open(filepath_zipped, 'wb').write)
except (ftplib.error_perm):
raise IOError("Could not download file from: " + url_base + folder_path + filename_zipped)
filepaths.append(str(hatanaka.decompress_on_disk(filepath_zipped)))
else:
filepaths.append(filepath)
return filepaths
@retryable
@retryable
| [
11748,
5051,
22238,
198,
11748,
10117,
489,
571,
198,
11748,
6877,
272,
8130,
198,
11748,
28686,
198,
11748,
2956,
297,
571,
13,
25927,
198,
11748,
12972,
66,
6371,
198,
11748,
640,
198,
11748,
20218,
7753,
198,
198,
6738,
4818,
8079,
1... | 2.744373 | 622 |
import urllib2
import json
import MySQLdb
conn = MySQLdb.connect(host= "localhost", user="root", passwd="", db="hackerone_reports")
x = conn.cursor()
hackerone = "https://hackerone.com/programs/search?query=bounties%3Ayes&sort=name%3Aascending&limit=1000"
opener = urllib2.build_opener()
opener.addheaders = [('Accept','application/json, text/javascript, */*; q=0.01'),('content-type','application/json'),('x-requested-with','XMLHttpRequest')]
response = opener.open(hackerone)
print "Read the response..."
json_string = response.read()
print "Loading json..."
data = json.loads(json_string, encoding='latin-1')
print "Total programs: " + str(data['total'])
programs = data['results']
for program in programs:
about = program['about']
disclosure_email = ''
if 'disclosure_email' in program:
disclosure_email = program['disclosure_email']
disclosure_url = ''
if 'disclosure_url' in program:
disclosure_url = program['disclosure_url']
handle = program['handle']
name = program['name']
offers_rewards = '0'
if 'offers_rewards' in program:
offers_rewards = program['offers_rewards']
offers_thanks = '0'
if 'offers_thanks' in program:
offers_thanks = program['offers_thanks']
stripped_policy = program['stripped_policy']
url = program['url']
try:
x.execute("""INSERT INTO hackerone_programs(about, disclosure_email, disclosure_url, handle, name, offers_rewards, offers_thanks, stripped_policy, url) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)""",(about, disclosure_email, disclosure_url, handle, name, offers_rewards, offers_thanks, stripped_policy, url))
conn.commit()
print "Bounty program: " + handle.encode('latin-1') + " added to database."
except Exception as ex:
conn.rollback()
# print "Problems saving: " + str(ex) + ", skipping..."
pass
conn.close()
| [
198,
11748,
2956,
297,
571,
17,
198,
11748,
33918,
198,
11748,
33476,
9945,
198,
198,
37043,
796,
33476,
9945,
13,
8443,
7,
4774,
28,
366,
36750,
1600,
2836,
2625,
15763,
1600,
1208,
16993,
2625,
1600,
20613,
2625,
71,
10735,
505,
62,
... | 2.849445 | 631 |
import logging
import re
from pathlib import Path
from subprocess import check_output, CalledProcessError, STDOUT
from typing import Any, Dict, List, Optional, Tuple, Union
from .common import convert_external_variables
_RULE_BLOCK_REGEX = re.compile(r'^(?P<rule>\w+)\s+\[(?P<raw_meta>.*)\]\s+(?P<scanned_file>.*)\n(?P<raw_matches>(?:0x[a-f0-9]+.*(?:[\n]|$))+)', flags=re.MULTILINE)
_YARA_MATCH_REGEX = re.compile(r'^(?P<offset>0x[a-f0-9]+):(?P<tag>\S+):\s(?P<string>.+)$', flags=re.MULTILINE)
def scan(
signature_path: Union[str, Path],
file_path: Union[str, Path],
external_variables: Optional[Dict[str, Any]] = None,
recursive: bool = False,
compiled: bool = False
) -> dict:
'''
Scan files and return matches
:param signature_path: path to signature file
:param file_path: files to scan
:param external_variables: define external variables
:param recursive: scan recursively
:param compiled: rule is in compiled form (Yara >= 4 only!)
:return: a dict containing the scan results
'''
if external_variables is None:
external_variables = {}
variables = convert_external_variables(external_variables)
recursive_flag = '-r' if recursive else ''
compiled_flag = '-C' if compiled else ''
try:
command = f'yara {variables} {recursive_flag} {compiled_flag} -m -s {signature_path} {file_path}'
scan_result = check_output(command, shell=True, stderr=STDOUT)
return _parse_yara_output(scan_result.decode())
except CalledProcessError as e:
logging.error(f'There seems to be an error in the rule file:\n{e.output.decode()}', exc_info=True)
return {}
except Exception as e:
logging.error(f'Could not parse yara result: {e}', exc_info=True)
return {}
def _parse_meta_data(block: dict) -> Dict[str, str]:
'''
Will be of form 'item0=lowercaseboolean0,item1="value1",item2=value2,..'
'''
meta_data = dict()
for item in block['raw_meta'].split(','):
if '=' in item:
key, value = item.split('=', maxsplit=1)
value = value == 'true' if value in ['true', 'false'] else value.strip('"')
meta_data[key] = value
else:
logging.warning(f'Malformed meta string \'{block["raw_meta"]}\'')
return meta_data
| [
11748,
18931,
198,
11748,
302,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
850,
14681,
1330,
2198,
62,
22915,
11,
34099,
18709,
12331,
11,
48571,
12425,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
7343,
11,
32233,
11,
309,
29291,... | 2.467161 | 944 |
# BOJ 2448
| [
2,
16494,
41,
1987,
2780,
198
] | 1.833333 | 6 |
"""
A script for finding equal or near-equal partitions in a group.
Do parts a, b, and g
"""
from itertools import combinations
import random
import numpy as np
from matplotlib import pyplot as plt
from pathlib import Path
from progressbar import progressbar as pbar
DIR = Path(__file__).parent
group1 = [10, 13, 23, 6, 20]
group2 = [6, 4, 9, 14, 12, 3, 15, 15]
group3 = [93, 58, 141, 209, 179, 48, 225, 228]
group4 = [2474, 1129, 1388, 3752, 821, 2082, 201, 739]
if __name__ == '__main__':
# frac_perfect(1000)
plot_perfect()
| [
37811,
198,
32,
4226,
329,
4917,
4961,
393,
1474,
12,
40496,
43869,
287,
257,
1448,
13,
198,
5211,
3354,
257,
11,
275,
11,
290,
308,
198,
37811,
198,
6738,
340,
861,
10141,
1330,
17790,
198,
11748,
4738,
198,
11748,
299,
32152,
355,
... | 2.681373 | 204 |
from matplotlib import pyplot as plt
import io
from PIL import Image
import cv2
import torch
import os
WIDTH = 1280
HEIGHT = 760
model = torch.hub.load("ultralytics/yolov5", "custom", path="./best.pt")
# results_pandas structure
# xmin ymin xmax ymax confidence class name
cap = cv2.VideoCapture("./driving_video/driving3.mp4")
while cap.isOpened():
ret, frame = cap.read()
if ret:
img = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (WIDTH,HEIGHT))
results = get_prediction(img, model)
results.render()
processed_img = cv2.cvtColor(results.imgs[0], cv2.COLOR_BGR2RGB)
stop, processed_prediction = process_prediction(results.pandas().xyxy[0])
if stop:
print("#### PLEASE STOP ####")
cv2.imshow('Result', processed_img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
print('video is ended')
cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
cap.release()
cv2.destroyAllWindows() | [
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
11748,
33245,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
269,
85,
17,
198,
11748,
28034,
198,
11748,
28686,
628,
198,
54,
2389,
4221,
796,
37674,
198,
13909,
9947,
796,
... | 2.09589 | 511 |
import runs
import optimization as opt
| [
11748,
4539,
198,
11748,
23989,
355,
2172,
628
] | 5 | 8 |