index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
73,311 | Meitie/WTC_Code_Clinics | refs/heads/main | /clinician/insert.py | import re, datetime
def valid_date(start_date):
    """
    Validate that `start_date` is exactly YYYY-MM-DD shaped (digits only).

    The original pattern was unanchored, so strings such as '2020-01-0199'
    or 'xx2020-01-01yy' also passed; anchoring makes this consistent with
    `valid_time`, which already anchors its pattern.

    :param start_date: candidate date string.
    :return: True if valid, False if invalid.
    """
    return bool(re.search(r"^\d{4}-\d{2}-\d{2}$", start_date))
def valid_time(start_time):
    """
    Validate that `start_time` matches the HH:MM pattern.

    :param start_time: candidate time string.
    :return: True when the whole string is two digits, a colon, and two
        digits; False otherwise.
    """
    # re.search with an anchored pattern is truthy exactly when the
    # original re.findall produced a non-empty match list.
    return bool(re.search(r"^\d\d:\d\d$", start_time))
def validate_params(command_params):
    """
    Validate the (date, time) command parameters.

    :param command_params: list expected to hold exactly [date, time].
    :return: (date, time) strings when both are well-formed, otherwise
        ("", "") after printing an error message.
    """
    # One length check covers both the empty list and the wrong-arity cases
    # (the original tested `== []` and then `len != 2`, and also had an
    # unreachable trailing return). The message typo "plese" is fixed.
    if len(command_params) != 2:
        print("invalid request, please try again")
        return "", ""
    date, time = command_params
    if valid_date(date) and valid_time(time):
        return date, time
    print("invalid request, please try again")
    return "", ""
def meeting_setups(command_params, user_name):
    """
    Derive the event summary and description from the extra command params.

    :param command_params: optional [summary, description] list; a single
        element supplies only the summary.
    :param user_name: fallback summary when no params are given.
    :return: (summary, description)
    """
    if not command_params:
        return user_name, "Open for anything"
    summary = command_params[0]
    # The original indexed [1] unconditionally and crashed with IndexError
    # when only a summary was supplied.
    if len(command_params) > 1:
        description = command_params[1]
    else:
        description = "Open for anything"
    return summary, description
def clearing_dates(table_data):
    """
    Extract the list of available start dates from the schedule table.

    The first row of `table_data` holds the dates plus one empty-string
    placeholder cell; the placeholder is dropped.

    :param table_data: table whose first element is the header row.
    :return: a new list of start dates (the caller's data is left intact).
    """
    # The original popped from table_data[0] in place (mutating the
    # caller's table) and raised ValueError when no "" cell was present.
    available_dates = list(table_data[0])
    if "" in available_dates:
        available_dates.remove("")
    return available_dates
def validated_slot(table_data, date, time):
    """
    Check that `time` is one of the fixed clinic start times and that
    `date` appears in the available dates extracted from `table_data`.

    :return: True when both match, otherwise False.
    """
    start_times = ['08:00', '08:30', '10:00', '11:30', '13:00', '14:30', '16:00', '17:30']
    available_dates = clearing_dates(table_data)
    # Membership tests replace the original manual flag-setting loops;
    # str() keeps the comparison identical for non-string date cells.
    return time in start_times and date in (str(d) for d in available_dates)
def user_pre_slotted(cc_events, user_name):
    """
    Collect the start times of clinic-calendar events created by `user_name`.

    The creator's email local-part (text before the '@') is compared
    against `user_name`.

    :param cc_events: list of Google Calendar event dicts with
        'creator.email' and 'start.dateTime' fields.
    :return: list of 'start.dateTime' strings for events the user created.
    """
    # The original also built `creators` and `start_times` lists that were
    # never used afterwards; one comprehension keeps just the matches.
    return [
        event['start']['dateTime']
        for event in cc_events
        if event['creator']['email'].split('@')[0] == user_name
    ]
def already_booked(slots, date, time):
    """
    Report whether the requested slot is still free for this user.

    Builds the calendar dateTime string for (date, time) and checks it
    against the user's existing bookings.

    :param slots: list of 'YYYY-MM-DDTHH:MM:SS+02:00' strings.
    :return: True when the slot is free, False when it is already booked.
    """
    requested = f'{date}T{time}:00+02:00'
    # A membership test replaces the original search-loop plus sentinel
    # string: free exactly when no booked slot equals the request.
    return requested not in slots
def make_datetime_from_string(string):
    """
    Parse an ISO-like timestamp string (e.g. '2021-01-01T08:00:00+0200')
    into a timezone-aware datetime object.

    :param string: timestamp in '%Y-%m-%dT%H:%M:%S%z' form.
    :return: the parsed datetime.
    """
    fmt = "%Y-%m-%dT%H:%M:%S%z"
    return datetime.datetime.strptime(string, fmt)
def freebusy_check(service, date, time, user_name):
    """
    Query the calendar free/busy API for a 90-minute window starting at
    `date`/`time` (+02:00), covering both the student's calendar and the
    clinic calendar.

    :param service: authorised calendar API service object (its
        `freebusy().query(...)` chain matches the Google Calendar API).
    :param date: 'YYYY-MM-DD' string.
    :param time: 'HH:MM' string.
    :param user_name: student login; expanded to the student email address.
    :return: the raw free/busy query response dict.
    """
    event = {
        # Window start/end as ISO strings; end is start + 90 minutes.
        "timeMin": (make_datetime_from_string(f'{date}T{time}:00+0200')).isoformat(),
        "timeMax": (make_datetime_from_string(f'{date}T{time}:00+0200')+datetime.timedelta(minutes = 90)).isoformat(),
        "timeZone": 'Africa/Johannesburg',
        "items": [
            {
                "id": user_name + '@student.wethinkcode.co.za'
            },
            {
                'id': 'teamtwotesting@gmail.com'
            }
        ]
    }
    eventsResult = service.freebusy().query(body=event).execute()
    return eventsResult
def do_you_have_meetings(service, date, time, user_name):
    """
    Check whether the user's own calendar is free in the requested window.

    Runs a free/busy query via `freebusy_check` and inspects only the
    student's calendar (the clinic calendar entry in the response is not
    needed here).

    :return: True when the student has no busy entries in the window,
        False otherwise.
    """
    events = freebusy_check(service, date, time, user_name)
    calendars = events['calendars']
    patient = calendars[user_name + '@student.wethinkcode.co.za']
    # An empty 'busy' list means no conflicting event; the original also
    # bound an unused `clinic` variable and had an unreachable return.
    return patient['busy'] == []
def insertion_of_event(service, event, date, time):
    """
    Insert `event` into the clinic calendar and print the outcome.

    :param service: authorised calendar API service object.
    :param event: event body dict for the insert call.
    :param date: date string used only in the success message.
    :param time: time string used only in the success message.
    :return: None (messages are printed, not returned).
    """
    try:
        service.events().insert(
            calendarId='teamtwotesting@gmail.com', body=event, maxAttendees=2,
            sendUpdates='all', sendNotifications=True).execute()
        print(f"The event(s) have been created at {time} on the {date}")
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; API failures fall through to the message below.
        print("A spooky thing happened. Please try again.")
def valid_date_checker(date, time):
    """
    Build start/end timestamps for a 30-minute slot at `date`/`time`.

    :param date: 'YYYY-MM-DD' string.
    :param time: 'HH:MM' string.
    :return: (starttime, endtime, valid_slot) where starttime/endtime are
        '+02:00'-suffixed ISO strings 30 minutes apart and valid_slot is
        the naive datetime of the start.
    :raises ValueError: if the fields do not form a real calendar datetime
        (the original looped forever printing an error message instead).
    """
    year, month, day = date.split("-")
    hour, minute = time.split(":")
    try:
        # int() conversions and datetime() both raise ValueError on junk.
        valid_slot = datetime.datetime(
            year=int(year), month=int(month), day=int(day),
            hour=int(hour), minute=int(minute))
    except ValueError:
        print("Please enter a valid date and time")
        raise
    starttime = f'{year}-{month}-{day}T{hour}:{minute}:00+02:00'
    end_date, end_time = str(valid_slot + datetime.timedelta(minutes=30)).split(" ")
    endtime = f'{end_date}T{end_time}+02:00'
    return starttime, endtime, valid_slot
def create_event(date, time, summary, description, user_name, service):
    """
    Build the calendar event body and insert it.

    For the '08:00' and '17:30' start times a single 30-minute event is
    inserted; every other start time is treated as a 90-minute slot and
    split into three back-to-back 30-minute events.

    :param date: 'YYYY-MM-DD' string.
    :param time: 'HH:MM' string.
    :param summary: event title.
    :param description: event description.
    :param user_name: student login; attendee email is derived from it.
    :param service: authorised calendar API service object.
    """
    starttime, endtime, valid_slot = valid_date_checker(date, time)
    event = {
        'summary': summary,
        'location': '',
        'description': description,
        'start': {
            'dateTime': starttime,
            'timeZone': 'Africa/Johannesburg',
        },
        'end': {
            'dateTime': endtime,
            'timeZone': 'Africa/Johannesburg',
        },
        "hangoutLink": "https://meet.google.com/snz-hfvt-zuo?pli=1&authuser=0",
        'attendees': [
            {'email': f'{user_name}@student.wethinkcode.co.za'},
        ],
        'reminders': {
            'useDefault': False,
            'overrides': [
                {'method': 'email', 'minutes': 24 * 60},
                {'method': 'popup', 'minutes': 10},
            ],
        },
        'anyoneCanAddSelf': True,
    }
    if time == '08:00' or time == '17:30':
        insertion_of_event(service, event, date, time)
    else:
        # The same `event` dict is re-used: only its start/end dateTime
        # fields are rewritten for each of the three 30-minute sub-slots.
        for i in range(3):
            starttime = str(valid_slot + datetime.timedelta(minutes=(i) * 30)).split(" ")
            endtime = str(valid_slot + datetime.timedelta(minutes=(i + 1) * 30)).split(" ")
            event["start"]["dateTime"] = f'{starttime[0]}T{starttime[1]}+02:00'
            event["end"]["dateTime"] = f'{endtime[0]}T{endtime[1]}+02:00'
            # NOTE(review): here `date`/`time` args are the split date and
            # time strings of the sub-slot, not the originals — confirm the
            # printed message is intended to show sub-slot times.
            insertion_of_event(service, event, starttime[0], starttime[1])
    return
def insert_event(command_params, service, user_name, table_data, full_time_list, cc_events, us_events):
    """
    Validate the request and book a clinic slot for the user.

    Pipeline:
    1. Parse (date, time) and optional (summary, description) from params.
    2. Reject slots outside the allowed times/dates table.
    3. Reject slots the user already booked on the clinic calendar.
    4. Reject slots conflicting with the user's own calendar.
    5. Otherwise create the event.

    :param full_time_list: unused here; kept for the caller's interface.
    :param us_events: unused here; kept for the caller's interface.
    """
    # On invalid params this returns ("", ""); execution still continues
    # and is stopped by the validated_slot check below.
    date, time = validate_params(command_params[:2])
    summary, description = meeting_setups(command_params[2:], user_name)
    if validated_slot(table_data, date, time) == False:
        print("Invalid time slot, please stick to the allotted times, please check the calendar.")
        return
    slots = user_pre_slotted(cc_events, user_name)
    if already_booked(slots, date, time) == False:
        print(f"You have already a time booked on '{date}' at '{time}'.")
        return
    if do_you_have_meetings(service, date, time, user_name) == False:
        print("You already have a meeting at this time in your calendar.")
        return
    create_event(date, time, summary, description, user_name, service)
| {"/user_logging/__init__.py": ["/user_logging/login.py", "/user_logging/logout.py"], "/patient/__init__.py": ["/patient/insert.py", "/patient/delete.py"], "/clinician/__init__.py": ["/clinician/insert.py", "/clinician/delete.py", "/clinician/update.py"], "/clinic_calendars/__init__.py": ["/clinic_calendars/client_calendar.py"], "/api_calls/__init__.py": ["/api_calls/get_events.py"], "/user_logging/login.py": ["/api_calls/__init__.py"]} |
73,331 | yh2n/financial-analysis-v2 | refs/heads/master | /src/strats/threshold_momentum.py | import pandas as pd
import numpy as np
def threshold_momentum_returns(close_prices, hi_prices, threshold):
    """
    Sell-day returns for the basic threshold-momentum strategy.

    Logic:
    - Buy at close_t if the close-to-close return from t-1 to t meets or
      exceeds `threshold`
    - Sell at the following day's high (theoretical)

    :return: DataFrame of returns on sell days, NaN elsewhere.
    """
    prev_close = close_prices.shift()
    daily_move = close_prices.pct_change()
    # Return from yesterday's close to today's high.
    hi_return = (hi_prices - prev_close) / prev_close
    held_overnight = daily_move.shift() >= threshold
    return hi_return.where(held_overnight, other=np.nan)
def threshold_momentum_limit_returns(
        close_prices, hi_prices, threshold, limit):
    """
    :returns: a series with the returns on the days the strategy would
        have sold.
    Logic:
    - Buy at close_t if return on close from t-1 to t exceeds `threshold`
    - Sell at either
        - a return of `limit`
        - the close the following day, if the return never reaches `limit`.
    """
    close_to_close = close_prices.pct_change()
    # Return from yesterday's close to today's high.
    close_to_hi = (hi_prices - close_prices.shift()) / close_prices.shift()
    holding = close_to_close.shift() >= threshold
    sold_at_limit = (close_to_hi >= limit) & holding
    sold_at_close = (close_to_hi < limit) & holding
    # All-NaN frame with the same shape/index/columns as the inputs.
    returns = close_to_close.copy() * np.nan
    returns[sold_at_limit] = limit
    # Boolean-mask assignment: pandas aligns close_to_close by index and
    # columns, so only the masked cells receive their close-to-close value.
    returns[sold_at_close] = close_to_close
    return returns
def threshold_momentum_holdout_returns(
        close_prices, hi_prices, threshold, limit):
    """
    :returns: (returns, drawdowns) DataFrames. `returns` holds the realised
        return on each sell day; `drawdowns` holds the running close-based
        return on days a losing position was still held.
    Logic:
    - Buy at close_t if return on close from t-1 to t exceeds `threshold`
    - Sell at either
        - `limit` if return meets or exceeds `limit` the next day
        - first nonnegative return thereafter
    """
    close_to_close = close_prices.pct_change()
    returns = pd.DataFrame(
        np.nan, index=close_to_close.index, columns=close_to_close.columns)
    drawdowns = pd.DataFrame(
        np.nan, index=close_to_close.index, columns=close_to_close.columns)
    for ticker in returns.columns:
        # Per-ticker position state: date and price of the open buy, if any.
        bought_date = None
        bought_price = None
        closes = close_prices[ticker]
        his = hi_prices[ticker]
        for day, yesterday in zip(returns.index[1:], returns.index[:-1]):
            if bought_date is not None:
                close_return = (closes.loc[day] - bought_price) / bought_price
                hi_return = (his.loc[day] - bought_price) / bought_price
                # Day after the buy targets `limit`; later days target
                # break-even (0).
                target = limit if bought_date == yesterday else 0
                # If it's day t+1 and target isn't hit during the day
                # but the close return is non-negative, then close out
                if hi_return >= target or close_return >= 0:
                    # The recorded return is the target itself, not the
                    # realised close/high return.
                    returns.loc[day, ticker] = target
                    bought_date = None
                    bought_price = None
                else:
                    drawdowns.loc[day, ticker] = close_return
            # A new buy may trigger on the same day a position was closed.
            if (bought_date is None
                    and close_to_close.loc[day, ticker] >= threshold):
                bought_date = day
                bought_price = closes.loc[day]
    return returns, drawdowns
| {"/scripts/chart_seasonality_example.py": ["/src/utils/get_prices.py", "/src/charting/chart_seasonality.py"], "/src/utils/load_bucket_prices.py": ["/src/utils/get_prices.py"], "/scripts/threshold_momentum_buckets.py": ["/src/strats/threshold_momentum.py", "/src/utils/load_bucket_prices.py"]} |
73,332 | yh2n/financial-analysis-v2 | refs/heads/master | /__init__.py | from .fe_charts_django import * | {"/scripts/chart_seasonality_example.py": ["/src/utils/get_prices.py", "/src/charting/chart_seasonality.py"], "/src/utils/load_bucket_prices.py": ["/src/utils/get_prices.py"], "/scripts/threshold_momentum_buckets.py": ["/src/strats/threshold_momentum.py", "/src/utils/load_bucket_prices.py"]} |
73,333 | yh2n/financial-analysis-v2 | refs/heads/master | /src/test/test_threshold_momentum.py | import unittest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from strats.threshold_momentum import (
threshold_momentum_returns,
threshold_momentum_limit_returns,
threshold_momentum_holdout_returns)
def single_col_df(series):
    """Wrap `series` in a single-column float DataFrame keyed 'AAPL'."""
    frame = pd.DataFrame({'AAPL': series})
    return frame.astype(float)
class TestSellHighThresholdMomentum(unittest.TestCase):
    """Tests for `threshold_momentum_returns`: buy on a close-to-close
    move of at least the threshold, sell at the next day's high."""

    def test_sell(self):
        # Correct returns for single jump
        close_prices = single_col_df([1, 2, 2])
        hi_prices = single_col_df([1, 2, 4])
        expected = single_col_df([np.nan, np.nan, 1.0])
        returns = threshold_momentum_returns(close_prices, hi_prices, 0.05)
        assert_frame_equal(returns, expected)

    def test_no_sell(self):
        # Returns nan series for no jumps
        close_prices = single_col_df([1, 1, 1])
        hi_prices = single_col_df([1, 10, 20])
        expected = single_col_df([np.nan, np.nan, np.nan])
        returns = threshold_momentum_returns(close_prices, hi_prices, 0.05)
        assert_frame_equal(returns, expected)

    def test_buy_sell(self):
        # Buys and sells on the same day
        close_prices = single_col_df([1, 2, 3, 3])
        hi_prices = single_col_df([1, 2, 4, 6])
        expected = single_col_df([np.nan, np.nan, 1.0, 1.0])
        returns = threshold_momentum_returns(close_prices, hi_prices, 0.05)
        assert_frame_equal(returns, expected)

    def test_holding_at_end(self):
        # Don't calculate any returns for days still holding at end
        close_prices = single_col_df([1, 1, 2])
        hi_prices = single_col_df([1, 2, 2])
        expected = single_col_df([np.nan, np.nan, np.nan])
        returns = threshold_momentum_returns(close_prices, hi_prices, 0.05)
        assert_frame_equal(returns, expected)
class TestCloseoutThresholdMomentum(unittest.TestCase):
    """Tests for `threshold_momentum_limit_returns`: sell at `limit` when
    the next day's high reaches it, otherwise at the next day's close."""

    # Sell target shared by all cases below.
    limit = 0.05

    def test_hits(self):
        # Returns only limit when limit exceeded
        close_prices = single_col_df([1, 2, 2])
        hi_prices = single_col_df([1, 2, 4])
        expected = single_col_df([np.nan, np.nan, self.limit])
        returns = threshold_momentum_limit_returns(
            close_prices, hi_prices, 0.05, self.limit)
        assert_frame_equal(returns, expected)

    def test_closes_out(self):
        # Sells at close when limit not hit
        close_prices = single_col_df([1, 2, 1])
        eps = 0.01
        # High that stays just under the limit trigger.
        hi_below_limit = close_prices.iloc[1] * (1 + self.limit) - eps
        hi_prices = single_col_df([1, 2, hi_below_limit])
        expected = single_col_df([np.nan, np.nan, -.5])
        returns = threshold_momentum_limit_returns(
            close_prices, hi_prices, 0.05, self.limit)
        assert_frame_equal(returns, expected)

    def test_no_sell(self):
        # No qualifying buy signal, so no returns at all.
        close_prices = single_col_df([1, 1, 1])
        hi_prices = single_col_df([1, 2, 2])
        expected = single_col_df([np.nan, np.nan, np.nan])
        returns = threshold_momentum_limit_returns(
            close_prices, hi_prices, 0.05, self.limit)
        assert_frame_equal(returns, expected)

    def test_buy_sell(self):
        # Buys and sells on the same day
        close_prices = single_col_df([1, 2, 3, 3])
        hi_prices = single_col_df([1, 2, 4, 6])
        expected = single_col_df([np.nan, np.nan, self.limit, self.limit])
        returns = threshold_momentum_limit_returns(
            close_prices, hi_prices, 0.05, self.limit)
        assert_frame_equal(returns, expected)

    def test_holding_at_end(self):
        # Don't calculate any returns for days still holding at end
        close_prices = single_col_df([1, 1, 2])
        hi_prices = single_col_df([1, 2, 2])
        expected = single_col_df([np.nan, np.nan, np.nan])
        returns = threshold_momentum_limit_returns(
            close_prices, hi_prices, 0.05, self.limit)
        assert_frame_equal(returns, expected)
class TestHoldoutThresholdMomentum(unittest.TestCase):
    """Tests for `threshold_momentum_holdout_returns`: sell at `limit` the
    next day, otherwise hold until the first non-negative return, recording
    drawdowns while holding at a loss."""

    # Sell target shared by all cases below.
    limit = 0.05

    def test_waits(self):
        # Recovers 2 days after buying
        close_prices = single_col_df([1, 2, 1, 2])
        hi_prices = single_col_df([1, 2, 1, 2])
        expected_returns = single_col_df([np.nan, np.nan, np.nan, 0])
        expected_drawdowns = single_col_df([np.nan, np.nan, -0.5, np.nan])
        returns, drawdowns = threshold_momentum_holdout_returns(
            close_prices, hi_prices, 0.05, self.limit)
        assert_frame_equal(returns, expected_returns)
        assert_frame_equal(drawdowns, expected_drawdowns)

    def test_hits(self):
        # Hits day after buying
        close_prices = single_col_df([1, 2, 1, 2])
        hi_prices = single_col_df([1, 2, 3, 2])
        expected_returns = single_col_df([np.nan, np.nan, self.limit, np.nan])
        expected_drawdowns = single_col_df([np.nan, np.nan, np.nan, np.nan])
        returns, drawdowns = threshold_momentum_holdout_returns(
            close_prices, hi_prices, 0.05, self.limit)
        assert_frame_equal(returns, expected_returns)
        assert_frame_equal(drawdowns, expected_drawdowns)

    def test_no_sell(self):
        # No qualifying buy signal, so no returns or drawdowns.
        close_prices = single_col_df([1, 1, 1])
        hi_prices = single_col_df([1, 2, 2])
        expected = single_col_df([np.nan, np.nan, np.nan])
        expected_drawdowns = single_col_df([np.nan, np.nan, np.nan])
        returns, drawdowns = threshold_momentum_holdout_returns(
            close_prices, hi_prices, 0.05, self.limit)
        assert_frame_equal(returns, expected)
        assert_frame_equal(drawdowns, expected_drawdowns)

    def test_buy_sell(self):
        # Buys again on the same day a position closes out.
        close_prices = single_col_df([1, 2, 3, 3])
        hi_prices = single_col_df([1, 2, 4, 6])
        expected = single_col_df([np.nan, np.nan, self.limit, self.limit])
        expected_drawdowns = single_col_df([np.nan, np.nan, np.nan, np.nan])
        returns, drawdowns = threshold_momentum_holdout_returns(
            close_prices, hi_prices, 0.05, self.limit)
        assert_frame_equal(returns, expected)
        assert_frame_equal(drawdowns, expected_drawdowns)

    def test_holding_at_end(self):
        # Don't calculate any returns for days still holding at end
        close_prices = single_col_df([1, 1, 2])
        hi_prices = single_col_df([1, 2, 2])
        expected = single_col_df([np.nan, np.nan, np.nan])
        expected_drawdowns = single_col_df([np.nan, np.nan, np.nan])
        returns, drawdowns = threshold_momentum_holdout_returns(
            close_prices, hi_prices, 0.05, self.limit)
        assert_frame_equal(returns, expected)
        assert_frame_equal(drawdowns, expected_drawdowns)

    def test_sells_if_breakeven_during_day(self):
        # Test case where doesn't sell next day, and day after closes
        # at a loss but breaks even during the day (high >= buy_price)
        close_prices = single_col_df([1, 2, 1, 1])
        hi_prices = single_col_df([1, 2, 1, 2])
        expected_returns = single_col_df([np.nan, np.nan, np.nan, 0])
        expected_drawdowns = single_col_df([np.nan, np.nan, -0.5, np.nan])
        returns, drawdowns = threshold_momentum_holdout_returns(
            close_prices, hi_prices, 0.05, self.limit)
        assert_frame_equal(returns, expected_returns)
        assert_frame_equal(drawdowns, expected_drawdowns)
if __name__ == '__main__':
unittest.main()
| {"/scripts/chart_seasonality_example.py": ["/src/utils/get_prices.py", "/src/charting/chart_seasonality.py"], "/src/utils/load_bucket_prices.py": ["/src/utils/get_prices.py"], "/scripts/threshold_momentum_buckets.py": ["/src/strats/threshold_momentum.py", "/src/utils/load_bucket_prices.py"]} |
73,334 | yh2n/financial-analysis-v2 | refs/heads/master | /src/utils/fama_french.py | import pandas as pd
from statsmodels.regression.linear_model import OLS
from statsmodels.api import add_constant
def read_monthly_ff_file(path):
    """
    Read a monthly Fama-French CSV whose index column holds YYYYMM
    integers (e.g. 197001).

    The index is converted to a monthly PeriodIndex, column names are
    stripped of surrounding whitespace, and values are converted from
    percent to decimal returns.
    """
    data = pd.read_csv(path, index_col=0, parse_dates=True)
    # Raw index entries are 'yearmonth' integers; map each to the first
    # day of its month, then collapse to monthly periods.
    data.index = data.index.to_series().apply(_yearmonth_to_datetime)
    data.index = data.index.to_period('M')
    data.columns = data.columns.str.strip()
    return data / 100
def read_daily_ff_file(path):
    """
    Read a daily Fama-French CSV: date-indexed, whitespace-padded column
    names stripped, values converted from percent to decimal returns.
    """
    frame = pd.read_csv(path, index_col=0, parse_dates=True)
    return frame.rename(columns=str.strip) / 100
def _yearmonth_to_datetime(yearmonth_int):
"""Takes a 'yearmonth' integer (so January 1970 would
be `197001`) and returns the `pd.Timestamp` for the first day
of that month.
Parameters
----------
yearmonth_int
Returns
-------
pd.Timestamp
"""
yearmonth = str(yearmonth_int)
year = int(yearmonth[:4])
month = int(yearmonth[-2:])
return pd.Timestamp(
year=year,
month=month,
day=1)
def run_aligned_ols(endog, exog):
    """Fit OLS of `endog` on `exog` plus an intercept, after dropping NaNs
    from `endog` and trimming both inputs to their common index span.

    Returns the fitted statsmodels results object.
    """
    endog, exog = _align_dfs(endog.dropna(), exog)
    return OLS(endog, add_constant(exog)).fit()
def _align_dfs(df1, df2):
"""Subsets both df1 and df2 to have the same index.
Tolerant of unequal endpoints, but not of otherwise missing
index values.
NOTE: This may be useful enough to pull out into a more general
utils file at some point.
Returns
-------
df1, df2
Both along the same index, which has endpoints corresponding
to the tighter of the two.
"""
start = max(df1.index.min(), df2.index.min())
end = min(df1.index.max(), df2.index.max())
df1 = df1.loc[start:end]
df2 = df2.loc[start:end]
if not df1.index.equals(df2.index):
raise ValueError('Indexes do not contiguously overlap.')
return df1, df2
# The returns aggregators below may end up better suited in a more general
# utils file, but have so far only been used in the FF analyses.
def monthly_returns(series, is_return_series=True):
    """Aggregate a daily series into one value per calendar month.

    When `is_return_series` is True the input is treated as daily returns
    and compounded within each month; otherwise it is treated as a price
    series and each month's last/first ratio (minus one) is used.
    """
    months = series.index.to_period('M')
    if is_return_series:
        # Compound gross returns within each month.
        return (series + 1).groupby(months).prod() - 1
    grouped = series.groupby(months)
    return grouped.last() / grouped.first() - 1
def returns_over_periods(daily_returns, periods, is_return_series=True):
    """Calculates the total returns for each period in `periods`.

    Parameters
    ----------
    daily_returns : pd.Series
    periods : list of pd.Timestamp slices
    is_return_series : bool
        NOTE(review): currently unused — the body always compounds
        `daily_returns` as a return series; confirm whether price-series
        support was intended here as in `monthly_returns`.

    Returns
    -------
    pd.Series
        The index corresponds to the endpoints of the `periods`; value is the
        total returns within that period.
    """
    # Discard observations after the final period's end; earlier dates must
    # all fall inside some period or _period_end_for_date raises.
    daily_returns = daily_returns.loc[:periods[-1].stop]
    period_end = daily_returns.index.to_series().apply(
        lambda date: _period_end_for_date(date, periods))
    return (1 + daily_returns).groupby(period_end).prod() - 1
def returns_over_periods_from_prices(daily_prices, periods):
    """As `returns_over_periods`, but calculates returns from `daily_prices`
    first.
    """
    period_end = daily_prices.index.to_series().apply(
        lambda date: _period_end_for_date(date, periods))
    # pct_change is computed within each period group, so the first day of
    # every period yields NaN rather than a cross-period return.
    return returns_over_periods(
        daily_prices.groupby(period_end).pct_change(), periods)
def _period_end_for_date(date, periods):
for period in periods:
if date >= period.start and date < period.stop:
return period.stop
raise ValueError(f'Date {date} not in any period.')
| {"/scripts/chart_seasonality_example.py": ["/src/utils/get_prices.py", "/src/charting/chart_seasonality.py"], "/src/utils/load_bucket_prices.py": ["/src/utils/get_prices.py"], "/scripts/threshold_momentum_buckets.py": ["/src/strats/threshold_momentum.py", "/src/utils/load_bucket_prices.py"]} |
73,335 | yh2n/financial-analysis-v2 | refs/heads/master | /src/evaluation/price_targets.py | def price_targets_test_df(
close_prices, estimates,
days_before_fiscal_end, return_window):
"""
Create a dataframe to be used in an analysis of price targets vs.
returns.
To try to approximate independent estimates, one estimate is chosen
per fiscal period per ticker. For consistency the estimate
`days_before_fiscal_end` days before the announcement is used.
Parameters
----------
close_prices
estimates:
Flat DataFrame of estimate data as returned from FactSet
days_before_fiscal_end : int
The number of days before earnings announcement to use to pick the
estimate to include.
return_window : int
Window following the estimate in which to calculate returns
Returns
-------
DataFrame with one row per (ticker, fiscalEndDate) pair. Each row
contains potential exog variables around price and returns, and endog
variables using the estimates such as high, low, mean, and deviation from
current price.
"""
grouped = estimates.groupby(['ticker', 'fiscalEndDate'])
df = grouped \
.tail(days_before_fiscal_end) \
.groupby(['ticker', 'fiscalEndDate']) \
.first()
# Convert to MultiIndex of (ticker, [dates])
prices = close_prices.stack().swaplevel().sort_index()
prices_by_ticker = prices.groupby(level=0)
returns = prices_by_ticker.pct_change().rolling(return_window).mean()
total_returns = prices_by_ticker.apply(_nday_total_return, return_window)
df = df.reset_index().set_index(['ticker', 'estimateDate'])
# Exog
df['price_realised'] = prices.shift(-days_before_fiscal_end)
df['price_at_estimate'] = prices
df['from_current'] = df['mean'] / df['price_at_estimate']
df['hi_sd'] = (df['high'] - df['mean']) / df['standardDeviation']
df['low_sd'] = (df['low'] - df['mean']) / df['standardDeviation']
df['sd_pct'] = df['standardDeviation'] / df['mean']
df['hi_sd'] = df['hi_sd'].fillna(0)
df['low_sd'] = df['low_sd'].fillna(0)
# Endog
df['mean_return'] = returns.shift(-return_window)
df['total_return'] = total_returns.shift(-return_window)
df['miss'] = df['price_realised'] / df['mean']
df = df[~df['mean_return'].isna()]
df['realised_diff'] = (df['price_realised'] / df['price_at_estimate']) - 1
df = df[df['estimateCount'] > 5]
return df
def _nday_total_return(series, n):
return (series / series.shift(n)) - 1
| {"/scripts/chart_seasonality_example.py": ["/src/utils/get_prices.py", "/src/charting/chart_seasonality.py"], "/src/utils/load_bucket_prices.py": ["/src/utils/get_prices.py"], "/scripts/threshold_momentum_buckets.py": ["/src/strats/threshold_momentum.py", "/src/utils/load_bucket_prices.py"]} |
73,336 | yh2n/financial-analysis-v2 | refs/heads/master | /src/strats/tw_portfolio.py | import numpy as np
def n_day_periods(trading_days, offset, n):
    """Generate time periods of length n,
    using `trading_days` as the calendar.

    `offset` allows different period sets to be created from
    the same calendar, even if the period duration is the same.

    NOTE: In future, this may be better placed in a more general Portfolio
    utils file but so far has only been used for the TW portfolio.

    Parameters
    ----------
    trading_days : pd.DateTimeIndex
    offset : int
        Number of days after start of `trading_days` from which
        to begin generating the periods.
    n : int
        Duration of each period

    Returns
    -------
    list of slices of pd.Timestamps, each spanning `n` days according
    to `trading_days`
    """
    period_endpoints = range(0, len(trading_days), n)
    # Pair consecutive endpoints: (0, n), (n, 2n), ...
    periods = zip(period_endpoints, period_endpoints[1:])
    # The bound check on the end index also protects the start index,
    # since period[0] < period[1] for every pair.
    periods = [
        slice(
            trading_days[offset + period[0]],
            trading_days[offset + period[1]])
        for period in periods
        if offset + period[1] < len(trading_days)]
    return periods
def corr_to_benchmarks(returns, bench_returns, window):
    """Calculate the rolling correlation of all stocks in the `returns`
    time series to each of the benchmarks in the `bench_returns`
    series.

    The result is in theory a 3D tensor, with dimensions
    (num_benchmarks) x (num_tickers) x (num_days)
    but is returned as a dictionary here for simplicity.

    Parameters
    ----------
    returns : pd.DataFrame
    bench_returns : pd.DataFrame
    window : int
        Size of the rolling window

    Returns
    -------
    dict
        with keys corresponding to each benchmark, and values the rolling
        correlations of all tickers in `returns` to that benchmark.
    """
    corrs = {}
    # One DataFrame of rolling correlations per benchmark column.
    for name, series in bench_returns.items():
        corrs[name] = returns.rolling(window) \
            .corr(series.rolling(window))
    return corrs
def quantile_corr_criteria(corrs1, corrs2, quantile):
    """Calculates the quantile-based Third Way criteria, which requires that
    each qualifying stock has a magnitude of correlation to both value and
    growth based indexes less than some threshold.

    Parameters
    ----------
    corrs1 : pd.DataFrame
        Rolling correlations in time for each ticker to one of the
        benchmark indexes.
    corrs2 : pd.DataFrame
    quantile : int

    Returns
    -------
    pd.DataFrame
        of shape (num_trading_days, num_tickers). Each row is a boolean series
        of all tickers with truthy values for those satisfying the criteria
        on that day.
    """
    abs1 = corrs1.abs()
    abs2 = corrs2.abs()
    # Per-day (row-wise) cutoffs; a ticker qualifies only when it is below
    # its day's cutoff for both benchmarks.
    below1 = abs1.lt(abs1.quantile(q=quantile, axis=1), axis=0)
    below2 = abs2.lt(abs2.quantile(q=quantile, axis=1), axis=0)
    return below1 & below2
def bottom_k_total_corr_criteria(corrs1, corrs2, k):
    """Similar signature to `quantile_corr_criteria`, but instead ensures a
    fixed number of qualifying tickers on each day by computing the maximum
    correlation magnitude of each ticker to either benchmark, and accepting
    only those tickers with the `k` lowest scores.

    Parameters
    ----------
    corrs1 : pd.DataFrame
    corrs2 : pd.DataFrame
    k : int

    Returns
    -------
    pd.DataFrame
        Boolean frame marking, per day, the `k` tickers with the lowest
        worst-case correlation magnitude.
    """
    # `rank < k` only accepted ranks 1..k-1 (k-1 tickers); `<= k` accepts
    # the k lowest scores as the docstring promises.
    return np.maximum(corrs1.abs(), corrs2.abs()).rank(axis=1) <= k
| {"/scripts/chart_seasonality_example.py": ["/src/utils/get_prices.py", "/src/charting/chart_seasonality.py"], "/src/utils/load_bucket_prices.py": ["/src/utils/get_prices.py"], "/scripts/threshold_momentum_buckets.py": ["/src/strats/threshold_momentum.py", "/src/utils/load_bucket_prices.py"]} |
73,337 | yh2n/financial-analysis-v2 | refs/heads/master | /src/evaluation/threshold_momentum.py | import pandas as pd
def calculate_stats(returns, threshold, drawdowns=None):
    """Summarise per-ticker statistics of a strategy's sell-day returns.

    Parameters
    ----------
    returns : pd.DataFrame
        One column per ticker; NaN on non-sell days.
    threshold : float
        Recorded alongside each row for later grouping.
    drawdowns : pd.DataFrame, optional
        When given, drawdown summary columns are appended per ticker.

    Returns
    -------
    list of dict, one per ticker.
    """
    rows = []
    for ticker in returns.columns:
        moves = returns[ticker].dropna()
        mean = moves.mean()
        std = moves.std()
        row = {
            'ticker': ticker, 'mean': mean, 'threshold': threshold,
            'sharpe': mean / std, 'count': moves.count(),
            'count_pos': (moves > 0).sum(),
            'count_neg': (moves < 0).sum(),
            'std': std
        }
        if drawdowns is not None:
            # :TODO: add stats about drawdown durations
            dd = drawdowns[ticker]
            row['mean_dd'] = dd.mean()
            row['count_dd'] = dd.count()
            row['max_dd'] = dd.min()
        rows.append(row)
    return rows
def calculate_stats_for_thresholds(
        close_prices, hi_prices, strategy_fn, *args):
    """Run `strategy_fn` over a fixed grid of buy thresholds and gather the
    per-ticker stats into one DataFrame (one row per ticker/threshold).

    `strategy_fn` must return a single returns DataFrame; extra `*args`
    are forwarded to it after the threshold.
    """
    all_stats = []
    MOVE_THRESHOLDS = [0.02, 0.03, 0.04, 0.05]
    for threshold in MOVE_THRESHOLDS:
        returns = strategy_fn(close_prices, hi_prices, threshold, *args)
        all_stats.extend(calculate_stats(returns, threshold))
    return pd.DataFrame(all_stats)
def calculate_stats_with_drawdowns_for_thresholds(
        close_prices, hi_prices, strategy_fn, *args):
    """
    As `calculate_stats_for_thresholds`, but for strategies that return a
    (returns, drawdowns) pair; drawdown stats are merged into each row.

    :TODO: Really duplicated with `calculate_stats_for_thresholds`
    due to jamming in dropdown calculations for the one strategy
    where it matters. Once we've cleaned up our strategy interfaces
    this should be updated / removed.
    """
    all_stats = []
    MOVE_THRESHOLDS = [0.02, 0.03, 0.04, 0.05]
    for threshold in MOVE_THRESHOLDS:
        returns, drawdowns = strategy_fn(
            close_prices, hi_prices, threshold, *args)
        all_stats.extend(calculate_stats(
            returns, threshold, drawdowns=drawdowns))
    return pd.DataFrame(all_stats)
| {"/scripts/chart_seasonality_example.py": ["/src/utils/get_prices.py", "/src/charting/chart_seasonality.py"], "/src/utils/load_bucket_prices.py": ["/src/utils/get_prices.py"], "/scripts/threshold_momentum_buckets.py": ["/src/strats/threshold_momentum.py", "/src/utils/load_bucket_prices.py"]} |
73,338 | yh2n/financial-analysis-v2 | refs/heads/master | /src/utils/whale_wisdom_cleaning.py | import pandas as pd
from collections import defaultdict
def clean_holdings(holdings):
    """Normalise raw WhaleWisdom holdings.

    Parses quarter strings to datetimes, strips tickers and converts '.'
    to '-' (Tiingo's ticker format), deduplicates positions, and drops
    invalid positions. Note the first assignment mutates the input frame.
    """
    holdings['quarter'] = pd.to_datetime(holdings['quarter'])
    holdings.loc[:, 'stock_ticker'] = holdings['stock_ticker'].str.strip()
    # Change . to - to match Tiingo format
    holdings.loc[:, 'stock_ticker'] = holdings['stock_ticker'] \
        .str.replace('.', '-', regex=False)
    holdings = dedup_holdings(holdings)
    holdings = filter_valid_positions(holdings)
    return holdings
def resolve_holdings_and_fund_names(holdings, funds):
    """Fund lists downloaded from the website can sometimes have
    different names to the holdings data, even though the ids are the
    same. For anaylsis it's nicer to have them all together.

    It may also be the case that the names change through time in the
    holdings data, too.

    Parameters
    ----------
    holdings : pd.DataFrame
    funds : pd.DataFrame
        Collection of funds from the WW website filter.
        Expects 'Filer' column to hold the filer name.

    Returns
    -------
    (holdings, funds) with filer names reconciled.
    """
    # `_filer_id_to_name_mapping` is defined elsewhere in this module;
    # presumably it returns a Series indexed by filer_id — TODO confirm.
    id_to_name = _filer_id_to_name_mapping(holdings, funds)
    # Indexing the mapping by the id columns yields one name per row;
    # .values drops the index so assignment is positional.
    holdings.loc[:, 'filer_name'] = id_to_name[holdings['filer_id']].values
    funds.loc[:, 'Filer'] = id_to_name[funds['filer_id']].values
    return holdings, funds
def exclude_missing_prices(holdings, prices):
    """Drop holdings whose ticker has no Tiingo price history at the
    holding's quarter date.

    Some tickers are supported on Tiingo but refer to a different
    security than the 13F ticker. Requiring the quarter to fall on or
    after the ticker's first priced date filters most of these out.
    """
    first_priced = prices.apply(lambda col: col.first_valid_index())
    known_tickers = prices.columns.intersection(first_priced.index)
    with_prices = holdings.loc[holdings['stock_ticker'].isin(known_tickers)]
    start_dates = first_priced[with_prices['stock_ticker']].values
    return with_prices.loc[with_prices['quarter'] >= start_dates]
def dedup_holdings(holdings):
    """Resolve doubled-up rows by keeping the highest-MV position.

    Sometimes tickers are doubled up; sometimes the stock names are
    doubled up. There can be many reasons for this, but the best bet is
    to keep, per (filer, ticker/name, quarter), the largest market value.
    """
    by_mv = holdings.sort_values('current_mv')
    by_mv = by_mv.drop_duplicates(
        subset=['filer_name', 'stock_ticker', 'quarter_id'], keep='last')
    return by_mv.drop_duplicates(
        subset=['filer_name', 'stock_name', 'quarter_id'], keep='last')
def filter_valid_positions(holdings):
    """Keep only ordinary-share positions that are still held."""
    is_share = holdings['security_type'] == 'SH'
    still_held = holdings['position_change_type'] != 'soldall'
    has_shares = holdings['current_shares'] != 0
    # Around 300 rows have current_mv < 0; ~6.6k have current_mv == 0.
    # As there may be an innocuous issue with WW's current_mv calc,
    # rows equal to 0 are kept and only negatives dropped.
    nonneg_mv = holdings['current_mv'] >= 0
    kept = holdings.loc[is_share & still_held & has_shares & nonneg_mv]
    return filter_valid_tickers(kept)
def filter_valid_tickers(holdings):
    """Drop rows whose ticker is missing, empty, or contains digits."""
    holdings = holdings.loc[holdings['stock_ticker'].notna()]
    tickers = holdings['stock_ticker']
    looks_valid = (tickers.str.len() > 0) & ~tickers.str.contains('[0-9]')
    return holdings.loc[looks_valid]
def filter_young_funds(holdings):
    """Drop holdings of funds with fewer than 8 distinct quarters
    (two years) of data in `holdings`. Not currently used in analyses."""
    quarters_per_fund = holdings.groupby('filer_name').apply(
        lambda fund_rows: fund_rows['quarter_id'].nunique())
    too_young = quarters_per_fund.index[quarters_per_fund < 8]
    return holdings[~holdings['filer_name'].isin(too_young)]
def _filer_id_to_name_mapping(holdings, fund_list):
name_to_id = defaultdict(set)
id_to_name = {}
for data, name_key in [(holdings, 'filer_name'), (fund_list, 'Filer')]:
unique_id_name_pairs = data[[name_key, 'filer_id']].drop_duplicates()
for _, pair in unique_id_name_pairs.iterrows():
name_to_id[pair[name_key]].add(pair['filer_id'])
id_to_name[pair['filer_id']] = pair[name_key]
assert all([len(v) == 1 for v in name_to_id.values()])
return pd.Series(id_to_name)
| {"/scripts/chart_seasonality_example.py": ["/src/utils/get_prices.py", "/src/charting/chart_seasonality.py"], "/src/utils/load_bucket_prices.py": ["/src/utils/get_prices.py"], "/scripts/threshold_momentum_buckets.py": ["/src/strats/threshold_momentum.py", "/src/utils/load_bucket_prices.py"]} |
73,339 | yh2n/financial-analysis-v2 | refs/heads/master | /src/utils/get_target_prices.py | import os
import quandl
# Data sources accepted by get_target_prices.
SOURCES = ['quandl']
# Read once at import; None when the env var is unset (requests then
# go out unauthenticated).
QUANDL_API_KEY = os.environ.get('QUANDL_API_KEY', None)
def get_target_prices(source, tickers):
    """Fetch analyst target prices for `tickers` from `source`.

    Raises NotImplementedError for any source not in SOURCES.
    """
    if source not in SOURCES:
        raise NotImplementedError(
            'Source {} not supported, only {} supported'.format(
                source, ', '.join(SOURCES)))
    if source == 'quandl':
        return quandl.get_table(
            'ZACKS/TP', ticker=tickers, api_key=QUANDL_API_KEY)
| {"/scripts/chart_seasonality_example.py": ["/src/utils/get_prices.py", "/src/charting/chart_seasonality.py"], "/src/utils/load_bucket_prices.py": ["/src/utils/get_prices.py"], "/scripts/threshold_momentum_buckets.py": ["/src/strats/threshold_momentum.py", "/src/utils/load_bucket_prices.py"]} |
73,340 | yh2n/financial-analysis-v2 | refs/heads/master | /fe_charts_django/views.py | from django.http import HttpResponse
from django.shortcuts import render
import sys
sys.path.append('./scripts')
import io
from chart_seasonality_example import rendering_charts
def home_page(request):
    """Render the static home page template."""
    return render(request, "home.html")
# def get_new_charts(request):
# if request.method == 'POST':
# selected_ticker = request.POST.get('ticker')
# daily_img_path = "reports/charts/seasonality_daily_{selected_ticker}_2011-01-01_2020-12-31.jpg".format(selected_ticker=selected_ticker)
# monthly_img_path = "reports/charts/seasonality_monthly_{selected_ticker}_2011-01-01_2020-12-31.jpg".format(selected_ticker=selected_ticker)
# rendering_charts(selected_ticker)
# print(f"********** Getting {selected_ticker} charts **********")
# return render(request, "charts.html", {"daily_img_path": daily_img_path, "monthly_img_path": monthly_img_path})
def get_new_charts(request):
    """Generate seasonality charts for the POSTed ticker and render them.

    `rendering_charts` writes the chart images; the template receives the
    image paths relative to the static root.

    NOTE(review): the image paths hard-code the 2011-01-01..2020-12-31
    window, while rendering_charts derives its window from today's date —
    these only agree when run in 2021. Confirm and parameterize.
    """
    if request.method == 'POST':
        selected_ticker = request.POST.get('ticker')
        daily_img_path = (
            "charts/seasonality_daily_{selected_ticker}_2011-01-01_2020-12-31.jpg"
            .format(selected_ticker=selected_ticker))
        monthly_img_path = (
            "charts/seasonality_monthly_{selected_ticker}_2011-01-01_2020-12-31.jpg"
            .format(selected_ticker=selected_ticker))
        rendering_charts(selected_ticker)
        print(f"********** Getting {selected_ticker} charts **********")
        return render(request, "charts.html",
                      {"daily_img_path": daily_img_path,
                       "monthly_img_path": monthly_img_path})
    # Bug fix: a non-POST request previously fell through and returned
    # None, which Django rejects. Fall back to the home page (the same
    # template home_page serves).
    return render(request, "home.html")
| {"/scripts/chart_seasonality_example.py": ["/src/utils/get_prices.py", "/src/charting/chart_seasonality.py"], "/src/utils/load_bucket_prices.py": ["/src/utils/get_prices.py"], "/scripts/threshold_momentum_buckets.py": ["/src/strats/threshold_momentum.py", "/src/utils/load_bucket_prices.py"]} |
73,341 | yh2n/financial-analysis-v2 | refs/heads/master | /scripts/chart_seasonality_example.py | import json
from datetime import datetime
from pathlib import Path
import io
import sys
sys.path.append('../src')
from src.utils.get_prices import get_prices
from src.charting.chart_seasonality import (chart_monthly_seasonality,
chart_cum_avg_daily_rtns)
# Tiingo API key is read from this JSON file ('api_key' entry).
SECRETS_PATH = Path('tiingo_secrets.json')
# Chart images are written here and served from the Django static dir.
OUTPUT_PATH = Path('static/reports/charts')
# Bug fix: Path('OUTPUT_PATH').mkdir(...) created a literal directory
# named "OUTPUT_PATH"; we want the charts output directory itself.
OUTPUT_PATH.mkdir(parents=True, exist_ok=True)
# if __name__ == '__main__':
# def rendering_charts(selected_ticker):
# with open(SECRETS_PATH) as f:
# api_key = json.load(f)['api_key']
# lookback = 10
# today = datetime.today()
# end = f'{today.year - 1}-12-31'
# start = f'{today.year - lookback}-01-01'
# # tk = 'SPY'
# tk = selected_ticker
# print(f"+++++++++++ Ticker: {tk} ++++++++++")
# prices = get_prices([tk], start, end, api_key=api_key)['adj_close'][tk]
# output1_name = f'seasonality_monthly_{tk}_{start}_{end}.jpg'
# output1_path = OUTPUT_PATH / output1_name
# output2_name = f'seasonality_daily_{tk}_{start}_{end}.jpg'
# output2_path = OUTPUT_PATH / output2_name
# print("Charting...")
# chart_monthly_seasonality(prices, output1_path)
# chart_cum_avg_daily_rtns(prices, output2_path)
# print("Done!")
def rendering_charts(selected_ticker):
    """Fetch adjusted-close prices for `selected_ticker` over the last
    ten full calendar years and write monthly and daily seasonality
    charts into OUTPUT_PATH."""
    with open(SECRETS_PATH) as f:
        api_key = json.load(f)['api_key']
    lookback = 10
    today = datetime.today()
    # Window covers whole calendar years only: last year back `lookback`.
    end = f'{today.year - 1}-12-31'
    start = f'{today.year - lookback}-01-01'
    tk = selected_ticker
    print(f"+++++++++++ Ticker: {tk} ++++++++++")
    prices = get_prices([tk], start, end, api_key=api_key)['adj_close'][tk]
    chart_jobs = [
        ('monthly', chart_monthly_seasonality),
        ('daily', chart_cum_avg_daily_rtns),
    ]
    print("Charting...")
    for freq, chart_fn in chart_jobs:
        out_name = f'seasonality_{freq}_{tk}_{start}_{end}.jpg'
        chart_fn(prices, OUTPUT_PATH / out_name)
    print("Done!")
| {"/scripts/chart_seasonality_example.py": ["/src/utils/get_prices.py", "/src/charting/chart_seasonality.py"], "/src/utils/load_bucket_prices.py": ["/src/utils/get_prices.py"], "/scripts/threshold_momentum_buckets.py": ["/src/strats/threshold_momentum.py", "/src/utils/load_bucket_prices.py"]} |
def bootstrap(returns, statistic_fn, size, num_samples):
    """Bootstrap a statistic over resampled returns.

    Parameters
    ----------
    returns : pd.Series
        Series of returns to sample from; NaNs are dropped first.
    statistic_fn : callable
        Function expecting a single Series argument and returning a
        scalar value.
    size : int
        Size of each bootstrap sample.
    num_samples : int
        Number of bootstrap samples to draw.

    Returns
    -------
    list
        The statistic calculated on each bootstrap sample.
    """
    observed = returns.dropna()
    stats = []
    for _ in range(num_samples):
        resample = observed.sample(size, replace=True)
        stats.append(statistic_fn(resample))
    return stats
| {"/scripts/chart_seasonality_example.py": ["/src/utils/get_prices.py", "/src/charting/chart_seasonality.py"], "/src/utils/load_bucket_prices.py": ["/src/utils/get_prices.py"], "/scripts/threshold_momentum_buckets.py": ["/src/strats/threshold_momentum.py", "/src/utils/load_bucket_prices.py"]} |
73,343 | yh2n/financial-analysis-v2 | refs/heads/master | /src/utils/whale_wisdom_api.py | import base64
import hashlib
import hmac
import os
import json
import requests
import time
import pandas as pd
from pathlib import Path
from urllib.parse import urlencode
# All WW shell commands are POSTed as signed GETs to this endpoint.
API_ENDPOINT = 'https://whalewisdom.com/shell/command.json'
# Upper bound on filers per holdings request; enforced in
# _make_single_holdings_request and used for batching.
MAX_NUM_FILERS = 10
# Only request these columns from the API to reduce space usage
keep_cols = ['filer_id', 'filer_name', 'stock_id', 'stock_name',
             'stock_ticker', 'security_type', 'shares_change',
             'position_change_type', 'current_ranking', 'previous_ranking',
             'current_percent_of_portfolio', 'previous_percent_of_portfolio',
             'current_mv', 'previous_mv', 'current_shares', 'previous_shares',
             'percent_ownership', 'avg_price', 'percent_change',
             'quarter_id_owned', 'quarter', 'quarter_id', 'sector', 'industry']
# Requests require IDs, so these are the IDs of the above columns
keep_col_ids = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
                10, 11, 12, 13, 14, 15, 20, 25, 26, 27]
def make_holdings_request(filer_ids, quarter_ids, temp_dir=None):
    """Get holdings data for all `filer_ids` for all `quarter_ids`.

    Requests are issued in chunks of MAX_NUM_FILERS filer ids. Passing
    `temp_dir` persists each batch there, so that not all is lost on an
    unexpected error during a large fetch.

    Parameters
    ----------
    filer_ids : list of int
    quarter_ids : list of int
    temp_dir : None, optional
        Directory to save intermediate results to.

    Returns
    -------
    pd.DataFrame
    """
    ordered_ids = sorted(filer_ids)
    frames = []
    for start in range(0, len(ordered_ids), MAX_NUM_FILERS):
        chunk = ordered_ids[start:start + MAX_NUM_FILERS]
        frames.append(_make_single_holdings_request(
            chunk, quarter_ids, temp_dir=temp_dir))
        print(f'Finished batch {start / MAX_NUM_FILERS}')
    return pd.concat(frames)
def make_quarters_request():
    """Fetch metadata for all quarters, indexed by filing period."""
    response = _make_ww_request({'command': 'quarters'})
    _check_ww_response(response)
    quarters = pd.DataFrame(response.json()['quarters'])
    quarters['filing_period'] = pd.to_datetime(quarters['filing_period'])
    return quarters.set_index('filing_period')
def _make_single_holdings_request(
        filer_ids, quarter_ids, temp_dir=None):
    """Wrapped WW holdings request for one batch of filers.

    Accepts up to MAX_NUM_FILERS filer ids and any number of quarters.
    Optionally saves the resulting frame into `temp_dir`.

    Parameters
    ----------
    See `make_holdings_request`.

    Returns
    -------
    pd.DataFrame
        One row per position per fund per quarter.
    """
    if len(filer_ids) > MAX_NUM_FILERS:
        raise ValueError(
            f'Cannot include more than {MAX_NUM_FILERS} filers in one request')
    response = _make_ww_request({
        'command': 'holdings',
        'filer_ids': filer_ids,
        'quarter_ids': quarter_ids,
        'columns': keep_col_ids
    })
    _check_ww_response(response)
    frame = _process_holdings_results(response.json()['results'])
    if temp_dir is not None:
        temp_dir = Path(temp_dir) if isinstance(temp_dir, str) else temp_dir
        filer_key = f'{min(filer_ids)}-{max(filer_ids)}'
        quarter_key = f'{min(quarter_ids)}-{max(quarter_ids)}'
        fname = f'ww_filers_{filer_key}_quarters_{quarter_key}.csv'
        frame.to_csv(temp_dir / fname)
    return frame
def _make_ww_request(args):
    """Send a signed command to the WW shell endpoint.

    `args` may be a dict (serialized here) or a pre-serialized JSON
    string.

    Returns
    -------
    requests.Response object
    """
    serialized = args if isinstance(args, str) else json.dumps(args)
    # safe='/+=' keeps base64 signature characters unescaped.
    query = urlencode(_ww_arguments(serialized), safe='/+=')
    return requests.get(API_ENDPOINT, params=query)
def _ww_arguments(arguments):
    """Build the request-parameter dict WW expects from the serialized
    arguments string.

    Parameters
    ----------
    arguments : str
    """
    timestamp = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
    return {
        'args': arguments,
        'api_shared_key': os.getenv('WW_SHARED_KEY'),
        'api_sig': _ww_sig(arguments, timestamp),
        'timestamp': timestamp
    }
def _ww_sig(arguments, timestamp):
"""Generate the WW signature string required for each request
Details: https://whalewisdom.com/shell/api_help
Parameters
----------
arguments : str
timestamp : str
"""
digest = hashlib.sha1
secret_key = os.getenv('WW_SECRET_KEY')
raw_args = arguments + '\n' + timestamp
hmac_hash = hmac.new(
secret_key.encode(), raw_args.encode(), digest).digest()
return base64.b64encode(hmac_hash).rstrip().decode()
def _check_ww_response(resp):
resp.raise_for_status()
if resp.text == 'Unknown error processing your command':
raise ValueError('Got unknown WhaleWisdom error with 200 status')
if 'errors' in resp:
raise ValueError(f'WhaleWisdom responded with error: {resp["errors"]}')
def _process_holdings_results(results_json):
    """Flatten the WW holdings 'results' payload into one DataFrame.

    Parameters
    ----------
    results_json : list
        Value of the 'results' entry from a valid response from the WW
        holdings endpoint: a list of per-filer dicts, each containing
        per-quarter filing records with holdings info.

    Returns
    -------
    pd.DataFrame
        All extracted holdings of all filers; 'quarter' is parsed to
        datetime whenever any rows exist.
    """
    positions = [
        position
        for filer_results in results_json
        for filing_record in filer_results['records']
        for position in _flatten_holdings_filing_record(filing_record)
    ]
    frame = pd.DataFrame(positions)
    if not frame.empty:
        frame['quarter'] = pd.to_datetime(frame['quarter'])
    return frame
def _flatten_holdings_filing_record(holdings_result):
"""
Parameters
----------
holdings_result : dict
All holdings data for one filer for one quarter.
`holdings` key contains the actual stock holdings for
the quarter. Other keys represent quarter and fund
level data.
Quarter and fund level data is extracted and added to each holdings
data 'row' so that each final row has all relevant information.
Returns
-------
list of dicts
Each dict is data for a holding in that quarter.
"""
holdings = holdings_result['holdings']
other_items = holdings_result.copy()
del other_items['holdings']
for holding in holdings:
holding.update(other_items)
return holdings
| {"/scripts/chart_seasonality_example.py": ["/src/utils/get_prices.py", "/src/charting/chart_seasonality.py"], "/src/utils/load_bucket_prices.py": ["/src/utils/get_prices.py"], "/scripts/threshold_momentum_buckets.py": ["/src/strats/threshold_momentum.py", "/src/utils/load_bucket_prices.py"]} |
73,344 | yh2n/financial-analysis-v2 | refs/heads/master | /src/test/test_portfolio.py | import unittest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
from strats.portfolio import Portfolio
class TestPortfolio(unittest.TestCase):
    """Unit tests for strats.portfolio.Portfolio bookkeeping.

    Fixture: a 3-day, 2-ticker close-price panel; AAL halves in price
    after day one, ZG is flat throughout.
    """
    days = ['2012-01-03', '2012-01-04', '2012-01-05']
    prices = pd.DataFrame(
        [{'AAL': 2.0, 'ZG': 10.0},
         {'AAL': 1.0, 'ZG': 10.0},
         {'AAL': 1.0, 'ZG': 10.0}],
        index=days)
    tickers = ['AAL', 'ZG']

    def setUp(self):
        # Fresh portfolio per test.
        self.p = Portfolio(self.prices)

    def test_buy(self):
        """A single buy records position, cost basis and buy signal."""
        day = self.days[0]
        self.p.buy(day, ['AAL'], self.prices.loc[day])
        expected_pos = pd.Series({'AAL': 1.0})
        expected_last_buys = pd.Series({'AAL': 2.0})
        expected_buy_signals = pd.DataFrame({'AAL': 1.0}, index=[day])
        assert_series_equal(self.p._positions, expected_pos)
        assert_series_equal(self.p.last_buy_price, expected_last_buys)
        assert_frame_equal(self.p.buy_signals, expected_buy_signals)

    def test_sell(self):
        """Same-day round trip clears state and books the return."""
        day = self.days[0]
        sell_prices = pd.Series([4.0, 10.0], index=self.tickers)
        self.p.buy(day, ['AAL'], self.prices.loc[day])
        self.p.sell(day, ['AAL'], sell_prices)
        expected_pos = pd.Series(dtype=float)
        expected_last_buys = pd.Series(dtype=float)
        expected_buy_signals = pd.DataFrame({'AAL': 1.0}, index=[day])
        expected_sell_signals = expected_buy_signals
        expected_returns = pd.DataFrame({'AAL': [1.0]},
                                        index=[day])
        assert_series_equal(self.p._positions, expected_pos)
        assert_series_equal(self.p.last_buy_price, expected_last_buys)
        assert_frame_equal(self.p.buy_signals, expected_buy_signals)
        assert_frame_equal(self.p.sell_signals, expected_sell_signals)
        assert_frame_equal(self.p.returns, expected_returns)

    def test_sell_next_day(self):
        """Overnight hold books the return on the sell day (AAL -50%)."""
        buy_day = self.days[0]
        sell_day = self.days[1]
        self.p.buy(buy_day, ['AAL'], self.prices.loc[buy_day])
        self.p.sell(sell_day, ['AAL'], self.prices.loc[sell_day])
        expected_pos = pd.Series(dtype=float)
        expected_last_buys = pd.Series(dtype=float)
        expected_buy_signals = pd.DataFrame({'AAL': 1.0}, index=[buy_day])
        expected_sell_signals = pd.DataFrame({'AAL': 1.0}, index=[sell_day])
        expected_returns = pd.DataFrame({'AAL': -0.5}, index=[sell_day])
        assert_series_equal(self.p._positions, expected_pos)
        assert_series_equal(self.p.last_buy_price, expected_last_buys)
        assert_frame_equal(self.p.buy_signals, expected_buy_signals)
        assert_frame_equal(self.p.sell_signals, expected_sell_signals)
        assert_frame_equal(self.p.returns, expected_returns)

    def test_buy_consecutive(self):
        """Buys on different days accumulate; signals stay day-aligned."""
        buy_day = self.days[0]
        buy_day2 = self.days[1]
        self.p.buy(buy_day, ['AAL'], self.prices.loc[buy_day])
        self.p.buy(buy_day2, ['ZG'], self.prices.loc[buy_day2])
        expected_pos = pd.Series({'AAL': 1.0, 'ZG': 1.0})
        expected_last_buys = pd.Series({'AAL': 2.0, 'ZG': 10.0})
        expected_buy_signals = pd.DataFrame(
            [{'AAL': 1.0, 'ZG': np.nan},
             {'AAL': np.nan, 'ZG': 1.0}], index=[buy_day, buy_day2])
        assert_series_equal(self.p._positions, expected_pos)
        assert_series_equal(self.p.last_buy_price, expected_last_buys)
        assert_frame_equal(self.p.buy_signals, expected_buy_signals)

    def test_buy_multiple(self):
        """A single call can open several positions at once."""
        buy_day = self.days[0]
        prices = pd.Series([2.0, 10.0], index=self.tickers)
        self.p.buy(buy_day, ['AAL', 'ZG'], prices)
        expected_pos = pd.Series({'AAL': 1.0, 'ZG': 1.0})
        expected_last_buys = pd.Series({'AAL': 2.0, 'ZG': 10.0})
        expected_buy_signals = pd.DataFrame(
            {'AAL': 1.0, 'ZG': 1.0}, index=[buy_day])
        assert_series_equal(self.p._positions, expected_pos)
        assert_series_equal(self.p.last_buy_price, expected_last_buys)
        assert_frame_equal(self.p.buy_signals, expected_buy_signals)

    def test_sell_multiple(self):
        """Selling several tickers books each ticker's own return."""
        buy_day = self.days[0]
        sell_day = self.days[1]
        self.p.buy(buy_day, ['AAL', 'ZG'], self.prices.loc[buy_day])
        self.p.sell(sell_day, ['AAL', 'ZG'], self.prices.loc[sell_day])
        expected_pos = pd.Series(dtype=float)
        expected_last_buys = pd.Series(dtype=float)
        expected_buy_signals = pd.DataFrame(
            {'AAL': 1.0, 'ZG': 1.0}, index=[buy_day])
        expected_sell_signals = pd.DataFrame(
            {'AAL': 1.0, 'ZG': 1.0}, index=[sell_day])
        expected_returns = pd.DataFrame(
            {'AAL': -0.5, 'ZG': 0.0}, index=[sell_day])
        assert_series_equal(self.p._positions, expected_pos)
        assert_series_equal(self.p.last_buy_price, expected_last_buys)
        assert_frame_equal(self.p.buy_signals, expected_buy_signals)
        assert_frame_equal(self.p.sell_signals, expected_sell_signals)
        assert_frame_equal(self.p.returns, expected_returns)

    def test_hold_buy(self):
        # Buy on day 1 and hold for the rest of the period
        day = self.days[0]
        self.p.buy(day, ['AAL'], self.prices.loc[day])
        expected_pos = pd.Series({'AAL': 1.0})
        expected_last_buys = pd.Series({'AAL': 2.0})
        expected_buy_signals = pd.DataFrame({'AAL': 1.0}, index=[day])
        # Unrealized: daily returns are 0 after the drop, but marking to
        # market against the buy price shows -50% on both later days.
        expected_returns = pd.DataFrame(
            {'AAL': [0.0, 0.0]}, index=self.days[1:])
        expected_holding_returns = pd.DataFrame(
            {'AAL': [-0.5, -0.5]}, index=self.days[1:])
        expected_days_held = pd.Series({'AAL': 2})
        assert_series_equal(self.p._positions, expected_pos)
        assert_series_equal(self.p.last_buy_price, expected_last_buys)
        assert_frame_equal(self.p.buy_signals, expected_buy_signals)
        assert_frame_equal(self.p.returns, expected_returns)
        assert_frame_equal(self.p.holding_returns, expected_holding_returns)
        assert_series_equal(self.p.days_held, expected_days_held)

    def test_tick_diff_buys(self):
        """Staggered buys: per-ticker holding returns and days held."""
        day = self.days[0]
        day2 = self.days[1]
        day3 = self.days[2]
        self.p.buy(day, ['AAL'], self.prices.loc[day])
        self.p.buy(day2, ['ZG'], self.prices.loc[day2])
        expected_pos = pd.Series({'AAL': 1.0, 'ZG': 1.0})
        expected_last_buys = pd.Series({'AAL': 2.0, 'ZG': 10.0})
        expected_buy_signals = pd.DataFrame(
            [{'AAL': 1.0, 'ZG': np.nan},
             {'AAL': np.nan, 'ZG': 1.0}], index=[day, day2])
        expected_returns = pd.DataFrame(
            [{'AAL': 0.0, 'ZG': np.nan},
             {'AAL': 0.0, 'ZG': 0.0}], index=[day2, day3])
        expected_holding_returns = pd.DataFrame(
            [{'AAL': -0.5, 'ZG': np.nan},
             {'AAL': -0.5, 'ZG': 0.0}], index=[day2, day3])
        expected_days_held = pd.Series({'AAL': 2, 'ZG': 1})
        assert_series_equal(self.p._positions, expected_pos)
        assert_series_equal(self.p.last_buy_price, expected_last_buys)
        assert_frame_equal(self.p.buy_signals, expected_buy_signals)
        assert_frame_equal(self.p.returns, expected_returns)
        assert_frame_equal(self.p.holding_returns, expected_holding_returns)
        assert_series_equal(
            self.p.days_held, expected_days_held, check_names=False)
if __name__ == '__main__':
unittest.main()
| {"/scripts/chart_seasonality_example.py": ["/src/utils/get_prices.py", "/src/charting/chart_seasonality.py"], "/src/utils/load_bucket_prices.py": ["/src/utils/get_prices.py"], "/scripts/threshold_momentum_buckets.py": ["/src/strats/threshold_momentum.py", "/src/utils/load_bucket_prices.py"]} |
73,345 | yh2n/financial-analysis-v2 | refs/heads/master | /src/test/test_close_to_rolling_high.py | import unittest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from strats.close_to_rolling_high import (
ctrh_returns,
close_to_rolling_high)
def single_col_df(series):
    """Wrap `series` in a one-column float DataFrame keyed 'AAPL'."""
    return pd.DataFrame({'AAPL': pd.Series(series, dtype=float)})
class TestCloseToRollingHigh(unittest.TestCase):
    """Tests for the close-to-rolling-high indicator and its strategy
    returns (strats.close_to_rolling_high).

    Prices are laid out as week1 + week2 lists; the indicator is NaN for
    the first 5 days (rolling warm-up).
    """

    def test_sell(self):
        """Indicator crosses threshold, position exits with +50%."""
        close_prices = single_col_df(
            [1, 1, 1, 1, 1] + [1, 1])
        hi_prices = single_col_df(
            [1, 1, 1.5, 1, 1] + [1, 2])
        ctrh = close_to_rolling_high(close_prices, hi_prices)
        expected_ctrh = single_col_df(
            ([np.nan] * 5) + [0.5, 1.0])
        assert_frame_equal(ctrh, expected_ctrh)
        portfolio = ctrh_returns(close_prices, hi_prices, 1)
        expected = pd.DataFrame({'AAPL': 0.5}, index=[6])
        assert_frame_equal(
            portfolio.returns, expected)

    def test_holds_right_stock(self):
        """With two candidates, the strategy holds the better scorer."""
        close_prices = pd.DataFrame({
            'AAPL': [1] * 5 + [1, 2, 2],
            'AAPL_2': [1] * 5 + [1, 1, 1]
        })
        hi_prices = pd.DataFrame({
            'AAPL': [1, 1, 1.5, 1, 1] + [1, 2, 3],
            'AAPL_2': [1] * 5 + [1, 3, 1]
        })
        ctrh = close_to_rolling_high(close_prices, hi_prices)
        expected_ctrh = pd.DataFrame({
            'AAPL': ([np.nan] * 5) + [0.5, 1.0, 2.0],
            'AAPL_2': ([np.nan] * 5) + [0.0, 2.0, 2.0]
        })
        assert_frame_equal(ctrh, expected_ctrh)
        portfolio = ctrh_returns(close_prices, hi_prices, 1)
        expected = pd.DataFrame(
            [{'AAPL': 0.5, 'AAPL_2': np.nan},
             {'AAPL': np.nan, 'AAPL_2': 0.0}], index=[6, 7])
        assert_frame_equal(
            portfolio.returns, expected)

    def test_picks_new_stock(self):
        """After an exit, capital rotates into the new top scorer."""
        # Buy stock 1 on day 5 due to 1 -> 1.5 close to rolling high
        # then sell on 1st day of 2nd week (day 6) as returns are 100% > 50%.
        stock1_wk1_close = [1, 1, 1, 1, 1]
        stock1_wk2_close = [1, 2, 1]
        stock1_wk1_hi = [1, 1, 1.5, 1, 1]
        stock1_wk2_hi = [1, 2, 1]
        # On day 6 we can buy again after selling.
        # rolling close-to-high of stock 1 is going to be
        # 50% (day 2 close vs high over days 2-7)
        # while stock 2's is going to be 200%
        # (due to the high on day 5).
        # So stock 2 will be bought on day 6 at a price of 0.5
        # and then sold the following day.
        stock2_wk1_close = [1, 1, 1, 1, 1]
        stock2_wk2_close = [1, 0.5, 1.5]
        stock2_wk1_hi = [1, 1, 1, 1, 1]
        stock2_wk2_hi = [1, 3, 1.5]
        close_prices = pd.DataFrame({
            'AAPL': stock1_wk1_close + stock1_wk2_close,
            'AAPL_2': stock2_wk1_close + stock2_wk2_close
        })
        hi_prices = pd.DataFrame({
            'AAPL': stock1_wk1_hi + stock1_wk2_hi,
            'AAPL_2': stock2_wk1_hi + stock2_wk2_hi,
        })
        ctrh = close_to_rolling_high(close_prices, hi_prices)
        expected_ctrh = pd.DataFrame({
            'AAPL': ([np.nan] * 5) + [0.5, 1.0, 1.0],
            'AAPL_2': ([np.nan] * 5) + [0.0, 2.0, 2.0]
        })
        assert_frame_equal(ctrh, expected_ctrh)
        portfolio = ctrh_returns(close_prices, hi_prices, 1)
        expected = pd.DataFrame(
            [{'AAPL': 0.5, 'AAPL_2': np.nan},
             {'AAPL': np.nan, 'AAPL_2': 2.0}], index=[6, 7])
        assert_frame_equal(
            portfolio.returns, expected)
if __name__ == '__main__':
unittest.main()
| {"/scripts/chart_seasonality_example.py": ["/src/utils/get_prices.py", "/src/charting/chart_seasonality.py"], "/src/utils/load_bucket_prices.py": ["/src/utils/get_prices.py"], "/scripts/threshold_momentum_buckets.py": ["/src/strats/threshold_momentum.py", "/src/utils/load_bucket_prices.py"]} |
73,346 | yh2n/financial-analysis-v2 | refs/heads/master | /src/strats/portfolio.py | import pandas as pd
from collections import defaultdict
def union_add(this, that):
    """Element-wise sum of two Series over the union of their indexes,
    filling with 0 wherever a label exists in only one of them.

    Parameters
    ----------
    this, that : pd.Series
    """
    return that.add(this, fill_value=0)
def validate_tickers(tickers):
    """Return `tickers` as a sorted Index; reject duplicates."""
    index = pd.Index(tickers)
    if index.has_duplicates:
        raise ValueError('Duplicate tickers provided.')
    return index.sort_values()
def validate_prices(tickers, prices):
    """Raise if any ticker lacks an entry in `prices`."""
    covered = tickers.isin(prices.index)
    if not covered.all():
        raise ValueError('Prices not provided for all tickers.')
def validate_weights(tickers, weights):
    """Normalise `weights` to a Series aligned with `tickers`.

    A scalar is broadcast over all tickers. A Series must cover exactly
    the tickers (in any order) and is returned sorted to match.
    """
    if isinstance(weights, (float, int)):
        return pd.Series(weights, index=tickers)
    if tickers.identical(weights.index):
        return weights
    if weights.index.sort_values().identical(tickers):
        return weights.sort_index()
    raise ValueError('Weights index does not match tickers.')
def dict_to_df(dicto):
    """DataFrame whose rows are the dict's values, indexed by its keys."""
    return pd.DataFrame(list(dicto.values()), index=list(dicto.keys()))
def valid_range(df):
    """Trim leading and trailing all-NaN rows from `df`."""
    return df.loc[df.first_valid_index():df.last_valid_index()]
class Portfolio:
    """Backtest bookkeeping over a close-price panel.

    Tracks open positions, last buy prices, buy/sell signals, realized
    returns and daily holdings, all keyed by trading day (taken from the
    close-price index). Only whole unit-weight positions are supported.
    """

    def __init__(self, close_prices):
        # close_prices: DataFrame of close prices, one column per ticker.
        self._trading_days = close_prices.index.to_series()
        self._close_prices = close_prices
        # Current open position sizes per ticker.
        self._positions = pd.Series(dtype=float)
        # Cost basis per currently-held ticker.
        self.last_buy_price = pd.Series(dtype=float)
        # Per-day records, each a Series keyed by ticker.
        self._returns = defaultdict(lambda: pd.Series(dtype=float))
        self._buy_signals = defaultdict(lambda: pd.Series(dtype=float))
        self._sell_signals = defaultdict(lambda: pd.Series(dtype=float))
        self._holdings = defaultdict(lambda: pd.Series(dtype=float))
        self._last_buy_prices = defaultdict(lambda: pd.Series(dtype=float))

    def buy(self, day, tickers, prices, weights=1.0):
        """Open unit positions in `tickers` at `prices` on `day`.

        Only weights == 1.0 are supported; holdings are recorded as of
        the following trading day.
        """
        if not isinstance(weights, (int, float)) or weights != 1.0:
            raise ValueError('Fractional buying not yet implemented.')
        tickers = validate_tickers(tickers)
        if tickers.empty:
            return
        validate_prices(tickers, prices)
        weights = validate_weights(tickers, weights)
        self._positions = union_add(self._positions, weights)
        self.last_buy_price = union_add(
            self.last_buy_price, prices[tickers] * weights)
        self._buy_signals[day] = union_add(self._buy_signals[day], weights)
        next_day = self._next_trading_day(day)
        if next_day:
            self._holdings[next_day] = self._positions.copy()
        # NOTE(review): source indentation was lost; this snapshot is
        # placed outside the `if next_day` guard (it is keyed by `day`,
        # not `next_day`) — confirm against the original file.
        self._last_buy_prices[day] = self.last_buy_price.copy()

    def sell(self, day, tickers, prices, weights=1.0):
        """Close unit positions in `tickers` at `prices` on `day`,
        booking the realized return against the last buy price."""
        if not isinstance(weights, (int, float)) or weights != 1.0:
            raise ValueError('Fractional selling not yet implemented.')
        tickers = validate_tickers(tickers)
        if tickers.empty:
            return
        validate_prices(tickers, prices)
        weights = validate_weights(tickers, weights)
        self._positions[tickers] -= weights
        returns = (prices[tickers] / self.last_buy_price[tickers]) - 1
        self._returns[day] = union_add(self._returns[day], returns * weights)
        self._sell_signals[day] = union_add(self._sell_signals[day], weights)
        # Fully-closed positions drop out of the books entirely.
        closed_positions = self.tickers_held[self._positions == 0]
        self._positions.drop(closed_positions, inplace=True)
        self.last_buy_price.drop(closed_positions, inplace=True)
        next_day = self._next_trading_day(day)
        if next_day:
            self._holdings[next_day] = self._positions.copy()

    def _next_trading_day(self, day):
        """Next trading day, according to self._trading_days as extracted
        from the price series.

        Used for cases where an action should be associated with the
        following day for some bookkeeping. E.g. `self.holdings` returns
        what was held at the beginning of a day, so should reflect buys
        from the previous day.

        Returns
        -------
        pd.Timestamp, None:
            None if next day is not in the period.
        """
        next_day = self._trading_days.shift(-1)[day]
        return next_day if not pd.isnull(next_day) else None

    def _tdays_between(self, day1, day2):
        # Number of trading days from day1 to day2 (positional diff).
        return self._trading_days.index.get_loc(day2) - \
            self._trading_days.index.get_loc(day1)

    @property
    def tickers_held(self):
        # Sorted index of currently-open tickers.
        return self._positions.index.sort_values()

    @property
    def returns(self):
        # Realized returns: one row per sell day, one column per ticker.
        return dict_to_df(self._returns)

    @property
    def daily_returns(self):
        """Mean daily close-to-close return across held positions."""
        all_returns = self._close_prices.pct_change()[self.holdings.columns]
        daily_returns = self.holdings * all_returns
        return daily_returns.mean(axis=1).dropna()

    @property
    def holdings(self):
        # Positions held at the start of each trading day, forward-filled
        # between events and trimmed to the traded range.
        holdings = dict_to_df(self._holdings) \
            .reindex(self._trading_days, method='ffill')
        return valid_range(holdings)

    @property
    def holding_returns(self):
        """At each time step, for positions held at that time,
        the returns that would be realised if they were sold at that
        point.

        NOTE: Does not currently reflect different position weights.

        Returns
        -------
        pd.DataFrame
            of all trading days, for each ticker ever held over the
            backtest. Entries are NaN for tickers not held on those days.
        """
        buy_prices = dict_to_df(self._last_buy_prices) \
            .reindex(self._trading_days, method='ffill')
        holding_returns = (self.holdings * self._close_prices / buy_prices) - 1
        return valid_range(holding_returns)[buy_prices.columns]

    @property
    def buy_signals(self):
        # Buy weights: one row per buy day, one column per ticker.
        return dict_to_df(self._buy_signals)

    @property
    def sell_signals(self):
        # Sell weights: one row per sell day, one column per ticker.
        return dict_to_df(self._sell_signals)

    @property
    def days_held(self):
        # Trading days from each open ticker's last buy to the most
        # recent day it appears in holdings.
        return self.tickers_held.to_series() \
            .apply(lambda tick: self._tdays_between(
                self.buy_signals[tick].last_valid_index(),
                self.holdings[tick].last_valid_index()))

    def stats(self):
        """Summary stats of daily returns (Sharpe annualized by
        sqrt(252)) plus the count of days with any sell."""
        returns = self.daily_returns
        mean = returns.mean()
        stddev = returns.std()
        return {
            'mean': mean,
            'stddev': stddev,
            'sharpe': mean / stddev * (252 ** 0.5),
            'num_traded_days': (~self.sell_signals.isna()).any(axis=1).sum()
        }
| {"/scripts/chart_seasonality_example.py": ["/src/utils/get_prices.py", "/src/charting/chart_seasonality.py"], "/src/utils/load_bucket_prices.py": ["/src/utils/get_prices.py"], "/scripts/threshold_momentum_buckets.py": ["/src/strats/threshold_momentum.py", "/src/utils/load_bucket_prices.py"]} |
73,347 | yh2n/financial-analysis-v2 | refs/heads/master | /src/strats/sharpe_rollover.py | import logging
import numpy as np
import pandas as pd
from pandas.tseries.holiday import USFederalHolidayCalendar
from strats.portfolio import Portfolio
log = logging.getLogger('sharpe_rollover')
def sharpe(returns, freq='daily', verbose=False):
    """Calculate the annualized Sharpe ratio.

    Parameters
    ----------
    returns : pd.Series
    freq : str, optional
        Either 'daily' or 'monthly', to determine the annualizing
        factor (252 or 12).
    verbose : bool, optional
        Log a warning when the ratio is undefined.

    Returns
    -------
    float
        Annualized Sharpe ratio; NaN when undefined (only one traded
        day in the window, or all returns zero).

    Raises
    ------
    ValueError
        For an unrecognised `freq`.
    """
    if freq == 'daily':
        annualize_factor = 252
    elif freq == 'monthly':
        annualize_factor = 12
    else:
        raise ValueError("freq must be one of 'daily' or 'monthly'")
    if (~returns.isna()).sum() == 1:
        if verbose:
            # Bug fix: Logger.warn is deprecated; use warning().
            log.warning(
                'Sharpe undefined because there was only one '
                'traded day in window.')
        return np.nan
    elif (returns[~returns.isna()] == 0).all():
        if verbose:
            log.warning('Sharpe undefined because all returns were 0.')
        return np.nan
    # np.std defaults to ddof=0 (population std), as in the original.
    return np.mean(returns) / np.std(returns) * (annualize_factor ** 0.5)
def next_business_day(date):
    """Roll `date` forward to the nearest US business day.

    Parameters
    ----------
    date : pd.Timestamp, str

    Returns
    -------
    pd.Timestamp
        `date` itself when it is already a business day, otherwise the
        first business day after it (US federal holidays observed).
    """
    one_bday = pd.tseries.offsets.CustomBusinessDay(
        n=1, calendar=USFederalHolidayCalendar())
    return one_bday.rollforward(date)
def prev_business_day(date):
    """Roll `date` back to the nearest US business day.

    Parameters
    ----------
    date : pd.Timestamp, str

    Returns
    -------
    pd.Timestamp
        `date` itself when it is already a business day, otherwise the
        last business day before it (US federal holidays observed).
    """
    one_bday = pd.tseries.offsets.CustomBusinessDay(
        n=1, calendar=USFederalHolidayCalendar())
    return one_bday.rollback(date)
def get_trade_dates(start, end, step):
    """List the rollover dates from `start` up to `end`.

    Each date is rolled forward to a business day, then the cursor
    advances by `step`. The last appended date may exceed `end` by up
    to the business-day roll.

    Parameters
    ----------
    start, end : pd.Timestamp
    step : pd.DateOffset

    Returns
    -------
    list of pd.Timestamp
    """
    dates = []
    cursor = start
    while cursor < end:
        cursor = next_business_day(cursor)
        dates.append(cursor)
        cursor = cursor + step
    return dates
def sharpe_rollover_returns(
        close_prices, window_duration, hold_duration, top_k):
    """"Sharpe rollover" strategy:

    - Take equal-weighted positions in the `top_k` tickers
      with the highest Sharpe ratio over the past `window_duration`.
    - After `hold_duration`, exit all positions and enter again as
      above.

    Parameters
    ----------
    close_prices : pd.Series
    window_duration : pd.DateOffset
    hold_duration : pd.DateOffset
    top_k : int

    Returns
    -------
    portfolio : Portfolio
    """
    # First tradeable day: enough history for one full lookback window.
    start = close_prices.index[0] + window_duration
    end = close_prices.index[-1]
    trade_dates = get_trade_dates(start, end, hold_duration)
    p = Portfolio(close_prices)
    # The enumerate index was never used; iterate the dates directly.
    for rollover in trade_dates:
        if not p.tickers_held.empty:
            # Exit all current positions at the rollover close.
            p.sell(rollover, p.tickers_held, close_prices.loc[rollover])
        window_begin = prev_business_day(rollover - window_duration)
        # Rank tickers by annualized Sharpe over the lookback window.
        current_sharpes = close_prices.loc[window_begin:rollover] \
            .pct_change().apply(sharpe).sort_values(ascending=False)
        top_scorers = current_sharpes.head(top_k).index
        p.buy(rollover, top_scorers, close_prices.loc[rollover])
    return p
| {"/scripts/chart_seasonality_example.py": ["/src/utils/get_prices.py", "/src/charting/chart_seasonality.py"], "/src/utils/load_bucket_prices.py": ["/src/utils/get_prices.py"], "/scripts/threshold_momentum_buckets.py": ["/src/strats/threshold_momentum.py", "/src/utils/load_bucket_prices.py"]} |
73,348 | yh2n/financial-analysis-v2 | refs/heads/master | /src/utils/load_bucket_prices.py | import pandas as pd
from pathlib import Path
from .get_prices import get_prices
# On-disk layout, relative to the project root.
DATA_PATH = Path('data/raw')  # cached price CSVs
BASKET_PATH = Path('data/baskets')  # ticker-list CSVs, one per basket
BASKET_NAME = 'scorecard_single_ticker'  # default basket
TICKER_PATH = BASKET_PATH / '{}.csv'.format(BASKET_NAME)
def load_bucket_prices(project_root, start, end, data_source='tiingo',
                       basket=BASKET_NAME, api_key=None):
    """Load basket prices from the local CSV cache, fetching (and
    caching) them from `data_source` when no cache file exists yet.

    Parameters
    ----------
    project_root : str or Path
    start, end : str
        Date range, used both for fetching and in the cache filename.
    data_source : str, optional
    basket : str, optional
        Basket name; its ticker list is read from data/baskets.
    api_key : str, optional
        Forwarded to `get_prices` when fetching.

    Returns
    -------
    pd.DataFrame
    """
    filename = f'prc_{basket}_{start}_{end}_{data_source}.csv'
    if isinstance(project_root, str):
        project_root = Path(project_root)
    filepath = project_root / DATA_PATH / filename
    ticker_path = BASKET_PATH / f'{basket}.csv'
    # Bug fix: read_csv(squeeze=True) was removed in pandas 2.0;
    # DataFrame.squeeze("columns") is the supported replacement.
    tickers = pd.read_csv(project_root / ticker_path, header=None,
                          names=['Ticker']).squeeze('columns')
    # filepath is already a Path; no need to re-wrap it.
    if filepath.exists():
        print("Found existing data file. Reading...")
        df = pd.read_csv(filepath, header=[0, 1], index_col=0,
                         parse_dates=True)
        print("Data read from:", filepath)
    else:
        print(f'No existing file found. Fetching data for '
              f'{len(tickers)} tickers...')
        df = get_prices(tickers, start, end,
                        data_source=data_source, api_key=api_key)
        df.to_csv(filepath)
        print("Results saved to:", filepath)
    return df
| {"/scripts/chart_seasonality_example.py": ["/src/utils/get_prices.py", "/src/charting/chart_seasonality.py"], "/src/utils/load_bucket_prices.py": ["/src/utils/get_prices.py"], "/scripts/threshold_momentum_buckets.py": ["/src/strats/threshold_momentum.py", "/src/utils/load_bucket_prices.py"]} |
73,349 | yh2n/financial-analysis-v2 | refs/heads/master | /scripts/threshold_momentum_buckets.py | import argparse
import pandas as pd
import numpy as np
from datetime import datetime
from src.strats.threshold_momentum import threshold_momentum_returns
from src.utils.load_bucket_prices import load_bucket_prices
def bucket_stats(all_returns, threshold):
    """Summarize, per ticker, the probability of each return bucket.

    Parameters
    ----------
    all_returns : pd.DataFrame
        One column of returns per ticker; NaNs are dropped.
    threshold : numeric or str
        Recorded verbatim in each record's 'Threshold' field.

    Returns
    -------
    list of dict
        One record per ticker with the bucket probabilities.
    """
    pos_marks = [0, .01, 0.05, .1]
    neg_marks = [-m for m in pos_marks]
    records = []
    for ticker in all_returns.columns:
        clean = all_returns[ticker].dropna()
        record = {'Ticker': ticker, 'Threshold': threshold,
                  'Count': clean.shape[0]}
        # Downside buckets, most negative first.
        for mark in reversed(neg_marks):
            record[f'Pr(return < {int(mark * 100)}%)'] = np.mean(clean < mark)
        # Upside buckets, least positive first.
        for mark in pos_marks:
            record[f'Pr(return >= {int(mark * 100)}%)'] = np.mean(clean >= mark)
        records.append(record)
    return records
def bucket_returns_for_thresholds(close_prices, hi_prices):
    """Collect per-ticker bucket stats for several move thresholds.

    Runs the threshold-momentum strategy at each threshold, then adds
    the unconditional prior-close-to-high distribution under
    Threshold='All' as a baseline.

    Returns
    -------
    pd.DataFrame
    """
    thresholds = [0.02, 0.03, 0.04, 0.05]
    stats = []
    for move_threshold in thresholds:
        strat_returns = threshold_momentum_returns(
            close_prices, hi_prices, move_threshold)
        stats.extend(bucket_stats(strat_returns, move_threshold))
    # Baseline: previous close to today's high, no entry condition.
    close_to_hi = (hi_prices - close_prices.shift()) / close_prices.shift()
    stats.extend(bucket_stats(close_to_hi, 'All'))
    return pd.DataFrame(stats)
if __name__ == '__main__':
    # Backtest window start is fixed; the end date defaults to a pinned
    # date for reproducibility.
    start = '2016-01-01'
    parser = argparse.ArgumentParser()
    # NOTE(review): store_const means the bare flag `--today` switches
    # the end date to the current date; the flag accepts no value.
    parser.add_argument('--today', action='store_const', const=str(
        datetime.today().date()), default='2021-02-11')
    args = parser.parse_args()
    end = args.today
    output_name = f'threshold_momentum_returns_{start}_to_{end}.csv'
    # Prices are cached locally by load_bucket_prices.
    prices = load_bucket_prices('.', start, end)
    buckets = bucket_returns_for_thresholds(
        prices['adj_close'], prices['adj_high'])
    # Round for readable CSV output; sort for stable diffs.
    buckets.round(decimals=4).sort_values(['Ticker', 'Threshold']).to_csv(
        output_name, index=False)
| {"/scripts/chart_seasonality_example.py": ["/src/utils/get_prices.py", "/src/charting/chart_seasonality.py"], "/src/utils/load_bucket_prices.py": ["/src/utils/get_prices.py"], "/scripts/threshold_momentum_buckets.py": ["/src/strats/threshold_momentum.py", "/src/utils/load_bucket_prices.py"]} |
73,350 | yh2n/financial-analysis-v2 | refs/heads/master | /src/utils/get_prices.py | import os
import time
import pandas_datareader.data as web
import pandas as pd
import numpy as np
__version__ = '0.0.3'
def _tiingo_type_mapper(typestr):
out = ''
for c in typestr:
if c.isupper():
out += '_'
c = c.lower()
out += c
return out
# Supported price data sources.
SOURCES = ['yahoo', 'tiingo']
# Raw column names each source returns for daily bars.
VALID_TYPES = {
    'yahoo': ['Adj Close', 'Close', 'High', 'Low', 'Open', 'Volume'],
    'tiingo': ['close', 'high', 'low', 'open', 'volume', 'adjClose', 'adjHigh',
               'adjLow', 'adjOpen', 'adjVolume', 'divCash', 'splitFactor']
}
# Per-source functions normalizing raw column names to snake_case.
TYPE_MAPPERS = {
    'yahoo': lambda typestr: typestr.replace(' ', '_').lower(),
    'tiingo': _tiingo_type_mapper
}
def get_prices(tickers,
               start,
               end,
               types=None,
               data_source='tiingo',
               out_path=None,
               sort_tks=False,
               api_key=None):
    """Download prices from external source.

    Args:
        tickers (str or list): The tickers to be downloaded.
        start, end (str): The start date and end date of target period.
        types: The price type(s) to download. If not specified will download
            all types.
        data_source: The data source to use for downloading.
            See pandas_datareader doc.
        out_path: If specified, the results will be saved to specified path.
        sort_tks: If the tickers in result should be sorted.
        api_key: If specified will use provided api_key.

    Returns:
        pandas.DataFrame

    Raises:
        ValueError: If `data_source` is not a supported source.
    """
    # Normalize scalar arguments to lists.
    if isinstance(tickers, str):
        tickers = [tickers]
    if isinstance(types, str):
        types = [types]
    if sort_tks:
        tickers = sorted(tickers)
    if data_source not in SOURCES:
        # Bug fix: the message was missing the f-prefix, so {SOURCES}
        # was printed literally instead of being interpolated.
        raise ValueError(
            f'data_source must be one of {SOURCES}.'
        )
    print(f'Downloading prices from {data_source.capitalize()}...')
    df = get_prices_from_source(tickers, start, end, data_source, types,
                                api_key)
    if out_path is not None:
        try:
            df.to_csv(out_path)
            print("Results saved to: ", out_path)
        except (IOError, PermissionError):
            # Bug fix: previously constructed (and discarded) a Warning
            # object, which reported nothing. Saving is best-effort,
            # so report and continue rather than raise.
            print("WARNING: Failed to output to file!")
    print("Download finished.")
    return df
def get_tiingo_prices(tickers, start, end, api_key=None):
    """Wrapper to fetch Tiingo prices one ticker at a time.

    Naively using pandas_datareader doesn't work, since if a ticker in
    `tickers` has no data over `start`-`end`, a `KeyError` exception is
    raised.

    Two things must be done:
    1. The problem exception must be handled
    2. Tickers must be fetched one-at-a-time to prevent missing data
       in batches that contain a ticker with no data.

    For future, the actual fix would be a simple PR to make to the
    pandas_datareader repo.

    Args:
        tickers (iterable of str)
        start, end (str): Target date range.
        api_key (str, optional): Falls back to the TIINGO_API_KEY
            environment variable when omitted.

    Returns
    -------
    pd.DataFrame
    """
    all_results = []
    if api_key is None:
        api_key = os.getenv('TIINGO_API_KEY')
    # Sort tickers so that error logging can be used to identify progress
    tickers = sorted(tickers)
    for i, ticker in enumerate(tickers):
        try:
            df = web.DataReader(name=ticker,
                                data_source='tiingo',
                                start=start,
                                end=end,
                                api_key=api_key)
            df = df[['adjClose']]
        except KeyError as e:
            if e.args[0] == 'date':
                # Patch to handle issue in pandas_datareader
                # where empty results cause a KeyError
                print(f'Got empty df for i={i}, ticker={ticker}')
                df = pd.DataFrame()
            else:
                # Bug fix: other KeyErrors previously fell through and
                # re-appended the previous ticker's frame (or raised
                # NameError on the first iteration). Propagate instead.
                raise
        except Exception as e:
            print('Received an unexpected error:', e)
            print(f'Only fetched up to {i-1} inclusive. Returning.')
            return pd.concat(all_results)
        if (i % 50 == 0) and i > 0:
            # Sleep to avoid timeouts. Empirically found 20s to be sufficient
            time.sleep(20)
        all_results.append(df)
    return pd.concat(all_results)
def get_prices_from_source(tickers, start, end, source, types=None,
                           api_key=None):
    """Download daily prices from the given `source` ('yahoo' or
    'tiingo') and normalize them into a (date x [attribute, symbol])
    frame with snake_case attribute names.

    Raises:
        ValueError: If any entry of `types` is invalid for `source`.
    """
    if types is not None and not all(i in VALID_TYPES[source] for i in types):
        raise ValueError(
            f"Wrong 'types' provided for source {source}. Must be chosen from "
            f'{VALID_TYPES[source]}.')
    params = {}
    if source == 'tiingo':
        # Tiingo needs the one-ticker-at-a-time wrapper (see above).
        df = get_tiingo_prices(tickers, start, end, api_key)
    else:
        df = web.DataReader(name=tickers,
                            data_source=source,
                            start=start,
                            end=end,
                            **params)
    # Normalize column names for the chosen source.
    df = df.rename(mapper=TYPE_MAPPERS[source], axis=1)
    if source == 'tiingo':
        # Move the outer index level into the columns so the layout
        # matches the yahoo multi-ticker shape.
        # NOTE(review): assumes the tiingo frame is (symbol, date)-indexed
        # with symbol as level 0 — confirm against pandas_datareader.
        df = df.unstack(level=0)
    if df.empty:
        return df
    df.index.name = 'date'
    df.columns.names = ['attributes', 'symbols']
    # hardcoded 1 day before inception dates(for fixing yahoo data)
    inception_dates = {
        'DOMO': '2018-06-28',
        'PS': '2018-05-16',
        'SMAR': '2018-04-26',
        'TWLO': '2016-06-22',
        'ZUO': '2018-04-11',
        'MB': '2015-06-21',
        'GDDY': '2015-04-15',
        'HDP': '2014-12-14',
        'SHOP': '2015-05-21',
        'TEAM': '2015-12-15',
        'PD': '2019-04-11'
    }
    # fix inception dates: blank out any prices reported before the
    # ticker actually started trading.
    for tk in tickers:
        if tk in inception_dates:
            df.loc[:inception_dates[tk], pd.IndexSlice[:, tk]] = np.nan
    # filter types if provided
    if types is not None:
        df = df[types]
    # Forward-fill interior gaps column by column (prints what it fills).
    df = df.apply(_print_and_fill_gaps)
    # QC: send warnings if no data
    df.apply(lambda i: print("WARNING: ", i.name,
                             "has no data during the selected period!")
             if i.isna().all() else None)
    return df
def _print_and_fill_gaps(series):
if series.isna().all():
return series
s = series.copy()
trading_idx = s.loc[~s.isna()].index
first_day = min(trading_idx)
last_day = max(trading_idx)
s_trading = s[first_day:last_day]
if s_trading.isna().any():
print("Gaps found and filled in ", s.name, " :")
print(s_trading[s_trading.isna()].index.strftime('%Y%m%d').tolist())
s[first_day:last_day] = s[first_day:last_day].fillna(method='ffill')
return s
| {"/scripts/chart_seasonality_example.py": ["/src/utils/get_prices.py", "/src/charting/chart_seasonality.py"], "/src/utils/load_bucket_prices.py": ["/src/utils/get_prices.py"], "/scripts/threshold_momentum_buckets.py": ["/src/strats/threshold_momentum.py", "/src/utils/load_bucket_prices.py"]} |
73,351 | yh2n/financial-analysis-v2 | refs/heads/master | /src/utils/whale_wisdom_utils.py | import numpy as np
import pandas as pd
def get_fehf_holdings(holdings, fehf_ids, use_names=False):
    """Filter `holdings` down to the rows belonging to the FEHFs.

    `fehf_ids` may be filer IDs or, with `use_names=True`, filer
    names. Any requested identifier with no matching rows is reported
    to stdout.

    Parameters
    ----------
    holdings : pd.DataFrame
    fehf_ids : list of int or list of str
        IDs or strings to identify the FEHFs
    use_names : bool, default False
        Treat `fehf_ids` as a collection of names

    Returns
    -------
    pd.DataFrame
        Holdings of the FEHF funds
    """
    id_col = 'filer_name' if use_names else 'filer_id'
    mask = holdings[id_col].isin(fehf_ids)
    fehf_holdings = holdings.loc[mask]
    missing_ids = set(fehf_ids).difference(fehf_holdings[id_col])
    missing_names = holdings.loc[
        holdings[id_col].isin(missing_ids), 'filer_name'].unique()
    print(f'The following names are FEHF but not in holdings: {missing_names}')
    return fehf_holdings
def median_holdings_each_quarter(holdings):
    """Median (across funds) number of positions held, per quarter."""
    per_fund_counts = holdings.groupby(
        ['filer_name', 'quarter']).count()['filer_id']
    return per_fund_counts.groupby('quarter').median()
def median_size_each_quarter(holdings):
    """Median (across funds) of each fund's median position size,
    per quarter."""
    per_fund_medians = holdings.groupby(
        ['filer_name', 'quarter'])['current_percent_of_portfolio'].median()
    return per_fund_medians.groupby('quarter').median()
def pct_in_top_k(holdings, k):
    """Percent of each fund's portfolio held in its positions ranked
    strictly below `k`, per quarter.

    Returns
    -------
    pd.Series
        Indexed by (filer_name, quarter).
    """
    def _portfolio_share(group):
        in_top = group.loc[group['current_ranking'] < k,
                           'current_percent_of_portfolio']
        # Sort before summing to keep float accumulation order
        # identical to the original implementation.
        return in_top.sort_values().sum()

    return holdings.groupby(['filer_name', 'quarter']).apply(_portfolio_share)
def turnover(holdings):
"""Calculate ticker-based turnover metric for each fund for each quarter.
Only ticker-based. So regardless of the position size, it wont be
counted as turned-over if it is still in the holdings. Values from this
will likely be higher than whatever is ideal.
Parameters
----------
holdings : pd.DataFrame
Returns
-------
pd.Series
index is a pd.MultiIndex of (filer_name, quarter). Values are turnover,
which is in [0, 1].
"""
def _fn(all_for_fund):
by_quarter = all_for_fund.groupby('quarter')['stock_ticker']
out = pd.Series(dtype=float)
last_q_holdings = None
current_q_holdings = None
for date, quarter_holdings in by_quarter:
current_q_holdings = set(quarter_holdings)
if last_q_holdings is not None:
out[date] = len(last_q_holdings - current_q_holdings)
out[date] /= len(last_q_holdings)
else:
out[date] = np.nan
last_q_holdings = current_q_holdings
return out
return holdings.groupby('filer_name').apply(_fn)
| {"/scripts/chart_seasonality_example.py": ["/src/utils/get_prices.py", "/src/charting/chart_seasonality.py"], "/src/utils/load_bucket_prices.py": ["/src/utils/get_prices.py"], "/scripts/threshold_momentum_buckets.py": ["/src/strats/threshold_momentum.py", "/src/utils/load_bucket_prices.py"]} |
73,352 | yh2n/financial-analysis-v2 | refs/heads/master | /src/charting/chart_seasonality.py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as plticker
def calc_hit_rate(ts, threshold=0):
    """Fraction of non-NaN observations strictly above `threshold`.

    Args:
        ts (pd.Series): Timeseries.
        threshold (float): The minimum value to be considered 'hit'.

    Returns:
        A single number of hit rate.
    """
    hits = (ts > threshold).sum()
    return hits / ts.count()
def calc_monthly_rtns(series, s_type='price'):
    """Compound daily data into calendar-month returns.

    Args:
        series (pd.Series): Daily price timeseries or daily return
            timeseries.
        s_type (str): 'price' or 'return'.

    Returns:
        A pd.Series of monthly returns.

    Raises:
        ValueError: If `s_type` is neither 'price' nor 'return'.
    """
    if s_type == 'return':
        daily = series
    elif s_type == 'price':
        daily = series.pct_change().dropna().squeeze()
    else:
        raise ValueError("s_type must be 'price' or 'return'")

    def _compound(month):
        # Geometric compounding within the month.
        return (month + 1).prod() - 1

    return daily.dropna().resample('M').apply(_compound)
def calc_avg_daily_rtns(prices):
"""Calculate the average returns of every calendar day
based on historical prices.
Args:
prices (pd.Series): Daily prices timeseries.
Returns:
A pd.Series of average daily returns
"""
tk = prices.name
rtns = prices.pct_change()
avg_d_rtns = rtns.groupby([prices.index.month, prices.index.day]).mean()
avg_d_rtns.index.rename(['Month', 'Day'], [0, 1], inplace=True)
avg_d_rtns = avg_d_rtns.reset_index()
avg_d_rtns.index = pd.to_datetime('2020-'
+ avg_d_rtns['Month'].apply(str)
+ '-'
+ avg_d_rtns['Day'].apply(str),
format='%Y-%m-%d')
return avg_d_rtns[tk]
def calc_cum_avg_daily_rtns(prices):
    """Compound the per-calendar-day average returns through the year."""
    per_day = calc_avg_daily_rtns(prices)
    return (per_day + 1).cumprod() - 1
def calc_monthly_seasonality_stats(prices):
    """Calculate mean returns and hit rates for each calendar month.

    Args:
        prices (pd.Series): Daily prices used for calculating
            seasonality.

    Returns:
        mean_rtn (pd.Series) and hit_rate (pd.Series) for 12 calendar
        months, indexed by the month index from 1 to 12.
    """
    monthly = calc_monthly_rtns(prices)
    by_month = monthly.groupby(monthly.index.month)
    return by_month.mean(), by_month.apply(calc_hit_rate)
def chart_monthly_seasonality(prices, output_path=None):
    """Chart monthly seasonal mean returns and hit rates.

    Bar chart of mean monthly returns (blue positive, red negative)
    with a line of per-month hit rates on a secondary axis.

    Args:
        prices (pd.Series): Daily prices used for calculating seasonality.
        output_path (str): If specified, chart will be saved to the
            specified path.
    """
    tk = prices.name.upper()
    mean_rtn, hit_rate = calc_monthly_seasonality_stats(prices)
    # Re-index both series by month abbreviation ('Jan', 'Feb', ...).
    mean_rtn.index = pd.to_datetime(mean_rtn.index, format='%m').strftime('%b')
    hit_rate.index = pd.to_datetime(hit_rate.index, format='%m').strftime('%b')
    # NOTE(review): index[1] is the second price row; if the label is
    # meant to reflect the very first observation this should probably
    # be index[0] — confirm.
    inception_yr = prices.index[1].strftime('%Y')
    # Create a new figure, plot barchart of monthly mean returns
    fig, ax = plt.subplots(figsize=(20, 9))
    x = np.arange(len(mean_rtn))
    y = mean_rtn * 100  # in percent
    ax.bar(x[y > 0], y[y > 0], color='midnightblue',
           label='Average Return')
    ax.bar(x[y <= 0], y[y <= 0], color='darkred')
    ax.yaxis.set_major_locator(plticker.MultipleLocator(base=0.5))
    ax.set_xlabel('Month')
    ax.set_ylabel('Mean Monthly Return %')
    ax.set_title(f'{tk} Seasonality: 1 Month Return (Since {inception_yr})')
    # Plot line chart of hit rates using existing x-axis
    ax1 = ax.twinx()
    ax1.set_ylim([0, 100])
    ax1.set_ylabel('% of Time Positive')
    ax1.plot(hit_rate*100, color='deepskyblue', label='% Up')
    ax1.grid(False)
    # Add legend
    fig.legend(loc="upper left", bbox_to_anchor=(0, 1),
               bbox_transform=ax.transAxes)
    if output_path is not None:
        fig.savefig(output_path)
def chart_cum_avg_daily_rtns(prices, output_path=None):
    """Chart cumulative avg daily returns.

    Plots the compounded average calendar-day return through the year,
    with a vertical marker at today's calendar day.

    Args:
        prices (pd.Series): Daily prices used for calculating seasonality.
        output_path (str): If specified, chart will be saved to the
            specified path.
    """
    tk = prices.name
    prices = prices.dropna()
    cum_avg_d_rtns = calc_cum_avg_daily_rtns(prices)
    # avg_d_rtns = calc_avg_daily_rtns(prices)
    # avg_m_rtns = calc_monthly_rtns(avg_d_rtns, 'return')
    fig, ax = plt.subplots(figsize=(20, 9))
    # Rebase to 100 so the curve reads as an index level.
    df = 100 + cum_avg_d_rtns*100
    df.index = df.index.strftime('%b-%d')
    ax.plot(df, color='deepskyblue')
    ax.xaxis.set_major_locator(plticker.MultipleLocator(base=15))
    ax.yaxis.set_major_locator(plticker.MultipleLocator(base=5))
    ax.yaxis.tick_right()
    # Mark today's position within the seasonal year.
    today_idx = df.index.get_loc(pd.Timestamp.today().strftime('%b-%d'))
    ax.axvline(today_idx, color='black')
    init_date = prices.index[0].strftime('%Y-%m-%d')
    last_date = prices.index[-1].strftime('%Y-%m-%d')
    ax.set_title(f'{tk} Seasonality: Cumulative Avg Daily Return'
                 f' ({init_date} to {last_date})')
    if output_path is not None:
        fig.savefig(output_path)
def calc_monthly_seasonality_signals(prices, signal_metric, n_exclude=None,
                                     threshold=None):
    """Generate trading signals according to monthly seasonality stats.

    Signals: 0 means flat, 1 means long.

    Args:
        prices: Daily prices used to calculate everything.
        signal_metric: The type of metric used to determine signal.
            Should be either 'return' or 'hit_rate'.
        n_exclude (int): Number of worst months to exclude(not trade).
            If not None, threshold will be ignored.
        threshold (float): The min value required on the metric to enter
            long position. If n_exclude is not None, threshold will be ignored.

    Returns:
        signals (pd.Series): Trade signals, indexed by month index(1 to 12).

    Raises:
        ValueError: If both n_exclude and threshold are None, or if
            signal_metric is invalid.
    """
    if n_exclude is None and threshold is None:
        raise ValueError("Either n_exclude or threshold cannot be None!")
    if signal_metric not in ['return', 'hit_rate']:
        raise ValueError(
            "signal_metric can only be either 'return' or 'hit_rate'!")
    mean_rtn_lb, hit_rate_lb = calc_monthly_seasonality_stats(prices)
    vals = mean_rtn_lb if signal_metric == 'return' else hit_rate_lb
    signals = vals.copy()
    if n_exclude is not None:
        # Flat in the n_exclude worst-scoring months, long elsewhere.
        idx_ex = signals.sort_values().head(n_exclude).index
        signals[idx_ex] = 0
        signals[~signals.index.isin(idx_ex)] = 1
    else:
        # Long any month whose metric clears the threshold.
        signals[signals < threshold] = 0
        signals[signals >= threshold] = 1
    # Bug fix: a leftover hard-coded override (long every month except
    # September/October) made all of the logic above dead code; removed
    # so n_exclude/threshold behave as documented.
    return signals
def trade_monthly_seasonality(prices, lb_start, lb_end, hd_start, hd_end,
                              signal_metric, n_exclude, threshold):
    """Calculate monthly returns from trading monthly seasonality.

    Signals are fit on the lookback period and applied to the holding
    period's realized monthly returns.

    Args:
        prices: Daily prices.
        lb_start, lb_end (str): Start/end date of lookback period
            in format '%Y-%m-%d'.
        hd_start, hd_end (str): Start/end date of holding period
            in format '%Y-%m-%d'.
        signal_metric (str): The type of metric used to determine signal.
            Should be either 'return' or 'hit_rate'.
        n_exclude (int): Number of worst months to exclude(not trade).
            If not None, threshold will be ignored.
        threshold (float): The min value required on the metric to enter
            long position. If n_exclude is not None, threshold will be ignored.

    Returns:
        pd.Series of monthly returns from trading monthly seasonality signals.
    """
    prc_lb = prices.loc[lb_start:lb_end]
    rtn_hd_m = calc_monthly_rtns(prices).loc[hd_start:hd_end]
    signals = calc_monthly_seasonality_signals(
        prc_lb, signal_metric, n_exclude, threshold)
    # Map each holding-period month to its signal; NaN when the month
    # never appeared in the lookback signals.
    signals_this_yr = pd.Series({
        i: np.nan if i.month not in signals.index else signals[i.month]
        for i in rtn_hd_m.index
    })
    return signals_this_yr * rtn_hd_m
def backtest(prices,
             lookback=10,
             holding=1,
             signal_metric='return',
             n_exclude=2,
             threshold=None):
    """Walk-forward backtest wrapper around trade_monthly_seasonality().

    Slides a `lookback`-year signal window followed by a `holding`-year
    trade window forward one year at a time, and concatenates the
    resulting monthly return series.
    """
    final_year = prices.index[-1].year
    window_start = prices.index[0].year
    pieces = []
    # Stop once the holding window would run past the data.
    while window_start + lookback + holding - 1 <= final_year:
        window_end = window_start + lookback - 1
        hold_start = window_end + 1
        hold_end = hold_start + holding - 1
        pieces.append(
            trade_monthly_seasonality(prices, str(window_start),
                                      str(window_end),
                                      str(hold_start), str(hold_end),
                                      signal_metric, n_exclude, threshold))
        window_start += 1
    return pd.concat(pieces)
| {"/scripts/chart_seasonality_example.py": ["/src/utils/get_prices.py", "/src/charting/chart_seasonality.py"], "/src/utils/load_bucket_prices.py": ["/src/utils/get_prices.py"], "/scripts/threshold_momentum_buckets.py": ["/src/strats/threshold_momentum.py", "/src/utils/load_bucket_prices.py"]} |
73,353 | yh2n/financial-analysis-v2 | refs/heads/master | /src/strats/close_to_rolling_high.py | import pandas as pd
from strats.portfolio import Portfolio
MONTH_DAYS = 21
def close_to_rolling_high(close_prices, hi_prices):
    """Return from the close one week (5 sessions) ago to the highest
    high observed since then.

    i.e. the value for day `t` is
        (max(hi_{t-4}, ..., hi_t) / c_{t-5}) - 1
    """
    week_high = hi_prices.rolling(5).max()
    week_ago_close = close_prices.shift(5)
    return week_high / week_ago_close - 1
def _top_score_limit_returns(
        scores, close_prices, hi_prices, top_k,
        entry=None, one_week_only=False, dump_period=None,
        rebalance=True):
    """Strategy that takes positions based on top `scores`, and sets
    sell limits to those scores.

    Parameters
    ----------
    scores : pd.DataFrame
    close_prices : pd.DataFrame
    hi_prices : pd.DataFrame
    top_k : int
        Number of positions to hold
    entry : str, pd.Timestamp
        Date to (attempt) to start strategy. If < top_k tickers on
        `entry` have non-nan `scores`, then successive days are tried.
    one_week_only : boolean
        Run strategy just for one week. Forces `dump_period = 5` to exit all
        positions at the end of the week.
    dump_period : int, optional
        Maximum number of days to hold a position for. Sells on the
        `dump_period`th day no matter what. If None, holds indefinitely.
        Set to 5 if `one_week_only == True`.
    rebalance : boolean
        Take new positions after selling according to current top `scores`.

    Returns
    -------
    p : Portfolio
    """
    entry = entry or scores.first_valid_index()
    days_run = 0
    p = Portfolio(close_prices)
    if one_week_only:
        dump_period = 5
    for day in scores.loc[entry:].index:
        if one_week_only and days_run > dump_period:
            break
        # Today's top_k candidate tickers, best score first.
        top_scoring = scores.loc[day] \
            .sort_values(ascending=False).head(top_k)
        if days_run == 0:
            # Still waiting to enter: only start once a full set of
            # top_k non-NaN scores is available on one day.
            if not top_scoring.isna().any():
                p.buy(day, top_scoring.index, close_prices.loc[day])
                # Each position's sell limit is its score at entry.
                sell_limits = top_scoring.sort_index()
                days_run += 1
        elif days_run > 0:
            # Sell any position whose intraday high reached its limit;
            # the fill is assumed to happen exactly at the limit price.
            curr_returns = (hi_prices.loc[day, p.tickers_held] \
                / p.last_buy_price) - 1
            selling = curr_returns.index[curr_returns >= sell_limits]
            sell_prices = (sell_limits + 1) * p.last_buy_price
            if len(selling) > 0:
                p.sell(day, selling, sell_prices[selling])
            p.tick(day, close_prices.loc[day])
            if dump_period is not None:
                # Force-exit anything held too long at today's close.
                dumping = p.tickers_held[p.days_held >= dump_period]
                if len(dumping) > 0:
                    p.sell(day, dumping, close_prices.loc[day])
            # Drop limits for positions no longer held.
            sell_limits = sell_limits[p.tickers_held]
            if rebalance:
                # Refill freed slots from today's top scorers.
                new_buys = top_scoring.drop(
                    p.tickers_held, errors='ignore').head(
                    top_k - len(p.tickers_held))
                p.buy(day, new_buys.index, close_prices.loc[day])
                sell_limits = pd.concat((sell_limits, new_buys)).sort_index()
            days_run += 1
    return p
def ctrh_returns(close_prices, hi_prices, top_k, dump_period=None):
    """Run the `top_score_limit` strategy scored by last week's
    close-to-rolling-high.

    See `close_to_rolling_high` for the definition of those scores.
    Runs over the entire provided period and rebalances as positions
    are sold.
    """
    # Local renamed: the original shadowed this function's own name.
    scores = close_to_rolling_high(close_prices, hi_prices)
    return _top_score_limit_returns(
        scores, close_prices, hi_prices,
        top_k, rebalance=True, dump_period=dump_period)
def ctrh_conf_returns(close_prices, hi_prices, top_k, dump_period=None):
    """Run the `top_score_limit` strategy on 'confident' CTRH scores.

    A ticker is scored by its 10th-percentile close-to-rolling-high
    over the past 3 months, kept only when the most recent month's
    10th percentile is at least that value. The 3m value is the score
    (and therefore the sell limit).

    The idea is that
    - Taking a low (10th pctile) value yields high probability of
      a positive sell
    - Requiring that 1m value > 3m value ensures the behaviour is
      sustaining.
    """
    ctrh = close_to_rolling_high(close_prices, hi_prices)
    pct10_3m = ctrh.rolling(3 * MONTH_DAYS).quantile(0.1)
    pct10_1m = ctrh.rolling(MONTH_DAYS).quantile(0.1)
    confident = pct10_3m.where(pct10_1m >= pct10_3m)
    return _top_score_limit_returns(
        confident, close_prices, hi_prices,
        top_k, rebalance=True, dump_period=dump_period)
def sampled_1wk_ctrh_conf_returns(
        close_prices, hi_prices, top_k, num_samples):
    """Runs the `ctrh_conf_returns` for just one week at a time,
    randomly sampling the entrypoints and selling off all positions
    at the end of the week.

    The output is a list of the total returns for each of the
    `num_samples` sampled runs.

    NOTE(review): sampling uses pd.Series.sample without a fixed
    random_state, so results differ run-to-run — confirm whether a
    seed should be exposed for reproducibility.
    """
    # Same confident-score construction as ctrh_conf_returns.
    ctrh_returns = close_to_rolling_high(close_prices, hi_prices)
    conf_3m = ctrh_returns.rolling(3 * MONTH_DAYS).quantile(0.1)
    conf_1m = ctrh_returns.rolling(MONTH_DAYS).quantile(0.1)
    scores = conf_3m.where(conf_1m >= conf_3m)
    # Entry candidates: days where at least one ticker has a 3m score.
    tradeable_dates = conf_3m.index[~conf_3m.isna().all(axis=1)].to_series()
    start_dates = tradeable_dates.sample(num_samples).index
    weekly_returns = []
    for date in start_dates:
        portfolio = _top_score_limit_returns(
            scores, close_prices, hi_prices, top_k,
            entry=date, one_week_only=True, rebalance=False)
        # Equal-weight: total P&L divided by the number of slots.
        weekly_returns.append(portfolio.returns.sum().sum() / top_k)
    return weekly_returns
| {"/scripts/chart_seasonality_example.py": ["/src/utils/get_prices.py", "/src/charting/chart_seasonality.py"], "/src/utils/load_bucket_prices.py": ["/src/utils/get_prices.py"], "/scripts/threshold_momentum_buckets.py": ["/src/strats/threshold_momentum.py", "/src/utils/load_bucket_prices.py"]} |
73,354 | jleclanche/mudserve | refs/heads/master | /mudserve/character/handler.py | from mudserve.mudrpc.character import CharacterService
from mudserve.mudrpc.character.ttypes import *
class CharacterHandler(object):
    """Thrift handler implementing the CharacterService interface.

    Characters are kept in an in-memory list and lost on restart.
    """

    def __init__(self):
        self.characters = []

    def ping(self):
        """Liveness check; logs the call to stdout."""
        # Bug fix: parenthesized so the module also parses on Python 3
        # (a single-argument print behaves identically on Python 2).
        print("ping()")

    def createCharacter(self, name):
        """Create, store and return a new Character.

        Ids are sequential, starting at 1.
        """
        char = Character(id=(1 + len(self.characters)), name=name)
        self.characters.append(char)
        return char

    def getCharacters(self):
        """Return the list of all characters created so far."""
        return self.characters
| {"/mudserve/combat/handler.py": ["/mudserve/auth/authhandler.py", "/mudserve/combat/fight.py"], "/mudserve/spell/spelleffect.py": ["/mudserve/database/objects.py"], "/mudserve/database/databases.py": ["/mudserve/spell/spell.py", "/mudserve/map/map.py", "/mudserve/settings.py"], "/mudserve/__init__.py": ["/mudserve/models/base.py"], "/tests/serializer/serializer.py": ["/tests/utils.py"], "/tests/serializer/__init__.py": ["/tests/serializer/serializer.py"], "/mudserve/models/base.py": ["/mudserve/settings.py"], "/mudserve/map/map.py": ["/mudserve/__init__.py"], "/mudserve/models/user.py": ["/mudserve/models/base.py"], "/mudserve/models/auth.py": ["/mudserve/models/base.py", "/mudserve/models/user.py"], "/mudserve/cache.py": ["/mudserve/settings.py"], "/mudserve/combat/fight.py": ["/mudserve/__init__.py", "/mudserve/spell/spell.py", "/mudserve/combat/unit.py"], "/mudserve/spell/spell.py": ["/mudserve/__init__.py", "/mudserve/database/objects.py", "/mudserve/spell/spelleffect.py"], "/tests/utils.py": ["/tests/__init__.py"], "/mudserve/auth/authhandler.py": ["/mudserve/settings.py", "/mudserve/models/auth.py", "/mudserve/__init__.py"]} |
73,355 | jleclanche/mudserve | refs/heads/master | /mudserve/serialize/serializer.py | from thrift.protocol.TBinaryProtocol import TBinaryProtocol
from thrift.transport.TTransport import TMemoryBuffer
class Serializer(object):
    """
    Takes care of serializing Thrift structures to and from disk.
    """
    def __init__(self, Transport=TMemoryBuffer, Protocol=TBinaryProtocol):
        """
        Initializes a serializer using the given transport and protocol
        factories (defaults: in-memory buffer + binary protocol).
        """
        self._transport = Transport
        self._protocol = Protocol

    def from_string(self, Struct, str):
        """
        Deserializes the object from a string to a fully constructed object.
        """
        # Set up our transport and protocol over the input bytes
        transport = self._transport(str)
        protocol = self._protocol(transport)
        # Create a new empty instance
        inst = Struct()
        # Construct the instance by reading the byte string
        inst.read(protocol)
        # Return the fully constructed instance
        return inst

    def to_string(self, inst):
        """
        Serializes the object to a string from a fully constructed object.
        """
        # Set up our transport and protocol
        transport = self._transport()
        protocol = self._protocol(transport)
        # Write the instance serialization to the protocol
        inst.write(protocol)
        # Return the serialization as a byte string
        return transport.getvalue()

    def from_file(self, Struct, filepath):
        """
        Deserializes an object given a file path. This is a pure utility method.
        """
        # Bug fix: context manager guarantees the handle is closed even
        # when deserialization raises.
        with open(filepath, "rb") as f:
            return self.from_string(Struct, f.read())

    def to_file(self, inst, filepath):
        """
        Serializes an object given an instance and a file path.
        This is a pure utility method.
        """
        # Local renamed: it previously shadowed the builtin `str`.
        data = self.to_string(inst)
        with open(filepath, "w+b") as f:
            f.write(data)
| {"/mudserve/combat/handler.py": ["/mudserve/auth/authhandler.py", "/mudserve/combat/fight.py"], "/mudserve/spell/spelleffect.py": ["/mudserve/database/objects.py"], "/mudserve/database/databases.py": ["/mudserve/spell/spell.py", "/mudserve/map/map.py", "/mudserve/settings.py"], "/mudserve/__init__.py": ["/mudserve/models/base.py"], "/tests/serializer/serializer.py": ["/tests/utils.py"], "/tests/serializer/__init__.py": ["/tests/serializer/serializer.py"], "/mudserve/models/base.py": ["/mudserve/settings.py"], "/mudserve/map/map.py": ["/mudserve/__init__.py"], "/mudserve/models/user.py": ["/mudserve/models/base.py"], "/mudserve/models/auth.py": ["/mudserve/models/base.py", "/mudserve/models/user.py"], "/mudserve/cache.py": ["/mudserve/settings.py"], "/mudserve/combat/fight.py": ["/mudserve/__init__.py", "/mudserve/spell/spell.py", "/mudserve/combat/unit.py"], "/mudserve/spell/spell.py": ["/mudserve/__init__.py", "/mudserve/database/objects.py", "/mudserve/spell/spelleffect.py"], "/tests/utils.py": ["/tests/__init__.py"], "/mudserve/auth/authhandler.py": ["/mudserve/settings.py", "/mudserve/models/auth.py", "/mudserve/__init__.py"]} |
73,356 | jleclanche/mudserve | refs/heads/master | /mudserve/combat/handler.py | from mudserve.auth.authhandler import AuthHandler
from mudserve.combat.fight import Fight
class CombatHandler(AuthHandler):
    """Thrift handler for combat actions; casting requires a valid
    auth token."""

    def castSpell(self, authToken, combatGuid, spellId, targetGuid):
        """Validate the caller's token, then cast `spellId` at
        `targetGuid` in the fight identified by `combatGuid`."""
        self.validate_token(authToken)
        fight = Fight(combatGuid)
        # TODO: Grab this from authToken
        casterGuid = "player1"
        fight.cast_spell(spellId, casterGuid, targetGuid)
        # Removed: a dead `pass` and a block of commented-out
        # pseudocode that duplicated Fight.cast_spell's responsibility.

    def getStatus(self, authToken, combatGuid):
        """Return the current status of the fight `combatGuid`.

        NOTE(review): unlike castSpell this never calls validate_token
        on authToken — confirm whether that is intentional.
        """
        fight = Fight(combatGuid)
        return fight.get_status()
| {"/mudserve/combat/handler.py": ["/mudserve/auth/authhandler.py", "/mudserve/combat/fight.py"], "/mudserve/spell/spelleffect.py": ["/mudserve/database/objects.py"], "/mudserve/database/databases.py": ["/mudserve/spell/spell.py", "/mudserve/map/map.py", "/mudserve/settings.py"], "/mudserve/__init__.py": ["/mudserve/models/base.py"], "/tests/serializer/serializer.py": ["/tests/utils.py"], "/tests/serializer/__init__.py": ["/tests/serializer/serializer.py"], "/mudserve/models/base.py": ["/mudserve/settings.py"], "/mudserve/map/map.py": ["/mudserve/__init__.py"], "/mudserve/models/user.py": ["/mudserve/models/base.py"], "/mudserve/models/auth.py": ["/mudserve/models/base.py", "/mudserve/models/user.py"], "/mudserve/cache.py": ["/mudserve/settings.py"], "/mudserve/combat/fight.py": ["/mudserve/__init__.py", "/mudserve/spell/spell.py", "/mudserve/combat/unit.py"], "/mudserve/spell/spell.py": ["/mudserve/__init__.py", "/mudserve/database/objects.py", "/mudserve/spell/spelleffect.py"], "/tests/utils.py": ["/tests/__init__.py"], "/mudserve/auth/authhandler.py": ["/mudserve/settings.py", "/mudserve/models/auth.py", "/mudserve/__init__.py"]} |
73,357 | jleclanche/mudserve | refs/heads/master | /tests/__init__.py | import sys
from os import mkdir
from shutil import rmtree
from subprocess import call
from unittest import TestSuite, TextTestRunner
from . import constants
# Test suites
from . import serializer
# Make sure the generated thrift files are on the python path
sys.path.insert(0, constants.GEN_PATH)
# Create our suite
suite = TestSuite((serializer.suite,))
# Run it with the standard runner
runner = TextTestRunner()
runner.run(suite)
# Delete the created folders
rmtree(constants.GEN_PATH)
rmtree(constants.TMP_PATH)
| {"/mudserve/combat/handler.py": ["/mudserve/auth/authhandler.py", "/mudserve/combat/fight.py"], "/mudserve/spell/spelleffect.py": ["/mudserve/database/objects.py"], "/mudserve/database/databases.py": ["/mudserve/spell/spell.py", "/mudserve/map/map.py", "/mudserve/settings.py"], "/mudserve/__init__.py": ["/mudserve/models/base.py"], "/tests/serializer/serializer.py": ["/tests/utils.py"], "/tests/serializer/__init__.py": ["/tests/serializer/serializer.py"], "/mudserve/models/base.py": ["/mudserve/settings.py"], "/mudserve/map/map.py": ["/mudserve/__init__.py"], "/mudserve/models/user.py": ["/mudserve/models/base.py"], "/mudserve/models/auth.py": ["/mudserve/models/base.py", "/mudserve/models/user.py"], "/mudserve/cache.py": ["/mudserve/settings.py"], "/mudserve/combat/fight.py": ["/mudserve/__init__.py", "/mudserve/spell/spell.py", "/mudserve/combat/unit.py"], "/mudserve/spell/spell.py": ["/mudserve/__init__.py", "/mudserve/database/objects.py", "/mudserve/spell/spelleffect.py"], "/tests/utils.py": ["/tests/__init__.py"], "/mudserve/auth/authhandler.py": ["/mudserve/settings.py", "/mudserve/models/auth.py", "/mudserve/__init__.py"]} |
73,358 | jleclanche/mudserve | refs/heads/master | /mudserve/spell/spelleffect.py | """
Handles executing spell effects on targets.
This is accomplished by some black magic, mostly to make the registration
of new handlers easier. We create a metaclass that keeps track of all
sub classes and their registered effect codes.
"""
from mudserve.mudrpc.spell.effect.ttypes import SpellEffect, SpellEffectCode
from mudserve.database.objects import DatabaseObjectHandler
class _SpellEffectMeta(type):
def __init__(cls, *args):
super(_SpellEffectMeta, cls).__init__(*args)
# If the cls._handler attribute does not exist,
# we're currently handling the SpellEffectHandler
# base class, so we'll just initialize it without
# adding the EFFECT_CODE to the dict (since there is none).
if not hasattr(cls, '_handlers'):
cls._handlers = {}
else:
cls._handlers[cls.EFFECT_CODE] = cls
class SpellEffectHandler(DatabaseObjectHandler):
__metaclass__ = _SpellEffectMeta
def __new__(cls, effect):
"""
Retrieves a handler given an effect code.
@param effect_code
The effect code to retrieve the handler for.
@return
An initialized spell effect handler for the given effect code.
@throws KeyError
If the handler is not found.
"""
# Make sure this is the call to SpellEffectHandler and not
# a subclass, since initialization of the subclass also
# calls this __new__ method.
if cls is SpellEffectHandler:
handler = cls._handlers[effect.effectCode](effect)
return handler
else:
return super(SpellEffectHandler, cls).__new__(cls)
def __init__(self, effect):
self.effect = effect
def execute(self, caster, target, *args):
raise NotImplementedError
@classmethod
def from_python(cls, data):
effect = SpellEffect()
effect.effectCode = SpellEffectCode._NAMES_TO_VALUES[data['effectCode']]
effect.arg1 = data.get('arg1')
effect.arg2 = data.get('arg2')
effect.arg3 = data.get('arg3')
return effect
class DamageEffectHandler(SpellEffectHandler):
EFFECT_CODE = SpellEffectCode.DAMAGE
def execute(self, caster, target):
target.damage(self.effect.arg1)
class HealEffectHandler(SpellEffectHandler):
EFFECT_CODE = SpellEffectCode.HEAL
def execute(self, caster, target):
target.heal(self.effect.arg1)
class KillEffectHandler(SpellEffectHandler):
EFFECT_CODE = SpellEffectCode.KILL
def execute(self, caster, target):
target.kill()
| {"/mudserve/combat/handler.py": ["/mudserve/auth/authhandler.py", "/mudserve/combat/fight.py"], "/mudserve/spell/spelleffect.py": ["/mudserve/database/objects.py"], "/mudserve/database/databases.py": ["/mudserve/spell/spell.py", "/mudserve/map/map.py", "/mudserve/settings.py"], "/mudserve/__init__.py": ["/mudserve/models/base.py"], "/tests/serializer/serializer.py": ["/tests/utils.py"], "/tests/serializer/__init__.py": ["/tests/serializer/serializer.py"], "/mudserve/models/base.py": ["/mudserve/settings.py"], "/mudserve/map/map.py": ["/mudserve/__init__.py"], "/mudserve/models/user.py": ["/mudserve/models/base.py"], "/mudserve/models/auth.py": ["/mudserve/models/base.py", "/mudserve/models/user.py"], "/mudserve/cache.py": ["/mudserve/settings.py"], "/mudserve/combat/fight.py": ["/mudserve/__init__.py", "/mudserve/spell/spell.py", "/mudserve/combat/unit.py"], "/mudserve/spell/spell.py": ["/mudserve/__init__.py", "/mudserve/database/objects.py", "/mudserve/spell/spelleffect.py"], "/tests/utils.py": ["/tests/__init__.py"], "/mudserve/auth/authhandler.py": ["/mudserve/settings.py", "/mudserve/models/auth.py", "/mudserve/__init__.py"]} |
73,359 | jleclanche/mudserve | refs/heads/master | /mudserve/database/databases.py | from os.path import abspath, join
from mudserve.mudrpc.database.ttypes import SpellDB, MapDB
from mudserve.spell.spell import SpellHandler
from mudserve.map.map import MapHandler
from mudserve.settings import MUDSERVE_ROOT
class DatabaseBase(object):
@classmethod
def get_file_path(cls):
"""
Returns the path to the database file relative to MUDSERVE_ROOT.
"""
return "database/db/%s.db" % cls.DATABASE_NAME
@classmethod
def get_json_path(cls):
"""
Returns the path to the json file containing object definitions,
relative to MUDSERVE_ROOT.
"""
return "database/src/%s.json" % cls.DATABASE_NAME
@classmethod
def get_object(cls, object_id):
"""
Retrieves the object from the database with the given object id.
"""
db = cache.get_memory_file(cls.DATABASE_STRUCT, cls.get_file_path())
# We use 0-indexed lists but like our object id's to start at 1.
obj = db.objects[object_id]
return cls.OBJECT_HANDLER(obj)
@classmethod
def from_python(cls, objects):
"""
Converts an id => object dict into a database instance.
"""
object_dict = {}
for obj in objects:
object_dict[obj['id']] = cls.OBJECT_HANDLER.from_python(obj)
# Create and return database
db = cls.DATABASE_STRUCT(objects=object_dict)
return db
class SpellDatabase(DatabaseBase):
DATABASE_NAME = "spell"
DATABASE_STRUCT = SpellDB
OBJECT_HANDLER = SpellHandler
class MapDatabase(DatabaseBase):
DATABASE_NAME = "map"
DATABASE_STRUCT = MapDB
OBJECT_HANDLER = MapHandler
# A map from db name to db.
DATABASE_MAP = dict((db.DATABASE_NAME, db) for db in
(SpellDatabase, MapDatabase)
)
| {"/mudserve/combat/handler.py": ["/mudserve/auth/authhandler.py", "/mudserve/combat/fight.py"], "/mudserve/spell/spelleffect.py": ["/mudserve/database/objects.py"], "/mudserve/database/databases.py": ["/mudserve/spell/spell.py", "/mudserve/map/map.py", "/mudserve/settings.py"], "/mudserve/__init__.py": ["/mudserve/models/base.py"], "/tests/serializer/serializer.py": ["/tests/utils.py"], "/tests/serializer/__init__.py": ["/tests/serializer/serializer.py"], "/mudserve/models/base.py": ["/mudserve/settings.py"], "/mudserve/map/map.py": ["/mudserve/__init__.py"], "/mudserve/models/user.py": ["/mudserve/models/base.py"], "/mudserve/models/auth.py": ["/mudserve/models/base.py", "/mudserve/models/user.py"], "/mudserve/cache.py": ["/mudserve/settings.py"], "/mudserve/combat/fight.py": ["/mudserve/__init__.py", "/mudserve/spell/spell.py", "/mudserve/combat/unit.py"], "/mudserve/spell/spell.py": ["/mudserve/__init__.py", "/mudserve/database/objects.py", "/mudserve/spell/spelleffect.py"], "/tests/utils.py": ["/tests/__init__.py"], "/mudserve/auth/authhandler.py": ["/mudserve/settings.py", "/mudserve/models/auth.py", "/mudserve/__init__.py"]} |
73,360 | jleclanche/mudserve | refs/heads/master | /mudserve/__init__.py | from sqlalchemy.orm import sessionmaker
from mudserve.models.base import engine
dbsession = sessionmaker(bind=engine)
| {"/mudserve/combat/handler.py": ["/mudserve/auth/authhandler.py", "/mudserve/combat/fight.py"], "/mudserve/spell/spelleffect.py": ["/mudserve/database/objects.py"], "/mudserve/database/databases.py": ["/mudserve/spell/spell.py", "/mudserve/map/map.py", "/mudserve/settings.py"], "/mudserve/__init__.py": ["/mudserve/models/base.py"], "/tests/serializer/serializer.py": ["/tests/utils.py"], "/tests/serializer/__init__.py": ["/tests/serializer/serializer.py"], "/mudserve/models/base.py": ["/mudserve/settings.py"], "/mudserve/map/map.py": ["/mudserve/__init__.py"], "/mudserve/models/user.py": ["/mudserve/models/base.py"], "/mudserve/models/auth.py": ["/mudserve/models/base.py", "/mudserve/models/user.py"], "/mudserve/cache.py": ["/mudserve/settings.py"], "/mudserve/combat/fight.py": ["/mudserve/__init__.py", "/mudserve/spell/spell.py", "/mudserve/combat/unit.py"], "/mudserve/spell/spell.py": ["/mudserve/__init__.py", "/mudserve/database/objects.py", "/mudserve/spell/spelleffect.py"], "/tests/utils.py": ["/tests/__init__.py"], "/mudserve/auth/authhandler.py": ["/mudserve/settings.py", "/mudserve/models/auth.py", "/mudserve/__init__.py"]} |
73,361 | jleclanche/mudserve | refs/heads/master | /tests/serializer/serializer.py | import subprocess
import sys
import unittest
from os.path import dirname
from ..utils import compile_thrift, get_tempfile
from mudserve.serialize import Serializer
class TestSerializer(unittest.TestCase):
def setUp(self):
compile_thrift("serialize.thrift")
from serialize.ttypes import TestObj
self.cls = TestObj
self.serializer = Serializer()
def test_str(self):
inst = self.cls(name="test", id=5)
str = self.serializer.to_string(inst)
obj = self.serializer.from_string(self.cls, str)
assert obj == inst
def test_file(self):
f = get_tempfile()
inst = self.cls(name="test", id=6)
str = self.serializer.to_file(inst, f.name)
obj = self.serializer.from_file(self.cls, f.name)
assert obj == inst
if __name__ == "__main__":
unittest.main()
| {"/mudserve/combat/handler.py": ["/mudserve/auth/authhandler.py", "/mudserve/combat/fight.py"], "/mudserve/spell/spelleffect.py": ["/mudserve/database/objects.py"], "/mudserve/database/databases.py": ["/mudserve/spell/spell.py", "/mudserve/map/map.py", "/mudserve/settings.py"], "/mudserve/__init__.py": ["/mudserve/models/base.py"], "/tests/serializer/serializer.py": ["/tests/utils.py"], "/tests/serializer/__init__.py": ["/tests/serializer/serializer.py"], "/mudserve/models/base.py": ["/mudserve/settings.py"], "/mudserve/map/map.py": ["/mudserve/__init__.py"], "/mudserve/models/user.py": ["/mudserve/models/base.py"], "/mudserve/models/auth.py": ["/mudserve/models/base.py", "/mudserve/models/user.py"], "/mudserve/cache.py": ["/mudserve/settings.py"], "/mudserve/combat/fight.py": ["/mudserve/__init__.py", "/mudserve/spell/spell.py", "/mudserve/combat/unit.py"], "/mudserve/spell/spell.py": ["/mudserve/__init__.py", "/mudserve/database/objects.py", "/mudserve/spell/spelleffect.py"], "/tests/utils.py": ["/tests/__init__.py"], "/mudserve/auth/authhandler.py": ["/mudserve/settings.py", "/mudserve/models/auth.py", "/mudserve/__init__.py"]} |
73,362 | jleclanche/mudserve | refs/heads/master | /mudserve/database/objects.py | class DatabaseObjectHandler(object):
"""
Defines the minimum interface for database object handlers.
These methods are required in order to properly serialize to
and from disk and for generating Thrift structures from json.
The exposed interface may implement whatever methods deemed
necessary as long as the minimum interface defined below is implemented.
"""
@classmethod
def from_python(cls, data):
"""
Returns a Thrift structure instance from python's dict representation
of the object. This is used for serializing the json files into
Thrift structures and later to disk.
"""
raise NotImplementedError
def __init__(self, thrift_object):
"""
Takes a Thrift structure and initializes the object handler for
use in python code.
"""
raise NotImplementedError
| {"/mudserve/combat/handler.py": ["/mudserve/auth/authhandler.py", "/mudserve/combat/fight.py"], "/mudserve/spell/spelleffect.py": ["/mudserve/database/objects.py"], "/mudserve/database/databases.py": ["/mudserve/spell/spell.py", "/mudserve/map/map.py", "/mudserve/settings.py"], "/mudserve/__init__.py": ["/mudserve/models/base.py"], "/tests/serializer/serializer.py": ["/tests/utils.py"], "/tests/serializer/__init__.py": ["/tests/serializer/serializer.py"], "/mudserve/models/base.py": ["/mudserve/settings.py"], "/mudserve/map/map.py": ["/mudserve/__init__.py"], "/mudserve/models/user.py": ["/mudserve/models/base.py"], "/mudserve/models/auth.py": ["/mudserve/models/base.py", "/mudserve/models/user.py"], "/mudserve/cache.py": ["/mudserve/settings.py"], "/mudserve/combat/fight.py": ["/mudserve/__init__.py", "/mudserve/spell/spell.py", "/mudserve/combat/unit.py"], "/mudserve/spell/spell.py": ["/mudserve/__init__.py", "/mudserve/database/objects.py", "/mudserve/spell/spelleffect.py"], "/tests/utils.py": ["/tests/__init__.py"], "/mudserve/auth/authhandler.py": ["/mudserve/settings.py", "/mudserve/models/auth.py", "/mudserve/__init__.py"]} |
73,363 | jleclanche/mudserve | refs/heads/master | /tests/serializer/__init__.py | import unittest
from .serializer import TestSerializer
suite = unittest.makeSuite(TestSerializer, 'test')
| {"/mudserve/combat/handler.py": ["/mudserve/auth/authhandler.py", "/mudserve/combat/fight.py"], "/mudserve/spell/spelleffect.py": ["/mudserve/database/objects.py"], "/mudserve/database/databases.py": ["/mudserve/spell/spell.py", "/mudserve/map/map.py", "/mudserve/settings.py"], "/mudserve/__init__.py": ["/mudserve/models/base.py"], "/tests/serializer/serializer.py": ["/tests/utils.py"], "/tests/serializer/__init__.py": ["/tests/serializer/serializer.py"], "/mudserve/models/base.py": ["/mudserve/settings.py"], "/mudserve/map/map.py": ["/mudserve/__init__.py"], "/mudserve/models/user.py": ["/mudserve/models/base.py"], "/mudserve/models/auth.py": ["/mudserve/models/base.py", "/mudserve/models/user.py"], "/mudserve/cache.py": ["/mudserve/settings.py"], "/mudserve/combat/fight.py": ["/mudserve/__init__.py", "/mudserve/spell/spell.py", "/mudserve/combat/unit.py"], "/mudserve/spell/spell.py": ["/mudserve/__init__.py", "/mudserve/database/objects.py", "/mudserve/spell/spelleffect.py"], "/tests/utils.py": ["/tests/__init__.py"], "/mudserve/auth/authhandler.py": ["/mudserve/settings.py", "/mudserve/models/auth.py", "/mudserve/__init__.py"]} |
73,364 | jleclanche/mudserve | refs/heads/master | /mudserve/models/base.py | # Database configuration
from mudserve.settings import DATABASE
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
engine = create_engine("%(type)s://%(username)s:%(password)s@%(hostname)s:%(port)d/%(database)s" % DATABASE)
Base = declarative_base(bind=engine)
| {"/mudserve/combat/handler.py": ["/mudserve/auth/authhandler.py", "/mudserve/combat/fight.py"], "/mudserve/spell/spelleffect.py": ["/mudserve/database/objects.py"], "/mudserve/database/databases.py": ["/mudserve/spell/spell.py", "/mudserve/map/map.py", "/mudserve/settings.py"], "/mudserve/__init__.py": ["/mudserve/models/base.py"], "/tests/serializer/serializer.py": ["/tests/utils.py"], "/tests/serializer/__init__.py": ["/tests/serializer/serializer.py"], "/mudserve/models/base.py": ["/mudserve/settings.py"], "/mudserve/map/map.py": ["/mudserve/__init__.py"], "/mudserve/models/user.py": ["/mudserve/models/base.py"], "/mudserve/models/auth.py": ["/mudserve/models/base.py", "/mudserve/models/user.py"], "/mudserve/cache.py": ["/mudserve/settings.py"], "/mudserve/combat/fight.py": ["/mudserve/__init__.py", "/mudserve/spell/spell.py", "/mudserve/combat/unit.py"], "/mudserve/spell/spell.py": ["/mudserve/__init__.py", "/mudserve/database/objects.py", "/mudserve/spell/spelleffect.py"], "/tests/utils.py": ["/tests/__init__.py"], "/mudserve/auth/authhandler.py": ["/mudserve/settings.py", "/mudserve/models/auth.py", "/mudserve/__init__.py"]} |
73,365 | jleclanche/mudserve | refs/heads/master | /mudserve/map/map.py | from mudserve import cache
class MapHandler(object):
def __init__(self, map):
self.map = map
| {"/mudserve/combat/handler.py": ["/mudserve/auth/authhandler.py", "/mudserve/combat/fight.py"], "/mudserve/spell/spelleffect.py": ["/mudserve/database/objects.py"], "/mudserve/database/databases.py": ["/mudserve/spell/spell.py", "/mudserve/map/map.py", "/mudserve/settings.py"], "/mudserve/__init__.py": ["/mudserve/models/base.py"], "/tests/serializer/serializer.py": ["/tests/utils.py"], "/tests/serializer/__init__.py": ["/tests/serializer/serializer.py"], "/mudserve/models/base.py": ["/mudserve/settings.py"], "/mudserve/map/map.py": ["/mudserve/__init__.py"], "/mudserve/models/user.py": ["/mudserve/models/base.py"], "/mudserve/models/auth.py": ["/mudserve/models/base.py", "/mudserve/models/user.py"], "/mudserve/cache.py": ["/mudserve/settings.py"], "/mudserve/combat/fight.py": ["/mudserve/__init__.py", "/mudserve/spell/spell.py", "/mudserve/combat/unit.py"], "/mudserve/spell/spell.py": ["/mudserve/__init__.py", "/mudserve/database/objects.py", "/mudserve/spell/spelleffect.py"], "/tests/utils.py": ["/tests/__init__.py"], "/mudserve/auth/authhandler.py": ["/mudserve/settings.py", "/mudserve/models/auth.py", "/mudserve/__init__.py"]} |
73,366 | jleclanche/mudserve | refs/heads/master | /mudserve/models/user.py | from mudserve.models.base import Base
from sqlalchemy import Column, String, Integer
class User(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True)
email = Column(String(50))
password = Column(String(30))
| {"/mudserve/combat/handler.py": ["/mudserve/auth/authhandler.py", "/mudserve/combat/fight.py"], "/mudserve/spell/spelleffect.py": ["/mudserve/database/objects.py"], "/mudserve/database/databases.py": ["/mudserve/spell/spell.py", "/mudserve/map/map.py", "/mudserve/settings.py"], "/mudserve/__init__.py": ["/mudserve/models/base.py"], "/tests/serializer/serializer.py": ["/tests/utils.py"], "/tests/serializer/__init__.py": ["/tests/serializer/serializer.py"], "/mudserve/models/base.py": ["/mudserve/settings.py"], "/mudserve/map/map.py": ["/mudserve/__init__.py"], "/mudserve/models/user.py": ["/mudserve/models/base.py"], "/mudserve/models/auth.py": ["/mudserve/models/base.py", "/mudserve/models/user.py"], "/mudserve/cache.py": ["/mudserve/settings.py"], "/mudserve/combat/fight.py": ["/mudserve/__init__.py", "/mudserve/spell/spell.py", "/mudserve/combat/unit.py"], "/mudserve/spell/spell.py": ["/mudserve/__init__.py", "/mudserve/database/objects.py", "/mudserve/spell/spelleffect.py"], "/tests/utils.py": ["/tests/__init__.py"], "/mudserve/auth/authhandler.py": ["/mudserve/settings.py", "/mudserve/models/auth.py", "/mudserve/__init__.py"]} |
73,367 | jleclanche/mudserve | refs/heads/master | /mudserve/server.py | from mudserve.combat.handler import CombatHandler
from mudserve.mudrpc.combat import CombatService
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
PORT = 9090
handler = CombatHandler()
processor = CombatService.Processor(handler)
transport = TSocket.TServerSocket(PORT)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
server = TServer.TForkingServer(processor, transport, tfactory, pfactory)
print "Starting the server on port %i..." % (PORT)
server.serve()
print "done."
| {"/mudserve/combat/handler.py": ["/mudserve/auth/authhandler.py", "/mudserve/combat/fight.py"], "/mudserve/spell/spelleffect.py": ["/mudserve/database/objects.py"], "/mudserve/database/databases.py": ["/mudserve/spell/spell.py", "/mudserve/map/map.py", "/mudserve/settings.py"], "/mudserve/__init__.py": ["/mudserve/models/base.py"], "/tests/serializer/serializer.py": ["/tests/utils.py"], "/tests/serializer/__init__.py": ["/tests/serializer/serializer.py"], "/mudserve/models/base.py": ["/mudserve/settings.py"], "/mudserve/map/map.py": ["/mudserve/__init__.py"], "/mudserve/models/user.py": ["/mudserve/models/base.py"], "/mudserve/models/auth.py": ["/mudserve/models/base.py", "/mudserve/models/user.py"], "/mudserve/cache.py": ["/mudserve/settings.py"], "/mudserve/combat/fight.py": ["/mudserve/__init__.py", "/mudserve/spell/spell.py", "/mudserve/combat/unit.py"], "/mudserve/spell/spell.py": ["/mudserve/__init__.py", "/mudserve/database/objects.py", "/mudserve/spell/spelleffect.py"], "/tests/utils.py": ["/tests/__init__.py"], "/mudserve/auth/authhandler.py": ["/mudserve/settings.py", "/mudserve/models/auth.py", "/mudserve/__init__.py"]} |
73,368 | jleclanche/mudserve | refs/heads/master | /tests/constants.py | from os.path import join
from tempfile import mkdtemp
TESTS_PATH = "./tests/"
GEN_PATH = join(TESTS_PATH, "gen-py/")
TMP_PATH = mkdtemp()
| {"/mudserve/combat/handler.py": ["/mudserve/auth/authhandler.py", "/mudserve/combat/fight.py"], "/mudserve/spell/spelleffect.py": ["/mudserve/database/objects.py"], "/mudserve/database/databases.py": ["/mudserve/spell/spell.py", "/mudserve/map/map.py", "/mudserve/settings.py"], "/mudserve/__init__.py": ["/mudserve/models/base.py"], "/tests/serializer/serializer.py": ["/tests/utils.py"], "/tests/serializer/__init__.py": ["/tests/serializer/serializer.py"], "/mudserve/models/base.py": ["/mudserve/settings.py"], "/mudserve/map/map.py": ["/mudserve/__init__.py"], "/mudserve/models/user.py": ["/mudserve/models/base.py"], "/mudserve/models/auth.py": ["/mudserve/models/base.py", "/mudserve/models/user.py"], "/mudserve/cache.py": ["/mudserve/settings.py"], "/mudserve/combat/fight.py": ["/mudserve/__init__.py", "/mudserve/spell/spell.py", "/mudserve/combat/unit.py"], "/mudserve/spell/spell.py": ["/mudserve/__init__.py", "/mudserve/database/objects.py", "/mudserve/spell/spelleffect.py"], "/tests/utils.py": ["/tests/__init__.py"], "/mudserve/auth/authhandler.py": ["/mudserve/settings.py", "/mudserve/models/auth.py", "/mudserve/__init__.py"]} |
73,369 | jleclanche/mudserve | refs/heads/master | /mudserve/settings.py | from os.path import join, dirname, abspath
# The absolute path to the mudserve root.
MUDSERVE_ROOT = abspath(dirname(__file__))
# The database configuration
DATABASE = {
"type": "postgresql",
"username": "ctmud",
"password": "sR89fl1GhM5",
"hostname": "",
"database": "mudserve",
"port": 5432
}
# Installed database models
INSTALLED_MODELS = (
"mudserve.models.user",
)
SECRET_KEY = "11f458a541f44dcf8752c84d16ac8858"
| {"/mudserve/combat/handler.py": ["/mudserve/auth/authhandler.py", "/mudserve/combat/fight.py"], "/mudserve/spell/spelleffect.py": ["/mudserve/database/objects.py"], "/mudserve/database/databases.py": ["/mudserve/spell/spell.py", "/mudserve/map/map.py", "/mudserve/settings.py"], "/mudserve/__init__.py": ["/mudserve/models/base.py"], "/tests/serializer/serializer.py": ["/tests/utils.py"], "/tests/serializer/__init__.py": ["/tests/serializer/serializer.py"], "/mudserve/models/base.py": ["/mudserve/settings.py"], "/mudserve/map/map.py": ["/mudserve/__init__.py"], "/mudserve/models/user.py": ["/mudserve/models/base.py"], "/mudserve/models/auth.py": ["/mudserve/models/base.py", "/mudserve/models/user.py"], "/mudserve/cache.py": ["/mudserve/settings.py"], "/mudserve/combat/fight.py": ["/mudserve/__init__.py", "/mudserve/spell/spell.py", "/mudserve/combat/unit.py"], "/mudserve/spell/spell.py": ["/mudserve/__init__.py", "/mudserve/database/objects.py", "/mudserve/spell/spelleffect.py"], "/tests/utils.py": ["/tests/__init__.py"], "/mudserve/auth/authhandler.py": ["/mudserve/settings.py", "/mudserve/models/auth.py", "/mudserve/__init__.py"]} |
73,370 | jleclanche/mudserve | refs/heads/master | /mudserve/models/auth.py | from hashlib import md5
from sqlalchemy import Column, String, Integer, ForeignKey, DateTime
from sqlalchemy.orm import relationship, backref
from mudserve.models.base import Base
from mudserve.models.user import User
class Auth(Base):
__tablename__ = "auth"
auth_token = Column(String(40), primary_key=True)
user_id = Column(Integer, ForeignKey("users.id"))
expire_time = Column(DateTime, index=True)
user = relationship(User, backref=backref("auth_set"))
| {"/mudserve/combat/handler.py": ["/mudserve/auth/authhandler.py", "/mudserve/combat/fight.py"], "/mudserve/spell/spelleffect.py": ["/mudserve/database/objects.py"], "/mudserve/database/databases.py": ["/mudserve/spell/spell.py", "/mudserve/map/map.py", "/mudserve/settings.py"], "/mudserve/__init__.py": ["/mudserve/models/base.py"], "/tests/serializer/serializer.py": ["/tests/utils.py"], "/tests/serializer/__init__.py": ["/tests/serializer/serializer.py"], "/mudserve/models/base.py": ["/mudserve/settings.py"], "/mudserve/map/map.py": ["/mudserve/__init__.py"], "/mudserve/models/user.py": ["/mudserve/models/base.py"], "/mudserve/models/auth.py": ["/mudserve/models/base.py", "/mudserve/models/user.py"], "/mudserve/cache.py": ["/mudserve/settings.py"], "/mudserve/combat/fight.py": ["/mudserve/__init__.py", "/mudserve/spell/spell.py", "/mudserve/combat/unit.py"], "/mudserve/spell/spell.py": ["/mudserve/__init__.py", "/mudserve/database/objects.py", "/mudserve/spell/spelleffect.py"], "/tests/utils.py": ["/tests/__init__.py"], "/mudserve/auth/authhandler.py": ["/mudserve/settings.py", "/mudserve/models/auth.py", "/mudserve/__init__.py"]} |
73,371 | jleclanche/mudserve | refs/heads/master | /mudserve/cache.py | import memcache
from mudserve.serialize import Serializer
from mudserve.settings import MUDSERVE_ROOT
from os.path import join, normpath
from hashlib import sha1
MEMCACHED_HOST = "127.0.0.1:11211"
client = memcache.Client([MEMCACHED_HOST], debug=0)
def get(key, namespace=None):
if namespace is not None:
key = str(namespace)+"-"+key
return client.get(key)
def set(key, value, namespace=None):
if namespace is not None:
key = str(namespace)+"-"+key
return client.set(key, value)
# Utility methods
def get_struct(Struct, key):
"""
Retrieves a Thrift structure from memcached memory.
"""
data = client.get(key)
if data is None:
return None
return _serializer.from_string(Struct, data)
def set_struct(Struct, key, inst):
"""
Sets a Thrift structure in memcached memory.
"""
return client.set(key, _serializer.to_string(inst))
def get_memory_file(Struct, filepath):
"""
Retrieves a Thrift struct from memory, or if it does not exist,
retrieves it from disk and caches it in memory.
@param Struct
The type of Thrift struct to retrieve.
@param filepath
The file path to the file on disk, relative to DATA_ROOT.
"""
key = _get_path_key(filepath)
# Check if it exists in memory
obj = get_struct(Struct, key)
if obj is not None:
return obj
# Read from disk
path = join(MUDSERVE_ROOT, filepath)
f = open(path, "rb")
data = f.read()
f.close()
# Send raw data to memcached
client.set(key, data)
return _serializer.from_string(Struct, data)
def expire_memory_file(filepath):
"""
This method expires the cache related to a specific file in memory.
"""
client.delete(_get_path_key(filepath))
# Help variables below
_serializer = Serializer()
def _get_path_key(filepath):
# Compute key as the sha1-hash of the file path
# (should be a path relative to MUDSERVE_ROOT)
path = normpath(filepath)
key = sha1(path).hexdigest()
return key
| {"/mudserve/combat/handler.py": ["/mudserve/auth/authhandler.py", "/mudserve/combat/fight.py"], "/mudserve/spell/spelleffect.py": ["/mudserve/database/objects.py"], "/mudserve/database/databases.py": ["/mudserve/spell/spell.py", "/mudserve/map/map.py", "/mudserve/settings.py"], "/mudserve/__init__.py": ["/mudserve/models/base.py"], "/tests/serializer/serializer.py": ["/tests/utils.py"], "/tests/serializer/__init__.py": ["/tests/serializer/serializer.py"], "/mudserve/models/base.py": ["/mudserve/settings.py"], "/mudserve/map/map.py": ["/mudserve/__init__.py"], "/mudserve/models/user.py": ["/mudserve/models/base.py"], "/mudserve/models/auth.py": ["/mudserve/models/base.py", "/mudserve/models/user.py"], "/mudserve/cache.py": ["/mudserve/settings.py"], "/mudserve/combat/fight.py": ["/mudserve/__init__.py", "/mudserve/spell/spell.py", "/mudserve/combat/unit.py"], "/mudserve/spell/spell.py": ["/mudserve/__init__.py", "/mudserve/database/objects.py", "/mudserve/spell/spelleffect.py"], "/tests/utils.py": ["/tests/__init__.py"], "/mudserve/auth/authhandler.py": ["/mudserve/settings.py", "/mudserve/models/auth.py", "/mudserve/__init__.py"]} |
73,372 | jleclanche/mudserve | refs/heads/master | /utils/client.py | #!/usr/bin/env python
import sys
import time
sys.path.append("../")
from mudserve.mudrpc.combat.CombatService import Client
from thrift import Thrift
from thrift.transport.TSocket import TSocket
from thrift.transport.TTransport import TBufferedTransport
from thrift.protocol.TBinaryProtocol import TBinaryProtocol
try:
# Make socket
transport = TSocket("213.100.51.33", 9090)
# Buffering is critical. Raw sockets are very slow
transport = TBufferedTransport(transport)
# Wrap in a protocol
protocol = TBinaryProtocol(transport)
# Create a client to use the protocol encoder
client = Client(protocol)
# Connect!
transport.open()
while True:
status = client.getStatus("auth1", "fight1")
print "Status update:\n%r" % (status)
if status.currentTurn == "player1":
print "It's your turn, casting spell!\n"
client.castSpell("auth1", "fight1", 1, "player2")
print "---------------------"
time.sleep(1)
# Close!
transport.close()
except Thrift.TException, e:
print e.message
| {"/mudserve/combat/handler.py": ["/mudserve/auth/authhandler.py", "/mudserve/combat/fight.py"], "/mudserve/spell/spelleffect.py": ["/mudserve/database/objects.py"], "/mudserve/database/databases.py": ["/mudserve/spell/spell.py", "/mudserve/map/map.py", "/mudserve/settings.py"], "/mudserve/__init__.py": ["/mudserve/models/base.py"], "/tests/serializer/serializer.py": ["/tests/utils.py"], "/tests/serializer/__init__.py": ["/tests/serializer/serializer.py"], "/mudserve/models/base.py": ["/mudserve/settings.py"], "/mudserve/map/map.py": ["/mudserve/__init__.py"], "/mudserve/models/user.py": ["/mudserve/models/base.py"], "/mudserve/models/auth.py": ["/mudserve/models/base.py", "/mudserve/models/user.py"], "/mudserve/cache.py": ["/mudserve/settings.py"], "/mudserve/combat/fight.py": ["/mudserve/__init__.py", "/mudserve/spell/spell.py", "/mudserve/combat/unit.py"], "/mudserve/spell/spell.py": ["/mudserve/__init__.py", "/mudserve/database/objects.py", "/mudserve/spell/spelleffect.py"], "/tests/utils.py": ["/tests/__init__.py"], "/mudserve/auth/authhandler.py": ["/mudserve/settings.py", "/mudserve/models/auth.py", "/mudserve/__init__.py"]} |
73,373 | jleclanche/mudserve | refs/heads/master | /manage.py | #!/usr/bin/env python
"""
Provides several utility methods:
- manage.py build
Builds the thrift files in the project
- manage.py syncdb
Creates all registered database tables.
- manage.py dbshell
Opens the PostgreSQL database shell.
- manage.py serialize <database>
Serializes the Thrift database <database> to disk.
"""
import sys
import os
from os.path import join, dirname, abspath, join, normpath
from simplejson import loads
from subprocess import call
from mudserve.models import base
from mudserve.settings import MUDSERVE_ROOT, INSTALLED_MODELS, DATABASE
from mudserve.serialize import Serializer
from mudserve import cache
def main():
args = sys.argv
if len(args) < 2:
print "manage.py requires an action. See help(manage)."
return
arg = args[1]
if arg in ("b", "build"):
print "Building Thrift files...",
cmd_build()
elif arg in ("s", "serialize"):
cmd_serialize(args[2:])
elif arg == "syncdb":
print "Synchronizing database...",
cmd_syncdb()
elif arg == "dbshell":
cmd_dbshell()
else:
print "Argument %r not recognized." % (arg)
return
print "Ok."
def cmd_build():
    """
    Regenerate the Python Thrift bindings.

    Runs the thrift compiler recursively over include.thrift, relocates
    the generated mudserve/mudrpc package to ./mudrpc/, and removes the
    temporary gen-py output tree.
    """
    generated_root = abspath(join(MUDSERVE_ROOT, "gen-py/"))
    generated_rpc = join(generated_root, "mudserve/mudrpc/")
    rpc_target = abspath(join(MUDSERVE_ROOT, "mudrpc/"))
    thrift_source = abspath(join(MUDSERVE_ROOT, "include.thrift"))
    # Drop any previous bindings before regenerating.
    call(["rm", "-rf", rpc_target])
    # Generate thrift files under MUDSERVE_ROOT/gen-py.
    call(["thrift", "-r", "--gen", "py:new_style", "-o", MUDSERVE_ROOT, thrift_source])
    # Promote the generated package, then clear the now-empty tree.
    call(["mv", generated_rpc, rpc_target])
    call(["rm", "-rf", generated_root])
def cmd_syncdb():
    """Import every configured model module, then create all tables."""
    for app in INSTALLED_MODELS:
        try:
            __import__(app)
        except ImportError:
            print "Could not import module '%s'. Aborting." % app
            # NOTE(review): ``break`` only stops the import loop; despite
            # the "Aborting" message, create_all below still runs.
            break
    # Create tables for every model registered on the shared metadata.
    base.Base.metadata.create_all(base.engine)
def cmd_dbshell():
    """
    Launch psql against the configured database, forwarding credentials
    from the DATABASE settings dict. Replaces the current process on
    POSIX; on Windows the exit status of the spawned shell is propagated.
    """
    command = ["psql"]
    for flag, key in (("-U", "username"), ("-h", "hostname")):
        if DATABASE[key]:
            command += [flag, DATABASE[key]]
    if DATABASE["port"]:
        command += ["-p", str(DATABASE["port"])]
    command.append(DATABASE["database"])
    if os.name == "nt":
        # Windows has no execvp-style process replacement for this use.
        sys.exit(os.system(" ".join(command)))
    else:
        os.execvp(command[0], command)
def cmd_serialize(args):
    """
    Serialize the named Thrift databases to disk.

    Each argument names an entry in DATABASE_MAP; its JSON source is
    loaded, converted to a database instance, serialized under
    MUDSERVE_ROOT, and its cache entry expired. Unknown names are
    silently ignored; duplicates are processed once.
    """
    from mudserve.database.databases import DATABASE_MAP
    for arg in set(args):
        if arg in DATABASE_MAP:
            db = DATABASE_MAP[arg]
            # Grab the objects
            json_path = join(MUDSERVE_ROOT, db.get_json_path())
            # BUG FIX: use a context manager so the handle is closed even
            # if loads() raises (previously a bare open/close pair).
            with open(json_path, "r") as f:
                data = loads(f.read())
            # Create the database instance
            obj = db.from_python(data)
            # Serialize it to file
            db_file_path = db.get_file_path()
            serialize = Serializer()
            serialize.to_file(obj, join(MUDSERVE_ROOT, db_file_path))
            # Expire the cache entry so stale data is not served
            cache.expire_memory_file(db_file_path)
# Script entry point.
if __name__ == "__main__":
    main()
| {"/mudserve/combat/handler.py": ["/mudserve/auth/authhandler.py", "/mudserve/combat/fight.py"], "/mudserve/spell/spelleffect.py": ["/mudserve/database/objects.py"], "/mudserve/database/databases.py": ["/mudserve/spell/spell.py", "/mudserve/map/map.py", "/mudserve/settings.py"], "/mudserve/__init__.py": ["/mudserve/models/base.py"], "/tests/serializer/serializer.py": ["/tests/utils.py"], "/tests/serializer/__init__.py": ["/tests/serializer/serializer.py"], "/mudserve/models/base.py": ["/mudserve/settings.py"], "/mudserve/map/map.py": ["/mudserve/__init__.py"], "/mudserve/models/user.py": ["/mudserve/models/base.py"], "/mudserve/models/auth.py": ["/mudserve/models/base.py", "/mudserve/models/user.py"], "/mudserve/cache.py": ["/mudserve/settings.py"], "/mudserve/combat/fight.py": ["/mudserve/__init__.py", "/mudserve/spell/spell.py", "/mudserve/combat/unit.py"], "/mudserve/spell/spell.py": ["/mudserve/__init__.py", "/mudserve/database/objects.py", "/mudserve/spell/spelleffect.py"], "/tests/utils.py": ["/tests/__init__.py"], "/mudserve/auth/authhandler.py": ["/mudserve/settings.py", "/mudserve/models/auth.py", "/mudserve/__init__.py"]} |
73,374 | jleclanche/mudserve | refs/heads/master | /mudserve/combat/unit.py | """
Unit logic, for targets in a combat
"""
class UnitHandler(object):
    """
    Wraps a unit to expose combat mutations (damage/heal/kill) through a
    single choke point, so future history/combat-log bookkeeping has one
    place to hook into.
    """

    def __init__(self, unit):
        self.unit = unit

    def damage(self, amount):
        """Reduce the unit's health by ``amount``, clamped at zero."""
        remaining = self.unit.health - amount
        self.unit.health = remaining if remaining > 0 else 0

    def heal(self, amount):
        """Raise the unit's health by ``amount``, unless the unit is dead."""
        # Dead units cannot be healed (although they are certainly visible).
        if not self.is_alive():
            return
        self.unit.health += amount

    def kill(self):
        """Drop the unit's health straight to zero."""
        self.unit.health = 0

    def apply_aura(self, aura):
        """Attach an aura to the unit (not yet implemented)."""
        #self.unit.auras.append(aura)
        #self.log['auras'].append(aura)
        pass # NYI

    def is_alive(self):
        """Return True while the unit has any health left."""
        return self.unit.health > 0
| {"/mudserve/combat/handler.py": ["/mudserve/auth/authhandler.py", "/mudserve/combat/fight.py"], "/mudserve/spell/spelleffect.py": ["/mudserve/database/objects.py"], "/mudserve/database/databases.py": ["/mudserve/spell/spell.py", "/mudserve/map/map.py", "/mudserve/settings.py"], "/mudserve/__init__.py": ["/mudserve/models/base.py"], "/tests/serializer/serializer.py": ["/tests/utils.py"], "/tests/serializer/__init__.py": ["/tests/serializer/serializer.py"], "/mudserve/models/base.py": ["/mudserve/settings.py"], "/mudserve/map/map.py": ["/mudserve/__init__.py"], "/mudserve/models/user.py": ["/mudserve/models/base.py"], "/mudserve/models/auth.py": ["/mudserve/models/base.py", "/mudserve/models/user.py"], "/mudserve/cache.py": ["/mudserve/settings.py"], "/mudserve/combat/fight.py": ["/mudserve/__init__.py", "/mudserve/spell/spell.py", "/mudserve/combat/unit.py"], "/mudserve/spell/spell.py": ["/mudserve/__init__.py", "/mudserve/database/objects.py", "/mudserve/spell/spelleffect.py"], "/tests/utils.py": ["/tests/__init__.py"], "/mudserve/auth/authhandler.py": ["/mudserve/settings.py", "/mudserve/models/auth.py", "/mudserve/__init__.py"]} |
73,375 | jleclanche/mudserve | refs/heads/master | /mudserve/combat/fight.py | from mudserve import cache
from mudserve.spell.spell import SpellDatabaseHandler
from mudserve.mudrpc.combat.types.ttypes import CombatStatus
from mudserve.combat.unit import UnitHandler
class Fight(object):
    """
    The abstraction of a specific fight instance. This class will be
    instantiated dynamically from memory any time we want to perform some
    logic during a fight.
    NOTE! Only one method may be called for any one instance of the fight class,
    as the methods may mutate the internal state of the class and required
    attributes for other methods may be unset to facilitate speedy transfer
    with other methods.
    """
    def __init__(self, guid):
        # Get the CombatStatus from memory
        self.status = cache.get_struct(CombatStatus, guid)
        if self.status is None:
            # We need to grab the information from the database as this is
            # either an invalid fight or a fight no longer residing in memory
            # because it is not currently occurring.
            # TODO(review): the database fallback described above is not
            # implemented; an unknown guid is currently a hard error.
            raise AssertionError("Fight GUID not found.")
        self.fight_guid = guid
    def get_status(self, known_turn=None):
        """
        Returns a statusupdate of the fight. This may unset some fields if there
        has been no further updates since the last update. This is controlled
        by the known_turn parameter, described below.
        @param known_turn
        Controls how much data to return. If an integer is passed in it is
        used in order to judge how much data has already been sent to the
        client. If the known_turn parameter equals the internal current turn,
        several fields are unset in order to decrease total bandwidth and
        speed up delivery.
        @return
        Returns the current status of the fight.
        """
        # TODO: Update turnTime according to current time
        status = self.status
        # Inactive fights, or a client that is behind, always get the full struct.
        if not status.active or status.turnId != known_turn:
            return status
        # Unset fields that are already known
        for field in ("currentTurn", "units"):
            setattr(status, field, None)
        return status
    def cast_spell(self, spellId, casterGuid, targetGuid):
        """
        Casts the given spell targetting the given target.
        This function naively assumes that the request is issued by the current
        turn holder; THIS MUST BE VERIFIED WHEN RECEIVING THE REQUEST.
        @param spellId
        The id of the spell to cast.
        @param targetGuid
        The guid of the target to cast the spell on.
        """
        status = self.status
        # TODO: We have to verify that the player 1) has the spell and 2) can cast it
        spell = SpellDatabaseHandler.get_spell(spellId)
        # Retrieve the target and caster
        caster_unit = status.units[casterGuid]
        target_unit = status.units[targetGuid]
        caster = UnitHandler(caster_unit)
        target = UnitHandler(target_unit)
        spell.execute(caster, target)
        # Update the turn count and so on
        status.turnId += 1
        # TODO: This needs to correctly retrieve the next turn's player.
        # We'll just use the target of the spell for now.
        status.currentTurn = targetGuid
        # Persist the mutated status back to the memory cache.
        self.update_status()
    def update_status(self):
        """Write the (possibly mutated) CombatStatus back to the cache."""
        cache.set_struct(CombatStatus, self.fight_guid, self.status)
| {"/mudserve/combat/handler.py": ["/mudserve/auth/authhandler.py", "/mudserve/combat/fight.py"], "/mudserve/spell/spelleffect.py": ["/mudserve/database/objects.py"], "/mudserve/database/databases.py": ["/mudserve/spell/spell.py", "/mudserve/map/map.py", "/mudserve/settings.py"], "/mudserve/__init__.py": ["/mudserve/models/base.py"], "/tests/serializer/serializer.py": ["/tests/utils.py"], "/tests/serializer/__init__.py": ["/tests/serializer/serializer.py"], "/mudserve/models/base.py": ["/mudserve/settings.py"], "/mudserve/map/map.py": ["/mudserve/__init__.py"], "/mudserve/models/user.py": ["/mudserve/models/base.py"], "/mudserve/models/auth.py": ["/mudserve/models/base.py", "/mudserve/models/user.py"], "/mudserve/cache.py": ["/mudserve/settings.py"], "/mudserve/combat/fight.py": ["/mudserve/__init__.py", "/mudserve/spell/spell.py", "/mudserve/combat/unit.py"], "/mudserve/spell/spell.py": ["/mudserve/__init__.py", "/mudserve/database/objects.py", "/mudserve/spell/spelleffect.py"], "/tests/utils.py": ["/tests/__init__.py"], "/mudserve/auth/authhandler.py": ["/mudserve/settings.py", "/mudserve/models/auth.py", "/mudserve/__init__.py"]} |
73,376 | jleclanche/mudserve | refs/heads/master | /mudserve/spell/spell.py | from mudserve import cache
from mudserve.serialize import Serializer
from mudserve.database.objects import DatabaseObjectHandler
from mudserve.spell.spelleffect import SpellEffectHandler
from mudserve.mudrpc.spell.types.ttypes import Spell
class SpellHandler(DatabaseObjectHandler):
    """Wraps a Spell thrift struct with casting and permission logic."""

    def __init__(self, spell):
        self.spell = spell

    def can_use(self, player, target):
        """Return whether *player* may cast this spell on *target*."""
        # TODO: Fix this
        return True

    def execute(self, player, target):
        """Run every effect attached to the spell, in declaration order."""
        for effect in self.spell.effects:
            SpellEffectHandler(effect).execute(player, target)

    @classmethod
    def from_python(cls, data):
        """Build a Spell struct from a decoded-JSON dict ('id', 'name', 'effects')."""
        obj = Spell()
        obj.id = data['id']
        obj.name = data['name']
        obj.effects = [
            SpellEffectHandler.from_python(effect)
            for effect in data['effects']
        ]
        return obj
| {"/mudserve/combat/handler.py": ["/mudserve/auth/authhandler.py", "/mudserve/combat/fight.py"], "/mudserve/spell/spelleffect.py": ["/mudserve/database/objects.py"], "/mudserve/database/databases.py": ["/mudserve/spell/spell.py", "/mudserve/map/map.py", "/mudserve/settings.py"], "/mudserve/__init__.py": ["/mudserve/models/base.py"], "/tests/serializer/serializer.py": ["/tests/utils.py"], "/tests/serializer/__init__.py": ["/tests/serializer/serializer.py"], "/mudserve/models/base.py": ["/mudserve/settings.py"], "/mudserve/map/map.py": ["/mudserve/__init__.py"], "/mudserve/models/user.py": ["/mudserve/models/base.py"], "/mudserve/models/auth.py": ["/mudserve/models/base.py", "/mudserve/models/user.py"], "/mudserve/cache.py": ["/mudserve/settings.py"], "/mudserve/combat/fight.py": ["/mudserve/__init__.py", "/mudserve/spell/spell.py", "/mudserve/combat/unit.py"], "/mudserve/spell/spell.py": ["/mudserve/__init__.py", "/mudserve/database/objects.py", "/mudserve/spell/spelleffect.py"], "/tests/utils.py": ["/tests/__init__.py"], "/mudserve/auth/authhandler.py": ["/mudserve/settings.py", "/mudserve/models/auth.py", "/mudserve/__init__.py"]} |
73,377 | jleclanche/mudserve | refs/heads/master | /tests/utils.py | from os.path import dirname, abspath
from subprocess import call
from sys import _getframe
from tempfile import NamedTemporaryFile
from . import constants
def compile_thrift(file, up=1):
    """
    Compile a thrift file existing in a test subdirectory, placing the
    generated output in tests/gen-py.

    @param file
        Path of the .thrift file to compile.
    @param up
        How many frames up the call stack the real caller lives; the
        thrift compiler runs in that caller's directory.
    """
    args = ["thrift", "--gen", "py:new_style", "-o",
            dirname(abspath(__file__)), file]
    # Resolve the caller's source file so the compiler runs relative to it.
    # BUG FIX: the frame depth was hard-coded to 1, silently ignoring ``up``.
    cwd = abspath(_getframe(up).f_code.co_filename)
    call(args, cwd=dirname(cwd))
def get_tempfile(mode="rw+b"):
    """Return a non-deleting NamedTemporaryFile under the test tmp directory."""
    tmp_dir = constants.TMP_PATH
    return NamedTemporaryFile(mode=mode, dir=tmp_dir, delete=False)
| {"/mudserve/combat/handler.py": ["/mudserve/auth/authhandler.py", "/mudserve/combat/fight.py"], "/mudserve/spell/spelleffect.py": ["/mudserve/database/objects.py"], "/mudserve/database/databases.py": ["/mudserve/spell/spell.py", "/mudserve/map/map.py", "/mudserve/settings.py"], "/mudserve/__init__.py": ["/mudserve/models/base.py"], "/tests/serializer/serializer.py": ["/tests/utils.py"], "/tests/serializer/__init__.py": ["/tests/serializer/serializer.py"], "/mudserve/models/base.py": ["/mudserve/settings.py"], "/mudserve/map/map.py": ["/mudserve/__init__.py"], "/mudserve/models/user.py": ["/mudserve/models/base.py"], "/mudserve/models/auth.py": ["/mudserve/models/base.py", "/mudserve/models/user.py"], "/mudserve/cache.py": ["/mudserve/settings.py"], "/mudserve/combat/fight.py": ["/mudserve/__init__.py", "/mudserve/spell/spell.py", "/mudserve/combat/unit.py"], "/mudserve/spell/spell.py": ["/mudserve/__init__.py", "/mudserve/database/objects.py", "/mudserve/spell/spelleffect.py"], "/tests/utils.py": ["/tests/__init__.py"], "/mudserve/auth/authhandler.py": ["/mudserve/settings.py", "/mudserve/models/auth.py", "/mudserve/__init__.py"]} |
73,378 | jleclanche/mudserve | refs/heads/master | /mudserve/auth/authhandler.py | from sqlalchemy.orm.exc import NoResultFound
from datetime import datetime
from time import time as utctime
from cPickle import loads, dumps
from base64 import encodestring, decodestring
from hashlib import md5
from mudserve.settings import SECRET_KEY
from mudserve.mudrpc.auth.types.ttypes import User, MUDAUserException, MUDAErrorCode
from mudserve.models.auth import Auth
from mudserve import cache
from mudserve import dbsession
class AuthHandler(object):
    """RPC handler for auth-token validation and generation."""

    def validate_token(self, auth_token):
        """
        Resolve *auth_token* to a user id, consulting the cache first and
        the database on a miss.

        Raises MUDAUserException(AUTH_EXPIRED) when no unexpired token
        matches.
        """
        # Decode token
        user_id = cache.get(auth_token, namespace="auth")
        if user_id is None:
            # Cache miss - check the database for a still-valid token.
            try:
                # BUG FIX: the expiry comparison was inverted (expire_time <
                # now matched only *expired* tokens); a token is valid while
                # its expire_time lies in the future.
                auth = dbsession.query(Auth).filter(Auth.auth_token == auth_token,
                    Auth.expire_time > datetime.utcnow()).one()
            except NoResultFound:
                # The auth has expired
                raise MUDAUserException(errorCode=MUDAErrorCode.AUTH_EXPIRED)
            # Auth token valid, cache the result
            user_id = auth.user_id
            cache.set(auth_token, user_id, namespace="auth")
        return user_id

    def _generate_auth_token(self, user_id):
        """
        Build an opaque auth token: base64(pickle((user_id, now)) + md5 of
        that pickle salted with SECRET_KEY).
        """
        # BUG FIX: was defined without ``self`` and referenced the
        # undefined names ``userId`` and ``pickled``.
        data = (user_id, utctime())
        pickled_data = dumps(data)
        pickled_md5 = md5(pickled_data + SECRET_KEY).hexdigest()
        return encodestring(pickled_data + pickled_md5)
| {"/mudserve/combat/handler.py": ["/mudserve/auth/authhandler.py", "/mudserve/combat/fight.py"], "/mudserve/spell/spelleffect.py": ["/mudserve/database/objects.py"], "/mudserve/database/databases.py": ["/mudserve/spell/spell.py", "/mudserve/map/map.py", "/mudserve/settings.py"], "/mudserve/__init__.py": ["/mudserve/models/base.py"], "/tests/serializer/serializer.py": ["/tests/utils.py"], "/tests/serializer/__init__.py": ["/tests/serializer/serializer.py"], "/mudserve/models/base.py": ["/mudserve/settings.py"], "/mudserve/map/map.py": ["/mudserve/__init__.py"], "/mudserve/models/user.py": ["/mudserve/models/base.py"], "/mudserve/models/auth.py": ["/mudserve/models/base.py", "/mudserve/models/user.py"], "/mudserve/cache.py": ["/mudserve/settings.py"], "/mudserve/combat/fight.py": ["/mudserve/__init__.py", "/mudserve/spell/spell.py", "/mudserve/combat/unit.py"], "/mudserve/spell/spell.py": ["/mudserve/__init__.py", "/mudserve/database/objects.py", "/mudserve/spell/spelleffect.py"], "/tests/utils.py": ["/tests/__init__.py"], "/mudserve/auth/authhandler.py": ["/mudserve/settings.py", "/mudserve/models/auth.py", "/mudserve/__init__.py"]} |
73,396 | tyone-jp/kaggle-steel | refs/heads/master | /missing_predict.py | import os
import json
import cv2
import numpy as np
import pandas as pd
import keras
from keras import layers
from keras.applications import DenseNet121
from keras.callbacks import Callback,ModelCheckpoint,ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.optimizers import Adam,Nadam
import tensorflow as tf
from tqdm import tqdm
from logging import StreamHandler,DEBUG,Formatter,FileHandler,getLogger
logger=getLogger(__name__)
DIR='../result/tmp/'
def load_img(code, base, resize=True):
    """Read ``base/code`` from disk as an RGB image, optionally resized to 256x256."""
    raw = cv2.imread(f'{base}/{code}')
    rgb = cv2.cvtColor(raw, cv2.COLOR_BGR2RGB)
    if not resize:
        return rgb
    return cv2.resize(rgb, (256, 256))
def create_datagen():
    """Build the augmenting ImageDataGenerator used for training (15% validation split)."""
    augmentations = dict(
        zoom_range=0.1,
        fill_mode='constant',
        cval=0.,
        rotation_range=10,
        height_shift_range=0.1,
        width_shift_range=0.1,
        horizontal_flip=True,
        vertical_flip=True,
        rescale=1/255.,
        validation_split=0.15,
    )
    return ImageDataGenerator(**augmentations)
def create_test_gen():
    """Unshuffled, rescale-only generator over the test images."""
    # NOTE(review): depends on the module globals ``test_nan_df`` and
    # ``BATCH_SIZE`` that are only assigned inside the __main__ block -
    # confirm this is never called before they exist.
    return ImageDataGenerator(rescale=1/255.).flow_from_dataframe(test_nan_df,
                                                                  directory='../input/test_images/',
                                                                  x_col='ImageId',
                                                                  class_mode=None,
                                                                  target_size=(256,256),
                                                                  batch_size=BATCH_SIZE,
                                                                  shuffle=False)
def create_flow(datagen, subset):
    """Flow training images with the 'allMissing' regression target for the given subset."""
    # NOTE(review): depends on the module globals ``train_nan_df`` and
    # ``BATCH_SIZE`` assigned in the __main__ block - confirm call order.
    return datagen.flow_from_dataframe(train_nan_df,
                                       directory='../tmp/train',
                                       x_col='ImageId',
                                       y_col='allMissing',
                                       class_mode='other',
                                       target_size=(256,256),
                                       batch_size=BATCH_SIZE,
                                       subset=subset)
def build_model():
    """
    DenseNet121 backbone (local pretrained weights, no top) plus a small
    sigmoid head, compiled for the binary 'allMissing' objective.
    """
    backbone = DenseNet121(include_top=False,
                           input_shape=(256, 256, 3),
                           weights='../input/weight/DenseNet-BC-121-32-no-top.h5')
    model = Sequential()
    model.add(backbone)
    # Classification head: pooled features -> 512-unit ReLU -> sigmoid.
    head = [
        layers.GlobalAveragePooling2D(),
        layers.BatchNormalization(),
        layers.Dropout(0.5),
        layers.Dense(512, activation='relu'),
        layers.BatchNormalization(),
        layers.Dropout(0.5),
        layers.Dense(1, activation='sigmoid'),
    ]
    for layer in head:
        model.add(layer)
    model.compile(loss='binary_crossentropy',
                  optimizer=Nadam(),
                  metrics=['accuracy'])
    return model
def tta_prediction(datagen, model, image, n_examples):
    """
    Test-time augmentation: predict ``n_examples`` augmented copies of a
    single ``image`` and return the mean prediction.
    """
    batch = np.expand_dims(image, axis=0)
    flow = datagen.flow(batch, batch_size=n_examples)
    predictions = model.predict_generator(flow, steps=n_examples, verbose=0)
    return predictions.sum(axis=0) / n_examples
if __name__=='__main__':
    # --- logging setup: INFO to console, DEBUG to a file under DIR ---
    log_fmt=Formatter('%(asctime)s %(name)s %(lineno)d [%(levelname)s] [%(funcName)s] %(message)s')
    handler=StreamHandler()
    handler.setLevel('INFO')
    handler.setFormatter(log_fmt)
    logger.addHandler(handler)
    handler=FileHandler(DIR+'train.py.log','a')
    handler.setLevel(DEBUG)
    handler.setFormatter(log_fmt)
    logger.setLevel(DEBUG)
    logger.addHandler(handler)
    logger.info('start')
    train_df=pd.read_csv('../input/train.csv')
    submission_df=pd.read_csv('../input/sample_submission.csv')
    logger.info('train_df shape:{}'.format(train_df.shape))
    logger.info('submisson_df shape:{}'.format(submission_df.shape))
    unique_test_images=submission_df['ImageId_ClassId'].apply(lambda x:x.split('_')[0]).unique()
    # Per-image bookkeeping: an image is "allMissing" when all four of its
    # class rows have NaN EncodedPixels (no defect of any class).
    train_df['isNan']=pd.isna(train_df['EncodedPixels'])
    train_df['ImageId']=train_df['ImageId_ClassId'].apply(lambda x:x.split('_')[0])
    train_nan_df=train_df.groupby(by='ImageId',axis=0).agg('sum')
    train_nan_df.reset_index(inplace=True)
    train_nan_df.rename(columns={'isNan':'missingCount'},inplace=True)
    train_nan_df['missingCount']=train_nan_df['missingCount'].astype(np.int32)
    train_nan_df['allMissing']=(train_nan_df['missingCount']==4).astype(int)
    # NOTE(review): the .jpg -> .png rename assumes the ../tmp/train copies
    # were converted to png - confirm against the preprocessing step.
    train_nan_df['ImageId']=train_nan_df['ImageId'].apply(lambda x:x.replace('.jpg','.png'))
    logger.info('train_nan_df shape:{}'.format(train_nan_df.shape))
    test_nan_df=pd.DataFrame(unique_test_images,columns=['ImageId'])
    logger.info('test_nan_df shape:{}'.format(test_nan_df.shape))
    logger.info('data download finished')
    logger.info('train start')
    logger.info('create data_generator')
    BATCH_SIZE=32
    data_generator=create_datagen()
    train_gen=create_flow(data_generator,'training')
    val_gen=create_flow(data_generator,'validation')
    test_gen=create_test_gen()
    model=build_model()
    logger.info('train')
    total_steps=train_nan_df.shape[0]/BATCH_SIZE
    # Keep only the best checkpoint by validation accuracy.
    checkpoint=ModelCheckpoint('../output/model.h5',
                               monitor='val_acc',
                               verbose=1,
                               save_best_only=True,
                               save_weights_only=False,
                               mode='auto')
    reduce_lr=ReduceLROnPlateau(monitor='val_loss',
                                patience=5,
                                verbose=1,
                                min_lr=1e-6)
    history=model.fit_generator(train_gen,
                                steps_per_epoch=total_steps*0.85,
                                validation_data=val_gen,
                                validation_steps=total_steps*0.15,
                                epochs=30,
                                callbacks=[checkpoint,reduce_lr])
    logger.info('train finished')
    history_df=pd.DataFrame(history.history)
    history_df.to_csv('../output/history.csv')
    logger.info('history saved')
    # Reload the best checkpoint, then TTA-predict each test image (20 augments).
    model.load_weights('../output/model.h5')
    y_test=np.empty(test_nan_df.shape)
    for i,code in enumerate(tqdm(test_nan_df['ImageId'])):
        y_test[i]=tta_prediction(datagen=create_datagen(),
                                 model=model,
                                 image=load_img(base='../input/test_images',code=code),
                                 n_examples=20)
    logger.info('tta finished')
    test_nan_df['allMissing']=y_test
    train_nan_df.to_csv('train_missing_count.csv',index=False)
    test_nan_df.to_csv('test_missing_count.csv',index=False)
    logger.info('finish')
| {"/train_unet.py": ["/generator.py"], "/generator.py": ["/mask2rle.py"], "/train.py": ["/generator.py", "/mask2rle.py"]} |
73,397 | tyone-jp/kaggle-steel | refs/heads/master | /train_unet.py | from segmentation_models import Unet
from segmentation_models.backbones import get_preprocessing
from loss_function import dice_coef
import pandas as pd
from generator import DataGenerator
from keras.callbacks import ModelCheckpoint,CSVLogger
path='../output/model/'
# Training dataframe with one row per image and per-class RLE columns
# (e1..e4); NaNs are replaced with '' so rle2maskResize yields empty masks.
train=pd.read_csv('../input/train2.csv')
train.fillna('',inplace=True)
train.reset_index(drop=True,inplace=True)
# Keep only the checkpoint with the best validation dice coefficient.
checkpoint=ModelCheckpoint(filepath=path+'unet.h5',monitor='val_dice_coef',save_best_only=True)
csv_logger=CSVLogger('../output/training.log')
if __name__=='__main__':
    preprocess=get_preprocessing('resnet34')
    model=Unet('resnet34',input_shape=(128,800,3),classes=4,activation='sigmoid')
    model.compile(optimizer='adam',loss='binary_crossentropy',metrics=[dice_coef])
    # 80/20 sequential train/validation split (no shuffling of the split itself).
    idx=int(0.8*len(train))
    train_batches=DataGenerator(train.iloc[:idx],shuffle=True,preprocess=preprocess)
    valid_batches=DataGenerator(train.iloc[idx:],preprocess=preprocess)
    history=model.fit_generator(train_batches,validation_data=valid_batches,epochs=30,verbose=1,callbacks=[checkpoint,csv_logger])
| {"/train_unet.py": ["/generator.py"], "/generator.py": ["/mask2rle.py"], "/train.py": ["/generator.py", "/mask2rle.py"]} |
73,398 | tyone-jp/kaggle-steel | refs/heads/master | /generator.py | import keras
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
import pandas as pd
import cv2
from mask2rle import rle2maskResize
from PIL import Image
def create_test_gen(test_df):
    """Unshuffled, rescale-only flow over the test image directory."""
    rescaler = ImageDataGenerator(rescale=1/255.)
    return rescaler.flow_from_dataframe(
        test_df,
        directory='../input/test_images',
        x_col='ImageId',
        class_mode=None,
        target_size=(256, 256),
        batch_size=64,
        shuffle=False)
class DataGenerator(keras.utils.Sequence):
    """
    Keras Sequence yielding image batches resized to 128x800 (and, for the
    'train' subset, matching 4-channel defect masks).

    ``df`` must carry an 'ImageId' column and, for 'train', RLE columns
    'e1'..'e4' (one per defect class).
    """

    def __init__(self, df, batch_size=16, subset='train', shuffle=False, preprocess=None, info=None):
        super().__init__()
        self.batch_size = batch_size
        self.df = df
        self.shuffle = shuffle
        self.preprocess = preprocess
        self.subset = subset
        # BUG FIX: ``info={}`` was a mutable default shared by every
        # instance; allocate a fresh dict per generator instead.
        # info maps absolute sample index -> filename for later lookup.
        self.info = {} if info is None else info
        if self.subset == 'train':
            self.data_path = '../input/train_images/'
        elif self.subset == 'test':
            self.data_path = '../input/test_images/'
        self.on_epoch_end()

    def __len__(self):
        # Number of full batches; the trailing partial batch is dropped.
        return int(np.floor(len(self.df) / self.batch_size))

    def __getitem__(self, index):
        """Return batch ``index`` as (X, y) for 'train', or X alone otherwise."""
        X = np.empty((self.batch_size, 128, 800, 3), dtype=np.float32)
        indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
        is_train = self.subset == 'train'
        if is_train:
            # Masks are only needed (and returned) for the training subset.
            y = np.empty((self.batch_size, 128, 800, 4), dtype=np.int8)
        for i, f in enumerate(self.df['ImageId'].iloc[indexes]):
            self.info[index * self.batch_size + i] = f
            X[i, ] = Image.open(self.data_path + f).resize((800, 128))
            if is_train:
                for j in range(4):
                    y[i, :, :, j] = rle2maskResize(self.df['e' + str(j + 1)].iloc[indexes[i]])
        if self.preprocess is not None:
            X = self.preprocess(X)
        if is_train:
            return X, y
        return X

    def on_epoch_end(self):
        # Reshuffle the sample order between epochs when requested.
        self.indexes = np.arange(len(self.df))
        if self.shuffle:
            np.random.shuffle(self.indexes)
| {"/train_unet.py": ["/generator.py"], "/generator.py": ["/mask2rle.py"], "/train.py": ["/generator.py", "/mask2rle.py"]} |
73,399 | tyone-jp/kaggle-steel | refs/heads/master | /train_data.py | import pandas as pd
import numpy as np
train_df=pd.read_csv('../input/train.csv')
if __name__=='__main__':
train_df['ImageId']=train_df['ImageId_ClassId'].map(lambda x:x.split('_')[0])
train2_df=pd.DataFrame({'ImageId':train_df['ImageId'][::4]})
train2_df['e1']=train_df['EncodedPixels'][::4].values
train2_df['e2']=train_df['EncodedPixels'][1::4].values
train2_df['e3']=train_df['EncodedPixels'][2::4].values
train2_df['e4']=train_df['EncodedPixels'][3::4].values
train2_df.reset_index(inplace=True,drop=True)
train2_df.fillna('',inplace=True)
train2_df.to_csv('../input/train2.csv')
| {"/train_unet.py": ["/generator.py"], "/generator.py": ["/mask2rle.py"], "/train.py": ["/generator.py", "/mask2rle.py"]} |
73,400 | tyone-jp/kaggle-steel | refs/heads/master | /mask2rle.py | import numpy as np
from PIL import Image
import pandas as pd
def mask2rle(img):
    """
    Encode a binary mask as a Kaggle run-length string.

    Pixels are scanned column-major (Fortran order); output is
    'start length start length ...' with 1-based starts.
    """
    flat = img.T.flatten()
    padded = np.concatenate([[0], flat, [0]])
    # Positions where the padded sequence flips 0<->1, 1-based.
    boundaries = np.where(padded[1:] != padded[:-1])[0] + 1
    # Turn every (start, end) pair into (start, length).
    boundaries[1::2] -= boundaries[::2]
    return ' '.join(str(v) for v in boundaries)
def rle2mask(rle, input_shape):
    """
    Decode a Kaggle RLE string into a binary uint8 mask of shape
    input_shape[:2], filled column-major (Fortran order).
    """
    height, width = input_shape[:2]
    mask= np.zeros( width*height ).astype(np.uint8)
    array = np.asarray([int(x) for x in rle.split()])
    # BUG FIX: RLE starts are 1-based in the competition format; the
    # previous code used them as 0-based offsets, shifting every run by
    # one pixel (rle2maskResize in this file already applies the -1).
    starts = array[0::2] - 1
    lengths = array[1::2]
    for index, start in enumerate(starts):
        mask[int(start):int(start+lengths[index])] = 1
    return mask.reshape((height,width),order='F')
def build_masks(rles, input_shape):
    """Stack one decoded mask per RLE string along the last axis (non-strings stay zero)."""
    masks = np.zeros((*input_shape, len(rles)))
    for channel, rle in enumerate(rles):
        if type(rle) is str:
            masks[:, :, channel] = rle2mask(rle, input_shape)
    return masks
def build_rles(masks):
    """Encode each channel of an (H, W, C) mask stack as an RLE string."""
    channels = masks.shape[2]
    return [mask2rle(masks[..., c]) for c in range(channels)]
def rle2maskResize(rle):
    """
    Decode a 256x1600 RLE string and downsample the mask 2x to 128x800.

    NaN or empty input yields an all-zero mask.
    """
    if (pd.isnull(rle)) | (rle == ''):
        return np.zeros((128, 800), dtype=np.uint8)
    full_height, full_width = 256, 1600
    mask = np.zeros(full_width * full_height, dtype=np.uint8)
    numbers = np.asarray([int(token) for token in rle.split()])
    run_starts = numbers[0::2] - 1  # RLE starts are 1-based
    run_lengths = numbers[1::2]
    for begin, span in zip(run_starts, run_lengths):
        mask[int(begin):int(begin + span)] = 1
    # Column-major reshape, then keep every second row and column.
    return mask.reshape((full_height, full_width), order='F')[::2, ::2]
| {"/train_unet.py": ["/generator.py"], "/generator.py": ["/mask2rle.py"], "/train.py": ["/generator.py", "/mask2rle.py"]} |
73,401 | tyone-jp/kaggle-steel | refs/heads/master | /train.py | import os
import json
import gc
import cv2
import keras
from keras import backend as K
from keras import layers
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model,load_model
from keras.layers import Input
from keras.layers.convolutional import Conv2D,Conv2DTranspose
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
from keras.optimizers import Adam
from keras.callbacks import Callback,ModelCheckpoint
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from logging import StreamHandler,DEBUG,Formatter,FileHandler,getLogger
from generator import create_test_gen,DataGenerator
from mask2rle import build_rles,build_masks
from model import build_model
from loss_function import dice_coef
logger=getLogger(__name__)
DIR='../result/'
def load_img(code, base, resize=True):
    """Load ``base/code`` as an RGB array; optionally shrink to 256x256."""
    raw = cv2.imread(f'{base}/{code}')
    rgb = cv2.cvtColor(raw, cv2.COLOR_BGR2RGB)
    return cv2.resize(rgb, (256, 256)) if resize else rgb
def validate_path(path):
    """Create ``path`` (including parents) if it does not already exist."""
    # BUG FIX: was ``if notos.path.exits(path)`` - a NameError at runtime
    # (missing space in ``not os`` plus a typo for ``exists``).
    if not os.path.exists(path):
        os.makedirs(path)
if __name__=='__main__':
    # --- logging setup: INFO to console, DEBUG to a file under DIR ---
    log_fmt=Formatter('%(asctime)s %(name)s %(lineno)d [%(levelname)s] [%(funcName)s] %(message)s')
    handler=StreamHandler()
    handler.setLevel('INFO')
    handler.setFormatter(log_fmt)
    logger.addHandler(handler)
    handler=FileHandler(DIR+'train.py.log','a')
    handler.setLevel(DEBUG)
    handler.setFormatter(log_fmt)
    logger.setLevel(DEBUG)
    logger.addHandler(handler)
    logger.info('start')
    logger.info('preprocessing')
    train_df=pd.read_csv('../input/train.csv')
    train_df['ImageId']=train_df['ImageId_ClassId'].apply(lambda x:x.split('_')[0])
    train_df['ClassId']=train_df['ImageId_ClassId'].apply(lambda x:x.split('_')[1])
    train_df['hasMask']=~train_df['EncodedPixels'].isna()
    logger.info('train_df shape:{}'.format(train_df.shape))
    # One row per image; 'hasMask' becomes the count of defect classes present.
    mask_count_df=train_df.groupby('ImageId').agg(np.sum).reset_index()
    mask_count_df.sort_values('hasMask',ascending=False,inplace=True)
    logger.info('mask_count_df shape:{}'.format(mask_count_df.shape))
    sub_df=pd.read_csv('../input/sample_submission.csv')
    sub_df['ImageId']=sub_df['ImageId_ClassId'].apply(lambda x:x.split('_')[0])
    test_imgs=pd.DataFrame(sub_df['ImageId'].unique(),columns=['ImageId'])
    logger.info('test_imgs shape:{}'.format(test_imgs.shape))
    non_missing_train_idx=mask_count_df[mask_count_df['hasMask']>0]
    logger.info('non_missing_train_idx shape:{}'.format(non_missing_train_idx.shape))
    logger.info('remove test images without defects')
    # The classifier trained by missing_predict.py filters out test images
    # predicted to have no defects at all (allMissing >= 0.5).
    test_gen=create_test_gen(test_df=test_imgs)
    remove_model=load_model('../output/model.h5')
    remove_model.summary()
    test_missing_pred=remove_model.predict_generator(
        test_gen,steps=len(test_gen),verbose=1)
    test_imgs['allMissing']=test_missing_pred
    # NOTE(review): extra positional arg with no %s placeholder - logging
    # will raise a string-formatting error here; should be a format string.
    logger.info('test_imgs',test_imgs.head())
    filtered_test_imgs=test_imgs[test_imgs['allMissing']<0.5]
    logger.info('filtered_test_imgs shape:{}'.format(filtered_test_imgs.shape))
    filtered_mask=sub_df['ImageId'].isin(filtered_test_imgs['ImageId'].values)
    filtered_sub_df=sub_df[filtered_mask].copy()
    # Images predicted defect-free get blank submission rows.
    null_sub_df=sub_df[~filtered_mask].copy()
    null_sub_df['EncodedPixels']=null_sub_df['EncodedPixels'].apply(lambda x:' ')
    filtered_sub_df.reset_index(drop=True,inplace=True)
    filtered_test_imgs.reset_index(drop=True,inplace=True)
    logger.info('filtered_sub_df shape:{}'.format(filtered_sub_df.shape))
    logger.info('filtered_test_imgs shape:{}'.format(filtered_test_imgs.shape))
    BATCH_SIZE=16
    train_idx,val_idx=train_test_split(non_missing_train_idx.index,random_state=2019,test_size=0.15)
    # NOTE(review): these keyword args (df=, target_df=, n_classes=) do not
    # match generator.DataGenerator's __init__ signature - confirm which
    # DataGenerator implementation this script was written against.
    train_generator=DataGenerator(train_idx,df=mask_count_df,target_df=train_df,batch_size=BATCH_SIZE,n_classes=4)
    val_generator=DataGenerator(val_idx,df=mask_count_df,target_df=train_df,batch_size=BATCH_SIZE,n_classes=4)
    model=build_model((256,1600,1))
    model.summary()
    checkpoint=ModelCheckpoint('../output/model-unet.h5',monitor='val_loss',verbose=0,save_best_only=True,save_weights_only=False,mode='auto')
    history=model.fit_generator(train_generator,validation_data=val_generator,callbacks=[checkpoint],use_multiprocessing=False,workers=1,epochs=10)
| {"/train_unet.py": ["/generator.py"], "/generator.py": ["/mask2rle.py"], "/train.py": ["/generator.py", "/mask2rle.py"]} |
73,410 | d0p3t/TwitchChatBot | refs/heads/master | /main.py | from parser import Parser
print("===============================================")
print("TWITCH CHAT BOT - SENTENCE GENERATOR BASED ON TWITCH CHAT DATASET")
print("VERSION 0.0.1")
print("===============================================\n")
DATA_PARSER = Parser(key='TWITCHCHAT:CLEAN', batch_size=64, seq_length=32)
| {"/main.py": ["/parser.py"]} |
73,411 | d0p3t/TwitchChatBot | refs/heads/master | /parser.py | import collections
import os
import re
import redis
import numpy as np
from six.moves import cPickle
REDIS = redis.StrictRedis(host='localhost', port=6379, db=0, charset='utf-8', decode_responses=True)
class Parser():
    def __init__(self, key, data_dir='datasets', batch_size=64, seq_length=32):
        """
        Load (or build) the vocabulary and word-id tensor for ``key``.

        :param key: redis set holding the raw chat messages.
        :param data_dir: directory caching vocab.pkl / data.npy.
        :param batch_size: samples per training batch.
        :param seq_length: words per training sequence.
        """
        self.redis = REDIS
        self.key = key
        self.data_dir = data_dir
        self.batch_size = batch_size
        self.seq_length = seq_length
        vocab_file = os.path.join(data_dir, "vocab.pkl")
        tensor_file = os.path.join(data_dir, "data.npy")
        count = self.redis.scard(self.key)
        # NOTE(review): redis SCARD returns 0 for a missing key, never
        # None, so the "no messages" branch looks unreachable - confirm.
        if count is None:
            print("PARSER - NO CHAT MESSAGES FOUND!")
        else:
            print(f"PARSER - TOTAL CHAT MESSAGES: {count}")
        # Build from redis on first run; reuse the cached files afterwards.
        if (os.path.exists(vocab_file) and os.path.exists(tensor_file)) is False:
            self.preprocess(key=key, vocab_file=vocab_file, tensor_file=tensor_file)
        else:
            self.load_preprocessed(vocab_file=vocab_file, tensor_file=tensor_file)
        self.create_batches()
        self.reset_batch_pointer()
        print("PARSER - READY TO TRAIN")
def preprocess(self, key, vocab_file, tensor_file):
print("PARSER - PREPROCESSING DATA...")
messages = self.redis.smembers(key)
data = ""
for message in messages:
data += " " + message
clean_data = self.clean_str(string=data)
x_text = clean_data.split()
self.vocab, self.chars = self.build_vocab(messages=x_text)
self.vocab_size = len(self.chars)
with open(vocab_file, 'wb') as filename:
cPickle.dump(self.chars, filename)
self.tensor = np.array(list(map(self.vocab.get, x_text)))
np.save(tensor_file, self.tensor)
print(f"PARSER - FOUND {self.vocab_size} UNIQUE WORDS")
print("PARSER - PREPROCESSING DONE")
def load_preprocessed(self, vocab_file, tensor_file):
print("PARSER - LOADING PREPROCESSED DATA...")
with open(vocab_file, 'rb') as filename:
self.words = cPickle.load(filename)
self.vocab_size = len(self.words)
self.vocab = dict(zip(self.words, range(len(self.words))))
self.tensor = np.load(tensor_file)
self.num_batches = int(self.tensor.size / (self.batch_size * self.seq_length))
print(f"PARSER - FOUND {self.vocab_size} UNIQUE WORDS")
print("PARSER - LOADED PREPROCESSED DATA")
def build_vocab(self, messages):
print("PARSER - BUILDING VOCABULARY...")
word_counts = collections.Counter(messages)
vocabulary_inv = [x[0] for x in word_counts.most_common()]
vocabulary_inv = list(sorted(vocabulary_inv))
vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}
print("PARSER - BUILDING VOCABULARY DONE")
return [vocabulary, vocabulary_inv]
def create_batches(self):
print("PARSER - CREATING BATCHES...")
self.num_batches = int(self.tensor.size / (self.batch_size *
self.seq_length))
if self.num_batches == 0:
assert False, "PARSER - NOT ENOUGH DATA. MAKE SEQ_LENGTH AND BATCH_SIZE SMALLER!"
self.tensor = self.tensor[:self.num_batches *
self.batch_size * self.seq_length]
xdata = self.tensor
ydata = np.copy(self.tensor)
ydata[:-1] = xdata[1:]
ydata[-1] = xdata[0]
self.x_batches = np.split(xdata.reshape(
self.batch_size, -1), self.num_batches, 1)
self.y_batches = np.split(ydata.reshape(
self.batch_size, -1), self.num_batches, 1)
print("PARSER - CREATING BATCHES DONE")
def next_batch(self):
x, y = self.x_batches[self.pointer], self.y_batches[self.pointer]
self.pointer += 1
return x, y
def reset_batch_pointer(self):
self.pointer = 0
def clean_str(self, string):
string = re.sub(
r"(http:\/\/www\.|https:\/\/www\.|http:\/\/|https:\/\/)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)", " ", string)
string = re.sub(r"[^가-힣A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(u'[\u3131-\ucb4c]', " ", string) # Korean Hangul
string = re.sub(u'[\u1100-\u11ff]', " ", string) # Korean Hangul
string = re.sub(u'[\uac00-\ud7a3]', " ", string) # Korean Hangul
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
| {"/main.py": ["/parser.py"]} |
73,412 | d0p3t/TwitchChatBot | refs/heads/master | /utils.py | import re
def clean_str(string):
    """Normalize raw chat text: drop URLs, replace non-word symbols with
    spaces, pad punctuation with spaces, and collapse runs of whitespace."""
    # (pattern, replacement) pairs applied in order. The URL pattern must run
    # first, before punctuation padding breaks up the URL text; the whitespace
    # collapse must run last.
    substitutions = (
        (r"(http:\/\/www\.|https:\/\/www\.|http:\/\/|https:\/\/)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)", " "),
        (r"[^가-힣A-Za-z0-9(),!?\'\`]", " "),
        (r"\.", " . "),
        (r"\,", " , "),
        (r"\!", " ! "),
        (r"\(", " ( "),
        (r"\)", " ) "),
        (r"\?", " ? "),
        (r"\s{2,}", " "),
    )
    for pattern, replacement in substitutions:
        string = re.sub(pattern, replacement, string)
    return string
def clean_prediction(string):
    """Re-space model output for display: punctuation marks get a trailing
    space, parentheses a leading one, and whitespace runs are collapsed."""
    # Applied in order; the whitespace collapse must come last.
    substitutions = (
        (r"\.", ". "),
        (r"\,", ", "),
        (r"\!", "! "),
        (r"\(", " ("),
        (r"\)", " )"),
        (r"\?", "? "),
        (r"\s{2,}", " "),
    )
    for pattern, replacement in substitutions:
        string = re.sub(pattern, replacement, string)
    return string
| {"/main.py": ["/parser.py"]} |
73,413 | d0p3t/TwitchChatBot | refs/heads/master | /bot.py | from __future__ import print_function
import irc.bot
import requests
import tensorflow as tf
import os
import re
import json
import random
from six.moves import cPickle
from model import Model
class TwitchBot(irc.bot.SingleServerIRCBot):
    """IRC bot that joins a Twitch channel and periodically posts sentences
    sampled from the trained word-level RNN model.

    Every 20th chat message triggers a prediction (see on_pubmsg); the
    `!chatbot` command posts an informational blurb.
    """

    def __init__(self, username, client_id, token, channel):
        """Connect to Twitch chat and load the trained model artifacts.

        :param username: bot account name.
        :param client_id: Twitch API client id (used for the Kraken v5 call).
        :param token: OAuth token; doubles as the IRC server password.
        :param channel: channel name without the leading '#'.
        """
        self.client_id = client_id
        self.token = token
        self.channel = '#' + channel
        self.msg_count = 0
        # Get the channel id, we will need this for v5 API calls
        url = 'https://api.twitch.tv/kraken/users?login=' + channel
        headers = {'Client-ID': client_id,
                   'Accept': 'application/vnd.twitchtv.v5+json'}
        r = requests.get(url, headers=headers).json()
        self.channel_id = r['users'][0]['_id']
        # Create IRC bot connection
        server = 'irc.chat.twitch.tv'
        port = 6667
        print(f'Connecting to {server} on port {port}...')
        irc.bot.SingleServerIRCBot.__init__(
            self, [(server, port, token)], username, username)
        # Saved training hyper-parameters and the word<->id vocabulary.
        with open(os.path.join('datasets', 'config.pkl'), 'rb') as f:
            saved_args = cPickle.load(f)
        with open(os.path.join('datasets', 'vocab.pkl'), 'rb') as f:
            self.words, self.vocab = cPickle.load(f)
        self.model = Model(saved_args, True)
        # Emote-name lookups used to restore original capitalization in output.
        self.twitch_emotes = self.js_r('twitch_global_emotes.json')
        self.custom_emotes = self.js_r('twitch_custom_emotes.json')

    def on_welcome(self, c, e):
        """Request Twitch IRC capabilities and join the target channel."""
        print(f'Joining {self.channel}')
        # You must request specific capabilities before you can use them
        c.cap('REQ', ':twitch.tv/membership')
        c.cap('REQ', ':twitch.tv/tags')
        c.cap('REQ', ':twitch.tv/commands')
        c.join(self.channel)

    def on_pubmsg(self, c, e):
        """Count messages, post a prediction every 20th, and dispatch any
        message starting with '!' as a command."""
        self.msg_count += 1
        if self.msg_count % 20 == 0:
            self.do_predict(e)
        if e.arguments[0][:1] == '!':
            cmd = e.arguments[0].split(' ')[0][1:]
            print(f'Received command: {cmd}')
            self.do_command(e, cmd)
        return

    def js_r(self, filename):
        """Load and return the JSON document at *filename*."""
        with open(filename) as f_in:
            return(json.load(f_in))

    def clean_str(self, string):
        """Undo the tokenizer's spacing so model output reads naturally:
        re-attach contractions, drop padded parentheses, collapse spaces."""
        string = re.sub(r" \(", " ", string)
        string = re.sub(r" \)", " ", string)
        # NOTE(review): this pattern matches a literal backslash before '?'
        # (the text ' \?'); the tokenizer emits ' ? ' without a backslash, so
        # this sub presumably never fires — confirm against the training-side
        # clean_str and fix if needed.
        string = re.sub(r" \\\?", "? ", string)
        string = re.sub(r" 's", "'s", string)
        string = re.sub(r" 've", "'ve", string)
        string = re.sub(r" 't", "n't", string)
        string = re.sub(r" 're", "'re", string)
        string = re.sub(r" 'd", "'d", string)
        string = re.sub(r" 'll", "'ll", string)
        string = re.sub(r" n't", "n't", string)
        string = re.sub(r" , ", ", ", string)
        # string = re.sub(r" . ", ". ", string)
        # string = re.sub(r" !", "! ", string)
        string = re.sub(r"\s{2,}", " ", string)
        return string

    def do_predict(self, e):
        """Sample a short sentence from the model and post it to the channel.

        NOTE(review): a fresh tf.Session plus checkpoint restore happens on
        every prediction; restoring once in __init__ would avoid the repeated
        cost.
        """
        c = self.connection
        with tf.Session() as sess:
            tf.global_variables_initializer().run()
            saver = tf.train.Saver(tf.global_variables())
            ckpt = tf.train.get_checkpoint_state('datasets')
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                # Sample between 3 and 10 words.
                output_length = random.randint(3, 10)
                output = self.model.sample(
                    sess, self.words, self.vocab, output_length, ' ', 1, 1, 4)
                print(output)
                # Restore the original capitalization of any known emote names.
                for word in output.split():
                    for emote in self.twitch_emotes:
                        if emote.lower() == word:
                            output = str.replace(output, word, emote)
                    for emote in self.custom_emotes:
                        if emote.lower() == word:
                            output = str.replace(output, word, emote)
                final_output = self.clean_str(output)
                c.privmsg(self.channel, final_output)

    def do_command(self, e, cmd):
        """Handle a '!' chat command; only 'chatbot' is recognized."""
        c = self.connection
        if cmd == "chatbot":
            message = "/me Using 900,000 chat messages, this chatbot has been trained to emulate a chat user. The model is a RNN trained word-by-word."
            c.privmsg(self.channel, message)
def main():
    """Build and start the Twitch chat bot.

    NOTE(security): the credentials below were previously hard-coded and are
    committed to version control — rotate them. Environment variables now take
    precedence; the original literals remain only as backward-compatible
    fallbacks and should be removed once the environment is configured.
    """
    username = os.environ.get("TWITCH_USERNAME", "d0p3tbot")
    client_id = os.environ.get("TWITCH_CLIENT_ID", "saxy6rk8qyaj31s5h2sxkujauwsr7c")
    token = os.environ.get("TWITCH_OAUTH_TOKEN", "oauth:k31tvibwb9i8fquqcctxos4wdj81td")
    channel = os.environ.get("TWITCH_CHANNEL", "serpent_ai")
    bot = TwitchBot(username, client_id, token, channel)
    bot.start()


if __name__ == "__main__":
    main()
| {"/main.py": ["/parser.py"]} |
73,417 | bernardpazio/artifact | refs/heads/master | /tests.py | import unittest
from adc import DeckEncoder, DeckDecoder
class ADC(unittest.TestCase):
    """Round-trip tests for the Artifact deck-code encoder/decoder.

    `deck` and `code` are a known-good pair: encoding `deck` must yield
    `code`, and decoding `code` must yield `deck` exactly.
    """

    # Reference deck: 5 heroes (turn counts 3x1, 1x2, 1x3) and 15 card
    # stacks, already listed in ascending card_id order as the codec emits
    # them.
    deck = {'heroes': [{'card_id': 4005, 'turn': 2}, {'card_id': 10014, 'turn': 1}, {'card_id': 10017, 'turn': 3},
                       {'card_id': 10026, 'turn': 1}, {'card_id': 10047, 'turn': 1}],
            'cards': [{'card_id': 3000, 'count': 2}, {'card_id': 3001, 'count': 1}, {'card_id': 10091, 'count': 3},
                      {'card_id': 10102, 'count': 3}, {'card_id': 10128, 'count': 3}, {'card_id': 10165, 'count': 3},
                      {'card_id': 10168, 'count': 3}, {'card_id': 10169, 'count': 3}, {'card_id': 10185, 'count': 3},
                      {'card_id': 10223, 'count': 1}, {'card_id': 10234, 'count': 3}, {'card_id': 10260, 'count': 1},
                      {'card_id': 10263, 'count': 1}, {'card_id': 10322, 'count': 3}, {'card_id': 10354, 'count': 3}],
            'name': 'Green/Black Example'}
    code = 'ADCJWkTZX05uwGDCRV4XQGy3QGLmqUBg4GQJgGLGgO7AaABR3JlZW4vQmxhY2sgRXhhbXBsZQ__'

    def test_encoder(self):
        """Encoding the reference deck must reproduce the reference code."""
        encoded_deck = DeckEncoder.encode(self.deck)
        assert self.code == encoded_deck

    def test_decoder(self):
        """Decoding the reference code must reproduce the reference deck."""
        decoded_deck = DeckDecoder.decode(self.code)
        assert decoded_deck == self.deck
| {"/benchmark.py": ["/adc_py.py"]} |
73,418 | bernardpazio/artifact | refs/heads/master | /setup.py | from distutils.core import setup, Extension
from distutils.command.sdist import sdist as _sdist
try:
from Cython.Distutils import build_ext
except ImportError:
use_cython = False
else:
use_cython = True
cmdclass = {}
ext_modules = []
if use_cython:
ext_modules += [
Extension("artifact.adc", ["adc.pyx"]),
Extension("artifact.cards", ['cards.pyx'])
]
cmdclass['build_ext'] = build_ext
    class sdist(_sdist):
        """sdist variant that regenerates the C sources from the .pyx files
        before building, so the tarball always ships fresh .c files."""
        def run(self):
            # Import lazily: Cython is only required when actually cutting a
            # source distribution.
            from Cython.Build import cythonize
            cythonize(['adc.pyx', 'cards.pyx'])
            _sdist.run(self)
cmdclass['sdist'] = sdist
else:
ext_modules += [
Extension("artifact.adc", ["adc.c"]),
Extension("artifact.card", ["card.c"])
]
setup(
cmdclass=cmdclass,
ext_modules=ext_modules,
name='artifact',
version='0.1',
description="A wrapper for Artifacts Card API with a python implementation of their deck code encoder/decoder",
url='https://github.com/bernardpazio/artifact',
author='Bernard Pazio',
author_email='bernardpazio@gmail.com',
install_requires=['requests']
) | {"/benchmark.py": ["/adc_py.py"]} |
73,419 | bernardpazio/artifact | refs/heads/master | /adc_py.py | import base64
class InvalidDeckException(Exception):
    """Raised when a deck dict fails structural validation before encoding."""

    def __init__(self, deck, *args, **kwargs):
        # Keep the offending deck around so callers can inspect it.
        self.deck = deck
        super(InvalidDeckException, self).__init__(*args, **kwargs)
class DeckEncodingException(Exception):
    """Raised when a deck cannot be serialized into a deck-code string."""
class DeckDecodingException(Exception):
    """Raised when a deck-code string cannot be parsed back into a deck."""

    def __init__(self, deck_code, *args, **kwargs):
        # Keep the offending code string around for diagnostics.
        self.deck_code = deck_code
        super(DeckDecodingException, self).__init__(*args, **kwargs)
class DeckEncoder:
    """Serializes an Artifact deck dict into a shareable 'ADC...' code string.

    Wire format: a version/hero-count byte, a checksum byte, the name length,
    then varint-delta-encoded hero and card entries, followed by the UTF-8
    deck name; the whole buffer is base64-encoded with a URL-safe twist.
    """

    version = 2
    prefix = 'ADC'
    header_size = 3  # version byte + checksum byte + name-length byte

    @staticmethod
    def encode(deck: dict):
        """Return the deck-code string for *deck*.

        :param deck: {'heroes': [...], 'cards': [...], 'name': str}.
        :raises InvalidDeckException: if the deck fails validation.
        """
        if not DeckEncoder._is_valid_deck(deck):
            raise InvalidDeckException(deck)
        # Entries are delta-encoded, so both lists must be sorted by card_id.
        sorted_heroes = sorted(deck['heroes'], key=lambda entry: entry['card_id'])
        sorted_cards = sorted(deck['cards'], key=lambda entry: entry['card_id'])
        out = bytearray()
        out.append(DeckEncoder.version << 4 |
                   DeckEncoder._extract_bits_with_carry(len(sorted_heroes), 3))
        checksum_position = len(out)
        out.append(0)  # checksum placeholder, patched below
        # Deck name is truncated to 63 bytes; its length precedes the payload.
        name = bytes(deck.get('name', ''), 'utf-8')[:63]
        out.append(len(name))
        DeckEncoder._add_remaining_to_buffer(len(sorted_heroes), 3, out)
        prev_id = 0
        for hero in sorted_heroes:
            DeckEncoder._add_card_to_buffer(hero['turn'], hero['card_id'] - prev_id, out)
            prev_id = hero['card_id']
        prev_id = 0
        for card in sorted_cards:
            DeckEncoder._add_card_to_buffer(card['count'], card['card_id'] - prev_id, out)
            prev_id = card['card_id']
        # Checksum covers only the card bytes (everything after the header).
        out[checksum_position] = DeckEncoder._compute_checksum(
            bytes(out[DeckEncoder.header_size:])) & 0x0FF
        out += name
        encoded = DeckEncoder.prefix + base64.b64encode(out).decode('utf-8')
        # URL-safe substitutions, undone by the decoder.
        return encoded.replace('/', '-').replace('=', '_')

    @staticmethod
    def _compute_checksum(bytes_buffer: bytes):
        """Sum of all byte values; the caller keeps only the low byte."""
        return sum(bytes_buffer)

    @staticmethod
    def _add_card_to_buffer(count: int, value: int, buffer: bytearray):
        """Append one entry: 2 count bits + 5 value bits + carry in the first
        byte, varint continuation for the value, then an extended count varint
        when count does not fit in 2 bits."""
        max_count_bits = 0x03
        needs_extended_count = (count - 1) >= max_count_bits
        header = (max_count_bits if needs_extended_count else count - 1) << 6
        header |= DeckEncoder._extract_bits_with_carry(value, 5)
        buffer.append(header)
        DeckEncoder._add_remaining_to_buffer(value, 5, buffer)
        if needs_extended_count:
            DeckEncoder._add_remaining_to_buffer(count, 0, buffer)

    @staticmethod
    def _add_remaining_to_buffer(value: int, pos: int, buffer: bytearray):
        """Append the bits of *value* above bit *pos* as 7-bit varint chunks."""
        remaining = value >> pos
        while remaining:
            buffer.append(DeckEncoder._extract_bits_with_carry(remaining, 7))
            remaining >>= 7

    @staticmethod
    def _extract_bits_with_carry(value: int, num_bits: int):
        """Return the low *num_bits* of *value*, with the next bit set as a
        continuation flag when more bits remain."""
        carry_bit = 1 << num_bits
        chunk = value & (carry_bit - 1)
        return chunk | carry_bit if value >= carry_bit else chunk

    @staticmethod
    def _is_valid_deck(deck: dict):
        """True iff the deck has exactly 5 complete hero entries with turn
        counts (3, 1, 1) for turns (1, 2, 3), and every card entry carries
        both 'count' and 'card_id'."""
        if 'heroes' not in deck or 'cards' not in deck:
            return False
        heroes, cards = deck['heroes'], deck['cards']
        if len(heroes) != 5:
            return False
        turn_counts = [0, 0, 0]
        for hero in heroes:
            if 'turn' not in hero or 'card_id' not in hero:
                return False
            slot = hero['turn'] - 1
            if not 0 <= slot <= 2:
                return False
            turn_counts[slot] += 1
        if turn_counts != [3, 1, 1]:
            return False
        return all('count' in card and 'card_id' in card for card in cards)
class DeckDecoder:
    """Decodes Artifact deck-code strings ('ADC...') back into deck dicts."""

    version = 2
    prefix = 'ADC'

    @staticmethod
    def decode(deck_code: str):
        """Decode *deck_code* into {'heroes': [...], 'cards': [...], 'name': str}.

        :raises DeckDecodingException: on a bad prefix, unsupported version,
            or checksum mismatch.
        """
        deck_code_prefix = deck_code[:len(DeckDecoder.prefix)]
        if deck_code_prefix != DeckDecoder.prefix:
            msg = f'Invalid deck code prefix: Got ({deck_code_prefix}) Expected ({DeckDecoder.prefix})'
            raise DeckDecodingException(deck_code, msg)
        # The encoder substitutes '/'->'-' and '='->'_' for URL safety;
        # undo that before base64-decoding.
        deck_code_no_prefix = deck_code[len(DeckDecoder.prefix):].replace('-', '/').replace('_', '=')
        deck_code_bytes = base64.decodebytes(bytes(deck_code_no_prefix, 'utf-8'))
        current_byte = 0
        total_bytes = len(deck_code_bytes)
        # Byte 0: version in the high nibble, low hero-count bits below.
        version_and_heroes = deck_code_bytes[current_byte]
        current_byte += 1
        version = version_and_heroes >> 4
        if version != DeckDecoder.version and version != 1:
            msg = f'Deck code version ({version}) and decoder version ({DeckDecoder.version}) mismatch'
            raise DeckDecodingException(deck_code, msg)
        checksum = deck_code_bytes[current_byte]
        current_byte += 1
        string_length = 0
        if version > 1:
            # Version 2 appends a UTF-8 deck name; its byte length is stored
            # in the header.
            string_length = deck_code_bytes[current_byte]
            current_byte += 1
        total_card_bytes = total_bytes - string_length
        # Checksum is the low byte of the sum of the card bytes (name
        # excluded). Computed inline so the decoder does not depend on
        # DeckEncoder.
        computed_checksum = sum(deck_code_bytes[current_byte:total_card_bytes]) & 0x0FF
        if checksum != computed_checksum:
            msg = f'Checksum in deck code ({checksum}) does not match computed checksum ({computed_checksum})'
            raise DeckDecodingException(deck_code, msg)
        num_heroes, current_byte = DeckDecoder._read_int(version_and_heroes, 3, deck_code_bytes, current_byte,
                                                         total_card_bytes)
        heroes = []
        last_card_id = 0
        for _ in range(num_heroes):
            card_id, turn, current_byte = DeckDecoder._read_serialized_card(deck_code_bytes, current_byte,
                                                                            total_card_bytes, last_card_id)
            last_card_id = card_id
            heroes.append({'card_id': card_id, 'turn': turn})
        cards = []
        last_card_id = 0
        while current_byte < total_card_bytes:
            card_id, count, current_byte = DeckDecoder._read_serialized_card(deck_code_bytes, current_byte,
                                                                             total_card_bytes, last_card_id)
            last_card_id = card_id
            cards.append({'card_id': card_id, 'count': count})
        name = ''
        # BUG FIX: the previous guard (`current_byte <= total_bytes`) was
        # always true, and `bytes[-0:]` slices the WHOLE buffer, so a deck
        # with an empty name decoded the raw card bytes as its "name".
        # Only slice when a name was actually encoded.
        if string_length > 0:
            name = deck_code_bytes[-string_length:].decode('utf-8')
        return {'heroes': heroes, 'cards': cards, 'name': name}

    @staticmethod
    def _read_serialized_card(data, start, end, last_card_id):
        """Read one entry at *start*: returns (card_id, count/turn, new_start).

        The header byte packs a 2-bit count (0x03 marks an extended varint
        count) and the low 5 bits of the card-id delta from *last_card_id*.
        """
        if start > end:
            raise Exception('Deck code truncated while reading a card entry')
        header = data[start]
        start += 1
        extended_count = (header >> 6) == 0x03
        card_delta, start = DeckDecoder._read_int(header, 5, data, start, end)
        card_id = last_card_id + card_delta
        if extended_count:
            count, start = DeckDecoder._read_int(0, 0, data, start, end)
        else:
            count = (header >> 6) + 1
        return card_id, count, start

    @staticmethod
    def _read_int(base_value, base_bits, data, start, end):
        """Decode a varint whose low *base_bits* live in *base_value*;
        continuation chunks carry 7 bits each. Returns (value, new_start)."""
        out = 0
        delta_shift = 0
        out, cont = DeckDecoder._read_bits_chunk(base_value, base_bits, delta_shift, out)
        # base_bits == 0 means the whole value lives in continuation bytes.
        if base_bits == 0 or cont:
            delta_shift += base_bits
            while True:
                if start > end:
                    raise Exception('Deck code truncated while reading a varint')
                next_byte = data[start]
                start += 1
                out, cont = DeckDecoder._read_bits_chunk(next_byte, 7, delta_shift, out)
                if not cont:
                    break
                delta_shift += 7
        return out, start

    @staticmethod
    def _read_bits_chunk(chunk, num_bits, current_shift, out):
        """Merge the low *num_bits* of *chunk* into *out* at *current_shift*;
        the bit just above them signals continuation."""
        continue_bit = 1 << num_bits
        new_bits = chunk & (continue_bit - 1)
        out |= new_bits << current_shift
        return out, (chunk & continue_bit) != 0
| {"/benchmark.py": ["/adc_py.py"]} |
73,420 | bernardpazio/artifact | refs/heads/master | /benchmark.py | import time
import datetime
import adc_py
import cards_py
import pyximport; pyximport.install()
import adc_c
import cards_c
from artifact import adc
from artifact import cards
import random
def run(name, cardlib, adclib):
    """Benchmark one (card library, codec) implementation pair.

    Loads both card sets, generates random decks, and round-trips each deck
    through the encoder/decoder, reporting deck-generation time separately
    from encode/decode time so the codec cost is isolated.
    """
    start = time.time()
    all_cards = cardlib.CardSet.load_card_set('00').cards + cardlib.CardSet.load_card_set('01').cards
    hero_cards = [card for card in all_cards if card.card_type == 'Hero']
    # Signature cards: cards a hero's 'includes' reference points at; they are
    # excluded from the random draw pool below.
    sig_cards = []
    for hero_card in hero_cards:
        for ref in hero_card.references:
            if ref['ref_type'] == 'includes':
                ref_card = None
                for card in all_cards:
                    if card.card_id == ref['card_id']:
                        ref_card = card
                sig_cards.append(ref_card)
    item_cards = [card for card in all_cards if card.card_type == 'Item']
    playable_cards = []
    for card in all_cards:
        if card not in sig_cards and card.card_type in ['Spell', 'Creep', 'Improvement']:
            playable_cards.append(card)
    load_time = time.time() - start

    def deck_generator(count=100):
        """Yield *count* random decks, each named by its sequence number."""
        for i in range(count):
            heroes = [random.choice(hero_cards) for i in range(5)]
            items = [random.choice(item_cards) for i in range(random.randint(9, 18))]
            main_deck = [random.choice(playable_cards) for i in range(random.randint(25, 50))]
            yield cardlib.Deck(heroes, main_deck, items, str(i))

    start = time.time()
    runs = 1000
    gen_start = time.time()
    total_gen_time = 0
    # Accumulate generator time separately from the encode/decode work.
    for deck in deck_generator(runs):
        gen_end = time.time()
        total_gen_time += gen_end - gen_start
        deck_code_dict = deck.to_code_deck_dict()
        deck_code_encode = adclib.DeckEncoder.encode(deck_code_dict)
        deck_code_decode = adclib.DeckDecoder.decode(deck_code_encode)
        # Result is discarded; constructing it is part of the benchmark.
        new_deck = cardlib.Deck.from_code_deck_dict(deck_code_decode, all_cards)
        gen_start = time.time()
    end = time.time()
    total_time = end - start
    encode_decode_time = total_time - total_gen_time
    print(f'{name} for {runs} runs:', datetime.timedelta(seconds=total_time))
    print(f'gen_time: {total_gen_time: .3f}s | ed_time: {encode_decode_time: .3f}s | load_time: {load_time: .3f}s')


# Compare the three implementations: pure Python, naive Cython, tuned Cython.
run('Pure Python', cards_py, adc_py)
run('Cython', cards_c, adc_c)
run('"Optimized" Cython', cards, adc)
| {"/benchmark.py": ["/adc_py.py"]} |
73,425 | mospina/paranuara | refs/heads/master | /companies/views.py | from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from .models import Company
from .serializers import CompanySerializer
class CompanyDetail(APIView):
    """
    Retrieve a single company by its ``index`` field.
    """

    def get_object(self, index):
        """Return the Company with the given index, or raise a 404."""
        try:
            return Company.objects.get(index=index)
        except Company.DoesNotExist:
            # BUG FIX: the previous code did `raise status.HTTP_404_NOT_FOUND`,
            # which raises the bare int 404 and crashes with "exceptions must
            # derive from BaseException" (an HTTP 500). Raise DRF's NotFound so
            # the client receives a proper 404 response.
            from rest_framework.exceptions import NotFound
            raise NotFound(f'Company with index {index} does not exist')

    def get(self, request, index, format=None):
        """GET /<index> -> serialized company (with employee PKs)."""
        company = self.get_object(index)
        serializer = CompanySerializer(company)
        return Response(serializer.data)
| {"/companies/views.py": ["/companies/models.py", "/companies/serializers.py"], "/companies/serializers.py": ["/companies/models.py"], "/companies/tests/test_views.py": ["/people/tests/factories.py", "/companies/tests/factories.py"], "/people/models.py": ["/companies/models.py"], "/people/views.py": ["/people/models.py", "/people/serializers.py"], "/people/management/commands/load_people.py": ["/people/loader.py"], "/core/tests.py": ["/people/models.py"], "/people/loader.py": ["/companies/models.py", "/people/models.py"], "/people/tests/test_loader.py": ["/companies/models.py", "/people/models.py"], "/companies/tests/test_models.py": ["/companies/models.py"], "/people/tests/test_views.py": ["/people/tests/factories.py"], "/core/management/commands/load_data.py": ["/companies/models.py", "/people/loader.py"], "/people/serializers.py": ["/companies/serializers.py", "/people/models.py"], "/companies/tests/factories.py": ["/companies/models.py"], "/people/tests/factories.py": ["/companies/tests/factories.py", "/people/models.py"], "/people/tests/test_models.py": ["/people/models.py"]} |
73,426 | mospina/paranuara | refs/heads/master | /companies/serializers.py | from rest_framework import serializers
from companies.models import Company
class CompanySerializer(serializers.ModelSerializer):
    """Serializes a Company together with the primary keys of its employees.

    `employees` is the reverse FK declared by Person.company
    (related_name="employees"); it is read-only and rendered as a list of
    Person primary keys.
    """

    employees = serializers.PrimaryKeyRelatedField(many=True, read_only=True)

    class Meta:
        model = Company
        fields = ("id", "index", "name", "employees")
| {"/companies/views.py": ["/companies/models.py", "/companies/serializers.py"], "/companies/serializers.py": ["/companies/models.py"], "/companies/tests/test_views.py": ["/people/tests/factories.py", "/companies/tests/factories.py"], "/people/models.py": ["/companies/models.py"], "/people/views.py": ["/people/models.py", "/people/serializers.py"], "/people/management/commands/load_people.py": ["/people/loader.py"], "/core/tests.py": ["/people/models.py"], "/people/loader.py": ["/companies/models.py", "/people/models.py"], "/people/tests/test_loader.py": ["/companies/models.py", "/people/models.py"], "/companies/tests/test_models.py": ["/companies/models.py"], "/people/tests/test_views.py": ["/people/tests/factories.py"], "/core/management/commands/load_data.py": ["/companies/models.py", "/people/loader.py"], "/people/serializers.py": ["/companies/serializers.py", "/people/models.py"], "/companies/tests/factories.py": ["/companies/models.py"], "/people/tests/factories.py": ["/companies/tests/factories.py", "/people/models.py"], "/people/tests/test_models.py": ["/people/models.py"]} |
73,427 | mospina/paranuara | refs/heads/master | /companies/tests/test_views.py | import json
from django.test import TestCase
from rest_framework.test import APIClient
from rest_framework import status
from people.tests.factories import PersonFactory
from .factories import CompanyFactory
class CompanyViewTest(TestCase):
    """Endpoint tests for GET /v1/companies/<index>."""

    def set_data(self):
        """Fixture: company1 has three employees, company3 one, company2 none."""
        self.company1 = CompanyFactory(index=1, name="Company")
        self.company2 = CompanyFactory(index=2, name="Corporation")
        self.company3 = CompanyFactory(index=3, name="Enterprise")
        self.person1 = PersonFactory(index=1, company=self.company1)
        self.person2 = PersonFactory(index=2, company=self.company1)
        self.person3 = PersonFactory(index=3, company=self.company1)
        self.person4 = PersonFactory(index=4, company=self.company3)

    def setUp(self):
        self.client = APIClient()
        self.base_url = "/v1/companies/{company_index}"
        self.set_data()

    def test_company_get_returns_200(self):
        """An existing company returns 200 with a JSON body."""
        response = self.client.get(
            self.base_url.format(company_index=self.company1.index)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response["content-type"], "application/json")

    def test_company_get_returns_list_of_employees(self):
        """Employees are rendered as the employees' primary keys."""
        response = self.client.get(
            self.base_url.format(company_index=self.company1.index)
        )
        self.assertEqual(json.loads(response.content)["employees"], [1, 2, 3])

    def test_company_get_returns_empty_list(self):
        """A company with no employees renders an empty employees list."""
        response = self.client.get(
            self.base_url.format(company_index=self.company2.index)
        )
        self.assertEqual(json.loads(response.content)["employees"], [])
| {"/companies/views.py": ["/companies/models.py", "/companies/serializers.py"], "/companies/serializers.py": ["/companies/models.py"], "/companies/tests/test_views.py": ["/people/tests/factories.py", "/companies/tests/factories.py"], "/people/models.py": ["/companies/models.py"], "/people/views.py": ["/people/models.py", "/people/serializers.py"], "/people/management/commands/load_people.py": ["/people/loader.py"], "/core/tests.py": ["/people/models.py"], "/people/loader.py": ["/companies/models.py", "/people/models.py"], "/people/tests/test_loader.py": ["/companies/models.py", "/people/models.py"], "/companies/tests/test_models.py": ["/companies/models.py"], "/people/tests/test_views.py": ["/people/tests/factories.py"], "/core/management/commands/load_data.py": ["/companies/models.py", "/people/loader.py"], "/people/serializers.py": ["/companies/serializers.py", "/people/models.py"], "/companies/tests/factories.py": ["/companies/models.py"], "/people/tests/factories.py": ["/companies/tests/factories.py", "/people/models.py"], "/people/tests/test_models.py": ["/people/models.py"]} |
73,428 | mospina/paranuara | refs/heads/master | /people/models.py | from collections import namedtuple
from django.db import models
from companies.models import Company
# (stored value, human-readable label) pairs for Person.gender.
GENDER_CHOICES = (("M", "Male"), ("F", "Female"), ("N", "No response"))
# Food names imported by people/loader.py — presumably used to classify a
# person's favourite foods into fruits vs vegetables; confirm there.
KNOWN_FRUITS = ["orange", "banana", "strawberry", "apple"]
KNOWN_VEGETABLES = ["cucumber", "carrot", "celery", "beetroot"]
class Tag(models.Model):
    """A free-form label attached to people (Person.tags M2M)."""
    label = models.CharField(max_length=64)

    def __str__(self):
        return self.label
class Fruit(models.Model):
    """A fruit name referenced by Person.favouriteFruits."""
    name = models.CharField(max_length=32)

    def __str__(self):
        return self.name
class Vegetable(models.Model):
    """A vegetable name referenced by Person.favouriteVegetables."""
    name = models.CharField(max_length=32)

    def __str__(self):
        return self.name
class Person(models.Model):
    """A Paranuara citizen loaded from the source JSON dataset.

    NOTE(review): the camelCase field names (eyeColor, favouriteFruits,
    favouriteVegetables) presumably mirror the JSON keys consumed by
    people/loader.py; renaming them would require a migration and break the
    loader/serializers — confirm before touching.
    """

    # Bookkeeping timestamps maintained automatically by Django.
    created_at = models.DateTimeField(auto_now_add=True, editable=False)
    modified_at = models.DateTimeField(auto_now=True, editable=False)
    # Identifiers carried over from the source data.
    _id = models.CharField(max_length=128, unique=True)
    index = models.IntegerField(unique=True)
    guid = models.CharField(max_length=128, unique=True)  # UUID string from the source data
    has_died = models.BooleanField()
    balance = models.DecimalField(max_digits=10, decimal_places=2)
    picture = models.URLField()
    age = models.IntegerField()
    eyeColor = models.CharField(max_length=32)
    name = models.CharField(max_length=128)
    gender = models.CharField(max_length=2, choices=GENDER_CHOICES)
    # SET_NULL keeps the person if their company is deleted; the reverse
    # accessor Company.employees is used by the companies serializer.
    company = models.ForeignKey(
        Company, related_name="employees", on_delete=models.SET_NULL, null=True
    )
    email = models.EmailField()
    phone = models.CharField(max_length=128)
    address = models.CharField(max_length=256)
    about = models.TextField()
    registered = models.DateTimeField()
    tags = models.ManyToManyField(Tag)
    # Self-referential M2M; symmetrical by default (A friends B => B friends A).
    friends = models.ManyToManyField("self")
    greeting = models.CharField(max_length=256)
    favouriteFruits = models.ManyToManyField(Fruit)
    favouriteVegetables = models.ManyToManyField(Vegetable)

    def __str__(self):
        return self.name
# Lightweight response container for the people-list endpoint: `friends` holds
# the friends common to everyone in `people` (see people/views.PeopleList).
People = namedtuple("People", ["friends", "people"])
| {"/companies/views.py": ["/companies/models.py", "/companies/serializers.py"], "/companies/serializers.py": ["/companies/models.py"], "/companies/tests/test_views.py": ["/people/tests/factories.py", "/companies/tests/factories.py"], "/people/models.py": ["/companies/models.py"], "/people/views.py": ["/people/models.py", "/people/serializers.py"], "/people/management/commands/load_people.py": ["/people/loader.py"], "/core/tests.py": ["/people/models.py"], "/people/loader.py": ["/companies/models.py", "/people/models.py"], "/people/tests/test_loader.py": ["/companies/models.py", "/people/models.py"], "/companies/tests/test_models.py": ["/companies/models.py"], "/people/tests/test_views.py": ["/people/tests/factories.py"], "/core/management/commands/load_data.py": ["/companies/models.py", "/people/loader.py"], "/people/serializers.py": ["/companies/serializers.py", "/people/models.py"], "/companies/tests/factories.py": ["/companies/models.py"], "/people/tests/factories.py": ["/companies/tests/factories.py", "/people/models.py"], "/people/tests/test_models.py": ["/people/models.py"]} |
73,429 | mospina/paranuara | refs/heads/master | /companies/urls.py | from django.urls import path
from companies import views
# GET <index> -> company detail; mounted under /v1/companies/ by the project
# URLconf (see the test base URL "/v1/companies/{index}").
urlpatterns = [path("<int:index>", views.CompanyDetail.as_view())]
| {"/companies/views.py": ["/companies/models.py", "/companies/serializers.py"], "/companies/serializers.py": ["/companies/models.py"], "/companies/tests/test_views.py": ["/people/tests/factories.py", "/companies/tests/factories.py"], "/people/models.py": ["/companies/models.py"], "/people/views.py": ["/people/models.py", "/people/serializers.py"], "/people/management/commands/load_people.py": ["/people/loader.py"], "/core/tests.py": ["/people/models.py"], "/people/loader.py": ["/companies/models.py", "/people/models.py"], "/people/tests/test_loader.py": ["/companies/models.py", "/people/models.py"], "/companies/tests/test_models.py": ["/companies/models.py"], "/people/tests/test_views.py": ["/people/tests/factories.py"], "/core/management/commands/load_data.py": ["/companies/models.py", "/people/loader.py"], "/people/serializers.py": ["/companies/serializers.py", "/people/models.py"], "/companies/tests/factories.py": ["/companies/models.py"], "/people/tests/factories.py": ["/companies/tests/factories.py", "/people/models.py"], "/people/tests/test_models.py": ["/people/models.py"]} |
73,430 | mospina/paranuara | refs/heads/master | /people/views.py | from django.shortcuts import redirect
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from .models import Person, People
from .serializers import PersonSerializer, PeopleSerializer
class PersonDetail(APIView):
    """
    Retrieve a single person by their ``index`` field.
    """

    def get_object(self, index):
        """Return the Person with the given index, or raise a 404."""
        try:
            return Person.objects.get(index=index)
        except Person.DoesNotExist:
            # BUG FIX: the previous code did `raise status.HTTP_404_NOT_FOUND`,
            # which raises the bare int 404 and crashes with "exceptions must
            # derive from BaseException" (an HTTP 500). Raise DRF's NotFound so
            # the client receives a proper 404 response.
            from rest_framework.exceptions import NotFound
            raise NotFound(f'Person with index {index} does not exist')

    def get(self, request, index, format=None):
        """GET a single serialized person."""
        person = self.get_object(index)
        serializer = PersonSerializer(person)
        return Response(serializer.data)
class PeopleList(APIView):
    """
    Retrieve two or more people.

    GET /?index=<n>&index=<m>...  — with no indexes, returns everyone. The
    response also carries the friends common to all requested people,
    filtered to those who are brown-eyed and alive.
    """

    def get(self, request, format=None):
        # ?index= may be repeated; an empty list means "all people".
        indexes = request.query_params.getlist("index")
        if len(indexes) == 1:
            # NOTE(review): View.as_view() accepts keyword initkwargs only, so
            # this positional call raises TypeError at request time; a URL
            # redirect to the person-detail route (with index as a kwarg) is
            # presumably what was intended — confirm against the URLconf.
            return redirect(PersonDetail.as_view(indexes[0]))
        if indexes:
            people = Person.objects.filter(index__in=indexes)
        else:
            people = Person.objects.all()
        # Keep only the common friends who are alive and brown-eyed.
        friends = [
            f
            for f in self.get_common_friends(people)
            if f.eyeColor == "brown" and not f.has_died
        ]
        data = People(friends=friends, people=people)
        serializer = PeopleSerializer(data)
        return Response(serializer.data)

    @classmethod
    def get_common_friends(cls, people):
        """
        Return the friends shared by every person in *people* — the
        intersection of each person's friends list.

        NOTE(review): `people[0]` raises IndexError when the queryset is
        empty, and calling person.friends.all() per person issues N+1
        queries — consider guarding and prefetching.
        """
        friends = people[0].friends.all()
        for person in people:
            common_friends = filter(lambda x: x in person.friends.all(), friends)
            friends = list(common_friends)
        return friends
| {"/companies/views.py": ["/companies/models.py", "/companies/serializers.py"], "/companies/serializers.py": ["/companies/models.py"], "/companies/tests/test_views.py": ["/people/tests/factories.py", "/companies/tests/factories.py"], "/people/models.py": ["/companies/models.py"], "/people/views.py": ["/people/models.py", "/people/serializers.py"], "/people/management/commands/load_people.py": ["/people/loader.py"], "/core/tests.py": ["/people/models.py"], "/people/loader.py": ["/companies/models.py", "/people/models.py"], "/people/tests/test_loader.py": ["/companies/models.py", "/people/models.py"], "/companies/tests/test_models.py": ["/companies/models.py"], "/people/tests/test_views.py": ["/people/tests/factories.py"], "/core/management/commands/load_data.py": ["/companies/models.py", "/people/loader.py"], "/people/serializers.py": ["/companies/serializers.py", "/people/models.py"], "/companies/tests/factories.py": ["/companies/models.py"], "/people/tests/factories.py": ["/companies/tests/factories.py", "/people/models.py"], "/people/tests/test_models.py": ["/people/models.py"]} |
73,431 | mospina/paranuara | refs/heads/master | /people/management/commands/load_people.py | from django.core.management.base import BaseCommand, CommandError
from people.loader import load_data_from_file
class Command(BaseCommand):
    """``manage.py load_people <file> [...]`` — load people from JSON files."""

    help = "Read data from a json file and store it in the database"

    def add_arguments(self, parser):
        # One or more JSON file paths, processed in order.
        parser.add_argument("filenames", nargs="+", type=str)

    def handle(self, *args, **options):
        for filename in options["filenames"]:
            load_data_from_file(filename)
| {"/companies/views.py": ["/companies/models.py", "/companies/serializers.py"], "/companies/serializers.py": ["/companies/models.py"], "/companies/tests/test_views.py": ["/people/tests/factories.py", "/companies/tests/factories.py"], "/people/models.py": ["/companies/models.py"], "/people/views.py": ["/people/models.py", "/people/serializers.py"], "/people/management/commands/load_people.py": ["/people/loader.py"], "/core/tests.py": ["/people/models.py"], "/people/loader.py": ["/companies/models.py", "/people/models.py"], "/people/tests/test_loader.py": ["/companies/models.py", "/people/models.py"], "/companies/tests/test_models.py": ["/companies/models.py"], "/people/tests/test_views.py": ["/people/tests/factories.py"], "/core/management/commands/load_data.py": ["/companies/models.py", "/people/loader.py"], "/people/serializers.py": ["/companies/serializers.py", "/people/models.py"], "/companies/tests/factories.py": ["/companies/models.py"], "/people/tests/factories.py": ["/companies/tests/factories.py", "/people/models.py"], "/people/tests/test_models.py": ["/people/models.py"]} |
73,432 | mospina/paranuara | refs/heads/master | /core/tests.py | from io import StringIO
from unittest import skip
from django.test import TestCase
from django.core.management import call_command
from people.models import Person
class LoadDataTest(TestCase):
    """Smoke test for the `load_data` management command."""

    def setUp(self):
        # Command name and fixture paths (relative to the working dir).
        self.command = "load_data"
        self.companies = "resources/companies.json"
        self.people = "resources/people.json"

    @skip("Test take too long to run")
    def test_load_data(self):
        """Running the command should add Person rows to the database."""
        out = StringIO()
        before_count = Person.objects.count()
        call_command(
            self.command, companies=[self.companies], people=[self.people], stdout=out
        )
        after_count = Person.objects.count()
        self.assertGreater(after_count, before_count)
| {"/companies/views.py": ["/companies/models.py", "/companies/serializers.py"], "/companies/serializers.py": ["/companies/models.py"], "/companies/tests/test_views.py": ["/people/tests/factories.py", "/companies/tests/factories.py"], "/people/models.py": ["/companies/models.py"], "/people/views.py": ["/people/models.py", "/people/serializers.py"], "/people/management/commands/load_people.py": ["/people/loader.py"], "/core/tests.py": ["/people/models.py"], "/people/loader.py": ["/companies/models.py", "/people/models.py"], "/people/tests/test_loader.py": ["/companies/models.py", "/people/models.py"], "/companies/tests/test_models.py": ["/companies/models.py"], "/people/tests/test_views.py": ["/people/tests/factories.py"], "/core/management/commands/load_data.py": ["/companies/models.py", "/people/loader.py"], "/people/serializers.py": ["/companies/serializers.py", "/people/models.py"], "/companies/tests/factories.py": ["/companies/models.py"], "/people/tests/factories.py": ["/companies/tests/factories.py", "/people/models.py"], "/people/tests/test_models.py": ["/people/models.py"]} |
73,433 | mospina/paranuara | refs/heads/master | /people/loader.py | import json
from decimal import Decimal
import re
from datetime import datetime, timedelta
import logging
from companies.models import Company
from people.models import Tag, Fruit, Vegetable, Person
from people.models import KNOWN_FRUITS, KNOWN_VEGETABLES
logger = logging.getLogger(__name__)
def load_data_from_file(file_path):
    """Load people from a JSON file and store each entry in the database.

    file_path: path to a JSON file containing a list of raw person
    entries. Each entry is handed to get_person(), which creates (or
    reuses) the corresponding Person row and its relations.
    """
    # The context manager closes the file on exit; the explicit
    # fh.close() the original carried inside the with-block was
    # redundant and has been removed.
    with open(file_path, newline="") as fh:
        json_data = json.load(fh)
    for entry in json_data:
        get_person(entry, json_data)
def get_person(person, data):
    """Create (or fetch) the Person row for one raw JSON entry.

    person: dict with the raw person fields; may be empty.
    data: full list of raw entries, used to resolve friends that are
        not yet in the database.
    Returns the saved Person object, or None when *person* is empty.
    """
    if not person:
        return None
    # Classify the free-form food list into fruits / vegetables.
    fruits, vegetables, _unknown = split_favourite_food(person["favouriteFood"])
    # Keyed on "index", so reloading the same file is idempotent:
    # defaults are only applied when the row is first created.
    obj, _ = Person.objects.get_or_create(
        index=person["index"],
        defaults={
            "_id": person["_id"],
            "guid": person["guid"],
            "has_died": person["has_died"],
            "balance": currency_to_decimal(person["balance"]),
            "picture": person["picture"],
            "age": person["age"],
            "eyeColor": person["eyeColor"],
            "name": person["name"],
            "gender": get_gender(person["gender"]),
            "email": person["email"],
            "phone": person["phone"],
            "address": person["address"],
            "about": person["about"],
            "registered": format_date(person["registered"]),
            "greeting": person["greeting"],
        },
    )
    obj.tags.add(*get_tags(person["tags"]))
    # favouriteFood, split above into the two M2M relations
    obj.favouriteFruits.add(*get_fruits(fruits))
    obj.favouriteVegetables.add(*get_vegetables(vegetables))
    # friends — self-references are filtered out before resolving;
    # get_friends may recurse back into get_person for unseen entries.
    friends = get_friends(
        [p for p in person["friends"] if p["index"] != person["index"]], data
    )
    for friend in friends:
        if friend:
            obj.friends.add(friend)
    # company — may be None when the referenced company wasn't loaded
    company = get_company(person["company_id"])
    if company:
        obj.company = company
    obj.save()
    return obj
def get_friends(friends, data):
    """
    Given a list of friends' ids return a list of Person Objects
    [{index: int}], data -> [Person]
    - index: is the unique identifier for a person given in the data
    - data: is the list of people in json format

    Processes the list recursively: for each entry an existing Person
    row is reused when found; otherwise the raw entry is looked up in
    *data* and created via get_person (which may recurse further).
    """
    if not friends:
        return []
    # head/tail split: classic recursive list traversal.
    head, tail = friends[0], friends[1:]
    try:
        obj = Person.objects.get(index=head["index"])
    except Person.DoesNotExist:
        # Not in the DB yet: build it from the raw JSON data first.
        # May return None when the index is absent from *data*
        # (get_person({}) returns None); callers filter falsy entries.
        friend = find_friend(head["index"], data)
        return [get_person(friend, data)] + get_friends(tail, data)
    else:
        return [obj] + get_friends(tail, data)
def find_friend(index, data):
    """Return the person entry whose "index" field equals *index*.

    index: integer unique identifier of a person.
    data: list of person dicts (parsed JSON).
    Returns the first matching dict, or an empty dict when no entry
    matches.
    """
    for entry in data:
        if entry["index"] == index:
            return entry
    return {}
def get_company(company_index):
    """
    Try to search for a matching company in the RDB.

    company_index: the company's unique "index" value.
    Returns the Company object on success; when no company with that
    index exists, logs a warning and returns None instead of raising.
    """
    try:
        obj = Company.objects.get(index=company_index)
    except Company.DoesNotExist:
        logger.warning("Company {} doesn't exist".format(company_index))
        return None
        # raise Company.DoesNotExist("{} doesn't exist. Load companies first.".format(company_index))
    else:
        return obj
def get_fruits(fruits):
    """Map fruit names onto Fruit rows, creating any missing ones.

    [str] -> [Fruit]
    """
    # get_or_create returns (object, created); keep only the object.
    return [Fruit.objects.get_or_create(name=name)[0] for name in fruits]
def get_vegetables(vegetables):
    """
    Given a list of vegetable names return a list of Vegetable Objects
    [str] -> [Vegetable]
    """
    # get_or_create returns (object, created) pairs; unzip below.
    objs = [Vegetable.objects.get_or_create(name=v) for v in vegetables]
    return [v for v, _ in objs]
def get_tags(tags):
    """Map tag labels onto Tag rows, creating any that do not yet exist.

    [str] -> [Tag]
    """
    # get_or_create returns (object, created); keep only the object.
    return [Tag.objects.get_or_create(label=label)[0] for label in tags]
def currency_to_decimal(currency):
    """Parse a currency string of the form '$1,234.56' into a Decimal.

    Returns Decimal(0) for an empty/None input or for a string that
    does not start with a '$'-prefixed amount.

    NOTE(review): the amount is routed through float() before Decimal,
    so stored values carry binary-float rounding (and the project's
    tests pin that behaviour) — confirm before tightening to
    Decimal(str).
    """
    if not currency:
        return Decimal(0)
    match = re.match(r"\$(?P<quantity>[\d,.]+)", currency)
    if match is None:
        return Decimal(0)
    digits = match.group("quantity").replace(",", "")
    return Decimal(float(digits))
def format_date(date):
    """Convert 'YYYY-MM-DDTHH:MM:SS -OH:OM' into 'YYYY-MM-DD HH:MM:SS'.

    The trailing offset is subtracted from the timestamp; strings that
    do not match the pattern fall back to the current time.

    NOTE(review): for a '-OH:OM' zone the offset is *subtracted*, which
    moves further from UTC rather than toward it; the project's own
    test pins this result, so confirm the intended direction before
    changing it.
    """
    pattern = (
        r"(?P<Y>\d+)-(?P<m>\d+)-(?P<d>\d+)"
        r"T(?P<H>\d+):(?P<M>\d+):(?P<S>\d+)"
        r"\s*-(?P<OH>\d+):(?P<OM>\d+)"
    )
    match = re.match(pattern, date)
    if match is None:
        dt = datetime.now()
    else:
        # Groups come back in positional order: Y, m, d, H, M, S, OH, OM.
        y, mo, d, h, mi, s, oh, om = (int(g) for g in match.groups())
        dt = datetime(y, mo, d, h, mi, s) - timedelta(hours=oh, minutes=om)
    return dt.strftime("%Y-%m-%d %H:%M:%S")
def split_favourite_food(
    list_of_food, known_fruits=None, known_vegetables=None
):
    """
    Given a list of food, split it into fruits, vegetables and unknown.

    list_of_food: list of strings representing food to be split.
    known_fruits: list of fruit names that are recognised; defaults to
        the module-level KNOWN_FRUITS list.
    known_vegetables: list of vegetable names that are recognised;
        defaults to the module-level KNOWN_VEGETABLES list.

    The defaults are now resolved at call time (instead of being bound
    once at definition time), so explicit lists can be passed for
    testing and later changes to the module constants take effect.
    A name appearing in both lists is classified as a fruit, since
    known_fruits is checked first.

    Returns a tuple ([fruits], [vegetables], [unknown]).
    """
    if known_fruits is None:
        known_fruits = KNOWN_FRUITS
    if known_vegetables is None:
        known_vegetables = KNOWN_VEGETABLES
    fruits = []
    vegetables = []
    unknown = []
    for name in list_of_food:
        if name in known_fruits:
            fruits.append(name)
        elif name in known_vegetables:
            vegetables.append(name)
        else:
            unknown.append(name)
    return (fruits, vegetables, unknown)
def get_gender(gender):
    """Normalize a free-form gender string to 'M', 'F' or 'N' (unknown).

    Accepts any casing of 'm'/'male'/'f'/'female'; anything else maps
    to 'N'.
    """
    # capitalize() folds "MALE"/"male" -> "Male", "m" -> "M", etc.
    codes = {"M": "M", "Male": "M", "F": "F", "Female": "F"}
    return codes.get(gender.capitalize(), "N")
| {"/companies/views.py": ["/companies/models.py", "/companies/serializers.py"], "/companies/serializers.py": ["/companies/models.py"], "/companies/tests/test_views.py": ["/people/tests/factories.py", "/companies/tests/factories.py"], "/people/models.py": ["/companies/models.py"], "/people/views.py": ["/people/models.py", "/people/serializers.py"], "/people/management/commands/load_people.py": ["/people/loader.py"], "/core/tests.py": ["/people/models.py"], "/people/loader.py": ["/companies/models.py", "/people/models.py"], "/people/tests/test_loader.py": ["/companies/models.py", "/people/models.py"], "/companies/tests/test_models.py": ["/companies/models.py"], "/people/tests/test_views.py": ["/people/tests/factories.py"], "/core/management/commands/load_data.py": ["/companies/models.py", "/people/loader.py"], "/people/serializers.py": ["/companies/serializers.py", "/people/models.py"], "/companies/tests/factories.py": ["/companies/models.py"], "/people/tests/factories.py": ["/companies/tests/factories.py", "/people/models.py"], "/people/tests/test_models.py": ["/people/models.py"]} |
73,434 | mospina/paranuara | refs/heads/master | /people/tests/test_loader.py | from unittest import skip
import json
from decimal import Decimal
from django.test import TestCase
from companies.models import Company
from people import loader
from people.models import Person
from people.models import Fruit, Vegetable, Tag
PEOPLE = """
[
{
"_id": "595eeb9b96d80a5bc7afb106",
"index": 0,
"guid": "5e71dc5d-61c0-4f3b-8b92-d77310c7fa43",
"has_died": true,
"balance": "$2,418.59",
"picture": "http://placehold.it/32x32",
"age": 61,
"eyeColor": "blue",
"name": "Carmella Lambert",
"gender": "female",
"company_id": 1,
"email": "carmellalambert@earthmark.com",
"phone": "+1 (910) 567-3630",
"address": "628 Sumner Place, Sperryville, American Samoa, 9819",
"about": "Non duis dolore ad enim. Est id reprehenderit cupidatat tempor excepteur.",
"registered": "2016-07-13T12:29:07 -10:00",
"tags": [
"id",
"quis",
"ullamco",
"consequat",
"laborum",
"sint",
"velit"
],
"friends": [
{
"index": 0
},
{
"index": 1
},
{
"index": 2
}
],
"greeting": "Hello, Carmella Lambert! You have 6 unread messages.",
"favouriteFood": [
"orange",
"apple",
"banana",
"strawberry"
]
},
{
"_id": "595eeb9b1e0d8942524c98ad",
"index": 1,
"guid": "b057bb65-e335-450e-b6d2-d4cc859ff6cc",
"has_died": false,
"balance": "$1,562.58",
"picture": "http://placehold.it/32x32",
"age": 60,
"eyeColor": "brown",
"name": "Decker Mckenzie",
"gender": "male",
"company_id": 2,
"email": "deckermckenzie@earthmark.com",
"phone": "+1 (893) 587-3311",
"address": "492 Stockton Street, Lawrence, Guam, 4854",
"about": "Consectetur aute consectetur dolor aliquip dolor sit id.",
"registered": "2017-06-25T10:03:49 -10:00",
"tags": [
"veniam",
"irure",
"mollit",
"sunt",
"amet",
"fugiat",
"ex"
],
"friends": [
{
"index": 0
},
{
"index": 1
},
{
"index": 2
},
{
"index": 5
}
],
"greeting": "Hello, Decker Mckenzie! You have 2 unread messages.",
"favouriteFood": [
"cucumber",
"beetroot",
"carrot",
"celery"
]
},
{
"_id": "595eeb9bb3821d9982ea44f9",
"index": 2,
"guid": "49c04b8d-0a96-4319-b310-d6aa8269adca",
"has_died": false,
"balance": "$2,119.44",
"picture": "http://placehold.it/32x32",
"age": 54,
"eyeColor": "blue",
"name": "Bonnie Bass",
"gender": "female",
"company_id": 1,
"email": "bonniebass@earthmark.com",
"phone": "+1 (823) 428-3710",
"address": "455 Dictum Court, Nadine, Mississippi, 6499",
"about": "Non voluptate reprehenderit ad elit veniam nulla ut ea ex.",
"registered": "2017-06-08T04:23:18 -10:00",
"tags": [
"quis",
"sunt",
"sit",
"aliquip",
"pariatur",
"quis",
"nulla"
],
"friends": [
{
"index": 0
},
{
"index": 1
},
{
"index": 2
}
],
"greeting": "Hello, Bonnie Bass! You have 10 unread messages.",
"favouriteFood": [
"orange",
"beetroot",
"banana",
"strawberry"
]
},
{
"_id": "595zzza9bb3821d9982ea44f9",
"index":3,
"guid": "49z04b8z-0z96-4319-b310-d6aa8269adca",
"has_died": false,
"balance": "$2,119.44",
"picture": "http://placehold.it/32x32",
"age": 54,
"eyeColor": "blue",
"name": "Bronnie Brass",
"gender": "female",
"company_id": 0,
"email": "bronniebrass@earthmark.com",
"phone": "+1 (823) 428-3710",
"address": "455 Dictum Court, Nadine, Mississippi, 6499",
"about": "Non voluptate reprehenderit ad elit veniam nulla ut ea ex.",
"registered": "2017-06-08T04:23:18 -10:00",
"tags": [
"quis",
"sunt",
"sit",
"aliquip",
"pariatur",
"quis",
"nulla"
],
"friends": [],
"greeting": "Hello, Bronnie Brass! You have 10 unread messages.",
"favouriteFood": [
"orange",
"beetroot",
"banana",
"strawberry"
]
}
]
"""
class TestLoader(TestCase):
    """Unit tests for the helper functions in people.loader."""

    def test_split_favourite_food(self):
        favourite_food = ["cucumber", "beetroot", "strawberry", "cookie"]
        (fruits, vegetables, unknown) = loader.split_favourite_food(favourite_food)
        self.assertIn("cucumber", vegetables)
        self.assertIn("beetroot", vegetables)
        self.assertIn("strawberry", fruits)
        self.assertIn("cookie", unknown)

    def test_get_fruits(self):
        fruits = ["apple", "strawberry"]
        objs = loader.get_fruits(fruits)
        self.assertTrue(len(objs) > 0)
        for f in objs:
            self.assertIsInstance(f, Fruit)

    def test_get_vegetables(self):
        vegetables = ["cucumber", "beetroot"]
        objs = loader.get_vegetables(vegetables)
        self.assertTrue(len(objs) > 0)
        for f in objs:
            self.assertIsInstance(f, Vegetable)

    def test_get_tags(self):
        tags = ["cupidatat", "id", "anim", "tempor"]
        objs = loader.get_tags(tags)
        self.assertTrue(len(objs) > 0)
        for o in objs:
            self.assertIsInstance(o, Tag)

    def test_get_company(self):
        # The index is passed as a string here; the lookup still matches
        # the integer-indexed row created below.
        company = "5"
        # Return None when company doesn't exist
        self.assertIsNone(loader.get_company(company))
        Company.objects.create(index=5)
        obj = loader.get_company(company)
        self.assertIsInstance(obj, Company)

    def test_currency_to_decimal(self):
        currency = "$2,418.59"
        # Empty string returns 0
        self.assertEqual(0, loader.currency_to_decimal(""))
        # Wrong representation of currency returns 0
        self.assertEqual(0, loader.currency_to_decimal("AUD2345"))
        # String representing currency return decimal values
        result = loader.currency_to_decimal(currency)
        self.assertIsInstance(result, Decimal)
        # Compared against Decimal(float), matching the loader's
        # float round-trip behaviour.
        self.assertEqual(Decimal(2418.59), result)

    def test_format_date(self):
        input_date = "2016-07-13T12:29:07 -10:00"
        output_date = "2016-07-13 02:29:07"
        self.assertEqual(output_date, loader.format_date(input_date))

    def test_get_person(self):
        # Companies referenced by the PEOPLE fixture's company_id values.
        Company.objects.create(index=0, name="NETBOOK")
        Company.objects.create(index=1, name="NETBOOK")
        Company.objects.create(index=2, name="NETBOOK")
        data = json.loads(PEOPLE)
        # Return None when person is empty
        self.assertIsNone(loader.get_person({}, data))
        # Return a Person object on success
        self.assertIsInstance(loader.get_person(data[0], data), Person)

    # NOTE(review): skipped; it calls Person.get_friends, but the helper
    # is defined at module level as loader.get_friends — verify before
    # un-skipping.
    @skip
    def test_get_friends(self):
        """
        Person is {
            index: int,
            friends: [Person]
        }

        def fn_for_person(person):
            ... person['index'],
                fn_for_lop(person['friends'])

        def fn_for_lop(people):
            if not people:
                ...
            else:
                ... fn_for_person(people[0]), fn_for_lop(people[1:])
        ----
        json_data = data

        def fn_for_person(person):
            # Prerequisite: person must contain all raw data
            obj = cls.objects.get_or_create(
                person['index']
            )
            obj.add(*fn_for_lop(person['friends']))
            return obj

        def fn_for_lop(people):
            if not people:
                return []
            else:
                if obj = cls.objects.get('index'=people[0]['index']):
                    return [obj] + fn_for_lop(people[1:])
                else:
                    return [fn_for_person(find_person(people[0], json_data))] + fn_for_lop(people[1:])
        """
        person0 = {"index": 0, "friends": [{"index": 1}]}
        person1 = {"index": 1, "friends": [{"index": 2}, {"index": 0}]}
        person2 = {"index": 2, "friends": [{"index": 1}]}
        people = [person0, person1, person2]
        objs = Person.get_friends(person0["friends"], people)
        self.assertTrue(len(objs) > 0)
        for o in objs:
            self.assertIsInstance(o, Person)
| {"/companies/views.py": ["/companies/models.py", "/companies/serializers.py"], "/companies/serializers.py": ["/companies/models.py"], "/companies/tests/test_views.py": ["/people/tests/factories.py", "/companies/tests/factories.py"], "/people/models.py": ["/companies/models.py"], "/people/views.py": ["/people/models.py", "/people/serializers.py"], "/people/management/commands/load_people.py": ["/people/loader.py"], "/core/tests.py": ["/people/models.py"], "/people/loader.py": ["/companies/models.py", "/people/models.py"], "/people/tests/test_loader.py": ["/companies/models.py", "/people/models.py"], "/companies/tests/test_models.py": ["/companies/models.py"], "/people/tests/test_views.py": ["/people/tests/factories.py"], "/core/management/commands/load_data.py": ["/companies/models.py", "/people/loader.py"], "/people/serializers.py": ["/companies/serializers.py", "/people/models.py"], "/companies/tests/factories.py": ["/companies/models.py"], "/people/tests/factories.py": ["/companies/tests/factories.py", "/people/models.py"], "/people/tests/test_models.py": ["/people/models.py"]} |
73,435 | mospina/paranuara | refs/heads/master | /people/migrations/0002_auto_20191206_0030.py | # Generated by Django 2.2.7 on 2019-12-06 00:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine the Person.friends M2M field."""

    dependencies = [("people", "0001_initial")]

    operations = [
        migrations.AlterField(
            model_name="person",
            name="friends",
            # The "+" suffix in related_name suppresses the reverse accessor.
            field=models.ManyToManyField(
                related_name="_person_friends_+", to="people.Person"
            ),
        )
    ]
| {"/companies/views.py": ["/companies/models.py", "/companies/serializers.py"], "/companies/serializers.py": ["/companies/models.py"], "/companies/tests/test_views.py": ["/people/tests/factories.py", "/companies/tests/factories.py"], "/people/models.py": ["/companies/models.py"], "/people/views.py": ["/people/models.py", "/people/serializers.py"], "/people/management/commands/load_people.py": ["/people/loader.py"], "/core/tests.py": ["/people/models.py"], "/people/loader.py": ["/companies/models.py", "/people/models.py"], "/people/tests/test_loader.py": ["/companies/models.py", "/people/models.py"], "/companies/tests/test_models.py": ["/companies/models.py"], "/people/tests/test_views.py": ["/people/tests/factories.py"], "/core/management/commands/load_data.py": ["/companies/models.py", "/people/loader.py"], "/people/serializers.py": ["/companies/serializers.py", "/people/models.py"], "/companies/tests/factories.py": ["/companies/models.py"], "/people/tests/factories.py": ["/companies/tests/factories.py", "/people/models.py"], "/people/tests/test_models.py": ["/people/models.py"]} |
73,436 | mospina/paranuara | refs/heads/master | /companies/migrations/0002_auto_20191204_0323.py | # Generated by Django 2.2.7 on 2019-12-04 03:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: enforce uniqueness on Company.index."""

    dependencies = [("companies", "0001_initial")]

    operations = [
        migrations.AlterField(
            model_name="company", name="index", field=models.IntegerField(unique=True)
        )
    ]
| {"/companies/views.py": ["/companies/models.py", "/companies/serializers.py"], "/companies/serializers.py": ["/companies/models.py"], "/companies/tests/test_views.py": ["/people/tests/factories.py", "/companies/tests/factories.py"], "/people/models.py": ["/companies/models.py"], "/people/views.py": ["/people/models.py", "/people/serializers.py"], "/people/management/commands/load_people.py": ["/people/loader.py"], "/core/tests.py": ["/people/models.py"], "/people/loader.py": ["/companies/models.py", "/people/models.py"], "/people/tests/test_loader.py": ["/companies/models.py", "/people/models.py"], "/companies/tests/test_models.py": ["/companies/models.py"], "/people/tests/test_views.py": ["/people/tests/factories.py"], "/core/management/commands/load_data.py": ["/companies/models.py", "/people/loader.py"], "/people/serializers.py": ["/companies/serializers.py", "/people/models.py"], "/companies/tests/factories.py": ["/companies/models.py"], "/people/tests/factories.py": ["/companies/tests/factories.py", "/people/models.py"], "/people/tests/test_models.py": ["/people/models.py"]} |
73,437 | mospina/paranuara | refs/heads/master | /companies/tests/test_models.py | from django.test import TestCase
from companies.models import Company
class TestCompanyFileLoader(TestCase):
    """Tests for Company.load_data_from_file.

    NOTE(review): the fixture path "../../resources/companies.json" is
    relative to the current working directory — confirm tests are run
    from the expected location.
    """

    def test_load_data_from_file(self):
        # Loading the fixture should add Company rows.
        before_count = Company.objects.count()
        Company.load_data_from_file("../../resources/companies.json")
        after_count = Company.objects.count()
        self.assertGreater(after_count, before_count)

    def test_index_must_be_unique(self):
        # Loading the same file twice must not duplicate rows
        # (get_or_create keyed on the unique index).
        Company.load_data_from_file("../../resources/companies.json")
        before_count = Company.objects.count()
        Company.load_data_from_file("../../resources/companies.json")
        after_count = Company.objects.count()
        self.assertEqual(after_count, before_count)
| {"/companies/views.py": ["/companies/models.py", "/companies/serializers.py"], "/companies/serializers.py": ["/companies/models.py"], "/companies/tests/test_views.py": ["/people/tests/factories.py", "/companies/tests/factories.py"], "/people/models.py": ["/companies/models.py"], "/people/views.py": ["/people/models.py", "/people/serializers.py"], "/people/management/commands/load_people.py": ["/people/loader.py"], "/core/tests.py": ["/people/models.py"], "/people/loader.py": ["/companies/models.py", "/people/models.py"], "/people/tests/test_loader.py": ["/companies/models.py", "/people/models.py"], "/companies/tests/test_models.py": ["/companies/models.py"], "/people/tests/test_views.py": ["/people/tests/factories.py"], "/core/management/commands/load_data.py": ["/companies/models.py", "/people/loader.py"], "/people/serializers.py": ["/companies/serializers.py", "/people/models.py"], "/companies/tests/factories.py": ["/companies/models.py"], "/people/tests/factories.py": ["/companies/tests/factories.py", "/people/models.py"], "/people/tests/test_models.py": ["/people/models.py"]} |
73,438 | mospina/paranuara | refs/heads/master | /people/tests/test_views.py | import json
from django.test import TestCase
from rest_framework.test import APIClient
from rest_framework import status
from people.tests.factories import PersonFactory, FruitFactory, VegetableFactory
class PeopleViewTest(TestCase):
    """Endpoint tests for /v1/people/ (detail and common-friends list)."""

    def set_data(self):
        # Six shared friends; one of them has blue eyes.
        self.friends = []
        for _ in range(5):
            person = PersonFactory()
            self.friends.append(person)
        self.blue_eye_person = PersonFactory(eyeColor="blue")
        self.friends.append(self.blue_eye_person)
        # person1 and person2 share the same six friends.
        self.person1 = PersonFactory(friends=self.friends)
        self.person2 = PersonFactory(friends=self.friends)
        # person3 carries explicit favourite fruits and vegetables.
        self.person3 = PersonFactory(
            favouriteFruits=[FruitFactory(name="banana"), FruitFactory(name="apple")],
            favouriteVegetables=[
                VegetableFactory(name="beetroot"),
                VegetableFactory(name="lettuce"),
            ],
        )

    def setUp(self):
        self.client = APIClient()
        self.base_url = "/v1/people/"
        self.set_data()

    def test_people_get_one_returns_200(self):
        """Detail view returns the person's food lists as plain names."""
        response = self.client.get(self.base_url + "{}".format(self.person3.index))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response["content-type"], "application/json")
        self.assertEqual(json.loads(response.content)["fruits"], ["banana", "apple"])
        self.assertEqual(
            json.loads(response.content)["vegetables"], ["beetroot", "lettuce"]
        )

    def test_people_get_two_return_friends(self):
        """List view with two ?index= params returns their common friends."""
        response = self.client.get(
            self.base_url, {"index": [self.person1.index, self.person2.index]}
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response["content-type"], "application/json")
        # NOTE(review): six friends are shared but only 5 are expected,
        # so the view presumably filters one out (e.g. by eye colour) —
        # confirm against the view implementation.
        self.assertEqual(len(json.loads(response.content)["common_friends"]), 5)
        self.assertEqual(len(json.loads(response.content)["people"]), 2)
| {"/companies/views.py": ["/companies/models.py", "/companies/serializers.py"], "/companies/serializers.py": ["/companies/models.py"], "/companies/tests/test_views.py": ["/people/tests/factories.py", "/companies/tests/factories.py"], "/people/models.py": ["/companies/models.py"], "/people/views.py": ["/people/models.py", "/people/serializers.py"], "/people/management/commands/load_people.py": ["/people/loader.py"], "/core/tests.py": ["/people/models.py"], "/people/loader.py": ["/companies/models.py", "/people/models.py"], "/people/tests/test_loader.py": ["/companies/models.py", "/people/models.py"], "/companies/tests/test_models.py": ["/companies/models.py"], "/people/tests/test_views.py": ["/people/tests/factories.py"], "/core/management/commands/load_data.py": ["/companies/models.py", "/people/loader.py"], "/people/serializers.py": ["/companies/serializers.py", "/people/models.py"], "/companies/tests/factories.py": ["/companies/models.py"], "/people/tests/factories.py": ["/companies/tests/factories.py", "/people/models.py"], "/people/tests/test_models.py": ["/people/models.py"]} |
73,439 | mospina/paranuara | refs/heads/master | /core/management/commands/load_data.py | from django.core.management.base import BaseCommand
from companies.models import Company
from people.loader import load_data_from_file
class Command(BaseCommand):
    """Management command: load companies first, then people, from JSON."""

    help = "Read data from a json file and store it in the database"
    usage = "usage: python manage.py load_data --companies companies.json --people people.json"

    def add_arguments(self, parser):
        # Both options accept one or more file paths.
        parser.add_argument("--companies", nargs="+", dest="companies", type=str)
        parser.add_argument("--people", nargs="+", dest="people", type=str)

    def handle(self, *args, **options):
        # Both options are required; print usage and bail otherwise.
        if not options.get("companies") or not options.get("people"):
            self.stdout.write(self.usage)
            return
        # First load companies (people reference them by company index)
        for elem in options["companies"]:
            Company.load_data_from_file(elem)
        # Then load people
        for elem in options["people"]:
            load_data_from_file(elem)
| {"/companies/views.py": ["/companies/models.py", "/companies/serializers.py"], "/companies/serializers.py": ["/companies/models.py"], "/companies/tests/test_views.py": ["/people/tests/factories.py", "/companies/tests/factories.py"], "/people/models.py": ["/companies/models.py"], "/people/views.py": ["/people/models.py", "/people/serializers.py"], "/people/management/commands/load_people.py": ["/people/loader.py"], "/core/tests.py": ["/people/models.py"], "/people/loader.py": ["/companies/models.py", "/people/models.py"], "/people/tests/test_loader.py": ["/companies/models.py", "/people/models.py"], "/companies/tests/test_models.py": ["/companies/models.py"], "/people/tests/test_views.py": ["/people/tests/factories.py"], "/core/management/commands/load_data.py": ["/companies/models.py", "/people/loader.py"], "/people/serializers.py": ["/companies/serializers.py", "/people/models.py"], "/companies/tests/factories.py": ["/companies/models.py"], "/people/tests/factories.py": ["/companies/tests/factories.py", "/people/models.py"], "/people/tests/test_models.py": ["/people/models.py"]} |
73,440 | mospina/paranuara | refs/heads/master | /companies/models.py | import json
from django.db import models
class Company(models.Model):
    """Company record loaded from the companies JSON dump."""

    # Row bookkeeping timestamps, maintained automatically by Django.
    created_at = models.DateTimeField(auto_now_add=True, editable=False)
    modified_at = models.DateTimeField(auto_now=True, editable=False)
    # Unique identifier carried over from the source JSON ("index" key).
    index = models.IntegerField(unique=True)
    name = models.CharField(max_length=100)

    def __str__(self):
        """Render as '<index> - <name>' for admin/debug output."""
        return "{} - {}".format(self.index, self.name)

    @classmethod
    def load_data_from_file(cls, file_path):
        """
        Save the data from a json file in the RDB

        file_path: path to a JSON file holding a list of entries with
        "index" and "company" keys. Rows already present (matched on
        the unique index) are left untouched via get_or_create.
        """
        with open(file_path, newline="") as fh:
            json_data = json.load(fh)
            fh.close()  # redundant: the with-block already closes the file
        for entry in json_data:
            cls.objects.get_or_create(index=entry["index"], name=entry["company"])
| {"/companies/views.py": ["/companies/models.py", "/companies/serializers.py"], "/companies/serializers.py": ["/companies/models.py"], "/companies/tests/test_views.py": ["/people/tests/factories.py", "/companies/tests/factories.py"], "/people/models.py": ["/companies/models.py"], "/people/views.py": ["/people/models.py", "/people/serializers.py"], "/people/management/commands/load_people.py": ["/people/loader.py"], "/core/tests.py": ["/people/models.py"], "/people/loader.py": ["/companies/models.py", "/people/models.py"], "/people/tests/test_loader.py": ["/companies/models.py", "/people/models.py"], "/companies/tests/test_models.py": ["/companies/models.py"], "/people/tests/test_views.py": ["/people/tests/factories.py"], "/core/management/commands/load_data.py": ["/companies/models.py", "/people/loader.py"], "/people/serializers.py": ["/companies/serializers.py", "/people/models.py"], "/companies/tests/factories.py": ["/companies/models.py"], "/people/tests/factories.py": ["/companies/tests/factories.py", "/people/models.py"], "/people/tests/test_models.py": ["/people/models.py"]} |
73,441 | mospina/paranuara | refs/heads/master | /people/serializers.py | from rest_framework import serializers
from companies.serializers import CompanySerializer
from people.models import Tag, Fruit, Vegetable, Person
class TagSerializer(serializers.ModelSerializer):
    """Serialize a Tag as {"id", "label"}."""

    class Meta:
        model = Tag
        fields = ("id", "label")
class FruitSerializer(serializers.ModelSerializer):
    """Serialize a Fruit as {"id", "name"}."""

    class Meta:
        model = Fruit
        fields = ("id", "name")
class VegetableSerializer(serializers.ModelSerializer):
    """Serialize a Vegetable as {"id", "name"}."""

    class Meta:
        model = Vegetable
        fields = ("id", "name")
class PersonSerializer(serializers.ModelSerializer):
    """Full representation of a Person with nested company and tags.

    "fruits" and "vegetables" flatten the favourite-food M2M relations
    into plain lists of names via SerializerMethodField.
    """

    tags = TagSerializer(read_only=True, many=True)
    # friends = PersonSerializer(read_only=True, many=True)
    company = CompanySerializer(read_only=True, many=False)
    # SerializerMethodField with explicit method names (defined below).
    fruits = serializers.SerializerMethodField("favouriteFruits")
    vegetables = serializers.SerializerMethodField("favouriteVegetables")

    class Meta:
        model = Person
        fields = (
            "created_at",
            "modified_at",
            "_id",
            "index",
            "guid",
            "has_died",
            "balance",
            "picture",
            "age",
            "eyeColor",
            "name",
            "gender",
            "company",
            "email",
            "phone",
            "address",
            "about",
            "registered",
            "tags",
            "friends",
            "greeting",
            "fruits",
            "vegetables",
        )

    def favouriteFruits(self, obj):
        """Return the names of the person's favourite fruits."""
        return [o.name for o in obj.favouriteFruits.all()]

    def favouriteVegetables(self, obj):
        """Return the names of the person's favourite vegetables."""
        return [o.name for o in obj.favouriteVegetables.all()]
class PeopleSerializer(serializers.Serializer):
    """Serialize an object exposing .friends and .people iterables.

    common_friends: name/index pairs built from obj.friends.
    people: name/age/address/phone dicts built from obj.people.
    """

    common_friends = serializers.SerializerMethodField("get_friends")
    people = serializers.SerializerMethodField("get_people")

    def get_friends(self, obj):
        """Return obj.friends reduced to name/index pairs."""
        return [{"name": o.name, "index": o.index} for o in obj.friends]

    def get_people(self, obj):
        """Return obj.people reduced to contact-detail dicts."""
        return [
            {"name": o.name, "age": o.age, "address": o.address, "phone": o.phone}
            for o in obj.people
        ]
| {"/companies/views.py": ["/companies/models.py", "/companies/serializers.py"], "/companies/serializers.py": ["/companies/models.py"], "/companies/tests/test_views.py": ["/people/tests/factories.py", "/companies/tests/factories.py"], "/people/models.py": ["/companies/models.py"], "/people/views.py": ["/people/models.py", "/people/serializers.py"], "/people/management/commands/load_people.py": ["/people/loader.py"], "/core/tests.py": ["/people/models.py"], "/people/loader.py": ["/companies/models.py", "/people/models.py"], "/people/tests/test_loader.py": ["/companies/models.py", "/people/models.py"], "/companies/tests/test_models.py": ["/companies/models.py"], "/people/tests/test_views.py": ["/people/tests/factories.py"], "/core/management/commands/load_data.py": ["/companies/models.py", "/people/loader.py"], "/people/serializers.py": ["/companies/serializers.py", "/people/models.py"], "/companies/tests/factories.py": ["/companies/models.py"], "/people/tests/factories.py": ["/companies/tests/factories.py", "/people/models.py"], "/people/tests/test_models.py": ["/people/models.py"]} |
73,442 | mospina/paranuara | refs/heads/master | /people/urls.py | from django.urls import path
from people import views
urlpatterns = [
    # People list endpoint (queried with ?index=... in the tests).
    path("", views.PeopleList.as_view()),
    # Single-person detail endpoint, keyed by the person's index.
    path("<int:index>", views.PersonDetail.as_view()),
]
| {"/companies/views.py": ["/companies/models.py", "/companies/serializers.py"], "/companies/serializers.py": ["/companies/models.py"], "/companies/tests/test_views.py": ["/people/tests/factories.py", "/companies/tests/factories.py"], "/people/models.py": ["/companies/models.py"], "/people/views.py": ["/people/models.py", "/people/serializers.py"], "/people/management/commands/load_people.py": ["/people/loader.py"], "/core/tests.py": ["/people/models.py"], "/people/loader.py": ["/companies/models.py", "/people/models.py"], "/people/tests/test_loader.py": ["/companies/models.py", "/people/models.py"], "/companies/tests/test_models.py": ["/companies/models.py"], "/people/tests/test_views.py": ["/people/tests/factories.py"], "/core/management/commands/load_data.py": ["/companies/models.py", "/people/loader.py"], "/people/serializers.py": ["/companies/serializers.py", "/people/models.py"], "/companies/tests/factories.py": ["/companies/models.py"], "/people/tests/factories.py": ["/companies/tests/factories.py", "/people/models.py"], "/people/tests/test_models.py": ["/people/models.py"]} |
73,443 | mospina/paranuara | refs/heads/master | /companies/tests/factories.py | from factory.django import DjangoModelFactory
from factory import Sequence
from companies.models import Company
class CompanyFactory(DjangoModelFactory):
    """Factory for Company rows with sequential unique indexes."""

    class Meta:
        model = Company

    # index is unique on the model, so a Sequence keeps creations valid.
    index = Sequence(lambda n: n)
    name = "Company"
| {"/companies/views.py": ["/companies/models.py", "/companies/serializers.py"], "/companies/serializers.py": ["/companies/models.py"], "/companies/tests/test_views.py": ["/people/tests/factories.py", "/companies/tests/factories.py"], "/people/models.py": ["/companies/models.py"], "/people/views.py": ["/people/models.py", "/people/serializers.py"], "/people/management/commands/load_people.py": ["/people/loader.py"], "/core/tests.py": ["/people/models.py"], "/people/loader.py": ["/companies/models.py", "/people/models.py"], "/people/tests/test_loader.py": ["/companies/models.py", "/people/models.py"], "/companies/tests/test_models.py": ["/companies/models.py"], "/people/tests/test_views.py": ["/people/tests/factories.py"], "/core/management/commands/load_data.py": ["/companies/models.py", "/people/loader.py"], "/people/serializers.py": ["/companies/serializers.py", "/people/models.py"], "/companies/tests/factories.py": ["/companies/models.py"], "/people/tests/factories.py": ["/companies/tests/factories.py", "/people/models.py"], "/people/tests/test_models.py": ["/people/models.py"]} |
73,444 | mospina/paranuara | refs/heads/master | /people/tests/factories.py | from factory.django import DjangoModelFactory
import factory
from companies.tests.factories import CompanyFactory
from people.models import Tag, Fruit, Vegetable, Person
class TagFactory(DjangoModelFactory):
    """factory_boy factory producing ``Tag`` rows for tests."""
    class Meta:
        model = Tag
    label = "tag"
class FruitFactory(DjangoModelFactory):
    """factory_boy factory producing ``Fruit`` rows for tests."""
    class Meta:
        model = Fruit
    name = "apple"
class VegetableFactory(DjangoModelFactory):
    """factory_boy factory producing ``Vegetable`` rows for tests."""
    class Meta:
        model = Vegetable
    name = "carrot"
class PersonFactory(DjangoModelFactory):
    """factory_boy factory producing fully populated ``Person`` rows.

    The M2M relations (``tags``, ``friends``, ``favouriteFruits``,
    ``favouriteVegetables``) are populated only when passed explicitly,
    e.g. ``PersonFactory(tags=[tag1, tag2])``.
    """
    class Meta:
        model = Person
    _id = factory.Sequence(lambda n: "id%d" % n)
    index = factory.Sequence(lambda n: n)
    guid = factory.Sequence(lambda n: "guid%d" % n)
    # BUG FIX: was the *string* "False", which is truthy in Python and only
    # stored as False by accident via BooleanField.to_python(); use a real bool.
    has_died = False
    balance = 1000.00
    picture = "http://www.example.com/photo.jpg"
    age = 50
    eyeColor = "brown"
    name = factory.Faker("name")
    gender = "M"
    company = factory.SubFactory(CompanyFactory)
    email = "namesur@example.com"
    phone = "900088812345"
    address = "10 Street Avenue"
    about = "About me"
    # NOTE(review): naive datetime string; if USE_TZ is enabled Django will
    # warn about it -- confirm whether an aware datetime is wanted here.
    registered = "2010-10-10 10:10:10"
    greeting = "hello"
    @factory.post_generation
    def tags(self, create, extracted, **kwargs):
        # Attach tags passed as PersonFactory(tags=[...]) after creation.
        if not create:
            return
        if extracted:
            for tag in extracted:
                self.tags.add(tag)
    @factory.post_generation
    def friends(self, create, extracted, **kwargs):
        # Attach friends passed as PersonFactory(friends=[...]).
        if not create:
            return
        if extracted:
            for friend in extracted:
                self.friends.add(friend)
    @factory.post_generation
    def favouriteFruits(self, create, extracted, **kwargs):
        # Attach fruits passed as PersonFactory(favouriteFruits=[...]).
        if not create:
            return
        if extracted:
            for food in extracted:
                self.favouriteFruits.add(food)
    @factory.post_generation
    def favouriteVegetables(self, create, extracted, **kwargs):
        # Attach vegetables passed as PersonFactory(favouriteVegetables=[...]).
        if not create:
            return
        if extracted:
            for food in extracted:
                self.favouriteVegetables.add(food)
| {"/companies/views.py": ["/companies/models.py", "/companies/serializers.py"], "/companies/serializers.py": ["/companies/models.py"], "/companies/tests/test_views.py": ["/people/tests/factories.py", "/companies/tests/factories.py"], "/people/models.py": ["/companies/models.py"], "/people/views.py": ["/people/models.py", "/people/serializers.py"], "/people/management/commands/load_people.py": ["/people/loader.py"], "/core/tests.py": ["/people/models.py"], "/people/loader.py": ["/companies/models.py", "/people/models.py"], "/people/tests/test_loader.py": ["/companies/models.py", "/people/models.py"], "/companies/tests/test_models.py": ["/companies/models.py"], "/people/tests/test_views.py": ["/people/tests/factories.py"], "/core/management/commands/load_data.py": ["/companies/models.py", "/people/loader.py"], "/people/serializers.py": ["/companies/serializers.py", "/people/models.py"], "/companies/tests/factories.py": ["/companies/models.py"], "/people/tests/factories.py": ["/companies/tests/factories.py", "/people/models.py"], "/people/tests/test_models.py": ["/people/models.py"]} |
73,445 | mospina/paranuara | refs/heads/master | /people/tests/test_models.py | from django.test import TestCase
from people.models import Person, Fruit, Vegetable, Tag
class TestModel(TestCase):
    """Placeholder suite for the people models; no assertions yet (TODO)."""
    pass
| {"/companies/views.py": ["/companies/models.py", "/companies/serializers.py"], "/companies/serializers.py": ["/companies/models.py"], "/companies/tests/test_views.py": ["/people/tests/factories.py", "/companies/tests/factories.py"], "/people/models.py": ["/companies/models.py"], "/people/views.py": ["/people/models.py", "/people/serializers.py"], "/people/management/commands/load_people.py": ["/people/loader.py"], "/core/tests.py": ["/people/models.py"], "/people/loader.py": ["/companies/models.py", "/people/models.py"], "/people/tests/test_loader.py": ["/companies/models.py", "/people/models.py"], "/companies/tests/test_models.py": ["/companies/models.py"], "/people/tests/test_views.py": ["/people/tests/factories.py"], "/core/management/commands/load_data.py": ["/companies/models.py", "/people/loader.py"], "/people/serializers.py": ["/companies/serializers.py", "/people/models.py"], "/companies/tests/factories.py": ["/companies/models.py"], "/people/tests/factories.py": ["/companies/tests/factories.py", "/people/models.py"], "/people/tests/test_models.py": ["/people/models.py"]} |
73,446 | mospina/paranuara | refs/heads/master | /people/migrations/0001_initial.py | # Generated by Django 2.2.7 on 2019-12-06 00:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial auto-generated schema for the ``people`` app.

    Creates Fruit, Tag, Vegetable and Person. Do not hand-edit applied
    migrations; schema changes belong in a new migration.
    """
    initial = True
    # Person.company points at companies.Company, so that app must migrate first.
    dependencies = [("companies", "0002_auto_20191204_0323")]
    operations = [
        migrations.CreateModel(
            name="Fruit",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.CharField(max_length=32)),
            ],
        ),
        migrations.CreateModel(
            name="Tag",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("label", models.CharField(max_length=64)),
            ],
        ),
        migrations.CreateModel(
            name="Vegetable",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.CharField(max_length=32)),
            ],
        ),
        migrations.CreateModel(
            name="Person",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("modified_at", models.DateTimeField(auto_now=True)),
                ("_id", models.CharField(max_length=128, unique=True)),
                ("index", models.IntegerField(unique=True)),
                ("guid", models.CharField(max_length=128, unique=True)),
                ("has_died", models.BooleanField()),
                ("balance", models.DecimalField(decimal_places=2, max_digits=10)),
                ("picture", models.URLField()),
                ("age", models.IntegerField()),
                ("eyeColor", models.CharField(max_length=32)),
                ("name", models.CharField(max_length=128)),
                (
                    "gender",
                    models.CharField(
                        choices=[("M", "Male"), ("F", "Female"), ("N", "No response")],
                        max_length=2,
                    ),
                ),
                ("email", models.EmailField(max_length=254)),
                ("phone", models.CharField(max_length=128)),
                ("address", models.CharField(max_length=256)),
                ("about", models.TextField()),
                ("registered", models.DateTimeField()),
                ("greeting", models.CharField(max_length=256)),
                (
                    "company",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        to="companies.Company",
                    ),
                ),
                ("favouriteFruits", models.ManyToManyField(to="people.Fruit")),
                ("favouriteVegetables", models.ManyToManyField(to="people.Vegetable")),
                (
                    "friends",
                    # NOTE(review): null=True has no effect on ManyToManyField
                    # (Django warning fields.W340); harmless, but drop it in a
                    # future migration.
                    models.ManyToManyField(
                        blank=True,
                        null=True,
                        related_name="_person_friends_+",
                        to="people.Person",
                    ),
                ),
                ("tags", models.ManyToManyField(to="people.Tag")),
            ],
        ),
    ]
| {"/companies/views.py": ["/companies/models.py", "/companies/serializers.py"], "/companies/serializers.py": ["/companies/models.py"], "/companies/tests/test_views.py": ["/people/tests/factories.py", "/companies/tests/factories.py"], "/people/models.py": ["/companies/models.py"], "/people/views.py": ["/people/models.py", "/people/serializers.py"], "/people/management/commands/load_people.py": ["/people/loader.py"], "/core/tests.py": ["/people/models.py"], "/people/loader.py": ["/companies/models.py", "/people/models.py"], "/people/tests/test_loader.py": ["/companies/models.py", "/people/models.py"], "/companies/tests/test_models.py": ["/companies/models.py"], "/people/tests/test_views.py": ["/people/tests/factories.py"], "/core/management/commands/load_data.py": ["/companies/models.py", "/people/loader.py"], "/people/serializers.py": ["/companies/serializers.py", "/people/models.py"], "/companies/tests/factories.py": ["/companies/models.py"], "/people/tests/factories.py": ["/companies/tests/factories.py", "/people/models.py"], "/people/tests/test_models.py": ["/people/models.py"]} |
73,456 | seisowl/pyjseisio | refs/heads/master | /test/demojsdataset.py | import numpy as np
import pyjseisio as js
dataset = js.open("./synth.js")
# Overwrite frame 2 of synth.js with the constant 200, mirroring the same
# frame into a copy called dataAs.js (created on first writeFrameToFile call).
iframe = 2
fh = dataset.readFrameHeader(iframe, liveOnly=True)
frame = dataset.readFrame(iframe, readHdrs=False, liveOnly=True)
frame[:] = 200
dataset.writeFrame(iframe, frame, fh, fh.shape[0])
dataset.writeFrameToFile("./dataAs.js", iframe, frame, fh, fh.shape[0])
# Same again for frame 3, filled with 300. Note the headers of frame 2 are
# reused for frame 3 -- presumably fine for this synthetic dataset; verify.
iframe = 3
frame[:] = 300
dataset.writeFrame(iframe, frame, fh, fh.shape[0])
dataset.writeFrameToFile("./dataAs.js", iframe, frame, fh, fh.shape[0])
# read to validate
datasetAs = js.open("./dataAs.js")
iframe = 3
fhnew = datasetAs.readFrameHeader(iframe, liveOnly=True)
framenew = datasetAs.readFrame(iframe, readHdrs=False, liveOnly=True)
# Round-trip check: what we read back from the copy must equal what we wrote.
if np.array_equal(framenew,frame) :
    print ("Write check ok")
else :
    print ("Write check error")
# Leftover Python-2 exploration snippets, kept commented out for reference.
#for ir in xrange(fh.shape[0]):
# print dataset.hdrs['PAD_TRC'].getVal(fh[ir]), dataset.hdrs['TR_FOLD'].getVal(fh[ir])
#, dataset.hdrs['SHOT_2D'].getVal(fh[ir])
#usefulStuff = filter((lambda s: s[0:2]!='__'),dir(dataset.hdrs['XLINE_NO']))
#print usefulStuff
#####################################################################
#usefulStuff = filter((lambda s: s[0:2]!='__'),dir(dataset.axes[1]))
#print usefulStuff
#usefulStuff = filter((lambda s: s[0:2]!='__'),dir(jsswig))
#print usefulStuff
#usefulStuff = filter((lambda s: s[0:2]!='__'),dir(fr))
#print usefulStuff
#fr.getAxisLabels
| {"/test/demojsdataset.py": ["/pyjseisio/__init__.py"], "/pyjseisio/__init__.py": ["/pyjseisio/jsdataset.py"]} |
73,457 | seisowl/pyjseisio | refs/heads/master | /test/demo.py | import numpy as np
import pyjseisio as js
import matplotlib.pyplot as p
import pyjseisio.pyjseisio_swig as jsswig
# Demo: exercise the SWIG-wrapped jsFileReader API on the test dataset synth.js.
# BUG FIX: this script was Python-2-only (print statements, xrange, printing a
# lazy filter) while the package and sibling demos run under Python 3; ported.
fr = jsswig.jsFileReader()
# %matplotlib inline
usefulStuff = list(filter((lambda s: s[0:2] != '__'), dir(js)))
print(usefulStuff)
dataset = js.open("../test/synth.js")
usefulStuff = list(filter((lambda s: s[0:2] != '__'), dir(dataset)))
print(usefulStuff)
iframe = 2
fh = dataset.readFrameHeader(iframe, liveOnly=True)
frame = dataset.readFrame(iframe, readHdrs=False, liveOnly=True)
# Dump a couple of header words for every live trace of the frame.
for ir in range(fh.shape[0]):
    print(dataset.hdrs['PAD_TRC'].getVal(fh[ir]), dataset.hdrs['TR_FOLD'].getVal(fh[ir]))
#, dataset.hdrs['SHOT_2D'].getVal(fh[ir])
usefulStuff = list(filter((lambda s: s[0:2] != '__'), dir(dataset.hdrs['XLINE_NO'])))
print(usefulStuff)
#####################################################################
usefulStuff = list(filter((lambda s: s[0:2] != '__'), dir(dataset.axes[1])))
print(usefulStuff)
usefulStuff = list(filter((lambda s: s[0:2] != '__'), dir(jsswig)))
print(usefulStuff)
usefulStuff = list(filter((lambda s: s[0:2] != '__'), dir(fr)))
print(usefulStuff)
| {"/test/demojsdataset.py": ["/pyjseisio/__init__.py"], "/pyjseisio/__init__.py": ["/pyjseisio/jsdataset.py"]} |
73,458 | seisowl/pyjseisio | refs/heads/master | /pyjseisio/jsdataset.py | import numpy as np
import pyjseisio.pyjseisio_swig as jsswig
import os.path
def open(filename):
    """Open a JavaSeis dataset directory for reading; return a jsdataset."""
    properties = filename + '/FileProperties.xml'
    # A JavaSeis "file" is really a directory that must hold FileProperties.xml.
    assert os.path.isdir(filename), 'JavaSeis file not found: ' + filename
    assert os.path.isfile(properties), 'JavaSeis file not found: ' + properties
    return jsdataset.openForRead(filename)
class jsdataset(object):
    """Pythonic facade over the SWIG-wrapped JavaSeis reader/writer.

    Fixes in this revision:
      * writeFrameToFile() called the non-existent self.overWriteFrame()
        (AttributeError on the write-back-to-input path); it now delegates
        to writeFrame().
      * np.product (removed in NumPy 2.0) replaced by np.prod.
      * headers=None no longer crashes the writeFrameToFile() paths.
    """
    @classmethod
    def openForRead(cls, filename):
        '''
        Factory classmethod to open a JavaSeis dataset for reading.
        Input: filename - path to JavaSeis dataset directory
        Output: jsdataset object with file opened
        '''
        data = cls()
        data._reader = jsswig.jsFileReader()
        data._reader.Init(filename)
        data._infilename = filename
        # Writers are created lazily on the first write call.
        data._writer = None
        data._outfilename = None
        data._outwriter = None
        # Map header-word name -> SWIG header entry object.
        data.hdrs = {}
        for hdr in data._reader.getHdrEntries():
            data.hdrs[hdr.getName()] = hdr
        # One jsaxis per dimension, built from the SWIG vector accessors.
        data.axes = ()
        labels = jsswig.StringVector()
        units = jsswig.StringVector()
        domains = jsswig.StringVector()
        data._reader.getAxisLabels(labels)
        data._reader.getAxisUnits(units)
        data._reader.getAxisDomains(domains)
        for idim in range(0, data._reader.getNDim()):
            logValues = jsswig.LongVector()
            data._reader.getAxisLogicalValues(idim, logValues)
            physValues = jsswig.DoubleVector()
            data._reader.getAxisPhysicalValues(idim, physValues)
            newAxis = jsaxis(jsswig.vectorToList(labels)[idim],
                             jsswig.vectorToList(units)[idim],
                             jsswig.vectorToList(domains)[idim],
                             data._reader.getAxisLen(idim),
                             jsswig.vectorToList(logValues),
                             jsswig.vectorToList(physValues))
            data.axes = data.axes + (newAxis,)
        return data
    def _flatten(self, arr):
        """Return *arr* reshaped to 1-D (row-major) as the SWIG layer expects."""
        return np.reshape(arr, (np.prod(arr.shape),))
    def _write_via(self, writer, frameIndex, traces, headers, ntraces):
        """Flatten buffers and dispatch one frame write to *writer*."""
        if headers is None:
            return writer.writeFrame(frameIndex, self._flatten(traces))
        return writer.writeFrame(frameIndex, self._flatten(traces),
                                 self._flatten(headers), ntraces)
    def writeFrame(self, frameIndex, traces, headers=None, ntraces=-1):
        '''
        Overwrite the current frame/hdrs at given global frameIndex
        :param frameIndex: global frame index
        :param traces: the frame buffer
        :param headers: the header buffer (optional)
        :param ntraces: number trace in this frame
        :return: number of trace written
        '''
        if self._writer is None:
            # Lazily open an in-place writer on the input dataset.
            self._writer = jsswig.jsFileWriter()
            self._writer.setFileName(self._infilename)
            self._writer.Init(self._reader)
        return self._write_via(self._writer, frameIndex, traces, headers, ntraces)
    def writeFrameToFile(self, outfilename, frameIndex, traces, headers=None, ntraces=-1):
        '''
        save the frame/header to outfilename file. if it is new file, it will copy all info from current input file,
        then replace the specify frame with the given buffer.
        :param outfilename: the filename to be written
        :param frameIndex: global frame index
        :param traces: the frame buffer
        :param headers: the header buffer (optional)
        :param ntraces: number trace in this frame
        :return: number of trace written
        '''
        if outfilename == self._infilename:
            # BUG FIX: used to call the undefined self.overWriteFrame();
            # writing to the input file is simply an in-place writeFrame().
            return self.writeFrame(frameIndex, traces, headers, ntraces)
        if outfilename != self._outfilename:
            # First write to this output: clone the input's metadata.
            assert (not os.path.isdir(outfilename)), ('!!!File exist: ' + outfilename)
            self._outwriter = jsswig.jsFileWriter()
            self._outwriter.setFileName(outfilename)
            self._outwriter.Init(self._reader)
            self._outwriter.writeMetaData(2)
            self._outfilename = outfilename
        return self._write_via(self._outwriter, frameIndex, traces, headers, ntraces)
    def readFrame(self, frameIndex, readHdrs=True, liveOnly=False):
        '''
        Read one frame from the dataset at the given global frameIndex.
        By default, returns a tulple containing (frameData, frameHeader),
        where frameData is a numpy ndarray with shape (AxisLen(1),AxisLen(0))
        frameHeader ia a numpy ndarray with shape (AxisLen(1),NumBytesInHeader)
        if readHdrs is set to False, only returns the frameData numpy array.
        If liveOnly is set to True, then the data and header returned are
        for the live traces within the frame only.
        '''
        ntraces = self.getNumOfLiveTraces(frameIndex) if liveOnly \
            else self.axes[1].len
        fullDataLength = self.axes[0].len * self.axes[1].len
        dataLength = self.axes[0].len * ntraces
        fullHdrLength = self.getNumBytesInHeader() * self.axes[1].len
        hdrLength = self.getNumBytesInHeader() * ntraces
        if readHdrs:
            data = self._reader.readFrameDataAndHdrs(frameIndex,
                                                     fullDataLength,
                                                     fullHdrLength)
            returnData = (data[1][0:dataLength].reshape(ntraces,
                                                        self.axes[0].len),
                          data[2][0:hdrLength].reshape(ntraces,
                                                       self.getNumBytesInHeader()))
        else:
            frame = self._reader.readFrameDataOnly(frameIndex, fullDataLength)[1]
            returnData = frame[0:dataLength].reshape(ntraces, self.axes[0].len)
        return returnData
    def readFrameHeader(self, frameIndex, liveOnly=False):
        '''
        Read the headers of one frame from the dataset at the given global frameIndex.
        Returns a numpy ndarray with shape (AxisLen(1),NumBytesInHeader)
        Keyword argument 'liveOnly' determines whether to retrieve only the live
        trace headers, or all headers.
        '''
        ntraces = self.getNumOfLiveTraces(frameIndex) if liveOnly \
            else self.axes[1].len
        fullHdrLength = self.getNumBytesInHeader() * self.axes[1].len
        hdrLength = self.getNumBytesInHeader() * ntraces
        hdrs = self._reader.readFrameHdrsOnly(frameIndex, fullHdrLength)[1]
        return hdrs[0:hdrLength].reshape(ntraces, self.getNumBytesInHeader())
    def readTraces(self, traceIndex, numTraces):
        '''
        Read multiple traces from the dataset starting
        at the given global trace index.
        Returns a numpy ndarray with shape (numTraces,AxisLen(0))
        '''
        length = numTraces * self.axes[0].len
        trace = self._reader.readTracesDataOnly(traceIndex, numTraces, length)[1]
        return trace.reshape(numTraces, self.axes[0].len)
    def readTraceHeaders(self, traceIndex, numTraces):
        '''
        Read multiple trace headers from the dataset starting
        at the given global trace index.
        Returns a numpy ndarray with shape (numTraces,NumBytesInHeader)
        '''
        length = numTraces * self.getNumBytesInHeader()
        trace = self._reader.readTraceHeadersOnly(traceIndex, numTraces, length)[1]
        return trace.reshape(numTraces, self.getNumBytesInHeader())
    # no-arg methods delegated to self._reader
    def isRegular(self): return self._reader.isRegular()
    def isSeisPEG(self): return self._reader.isSeisPEG()
    def getNtr(self): return self._reader.getNtr()
    def getNFrames(self): return self._reader.getNFrames()
    def getNumHeaderWords(self): return self._reader.getNumHeaderWords()
    def getNumBytesInHeader(self): return self._reader.getNumBytesInHeader()
    def getNumBytesInRawFrame(self): return self._reader.getNumBytesInRawFrame()
    def getIOBufferSize(self): return self._reader.getIOBufferSize()
    def getNDim(self): return self._reader.getNDim()
    def getFrameSizeOnDisk(self): return self._reader.getFrameSizeOnDisk()
    def getByteOrder(self): return self._reader.getByteOrder()
    def getByteOrderAsString(self): return self._reader.getByteOrderAsString()
    def getTraceFormatName(self): return self._reader.getTraceFormatName()
    def getDescriptiveName(self): return self._reader.getDescriptiveName()
    def getDataType(self): return self._reader.getDataType()
    def getVersion(self): return self._reader.getVersion()
    def getNumOfExtents(self): return self._reader.getNumOfExtents()
    def getNumOfVirtualFolders(self): return self._reader.getNumOfVirtualFolders()
    def getHeaderWordsInfo(self): return self._reader.getHeaderWordsInfo(0)
    # arg-full methods delegated to self._reader
    def getNumOfLiveTraces(self, frameIndex):
        return self._reader.getNumOfLiveTraces(frameIndex)
# Map JavaSeis axis labels to the trace-header word that carries the axis value.
label2hdr = {
    'CROSSLINE': 'XLINE_NO',
    'INLINE': 'ILINE_NO',
    'SAIL_LINE': 'S_LINE',
    'TIME': 'V_TIME',
    'DEPTH': 'V_DEPTH',
    'CMP': 'CDP',
    'RECEIVER_LINE': 'R_LINE',
    'CHANNEL': 'CHAN',
    'RECEIVER': 'REC_SLOC',
    'OFFSET_BIN': 'OFB_NO',
}
class jsaxis:
    """Value object describing one axis of a JavaSeis dataset."""
    def __init__(self, label, units, domain, length, logVals, physVals):
        """Capture axis metadata; hdr falls back to the label itself when
        no header word is mapped for it in label2hdr."""
        self.label = label
        self.hdr = label2hdr.get(label) or label
        self.units = units
        self.domain = domain
        self.len = length
        self.logicalValues = logVals
        self.physicalValues = physVals
| {"/test/demojsdataset.py": ["/pyjseisio/__init__.py"], "/pyjseisio/__init__.py": ["/pyjseisio/jsdataset.py"]} |
73,459 | seisowl/pyjseisio | refs/heads/master | /pyjseisio/__init__.py | from . jsdataset import open
| {"/test/demojsdataset.py": ["/pyjseisio/__init__.py"], "/pyjseisio/__init__.py": ["/pyjseisio/jsdataset.py"]} |
73,460 | seisowl/pyjseisio | refs/heads/master | /test/demoWrite.py | import numpy as np
import pyjseisio.pyjseisio_swig as jsswig
# Demo: create, copy and update a 4-D JavaSeis dataset via the SWIG writer API.
# Fixes: print of a lazy `filter` object under Python 3 (now wrapped in list),
# removal of the trailing no-op `fr.getAxisLabels` attribute access, and stray
# C-style semicolons dropped.
numDim = 4
NSamples = 501
NOffsets = 197
NXlines = 7
NInlines = 10
off0 = 0.0
doff = 100.0
xl0 = 10
dxl = 20
inl0 = 20
dinl = 40
# testing the SWIGGED methods of jsFileWriter on the test data to create a new dataset
jsWrtTest = jsswig.jsFileWriter()
jsWrtTest.setFileName("./dataTest.js")
jsWrtTest.initGridDim(numDim)
jsWrtTest.initGridAxis(0, "TIME", "SECONDS","TIME", NSamples, 0, 1, 0, 4)
jsWrtTest.initGridAxis(1, "OFFSET_BIN", "METERS", "SPACE", NOffsets, 0, 1, off0, doff)
jsWrtTest.initGridAxis(2, "CROSSLINE", "METERS", "SPACE", NXlines, 0, 1, xl0, dxl)
jsWrtTest.initGridAxis(3, "INLINE", "METERS", "SPACE", NInlines, 0, 1, inl0, dinl)
jsWrtTest.addProperty("NEW_HDR", "Header description", "INTEGER", 1)
jsWrtTest.addSurveyGeom(inl0,inl0 + (NInlines-1)*dinl,xl0, xl0 + (NXlines-1)*dxl,5.5,6.6,7.7,8.8,9.9,10.10)
jsWrtTest.addCustomProperty("Stacked", "boolean", "false")
jsWrtTest.writeMetaData()
itrcTypeHdr = jsWrtTest.getHdrEntry("TRC_TYPE")
iTimeHdr = jsWrtTest.getHdrEntry("TIME")
fOffsetHdr = jsWrtTest.getHdrEntry("OFFSET")
iOffsetBinHdr = jsWrtTest.getHdrEntry("OFB_NO")
dCdpXHdr = jsWrtTest.getHdrEntry("CDP_XD")
dCdpYHdr = jsWrtTest.getHdrEntry("CDP_YD")
iInLineHdr = jsWrtTest.getHdrEntry("ILINE_NO")
iXLineHdr = jsWrtTest.getHdrEntry("XLINE_NO")
traceheaderSize = jsWrtTest.getTraceHeaderSize()
frame = np.zeros((NOffsets*NSamples), dtype=np.float32)
hdbuf = np.zeros((NOffsets*traceheaderSize), dtype=np.byte)
hdbuf2d = np.reshape(hdbuf, (-1, traceheaderSize))
#print len(hdbuf), len(hdbuf2d)
# Fill every frame with synthetic samples and headers, then write it.
iInline=0
while (iInline<NInlines):
    iXline=0
    while (iXline<NXlines):
        iTraces=0
        while (iTraces<NOffsets):
            itrcTypeHdr.setIntVal(hdbuf2d[iTraces], 1)
            iTimeHdr.setIntVal(hdbuf2d[iTraces], 0)
            fOffsetHdr.setFloatVal(hdbuf2d[iTraces], off0 + iTraces*doff)
            iOffsetBinHdr.setIntVal(hdbuf2d[iTraces], iTraces)
            dCdpXHdr.setDoubleVal(hdbuf2d[iTraces], xl0 + iXline*dxl)
            dCdpYHdr.setDoubleVal(hdbuf2d[iTraces], inl0 + iInline*dinl)
            iInLineHdr.setIntVal(hdbuf2d[iTraces], iInline)
            iXLineHdr.setIntVal(hdbuf2d[iTraces], iXline)
            iSample=0
            while (iSample<NSamples):
                frame[iTraces*NSamples+iSample]= iSample + (iInline*NXlines + iXline)*NOffsets + iTraces
                iSample +=1
            iTraces+=1
        numLiveTraces = jsWrtTest.leftJustify(frame, hdbuf, NOffsets)
        frameInd=iInline*NXlines + iXline
        print(frameInd, numLiveTraces)
        ires = jsWrtTest.writeFrame(frameInd,frame, hdbuf, numLiveTraces)
        if ires!=numLiveTraces:
            print("Error while writing frame ", frameInd)
            iXline=NXlines
            iInline=NInlines
            break
        iXline+=1
    iInline+=1
print ("Write OK")
# testing the SWIGGED methods of jsFileWriter on the test data to copy and update
fr = jsswig.jsFileReader()
fr.Init("./dataTest.js")
fr.closefp()
jsWrtTestCopy = jsswig.jsFileWriter()
jsWrtTestCopy.setFileName("./dataTestCopy.js")
jsWrtTestCopy.Init(fr)
ires = jsWrtTestCopy.writeMetaData(2)
itrcTypeHdr = jsWrtTestCopy.getHdrEntry("TRC_TYPE")
iTimeHdr = jsWrtTestCopy.getHdrEntry("TIME")
fOffsetHdr = jsWrtTestCopy.getHdrEntry("OFFSET")
iOffsetBinHdr = jsWrtTestCopy.getHdrEntry("OFB_NO")
dCdpXHdr = jsWrtTestCopy.getHdrEntry("CDP_XD")
dCdpYHdr = jsWrtTestCopy.getHdrEntry("CDP_YD")
iInLineHdr = jsWrtTestCopy.getHdrEntry("ILINE_NO")
iXLineHdr = jsWrtTestCopy.getHdrEntry("XLINE_NO")
ndim = jsWrtTestCopy.getNDim()
NInlines = jsWrtTestCopy.getAxisLen(3)
NXlines = jsWrtTestCopy.getAxisLen(2)
NOffsets = jsWrtTestCopy.getAxisLen(1)
NSamples = jsWrtTestCopy.getAxisLen(0)
traceheaderSize = jsWrtTestCopy.getTraceHeaderSize()
frame = np.zeros((NOffsets*NSamples), dtype=np.float32)
hdbuf = np.zeros((NOffsets*traceheaderSize), dtype=np.byte)
hdbuf2d = np.reshape(hdbuf, (-1, traceheaderSize))
#print len(hdbuf), len(hdbuf2d)
# Rewrite every frame (samples offset by +10) including headers.
iInline=0
while (iInline<NInlines):
    iXline=0
    while (iXline<NXlines):
        iTraces=0
        while (iTraces<NOffsets):
            itrcTypeHdr.setIntVal(hdbuf2d[iTraces], 1)
            iTimeHdr.setIntVal(hdbuf2d[iTraces], 0)
            fOffsetHdr.setFloatVal(hdbuf2d[iTraces], off0 + iTraces*doff)
            iOffsetBinHdr.setIntVal(hdbuf2d[iTraces], iTraces)
            dCdpXHdr.setDoubleVal(hdbuf2d[iTraces], xl0 + iXline*dxl)
            dCdpYHdr.setDoubleVal(hdbuf2d[iTraces], inl0 + iInline*dinl)
            iInLineHdr.setIntVal(hdbuf2d[iTraces], iInline)
            iXLineHdr.setIntVal(hdbuf2d[iTraces], iXline)
            iSample=0
            while (iSample<NSamples):
                frame[iTraces*NSamples+iSample]= 10 + iSample + (iInline*NXlines + iXline)*NOffsets + iTraces
                iSample +=1
            iTraces+=1
        numLiveTraces = jsWrtTestCopy.leftJustify(frame, hdbuf, NOffsets)
        frameInd=iInline*NXlines + iXline
        print(frameInd, numLiveTraces)
        ires = jsWrtTestCopy.writeFrame(frameInd,frame, hdbuf, numLiveTraces)
        if ires!=numLiveTraces:
            print("Error while writing frame ", frameInd)
            iXline=NXlines
            iInline=NInlines
            break
        iXline+=1
    iInline+=1
print ("copy and update all OK")
# testing the SWIGGED methods of jsFileWriter on the test data to copy and update traces only
fr1 = jsswig.jsFileReader()
fr1.Init("./dataTest.js")
fr1.closefp()
jsWrtTestCopy = jsswig.jsFileWriter()
jsWrtTestCopy.setFileName("./dataTestCopy2.js")
jsWrtTestCopy.Init(fr1)
ires = jsWrtTestCopy.writeMetaData(2)
ndim = jsWrtTestCopy.getNDim()
NInlines = jsWrtTestCopy.getAxisLen(3)
NXlines = jsWrtTestCopy.getAxisLen(2)
NOffsets = jsWrtTestCopy.getAxisLen(1)
NSamples = jsWrtTestCopy.getAxisLen(0)
traceheaderSize = jsWrtTestCopy.getTraceHeaderSize()
frame = np.zeros((NOffsets*NSamples), dtype=np.float32)
iInline=0
while (iInline<NInlines):
    iXline=0
    while (iXline<NXlines):
        iTraces=0
        while (iTraces<NOffsets):
            iSample=0
            while (iSample<NSamples):
                frame[iTraces*NSamples+iSample]= 10 + iSample + (iInline*NXlines + iXline)*NOffsets + iTraces
                iSample +=1
            iTraces+=1
        frameInd=iInline*NXlines + iXline
        print(frameInd)
        ires = jsWrtTestCopy.writeFrame(frameInd,frame)
        if ires!=NOffsets:
            print("Error while writing frame ", frameInd)
            iXline=NXlines
            iInline=NInlines
            break
        iXline+=1
    iInline+=1
print ("copy and update traces OK")
# testing the SWIGGED methods of jsFileWriter on the test data to update traces only
fr1 = jsswig.jsFileReader()
fr1.Init("./dataTestCopy2.js")
jsWrtTestCopy = jsswig.jsFileWriter()
jsWrtTestCopy.setFileName("./dataTestCopy2.js")
jsWrtTestCopy.Init(fr1)
jsWrtTestCopy.Initialize()
ndim = jsWrtTestCopy.getNDim()
NInlines = jsWrtTestCopy.getAxisLen(3)
NXlines = jsWrtTestCopy.getAxisLen(2)
NOffsets = jsWrtTestCopy.getAxisLen(1)
NSamples = jsWrtTestCopy.getAxisLen(0)
traceheaderSize = jsWrtTestCopy.getTraceHeaderSize()
frame = np.zeros((NOffsets*NSamples), dtype=np.float32)
# Overwrite the interior frames (skipping inline/xline 0) with -100.
iInline=1
while (iInline<NInlines):
    iXline=1
    while (iXline<NXlines):
        iTraces=0
        while (iTraces<NOffsets):
            iSample=0
            while (iSample<NSamples):
                frame[iTraces*NSamples+iSample]= -100
                iSample +=1
            iTraces+=1
        frameInd=iInline*NXlines + iXline
        print(frameInd)
        ires = jsWrtTestCopy.writeFrame(frameInd,frame)
        if ires!=NOffsets:
            print("Error while writing frame ", frameInd)
            iXline=NXlines
            iInline=NInlines
            break
        iXline+=1
    iInline+=1
print ("over write traces OK")
#usefulStuff = filter((lambda s: s[0:2]!='__'),dir(js))
#print(usefulStuff)
#iframe = 2
#fh = dataset.readFrameHeader(iframe, liveOnly=True)
#frame = dataset.readFrame(iframe, readHdrs=False, liveOnly=True)
#for ir in xrange(fh.shape[0]):
# print dataset.hdrs['PAD_TRC'].getVal(fh[ir]), dataset.hdrs['TR_FOLD'].getVal(fh[ir]), dataset.hdrs['SHOT_2D'].getVal(fh[ir])
#usefulStuff = filter((lambda s: s[0:2]!='__'),dir(dataset.hdrs['XLINE_NO']))
#print usefulStuff
#####################################################################
#usefulStuff = filter((lambda s: s[0:2]!='__'),dir(dataset.axes[1]))
#print usefulStuff
# BUG FIX: `filter` is lazy in Python 3; materialize before printing.
usefulStuff = list(filter((lambda s: s[0:2]!='__'),dir(jsswig)))
print(usefulStuff)
#usefulStuff = filter((lambda s: s[0:2]!='__'),dir(fr))
#print usefulStuff
| {"/test/demojsdataset.py": ["/pyjseisio/__init__.py"], "/pyjseisio/__init__.py": ["/pyjseisio/jsdataset.py"]} |
73,462 | muyue-jpg/DLP | refs/heads/master | /main.py | # import time
import args
import func
if __name__ == "__main__":
    # Solve the discrete logs of ya and yb (base g, order ord_g, modulus p)
    # with Pohlig-Hellman, then verify ya^xb == yb^xa -- the shared secret of
    # the Diffie-Hellman exchange. The pollard solvers below are constructed
    # only for the (commented-out) timing comparison.
    ya_solver = func.Pohlig_Hellman_algorithm(func.calc_factors(
        args.ord_g), args.g, args.ya, args.ord_g, args.p)
    yb_solver = func.Pohlig_Hellman_algorithm(func.calc_factors(
        args.ord_g), args.g, args.yb, args.ord_g, args.p)
    ya_solver_pollard = func.pollard_algorithm(args.g, args.ya, args.ord_g, args.p)
    yb_solver_pollard = func.pollard_algorithm(args.g, args.yb, args.ord_g, args.p)
    xa = ya_solver.solve()
    xb = yb_solver.solve()
    # Output (Chinese): prints xa, xb, then ya^xb and yb^xa mod p, and states
    # that their equality is the common DH secret.
    print("ya的离散对数xa为:", xa)
    print("yb的离散对数xb为:", xb)
    print("ya^xb = \n", func.modular_exponent(args.ya, xb, args.p))
    print("yb^xa = \n", func.modular_exponent(args.yb, xa, args.p))
    print("两者相等,它们即DH密钥交换协议中的共同内容")
    # Timing comparison of the two algorithms, kept for reference (note: the
    # commented messages say "ten runs" but the loops iterate 20 times).
    # start = time.time()
    # for i in range(20):
    #     ya_solver.solve()
    # end = time.time()
    # print("使用Pohlig Hellman算法的运行时间:(计算十次)")
    # print(end - start)
    # start = time.time()
    # for i in range(20):
    #     ya_solver_pollard.solve()
    # end = time.time()
    # print("使用Pollard rho算法的运行时间:(计算十次)")
    # print(end - start)
| {"/main.py": ["/func.py"]} |
73,463 | muyue-jpg/DLP | refs/heads/master | /func.py | import random
def calc_factors(num):
    """Return the prime factorization of *num* as {prime: exponent}.

    Repeatedly divides out the smallest prime factor found by
    calc_one_factor(). calc_factors(1) returns {}.

    Raises:
        ValueError: if num < 1 (the original looped forever on 0 and
        negative input).
    """
    if num < 1:
        raise ValueError("num must be a positive integer")
    factors = {}
    while num != 1:
        one_factor = calc_one_factor(num)
        factors[one_factor] = factors.get(one_factor, 0) + 1
        num = num // one_factor
    return factors
def calc_one_factor(num):
sq_root = int(num**(1/2) + 1)
for i in range(2, sq_root):
if (num % i) == 0:
return i
return num
def calc_inverse(n, ele):
    """Extended Euclid: return the inverse of *ele* modulo *n*.

    Contract (relied on by pollard_algorithm.solve): when gcd(ele, n) != 1
    there is no inverse and ValueError(gcd, t) is raised, where t is a
    particular solution of t*ele = gcd (mod n).

    NOTE(review): ele % n == 0 gives b == 0 and an unguarded
    ZeroDivisionError at q = a // b -- confirm callers never pass that.
    """
    a = n
    b = ele % n
    # t_0, t track the Bezout coefficient of ele through the iterations.
    t_0 = 0
    t = 1
    q = a // b
    r = a % b
    while r > 0:
        temp = (t_0 - q*t) % n
        t_0 = t
        t = temp
        a = b
        b = r
        q = a // b
        r = a % b
    if b != 1:
        raise ValueError(b, t)
    # If the algorithm fails we still have b = t*ele + k*n:
    # b is the gcd and t is a particular solution of that equation.
    return t
def modular_exponent(a, b, n):
    """Compute a**b mod n by binary (square-and-multiply) exponentiation."""
    result = 1
    base = a % n
    exp = b
    while exp > 0:
        if exp & 1:
            result = (result * base) % n
        base = (base * base) % n
        exp >>= 1
    return result
class pollard_algorithm:
    """Pollard's rho for the discrete log: find x with alpha**x = beta (mod p),
    where alpha has order n. Uses Floyd cycle detection over the classic
    three-branch random walk, tracking exponents (a, b) with x = alpha^a*beta^b.
    """
    def __init__(self, alpha, beta, n, p):
        self.alpha = alpha
        self.beta = beta
        self.n = n
        self.p = p
        # Walk step chosen by x mod 3 (the standard 3-way partition).
        self.func_list = {1: self.func_1, 0: self.func_2, 2: self.func_3}
    def func_1(self, x, a, b):
        # Partition S1: multiply by beta, so b increases by 1.
        return (self.beta*x % self.p, a, (b + 1) % self.n)
    def func_2(self, x, a, b):
        # Partition S2: square, so both exponents double.
        return (x**2 % self.p, 2*a % self.n, 2*b % self.n)
    def func_3(self, x, a, b):
        # Partition S3: multiply by alpha, so a increases by 1.
        return (self.alpha*x % self.p, (a + 1) % self.n, b)
    def func(self, x, a, b):
        # One step of the pseudo-random walk, dispatched on x mod 3.
        return self.func_list[x % 3](x, a, b)
    def check_solve(self, x_0, d):
        # When the congruence has gcd d > 1, test all d candidate solutions.
        for i in range(d):
            x = (x_0 + self.n // d * i) % self.n
            # iterate over the d possible solutions
            if modular_exponent(self.alpha, x, self.p) == self.beta:
                return x
        raise ValueError("算法失败")
    def solve(self):
        """Return x with alpha**x = beta (mod p); retries with fresh random
        starting points until the collision congruence is solvable."""
        a_inv = None
        while not a_inv:
            # Random start x = alpha^a * beta^b.
            init_a = random.randint(0, self.n)
            init_b = random.randint(0, self.n)
            init_x = modular_exponent(
                self.alpha, init_a, self.p)*modular_exponent(self.beta, init_b, self.p) % self.p
            # Floyd's tortoise (tuple_1) and hare (tuple_2).
            tuple_1 = self.func(init_x, init_a, init_b)
            tuple_2 = self.func(*tuple_1)
            while tuple_1[0] != tuple_2[0]:
                tuple_1 = self.func(*tuple_1)
                tuple_2 = self.func(*tuple_2)
                tuple_2 = self.func(*tuple_2)
            a_i = tuple_1[1]
            a_2i = tuple_2[1]
            b_i = tuple_1[2]
            b_2i = tuple_2[2]
            a = b_2i - b_i
            b = a_i - a_2i
            # rename for clarity: we now solve the congruence a*x = b (mod n)
            # NOTE(review): a == 0 mod n would make calc_inverse divide by
            # zero (unguarded ZeroDivisionError) -- confirm this cannot occur.
            try:
                a_inv = calc_inverse(self.n, a)
            except ValueError as v_error:
                (d, x_0) = v_error.args
                # d = gcd(a, n); x_0 is a particular solution of a*x = d (mod n)
                if d < 1000:
                    return self.check_solve(x_0 * b // d, d)
                # x_0 * b // d is a particular solution of the original
                # congruence; for large d we give up and retry the loop
                # with a new random start instead of scanning d candidates.
        return b*a_inv % self.n
class Pohlig_Hellman_algorithm:
    """Pohlig-Hellman discrete log: solve alpha**x = beta (mod p) where alpha
    has order n with known factorization {q: c}. Solves x mod q**c for each
    prime power, then combines the residues with the CRT in solve().
    """
    def __init__(self, factors: dict, alpha, beta, n, p):
        # factors: prime factorization of n as {prime q: exponent c}.
        self.factors = factors
        self.alpha = alpha
        self.beta = beta
        self.n = n
        self.p = p
    def solve_one_factor(self, q, c):
        """Return x mod q**c by recovering its base-q digits a_0..a_{c-1}."""
        j = 0
        beta_j = self.beta
        numbers = []
        while j <= c - 1:
            # sigma = beta_j^(n/q^(j+1)) lies in the subgroup of order q;
            # its log base alpha^(n/q) is the next digit a_j.
            sigma = modular_exponent(beta_j, self.n // (q ** (j + 1)), self.p)
            alpha = modular_exponent(self.alpha, (self.n // q), self.p)
            if q > 1000:
                # Large subgroup: use Pollard rho for the digit.
                solver = pollard_algorithm(alpha, sigma, q, self.p)
                a_j = solver.solve()
            else:
                # Small subgroup: brute-force the digit.
                for i in range(q):
                    if modular_exponent(alpha, i, self.p) == sigma:
                        a_j = i
                        break
            # Strip the recovered digit: beta_{j+1} = beta_j * alpha^(-a_j*q^j).
            alpha_inv = calc_inverse(self.p, self.alpha)
            beta_j = (beta_j * modular_exponent(alpha_inv, a_j*(q**j), self.p)) % self.p
            j += 1
            numbers.append(a_j)
        # Reassemble x mod q^c from its base-q digits.
        walker = 1
        ret = 0
        for num in numbers:
            ret += walker*num
            walker = walker * q
        return ret
    def solve(self):
        """Combine the per-prime-power residues via the Chinese Remainder
        Theorem and return x mod n."""
        M_i = []
        y_i = []
        a_i = []
        for (factor, power) in self.factors.items():
            # the_M_i = n / q^c; y_i = its inverse mod q^c (CRT weights).
            the_M_i = self.n // (factor ** power)
            M_i.append(the_M_i)
            y_i.append(calc_inverse(factor ** power, the_M_i))
            a_i.append(self.solve_one_factor(factor, power))
        result = 0
        for (a, M, y) in zip(a_i, M_i, y_i):
            result += a*M*y
            result %= self.n
        return result
| {"/main.py": ["/func.py"]} |
73,482 | famoraes/pyie | refs/heads/master | /pyie/__init__.py | # -*- coding: utf-8 -*-
# Package metadata for pyie (Brazilian state-inscription validator).
# NOTE(review): this version string is duplicated in setup.py -- keep in sync.
__version__ = '0.1.4'
__author__ = 'Fabiano Moraes'
__licence__ = 'MIT'
# Re-export the public API at package level: pyie.ValideStateInscription.
from .pyie import ValideStateInscription
73,483 | famoraes/pyie | refs/heads/master | /setup.py | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Packaging metadata for PyPI.
# NOTE(review): version duplicates __version__ in pyie/__init__.py -- keep in
# sync (or read it from the package in a future change).
setup(
    name = "pyie",
    version = '0.1.4',
    description = "Validador de Inscrição Estadual para Python",
    license = "MIT",
    author = "Fabiano Moraes",
    author_email = "fabiano.moraes@outlook.com",
    url = "https://github.com/famoraes/pyie",
    packages = find_packages(exclude = ['tests']),
    keywords = "python inscrição estadual ie",
    zip_safe = True
)
73,484 | famoraes/pyie | refs/heads/master | /pyie/pyie.py | # -*- coding: utf-8 -*-
import config, re
class ValideStateInscription(object):
    """Validator for Brazilian state inscription numbers (Inscrição Estadual).

    Generic check-digit validation is table-driven through
    config.STATES_CONFIGS (see ie_param); states whose rules do not fit
    the table have bespoke ie_<uf> methods resolved by name in validate().

    NOTE(review): this module is Python 2 only — it uses `unicode` and
    relies on `map()` returning a list that is then sliced/compared.
    """

    def __init__(self, *args, **kwargs):
        # Required keyword arguments: state (UF code, e.g. "SP") and
        # state_inscription (the raw inscription string to validate).
        self.state = kwargs['state']
        self.state_inscription = kwargs['state_inscription']
        self.states_configs = config.STATES_CONFIGS
        self.states = config.STATES

    def validate(self):
        """Return True when the inscription is valid (or exempt/absent)."""
        # An empty inscription, or the literal exemption term ("ISENTO"),
        # is accepted without further checks.
        if (self.state and not self.state_inscription or
                not self.state and not self.state_inscription or
                self.state_inscription.upper() == config.FREE_TERM):
            return True
        elif self.state in self.states:
            # Table-driven states share the generic algorithm.
            return self.ie_param()
        elif hasattr(self, "ie_%s" % self.state.lower()):
            # Special-cased states dispatch to their ie_<uf> method.
            return getattr(self, "ie_%s" % self.state.lower())()
        return False

    def ie_param(self):
        """Generic modulus-11-style check-digit validation driven by the
        per-state configuration table (size, weights, prefix, divisor)."""
        size = self.states_configs[self.state].get('size', 0)
        # NOTE(review): `unicode` is Python 2 only.
        state_inscription = unicode(self.state_inscription).strip()
        state_inscription = re.sub('[^0-9]', '', state_inscription)
        value_size = self.states_configs[self.state].get('value_size', size - 1)
        starts_with = self.states_configs[self.state].get('starts_with', '')
        state_inscription_int = [int(c) for c in state_inscription]
        new_state_inscription = state_inscription_int[:value_size]
        prod = self.states_configs[self.state].get(
            'prod', [5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2])
        prod = prod[-value_size:]
        # Check the length of the state inscription.
        if not len(state_inscription) == size:
            return False
        # Check the required prefix, when the state defines one.
        if not state_inscription.startswith(starts_with):
            return False
        # Regenerate each check digit and append it until full length.
        while len(new_state_inscription) < size:
            r = sum([x * y for (x, y) in zip(new_state_inscription, prod)]) % \
                self.states_configs[self.state].get('div', 11)
            if r > 1:
                f = 11 - r
            else:
                f = 0
            # RR keeps the raw remainder instead of 11 - r.
            if not self.state in ['RR']:
                new_state_inscription.append(f)
            else:
                new_state_inscription.append(r)
            prod.insert(0, prod[0] + 1)
        # Valid only if the regenerated digits match the input exactly.
        if not new_state_inscription == state_inscription_int:
            return False
        return True

    def ie_ap(self):
        """Amapá: 9 digits, '03' prefix, range-dependent p/d adjustments."""
        state_inscription = re.sub('[^0-9]', '', self.state_inscription)
        # Check the length of the state inscription.
        if len(state_inscription) != 9:
            return False
        # Check the first two digits.
        if not state_inscription.startswith('03'):
            return False
        # Take only the first 8 digits of the inscription and pick the
        # 'p' and 'd' adjustment values by numeric range.
        state_inscription_int = int(state_inscription[:8])
        if state_inscription_int <= 3017000:
            inscr_est_p = 5
            inscr_est_d = 0
        elif state_inscription_int <= 3019022:
            inscr_est_p = 9
            inscr_est_d = 1
        else:
            inscr_est_p = 0
            inscr_est_d = 0
        # Take only the first 8 digits and generate the check digit.
        state_inscription = map(int, state_inscription)
        new_state_inscription = state_inscription[:8]
        prod = [9, 8, 7, 6, 5, 4, 3, 2]
        r = (inscr_est_p + sum([x * y for (x, y) in zip(
            new_state_inscription, prod)])) % 11
        if r > 1:
            f = 11 - r
        elif r == 1:
            f = 0
        else:
            f = inscr_est_d
        new_state_inscription.append(f)
        if not new_state_inscription == state_inscription:
            return False
        return True

    def ie_ba(self):
        """Bahia: 8 or 9 digits; modulus depends on a key digit; the
        second check digit is inserted before the first."""
        state_inscription = re.sub('[^0-9]', '', self.state_inscription)
        state_inscription = map(int, state_inscription)
        # Check the length of the state inscription.
        if len(state_inscription) == 8:
            size = 8
            value_size = 6
            test_digit = 0
        elif len(state_inscription) == 9:
            size = 9
            value_size = 7
            test_digit = 1
        else:
            return False
        new_state_inscription = state_inscription[:value_size]
        prod = [8, 7, 6, 5, 4, 3, 2][-value_size:]
        # The modulus (10 or 11) is selected by the key digit's value.
        if state_inscription[test_digit] in [0, 1, 2, 3, 4, 5, 8]:
            modulo = 10
        else:
            modulo = 11
        while len(new_state_inscription) < size:
            r = sum([x * y for (x, y) in zip(
                new_state_inscription, prod)]) % modulo
            if r > 0:
                f = modulo - r
            else:
                f = 0
            if f >= 10 and modulo == 11:
                f = 0
            # The first generated digit is appended; the second one is
            # inserted just before it (BA's digits are in reverse order).
            if len(new_state_inscription) == value_size:
                new_state_inscription.append(f)
            else:
                new_state_inscription.insert(value_size, f)
            prod.insert(0, prod[0] + 1)
        if not new_state_inscription == state_inscription:
            return False
        return True

    def ie_go(self):
        """Goiás: 9 digits, prefix 10/11/15, special digit for one range."""
        state_inscription = re.sub('[^0-9]', '', self.state_inscription)
        # Check the length of the state inscription.
        if len(state_inscription) != 9:
            return False
        # Check the first two digits.
        if not state_inscription[:2] in ['10', '11', '15']:
            return False
        # Take only the first 8 digits and pick the 'd' value by range.
        state_inscription_int = int(state_inscription[:8])
        if (state_inscription_int >= 10103105 and
                state_inscription_int <= 10119997):
            inscr_est_d = 1
        else:
            inscr_est_d = 0
        # Take only the first 8 digits and generate the check digit.
        state_inscription = map(int, state_inscription)
        new_state_inscription = state_inscription[:8]
        prod = [9, 8, 7, 6, 5, 4, 3, 2]
        r = sum([x * y for (x, y) in zip(new_state_inscription, prod)]) % 11
        if r > 1:
            f = 11 - r
        elif r == 1:
            f = inscr_est_d
        else:
            f = 0
        new_state_inscription.append(f)
        if not new_state_inscription == state_inscription:
            return False
        return True

    def ie_mg(self):
        """Minas Gerais: 13 digits; first check digit via digit-sum of
        products (with an inserted 0), second via weighted modulus 11."""
        state_inscription = re.sub('[^0-9]', '', self.state_inscription)
        # Check the length of the state inscription.
        if len(state_inscription) != 13:
            return False
        # Take only the first 11 digits and generate both check digits.
        state_inscription = map(int, state_inscription)
        new_state_inscription = state_inscription[:11]
        # A zero is inserted at position 3 for the first digit's rule.
        new_state_inscription_aux = list(new_state_inscription)
        new_state_inscription_aux.insert(3, 0)
        prod = [1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2]
        # Sum the individual decimal digits of every product (done here
        # by stringifying the product list and re-parsing each digit).
        r = str([x * y for (x, y) in zip(new_state_inscription_aux, prod)])
        r = re.sub('[^0-9]', '', r)
        r = map(int, r)
        r = sum(r)
        # First check digit: distance to the next multiple of ten.
        # NOTE(review): `r / 10` relies on Python 2 integer division.
        r2 = (r / 10 + 1) * 10
        r = r2 - r
        if r >= 10:
            r = 0
        new_state_inscription.append(r)
        # Second check digit: standard weighted modulus-11.
        prod = [3, 2, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2]
        r = sum([x * y for (x, y) in zip(new_state_inscription, prod)]) % 11
        if r > 1:
            f = 11 - r
        else:
            f = 0
        new_state_inscription.append(f)
        if not new_state_inscription == state_inscription:
            return False
        return True

    def ie_pe(self):
        """Pernambuco: 9 digits (two check digits) or legacy 14 digits
        (one check digit)."""
        state_inscription = re.sub('[^0-9]', '', self.state_inscription)
        # Check the length of the state inscription.
        if (len(state_inscription) != 9) and (len(state_inscription) != 14):
            return False
        state_inscription = map(int, state_inscription)
        # Check the length of the state inscription.
        if len(state_inscription) == 9:
            # Take only the first 7 digits and generate both check digits.
            state_inscription = map(int, state_inscription)
            new_state_inscription = state_inscription[:7]
            prod = [8, 7, 6, 5, 4, 3, 2]
            while len(new_state_inscription) < 9:
                r = sum([x * y for (x, y) in zip(
                    new_state_inscription, prod)]) % 11
                if r > 1:
                    f = 11 - r
                else:
                    f = 0
                new_state_inscription.append(f)
                prod.insert(0, 9)
        elif len(state_inscription) == 14:
            # Take only the first 13 digits and generate the check digit.
            state_inscription = map(int, state_inscription)
            new_state_inscription = state_inscription[:13]
            prod = [5, 4, 3, 2, 1, 9, 8, 7, 6, 5, 4, 3, 2]
            r = sum([x * y for (x, y) in zip(new_state_inscription, prod)]) % 11
            f = 11 - r
            if f > 10:
                f = f - 10
            new_state_inscription.append(f)
        if not new_state_inscription == state_inscription:
            return False
        return True

    def ie_ro(self):
        """Rondônia: legacy 9-digit form (first 3 digits ignored) or the
        current 14-digit form."""
        def gera_digito_ro(new_state_inscription, prod):
            # Weighted modulus-11 digit; values above 9 wrap by 10.
            r = sum([x * y for (x, y) in zip(new_state_inscription, prod)]) % 11
            f = 11 - r
            if f > 9:
                f = f - 10
            return f
        state_inscription = re.sub('[^0-9]', '', self.state_inscription)
        state_inscription = map(int, state_inscription)
        # Check the length of the state inscription.
        if len(state_inscription) == 9:
            # Discard the first 3 digits, use digits 4-8 to generate the
            # check digit, then re-attach the discarded prefix.
            new_state_inscription = state_inscription[3:8]
            prod = [6, 5, 4, 3, 2]
            f = gera_digito_ro(new_state_inscription, prod)
            new_state_inscription.append(f)
            new_state_inscription = \
                state_inscription[0:3] + new_state_inscription
        elif len(state_inscription) == 14:
            # Take only the first 13 digits and generate the check digit.
            new_state_inscription = state_inscription[:13]
            prod = [6, 5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2]
            f = gera_digito_ro(new_state_inscription, prod)
            new_state_inscription.append(f)
        else:
            return False
        if not new_state_inscription == state_inscription:
            return False
        return True

    def ie_sp(self):
        """São Paulo: 12 digits with two check digits (positions 9 and
        12), or the 'P'-prefixed rural-producer variant with one digit."""
        def gera_digito_sp(new_state_inscription, prod, state_inscription):
            # NOTE(review): the state_inscription parameter is unused.
            r = sum([x * y for (x, y) in zip(new_state_inscription, prod)]) % 11
            if r < 10:
                return r
            elif r == 10:
                return 0
            else:
                return 1
        # Industrial and commercial taxpayers.
        if self.state_inscription[0] != 'P':
            state_inscription = re.sub('[^0-9]', '', self.state_inscription)
            # Check the length of the state inscription.
            if len(state_inscription) != 12:
                return False
            # Take only the first 8 digits and generate the first
            # check digit (position 9).
            state_inscription = map(int, state_inscription)
            new_state_inscription = state_inscription[:8]
            prod = [1, 3, 4, 5, 6, 7, 8, 10]
            f = gera_digito_sp(new_state_inscription, prod, state_inscription)
            new_state_inscription.append(f)
            # Generate the second check digit (position 12).
            new_state_inscription.extend(state_inscription[9:11])
            prod = [3, 2, 10, 9, 8, 7, 6, 5, 4, 3, 2]
            f = gera_digito_sp(new_state_inscription, prod, state_inscription)
            new_state_inscription.append(f)
        # Rural producer.
        else:
            state_inscription = re.sub('[^0-9]', '', self.state_inscription)
            # Check the length of the state inscription.
            if len(state_inscription) != 12:
                return False
            # Check the first digit after the 'P'.
            if state_inscription[0] != '0':
                return False
            # Take only the first 8 digits and generate the check digit.
            state_inscription = map(int, state_inscription)
            new_state_inscription = state_inscription[:8]
            prod = [1, 3, 4, 5, 6, 7, 8, 10]
            f = gera_digito_sp(new_state_inscription, prod, state_inscription)
            new_state_inscription.append(f)
            new_state_inscription.extend(state_inscription[9:])
        if not new_state_inscription == state_inscription:
            return False
        return True

    def ie_to(self):
        """Tocantins: 11 digits; digits 3-4 are an activity code (01, 02,
        03 or 99) excluded from the check-digit calculation."""
        state_inscription = re.sub('[^0-9]', '', self.state_inscription)
        # Check the length of the state inscription.
        if len(state_inscription) != 11:
            return False
        # Check digits 3 and 4 (the activity code).
        if not state_inscription[2:4] in ['01', '02', '03', '99']:
            return False
        # Take only the digits that enter the calculation (skip 3-4).
        state_inscription = map(int, state_inscription)
        new_state_inscription = state_inscription[:2] + state_inscription[4:10]
        prod = [9, 8, 7, 6, 5, 4, 3, 2]
        r = sum([x * y for (x, y) in zip(new_state_inscription, prod)]) % 11
        if r > 1:
            f = 11 - r
        else:
            f = 0
        new_state_inscription.append(f)
        # Re-insert the skipped activity code before comparing.
        new_state_inscription = new_state_inscription[:2] + \
            state_inscription[2:4] + new_state_inscription[2:]
        if not new_state_inscription == state_inscription:
            return False
        return True
73,491 | CMU-IDS-2021/fp--05839-abby-jeff-kyle-will | refs/heads/main | /support/ending_humanity_odds.py | # ending_humanity_odds.py
# Data pre-processing for popular survey results visualization.
import pandas as pd
from pandas import DataFrame
# -----------------------------------------------------------------------------
# Dictionaries and Lists
# -----------------------------------------------------------------------------
# Mapping from the raw one-hot column names produced by pd.get_dummies on
# the "Pursue Advancement" survey question to short "No"/"Yes" labels.
COLNAMES = {
    "Pursue Advancement_No. The risk is not worth it.": "No",
    "Pursue Advancement_Yes. This is worth pursuing.": "Yes",
}
# FIPS state code (as float, matching the survey data) -> state name.
# Covers the 50 states plus the District of Columbia (51 entries).
STATE_DICT = {
    1.0: "Alabama",
    2.0: "Alaska",
    4.0: "Arizona",
    5.0: "Arkansas",
    6.0: "California",
    8.0: "Colorado",
    9.0: "Connecticut",
    10.0: "Delaware",
    11.0: "District of Columbia",
    12.0: "Florida",
    13.0: "Georgia",
    15.0: "Hawaii",
    16.0: "Idaho",
    17.0: "Illinois",
    18.0: "Indiana",
    19.0: "Iowa",
    20.0: "Kansas",
    21.0: "Kentucky",
    22.0: "Louisiana",
    23.0: "Maine",
    24.0: "Maryland",
    25.0: "Massachusetts",
    26.0: "Michigan",
    27.0: "Minnesota",
    28.0: "Mississippi",
    29.0: "Missouri",
    30.0: "Montana",
    31.0: "Nebraska",
    32.0: "Nevada",
    33.0: "New Hampshire",
    34.0: "New Jersey",
    35.0: "New Mexico",
    36.0: "New York",
    37.0: "North Carolina",
    38.0: "North Dakota",
    39.0: "Ohio",
    40.0: "Oklahoma",
    41.0: "Oregon",
    42.0: "Pennsylvania",
    44.0: "Rhode Island",
    45.0: "South Carolina",
    46.0: "South Dakota",
    47.0: "Tennessee",
    48.0: "Texas",
    49.0: "Utah",
    50.0: "Vermont",
    51.0: "Virginia",
    53.0: "Washington",
    54.0: "West Virginia",
    55.0: "Wisconsin",
    56.0: "Wyoming",
}
# State name -> US census division used for regional aggregation.
# NOTE(review): "Idaho" is listed under "Pacific" while the census places
# it in the Mountain division; kept as-is to preserve existing outputs,
# but worth confirming with the data owners.
STATETOREGION = {
    "Alabama": "East South Central",
    "Alaska": "Pacific",
    "Arizona": "Mountain",
    "Arkansas": "West South Central",
    "California": "Pacific",
    "Colorado": "Mountain",
    "Connecticut": "New England",
    "Delaware": "Middle Atlantic",
    "District of Columbia": "Middle Atlantic",
    "Florida": "South Atlantic",
    "Georgia": "South Atlantic",
    "Hawaii": "Pacific",
    "Idaho": "Pacific",
    "Illinois": "East North Central",
    "Indiana": "East North Central",
    "Iowa": "West North Central",
    "Kansas": "West North Central",
    "Kentucky": "East South Central",
    "Louisiana": "West South Central",
    "Maine": "New England",
    "Maryland": "Middle Atlantic",
    "Massachusetts": "New England",
    "Michigan": "East North Central",
    "Minnesota": "West North Central",
    "Mississippi": "East South Central",
    "Missouri": "West North Central",
    "Montana": "Mountain",
    "Nebraska": "West North Central",
    "Nevada": "Mountain",
    "New Hampshire": "New England",
    "New Jersey": "Middle Atlantic",
    "New Mexico": "Mountain",
    "New York": "Middle Atlantic",
    "North Carolina": "South Atlantic",
    "North Dakota": "West North Central",
    "Ohio": "East North Central",
    "Oklahoma": "West South Central",
    "Oregon": "Pacific",
    "Pennsylvania": "Middle Atlantic",
    "Rhode Island": "New England",
    "South Carolina": "South Atlantic",
    "South Dakota": "West North Central",
    "Tennessee": "East South Central",
    "Texas": "West South Central",
    "Utah": "Mountain",
    "Vermont": "New England",
    "Virginia": "South Atlantic",
    "Washington": "Pacific",
    "West Virginia": "South Atlantic",
    "Wisconsin": "East North Central",
    "Wyoming": "Mountain",
}
# All FIPS codes in ascending order.  Derived from STATE_DICT so the two
# can never drift apart (this was previously a hand-maintained duplicate
# list of the same 51 values).
STATENUMLIST = sorted(STATE_DICT)
# -----------------------------------------------------------------------------
# Region Data set up
# -----------------------------------------------------------------------------
def byRegions(oddsDf):
    """Compute, per US region, the share of respondents answering No/Yes
    to "Pursue Advancement", then attach those shares to every state id
    in that region (one row per FIPS code, for choropleth plotting)."""
    # One-hot encode the Yes/No answers and tag each row with its region.
    answers = pd.get_dummies(oddsDf[["Pursue Advancement"]].copy())
    answers["US Region"] = oddsDf[["US Region"]].copy()
    answers = answers.rename(columns=COLNAMES)
    # Aggregate counts per region, then normalize each row to fractions.
    per_region = answers.groupby("US Region").sum()
    per_region = per_region.div(per_region.sum(axis=1), axis=0)
    per_region["Percentage That Said No"] = (
        per_region["No"].apply(lambda frac: frac * 100).round(2)
    )
    # Map state FIPS id -> state name -> region, then join in the shares.
    states = DataFrame(STATENUMLIST, columns=["id"])
    states["State"] = states["id"].map(STATE_DICT)
    states["US Region"] = states["State"].map(STATETOREGION)
    return states.merge(
        per_region, left_on="US Region", right_on="US Region", how="left"
    )
# -----------------------------------------------------------------------------
# User Decision Comparison
# -----------------------------------------------------------------------------
def ageGenderOdds(oddsDf, odds):
    """Group the survey by (Age Range, Gender at Birth) and compute the
    fraction of No/Yes answers for one odds scenario.

    oddsDf: survey responses with "Pursue Advancement", "Age Range" and
            "Gender at Birth" columns.
    odds:   the scenario's odds level (e.g. 2 for "1 in 2").
    Returns a DataFrame indexed by (Age Range, Gender at Birth) with
    columns No, Yes (fractions summing to 1) and Odds (constant `odds`).
    """
    # One-hot encode the Yes/No answers and attach the grouping columns.
    # (The original also built a duplicate "US Region" column here; it
    # was never used and, as a string column, breaks groupby().sum() on
    # modern pandas, so it has been removed.)
    pursueDf = pd.get_dummies(oddsDf[["Pursue Advancement"]].copy())
    pursueDf["Age Range"] = oddsDf[["Age Range"]].copy()
    pursueDf["Gender at Birth"] = oddsDf[["Gender at Birth"]].copy()
    pursueDf = pursueDf.rename(columns=COLNAMES)
    pursueDf = pursueDf.groupby(["Age Range", "Gender at Birth"]).sum()
    # Normalize counts to fractions of each group's total.
    total = pursueDf["No"] + pursueDf["Yes"]
    pursueDf["No"] = pursueDf["No"].div(total)
    pursueDf["Yes"] = pursueDf["Yes"].div(total)
    pursueDf["Odds"] = odds
    return pursueDf
def mergeAgeGenderOdds(df2, df3, df4, df5, df10):
    """Stack the age/gender odds tables for every survey odds level
    (1-in-2 through 1-in-10) into a single DataFrame."""
    frames = [
        ageGenderOdds(frame, odds)
        for frame, odds in ((df2, 2), (df3, 3), (df4, 4), (df5, 5), (df10, 10))
    ]
    return pd.concat(frames)
# -----------------------------------------------------------------------------
# Main
# -----------------------------------------------------------------------------
def main():
    """Load the five odds-scenario survey spreadsheets, derive the
    grouped (age/gender) and per-region summary tables, and write them
    back out as CSVs for the Streamlit app to consume."""
    # One spreadsheet per odds scenario (1-in-2 ... 1-in-10).
    df2 = pd.read_excel(
        "fp--05839-abby-jeff-kyle-will/data/1-in-2-Ending-Humanity.xlsx"
    )
    df3 = pd.read_excel(
        "fp--05839-abby-jeff-kyle-will/data/1-in-3-Ending-Humanity.xlsx"
    )
    df4 = pd.read_excel(
        "fp--05839-abby-jeff-kyle-will/data/1-in-4-Ending-Humanity.xlsx"
    )
    df5 = pd.read_excel(
        "fp--05839-abby-jeff-kyle-will/data/1-in-5-Ending-Humanity.xlsx"
    )
    df10 = pd.read_excel(
        "fp--05839-abby-jeff-kyle-will/data/1-in-10-Ending-Humanity.xlsx"
    )
    # Combined age/gender breakdown across all odds levels.
    groupedDf = mergeAgeGenderOdds(df2, df3, df4, df5, df10)
    groupedDf.to_csv("fp--05839-abby-jeff-kyle-will/data/grouped-Ending-Humanity.csv")
    # Per-region summaries, one file per odds level.
    regions2 = byRegions(df2)
    regions3 = byRegions(df3)
    regions4 = byRegions(df4)
    regions5 = byRegions(df5)
    regions10 = byRegions(df10)
    regions2.to_csv("fp--05839-abby-jeff-kyle-will/data/regions2-Ending-Humanity.csv")
    regions3.to_csv("fp--05839-abby-jeff-kyle-will/data/regions3-Ending-Humanity.csv")
    regions4.to_csv("fp--05839-abby-jeff-kyle-will/data/regions4-Ending-Humanity.csv")
    regions5.to_csv("fp--05839-abby-jeff-kyle-will/data/regions5-Ending-Humanity.csv")
    regions10.to_csv("fp--05839-abby-jeff-kyle-will/data/regions10-Ending-Humanity.csv")
# -----------------------------------------------------------------------------
# Script Entry Point
# -----------------------------------------------------------------------------
if __name__ == "__main__":
main()
| {"/streamlit_app.py": ["/support/topic_modeling.py", "/support/sentiment_analysis.py"]} |
# streamlit_app.py
# Streamlit application.
#
# Abby Vorhaus, Jeff Moore, Kyle Dotterrer, Will Borom
import numpy as np
import pandas as pd
import altair as alt
import vega_datasets
from PIL import Image
import streamlit as st
import matplotlib.pyplot as plt
from streamlit_timeline import timeline
# Custom support imports
import support.topic_modeling as tm
import support.sentiment_analysis as sm
# The relative path to the directory in which data is stored
DATA_PATH = "data/"
# The default height for our visualizations
DEFAULT_WIDTH = 800
# The default height for our visualizations
DEFAULT_HEIGHT = 550
# Colors from Vega color scheme for charts that should not be scaled
COLOR_SCHEME_BLUE = "#bcdeea"
COLOR_SCHEME_ORANGE = "#ffbc79"
# The name of the timeline data file
TIMELINE_DATA_FILENAME = "timeline.json"
# Allow streamlit to use the whole page
st.set_page_config(page_title="Machine Intelligence", layout="wide")
# -----------------------------------------------------------------------------
# Introduction
# -----------------------------------------------------------------------------
def render_introduction_content():
    """
    Render the introduction content.
    """
    # NOTE: the bare triple-quoted strings below are presumably rendered
    # as markdown by Streamlit's "magic" feature — they are runtime
    # content, not docstrings, so their text must not be edited casually.
    """
    # Machine Intelligence: Risks and Opportunities
    ---
    Machine intelligence is a complex topic - technically, socially, politically, and ethically. Because of this complexity, navigating this topic requires a combination of both breadth and depth of understanding that is difficult to come by in most settings.
    In this application, we seek to provide you with the tools to navigate this topic effectively. What is machine intelligence, and why is it important? What is our current position, and how did we get here? What are the likely future implications, and what are we currently doing to shape this future? We address each of these questions and more in detail below.
    Before we begin, we make one request of you: approach this topic with an open mind. It is easy to come in with pre-conceived notions of what machine intelligence is, what it is not, and what it can be. Depending on your background, such notions may or may not be well-founded. This topic might seem like science fiction, but below we will attempt to demonstrate that much of the hype surrounding machine intelligence, both optimistic and pessimistic, is grounded in data and the best technical understanding we currently possess.
    """
    # Sidebar scaffold shared by the deep-dive controls of later sections.
    st.sidebar.header("Digging Deeper")
    st.sidebar.write(
        "We are only grazing the surface with our main graphics, "
        + "but you can keep exploring! Below you will find options "
        + "for each section that will allow you to explore the data."
    )
# -----------------------------------------------------------------------------
# Chapter: Definition / History
# -----------------------------------------------------------------------------
def render_definition_chapter():
    """
    Render the definition / history of machine intelligence chapter.
    """
    # Bare strings below are Streamlit "magic" markdown (runtime content).
    """
    # Defining Machine Intelligence
    What do we mean by "machine intelligence"? The definition of the term has evolved somewhat with the technical capabilities in the field, but for the purposes of our exploration, we will simply use the term to mean *the realization of general intelligence capabilities in non-biological substrate*. There are two salient components of this definition:
    - We are concerned with _general_ intelligence; the other side of the spectrum, _narrow_ intelligence, poses its own set of risks and opportunities, but these are not our focus here
    - The substrate in which intelligence is achieved is immaterial; this implies that the term "machine" is used only loosely here, and that the form factor in which machine intelligence is realized might be far from what we might expect
    """
    """
    ### A Brief History
    Machine intelligence may seem like a distinctly modern phenomenon, but research into the subject has been going on for nearly seventy years. To get a better idea of what machine intelligence is and where it came from, we highlight some of the major milestones in the development of machine intelligence in the timeline below.
    """
    # Load the pre-built timeline JSON from the data directory.
    path = DATA_PATH + TIMELINE_DATA_FILENAME
    with open(path, "r") as f:
        data = f.read()
    # Render the timeline
    timeline(data, height=500)
    """
    ### Cars, Cats, and Playing Games
    After examining the events in the timeline above, one might be left with the impression that machine intelligence is little more than a novelty. We see that the technology is capable of helping us in our daily endeavors, perhaps by recognizing cat pictures and driving us where we need to go. While it does appear that machine intelligence has the capacity to surpass humans, it appears this phenomenon only occurs in narrow, game-playing settings that are hardly of interest to most. It is a long way from the chessboard to global domination; is machine intelligence truly a technology with vast disruptive potential?
    """
# -----------------------------------------------------------------------------
# Chapter: Paradigm
# -----------------------------------------------------------------------------
def spect_intel(slide_val, df, pointsDf):
    """Build the layered Altair chart for the "Spectrum of Intelligence".

    slide_val: current slider position; it caps the x-domain so moving
               the slider progressively reveals the exponential curve.
    df:        the curve samples (columns x, exp) from gen_exp().
    pointsDf:  labelled markers (Chicken ... Superintelligence).
    Returns the composed (line + points + labels) Altair chart.
    """
    # Orange markers for the labelled intelligence levels.
    points = (
        alt.Chart(pointsDf)
        .mark_circle(color="#ffbc79", size=300, clip=True)
        .encode(
            x=alt.X(
                "x", title=None, axis=None, scale=alt.Scale(domain=(0, slide_val + 0.1))
            ),
            y=alt.Y("exp", title=None, axis=None, scale=alt.Scale(domain=(-2, 8105))),
        )
        .properties(width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT)
    )
    # Blue exponential curve; clip=True hides points beyond the domain.
    line = (
        alt.Chart(df)
        .mark_line(color="#bcdeea", size=7, clip=True)
        .encode(
            x=alt.X(
                "x", title=None, axis=None, scale=alt.Scale(domain=(0, slide_val + 0.1))
            ),
            y=alt.Y("exp", title=None, axis=None, scale=alt.Scale(domain=(-2, 8105))),
        )
        .properties(width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT)
    )
    # Text labels anchored just above-left of each marker.
    text = points.mark_text(
        align="right",
        baseline="middle",
        dx=-10,
        dy=-15,
        font="IBM Plex Sans",
        fontSize=15,
        fontWeight="bolder",
        clip=True,
        color="#ffbc79",
    ).encode(text="Type")
    # Layer the three marks and attach the chart title.
    finalchart = line + points + text
    finalchart = finalchart.properties(
        title={
            "text": ["The Spectrum of Intelligence"],
            "subtitle": "Will Humans always be the Smartest Things Around?",
        }
    ).configure_title(
        fontSize=40,
        font="IBM Plex Sans",
    )
    return finalchart
def gen_exp():
    """Generate the data backing the intelligence-spectrum chart.

    Returns (curve, markers): `curve` holds 1000 random integer x values
    in [0, 10] with their exponentials, `markers` holds the four labelled
    reference points with their exponentials.
    """
    curve = pd.DataFrame({"x": np.random.randint(0, 11, size=1000)})
    curve["exp"] = np.exp(curve["x"])
    markers = pd.DataFrame(
        {
            "x": [1, 5, 6, 9],
            "Type": [
                "Chicken",
                "Average Human",
                "John von Neumann",
                "Machine Superintelligence",
            ],
        }
    )
    markers["exp"] = np.exp(markers["x"])
    return curve, markers
def render_intelligence_section():
    """
    Render the 'importance of intelligence' section of the paradigm-shift chapter.
    """
    # Bare strings below are Streamlit "magic" markdown (runtime content).
    """
    ### The Primacy of Intelligence
    **Intelligence Allows us to get What we Want** Why is intelligence a matter of consequence? An answer to this question requires that we recognize that intelligence is the foundational source of power and control in the world. Intelligence is what allows us to attain the things we value in this world. Consider for example the diction between humans and the many members of the animal kingdom over which we (largely) dominate. Apes are far stronger than humans, pound-for-pound. Tigers are faster and have much sharper teeth. Ants represent a far larger proportion of the Earth's overall biomass. Yet in each of these cases, the fate of the this animal on Earth is almost entirely at the discretion of humanity, rather than in the hands of the animal itself. The common factor in each comparison is human intelligence - the fact that we are the most intellectually-competent species around.
    **Human Intelligence is Nothing Special** So humans are the smartest species around, will this always be the case? There is no reason to think that this is so, or even that humans stand anywhere _near_ the zenith of what is possible. In fact, there is reason to believe that the intelligence curve extends far beyond what we might currently be able to observe and understand. What would be the implications for humanity if the spectrum of intelligence actually resembles something like the visualization below?
    """
    # Slider (0-9) drives how much of the exponential curve is revealed.
    df, pointsDf = gen_exp()
    slider_spect_intelligence = st.slider(
        "Slide to Explore the Shape of the Intelligence Spectrum", 0, 9, 0
    )
    st.write(spect_intel(slider_spect_intelligence, df, pointsDf))
    """
    This visualization is not meant to actually quantify potential differences in level of intelligence, but merely highlight the fact that we (humans) might not even be able to conceive of the types of intelligence that are possible because of our own narrow viewpoint. However, is there any reason to believe that machine intelligence stands a chance of progressing to this point?
    """
def magnitude_viz_speed(chart_type):
    """Build the speed-comparison bar chart.

    chart_type: "Speed" for raw m/s values, anything else for the
                log-scaled variant (used because the raw values span
                many orders of magnitude).
    Returns the configured Altair chart.
    """
    # NOTE(review): the jet/airliner values (18000 / 15000 m/s) look far
    # too large for the stated m/s unit — confirm the intended units.
    speeds = pd.DataFrame(
        [
            {"Speed": 300000000, "Type": "Computer Signals", "row": 1},
            {"Speed": 18000, "Type": "F35 Fighter Jet", "row": 2},
            {"Speed": 15000, "Type": "Commericial Airline", "row": 3},
            {"Speed": 150, "Type": "Human Being Biological Axons", "row": 4},
        ]
    )
    speeds["Log Speed"] = np.log(speeds["Speed"])
    if chart_type == "Speed":
        # Raw-value variant; the slower bars become nearly invisible.
        speed_viz = (
            alt.Chart(speeds)
            .mark_bar()
            .encode(
                x=alt.X("Speed:Q", title="Speed (m/s)"),
                y=alt.Y("Type:O", title=None),
                color=alt.Color("Type:O", scale=alt.Scale(scheme="redyellowblue")),
                tooltip=[alt.Tooltip("Type:O"), alt.Tooltip("Speed:Q")],
            )
            .properties(
                width=30000,
                height=400,
            )
            .properties(
                title={
                    "text": ["Comparing Speeds"],
                    "subtitle": "When directly comparing various objects, it is difficult to visually see the difference between the slower objects",
                }
            )
        )
    else:
        # Log-scaled variant; makes the slower objects distinguishable.
        speed_viz = (
            alt.Chart(speeds)
            .mark_bar()
            .encode(
                x=alt.X(
                    "Log Speed:Q", title="Logarithmic Values of Various Speed (m/s)"
                ),
                y=alt.Y("Type:O", title=None),
                color=alt.Color("Type:O", scale=alt.Scale(scheme="redyellowblue")),
                tooltip=[
                    alt.Tooltip("Type:O"),
                    alt.Tooltip("Speed:Q"),
                    alt.Tooltip("Log Speed:Q"),
                ],
            )
            .properties(
                width=10000,
                height=400,
            )
            .properties(
                title={
                    "text": ["Comparing Logarithmic Speeds"],
                    "subtitle": "In order to actually visually difference between some of the slower objects, we must convert via logarithm",
                }
            )
        )
    speed_viz = speed_viz.configure_title(
        fontSize=30, font="IBM Plex Sans", anchor="start"
    )
    return speed_viz
def magnitude_viz_brain():
    """Build the emoji-based frequency-magnitude chart: ten rows of 1000
    person icons (one icon per 1000 humans) versus a single computer
    icon, to convey the ~10,000,000x clock-frequency gap.

    Returns the configured Altair text-mark chart.
    """
    source = pd.DataFrame(
        [
            {"count": 1, "Type": "computer processor", "row": 1},
            {"count": 1000, "Type": "1000 human beings", "row": 2},
            {"count": 1000, "Type": "1000 human beings", "row": 3},
            {"count": 1000, "Type": "1000 human beings", "row": 4},
            {"count": 1000, "Type": "1000 human beings", "row": 5},
            {"count": 1000, "Type": "1000 human beings", "row": 6},
            {"count": 1000, "Type": "1000 human beings", "row": 7},
            {"count": 1000, "Type": "1000 human beings", "row": 8},
            {"count": 1000, "Type": "1000 human beings", "row": 9},
            {"count": 1000, "Type": "1000 human beings", "row": 10},
            {"count": 1000, "Type": "1000 human beings", "row": 11},
        ]
    )
    # Each row's text is the icon repeated `count` times.
    source["emoji"] = [
        {"1000 human beings": "🛉", "computer processor": "💻"}[t] * int(cnt)
        for t, cnt in source[["Type", "count"]].values
    ]
    source.head()
    # Very wide chart so the repeated icons extend horizontally; the user
    # is expected to scroll to appreciate the magnitude.
    mag_viz = (
        alt.Chart(source)
        .mark_text(size=30, align="left")
        .encode(
            alt.X("count:O", axis=None, scale=alt.Scale(range=[0, 100])),
            alt.Y("row:O", axis=None),
            alt.Text("emoji:N"),
        )
        .properties(width=20000, height=400)
        .transform_calculate(value="0")
        .properties(
            title="Another Look at the Frequency Magnitude",
        )
        .properties(background="#f2f2f2")
    )
    mag_viz = mag_viz.configure_title(
        fontSize=35, font="IBM Plex Sans", anchor="start", color="black"
    )
    return mag_viz
def render_substrate_section():
    """
    Render the substrate distinction section of the paradigm-shift chapter.
    """
    # Bare strings below are Streamlit "magic" markdown (runtime content).
    """
    ### The Potential of Mechanical Minds
    What are the implications of releasing intelligence from the bonds of a biological substrate? In this section we will compare Human Beings brain potential to the computerized counterparts. We will begin to see just how vastly computers outperform humans in the areas of brain frequency, speed, and storage capacity.
    """
    # Sidebar controls select which human-vs-computer comparison to show.
    st.sidebar.header("The Potential of Mechanical Minds")
    st.sidebar.write(
        "Select other options to understand the scale of the differences between a Human Being and a Computer"
    )
    scale_opt = st.sidebar.selectbox(
        "Select an option", ("Frequency", "Speed", "Storage")
    )
    if scale_opt == "Frequency":
        # Frequency comparison: brain image scaled by the chosen multiple,
        # with an optional emoji-magnitude alternative chart.
        alt_brain = st.sidebar.radio(
            "Look at an alternative magnitude visualization",
            ("-", "Alternative Magnitude"),
        )
        """
        - Frequency: Biological neruons fire at 20Hz. The clock speed in your run-of-the-mill laptop is 2GHz. This is a 10,000,000x difference, or seven orders of magnitude. Choose the brain type below (Human, Computer) to see the vast difference between the combined frequency of 1000x human brains and that of a typical processor.
        """
        human_brain = Image.open("img/brain.png")
        # Maps slider label -> [image width in px, caption].
        brains = {
            "100,000x Human": [10, "100,000x Human Brain"],
            "500,000x Human": [50, "500,000x Human Brain"],
            "1,000,000x Human": [100, "1,000,000x Human Brain"],
            "2,500,000x Human": [250, "2,500,000x Human Brain"],
            "5,000,000x Human": [500, "5,000,000x Human Brain"],
            "7,500,000x Human": [750, "7,500,000x Human Brain"],
            "Typical Processor": [1000, "Typical Processor"],
        }
        brain = st.select_slider("Select your frequency.", list(brains.keys()))
        st.image(
            human_brain,
            caption=brains[brain][1],
            output_format="PNG",
            width=brains[brain][0],
        )
        """
        """
        if alt_brain == "Alternative Magnitude":
            st.write(
                "Each human icon is equivalent to the combined frequency of 1000 human beings. Scroll to see the full impact."
            )
            st.write(magnitude_viz_brain())
    elif scale_opt == "Speed":
        # Speed comparison: one image per vehicle/entity, with optional
        # raw or log-scaled bar-chart alternatives.
        alt_speed = st.sidebar.radio(
            "Look at alternative magnitude visualizations",
            ("-", "Speed", "Logarithim of Speed"),
        )
        """
        - Speed: Signals propagate in biological axons at ~150 m/s. The same signals propagate at the speed of light within an integrated circuit. In this domain, the computer vastly outperforms the human again.
        """
        speedhuman = Image.open("img/speedhuman.png")
        speedairline = Image.open("img/speedairline.png")
        speedF35 = Image.open("img/speedF35.png")
        speedprocessor = Image.open("img/speedprocessor.png")
        speeds = {
            "Human": speedhuman,
            "Commericial Airline": speedairline,
            "F35 Fighter Jet": speedF35,
            "Typical Processor": speedprocessor,
        }
        speed = st.select_slider("Select your speed.", list(speeds.keys()))
        st.image(speeds[speed], output_format="PNG")
        if alt_speed == "Speed":
            st.write(magnitude_viz_speed(alt_speed))
        elif alt_speed == "Logarithim of Speed":
            st.write(magnitude_viz_speed(alt_speed))
    else:
        # Storage comparison: one image per capacity tier.
        """
        - Capacity: The human cranium imposes tight size limitations on our brains. A mechanical mind that implements a machine intelligence has no such size restrictions. If we look at the typical human brain it can hold on average 2.5 million Gigabytes, whereas a small cloud facility holds about 400 million Gigbytes with all servers leveraged.
        """
        storagehuman = Image.open("img/HumanStorage.png")
        storage4 = Image.open("img/4ServerStorage.png")
        storage9 = Image.open("img/9ServerStorage.png")
        storage16 = Image.open("img/16ServerStorage.png")
        storage25 = Image.open("img/25ServerStorage.png")
        storage81 = Image.open("img/81ServerStorage.png")
        storage156 = Image.open("img/156ServerStorage.png")
        storages = {
            "Human": storagehuman,
            "4x 2U Server Rack": storage4,
            "9x 2U Server Rack": storage9,
            "16x 2U Server Rack": storage16,
            "25x 2U Server Rack": storage25,
            "81x 2U Server Rack": storage81,
            "Small Cloud Facility": storage156,
        }
        storage = st.select_slider(
            "Select your storage capacity.", list(storages.keys())
        )
        st.image(storages[storage], output_format="PNG")
def render_paradigm_chapter():
    """
    Render the paradigm shift chapter.

    The bare triple-quoted string below is not a docstring: Streamlit "magic"
    renders any bare expression in the app script as markdown. The chapter
    body is delegated to the intelligence and substrate sections.
    """
    # Fixed displayed-text typos: "its clear" -> "it's clear",
    # "hailed a major milestones" -> "hailed as major milestones".
    """
    # A Paradigm Shift
    Based on the milestones timeline above, it's clear that machine intelligence has demonstrated its prowess in select areas of human endeavor. This may be demoralizing for the world's Chess and Go players, and makes for an interesting couple of days of news coverage, but is this really cause for concern or trepidation? In other words, one might be skeptical of the potential of machine intelligence on the basis of events that have been hailed as major milestones in its development history. Is it really that big of a deal?
    The answer is an unequivocal 'yes', but justification requires some additional explanation.
    """
    render_intelligence_section()
    render_substrate_section()
# -----------------------------------------------------------------------------
# Chapter: Perceptions
# -----------------------------------------------------------------------------
def render_popular_perceptions_section():
    """
    Render the popular perceptions section of the perceptions chapter.

    Displays introductory markdown (via Streamlit "magic") and then the
    public-media sentiment visualization from the ``sm`` module.
    """
    """
    ### Machine Intelligence in the Popular Media
    How do we characterize the popular public perception of machine intelligence?
    """
    # Fixed displayed-text typos: "Let's Explore ... media.." -> "Let's explore
    # ... media.", "positve"/"positvive" -> "positive", and the trailing
    # question now ends with a question mark.
    """
    With the prevalence of research and popular movies about Artificial Intelligence, it would be safe to say that the public has some thoughts and opinions on Artificial Intelligence.
    Did you know that there were 182 movies that featured Artificial Intelligence in some form from 2000-2020? With provocative titles such as "AI Amok" (2020) and "RoboCop" (2014), was public perception of AI affected by these movies? Let's explore the perception of machine intelligence in the media.
    We pulled all news articles from Nature.com from 2000-2020 that featured Artificial Intelligence. These articles do not include any journal or academic publications. We then performed a sentiment analysis on the content in the articles to judge the overarching sentiment of the reporting and tallied the number of positive articles vs negative articles.
    As you can see below the percentage of articles that are positive is almost consistently 100%. Even when looking at the overall perception, you can see that it is actually pretty close to neutral in all years. There are a few explanations that could answer the question, but lets see if there is a correlation between hit-movies and the perception in news media.
    Were there popular movies that may have affected the public perception of Artificial Intelligence and caused the sentiment analysis of these articles to trend toward neutral from the positive? Slide the bar to the right and see.
    ### Sentiment and Perception in the Public Media
    """
    # Interactive public-media sentiment visualization.
    sm.public()
    # "intepreted" -> "interpreted".
    """
    Many of the media articles are slightly above neutral. This can be interpreted a few ways in that we can infer that humanity is wary of AI and machine intelligence or that we are playing a wait and see game. It could be a combination of both. Can we really say that the future of machine intelligence or Artificial Intelligence is bleak? Should we be afraid? Let's explore what is being researched in the field of AI.
    """
def render_professional_perceptions_section():
    """
    Render the professional perceptions section of the perceptions chapter.

    Shows the academic topic-model visualization and a sidebar selector that
    tracks one research topic over time (both from the ``tm`` module).
    """
    # Fixed displayed-text typos: "prodcess" -> "process",
    # "every care on the road" -> "every car on the road".
    """
    ### Machine Intelligence in Professional Settings
    How do we characterize the nature of research work on machine intelligence?
    Research in the field of Artificial Intelligence is growing quickly. Over the last 8 years, there have been over 500 publications regarding Artificial Intelligence and related topics from a single journal source: the Journal of Artificial Intelligence Research.
    We looked at the title of each article and built a topic model to gather the most popular 5 topics. Using this model, we discovered that greater than 30% of the articles published on these fields of research.
    The first chart shows us how many articles are being published in a given year that are on the most popular topics. AI Based Multi-Lingual translation is a field that is expanding quickly and some avenues of research are developing on the fly audio translation from multiple languages.
    Autonomous AI Decision Making is the process of making decision without human oversight. Some focus areas are healthcare, where, according to Science Direct, "clinical decisions are made without human oversight."
    Cloud-Based ML Frameworks is an area of research that seeks to create robust and modular cloud based systems to do machine learning.
    Language Models is another aspect of research that is focused on predicting words. This may sound simple, but languages have many rules and grammatical foibles that make this difficult.
    The last popular topic is Multi-agent pathfinding. This area of research is based on calculating the best path for multiple objects in real time. Imagine a machine intelligence trying to calculate the most optimal route for every car on the road.
    As you can see in the chart below, there are a number of research papers that are on one of the top-5 topics. As you move the slider over you can see how things change over time. For a different perspective, you can track a single topic from the sidebar and follow the research pattern over time.
    """
    # Main academic topic-model chart.
    tm.academic()
    # The five most popular topics discovered by the topic model; these label
    # the sidebar selector below.
    topics = [
        "Language Models",
        "Cloud-Based ML Frameworks",
        "AI Based Multi-Lingual Translation",
        "Autonomous AI Decision Making",
        "Multi-Agent Pathfinding",
    ]
    st.sidebar.header("Tracking Academic Research")
    st.sidebar.write("Select each option to observe the shift in research priorities.")
    pTopic = st.sidebar.selectbox("Select an option", topics)
    tm.topicTimeline(pTopic)
    """
    The AI Based Multi-Lingual topic seems to be trending upward from 2020 into 2021. That is an interesting observation that could be related to the COVID-19 pandemic. As many people are teleworking, is there a greater call for instant translation of multiple languages? Is this a boon to humanity as we strive to counter the virus that is destroying our world?
    Autonomous AI Decision Making is also trending up in 2021. The optimistic view of this is that AI will help derive vaccine genomes to help with stopping the virus, the pessimistic view is that the AI will start making decisions that could negatively impact us. Who knows what the future holds?
    """
def render_perceptions_chapter():
    """
    Render the perceptions chapter.

    Emits the chapter heading (Streamlit "magic") and delegates to the
    popular and professional perception sections.
    """
    # Fixed displayed-text typo: "positiviely" -> "positively".
    """
    ---
    # Perceptions versus Reality
    How much do we really know about machine intelligence or Artificial Intelligence? What are the perceptions of AI that guide humanity toward the future? Are we thinking positively? Negatively? Are we neutral?
    """
    render_popular_perceptions_section()
    render_professional_perceptions_section()
# -----------------------------------------------------------------------------
# Chapter: Prospects
# -----------------------------------------------------------------------------
# Lookup tables translating the UI radio-button labels into the values stored
# in the survey datasets.
odds_dict = {"1 in 2": 2, "1 in 3": 3, "1 in 4": 4, "1 in 5": 5, "1 in 10": 10}
age_dict = {
    "18 to 29": "18 - 29",
    "30 to 44": "30 - 44",
    "45 to 59": "45 - 59",
    "60+": "60",
}
gender_dict = {"Female": "female", "Male": "male"}

# Expert-survey responses (whole percentages per respondent group) on the
# expected impact of creating high level machine intelligence (HLMI).
_SI_IMPACT_ROWS = [
    [17, 24, 23, 17, 18],
    [28, 25, 12, 12, 24],
    [31, 30, 20, 13, 6],
    [20, 40, 19, 13, 8],
    [24, 28, 17, 13, 18],
]
si_impact = pd.DataFrame(
    _SI_IMPACT_ROWS,
    columns=["Extremely Good", "Good", "Neutral", "Bad", "Extremely Bad"],
    index=[
        "Participants of Conference on “Philosophy and Theory of AI”",
        "Participants of the conferences of “Artificial General Intelligence”",
        "Members of the Greek Association for Artificial Intelligence",
        "The 100 ‘Top authors in artificial intelligence’",
        "All Groups",
    ],
)
def outlooks():
    """Build a normalized stacked-bar chart of expert outlooks on HLMI.

    Melts the module-level ``si_impact`` table into long form (one row per
    group/outlook pair) and plots each group's outlook mix as proportions.

    Returns:
        An Altair chart object.
    """
    outlook_columns = list(si_impact.columns)
    long_form = (
        si_impact.reset_index()
        .rename(columns={"index": "Group"})
        .melt(
            id_vars=["Group"],
            value_vars=outlook_columns,
            var_name="Outlook",
            value_name="Percent",
        )
    )
    # Survey values are whole percentages; convert to fractions for tooltips.
    long_form["Percent"] = long_form["Percent"] / 100
    return (
        alt.Chart(long_form)
        .mark_bar()
        .encode(
            x=alt.X("sum(Percent)", stack="normalize"),
            y=alt.Y("Group", title=None),
            color=alt.Color("Outlook:N", scale=alt.Scale(scheme="redyellowblue")),
            tooltip=[
                alt.Tooltip("Group:N"),
                alt.Tooltip("Outlook:N"),
                alt.Tooltip("Percent:Q", format=".1%"),
            ],
        )
        .properties(title="What Experts Expect from the creation of HLMI")
        .configure_axis(labelLimit=1000)
        .properties(width=DEFAULT_WIDTH, height=400)
        .configure_title(fontSize=20, font="IBM Plex Sans")
    )
def dev_hmli():
    """Build a bar chart of research areas and their likelihood of impacting HLMI.

    Each bar is a field of study; its height is the surveyed percent
    likelihood that the field will contribute to achieving high level
    machine intelligence.

    Returns:
        An interactive Altair chart object.
    """
    # Surveyed percent likelihood per research area, highest first.
    likelihood = pd.DataFrame(
        {
            "Percent": [
                47.9, 42.0, 42.0, 39.6, 37.3, 35.5, 34.9, 32.5, 29.0,
                29.0, 23.7, 21.3, 20.7, 17.8, 13.6, 4.1, 2.6,
            ]
        },
        index=[
            "Cognitive Science",
            "Integrated Cognitive Architectures",
            "Algorithms Revealed by Computational Neuroscience",
            "Artificial Neural Networks",
            "Faster Computing Hardware",
            "Large-scale Datasets",
            "Embodied systems",
            "Other method(s) currently completely unknown",
            "Whole Brain Emulation",
            "Evolutionary Algorithms or Systems",
            "Other method(s) currently known to at least one investigator",
            "Logic-based Systems",
            "Algorithmic Complexity Theory",
            "No method will ever contribute to this aim",
            "Swarm Intelligence",
            "Robotics",
            "Bayesian Nets",
        ],
    ).reset_index()
    return (
        alt.Chart(likelihood)
        .mark_bar()
        .encode(
            x=alt.X("index:O", title=None),
            y=alt.Y(
                "Percent:Q",
                title="Percent Likelihood that the Topic Will Impact the Development of HLMI",
            ),
            color=alt.Color("index:O", scale=alt.Scale(scheme="redyellowblue")),
            tooltip=[alt.Tooltip("index:O"), alt.Tooltip("Percent:Q")],
        )
        .properties(
            width=DEFAULT_WIDTH,
            height=DEFAULT_HEIGHT,
        )
        .properties(title="What Will Impact the Development of HLMI")
        .configure_axis(labelLimit=1000)
        .configure_title(
            fontSize=20,
            font="IBM Plex Sans",
        )
        .interactive()
    )
def _donut_chart(sizes, names, header):
    """Render one "how soon will we achieve this" survey result as a donut chart.

    Parameters
    ----------
    sizes : list[int]
        Percent of respondents per time bucket (same order as *names*).
    names : list[str]
        Legend labels for the time buckets.
    header : str
        Streamlit header displayed above the chart.
    """
    # Slice labels follow the fixed "<bucket>: <pct>%" pattern used by all
    # three survey charts.
    labels = [f"{name}: {pct}%" for name, pct in zip(names, sizes)]
    fig = plt.figure()
    # A white circle drawn over the pie's center turns it into a donut.
    hole = plt.Circle((0, 0), 0.7, color="white")
    wedges, _ = plt.pie(
        sizes,
        labels=labels,
        colors=["#e75a3b", "#fa9e5d", "#faf8c1", "#d7eeec", "#81b5d5", "#3f58a6"],
    )
    plt.gcf().gca().add_artist(hole)
    plt.legend(
        wedges,
        names,
        title="How Soon Will We Achieve This",
        loc="center left",
        bbox_to_anchor=(1.2, -0.3, 0.5, 1),
    )
    st.header(header)
    st.pyplot(fig)


def render_expert_sentiment_section():
    """
    Render the expert sentiment section of the prospects chapter.

    Shows the Muller/Bostrom expert-survey results: a guess-the-odds slider,
    the outlook chart, and a sidebar of additional visualizations. The three
    "how soon" donut charts are rendered via the shared ``_donut_chart``
    helper (they previously existed as three copy-pasted blocks).
    """
    """
    ### Expert Sentiment on Machine Intelligence
    The conjecture that **every aspect of learning** can be simulated by a machine if it is described precisely enough forms the base of
    the pursuit of artificial intelligence. This concept helps define how current research in AI approaches continued development in the field.
    But, what does achieving true AI and beyond that super intelligence actually mean? And how soon can we expect to see it?
    This became the premise for a series of surveys and questionnaires created by Vincent Muller and Nick Bostrom and directed at subject
    matter experts in artificial and machine intelligence. These questions were not only attempting to gauge the likelihood of achieving
    high level machine intelligence (HLMI), but also to assess the outcome of such an achievement if it was to occur.
    Here we highlight some of the key results from the 2016 survey. The survey had 550 participants hand selected due to their background.
    Participants included participants of the conference on “Philosophy and Theory of AI”, participants of the conferences of “Artificial General Intelligence”,
    members of the Greek Association for Artificial Intelligence, and individuals from the 100 ‘top authors in artificial intelligence’ by ‘citation’.
    Perhaps the most interesting result from the survey was the uncertainty among experts of the prospect of HLMI and its consequences.
    Below is a slidebar that ranges from 0 to 100. It represents the probability of existential catastrophe if we as a human race develop
    high level machine intelligence. We want you to guess what you think this probability is by manipulating the slider. Then, you can
    compare your guess to the experts as well as see a breakdown of opinion by background.
    """
    slider_doom = st.slider(
        "Guess the likelihood that if HLMI comes into existence that it will result in an existential catastrophe for the human race.",
        0.0,
        100.0,
        0.0,
    )
    # Only respond once the user has actually moved the slider.
    if slider_doom != 0.0:
        st.write(
            "You guessed "
            + str(slider_doom)
            + "%. Across a variety of experts, the predicted likelihood of high level machine intelligence resulting in existential catastrophe is around 18%."
        )
    st.write(outlooks())
    """
    Whether the experts placed their predictions higher or lower than yours, the larger takeaway is this: **the probability is non-zero**. In fact,
    it is roughly one in five. The debate about whether we will ever reach HLMI is unresolved among experts. Some
    believe HLMI and other critical aspects that would be required to achieve HLMI will never exist. Others feel we could see these
    achievements within a few decades. You can explore more with the sidebar.
    One piece is absolutely clear. **We have no idea what the consequences of high level machine intelligence will be**.
    """
    # Sidebar
    st.sidebar.header("Expert Sentiment on Machine Intelligence")
    st.sidebar.write(
        "Explore more responses from experts in the Machine Intelligence community."
    )
    hlmi = st.sidebar.selectbox(
        "Select an option to view more visualizations based on expert opinions.",
        (
            "-",
            "What Will Impact the Development of HLMI",
            "Simulate Learning",
            "Basic Operations",
            "Brain Architecture",
        ),
    )
    # Time buckets shared by all three "how soon" survey questions.
    time_buckets = [
        "Today",
        "Within 10 years",
        "Between 11 and 25 years",
        "Between 26 and 50 years",
        "More than 50 years",
        "Never",
    ]
    if hlmi == "What Will Impact the Development of HLMI":
        """ """
        """
        Advancements in every field are made daily, but only some will be able to directly contribute to our development of high level machine
        intelligence. Below are a variety of fields that experts feel could directly contribute to the body of knowledge required to achieve
        HLMI. Note percentages are not normalized.
        """
        st.write(dev_hmli())
    elif hlmi == "Simulate Learning":
        _donut_chart(
            [0, 5, 2, 11, 41, 41],
            time_buckets,
            "The Earliest Machines Will be Able to Simulate Learning and Every Other Aspect of Human Intelligence",
        )
        """
        While no one believes we can currently simulate true human-like learning, more than half of the experts believe it is achievable.
        This is a foundational building block for achieving HLMI, and how a machine could rapidly outpace the abilities of any human.
        """
    elif hlmi == "Basic Operations":
        _donut_chart(
            [6, 12, 10, 21, 29, 21],
            time_buckets,
            "The Earliest We Will Understand the Basic Operations of the Human Brain Sufficiently to Create Machine Simulation of Human Thought",
        )
        """
        The basic operations of the brain, though still immensely complex, are likely the most feasible to achieve. Some experts believe
        we already have a sufficient enough understanding to precisely describe and thus replicate these operations today. This achievement is
        the first major step towards HLMI.
        """
    elif hlmi == "Brain Architecture":
        _donut_chart(
            [0, 11, 14, 22, 40, 14],
            time_buckets,
            "The Earliest We Will Understand the Architecture and Structure of the Brain Sufficiently to Create Machine Simulation of Human Thought",
        )
        """
        The architecture of the brain still eludes our full understanding though most experts believe we will reach a point of full
        understanding in the next few decades. This would provide us insight into how the organizational control of our brain is structured,
        and will likely be key in developing machine intelligence beyond the capacity of our own in all regards, not just in a few areas.
        """
def user_selection(df):
    """Walk the user through the "destroy or transcend" survey question.

    Presents a chain of radio buttons (age bracket, sex, odds, decision) and,
    once all are answered, shows how the user's choice compares with the
    survey population plus a bar chart of the matching demographic slice.

    Parameters
    ----------
    df : pd.DataFrame
        Grouped survey results with "Age Range", "Gender at Birth", "Odds",
        "No" and "Yes" columns (the last two as fractions).
    """
    # Center the widgets in the middle column.
    col1, col2, col3 = st.beta_columns([1, 3, 1])
    # Guard-clause chain: each question only appears after the previous one
    # has been answered (the "-" sentinel means "not answered yet").
    age_range = col2.radio(
        "Choose your age bracket from the provided ranges.",
        ("-", "18 to 29", "30 to 44", "45 to 59", "60+"),
    )
    if age_range == "-":
        return
    gender_at_birth = col2.radio("Choose your sex.", ("-", "Female", "Male"))
    if gender_at_birth == "-":
        return
    odds = col2.radio(
        "Choose the Odds. Remember, this is the chance that humanity will face complete destruction.",
        ("-", "1 in 2", "1 in 3", "1 in 4", "1 in 5", "1 in 10"),
    )
    if odds == "-":
        return
    yes_or_no = col2.radio(
        "Given the odds, should we as humanity continue to pursue this advancement?",
        ("-", "Yes", "No"),
    )
    # Retrieve the single survey row matching the user's demographic and odds;
    # the UI labels are translated via the module-level lookup dicts.
    selection = df.loc[
        (df["Gender at Birth"] == gender_at_birth)
        & (df["Age Range"] == age_dict.get(age_range))
        & (df["Odds"] == odds_dict.get(odds))
    ]
    selection = selection.reset_index()
    graph_select = selection.melt(
        id_vars=["Age Range", "Gender at Birth"],
        value_vars=["No", "Yes"],
        var_name="Decision",
        value_name="Percent",
    )
    if yes_or_no == "-":
        return
    # Tell the user what fraction of their demographic agreed with them.
    if yes_or_no == "Yes":
        y_percent = ((selection.at[0, "Yes"]) * 100).round(1)
        st.write(
            "You agreed with "
            + str(y_percent)
            + " percent of individuals who are "
            + gender_dict.get(gender_at_birth)
            + " and are in the age bracket of "
            + age_range
            + " that we should press on despite the risks."
        )
    elif yes_or_no == "No":
        n_percent = ((selection.at[0, "No"]) * 100).round(1)
        st.write(
            "You agreed with "
            + str(n_percent)
            + " percent of individuals who are "
            + gender_dict.get(gender_at_birth)
            + " and are in the age bracket of "
            + age_range
            + " that jeopardizing humanity to that degree is simply not worth it."
        )
    # Build and display the Yes/No bar chart for the selected demographic.
    graph_title = "Decision Point: Odds " + odds
    basic_bar = (
        alt.Chart(graph_select)
        .mark_bar()
        .encode(
            x=alt.X("Decision"),
            y=alt.Y(
                "Percent:Q",
                scale=alt.Scale(domain=(0, 1)),
                axis=alt.Axis(format="%", title="Percentage"),
            ),
            color=alt.Color(
                "Decision:N", scale=alt.Scale(scheme="redyellowblue")
            ),
            tooltip=[
                alt.Tooltip("Age Range:N"),
                alt.Tooltip("Gender at Birth:N"),
                alt.Tooltip("Percent:Q", format=".2%"),
            ],
        )
        .properties(title=graph_title)
        .properties(
            width=400,
            height=350,
        )
        .interactive()
    )
    basic_bar = basic_bar.configure_title(fontSize=30, font="IBM Plex Sans")
    col2.write(basic_bar)
    # Closing commentary. Fixed displayed-text defects: stray `")` at the end,
    # "extention" -> "extension", "bad a conceptually" -> "bad at conceptually".
    """
    Even in the most extreme cases, many feel a case can be made to keep exploring. Existential risks, and by extension the true extinction of
    the human race, is difficult to fathom. Moreover, human beings are notoriously bad at conceptually understanding odds. It is possible
    with a rewording or restructuring of the question, the outcome may be different. But, as long as a non-trivial portion of society
    continues to support unburdened progress with disregard for the risks, humankind will remain **at the mercy of the unintended consequences - good or bad**.
    Now feel free to explore more about the how the United States public responded to the survey using the sidebar.
    """
def regions_viz(oddsDf, odds):
    """Build a US choropleth of regional answers to the survey question.

    Parameters
    ----------
    oddsDf : pd.DataFrame
        Per-state results for one odds level, keyed by topojson state ``id``,
        with "US Region", "State", "No", "Yes" and
        "Percentage That Said No" columns.
    odds : str
        The odds label (e.g. "1 in 3"), used in the chart subtitle.

    Returns:
        An Altair geoshape chart object.
    """
    # US state boundaries from the vega sample topojson.
    state_shapes = alt.topo_feature(vega_datasets.data.us_10m.url, "states")
    subtitle = f"{odds} odds of total destruction of humanity - note percentages are by region"
    chart = (
        alt.Chart(state_shapes)
        .mark_geoshape()
        .encode(
            color=alt.Color(
                "Percentage That Said No:Q",
                scale=alt.Scale(scheme="redyellowblue", reverse=True),
            ),
            tooltip=[
                "US Region:N",
                "State:N",
                alt.Tooltip("No:Q", format=".2%"),
                alt.Tooltip("Yes:Q", format=".2%"),
            ],
        )
        # Join the survey columns onto each state shape by topojson id.
        .transform_lookup(
            lookup="id",
            from_=alt.LookupData(
                oddsDf,
                "id",
                ["US Region", "State", "No", "Yes", "Percentage That Said No"],
            ),
        )
        .properties(
            width=DEFAULT_WIDTH,
            height=DEFAULT_HEIGHT,
        )
        .project(type="albersUsa")
        .properties(
            title={
                "text": ["Should we Continue to Pursue Advancement?"],
                "subtitle": subtitle,
            }
        )
        .configure_title(fontSize=30, font="IBM Plex Sans")
    )
    return chart
def prep_grouped_data(cdf):
    """Melt the grouped survey table into long form and add label columns.

    Parameters
    ----------
    cdf : pd.DataFrame
        Grouped survey results with "Age Range", "Gender at Birth", "Odds",
        "No" and "Yes" columns.

    Returns
    -------
    pd.DataFrame
        Long-form frame with one row per (group, decision) pair, plus a
        "Combined" column holding ``[gender, age, decision]`` lists and a
        comma-joined "List" label column (used as the x-axis of the
        comparison chart).
    """
    melted = cdf.melt(
        id_vars=["Age Range", "Gender at Birth", "Odds"],
        value_vars=["No", "Yes"],
        var_name="Decision",
        value_name="Percent",
    )
    # Vectorized replacement for the original row-wise apply(axis=1): slice
    # the three label columns once and turn each row into a plain list.
    melted["Combined"] = melted[
        ["Gender at Birth", "Age Range", "Decision"]
    ].values.tolist()
    melted["List"] = [", ".join(map(str, parts)) for parts in melted["Combined"]]
    return melted
def create_compare_chart(cdf, odds, ages, genders):
    """Build a grouped bar chart comparing decisions across demographics.

    Parameters
    ----------
    cdf : pd.DataFrame
        Long-form survey frame from ``prep_grouped_data``.
    odds : list[str]
        Odds labels selected in the UI (translated via ``odds_dict``).
    ages : list[str]
        Age-bracket labels selected in the UI (translated via ``age_dict``).
    genders : list[str]
        Gender labels selected in the UI ("Female"/"Male").

    Returns:
        An Altair chart object faceted by odds.
    """
    # Translate the UI labels into the values stored in the frame
    # (comprehensions replace the original manual append loops).
    df_odds = [odds_dict[odd] for odd in odds]
    df_ages = [age_dict[age] for age in ages]
    # Keep only the rows matching every selected dimension.
    cdf = cdf[
        cdf["Odds"].isin(df_odds)
        & cdf["Gender at Birth"].isin(genders)
        & cdf["Age Range"].isin(df_ages)
    ]
    compare_chart = (
        alt.Chart(cdf)
        .mark_bar()
        .encode(
            x=alt.X("List:O", title=None),
            y=alt.Y(
                "Percent",
                scale=alt.Scale(domain=(0, 1)),
                axis=alt.Axis(format="%", title="Percentage"),
            ),
            color=alt.Color("Decision", scale=alt.Scale(scheme="redyellowblue")),
            column="Odds:N",
            tooltip=[
                alt.Tooltip("Gender at Birth"),
                alt.Tooltip("Age Range"),
                alt.Tooltip("Decision"),
                alt.Tooltip("Percent:Q", format=".2%"),
            ],
        )
        .properties(
            title="Comparing Decisions across different Ages, Genders, and Odds"
        )
        .configure_axis(labelLimit=1000)
    )
    compare_chart = compare_chart.configure_title(
        fontSize=20,
        font="IBM Plex Sans",
    )
    return compare_chart
def render_user_choice_section():
    """
    Render the user choice section of the prospects chapter.

    Presents the Webb survey interactively (via ``user_selection``), then a
    sidebar with a per-region choropleth and a demographic comparison tool.
    Region CSVs are loaded lazily, only once the user picks an odds level
    (they were previously all read eagerly on every rerun).
    """
    """
    ### What Would You Choose?
    In 2017 author Rick Webb wrote an article for NewCo Shift on machine superintelligence and public opinion. In the process of developing
    this article, he surveyed various populations in the United States. One of the questions posed was:
    "Humanity has discovered a scientific advancement. Pursuing it gives humanity two possible options: a chance that all of humanity
    will die instantly, or a chance that poverty, death and disease will be cured for everyone, forever. Should we pursue it?"
    In his survey, Webb put the chance at varying levels to see how the public would respond, but the two results, transcendence or destruction, were always the same.
    Now, we will give you a chance to do the same and see how you compare to the rest of the respondents.
    First select your age range and sex. This will let you see how you compare amongst others. Don't worry! You can use the sidebar
    later to explore further and compare across different ages and genders as well as look at how different regions across the United States
    differ in opinion. As a note, the survey only provided the option for male or female, so we unfortunately do not have data for other options at this time.
    Next, select one of the odds. The odds represent the chance that humanity will be destroyed. For example, 1 in 3 implies that one out
    of every three times, humanity will be completely and totally annihilated by the advancement we achieved. However, the other two times the
    result will be a world with no famine, disease, poverty, suffering, or death - in short a utopia. You decide whether we should continue down the path or not.
    """
    # Set up the radio button theme
    st.write(
        "<style>div.row-widget.stRadio > div{flex-direction:row;}</style>",
        unsafe_allow_html=True,
    )
    st.write(
        '<style>div.row-widget.stRadio > div[role="radiogroup"] > label[data-baseweb="radio"] > div:first-child {background-color:'
        + COLOR_SCHEME_BLUE
        + "};</style>",
        unsafe_allow_html=True,
    )
    st.markdown(
        """ <style>
        div[role="radiogroup"] > :first-child{
        display: none !important;
        }
        </style> """,
        unsafe_allow_html=True,
    )
    # Color for MultiSelect
    st.write(
        '<style>div[data-baseweb="select"] div {background-color:'
        + COLOR_SCHEME_BLUE
        + ";}</style>",
        unsafe_allow_html=True,
    )
    # Let the user choose their fate
    selectDf = pd.read_csv(DATA_PATH + "grouped-Ending-Humanity.csv")
    user_selection(selectDf)
    # Sidebar
    st.sidebar.header("What Would You Choose?")
    st.sidebar.write(
        "Explore more about how the United States public responded to the survey. You can look at region maps as well as "
        + "compare across different ages and genders."
    )
    # Region visualization: map each odds label to its dataset and load the
    # CSV only for the selected option (replaces the five eager reads plus
    # the if/elif dispatch chain).
    region_files = {
        "1 in 2": "regions2-Ending-Humanity.csv",
        "1 in 3": "regions3-Ending-Humanity.csv",
        "1 in 4": "regions4-Ending-Humanity.csv",
        "1 in 5": "regions5-Ending-Humanity.csv",
        "1 in 10": "regions10-Ending-Humanity.csv",
    }
    region_map_select = st.sidebar.selectbox(
        "Select one of the odds to see how different regions across the United States answered the survey question.",
        ("-", "1 in 2", "1 in 3", "1 in 4", "1 in 5", "1 in 10"),
    )
    if region_map_select != "-":
        """ """
        """
        Below is a map of the United States broken into regions to better observe how the distribution of beliefs about the path
        humanity should take changes as one traverses the country. It is interesting to see distinct differences in the regions that
        are in some cases fairly extreme. Various factors play a role in why these differences exist from ethnic backgrounds to religion.
        Colors indicate the proportion of the population in each region that believe we should **not** pursue advancement and jeopardize the human race.
        """
        region_df = pd.read_csv(DATA_PATH + region_files[region_map_select])
        st.write(regions_viz(region_df, region_map_select))
    # Comparing tool in the sidebar
    cdf = pd.read_csv(DATA_PATH + "grouped-Ending-Humanity.csv")
    cdf = prep_grouped_data(cdf)
    comparing_select_odd = st.sidebar.multiselect(
        "Select one or more odds to compare across.",
        ["1 in 2", "1 in 3", "1 in 4", "1 in 5", "1 in 10"],
    )
    if comparing_select_odd:
        comparing_select_age = st.sidebar.multiselect(
            "Select one or more age groups to compare across.",
            ["18 to 29", "30 to 44", "45 to 59", "60+"],
        )
        comparing_select_gender = st.sidebar.multiselect(
            "Select one or both genders to compare across.", ["Female", "Male"]
        )
        if comparing_select_age and comparing_select_gender:
            """ """
            """
            The graphs here provide further exploration to the bar graph seen after you made your decision about pursuing advancement.
            Now you can investigate how age, gender, and the odds themselves influence how individuals make their decisions.
            """
            # once all options are present, produce the corresponding chart
            comparechart = create_compare_chart(
                cdf, comparing_select_odd, comparing_select_age, comparing_select_gender
            )
            st.write(comparechart)
def render_prospects_chapter():
    """
    Render the prospects chapter.

    Emits the chapter heading (Streamlit "magic") and delegates to the expert
    sentiment and user choice sections.
    """
    # Fixed displayed-text typo: "superintellignce" -> "superintelligence".
    """
    ---
    # Prospects for Machine Intelligence
    We have not yet achieved superintelligence. We may never achieve it in any meaningful way. Or, we may be a few years from a breakthrough.
    The real question is what are the consequences of achieving it. This section seeks to understand whether superintelligence is achievable
    and if it is, what will happen to the human race. Moreover, we seek to understand if the pursuit of such technologies is worth it.
    This section considers these questions in the form of two different surveys conducted in 2016 and 2017. The 2016 survey asked a selection
    of experts about the development of high level machine intelligence and the 2017 survey surveyed the general public in the United States
    and had participants consider if the pursuit of super intelligence was worth the potential cost.
    """
    render_expert_sentiment_section()
    render_user_choice_section()
# -----------------------------------------------------------------------------
# Chapter: Responses and Future Directions
# -----------------------------------------------------------------------------
def render_world_powers_section():
    """
    Render the world powers section of the responses chapter.

    Shows EU representatives' appetite for AI regulation alongside each
    country's security-incident and safe-tech-training rates (three linked
    Altair charts with a shared country selection), plus an optional US
    self-driving-car case study in the sidebar.
    """
    """
    ### Global Response and Status
    What are the sentiments of the world powers, specifically the EU Countries regarding artificial intelligence regulation policy? Furthermore, what are they doing about it?
    """
    # Read in data as csv
    COUNTRY_VOTES = pd.read_csv("data/CountryVotesonAI.csv")
    # Combine totally agree/tend agree to one agree column for user clarity
    COUNTRY_VOTES["Agree"] = (
        COUNTRY_VOTES["Tend_Agree"] + COUNTRY_VOTES["Totally_Agree"]
    )
    # Combine totally disagree/tend disagree to one disagree column for user clarity
    COUNTRY_VOTES["Disagree"] = (
        COUNTRY_VOTES["Tend_Disagree"] + COUNTRY_VOTES["Totally_Disagree"]
    )
    # Rename columns for ease of reference
    COUNTRY_VOTES = COUNTRY_VOTES.rename(
        columns={
            "Totally_Agree": "Totally Agree",
            "Tend_Agree": "Tend Agree",
            "Tend_Disagree": "Tend Disagree",
            "Totally_Disagree": "Totally Disagree",
            "Security_Incident": "Security Incident",
            "Safe_Tech_Policies": "Safe Tech Training",
        }
    )
    # Complement of the training rate: percent of companies WITHOUT training.
    COUNTRY_VOTES["No Safe Tech Training"] = 100.0 - COUNTRY_VOTES["Safe Tech Training"]
    # Clicking a bar in the overview chart filters the two detail charts.
    country_selector = alt.selection_multi(
        fields=["Country"], init=[{"Country": "Hungary"}]
    )
    all_country_data = (
        alt.Chart(COUNTRY_VOTES)
        .mark_bar()
        .encode(
            tooltip=[alt.Tooltip("Country:N", title="Country")],
            x=alt.X(
                "Agree:Q",
                title="Percent Representatives that Want AI Regulation",
                scale=alt.Scale(domain=[0, 100]),
            ),
            y=alt.Y("Country:N", title="Country"),
            color=alt.condition(
                country_selector, alt.value("#ffbc79"), alt.value("#d9d9d9")
            ),
        )
        .add_selection(country_selector)
        .interactive()
    )
    # Detail chart 1: companies in the selected country with a security incident.
    by_country1 = (
        alt.Chart(COUNTRY_VOTES)
        .transform_fold(["Security Incident"], as_=["Digital Safety Metric", "% Value"])
        .mark_bar()
        .encode(
            y=alt.Y(
                "% Value:Q",
                title="% of Companies with Tech Security Incident",
                scale=alt.Scale(domain=[0, 100]),
            ),
            tooltip=[alt.Tooltip("Country:N", title="Country")],
            color=alt.Color(
                "Digital Safety Metric:N",
                scale=alt.Scale(domain=["Security Incident"], range=["#bcdeea"]),
            ),
        )
        .transform_filter(country_selector)
        .interactive()
    )
    # Detail chart 2: companies in the selected country without safe-tech training.
    by_country3 = (
        alt.Chart(COUNTRY_VOTES)
        .transform_fold(
            ["No Safe Tech Training"], as_=["Digital Safety Metric", "% Value"]
        )
        .mark_bar()
        .encode(
            y=alt.Y(
                "% Value:Q",
                title="% of Companies without Safe Tech Training",
                scale=alt.Scale(domain=[0, 100]),
            ),
            tooltip=[alt.Tooltip("Country:N", title="Country")],
            color=alt.Color(
                "Digital Safety Metric:N",
                scale=alt.Scale(domain=["No Safe Tech Training"], range=["#bcdeea"]),
            ),
        )
        .transform_filter(country_selector)
        .interactive()
    )
    # Overview and the two detail charts rendered side by side.
    joint_chart = all_country_data | by_country1 | by_country3
    st.write(joint_chart)
    # Fixed displayed-text typos: "play in increasingly" -> "play an
    # increasingly", "saftey" -> "safety".
    """
    From the chart above we can see that nearly every major EU power agrees that AI imposes enough risk that it should be regulated. There is not one outlier nation that does not think that AI and Robotics are beginning to play an increasingly dangerous or volatile role. However, when we inspect each country we can see that each country has had a non-trivial amount of cyber security incidents. Additionally, a large handful of corporations in each country do not have any kind of digital safety training at work whatsoever. Does this not foreshadow the next catastrophic event? AI is increasing at a rapid rate, yet safety does not seem to be a major concern. Select the US Case Study option on the sidebar to the left to view first hand how lack of regulation is letting AI get out of hand.
    """
    # Fixed sidebar header typo: "Respons" -> "Response" (now consistent with
    # the section heading above).
    st.sidebar.header("Global Response and Status")
    case_study_select = st.sidebar.radio(
        "Select the case study to learn more.", ("-", "US Case Study")
    )
    if case_study_select == "US Case Study":
        """
        ### US Case Study: Self-Driving Cars and their Lack of Regulation
        Now that we've taken a look at what our counterparts in the EU believe, let's take a look back at America and a specific policy-case, "Self-Driving Cars". Traditionally, the regulation governing what cars are allowed to make it out onto the roadways is the FMVSS or Federal Motor Vehicle Safety Standards. These laws cover a wide array of aspects. However, they cover nothing to do with self-driving cars such as cyber-attacks, proper testing, software updates, emergency scenarios, etc. In September 2017, the House of Representatives did propose additional requirements to the FMVSS for autonomous cars, however, this proposal did not make it past the Senate. A Presidential Administration largely focused on minimal regulation certainly did not help the issue.
        """
        """
        But who would possibly want less regulation on such a dangerous endeavor?
        """
        """
        The answer is businesses and the locations that want to attract them. Two of the hotbeds for self-driving car research are Arizona and our own state Pennsylvania. Contrary to popular belief, the location in Pittsburgh is not solely due to NREC or CMU, but rather Pennsylvania's loose regulations on self-driving car testing.
        """
        """
        You may be surprised to find that several incidents have occurred right here in Pittsburgh including an event in 2018 when a self-driving car slammed into an unsuspecting driver. The incidents get much more grave. Looking to make self-driving cars cheaper, Uber disabled what is called a LIDAR system in one of their Arizona vehicles. Without the costly system the car hit and killed a pedestrian.
        """
        """
        So when is enough? Clearly, these businesses only have testing, training, and profits in mind. Perhaps, the local governments do too. Regulation is needed on a larger scale, however, it is largely non-existent.
        """
        crash = Image.open("img/Ubercrash.jpg")
        st.image(crash)
def render_responses_chapter():
    """
    Render the responses chapter.

    The bare triple-quoted string below is Streamlit "magic": a literal
    expression at function level is written to the page as markdown, so it
    must stay in place (it is not a stray docstring).
    """
    """
    ---
    # Responses to Machine Intelligence
    We have established the sentiments of both experts and members of the public regarding the future prospects of machine intelligence, but what is actually being done to address the inherent issues?
    """
    # Delegates the body of the chapter to the world-powers section renderer.
    render_world_powers_section()
# -----------------------------------------------------------------------------
# Conclusion
# -----------------------------------------------------------------------------
def render_conclusion_content():
    """
    Render the conclusion content.

    The bare triple-quoted string below is Streamlit "magic" — it is
    rendered to the page as markdown, not a docstring.
    """
    """
    ---
    # Conclusion
    This concludes our exploration of machine intelligence. In the narrative above, we saw some of major milestones in the technology's development, along with some accompanying evidence that suggests the disruptive potential it has. Next, we compared coverage of machine intelligence research in popular media with its treatment in academic settings. We then explored the sentiments regarding the long-term prospects of machine intelligence from both expert and non-expert sources. Finally, we examined the current state of the response to advances in machine intelligence in the form of national-level policies.
    """
# -----------------------------------------------------------------------------
# References
# -----------------------------------------------------------------------------
def render_references():
    """
    Render the references for the project.

    The bare triple-quoted string below is Streamlit "magic" — the whole
    reference list is rendered to the page as markdown.
    """
    """
    The datasets that we utilized to generate the visualizations in our application are listed below.
    - [Journal of Artificial Intelligence Research](https://www.jair.org/index.php/jair).
    - Muller, Vincent and Bostrom, Nick. [Future Progress in Artificial Intelligence: A Survey of Expert Opinion](https://www.nickbostrom.com/papers/survey.pdf). (2016).
    - [Nature](https://www.nature.com/).
    - Webb, Rick. [Superintelligence and Public Opinion.](https://shift.newco.co/2017/04/24/superintelligence-and-public-opinion/). NewCo Shift. (2017).
    - Zhang, Baobao and Dafoe, Allan. [Artificial Intelligence, American Attitudes and Trends](https://governanceai.github.io/US-Public-Opinion-Report-Jan-2019/addresults.html). 2019.
    The general references that we utilized during our research for this project are listed below.
    - Armstrong, Stuart and Bostrom, Nick and Shulman, Carl. [Racing to the Precipice: A Model of Artificial Intelligence Development](http://www.fhi.ox.ac.uk/wp-content/uploads/Racing-to-the-precipice-a-model-of-artificial-intelligence-development.pdf). 2013.
    - Bostrom, Nick. [The Superintelligent Will: Motivation and Instrumental Rationality in Advanced Artificial Agents](https://www.nickbostrom.com/superintelligentwill.pdf). 2012.
    - Bostrom, Nick. [Existential Risks: Analyzing Human Extinction Scenarios and Related Hazards](https://www.nickbostrom.com/existential/risks.html). 2002.
    - Bostrom, Nick. [The Future of Machine Intelligence](https://www.youtube.com/watch?v=8xwESiI6uKA). 2017.
    - Bostrom, Nick. _Superintelligence: Paths, Dangers, Strategies_ 2014.
    - Bostrom, Nick. [What Happens When Our Computers Get Smarter Than We Are?](https://www.youtube.com/watch?v=MnT1xgZgkpk&t=326s) TED Talk. 2015.
    - [EU Data Eurostat](https://ec.europa.eu/eurostat/web/main/data/database). 2021.
    - Harris, Sam. [Can We Build AI Without Losing Control Over It?](https://www.youtube.com/watch?v=8nt3edWLgIg) TED Talk. 2016.
    - Harris, Sam and Russel, Stuart. [The Dawn of Artificial Intelligence](https://samharris.org/podcasts/the-dawn-of-artificial-intelligence1/). 2016.
    - Harris, Sam and Yudkowsky, Eliezer. [AI: Racing Toward the Brink](https://intelligence.org/2018/02/28/sam-harris-and-eliezer-yudkowsky/). 2018.
    - [Machine Intelligence Research Institute](https://intelligence.org/).
    - [Internet Movie Database (IMDb)](https://www.imdb.com/).
    - McCarthy et al. _A Proposal for the Dartmouth Summer Research Project on Artificial Intelligence._ 1955.
    - Reber, Paul. [Human Brain Storage](https://www.scientificamerican.com/article/what-is-the-memory-capacity/). 2010.
    - Timothy Lee B. [Self-Driving Car Article](https://arstechnica.com/cars/2018/04/the-way-we-regulate-self-driving-cars-is-broken-heres-how-to-fix-it/). 2018.
    - Yudkowsky, Eliezer. [AI Alignment: Why it's Hard, and Where to Start](https://www.youtube.com/watch?v=EUjc1WuyPT8).
    """
# -----------------------------------------------------------------------------
# Main
# -----------------------------------------------------------------------------
def main():
    """Render every chapter of the application in narrative order."""
    # NOTE: The order of this tuple defines the order in which content is
    # rendered in the application. For sanity's sake, please keep it in
    # sync with the high-level order of the chapters defined above.
    chapters = (
        render_introduction_content,
        render_definition_chapter,
        render_paradigm_chapter,
        render_perceptions_chapter,
        render_prospects_chapter,
        render_responses_chapter,
        render_conclusion_content,
        render_references,
    )
    for render_chapter in chapters:
        render_chapter()
# -----------------------------------------------------------------------------
# Application Entry Point
# -----------------------------------------------------------------------------
# Run the Streamlit application when this file is executed directly.
if __name__ == "__main__":
    main()
| {"/streamlit_app.py": ["/support/topic_modeling.py", "/support/sentiment_analysis.py"]} |
73,493 | CMU-IDS-2021/fp--05839-abby-jeff-kyle-will | refs/heads/main | /support/word_cloud.py | # word_cloud.py
# Word cloud generation.
import pandas as pd
import altair as alt
from textblob import TextBlob
import matplotlib.pyplot as plt
import streamlit as st
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
def buildWordCloudText(data):
    """Concatenate the ``text`` column of *data* into one string.

    data: DataFrame with a ``text`` column of strings.
    Returns the concatenation of every entry, in row order.

    The original built the string with ``+=`` inside an index loop, which
    is quadratic and breaks on non-default indexes; ``str.join`` over the
    column is linear and index-agnostic.
    """
    return "".join(data.text)
def getData(filename):
    """Load the JSON file at *filename* into a pandas DataFrame."""
    return pd.read_json(filename)
def getWordCloud(text):
    """Build a word-cloud figure from *text* and return the matplotlib Figure."""
    cloud_options = {
        "stopwords": STOPWORDS,
        "max_font_size": 50,
        "max_words": 150,
        "background_color": "white",
        "collocations": False,
    }
    cloud = WordCloud(**cloud_options).generate(text)
    figure = plt.figure()
    plt.imshow(cloud, interpolation="bilinear")
    plt.axis("off")
    return figure
def getSentiment(data):
    """Score every document in *data* sentence by sentence.

    data: DataFrame with a ``text`` column.
    Returns one list per document, each containing the TextBlob polarity
    score of every sentence in that document.
    """
    per_document = []
    for document in data.text:
        blob = TextBlob(document)
        per_document.append(
            [sentence.sentiment.polarity for sentence in blob.sentences]
        )
    return per_document
def getAvgSentiment(sentiments, data):
    """Average each document's sentence polarities into a percentage score.

    sentiments: list of per-document lists of polarity scores, as produced
        by getSentiment.
    data: DataFrame with a ``title`` column aligned with *sentiments*.

    Returns a DataFrame with ``Title`` and ``Sentiment`` columns, where
    Sentiment is the mean polarity scaled to [-100, 100]. A document with
    no sentences scores 0.0 — the original raised ZeroDivisionError there.
    """
    docSentiments = []
    articleTitles = []
    for i, scores in enumerate(sentiments):
        if scores:
            avg = (sum(scores) / len(scores)) * 100
        else:
            avg = 0.0  # empty document: treat as neutral instead of crashing
        docSentiments.append(avg)
        articleTitles.append(data.title[i])
    pairs = list(zip(articleTitles, docSentiments))
    return pd.DataFrame(pairs, columns=["Title", "Sentiment"])
def buildChart(data):
    """Build a horizontal bar chart of per-article media sentiment.

    Positive articles render in steel blue and negative ones in orange;
    the x-axis is pinned to the full [-100, 100] sentiment range.
    """
    x_axis = alt.X(
        "Sentiment:Q",
        title="Media Sentiment of Articles about AI",
        scale=alt.Scale(domain=(-100, 100)),
    )
    bar_color = alt.condition(
        alt.datum.Sentiment > 0, alt.value("steelblue"), alt.value("orange")
    )
    chart = alt.Chart(data).mark_bar()
    chart = chart.encode(x_axis, y="Title:O", color=bar_color)
    return chart.properties(width=600)
| {"/streamlit_app.py": ["/support/topic_modeling.py", "/support/sentiment_analysis.py"]} |
73,494 | CMU-IDS-2021/fp--05839-abby-jeff-kyle-will | refs/heads/main | /support/jair_scraper.py | # jair_scraper.py
# A web scraper for the Journal of Artificial Intelligence Research.
"""
In order to run this webscraper you will need all of the packages installed. You will
also need a chromedriver that matches your version of Google Chrome (REQUIRED).
You can get a chromedriver from: https://chromedriver.chromium.org/downloads.
Additionally, if you are using a mac you
must bring the chromedriver out of quarantine with the following command.
$ xattr -d com.apple.quarantine ./chromedriver
For this code the chromedriver is stored in the same directory as ./chromedriver
The format of the page being extracted is as follows. This is important to understand in order to understand
the methods below:
MAIN PAGE - VOLUME <Link>
- PAPER
-Title
-MetaData <Link>
-Date Published
Abstract
-PDF <Link>
-Download PDF
- VOLUME <Link>
- PAPER
-Title
-MetaData <Link>
-Date Published
Abstract
-PDF <Link>
-Download PDF
- VOLUME <Link>
- PAPER
-Title
-MetaData <Link>
-Date Published
Abstract
-PDF <Link>
-Download PDF
...
"""
import sys
import bs4
from bs4 import BeautifulSoup
from selenium import webdriver
from pdfminer import high_level
from pdfminer.high_level import extract_text
# path to chromedriver
chromedriver = "./chromedriver"
# needed to initialize the chromedriver with the options we desire
def initialize_chromedriver():
    """Create and return a headless Chrome WebDriver configured for scraping.

    Uses the module-level ``chromedriver`` path ("./chromedriver").

    NOTE(review): the user-agent argument wraps the value in doubled quotes
    (""Mozilla...""), so the header as sent literally contains quote
    characters — confirm this is intended.
    NOTE(review): ``executable_path`` is deprecated in Selenium 4, which
    expects a ``Service`` object instead — verify the pinned Selenium version.
    """
    options = webdriver.ChromeOptions()
    options.add_argument("--headless")
    # Sandbox flags are commonly needed to run headless Chrome in containers.
    options.add_argument("--no-sandbox")
    options.add_argument("--disable-setuid-sandbox")
    options.add_argument(
        '--user-agent=""Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36""'
    )
    return webdriver.Chrome(executable_path=chromedriver, options=options)
# downloads a pdf to local and extracts text (not currently used)
def download_pdf_and_convert_to_text(full_url):
    """Download the PDF at *full_url* and return its extracted text.

    NOTE: not called by main(). The original implementation referenced an
    undefined ``response`` variable (a NameError at runtime) and wrote to a
    hard-coded /Users/will/Downloads path; this version fetches the file
    with urllib into the working directory instead.
    """
    import urllib.request  # local import: only this unused helper needs it

    pdf_name = full_url.split("/")[-1]
    print(pdf_name)
    with urllib.request.urlopen(full_url) as response:
        with open(pdf_name, "wb") as fh:
            fh.write(response.read())
    print("Downloaded and Stored PDF: " + pdf_name)
    return extract_text(pdf_name)
# extract all abstracts from each paper page
def extract_texts(links):
    """Download each linked PDF and return the extracted text of every one."""
    return [download_pdf_and_convert_to_text(link) for link in links]
# extract abstract from paper page meta data
def extract_abstract(paper_meta_url):
    """Fetch a paper's metadata page and return its abstract text.

    Reads the module-level ``driver``; strips tabs and newlines from the
    abstract before returning it.
    """
    driver.get(paper_meta_url)
    soup = BeautifulSoup(driver.page_source, "html.parser")
    raw = soup.find_all("div", class_="article-abstract")[0].text
    return raw.replace("\t", "").replace("\n", "")
# downloads pdf to local
def download_pdf_to_local(full_url):
    """Download the PDF at *full_url* into the working directory.

    Returns the local file name (the last path component of the URL).

    Fixes from the original: ``urllib`` was never imported at module level
    (NameError at runtime), and the output file is now closed via ``with``
    even if the write fails.
    """
    import urllib.request  # not imported at module top; keep local to this helper

    pdf_name = full_url.split("/")[-1]
    print(pdf_name)
    with urllib.request.urlopen(full_url) as response:
        print("PDF Downloaded")
        with open(pdf_name, "wb") as fh:
            fh.write(response.read())
    print("Downloaded and Stored PDF: " + pdf_name)
    return pdf_name
# extract all dates for all papers from each paper page
def extract_date(paper_meta_url):
    """Fetch a paper's metadata page and return its publication date string.

    Reads the module-level ``driver``; strips the "Published:" label, tabs
    and newlines from the field before returning it.
    """
    driver.get(paper_meta_url)
    soup = BeautifulSoup(driver.page_source, "html.parser")
    raw = soup.find_all("div", class_="list-group-item date-published")[0].text
    return raw.replace("Published:", "").replace("\t", "").replace("\n", "")
# extract the pdf links for all articles from the volume page
def extract_pdf_links(volume_page_url):
    """Return the PDF link of every article on a volume page."""
    driver.get(volume_page_url)
    soup = BeautifulSoup(driver.page_source, "html.parser")
    anchors = soup.find_all("a", class_="galley-link btn btn-primary pdf")
    return [anchor.get("href") for anchor in anchors]
# extract all titles for all papers from the volume page
def extract_titles(volume_page_url):
    """Return the title of every paper on a volume page, whitespace-stripped."""
    driver.get(volume_page_url)
    soup = BeautifulSoup(driver.page_source, "html.parser")
    headings = soup.find_all("h3", class_="media-heading")
    return [h.text.replace("\t", "").replace("\n", "") for h in headings]
# extract the links for all paper page meta data from the volume page
def extract_meta_page_links(volume_page_url):
    """Return the metadata-page link of every paper on a volume page."""
    driver.get(volume_page_url)
    soup = BeautifulSoup(driver.page_source, "html.parser")
    headings = soup.find_all("h3", class_="media-heading")
    return [h.find("a").get("href") for h in headings]
# extract the links for all volume pages from the main site
def extract_volume_page_links(page_url):
    """Return the link of every volume listed on the main archive page."""
    driver.get(page_url)
    soup = BeautifulSoup(driver.page_source, "html.parser")
    headings = soup.find_all("h2", class_="media-heading")
    return [h.find("a").get("href") for h in headings]
# extract all dates, abstracts from all paper page meta data given links
def extract_all_dates_and_abstracts(volume_page_url):
    """Visit every paper's metadata page in a volume.

    Returns two parallel lists: publication dates and abstracts, in the
    order the papers appear on the volume page.
    """
    dates = []
    abstracts = []
    for link in extract_meta_page_links(volume_page_url):
        dates.append(extract_date(link))
        abstracts.append(extract_abstract(link))
    return dates, abstracts
# main runner for volume, extracts all data from a volume
def extract_volume_data(volume_page_url):
    """Collect dates, abstracts, and titles for every paper in one volume.

    Returns three parallel lists. Full-text extraction is disabled (see
    the commented-out call below); pdf_links is still collected for it,
    and fetching it also re-drives the browser, so removing the call
    would change page traffic.
    """
    dates, abstracts = extract_all_dates_and_abstracts(volume_page_url)
    pdf_links = extract_pdf_links(volume_page_url)
    titles = extract_titles(volume_page_url)
    # texts = extract_texts(pdf_links)
    return dates, abstracts, titles
# auxiliary helper used to combine two lists
def combine_list(l1, l2):
    """Append every element of *l2* onto *l1* in place and return *l1*.

    Equivalent to ``l1.extend(l2)`` (C-speed, instead of the original
    per-element Python loop); kept as a function because main() uses it
    to merge per-volume results.
    """
    l1.extend(l2)
    return l1
# parse year from paper date
def extract_year(date):
    """Return the last four characters of *date* — the year, given the
    "Month Day, YYYY"-style strings scraped from JAIR paper pages."""
    year = date[-4:]
    return year
# turn into list of json for processing
def turn_into_json_list(titles, dates, abstracts):
    """Zip parallel title/date/abstract lists into a list of record dicts.

    Each record has "Title", "Date" (year only, via extract_year) and
    "Text" keys. Fixes from the original: the local variable no longer
    shadows the builtin ``json`` name, and ``zip`` stops at the shortest
    list instead of raising IndexError on mismatched lengths.
    """
    return [
        {"Title": title, "Date": extract_year(date), "Text": abstract}
        for title, date, abstract in zip(titles, dates, abstracts)
    ]
def corpus_by_year(json_list):
    """Group abstracts by publication year.

    json_list: records with "Date" and "Text" keys (from turn_into_json_list).
    Returns a dict mapping year -> all abstracts for that year joined with
    single spaces.

    Fixes from the original: removed the leftover debug ``print(year)`` and
    the local variable shadowing the builtin ``json`` name.
    """
    grouped = {}
    for record in json_list:
        year = record["Date"]
        if year in grouped:
            grouped[year] = grouped[year] + " " + record["Text"]
        else:
            grouped[year] = record["Text"]
    return grouped
def title_by_year(json_list):
    """Group paper titles by publication year.

    json_list: records with "Date" and "Title" keys (from turn_into_json_list).
    Returns a dict mapping year -> all titles for that year joined with
    single spaces.

    Fixes from the original: removed the leftover debug ``print(year)`` and
    the local variable shadowing the builtin ``json`` name.
    """
    grouped = {}
    for record in json_list:
        year = record["Date"]
        if year in grouped:
            grouped[year] = grouped[year] + " " + record["Title"]
        else:
            grouped[year] = record["Title"]
    return grouped
def all_corpus(json_list):
    """Concatenate every record's "Text" field into one string.

    Each text is prefixed with a single space, so a non-empty result always
    begins with " " — this matches the original accumulation exactly.
    """
    return "".join(" " + record["Text"] for record in json_list)
def main():
    """Scrape every JAIR volume and write the aggregated corpora to disk.

    Side effects: drives a headless Chrome browser and writes four files
    (output_abstract.txt, whole_corpus.txt, corpus_by_year.txt,
    title_by_year.txt) into the working directory. Returns None, which
    sys.exit interprets as exit status 0.
    """
    # BUG FIX: every extract_* helper reads a module-level ``driver``, but
    # the original assigned it as a *local* here, so the helpers raised
    # NameError. Declare it global so the helpers can see it.
    global driver

    page_url = "https://jair.org/index.php/jair/issue/archive"
    driver = initialize_chromedriver()

    # Walk every volume and accumulate the per-volume results.
    volume_links = extract_volume_page_links(page_url)
    total_dates = []
    total_titles = []
    total_texts = []
    for volume_link in volume_links:
        dates, abstracts, titles = extract_volume_data(volume_link)
        total_titles = combine_list(total_titles, titles)
        total_dates = combine_list(total_dates, dates)
        total_texts = combine_list(total_texts, abstracts)

    json_list = turn_into_json_list(total_titles, total_dates, total_texts)

    # "with" guarantees each output file is closed even if a write fails.
    with open("output_abstract.txt", "w") as out:
        out.write(str(json_list))
    with open("whole_corpus.txt", "w") as out:
        out.write(str(all_corpus(json_list)))
    with open("corpus_by_year.txt", "w") as out:
        out.write(str(corpus_by_year(json_list)))
    with open("title_by_year.txt", "w") as out:
        out.write(str(title_by_year(json_list)))
# Run the scraper and propagate its return code to the shell.
if __name__ == "__main__":
    sys.exit(main())
| {"/streamlit_app.py": ["/support/topic_modeling.py", "/support/sentiment_analysis.py"]} |
73,495 | CMU-IDS-2021/fp--05839-abby-jeff-kyle-will | refs/heads/main | /support/topic_modeling.py | # topic_modeling.py
# Topic model analysis for professional articles on machine intelligence.
import numpy as np
import pandas as pd
import altair as alt
import streamlit as st
def buildAcademicData(data):
    """Reshape {year: [count_0..count_4]} data into a topics-by-years frame.

    data: mapping (or DataFrame) whose keys are years and whose values are
        indexable sequences of five per-topic counts.
    Returns a DataFrame with rows 0-4 (one per tracked topic), one column
    per year in ascending order, plus a final "Topics" column holding the
    topic names.
    """
    topic_names = [
        "Language Models",
        "Cloud-Based ML Frameworks",
        "AI Based Multi-Lingual Translation",
        "Autonomous AI Decision Making",
        "Multi-Agent Pathfinding",
    ]
    years = sorted(data.keys())
    # Row t holds topic t's count for every year, in year order — this is
    # the transpose the original built via five parallel lists.
    rows = [[data[year][t] for year in years] for t in range(len(topic_names))]
    frame = pd.DataFrame(rows, columns=years)
    frame["Topics"] = topic_names
    return frame
def academic():
    """Render the yearly AI-research topic counts as an interactive bar chart.

    Loads pre-computed topic counts from data/academicTopics.txt, lets the
    user pick a year with a Streamlit slider, and plots the five tracked
    topics for that year.
    """
    # Slider options: keys double as both label and value.
    years = {
        "2013": "2013",
        "2014": "2014",
        "2015": "2015",
        "2016": "2016",
        "2017": "2017",
        "2018": "2018",
        "2019": "2019",
        "2020": "2020",
        "2021": "2021",
    }
    data = pd.read_json("data/academicTopics.txt")
    acData = buildAcademicData(data)
    year = st.select_slider(
        "Select the year to see the popular topics in AI research.", list(years.keys())
    )
    # NOTE(review): indexing with int(year) assumes the year columns carry
    # integer labels (pd.read_json may parse the JSON keys as ints) —
    # confirm; string-labelled columns would raise KeyError here.
    nData = acData[int(year)]
    source = pd.DataFrame({"Number of Topics": nData.T, "Topics": acData["Topics"]})
    acChart = (
        alt.Chart(source)
        .mark_bar()
        .encode(
            x=alt.X("Number of Topics:Q", scale=alt.Scale(domain=(0, 20))),
            y=alt.Y("Topics:N"),
            color=alt.Color("Topics:N", scale=alt.Scale(scheme="redyellowblue")),
        )
        .properties(title="What topics are AI researchers focusing on?")
    )
    st.altair_chart(acChart, use_container_width=True)
def flipProfData(data):
    """Transpose the topics-by-years frame so each column is one topic.

    data: output of buildAcademicData (rows 0-4, year columns, plus a
        "Topics" column).
    Returns a DataFrame indexed by year with one named column per topic.
    """
    topic_names = [
        "Language Models",
        "Cloud-Based ML Frameworks",
        "AI Based Multi-Lingual Translation",
        "Autonomous AI Decision Making",
        "Multi-Agent Pathfinding",
    ]
    flipped = data.drop(columns="Topics").T
    flipped.columns = topic_names
    return flipped
def topicTimeline(topic):
    """Render a line chart of yearly article counts for one research *topic*.

    topic: one of the five topic names used as columns by flipProfData.
    Reads data/academicTopics.txt relative to the working directory.
    """
    unfixedData = pd.read_json("data/academicTopics.txt")
    data = buildAcademicData(unfixedData)
    data = flipProfData(data)
    # Series of per-year counts for the requested topic.
    nData = data[topic]
    years = ["2013", "2014", "2015", "2016", "2017", "2018", "2019", "2020", "2021"]
    source = pd.DataFrame({"Years": years, "Number of Articles": nData.T})
    line = (
        alt.Chart(source)
        # NOTE(review): "fcac64" lacks the leading '#' of a hex CSS color —
        # confirm Vega accepts it; otherwise the line silently falls back
        # to the default color.
        .mark_line(color="fcac64")
        .encode(x="Years", y="Number of Articles:Q")
        .properties(title="What topics are AI researchers focusing on?")
    )
    st.write(f"Lets look at {topic} research over time.")
    st.altair_chart(line, use_container_width=True)
| {"/streamlit_app.py": ["/support/topic_modeling.py", "/support/sentiment_analysis.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.