index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
63,940 | gr8scott88/FFLStats4 | refs/heads/master | /Main.py | import configparser
from datetime import datetime
import math
from data_vis.LeagueVisGen import *
from models.League import League

# Entry-point script: figures out the current fantasy week from the season
# start date in config.ini, refreshes both leagues' data, and renders the
# season's comparison plots to disk.
config = configparser.ConfigParser()
config.read('config.ini')
# Season start date, e.g. '09/05/19' (month/day/2-digit-year).
start_week = config['Season']['StartWeek']
dt = datetime.strptime(start_week, '%m/%d/%y')
td = datetime.today()
elapsed = td - dt
# Whole weeks elapsed since the season started.
current_week = math.floor(elapsed.days/7)
current_week = 16 if current_week > 16 else current_week # cap at 16 (regular-season length)
# Yahoo league ids for the two tracked leagues.
AFC_id = config['AFC']['id']
NFC_id = config['NFC']['id']
AFC = League(AFC_id, 'AFC')
NFC = League(NFC_id, 'NFC')
# Scrape/caches player and team scores for every week up to current_week.
AFC.update(current_week)
NFC.update(current_week)
# plot_cum_real_score_by_week(AFC)
# plot_player_breakdown_for_all_teams(AFC)
# plot_player_breakdown_for_season(AFC)
should_save = True
plot_draft_value_by_team(AFC, 5, 12, save=should_save)
plot_draft_value_by_team(NFC, 5, 12, save=should_save)
plot_cum_real_score_by_week(AFC, save=should_save)
plot_cum_real_score_by_week(NFC, save=should_save)
plot_cum_real_vs_proj_by_week(AFC, save=should_save)
plot_cum_real_vs_proj_by_week(NFC, save=should_save)
# Tag each league's scores so the combined frame can be grouped by league.
AFC.score_info[DATACONTRACT.LEAGUE_NAME] = 'AFC'
NFC.score_info[DATACONTRACT.LEAGUE_NAME] = 'NFC'
# NOTE(review): DataFrame.append is removed in pandas 2.0 — this requires an
# older pandas; pd.concat([...]) is the forward-compatible equivalent.
Total_DF = AFC.score_info.append(NFC.score_info)
plot_real_score_by_league_through_week(Total_DF, 5, save=should_save)
plot_cum_real_score_by_league_through_week(Total_DF, 5, save=should_save)
# Total_DF.sort_values(by=['Week'])
# g = Total_DF.groupby(['League', 'Week'])['RealScore'].sum().unstack('League')
# g.plot()
| {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,941 | gr8scott88/FFLStats4 | refs/heads/master | /data_vis/LeagueVisGen.py | from models.League import League
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import time
import os
from matplotlib.cm import get_cmap
from models import DATACONTRACT
import numpy as np
from loguru import logger
# Shared plotting setup: one palette for every chart in this module.
color_wheel = "Accent"  # matplotlib colormap name used for all league plots
cmap = get_cmap(color_wheel)
colors = cmap.colors  # applied per-axes via ax.set_prop_cycle(color=colors)
sns.set_context('talk')  # larger seaborn fonts, suited for shared/presented figures
def plot_real_score_by_week(league: League, save=False):
    """Plot each team's real (actual) score per week, one line per team.

    Args:
        league: League whose score_info/league_info frames supply the data.
        save: when True, write the figure via save_plot instead of showing it.
    """
    merged = pd.merge(league.score_info, league.league_info, on='TeamID', how='left')
    grouped = merged.groupby('TeamName')
    fig, ax = plt.subplots(figsize=(15, 7))
    # Use the shared module palette so all plots are visually consistent
    # (the sibling cumulative plot already does this).
    ax.set_prop_cycle(color=colors)
    for name, group in grouped:
        group.plot(x='Week', y='RealScore', ax=ax, label=name)
    plot_title = f'Real Score by Week for {league.name}'
    plt.title(plot_title)
    plt.legend(loc='upper left')
    if save:
        save_plot(league, plot_title)
    else:
        plt.show()
def plot_cum_real_score_by_week(league: League, save=False):
    """Plot each team's cumulative real score across weeks, one line per team.

    Args:
        league: League whose score_info/league_info frames supply the data.
        save: when True, write the figure via save_plot instead of showing it.
    """
    merged = pd.merge(league.score_info, league.league_info, on='TeamID', how='left')
    grouped = merged.groupby('TeamName')
    fig, ax = plt.subplots(figsize=(15, 7))
    ax.set_prop_cycle(color=colors)
    for name, group in grouped:
        # assign() returns a new frame instead of mutating the groupby slice,
        # which would raise pandas' SettingWithCopyWarning.
        group = group.assign(CumScore=group['RealScore'].cumsum())
        group.plot(x='Week', y='CumScore', ax=ax, label=name)
    plot_title = f'Cumulative Total Score by Week for {league.name}'
    plt.title(plot_title)
    plt.legend(loc='upper left')
    if save:
        save_plot(league, plot_title)
    else:
        plt.show()
def plot_real_vs_proj_by_week(league: League, save=False):
    """Plot the weekly projection error (real minus projected) per team.

    Args:
        league: League whose score_info/league_info frames supply the data.
        save: when True, write the figure via save_plot instead of showing it.
    """
    scores = pd.merge(league.score_info, league.league_info, on='TeamID', how='left')
    scores['Delta'] = scores['RealScore'] - scores['ProjScore']
    fig, ax = plt.subplots(figsize=(15, 7))
    for team_name, team_rows in scores.groupby('TeamName'):
        team_rows.plot(x='Week', y='Delta', ax=ax, label=team_name)
    plot_title = f'Estimation Error by Week for {league.name}'
    plt.title(plot_title)
    plt.legend(loc='upper left')
    if save:
        save_plot(league, plot_title)
    else:
        plt.show()
def plot_cum_real_vs_proj_by_week(league: League, save=False):
    """Plot each team's cumulative projection error (real minus projected).

    Args:
        league: League whose score_info/league_info frames supply the data.
        save: when True, write the figure via save_plot instead of showing it.
    """
    merged = pd.merge(league.score_info, league.league_info, on='TeamID', how='left')
    merged['Delta'] = merged['RealScore'] - merged['ProjScore']
    grouped = merged.groupby('TeamName')
    fig, ax = plt.subplots(figsize=(15, 7))
    for name, group in grouped:
        # assign() avoids mutating a groupby slice (SettingWithCopyWarning).
        group = group.assign(CumDelta=group['Delta'].cumsum())
        group.plot(x='Week', y='CumDelta', ax=ax, label=name)
    plot_title = f'Cumulative Estimation Error by Week for {league.name}'
    plt.title(plot_title)
    plt.legend(loc='upper left')
    if save:
        save_plot(league, plot_title)
    else:
        plt.show()
def plot_player_breakdown_for_all_teams(league: League, save=False):
    """Render the per-position score breakdown chart for every team.

    Iterates the player stats grouped by team unique id and delegates each
    (id, frame) pair to plot_player_breakdown_by_team_var.
    """
    for team in league.player_info.groupby('UniqueID'):
        plot_player_breakdown_by_team_var(league, team, save)
def plot_player_breakdown_by_team(league: League, team, save=False):
    """Stacked bar chart of one team's weekly score, split by lineup position.

    Args:
        league: League providing league_info for the team-name lookup.
        team: (UniqueID, DataFrame) pair as produced by groupby('UniqueID').
        save: when True, save the figure instead of displaying it.
    """
    uid, roster = team[0], team[1]
    name_rows = league.league_info.loc[league.league_info['UniqueID'] == uid, 'TeamName']
    team_name = name_rows.iloc[0]
    # Benched players do not contribute to the team's score.
    active = roster.loc[~roster['ActivePos'].isin(['BN'])]
    by_pos = active.groupby(['UniqueID', 'Week', 'ActivePos'])['RealScore'].sum().unstack('ActivePos')
    by_pos.plot(kind='bar', stacked=True)
    plot_title = f'Score Breakdown by Position for {team_name}'
    plt.title(plot_title)
    if save:
        save_plot(league, plot_title)
    else:
        plt.show()
def plot_player_breakdown_by_team_var(league: League, team, save=False):
    """Stacked bar chart of one team's weekly score split by lineup position.

    Variant of plot_player_breakdown_by_team with a fixed y-range and an
    external legend, sized for saving.

    Args:
        league: League providing league_info for the team-name lookup.
        team: (UniqueID, DataFrame) pair as produced by groupby('UniqueID').
        save: when True, save the figure instead of displaying it.
    """
    team_df = team[1]
    # Resolve the human-readable team name from the unique id.
    team_name = league.league_info.loc[league.league_info['UniqueID'] == team[0], 'TeamName'].iloc[0]
    # Bench and injured-reserve slots do not contribute to the weekly score.
    filtered = team_df.loc[~team_df['ActivePos'].isin(['BN', 'IR'])]
    grouped_by_id = filtered.groupby(['UniqueID'])
    for name, group in grouped_by_id:
        f = plt.figure(figsize=(20, 10))
        grouped = group.groupby(['Week', 'ActivePos'])['RealScore'].sum().unstack('ActivePos')
        grouped.plot(kind='bar', stacked=True, ax=f.gca())
        plot_title = f'Score Breakdown by Position for {team_name}'
        plt.title(plot_title)
        plt.ylim(0, 200)
        # Legend outside the axes; shrink the axes to make room for it.
        plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
        f.subplots_adjust(right=0.8)
        if save:
            # Save directly: the previous plt.show()-then-save sequence can
            # close the figure on interactive backends and save a blank image.
            save_plot(league, plot_title)
        else:
            plt.show()
def plot_player_breakdown_for_season(league: League, save=False):
    """Stacked bar chart of season-total score per team, split by position.

    Args:
        league: League whose player_info/league_info frames supply the data.
        save: when True, save the figure instead of displaying it.
    """
    f, ax = plt.subplots(figsize=(20, 10))
    ax.set_prop_cycle(color=colors)
    df = pd.merge(league.player_info, league.league_info, on='UniqueID')
    # Bench and injured-reserve slots never scored for the team.
    filtered = df.loc[~df['ActivePos'].isin(['BN', 'IR'])]
    grouped = filtered.groupby(['TeamName', 'ActivePos'])['RealScore'].sum().unstack('ActivePos')
    grouped.plot(kind='bar', stacked=True, ax=f.gca())
    plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
    plot_title = f'Score Breakdown by Position for {league.name}'
    plt.title(plot_title)
    # Leave room for the external legend and the slanted team-name labels.
    f.subplots_adjust(right=0.8)
    f.subplots_adjust(bottom=0.3)
    plt.xticks(rotation=30, ha='right')
    if save:
        # Save directly: showing first can leave a blank figure on
        # interactive backends (consistent with the other plot helpers).
        save_plot(league, plot_title)
    else:
        plt.show()
def plot_draft_value_by_team(league: League, max_draft, thru_week: int, save=False):
    """Pie chart of cumulative points scored by each team's top draft picks.

    Args:
        league: League whose player/draft/league frames supply the data.
        max_draft: include only players drafted in rounds 1..max_draft.
        thru_week: include scores only up to and including this week.
        save: when True, save the figure instead of displaying it.
    """
    # Work on a copy so league.draft_info is not mutated as a side effect.
    draft = league.draft_info[[DATACONTRACT.DRAFTORDER, DATACONTRACT.PLAYERNAME]].copy()
    draft[DATACONTRACT.DRAFTORDER] = pd.to_numeric(draft[DATACONTRACT.DRAFTORDER])
    data = league.player_info.merge(draft, on=DATACONTRACT.PLAYERNAME)
    order_filter = f'{DATACONTRACT.DRAFTORDER}<={max_draft}'
    week_filter = f'{DATACONTRACT.WEEK}<={thru_week}'
    filtered = data.query(order_filter)
    filtered = filtered.query(week_filter)
    draft_scores = filtered.groupby(DATACONTRACT.UNIQUE_ID)[DATACONTRACT.REAL_SCORE].sum()
    draft_scores = draft_scores.to_frame().reset_index()
    cleaned = draft_scores.merge(league.league_info[[DATACONTRACT.UNIQUE_ID, DATACONTRACT.TEAM_NAME]],
                                 on=DATACONTRACT.UNIQUE_ID)
    cleaned = cleaned.set_index(DATACONTRACT.TEAM_NAME)
    # Label wedges with absolute point totals rather than percentages.
    cleaned.plot.pie(y=DATACONTRACT.REAL_SCORE, figsize=(15, 10), legend='',
                     autopct=lambda val: np.round(val / 100. * cleaned[DATACONTRACT.REAL_SCORE].sum(), 0))
    plot_title = f'Cumulative Score from Top {max_draft} Drafted Players for {league.name} Through Week {thru_week}'
    plt.title(plot_title)
    plt.ylabel('')
    if save:
        # Save directly: showing first can leave a blank saved figure on
        # interactive backends.
        save_plot(league, plot_title)
    else:
        plt.show()
def plot_cum_injury_score_by_team(league: League, save=False):
    """Planned: cumulative points lost to injured players per team (stub)."""
    # I dont think I have player status... so can't really get "out"
    pass
def plot_cum_real_score_by_league_through_week(leagues: pd.DataFrame, week, save=False):
    """Plot each league's cumulative total score across weeks.

    Args:
        leagues: combined score_info frame carrying a LeagueName column.
        week: unused; kept for signature compatibility with existing callers.
        save: when True, save under the cross-league 'comparison' folder.
    """
    # sort_values is not in-place: bind the result. The original discarded it,
    # so the cumulative sum could run over rows in unsorted week order.
    leagues = leagues.sort_values(by=['Week'])
    grouped = leagues.groupby(DATACONTRACT.LEAGUE_NAME)
    fig, ax = plt.subplots(figsize=(15, 7))
    ax.set_prop_cycle(color=colors)
    for name, group in grouped:
        # assign() avoids mutating a groupby slice (SettingWithCopyWarning).
        group = group.assign(CumScore=group[DATACONTRACT.REAL_SCORE].cumsum())
        group.plot(x='Week', y='CumScore', ax=ax, label=name)
    plot_title = f'Cumulative Total Score by Week Between Leagues'
    plt.title(plot_title)
    plt.xlabel('Week')
    plt.ylabel('Cumulative Score')
    plt.legend(loc='upper left')
    if save:
        manual_save("comparison", plot_title)
    else:
        plt.show()
def plot_real_score_by_league_through_week(leagues: pd.DataFrame, week, save=False):
    """Plot each league's total (non-cumulative) score per week.

    Args:
        leagues: combined score_info frame carrying a LeagueName column.
        week: unused; kept for signature compatibility with existing callers.
        save: when True, save under the cross-league 'comparison' folder.
    """
    fig, ax = plt.subplots(figsize=(15, 7))
    ax.set_prop_cycle(color=colors)
    g = leagues.groupby([DATACONTRACT.LEAGUE_NAME, DATACONTRACT.WEEK])[DATACONTRACT.REAL_SCORE].sum().unstack(DATACONTRACT.LEAGUE_NAME)
    g.plot(ax=ax)
    plot_title = f'Total Score by Week Between Leagues'
    plt.title(plot_title)
    plt.xlabel('Week')
    # This chart shows per-week totals; the old 'Cumulative Score' label
    # belonged to the cumulative variant and was misleading here.
    plt.ylabel('Score')
    plt.legend(loc='upper left')
    if save:
        manual_save('comparison', plot_title)
    else:
        plt.show()
def manual_save(folder, filename):
    """Save the current matplotlib figure to export/plots/<folder>/<filename>.

    Closes the figure after saving so later plots start fresh.
    """
    dir_path = os.path.join('export', 'plots', folder)
    # makedirs creates missing intermediate directories too; the previous
    # os.mkdir raised FileNotFoundError when 'export' or 'export/plots'
    # did not exist yet.
    os.makedirs(dir_path, exist_ok=True)
    fpath = os.path.join(dir_path, filename)
    logger.debug(f'Saving plot to {fpath}')
    plt.savefig(fpath)
    plt.close()
def save_plot(league: League, name):
    """Save the current matplotlib figure to export/plots/<league.name>/<name>.

    Periods are stripped from the name — presumably so savefig does not treat
    trailing text as a file extension (TODO confirm). Closes the figure after
    saving.
    """
    name = name.replace('.', '')
    dir_path = os.path.join('export', 'plots', league.name)
    # makedirs creates missing intermediate directories too; the previous
    # os.mkdir raised FileNotFoundError when the parent folders were absent.
    os.makedirs(dir_path, exist_ok=True)
    fpath = os.path.join(dir_path, name)
    logger.debug(f'Saving plot to {fpath}')
    plt.savefig(fpath)
    plt.close()
| {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,942 | gr8scott88/FFLStats4 | refs/heads/master | /archive/Helper.py | from bs4 import BeautifulSoup
import requests
class UniqueID:
    """Composite identifier tying a team to its league, week, and time slice."""

    def __init__(self, league_id, league_name, team_id, team_name, team_order, week_id, time_id=0):
        self.league_id = league_id
        self.team_id = team_id
        self.week = week_id
        self.time = time_id
        self.team_name = team_name
        self.league_name = league_name
        self.team_order = team_order

    def get_id_array(self):
        """Return the identifier fields as a list in canonical column order."""
        return [
            self.league_id,
            self.league_name,
            self.team_id,
            self.team_name,
            self.team_order,
            self.week,
            self.time,
        ]

    def get_id_string(self):
        """Return a one-line human-readable description of this identifier."""
        return f'ID: Week {self.week} Time {self.time}, {self.league_id}, {self.team_id}'
def get_soup_url(url):
    """Fetch a URL over HTTP and return the parsed BeautifulSoup document."""
    response = requests.get(url)
    return BeautifulSoup(response.content, 'html.parser')
def get_soup_file(html_file):
    """Parse a local HTML file and return it as a BeautifulSoup document."""
    with open(html_file) as handle:
        return BeautifulSoup(handle, 'html.parser')
def floatify(array):
    """Convert every parseable element of *array* to float, in place.

    Elements that are not valid numbers (e.g. player or team names) are left
    unchanged. The list is mutated in place and also returned for convenience.

    Args:
        array: list of mixed values (numeric strings, numbers, other text).

    Returns:
        The same list object, with numeric entries converted to float.
    """
    for index, value in enumerate(array):
        try:
            array[index] = float(value)
        except (TypeError, ValueError):
            # Narrowed from a bare `except Exception` — float() only raises
            # these two for unconvertible input; anything else should surface.
            pass
    return array
def player_data_float_convert(player_data):
    """Return a copy of *player_data* with numeric values converted to floats.

    Each row is rebuilt as a new list; any value that cannot be parsed as a
    float is carried over unchanged. The input is not modified.
    """
    def _to_float(value):
        # Keep the raw value whenever it is not a valid number.
        try:
            return float(value)
        except Exception:
            return value

    return [[_to_float(item) for item in player_info] for player_info in player_data]
| {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,943 | gr8scott88/FFLStats4 | refs/heads/master | /models/DATACONTRACT.py | LEAGUE_ID = 'LeagueID'
# --- Column-name constants shared by parsers, models, and plotting code ---
UNIQUE_ID = 'UniqueID'
LEAGUE_NAME = 'LeagueName'
TEAM_ID = 'TeamID'
TEAM_NAME = 'TeamName'
REAL_SCORE = 'RealScore'
PROJ_SCORE = 'ProjScore'
WEEK = 'Week'
SLOT = 'Slot'
ACTIVEPOS = 'ActivePos'
PCTSTART = 'PctStart'
PLAYERNAME = 'PlayerName'
TEAMRANKING = 'Ranking'
RESULT = 'Result'
TOTALWINS = 'TotalWins'
OPPONENTSCORE = 'OpponentScore'
DRAFTORDER = 'DraftOrder'
CLASSORDER = 'ClassOrder'
PLAYERPOS = 'PlayerPos'
# --- Canonical column orders for each DataFrame kind ---
TEAMINFOCOLS = [UNIQUE_ID, LEAGUE_ID, LEAGUE_NAME, TEAM_ID, TEAM_NAME]
TEAMSCORECOLS = [UNIQUE_ID, TEAM_ID, WEEK, REAL_SCORE, PROJ_SCORE]
PLAYERSCORECOLS = [UNIQUE_ID, WEEK, PLAYERNAME,
                   SLOT, ACTIVEPOS, REAL_SCORE, PROJ_SCORE, PCTSTART]
# Columns produced directly by the player page parser (id/week added later).
PLAYERPARSECOLS = [PLAYERNAME, SLOT, ACTIVEPOS, REAL_SCORE, PROJ_SCORE, PCTSTART]
LEAGUEINFOCOLS = [UNIQUE_ID, LEAGUE_ID, TEAM_ID, TEAM_NAME]
LEAGUETRACKERCOLS = [UNIQUE_ID, LEAGUE_ID, LEAGUE_NAME, 'Order', TEAM_ID, TEAM_NAME]
RANKINGTRACKERCOLS = [UNIQUE_ID, LEAGUE_ID, TEAM_ID, TEAM_NAME, WEEK, RESULT, TOTALWINS, TEAMRANKING]
MATCHUPSCORECOLS = [UNIQUE_ID, LEAGUE_ID, TEAM_ID, TEAM_NAME, WEEK, REAL_SCORE, OPPONENTSCORE, RESULT]
DRAFTTRACKERCOLS = [UNIQUE_ID, LEAGUE_ID, TEAM_ID, DRAFTORDER, CLASSORDER, PLAYERNAME, PLAYERPOS]
# --- File and directory names used by the local data cache ---
DATAROOT = r'data_archive'
LEAGUEFILENAME = 'LeagueInfo'
MATCHUPFILENAME = 'MatchupInfo'
SCOREFILENAME = 'ScoreInfo'
PLAYERFILENAME = 'PlayerInfo'
DRAFTFILENAME = 'DraftInfo'
LEAGUEHTML= 'LeagueHTML'
TEAMHTML = 'TeamHTML'
MATCHUPHTML = 'MatchupHTML'
EXPORTDIR = 'export'
PLAYERREPORTS = 'player_reports'
LEAGUEREPORTS = 'league_reports'
63,944 | gr8scott88/FFLStats4 | refs/heads/master | /models/League.py | import pandas as pd
from models import DATACONTRACT
from web_parsing.LeaguePageParser import LeaguePageParser
from web_parsing.MatchPageParser import MatchParser
from web_parsing.TeamPageParser import TeamParser
from web_parsing.PlayerParser import PlayerParser
from web_parsing.DraftParser import DraftParser
import data_handlers.LocalDataManager as dm
from data_handlers.PandasHandler import PandasDataHandler
from utility.YahooWebHelper import YahooWebHelper
total_weeks = 16  # regular-season length used when pre-fetching matchup data
current_week = 7  # module-level fallback used only by load_all_team_scores_to_date


class League:
    """One fantasy football league: scraping, local caching, and data access.

    Each data set (league roster, draft, matchups, team scores, player stats)
    is cached as a parquet file keyed by league id; anything missing locally
    is scraped from the web via YahooWebHelper and the page parsers.
    """

    def __init__(self, league_id, name_):
        self.league_id = league_id
        self.name = name_
        # One parser per Yahoo page type, plus web and persistence helpers.
        self.league_parser = LeaguePageParser()
        self.match_parser = MatchParser()
        self.draft_parser = DraftParser()
        self.team_parser = TeamParser()
        self.player_parser = PlayerParser()
        self.web_helper = YahooWebHelper()
        self.pandas_manager = PandasDataHandler()
        # Eagerly load (from cache or the web) every tracked DataFrame.
        self.league_info = self.load_league_info()
        self.draft_info = self.load_draft_info()
        self.matchup_info = self.load_matchup_info()
        self.score_info = self.load_score_info()
        self.player_info = self.load_player_info()
        # self.rank_info = self.calculate_rank()

    def update(self, current_week):
        """Load player data and team scores for every week up to current_week."""
        self.load_all_player_data_through_week(current_week)
        self.load_team_scores_through_week(current_week)

    def reload_player_info(self):
        """Re-read player stats from the parquet cache."""
        self.player_info = self.load_player_info()

    def load_league_info(self) -> pd.DataFrame:
        """Return league/team info, scraping and caching it when missing."""
        league_df = dm.load_from_parquet(self.league_id, DATACONTRACT.LEAGUEFILENAME)
        if league_df is None:
            league_soup = self.web_helper.get_league_soup(self.league_id)
            league_df = self.league_parser.parse_league_info(league_soup)
            print('Loaded League Info from WEB')
            dm.save_to_parquet(self.league_id, league_df, DATACONTRACT.LEAGUEFILENAME, False)
        else:
            print('Loaded League Info from PARQUET file')
        return league_df

    def load_draft_info(self) -> pd.DataFrame:
        """Return draft results for every team, scraping them when missing."""
        draft_df = dm.load_from_parquet(self.league_id, DATACONTRACT.DRAFTFILENAME)
        if draft_df is None:
            for index, team_row in self.league_info.iterrows():
                team_id = team_row[DATACONTRACT.TEAM_ID]
                team_name = team_row[DATACONTRACT.TEAM_NAME]
                unique_id = f'{self.league_id}_{team_id}'
                # Identity columns attached to every parsed draft row.
                info_dict = {DATACONTRACT.UNIQUE_ID: unique_id,
                             DATACONTRACT.LEAGUE_ID: self.league_id,
                             DATACONTRACT.TEAM_ID: team_id,
                             DATACONTRACT.TEAM_NAME: team_name}
                draft_soup = self.web_helper.get_draft_soup(self.league_id, team_id)
                if draft_df is None:
                    draft_df = self.draft_parser.parse_draft_info(draft_soup, info_dict)
                else:
                    # pd.concat replaces DataFrame.append (removed in pandas 2.0).
                    draft_df = pd.concat(
                        [draft_df, self.draft_parser.parse_draft_info(draft_soup, info_dict)])
            dm.save_to_parquet(self.league_id, draft_df, DATACONTRACT.DRAFTFILENAME, False)
            print('Loaded Draft Info from WEB')
        else:
            print('Loaded Draft Info from PARQUET file')
        return draft_df

    def load_matchup_info(self) -> pd.DataFrame:
        """Return the week-by-week opponent table, scraping it when missing."""
        matchup_df = dm.load_from_parquet(self.league_id, DATACONTRACT.MATCHUPFILENAME)
        if matchup_df is None:
            matchup_array = []
            for index, team_row in self.league_info.iterrows():
                team_id = team_row['TeamID']
                team_name = team_row['TeamName']
                team_matchups = []
                # One web fetch per (team, week) for the full regular season.
                for week in range(total_weeks):
                    match_page_soup = self.web_helper.get_team_soup_by_week(self.league_id, team_id, week+1)
                    weekly_matchup = self.team_parser.get_weekly_opponent(match_page_soup)
                    print(f'{team_id} vs {weekly_matchup}')
                    team_matchups.append(weekly_matchup)
                matchup_row = [team_id, team_name]
                matchup_row.extend(team_matchups)
                matchup_array.append(matchup_row)
            matchup_df = self.gen_matchup_df(matchup_array)
            dm.save_to_parquet(self.league_id, matchup_df, DATACONTRACT.MATCHUPFILENAME, False)
            print('Loaded Matchup Info from WEB')
        else:
            print('Loaded Matchup Info from PARQUET file')
        return matchup_df

    @staticmethod
    def gen_matchup_df(matchup_array) -> pd.DataFrame:
        """Build a matchup DataFrame from rows of [team_id, name, opp1..oppN]."""
        week_array = ['Week' + str(x+1) for x in range(total_weeks)]
        df_columns = ['TeamId', 'TeamName']
        df_columns.extend(week_array)
        matchup_df = pd.DataFrame(data=matchup_array, columns=df_columns)
        matchup_df = matchup_df.astype({'TeamId': 'int32'})
        return matchup_df

    def get_team_count(self):
        """Return the number of teams in the league."""
        return self.league_info.shape[0]

    def load_score_info(self):
        """Load cached team scores, or None when no cache exists yet."""
        return dm.load_from_parquet(self.league_id, DATACONTRACT.SCOREFILENAME)

    def load_player_info(self):
        """Load cached player stats, or None when no cache exists yet."""
        return dm.load_from_parquet(self.league_id, DATACONTRACT.PLAYERFILENAME)

    def load_all_team_scores_to_date(self):
        """Load team scores up to the module-level `current_week` fallback."""
        for week in range(current_week):
            self.load_team_scores_by_week(week+1, save_data=True)

    def load_team_scores_through_week(self, week):
        """Load (and cache) team scores for weeks 1..week."""
        for w in range(week):
            self.load_team_scores_by_week(w+1, save_data=True)

    def is_week_loaded(self, week):
        """Return True when team scores for `week` are already cached."""
        if self.score_info is None:
            return False
        else:
            return week in self.score_info['Week'].to_list()

    def is_week_player_data_loaded(self, week):
        """Return True when player stats for `week` are already cached."""
        if self.player_info is None:
            return False
        else:
            res = week in self.player_info['Week'].to_list()
            return res

    def load_team_scores_by_week(self, week, save_data=False):
        """Scrape real/projected team scores for one week, skipping cached weeks.

        Args:
            week: 1-based week number to load.
            save_data: when True, persist the combined scores to parquet.
        """
        if not self.is_week_loaded(week):
            score_array = []
            for index, fantasy_player in self.league_info.iterrows():
                team_id = fantasy_player[DATACONTRACT.TEAM_ID]
                team_name = fantasy_player[DATACONTRACT.TEAM_NAME]
                print(f'{team_id}/{team_name}')
                soup = self.web_helper.get_team_soup_by_week(self.league_id, team_id, week)
                real_score = self.team_parser.get_team_score(soup)
                proj_score = self.team_parser.get_team_projected_score(soup)
                unique_id = f'{self.league_id}_{team_id}'
                # Row layout matches DATACONTRACT.TEAMSCORECOLS.
                score_array.append([unique_id, int(team_id), int(week), float(real_score), float(proj_score)])
            self.append_scores_df(score_array)
            if save_data:
                dm.save_to_parquet(self.league_id, self.score_info, DATACONTRACT.SCOREFILENAME, True)

    def append_scores_df(self, arr):
        """Append rows shaped like TEAMSCORECOLS to the score_info frame."""
        temp_df = pd.DataFrame(data=arr, columns=DATACONTRACT.TEAMSCORECOLS)
        if self.score_info is None:
            self.score_info = temp_df
        else:
            # pd.concat replaces DataFrame.append (removed in pandas 2.0).
            self.score_info = pd.concat([self.score_info, temp_df])

    def load_all_player_data_through_week(self, week):
        """Load (and cache) player stats for weeks 1..week."""
        for w in range(week):
            self.load_player_data_by_week(w + 1, save_data=True)

    def load_player_data_by_week(self, week, save_data=False):
        """Scrape per-player stats for one week, skipping already-cached weeks.

        Args:
            week: 1-based week number to load.
            save_data: when True, persist the combined stats to parquet.
        """
        if not self.is_week_player_data_loaded(week):
            for index, fantasy_player in self.league_info.iterrows():
                team_id = fantasy_player[DATACONTRACT.TEAM_ID]
                team_name = fantasy_player[DATACONTRACT.TEAM_NAME]
                print(f'{team_id}/{team_name}')
                soup = self.web_helper.get_team_soup_by_week(self.league_id, team_id, week)
                player_array = self.player_parser.get_all_info(soup)
                unique_id = f'{self.league_id}_{team_id}'
                player_df = pd.DataFrame(columns=DATACONTRACT.PLAYERPARSECOLS, data=player_array)
                player_df[DATACONTRACT.UNIQUE_ID] = unique_id
                player_df[DATACONTRACT.WEEK] = week
                # Parsed values arrive as strings; coerce numeric columns once.
                player_df = player_df.astype({DATACONTRACT.REAL_SCORE: 'float',
                                              DATACONTRACT.PROJ_SCORE: 'float',
                                              DATACONTRACT.PCTSTART: 'float',
                                              DATACONTRACT.WEEK: 'int'})
                self.append_player_stats_df(player_df)
            if save_data:
                dm.save_to_parquet(self.league_id, self.player_info, DATACONTRACT.PLAYERFILENAME, True)

    def append_player_stats_df(self, df):
        """Append a weekly player-stats frame to the player_info frame."""
        if self.player_info is None:
            self.player_info = df
        else:
            # pd.concat replaces DataFrame.append (removed in pandas 2.0).
            self.player_info = pd.concat([self.player_info, df])

    def calculate_rank(self):
        """Compute weekly rankings into self.rank_info (unimplemented stub)."""
        loaded_weeks = self.score_info['Week'].max()
        self.gen_ranking_df()
        # max() returns a scalar; iterate each loaded week explicitly (the
        # original iterated the scalar itself, which would raise TypeError).
        for week in range(1, int(loaded_weeks) + 1):
            for index, fantasy_player in self.league_info.iterrows():
                #TODO
                pass

    def gen_ranking_df(self):
        """Create an empty ranking frame with the RANKINGTRACKERCOLS layout."""
        self.rank_info = pd.DataFrame(columns=DATACONTRACT.RANKINGTRACKERCOLS)

    def export_league_data_to_csv(self):
        """Export league, matchup, and score frames as CSV files."""
        leaguefilename = str(self.league_id) + 'LeagueData'
        dm.export_to_csv(self.league_info, leaguefilename)
        matchupfilename = str(self.league_id) + '_MatchupData'
        dm.export_to_csv(self.matchup_info, matchupfilename)
        # Name the file from the league id, matching the matchup export
        # (the original built '<id>LeagueData_ScoreData' by mistake).
        scorefilename = str(self.league_id) + '_ScoreData'
        dm.export_to_csv(self.score_info, scorefilename)

    def export_friendly_score_data(self):
        """Planned: export scores in a human-friendly per-week layout (stub)."""
        week_array = self.score_info['Week'].unique()
        for week in week_array:
            temp = self.score_info.loc[self.score_info['Week'] == week]
            pass

    def print_info(self):
        """Print the league, matchup, and score frames for quick inspection."""
        print(self.league_info)
        print(self.matchup_info)
        print(self.score_info)
| {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,945 | gr8scott88/FFLStats4 | refs/heads/master | /ScratchFile.py | from models import League
from models import DATACONTRACT
from utility.YahooWebHelper import YahooWebHelper
from web_parsing.MatchPageParser import MatchParser
from web_parsing.TeamPageParser import TeamParser
from web_parsing.PlayerParser import PlayerParser
from data_handlers.PandasHandler import PandasDataHandler
import matplotlib.pyplot as plt
import pandas as pd
from data_vis.LeagueVisualizer import LeagueVisualizer
import numpy as np

# Scratch/exploration script: prototypes the draft-value pie chart (later
# productionized as plot_draft_value_by_team in LeagueVisGen) and then runs
# the full visualizer suite. Runs network scrapes at import time.
helper = YahooWebHelper()
mp = MatchParser()
tp = TeamParser()
AFC_id = 609682
NFC_id = 713428
load_thru_week = 12
AFC = League.League(AFC_id, 'AFC')
NFC = League.League(NFC_id, 'NFC')
AFC_vis = LeagueVisualizer(AFC)
NFC_vis = LeagueVisualizer(NFC)
# NOTE(review): join() returns a new frame and the result is discarded here —
# this line appears to be leftover experimentation with no effect.
AFC.player_info.join(AFC.draft_info, on=DATACONTRACT.PLAYERNAME)
# df["y"] = pd.to_numeric(df["y"])
# Draft order is parsed as strings; convert so numeric query() filters work.
AFC.draft_info[DATACONTRACT.DRAFTORDER] = pd.to_numeric(AFC.draft_info[DATACONTRACT.DRAFTORDER])
data = AFC.player_info.merge(AFC.draft_info[[DATACONTRACT.DRAFTORDER, DATACONTRACT.PLAYERNAME]], on=DATACONTRACT.PLAYERNAME)
# df[(df.A == 1) & (df.D == 6)]
order = 1
order_filter = f'{DATACONTRACT.DRAFTORDER}<={order}'
filtered = data.query(order_filter)
# grouped = df.groupby('A')
# >>> grouped.filter(lambda x: x['B'].mean() > 3.)
# Total real points per team from the filtered draft picks.
draft_scores = filtered.groupby(DATACONTRACT.UNIQUE_ID)[DATACONTRACT.REAL_SCORE].sum()
draft_scores = draft_scores.to_frame().reset_index()
# AFC.league_info[DATACONTRACT.PLAYERNAME]
cleaned = draft_scores.merge(AFC.league_info[[DATACONTRACT.UNIQUE_ID, DATACONTRACT.TEAM_NAME]], on=DATACONTRACT.UNIQUE_ID)
cleaned = cleaned.set_index(DATACONTRACT.TEAM_NAME)
# cleaned = draft_scores.join(AFC.league_info[[DATACONTRACT.UNIQUE_ID, DATACONTRACT.PLAYERNAME]], on=DATACONTRACT.UNIQUE_ID)
plot = cleaned.plot.pie(y=DATACONTRACT.REAL_SCORE, figsize=(5, 5))
max_draft = 5
thru_week = 12
def func(pct, allvals):
    # Candidate autopct formatter: percentage plus absolute value.
    absolute = int(pct/100.*np.sum(allvals))
    return "{:.1f}%\n({:d} g)".format(pct, absolute)
def absolute_value(val):
    # Candidate autopct formatter: wedge percentage back to absolute points.
    a = np.round(val/100.*cleaned['RealScore'].sum(), 0)
    return a
# f, ax = plt.subplots(figsize=(20, 10))
# Second pass over the same pipeline with the real parameters.
AFC.draft_info[DATACONTRACT.DRAFTORDER] = pd.to_numeric(AFC.draft_info[DATACONTRACT.DRAFTORDER])
# league.player_info.join(league.draft_info, on=DATACONTRACT.PLAYERNAME)
data = AFC.player_info.merge(AFC.draft_info[[DATACONTRACT.DRAFTORDER, DATACONTRACT.PLAYERNAME]],
                             on=DATACONTRACT.PLAYERNAME)
order_filter = f'{DATACONTRACT.DRAFTORDER}<={max_draft}'
week_filter = f'{DATACONTRACT.WEEK}<={thru_week}'
filtered = data.query(order_filter)
filtered = filtered.query(week_filter)
draft_scores = filtered.groupby(DATACONTRACT.UNIQUE_ID)[DATACONTRACT.REAL_SCORE].sum()
draft_scores = draft_scores.to_frame().reset_index()
cleaned = draft_scores.merge(AFC.league_info[[DATACONTRACT.UNIQUE_ID, DATACONTRACT.TEAM_NAME]],
                             on=DATACONTRACT.UNIQUE_ID)
cleaned = cleaned.set_index(DATACONTRACT.TEAM_NAME)
# plot = cleaned.plot.pie(y=DATACONTRACT.REAL_SCORE, figsize=(15, 10), legend='', autopct=absolute_value)
plot = cleaned.plot.pie(y=DATACONTRACT.REAL_SCORE, figsize=(15, 10), legend='', autopct=lambda val: np.round(val/100.*cleaned['RealScore'].sum(), 0))
plot_title = f'Cumulative Score from Top {max_draft} Drafted Players for {AFC.name} Through Week {thru_week}'
plt.title(plot_title)
plt.ylabel('')
# Refresh both leagues, then render every visualizer chart to disk.
AFC.load_all_player_data_through_week(load_thru_week)
NFC.load_all_player_data_through_week(load_thru_week)
AFC.load_team_scores_through_week(load_thru_week)
NFC.load_team_scores_through_week(load_thru_week)
AFC_vis.plot_player_breakdown_for_season()
AFC_vis.plot_player_breakdown_for_season(save=True)
NFC_vis.plot_player_breakdown_for_season(save=True)
AFC_vis.plot_player_breakdown_for_all_teams(save=True)
NFC_vis.plot_player_breakdown_for_all_teams(save=True)
AFC_vis.plot_cum_real_vs_proj_by_week(save=True)
NFC_vis.plot_cum_real_vs_proj_by_week(save=True)
AFC_vis.plot_cum_real_score_by_week()
AFC_vis.plot_cum_real_score_by_week(save=True)
NFC_vis.plot_cum_real_score_by_week(save=True)
AFC_vis.plot_real_score_by_week(save=True)
NFC_vis.plot_real_score_by_week(save=True)
AFC_vis.plot_real_vs_proj_by_week(save=True)
NFC_vis.plot_real_vs_proj_by_week(save=True)
AFC_vis.plot_player_breakdown_for_all_teams(save=True)
# Export Scores (unfriendly format)
AFC.score_info.sort_values(['Week', 'TeamID']).to_csv('AFC_Scores.csv')
NFC.score_info.sort_values(['Week', 'TeamID']).to_csv('NFC_Scores.csv')
63,946 | gr8scott88/FFLStats4 | refs/heads/master | /archive/TestFile.py | from models import League
from utility import DataManager
def _load_and_export(league_id, weeks=9):
    """Load `weeks` data points for one league and export its complete team
    frame. Returns (league, data_manager) so module-level names stay intact."""
    data = DataManager.DataManager()
    league = League.League(league_id, data)
    league.load_all_data_points(weeks)
    data.export_complete_team_frame(league_id)
    return league, data


# The two conferences differ only in their Yahoo league id; the original
# duplicated the whole load/export sequence for each.
nfc_id = 729457
NFC, nfc_data = _load_and_export(nfc_id)
afc_id = 910981
AFC, afc_data = _load_and_export(afc_id)
| {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,947 | gr8scott88/FFLStats4 | refs/heads/master | /archive/GLOBALS.py | import os
# Hard-coded fallback used only when __file__ is unavailable (e.g. interactive
# sessions or frozen builds).
ROOTDIR = r'C:\Dev\Python\Projects\FFLStats4'
try:
    ROOTDIR = os.path.dirname(os.path.realpath(__file__))
except NameError:
    # BUG FIX: only an undefined __file__ should trigger the fallback — the
    # original's `except Exception` also masked unrelated errors.
    print('Script not being run dynamically')
URLROOT = r'https://football.fantasysports.yahoo.com/f1/'
MAXTIMESLICES = 20
| {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,948 | gr8scott88/FFLStats4 | refs/heads/master | /archive/data_access/DataLoader.py | import os
filepath_root = r'/data_archive'
url_root = r'https://football.fantasysports.yahoo.com/f1/'


class DataLoader:
    """Loads weekly team data for a league, preferring the local cache over
    the web (both loaders are currently stubs that report success)."""

    def __init__(self, league_id, no_teams):
        self.no_teams = no_teams
        self.league_id = league_id

    def load_data_to_date(self, week):
        """Load data for every week up to (but not including) `week`."""
        for w in range(week):
            self.load_week_data(w)

    def load_week_data(self, week):
        """Load one week of data for every team, local cache first.

        Raises RuntimeError if neither source produces the data.
        """
        # BUG FIX: the original called load_local_data(week)/load_web_data(week)
        # without the required `team` argument, which raised TypeError; iterate
        # over the league's teams instead.
        for team in range(1, self.no_teams + 1):
            success = self.load_local_data(week, team)
            if not success:
                success = self.load_web_data(week, team)
            if not success:
                raise RuntimeError('Unable to load data')

    def load_local_data(self, week, team):
        # Stub: pretend the cached file was found.
        return True

    def load_web_data(self, week, team):
        # Stub: pretend the download succeeded.
        return True

    def parse_results(self, html):
        pass

    def build_url(self, week, team):
        # NOTE(review): url_root already ends with '/', so this produces a
        # double slash; kept for compatibility with any recorded URLs.
        return f'{url_root}/{self.league_id}/{team}/team?&week={week}'

    def build_filepath(self, week, team):
        """Local cache path: <root>/<league>/<week>/<team>.html"""
        return os.path.join(filepath_root, str(self.league_id), str(week), f'{team}.html')
| {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,949 | Petro-Y/python-web-project | refs/heads/master | /web.py | #!cmd /k py -3
from flask import Flask, request, redirect, render_template, session
from flask_socketio import SocketIO, emit
from zipfile import ZipFile
from werkzeug import secure_filename
import os
from db import *
import proj
from proj import project_by_name, project_data
from settings import secret_key, upload_folder, rootdir
# Flask application plus the Socket.IO wrapper used for live QA notifications.
app = Flask(__name__)
app.secret_key=secret_key
app.config['UPLOAD_FOLDER']=upload_folder
socketio = SocketIO(app)
@app.route('/')
def main_page():
    """Front page: send a logged-in user to their dashboard, others to login."""
    user = session.get('current_user')
    return redirect(f'{user}/' if user else 'login')
@app.route('/login', methods=['GET'])
def login_page():
    """Render the login/registration form."""
    return render_template('login.html')
@app.route('/ws', methods=['GET'])
def ws_page():
    """Render the websocket demo page."""
    return render_template('ws.html')
@socketio.on('my event')
def handle_my_custom_event(json):
    """Demo Socket.IO handler: log the payload and reply with a greeting."""
    print('received json: ' + str(json))
    emit('my response', {'data': 'hello'})
@app.route('/login', methods=['POST'])
def login_post():
    """Validate posted credentials and start a session on success."""
    user = request.form['user']
    password = request.form['password']
    if not check_user(user, password):
        # Wrong user/password: re-show the login form with an error message.
        return render_template('login.html', error="Неправильний логін чи пароль!", user=user)
    session['current_user'] = user
    return redirect('./')
@app.route('/register', methods=['POST'])
def register_post():
    """Register a new account (and log it in) after validating the form."""
    #if password2 and email fields posted, try to register (and log in)
    user=request.form['user']
    password=request.form['password']
    password2=request.form['password2']
    email=request.form['email']
    # Validation: matching passwords, then unique username and unique e-mail.
    if password!=password2:
        return render_template('login.html', error='Вкажіть один і той же пароль двічі!',
            user=user, email=email, register=True)
    elif user_exists(user):
        return render_template('login.html', error="Користувача з таким ім'ям уже зареєстровано!",
            user=user, email=email, register=True)
    elif email_exists(email):
        return render_template('login.html', error="Потрібна унікальна адреса електронної пошти!",
            user=user, email=email, register=True)
    add_user(user, password, email)
    # Auto-login the freshly created account.
    session['current_user']=user
    return redirect('./')
@app.route('/logout')
def logout_page():
    """End the current session and return to the front page."""
    # BUG FIX: `del session['current_user']` raised KeyError when nobody was
    # logged in; pop with a default is a safe no-op in that case.
    session.pop('current_user', None)
    return redirect('./')
@app.route('/<user>/<project>/<path:fname>', methods=['GET'])
def file_page(user, project, fname):
    """Show one project file; with ?mode=raw, return it as plain text."""
    content=''; error=''
    try:
        project_vfs=proj.project_by_name(user, project)
        content=''.join(project_vfs.load(fname))
    except Exception:
        # BUG FIX: narrowed the bare except (it also caught SystemExit /
        # KeyboardInterrupt); any load failure is reported as a missing file.
        error='No such file exists'
    # ?mode=raw bypasses the template and serves the raw content.
    if request.args.get('mode')=='raw':
        return content, 200, {'Content-Type': 'text/plain; charset=utf-8'}
    return render_template('file.html', f=content, fname=fname, user=user, project=project, error=error)
@app.route('/<user>/<project>/<path:fname>', methods=['POST'])
def file_post(user, project, fname):
    """Save edited file content; only the project owner may write."""
    content=request.form['content']
    # BUG FIX: session['current_user'] raised KeyError for anonymous visitors;
    # session.get returns None, which simply fails the ownership check.
    if session.get('current_user')!=user:
        # TODO: offer to fork the project / create an implementation instead.
        return render_template('file.html', f=content, fname=fname, user=user, project=project,
            error='Ви не маєте прав редагувати файл. Створіть відгалуження проекту чи реалізацію підзадачі')
    # Owner confirmed: persist the change.
    project_vfs=proj.project_by_name(user, project)
    project_vfs.save(fname, content)
    return redirect('.')
@app.route('/<user>/<project>/', methods=['GET'])
def project_page(user, project):
    """Render the project page; ?mode=zip downloads the project as a zip."""
    #project_vfs=proj.project_by_name(user, project)
    try:
        if request.args.get('mode')=='zip':
            # see https://docs.python.org/3/library/zipfile.html
            # Generate a zip archive under /static so it can be served directly.
            zfname=rootdir+'static/'+user+'/'+project+'.zip'
            pr=project_by_name(user, project)
            with ZipFile(zfname, 'w') as zf:
                for filename in pr.get_all_files():
                    zf.writestr(filename, ''.join(pr.load(filename)).encode())
            # and download it:
            return redirect(zfname)
            pass
    except:
        # Best-effort: on any zip failure fall through to the normal page.
        pass
    #show files of the project
    #show subtasks list
    return render_template('project.html',
        is_current=session['current_user']==user if 'current_user' in session else False,
        **project_data(user, project))
@app.route('/<user>/<project>/', methods=['POST'])
def project_post(user, project):
    """Dispatch project actions posted from the project page:
    send_report, upload (zip), build, integrate, find_subtasks, add_file."""
    action=request.form['action']
    if action=='send_report':
        report=request.form['testreport']
        # TODO: persist the report in the DB.
        try:
            # Push the QA report to live listeners of this project.
            # see https://flask-socketio.readthedocs.org/en/latest/
            socketio.emit('qa_report '+user+'/'+project, {'text': report})
        except Exception as e:
            print(e)
    elif action=='upload':
        # see http://flask.pocoo.org/docs/0.10/patterns/fileuploads/
        zf=request.files['zipfile']
        if zf:
            filename = secure_filename(zf.filename)
            saved_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            zf.save(saved_path)
            # BUG FIX: the original reopened the bare filename (relative to the
            # server's cwd) instead of the path the upload was saved to.
            with ZipFile(saved_path) as archive:
                archive.extractall()  # TODO: extract into the project dir, not cwd
    elif action=='build':
        target_user, target_project=request.form['target'].split('/', 1)
        # BUG FIX: `impls` was undefined (NameError). Per the original inline
        # comment, the current project IS the implementation being applied to
        # the target project.
        impls=[user+'/'+project]
        buildname='build/'+proj.build(target_user, target_project, impls)
        # Notify every QA user watching this project about the new build.
        for qa in get_qa_list(user, project):
            socketio.emit('qa_request '+qa, {'build': buildname})
    elif action=='integrate':
        # TODO: integrate the current implementation into the target project.
        pass
    elif action=='find_subtasks':
        proj.find_subtasks(user, project)
    elif action=='add_file':
        # Redirect to the (new) file's edit page; saving it creates the file.
        return redirect('/'+user+'/'+project+'/'+request.form['filename'])
    return redirect('/'+user+'/'+project)
@app.route('/<user>/', methods=['GET'])
def user_page(user):
    """Show a user's dashboard; ?newproject=NAME creates that project first."""
    try:
        if request.args.get('newproject'):
            add_project(user, request.args.get('newproject'))
    except:
        # Best-effort: project-creation failures are silently ignored.
        pass
    return render_template('user.html',
        current_user=session['current_user'] if 'current_user' in session else None,
        **user_data(user))
# Optional one-shot DB (re)initialisation endpoint, registered only when
# settings.enable_reset is present and truthy.
try:
    from settings import enable_reset
    if enable_reset:
        @app.route('/reset')
        def reset_page():
            create_db()
            return '''DB creation/reset complete.<br>
To prevent this in future, remove 'enable_reset=True' from settings.py and restart the server.'''
except:
    # settings.enable_reset missing: the endpoint simply stays unregistered.
    pass

if __name__=='__main__':
    # Run through SocketIO's server so websocket events work.
    socketio.run(app, host='0.0.0.0')
| {"/web.py": ["/db.py", "/proj.py", "/settings.py"], "/db.py": ["/settings.py"], "/tasktools.py": ["/vfs.py"], "/proj.py": ["/vfs.py", "/db.py", "/tasktools.py", "/settings.py"]} |
63,950 | Petro-Y/python-web-project | refs/heads/master | /settings.py | import os
# Filesystem layout: everything lives next to this settings file.
path = os.path.abspath(__file__)
rootdir = os.path.dirname(path)+'/'
#rootdir='./'
# SQLite database file and the per-feature directories, all under rootdir.
db_name=rootdir+'project.db'
usersdir=rootdir+'users/'
buildsdir=rootdir+'builds/'
upload_folder=rootdir+'uploads/'
# NOTE(review): secrets are hard-coded in the repository; move them to
# environment variables or an untracked file before any real deployment.
passhashsecret='3f61a0a041fe98decc152b1d5f94ea63'
secret_key='8ffc1cececb0a97ae8d6045dbf75fd49'
# Enables the /reset endpoint in web.py that recreates the database.
enable_reset=True
63,951 | Petro-Y/python-web-project | refs/heads/master | /db.py | #!py -3 -i
from sqlite3 import connect
from hashlib import md5
import os
#from proj import project_by_name
from settings import db_name, passhashsecret
def create_db():
    """(Re)create the SQLite schema from scratch and seed three demo users.

    Destroys any existing database file first.
    """
    try:
        os.unlink(db_name)
    except:
        pass
    conn=connect(db_name)
    cur=conn.cursor()
    cur.executescript('''
    create table user(
        id integer primary key autoincrement,
        name char(50),
        passhash char(32),
        email char(100));
    create table project(
        id integer primary key autoincrement,
        name char(50),
        user_id int,
        status int,
        implementation_id int,''' # NOTE(review): purpose unclear — supertask reference? verify
        '''changed datetime);
    create table project_rel(
        id integer primary key autoincrement,
        slave_id int,
        master_id int);
    create table status(
        id int,
        category int,
        name char(20));
    insert into status values
        (0, 0, 'project'),
        (1, 0, 'test_project'),
        (2, 1, 'subtask'),
        (3, 1, 'subtask_done'),
        (4, 1, 'subtask_cancelled'),
        (5, 2, 'qa_task'),
        (6, 0, 'implementation');
    create table test(
        id integer primary key autoincrement,
        report text,
        build_id int
    );
    create table build(
        id integer primary key autoincrement,
        name char(120),
        project_id int,
        impl_id int,
        created datetime
    );
    /* create table build_impl(
        build_id int,
        impl_id int
    ); */
    create table qa_watch(
        qa_user_id int,
        project_id int
    );
    '''
    # TODO: views.....
    )
    conn.commit()
    cur.close()
    conn.close()
    # Seed accounts for the three demo roles.
    add_user('boss', 'boss', 'boss@example.com')
    add_user('slave', 'slave', 'slave@example.com')
    add_user('qa', 'qa', 'qa@example.com')
def project_data(user, project):
    """DB-backed metadata for the project page: the is_subtask flag plus the
    subtask and supertask name lists ('user/project' strings).

    Falls back to hard-coded stub data when the DB is inaccessible.
    """
    try:
        conn=connect(db_name); cur=conn.cursor()
        cur.execute('''
        select status.category from project
        join user on project.user_id=user.id
        join status on project.status=status.id
        where user.name=? and project.name=?
        ''', (user, project))
        # BUG FIX: the original used for/else with no break, so the else clause
        # always ran and forced is_subtask=False regardless of the query result.
        row=cur.fetchone()
        is_subtask=row is not None and row[0]==1
        cur.close()
        #subtasks:
        cur=conn.cursor()
        cur.execute('''
        select slaveuser.name, slave.name from project as slave
        join project_rel on project_rel.slave_id=slave.id
        join project as master on project_rel.master_id=master.id
        join user as slaveuser on slave.user_id=slaveuser.id
        join user as masteruser on master.user_id=masteruser.id
        where masteruser.name=? and master.name=?
        ''', (user, project))
        subtasks=[row[0]+'/'+row[1] for row in cur]
        cur.close()
        #supertasks:
        cur=conn.cursor()
        cur.execute('''
        select masteruser.name, master.name from project as slave
        join project_rel on project_rel.slave_id=slave.id
        join project as master on project_rel.master_id=master.id
        join user as slaveuser on slave.user_id=slaveuser.id
        join user as masteruser on master.user_id=masteruser.id
        where slaveuser.name=? and slave.name=?
        ''', (user, project))
        supertasks=[row[0]+'/'+row[1] for row in cur]
        cur.close()
        # TODO: reports and build targets.
        conn.close()
        return dict( user=user, project=project,
            is_subtask=is_subtask, #files=files,
            subtasks=subtasks, supertasks=supertasks)
    except Exception as e:
        print(e)
        return dict( user=user, project=project, error='Stub mode (DB is inaccessible)',
            files=['file1.c', 'file2.c', 'file3.html'],
            subtasks=['st1', 'st2'],
            supertasks=['project'],
            reports=[('QA', '22.03.2016', 'Deadline error: nothing implemented!!!')],
            is_subtask=project.startswith(('st', 'subtask')))
def user_data(username):
    """Assemble the user dashboard data: own projects, subtasks, pending QA
    tasks and reports. Returns stub data when the DB is inaccessible."""
    try:
        conn=connect(db_name)
        cur=conn.cursor()
        projects=[p for p, in cur.execute('''
        select project.name from project
        join user on user.id=project.user_id
        join status on project.status=status.id
        where user.name=? and status.category=0
        ''', (username,))]
        subtasks=[p for p, in cur.execute('''
        select project.name from project
        join user on user.id=project.user_id
        join status on project.status=status.id
        where user.name=? and status.category=1
        ''', (username,))]
        # QA tasks: builds without a test report, for every watched project
        # (and its descendants, via the recursive CTE).
        qatasks=list(cur.execute('''
        with recursive ancestor (project_id) as
        (select project_id from qa_watch
        join user on user.id=qa_watch.qa_user_id
        where user.name=?
        union select slave_id from project_rel
        join ancestor on ancestor.project_id=project_rel.master_id)
        select build.name, build.project_id, build.impl_id, build.created from build
        join ancestor on ancestor.project_id=build.impl_id
        join qa_watch on qa_watch.project_id=build.project_id
        where build.id not in (select build_id from test)
        ''', (username,)))
        # Expand each build into [build_name, project, ... implementation chain].
        qatasks= [[bname]+build_sequence(bproject_id, bimpl_id)
            for bname, bproject_id, bimpl_id, bcreated in qatasks]
        reports=()  # TODO: collect reports for this user's projects
        # BUG FIX: the original returned the key as 'quatasks' (typo); the stub
        # branch below and the template both use 'qatasks'.
        return dict(user=username, projects=projects, subtasks=subtasks, qatasks=qatasks, reports=reports)
    except Exception as e:
        print(e)
        return dict(user=username, error='Stub mode (DB is inaccessible)',
            projects=['project1', 'project2'],
            subtasks=['subtask1', 'subtask2'],
            qatasks=[('build/ghg676761', 'user1/project1', 'user2/impl1'), ('user1/project1', 'user1/project1')])
def user_exists(username):
    """Return True iff a user row with this name exists (False on DB error)."""
    try:
        conn=connect(db_name)
        try:
            cur=conn.cursor()
            cur.execute('''
            select name from user where name=?
            ''', (username,))
            # BUG FIX: the original returned True from inside the row loop,
            # leaking the connection; fetchone + finally always closes it.
            return cur.fetchone() is not None
        finally:
            conn.close()
    except Exception:
        # DB unavailable: treat as "does not exist" (original behavior).
        return False
def email_exists(email):
    """Return True iff a user row with this e-mail exists (False on DB error)."""
    try:
        conn=connect(db_name)
        try:
            cur=conn.cursor()
            cur.execute('''
            select email from user where email=?
            ''', (email,))
            # BUG FIX: the original returned True from inside the row loop,
            # leaking the connection; fetchone + finally always closes it.
            return cur.fetchone() is not None
        finally:
            conn.close()
    except Exception:
        # DB unavailable: treat as "does not exist" (original behavior).
        return False
def get_passhash(user, password):
    """Salted MD5 digest of user+password (legacy scheme; salt from settings)."""
    digest = md5((user + password + passhashsecret).encode())
    return digest.hexdigest()
def add_user(user, password, email):
    """Insert a new user row, storing the salted password hash."""
    hashed = get_passhash(user, password)
    connection = connect(db_name)
    cursor = connection.cursor()
    cursor.execute('''
    insert into user (name, passhash, email) values (?, ?, ?);
    ''', (user, hashed, email))
    connection.commit()
    cursor.close()
    connection.close()
def add_project(user, project):
    """Create a plain project (status 0) owned by *user*."""
    connection = connect(db_name)
    cursor = connection.cursor()
    # Resolve the owner's id in the same statement as the insert.
    cursor.execute('''
    insert into project (user_id, name, status)
    select user.id, ?, 0 from user where name=?
    ''', (project, user))
    connection.commit()
    cursor.close()
    connection.close()
def add_impl(user, project, st_user, st):
    """Create an implementation project (status 6) owned by *user*, linked as
    a slave of the subtask st_user/st it implements."""
    conn=connect(db_name)
    cur=conn.cursor()
    cur.execute('''
    insert into project (user_id, name, status)
    select user.id, ?, 6 from user where name=?
    ''', (project, user))
    # cur.lastrowid is the id of the implementation row inserted above.
    cur.execute('''
    insert into project_rel (slave_id, master_id)
    select ?, project.id from project
    join user on project.user_id=user.id
    where user.name=? and project.name=?
    ''',(cur.lastrowid, st_user, st))
    conn.commit()
    cur.close()
    conn.close()
def add_subtask(user, project, st):
    """Create a subtask project (status 2) owned by *user*, linked as a slave
    of user/project."""
    conn=connect(db_name)
    cur=conn.cursor()
    cur.execute('''
    insert into project (user_id, name, status)
    select user.id, ?, 2 from user where name=?
    ''', (st, user))
    # cur.lastrowid is the id of the subtask row inserted above.
    cur.execute('''
    insert into project_rel (slave_id, master_id)
    select ?, project.id from project
    join user on project.user_id=user.id
    where user.name=? and project.name=?
    ''',(cur.lastrowid, user, project))
    conn.commit()
    cur.close()
    conn.close()
def add_test_project(user, project, st):
    """Create a test project (status 1) owned by *user*, linked to subtask *st*.

    NOTE(review): the link direction here is opposite to add_subtask/add_impl —
    the subtask becomes the slave and the new test project the master
    (cur.lastrowid). Verify this is the intended relationship.
    """
    conn=connect(db_name)
    cur=conn.cursor()
    cur.execute('''
    insert into project (user_id, name, status)
    select user.id, ?, 1 from user where name=?
    ''', (project, user))
    cur.execute('''
    insert into project_rel (slave_id, master_id)
    select project.id, ? from project
    join user on project.user_id=user.id
    where user.name=? and project.name=?
    ''',(cur.lastrowid, user, st))
    conn.commit()
    cur.close()
    conn.close()
def clone_project(old_user, old_project, new_user, new_project):
    """Copy a project row and its project_rel links to a new owner/name.

    Returns the rowid of the last inserted project_rel row (legacy behavior;
    current callers ignore it).
    """
    conn=connect(db_name)
    cur=conn.cursor()
    # BUG FIX: the original SQL referenced 'olproject' (typo) and failed.
    cur.execute('''
    insert into project (user_id, name, status)
    select newuser.id, ?, oldproject.status from project as oldproject
    join user as olduser on oldproject.user_id=olduser.id
    join user as newuser
    where newuser.name=? and oldproject.name=? and olduser.name=?
    ''', (new_project, new_user, old_project, old_user))
    new_id=cur.lastrowid
    # BUG FIX: the original bound the whole result *list* as the SQL parameter;
    # extract the scalar project id instead.
    rows=list(cur.execute('''
    select project.id from project
    join user on project.user_id=user.id
    where user.name=? and project.name=?
    ''', (old_user, old_project)))
    old_id=rows[0][0] if rows else None
    # Duplicate the old project's slave links under the new project...
    cur.execute('''
    insert into project_rel (master_id, slave_id)
    select ?, slave_id from project_rel
    where master_id=?
    ''', (new_id, old_id))
    # ...and its master links.
    cur.execute('''
    insert into project_rel (master_id, slave_id)
    select master_id, ? from project_rel
    where slave_id=?
    ''', (new_id, old_id))
    last_rel_id=cur.lastrowid
    conn.commit()
    cur.close()
    conn.close()
    return last_rel_id
def add_build(proj_user, project, impl_user, impl):
    """Record a build row linking a main project to one implementation."""
    conn=connect(db_name)
    cur=conn.cursor()
    # BUG FIXES: SQLite has no bare 'now' — use datetime('now'); the alias
    # 'impluser' was misspelled 'imluser' in the where clause.
    cur.execute('''
    insert into build (project_id, impl_id, created)
    select main.id, impl.id, datetime('now')
    from project as main
    join user as mainuser on main.user_id=mainuser.id
    join project as impl
    join user as impluser on impl.user_id=impluser.id
    where mainuser.name=? and main.name=?
    and impluser.name=? and impl.name=?
    ''', (proj_user, project, impl_user, impl))
    conn.commit()
    cur.close()
    conn.close()
    # TODO: also populate build.name.
def check_user(user, password):
    """Return True iff the user exists and the password hash matches.

    Any DB failure counts as a failed login (original behavior).
    """
    try:
        passhash=get_passhash(user, password)
        conn=connect(db_name)
        try:
            cur=conn.cursor()
            cur.execute('''
            select * from user where name=? and passhash=?
            ''', (user, passhash))
            # BUG FIX: the original returned from inside the row loop, leaking
            # the connection on every successful login.
            return cur.fetchone() is not None
        finally:
            conn.close()
    except Exception:
        return False
def get_qa_list(user, project):
    "Get all QAs watching this project"
    connection = connect(db_name)
    cursor = connection.cursor()
    # Walk qa_watch from the project (found by owner name + project name)
    # out to the watching QA users' names.
    rows = cursor.execute('''
    select qauser.name from user
    join project on project.user_id=user.id
    join qa_watch on qa_watch.project_id=project.id
    join user as qauser on qa_watch.qa_user_id=qauser.id
    where user.name=? and project.name=?
    ''', (user, project))
    watchers = [name for name, in rows]
    cursor.close()
    connection.close()
    return watchers
def build_sequence(proj_id, impl_id):
    "Sequence of projects (from project to impl, excluding subtasks)"
    # Walks the project_rel graph upward from the implementation (impl_id)
    # towards the root project (proj_id) using a recursive CTE, accumulating
    # a '+'-separated chain of 'user/name' labels; rows whose status category
    # is not 0 (i.e. subtasks) contribute nothing to the label.
    # NOTE(review): raises IndexError when no path exists (res is empty).
    conn=connect(db_name)
    cur=conn.cursor()
    res=list(cur.execute('''
    with recursive prj as (
    select id, user.name ||'/'|| project.name as name from project
    join user on project.user_id=user.id
    where id=?
    union select master_id,
    (case when status.category=0 then user.name ||'/'|| project.name ||'+' else '' end)|| prj.name
    from prj
    join project_rel on prj.id=slave_id
    join project on master_id=project.id
    join user on project.user_id=user.id
    join status on project.status=status.id
    ) select name from prj where id=? limit 1
    ''', (impl_id, proj_id)))
    cur.close()
    conn.close()
    # Split the accumulated chain back into individual 'user/name' entries.
    return res[0][0].split('+')
def get_base(user, project):
    """Return the name of the subtask this implementation project (status 6)
    is based on, or None if there is none (or on any DB error).
    """
    # NOTE: the original had this docstring misplaced inside the try body and
    # printed debug trace lines; both cleaned up.
    try:
        conn=connect(db_name)
        cur=conn.cursor()
        res=list(cur.execute('''
        select super.name from project as super
        join project_rel on super.id=master_id
        join project on project.id=slave_id
        join user on user.id=project.user_id
        where project.status=6
        and user.name=? and project.name=?
        ''', (user, project)))
        cur.close()
        conn.close()
        return res[0][0] if res else None
    except Exception as e:
        print(e)
        return None
63,952 | Petro-Y/python-web-project | refs/heads/master | /vfs.py | #!py -3 -i
import re
import os
import os.path
class VFS:
    '''
    abstract base class for virtual file systems (implemented on real disk file system,
    memory storage, database etc.'''
    def translate_path(self, path):
        # TODO: normalise the path —
        #add final / ....
        #replace /DIR/../ => /, /./ => /, //=>/ .....
        #remove final / .......
        #add initial / ......
        #ensure path doesn't start from /.. .......
        return path
    def parse_path(self, path):
        # Split a slash-separated path into its non-empty components.
        return list(filter(lambda s:s, path.split('/')))#also remove empty items
    def clone(self, another_vfs):
        # Destructively make this VFS an exact copy of another_vfs.
        #remove all files:
        for f in self.get_all_files():
            self.rm(f)
        #copy all files from another_vfs:
        for f in another_vfs.get_all_files():
            self.save(f, another_vfs.load(f))
        return self
    def get_all_files(self, path=''):
        # Recursively yield every file path (with a leading '/') under path.
        for fname in map(lambda fname: path+'/'+fname, self.ls(path)):
            if self.isdir(fname):
                yield from self.get_all_files(fname)
            else:
                yield fname
    def get_all_dirs(self, path=''):
        # Recursively yield every directory path under path.
        for fname in map(lambda fname: path+'/'+fname, self.ls(path)):
            if self.isdir(fname):
                yield fname
                yield from self.get_all_dirs(fname)
    # Primitive operations to be provided by concrete subclasses:
    def mkdir(self, path): pass
    def rm(self, path): pass
    def ls(self, path): pass
    def exists(self, path): pass
    def isdir(self, path): pass
    def load(self, path): pass
    def save(self, path, linelist): pass
class ListReadStream:
    """Read-only stream over a list of strings: each read() returns the next
    item, then '' once exhausted."""

    def __init__(self, lst):
        self.it=iter(lst)

    def __enter__(self):
        # BUG FIX: the context-manager protocol requires __enter__; the
        # original only had a (wrongly shaped) __exit__.
        return self

    def __exit__(self, exc_type, exc, tb):
        # BUG FIX: __exit__ must accept the three exception arguments.
        pass

    def read(self):
        try:
            return next(self.it)
        except StopIteration:
            # End of data: mimic file.read() returning the empty string.
            return ''
class ListWriteStream:
    """Write stream that appends each written string, newline-terminated,
    to the backing list."""

    def __init__(self, lst):
        self.lst=lst

    def __enter__(self):
        # BUG FIX: the context-manager protocol requires __enter__; the
        # original only had a (wrongly shaped) __exit__.
        return self

    def __exit__(self, exc_type, exc, tb):
        # BUG FIX: __exit__ must accept the three exception arguments.
        pass

    def write(self, s):
        self.lst+=[s+'\n']
class ListVFS(VFS):
    '''Abstract file system where file can be loaded and saved as a list of strings'''
    def open(self, path, mode='r'):
        # Provide a minimal file-object facade over load()/save().
        if mode=='r':
            #create iterator with read() and close() metods:
            return ListReadStream(self.load(path))
        elif mode=='w':
            # NOTE(review): write mode is unimplemented — this returns None,
            # which also breaks mode 'a' below (it reopens with 'w').
            #create object with write() method to store data in a list and close() to save the list....
            pass
        elif mode=='a':
            # Append: replay the existing content into a write stream first.
            old=self.load(path)
            f=self.open(path, 'w')
            for s in old:
                f.write(s)
            return f
class StreamVFS(VFS):
    '''Abstract file system with stream access to file content'''

    def load(self, path):
        """Read the whole file as a list of lines via the subclass's open()."""
        with self.open(path) as stream:
            return [line for line in stream]

    def save(self, path, linelist):
        """Write every line of *linelist* through a 'w'-mode stream."""
        with self.open(path, 'w') as stream:
            for line in linelist:
                stream.write(line)
class DiskVFS(StreamVFS):
    """VFS rooted at a real directory on disk."""

    def __init__(self, path, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.basepath=path
        # NOTE(review): the base directory is not created here; open(..., 'w')
        # creates parent directories lazily.

    def localpath(self, path):
        """Map a virtual (slash-rooted) path to a real path under basepath."""
        path=re.sub(r'^[\\/]*', '', path)
        return os.path.join(self.basepath, path)

    def open(self, path, mode='r', *args, **kwargs):
        # Any write-style mode needs the parent directories to exist.
        if set(mode) & {'w', 'a', 'x'}:
            self.mkdir(os.path.dirname(path))
        path=self.localpath(path)
        return open(path, mode, *args, **kwargs)

    def mkdir(self, path):
        os.makedirs(self.localpath(path), exist_ok=True)

    def rm(self, path):
        os.remove(self.localpath(path))

    def ls(self, path):
        # BUG FIX: narrowed the bare except; a missing/unreadable directory
        # yields an empty listing, anything else propagates.
        try:
            return os.listdir(self.localpath(path))
        except OSError:
            return []

    def exists(self, path):
        return os.path.exists(self.localpath(path))

    def isdir(self, path):
        return os.path.isdir(self.localpath(path))
class SubdirVFS(VFS):
    # Stub: a VFS exposing a subdirectory of another VFS as its root.
    #based on subdir of another VFS.....
    def __init__(self, vfs, path, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.basepath=path
        self.basevfs=vfs
        # TODO: create the directory if missing and delegate the VFS
        # primitives to self.basevfs under self.basepath.
        pass
class MergeVFS(VFS):
    '''Join multiple VFSes into single one'''
    # Stub: delegation of the VFS primitives is not implemented yet.
    def __init__(self, *vfses, **kwargs):
        super().__init__(**kwargs)
        self.vfses=vfses
    pass
class DictVFS(ListVFS):
    '''In-memory VFS backed by nested dicts: directories are dicts, files are
    whatever content (normally a list of lines) was passed to save().'''

    def __init__(self, root=None, **kwargs):
        # Share the caller's dict when one is given; otherwise start empty.
        self.root=root or {}

    def load(self, path):
        """Return a shallow copy of the file content, or [] if missing."""
        try:
            node=self.root
            for part in self.parse_path(path):
                node=node[part]
            return node[:]
        except Exception:
            return []

    def save(self, path, content):
        """Store content at path, creating intermediate directories."""
        parts=self.parse_path(path)
        node=self.root
        for part in parts[:-1]:
            node=node.setdefault(part, {})
        node[parts[-1]]=content

    def mkdir(self, path):
        node=self.root
        for part in self.parse_path(path):
            node=node.setdefault(part, {})

    def get_all_files(self, root=None):
        """List every file path under root (relative, no leading slash)."""
        if root is None:
            root=self.root
        found=[]
        for name, child in root.items():
            if isinstance(child, dict):
                found+=[name+'/'+sub for sub in self.get_all_files(child)]
            else:
                found.append(name)
        return found
class DBVFS(VFS):
    '''VFS stored in DB'''
    # Stub: not implemented.
    pass
63,953 | Petro-Y/python-web-project | refs/heads/master | /tasktools.py | #!cmd /k py -3
from vfs import DictVFS
import re
class Project(DictVFS):
    # In-memory project: a DictVFS plus arbitrary metadata attributes
    # (e.g. id=..., implements=...) passed as keyword arguments.
    #SubdirVFS with additional information.....
    def __init__(self, *args, **kwargs):
        # Stash metadata kwargs as attributes; DictVFS ignores unknown kwargs.
        self.__dict__.update(kwargs)
        super().__init__(*args, **kwargs)
def find_fragments(project, base=None, only_file=None):
    """Scan project files for ':subtask NAME:' ... ':endsubtask:' fragments.

    Returns a list of dicts with keys begin/end (1-based marker line numbers),
    name, globalname (name up to the first '/'), filename. Only the outermost
    of any nested fragments is recorded.
    """
    fragments=[]
    for filename in ([only_file] if only_file else project.get_all_files()):
        #print('file:', filename)
        nested=0; ln=0
        for s in project.load(filename):
            ln+=1
            #(ln, ':', s, end='')
            #find ":subtask SUBTASKNAME:" and ":endsubtask:",
            #skip nested fragments:
            stname=re.findall(r':subtask\s([^:\s]+):', s)
            # NOTE(review): stname is a *list* while base is a string, so
            # 'stname!=base' is always true when a marker matched — verify
            # whether 'stname[0]!=base' was intended here.
            if stname and stname!=base:
                #print('subtask:', stname[0])
                if nested==0:
                    fragments+=[dict(
                        begin=ln,
                        name=stname[0],
                        #globalname='%s@%s' % (stname[0].split('/', 1)[0], project.id),
                        globalname=stname[0].split('/', 1)[0],
                        filename=filename)]
                nested+=1
            elif s.find(':endsubtask:')>=0:
                if nested>0:
                    nested-=1
                    if nested==0:
                        fragments[-1]['end']=ln
    return fragments
def extract_subtasks(project):
    """Group fragments by global subtask name and build one in-memory Project
    per subtask containing just the fragment lines (markers included).

    Returns {globalname: Project}; on error prints and returns None.
    """
    print('extract_subtasks')
    try:
        subtasks={}
        for fr in find_fragments(project):
            if fr['globalname'] not in subtasks:
                subtasks[fr['globalname']]=Project(implements=fr['globalname'])
            # Append this fragment's source lines to the subtask's copy of
            # the same file (several fragments may share one file).
            subtasks[fr['globalname']].save(fr['filename'],
                subtasks[fr['globalname']].load(fr['filename'])
                +project.load(fr['filename'])[fr['begin']-1:fr['end']])
        return subtasks
    except Exception as e:
        # NOTE(review): errors are swallowed and None returned — callers must
        # be prepared for that.
        print('extract_subtasks', e)
def apply_subtasks(project, impls):
    """Apply each implementation in *impls* (VFS-like projects, each with a
    .base attribute naming the subtask it implements) onto *project*, replacing
    the matching :subtask ...:/:endsubtask: fragments in place.

    Implementations are folded recursively: the tail of *impls* is first
    applied to a working copy of the head, then that copy is applied here.
    """
    if not impls: return
    # Work on a copy so the stored implementation is never mutated.
    impl=DictVFS().clone(impls[0])
    impl.base=impls[0].base
    apply_subtasks(impl, impls[1:])
    try:
        base=project.base
    except AttributeError:
        base=None
    # Only fragments belonging to the subtask this impl implements are used.
    impl_fragments=filter(lambda fr: fr['globalname']==impl.base, find_fragments(impl))
    project_fragments={fr['name']:fr for fr in find_fragments(project, base=base)}
    for impl_fr in impl_fragments:
        project_fr=project_fragments[impl_fr['name']]
        project_text=project.load(project_fr['filename'])
        # BUG FIX: the replacement text must come from the implementation; the
        # original loaded it from the project, making the splice a no-op.
        impl_text=impl.load(impl_fr['filename'])
        project_text[project_fr['begin']:project_fr['end']]=impl_text[impl_fr['begin']:impl_fr['end']]
        project.save(project_fr['filename'], project_text)
        # Re-scan the modified file: the splice shifts later line numbers.
        project_fragments.update({fr['name']:fr for fr in find_fragments(project, only_file=project_fr['filename'])})
    # Finally, copy brand-new files the implementation introduced.
    for f in impl.get_all_files():
        if not project.exists(f):
            project.save(f, impl.load(f))
def text2list(text):
    """Split *text* on newlines, re-appending '\\n' to every piece."""
    return [line + '\n' for line in text.split('\n')]
if __name__=='__main__':
    # Smoke-test / demo: build a tiny in-memory project containing marked
    # subtask fragments, then print its files, fragments and extracted
    # subtasks.
    myproj=Project(
        {
        'file':text2list('''
#include <stdio.h>
main()
{
  /* :subtask one: */
  // write some instructions here
  /* :endsubtask: */
}
'''),
        'subdir':{
            'file': text2list('''
Hi!!!
:subtask two:
some text here
:endsubtask:
lorem ipsum dolor...
:subtask two/2:
another text here
:endsubtask:
''')}
        },
        id='0000')
    print(myproj.get_all_files())
    #print(myproj.load('subdir/file'))
    #print(myproj.root)
    print(find_fragments(myproj))
    sts=extract_subtasks(myproj)
    for st in sts:
        print(st)
        print(sts[st].root)
63,954 | Petro-Y/python-web-project | refs/heads/master | /proj.py | #!py -3 -i
# project factory implementation
import vfs
import db
import tasktools
from settings import usersdir, buildsdir
from uuid import uuid4
def project_by_name(user, project=None):
    """Return the on-disk VFS for *user*'s *project*.

    Accepts either (user, project) or a single 'user/project' string.
    The VFS gets a .base attribute (the subtask this project implements,
    from the DB) so apply_subtasks can filter fragments.
    """
    if project is None:
        # Single 'user/project' argument form.
        return project_by_name(*user.split('/', 1))
    path=usersdir+user+'/'+project
    project_vfs=vfs.DiskVFS(path)
    # TODO: cache the VFS objects; attach further DB-backed properties
    # (subtasks, supertasks, status).
    project_vfs.base=db.get_base(user, project)
    return project_vfs
def project_fork(old_user, old_project, new_user, new_project):
    """Clone an existing project — db records and files — under a new name."""
    src = vfs.DiskVFS(usersdir + old_user + '/' + old_project)
    dst = vfs.DiskVFS(usersdir + new_user + '/' + new_project)
    # Duplicate the project's database records first, then its files.
    db.clone_project(old_user, old_project, new_user, new_project)
    dst.clone(src)
def build(user, project, implementations):
    """Materialise a throw-away build of *project* with the given
    implementation projects applied; return its unique build name."""
    buildname = str(uuid4())  # unique random directory name
    build_vfs = vfs.DiskVFS(buildsdir + buildname)
    project_vfs = vfs.DiskVFS(usersdir + user + '/' + project)
    # Work on a temporary copy of the project, never the original.
    build_vfs.clone(project_vfs)
    tasktools.apply_subtasks(build_vfs, proj_sequence(implementations))
    # TODO: store build info in the db.
    return buildname
def integrate(user, project, implementations):
    """Like build(), but applies the subtask implementations permanently."""
    target = project_by_name(user, project)
    tasktools.apply_subtasks(target, proj_sequence(implementations))
    # TODO: record information about this integration in the db.
def proj_sequence(build_seq):
    """Resolve a sequence of 'user/project' names to project VFS objects."""
    return [project_by_name(name) for name in build_seq]
def project_data(user, project):
    """Return db metadata for *user*/*project* plus its file listing."""
    print('I am project_data', user, project)
    files = project_by_name(user, project).get_all_files()
    data = {'files': files}
    data.update(db.project_data(user, project))
    return data
def add_test_project(user, project, st):
    """Record in the db that *user*/*project* is a test project for subtask *st*."""
    db.add_test_project(user, project, st)
    # TODO: create an empty directory, or clone the master project?
def add_subtask(user, project, st):
    """Record subtask *st* of *user*/*project* in the db (files handled by caller)."""
    db.add_subtask(user, project, st)
    # TODO: extract the subtask's files from the project here.
def add_impl(user, project, st_user, st):
    """Register *user*/*project* as an implementation of subtask
    *st_user*/*st* and seed it with a copy of the subtask's files."""
    db.add_impl(user, project, st_user, st)
    subtask_vfs = project_by_name(st_user, st)
    project_by_name(user, project).clone(subtask_vfs)
def find_subtasks(user, project):
    """Discover the project's subtasks and persist each one (db + files)."""
    print('find_subtasks', user, project)
    subtasks = tasktools.extract_subtasks(project_by_name(user, project))
    print('extract_subtasks is OK')
    # Persist every discovered subtask: a db record plus its own VFS copy.
    for name, subtask_vfs in subtasks.items():
        add_subtask(user, project, name)
        project_by_name(user, name).clone(subtask_vfs)
    print('adding subtasks is OK')
63,956 | mrosata/python-yurp | refs/heads/master | /database_setup.py | from datetime import datetime
from sqlalchemy import Column, ForeignKey, Integer, String, DateTime, Date
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declarative_base()
class Entry(Base):
    """A journal/blog entry row."""
    __tablename__ = 'entries'
    id = Column(Integer, autoincrement=True, primary_key=True)
    # Pass the callable (not its result): utcnow must run once per INSERT.
    # With `default=datetime.utcnow()` every row would share the timestamp
    # taken when this module was first imported.
    inserted = Column(DateTime, default=datetime.utcnow)
    last_edit = Column(DateTime, default=datetime.utcnow)
    publicity = Column(String(12), nullable=False)  # visibility flag
    author = Column(Integer, nullable=False)        # author user id
    category = Column(Integer, default=0)           # Label.id of the category
    title = Column(String(64), nullable=False)
    gist = Column(String(140), nullable=True)       # short teaser text
    content = Column(String(None), nullable=False)  # unbounded body text
    # For making JSON queries
    @property
    def serialize(self):
        """Minimal JSON-serialisable representation of the entry."""
        return {
            'id': self.id
        }
# labels, aka categories. they have no relation outside their own table
class Label(Base):
    """A free-standing label; `type` distinguishes uses (e.g. 'category')."""
    __tablename__ = 'labels'
    id = Column(Integer, autoincrement=True, primary_key=True)
    name = Column(String(50), nullable=False)  # display name
    type = Column(String(30), nullable=True)   # label kind, e.g. 'category'
class Day(Base):
    """Associates an entry with a calendar date and an ordering position."""
    __tablename__ = 'days'
    id = Column(Integer, autoincrement=True, primary_key=True)
    date = Column(Date, nullable=True)
    order = Column(Integer, nullable=True)  # presumably the entry's position within the day — verify
    entry_id = Column(Integer, ForeignKey('entries.id'))
    entries = relationship(Entry)
class CategoryLink(Base):
    """Link row connecting a Label (category) to an Entry."""
    __tablename__ = 'cat_links'
    id = Column(Integer, autoincrement=True, primary_key=True)
    label_id = Column(Integer, ForeignKey('labels.id'))
    entry_id = Column(Integer, ForeignKey('entries.id'))
    labels = relationship(Label)
    entries = relationship(Entry)
class Attachment(Base):
    """A file attached to an entry (e.g. an uploaded image)."""
    __tablename__ = 'attachments'
    id = Column(Integer, primary_key=True, autoincrement=True)
    type = Column(String(16),nullable=False)  # attachment kind, e.g. 'image'
    src = Column(String(32), nullable=False)  # relative path; presumably 'year/month/file' — verify against uploader
    entry_id = Column(Integer, ForeignKey('entries.id'))
    entries = relationship(Entry)
# Create the SQLite file and all missing tables at import time.
engine = create_engine('sqlite:///yurp.application.db')
Base.metadata.create_all(engine)
| {"/idata.py": ["/database_setup.py"], "/main.py": ["/database_setup.py", "/attachments.py", "/parsed_entry.py"]} |
63,957 | mrosata/python-yurp | refs/heads/master | /idata.py | #!/usr/bin/env python
__author__ = 'michaael'
__package__ = ''
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Entry, Day, Attachment, Base
# Set up the database engine and a ready-made session so the models can be
# used interactively from a console (e.g. `python -i idata.py`).
engine = create_engine('sqlite:///yurp.application.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
| {"/idata.py": ["/database_setup.py"], "/main.py": ["/database_setup.py", "/attachments.py", "/parsed_entry.py"]} |
63,958 | mrosata/python-yurp | refs/heads/master | /parsed_entry.py | #!/usr/bin/env python
__author__ = 'Michael Rosata mrosata1984@gmail.com'
__package__ = ''
from flask import Markup
from database_setup import Label, Attachment
class Parsed_Entry:
    """Template-friendly wrapper around a db Entry row.

    Eagerly resolves the entry's attachment and category label so templates
    can use plain attributes (`entry.image`, `entry.category`, ...).
    """

    def __init__(self, db_entry, query):
        """:param db_entry: an Entry row
        :param query: a Session.query callable for follow-up lookups
        """
        self.db_entry = db_entry
        self.query = query
        self.id = db_entry.id
        self.title = Markup(db_entry.title)
        self.author = db_entry.author
        self.content = Markup(db_entry.content)
        self.gist = db_entry.gist
        self.inserted = db_entry.inserted
        self.image = self.check_attachments()
        self.category = self.check_label('category')

    def check_attachments(self, _type='image'):
        """Return the entry's first *_type* attachment as a dict, or '' if none.

        NOTE: callers rely on a falsy '' when there is no attachment.
        """
        attachment = self.query(Attachment).\
            filter_by(entry_id=self.id, type=_type).first()
        if not attachment:
            return ''
        parts = attachment.src.split('/')  # expected layout: year/month/file
        return {
            'id': attachment.id,
            'type': attachment.type,
            'src': str(attachment.src).lstrip('/'),
            'year': parts[0],
            'month': parts[1],
            'file': parts[2],
        }

    def check_label(self, _type='category'):
        """Return {'id', 'name'} of this entry's category label (zeros/empty if missing)."""
        # Fix: .first() instead of .one() — .one() raises NoResultFound for a
        # missing label, which the fallback below was clearly meant to handle.
        label = self.query(Label).\
            filter_by(id=self.db_entry.category, type=_type).first()
        ret_label = {'id': 0, 'name': ''}
        if label:
            ret_label['name'] = label.name
            ret_label['id'] = label.id
        return ret_label
63,959 | mrosata/python-yurp | refs/heads/master | /main.py | import os
import pprint
from datetime import datetime
from flask import Flask, render_template, url_for, request,\
redirect, flash, send_from_directory, send_file
from sqlalchemy import create_engine, update
from sqlalchemy.orm import sessionmaker
from database_setup import Entry, Label, Day, Attachment, CategoryLink, Base
from attachments import attach
from parsed_entry import Parsed_Entry
dump = pprint.PrettyPrinter(depth=8,indent=4)  # debugging pretty-printer (not used by the routes below)
engine = create_engine('sqlite:///yurp.application.db')
# Initialise the Flask application and the shared db session object.
app = Flask(__name__)
DBSession = sessionmaker(bind=engine)
session = DBSession()
@app.route('/')
def main_page():
    """Front page: recent entries plus the category list."""
    return render_template(
        '_main.html',
        entries=get_entries(),
        categories=session.query(Label).filter_by(type='category').all())
@app.route('/archive/')
def archive_page():
    """Archive listing: same data as the front page, different template."""
    return render_template(
        '_archive.html',
        entries=get_entries(),
        categories=session.query(Label).filter_by(type='category').all())
@app.route('/entry/<int:entry_id>')
def entry_page(entry_id):
    """Single-entry page; renders the 404 template for unknown ids."""
    try:
        row = session.query(Entry).filter_by(id=entry_id).first()
        # Parsed_Entry raises AttributeError on None when the id is unknown.
        return render_template('_entry.html',
                               entry=Parsed_Entry(row, session.query))
    except AttributeError:
        return render_template('_404.html')
@app.route('/contact/')
def contact_page():
    """Static contact page."""
    return render_template('_contact.html')
# Static Uploads, Attachments for entries.
@app.route('/uploadss/<year>/<month>/<file>')
def serve_attachment(year, month, file):
    """Serve an uploaded attachment from uploads/<year>/<month>/<file>.

    NOTE(review): the route segment 'uploadss' looks like a typo, but
    templates may depend on it via url_for, so it is left unchanged.
    """
    directory = os.path.join(app.config['UPLOAD_FOLDER'], year, month)
    return send_from_directory(directory, file)
# Hybrid Pages, pages that both display and have alternative POST functionality
@app.route('/create/entry/', methods=['GET', 'POST'])
def new_page():
    """GET: show the new-entry form. POST: validate, insert and confirm.

    NOTE(review): the validation/upload flow duplicates edit_page() almost
    verbatim; a shared helper would keep the two in sync.
    """
    if request.method == 'POST':
        # Check that we have all the required info
        for val in ['author', 'title', 'content', 'category', 'publicity']:
            # Make sure that the required fields have been filled out.
            if (not (val in request.form)) or request.form[val] == '':
                flash('Form was not filled out properly')
                return render_template('_main.html')
        author = int(request.form['author'])
        title = request.form['title']
        content = request.form['content']
        gist = request.form['gist']
        category = int(request.form['category'])
        publicity = request.form['publicity']
        new_id = insert_entry(author, title, content, gist, category, publicity)
        save_label_link(new_id, category)
        # Check if there was an image sent with the entry form as well.
        upload_handler = attach(app, request)
        saved_file = upload_handler.attach('image')
        if saved_file:
            insert_attachment(saved_file, new_id, 'image')
        flash('Inserted new Entry')
    # Regardless of request.method, we will render the new entry page
    categories = session.query(Label).filter_by(type='category').all()
    return render_template('_new.html', categories=categories)
@app.route('/edit/entry/<int:entry_id>/', methods=['GET', 'POST'])
def edit_page(entry_id):
    """GET: show the edit form for an entry. POST: validate and update it.

    NOTE(review): the validation/upload flow duplicates new_page() almost
    verbatim; a shared helper would keep the two in sync.
    """
    if request.method == 'POST':
        # Check that we have all the required info
        for val in ['author', 'title', 'content', 'category', 'publicity']:
            # Make sure that the required fields have been filled out.
            if (not (val in request.form)) or request.form[val] == '':
                flash('Form was not filled out properly')
                return render_template('_main.html')
        author = int(request.form['author'])
        title = request.form['title']
        content = request.form['content']
        gist = request.form['gist']
        category = int(request.form['category'])
        publicity = request.form['publicity']
        save_label_link(entry_id, category)
        update_entry(entry_id, author, title, content,
                     gist, category, publicity)
        # Check if there was an image sent with the entry form as well.
        upload_handler = attach(app, request)
        saved_file = upload_handler.attach('image')
        if saved_file:
            update_attachment(saved_file, entry_id, 'image')
        flash('Edited the Entry')
    # Regardless of request.method, we will render the edit entry page
    categories = session.query(Label).filter_by(type='category').all()
    # Need to get the entry for this page
    entry = get_entry(entry_id)
    return render_template('_edit.html',
                           categories=categories, entry=entry)
@app.route('/create/label/category', methods=['POST'])
def create_category():
    """POST-only handler creating a new 'category' Label, then redirecting back.

    Fix: the original guard used `... is not ''` — an identity comparison
    that is effectively always true — instead of comparing by value.
    """
    name = request.form.get('newCategory', '')
    if name != '':
        message = insert_label('category', name)
        flash(message, 'Category')
    else:
        flash('Sorry, You didn\'t fill out the form correctly!')
    return redirect(request.referrer)
# CRUD Functionality (Create, Delete)
def insert_label(_type, name):
    """Create a Label row of kind *_type*; return a confirmation message."""
    session.add(Label(type=_type, name=name))
    session.commit()
    return 'successfully inserted category %s!' % name
def insert_entry(author, title, content, gist, category, publicity):
    """Insert a new Entry row and return its generated primary key."""
    row = Entry(author=author, title=title, gist=gist, content=content,
                category=category, publicity=publicity)
    session.add(row)
    session.commit()
    return row.id
def update_entry(id, author, title, content, gist, category, publicity):
    """Overwrite an Entry's fields and stamp last_edit.

    Returns the number of rows matched by the UPDATE (not the entry itself).
    """
    now = datetime.utcnow()
    rowcount = session.query(Entry).filter_by(id=id).\
        update({Entry.author: author, Entry.title: title, Entry.gist: gist,
                Entry.content: content, Entry.category: category,
                Entry.publicity: publicity, Entry.last_edit: now})
    session.commit()
    return rowcount
def insert_attachment(src, entry_id, _type):
    """Create an Attachment row for an entry; return its new id."""
    row = Attachment(src=src, entry_id=entry_id, type=_type)
    session.add(row)
    session.commit()
    return row.id
# TODO: Delete the old uploaded file
def update_attachment(src, entry_id, _type):
    """Point the entry's attachment row at a new file.

    Returns the number of rows matched by the UPDATE.
    """
    matched = session.query(Attachment).filter_by(entry_id=entry_id).\
        update({Attachment.src: src, Attachment.entry_id: entry_id,
                Attachment.type: _type})
    session.commit()
    return matched
def delete_entry(id):
    """Placeholder — entry deletion is not implemented yet."""
    pass
def save_label_link(entry_id, label_id):
    """Link an entry to a label via the cat_links table; return the link id."""
    link = CategoryLink(label_id=label_id, entry_id=entry_id)
    session.add(link)
    session.commit()
    return link.id
# Functions for returning data from db for use in app.
def get_entries(limit=12):
    """Return up to *limit* entries, each wrapped as a Parsed_Entry."""
    rows = session.query(Entry).limit(limit)
    return [Parsed_Entry(row, session.query) for row in rows]
def get_entry(id):
    """Fetch a single entry by primary key, wrapped as a Parsed_Entry."""
    row = session.query(Entry).filter_by(id=int(id)).first()
    return Parsed_Entry(row, session.query)
running_on_python_anywhere = False  # flip to True when deploying on PythonAnywhere
# Make sure to keep the uploads directory name relative
app.config['UPLOAD_FOLDER'] = 'uploads'
# NOTE(review): a secret key hard-coded in source should be moved to
# configuration/environment for any real deployment.
app.secret_key = 'ZaR%tC3SzAw48vm2./2!'
if not running_on_python_anywhere and __name__ == '__main__':
    app.debug = True
    app.run()
63,960 | mrosata/python-yurp | refs/heads/master | /attachments.py | #!/usr/bin/env python
__author__ = 'Michael Rosata mrosata1984@gmail.com'
__package__ = ''
import os
import random
import string
import pprint
from datetime import datetime
from werkzeug.utils import secure_filename as werkzeug_secure_filename
# File extensions accepted for upload, and the cap on generated file names.
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
FILENAME_MAXLENGTH = 32
if __name__ == '__main__':
    """Tests Go Here"""
else:
    class attach:
        """Saves a file uploaded through a Flask request into the
        application's uploads/<year>/<month>/ directory."""

        def __init__(self, app, request):
            """Remember the app/request and resolve the upload directory.

            :param app: the Flask application (supplies root_path and the
                UPLOAD_FOLDER config entry)
            :param request: the current request carrying the posted file
            """
            self.app = app
            self.request = request
            # Absolute base directory that all uploads live under.
            self.base = os.path.abspath(os.path.join(app.root_path,
                                        self.app.config['UPLOAD_FOLDER']))
            self.upload_dir = self.get_upload_directory()

        def attach(self, post_name):
            """Save the file posted under *post_name*.

            :return: saved path relative to the uploads base, or False when
                saving failed or no valid file was posted.
            """
            self.post_name = post_name
            try:
                return self.upload_file()
            except IOError as error:  # `as` syntax works on both py2 and py3
                print(error)
                return False

        def upload_file(self):
            """Validate, rename and store the posted file.

            :raises IOError: when the target directory could not be created
            :return: saved path relative to the uploads base, or False
            """
            if self.request.method == 'POST':
                _file = self.request.files[self.post_name]
                if _file and file_allowed(_file.filename):
                    if not self.upload_dir:
                        raise IOError('could not resolve dir')
                    filename = secure_filename(_file.filename)
                    image_path = os.path.join(self.upload_dir, filename)
                    _file.save(image_path)
                    # Fix: the original used image_path.lstrip(self.base),
                    # but str.lstrip strips any leading *characters* found in
                    # the argument (it is not prefix removal) and could eat
                    # into the year/month part of the returned path.
                    return os.path.relpath(image_path, self.base)
                else:
                    # There was no file, or the extension is not acceptable.
                    return False

        def get_upload_directory(self):
            """Return <base>/<year>/<month>, creating it if missing.

            :return: the absolute directory path, or False on OSError
            """
            year = str(datetime.utcnow().year)
            month = str(datetime.utcnow().month)
            full_path = os.path.join(self.base, year, month)
            if not os.path.isdir(full_path):
                try:
                    # makedirs creates the year level too when needed, so the
                    # original isdir(year)/mkdir special-case was redundant.
                    os.makedirs(full_path)
                except OSError:
                    return False
            return full_path
def secure_filename(filename):
    """Make *filename* unique and safe for storage.

    A random lowercase suffix is appended to the (truncated) base name and
    the result is capped at FILENAME_MAXLENGTH characters before being
    sanitised by werkzeug.

    :param filename: client-supplied file name; must contain a '.'
        (callers guard with file_allowed())
    :return: a sanitised, length-capped, randomised file name
    """
    base = filename.rsplit('.', 1)[0]
    ext = filename.rsplit('.', 1)[1]
    # string.ascii_lowercase exists on both Python 2 and 3
    # (string.lowercase is Python-2-only).
    uid = ''.join(random.choice(string.ascii_lowercase) for i in range(32))
    # Keep the final name (including '.' + extension) within the cap.
    slice_ind = FILENAME_MAXLENGTH - (len(ext) + 1)
    if len(base) > 16:
        base = base[:16]
    filename = (base + uid)[:slice_ind] + '.' + ext
    return werkzeug_secure_filename(filename)
def file_allowed(filename):
    """Return True iff *filename* has an extension in ALLOWED_EXTENSIONS.

    :param filename: client-supplied file name
    :return: bool (False when the name has no '.' at all)
    """
    if '.' not in filename:
        return False
    return filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
63,961 | lupries/TAGN2 | refs/heads/master | /train.py | import time
import torch
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import make_grid
import numpy as np
import matplotlib.pyplot as plt
def train_model(model, criterion, dataloader, optimizer, metrics, num_epochs=1):
    """Train `model` on `dataloader`, logging loss/F1 to TensorBoard.

    Expects the model to return a dict with key 'out' (torchvision
    segmentation-head style) and the dataloader to yield dicts with
    'image' and 'mask' tensors. `metrics` maps names to callables; only
    the entry named 'f1_score' is actually evaluated below.
    Returns the (trained, mutated) model.
    """
    since = time.time()
    writer = SummaryWriter()
    best_loss = 1e10
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)
    n_iter = 0
    # Per-epoch summary keys; the 'Test_*' entries are reserved but unused
    # because only the 'Train' phase runs below.
    fieldnames = ['epoch', 'Train_loss', 'Test_loss'] + \
                 [f'Train_{m}' for m in metrics.keys()] + \
                 [f'Test_{m}' for m in metrics.keys()]
    for epoch in range(1, num_epochs+1):
        print('Epoch {}/{}'.format(epoch, num_epochs))
        print('-' * 10)
        batchsummary = {a: [0] for a in fieldnames}
        for phase in ['Train']:
            if phase == 'Train':
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode
            for sample in tqdm(iter(dataloader)):
                n_iter += 1
                inputs = sample['image'].to(device)
                masks = sample['mask'].to(device)
                optimizer.zero_grad()
                # Gradients are only tracked during the training phase.
                with torch.set_grad_enabled(phase == 'Train'):
                    outputs = model(inputs)
                    loss = criterion(outputs['out'], masks)
                    # Sigmoid over raw logits so predictions can be thresholded.
                    y_pred = 1.0/(1+np.exp(-outputs['out'].data.cpu().numpy().ravel()))
                    y_true = masks.data.cpu().numpy().ravel()
                    for name, metric in metrics.items():
                        if name == 'f1_score':
                            score = metric(y_true > 0, y_pred > 0.5)
                            batchsummary[f'{phase}_{name}'].append(score)
                            writer.add_scalar('Accuracy/f1_score',score,n_iter)
                    if phase == 'Train':
                        loss.backward()
                        optimizer.step()
                        writer.add_scalar('Loss/Train',loss,n_iter)
                        # NOTE(review): tracks the best single-batch loss,
                        # not an epoch average.
                        best_loss = loss if loss < best_loss else best_loss
            batchsummary['epoch'] = epoch
            # The last batch's loss stands in for the whole epoch's loss.
            epoch_loss = loss
            batchsummary[f'{phase}_loss'] = epoch_loss.item()
            print('{} Loss: {:.4f}'.format(phase,loss))
        # Average the collected per-batch metric scores for this epoch.
        for field in fieldnames[3:]:
            batchsummary[field] = np.mean(batchsummary[field])
        print(batchsummary)
        #grid = make_grid(outputs['out'])
        #writer.add_image('masks', grid, epoch)
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Lowest Loss: {:4f}'.format(best_loss))
    writer.close()
    return model
def train_full_model(model, criterion, dataloader, optimizer, metrics, num_epochs=1):
    """Same training loop as train_model(), but for models whose forward
    returns a raw tensor instead of a dict with key 'out'.

    NOTE(review): this duplicates train_model() almost verbatim; the only
    differences are `outputs` vs `outputs['out']`.
    """
    since = time.time()
    writer = SummaryWriter()
    best_loss = 1e10
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)
    n_iter = 0
    # 'Test_*' entries are reserved but unused (only 'Train' runs below).
    fieldnames = ['epoch', 'Train_loss', 'Test_loss'] + \
                 [f'Train_{m}' for m in metrics.keys()] + \
                 [f'Test_{m}' for m in metrics.keys()]
    for epoch in range(1, num_epochs+1):
        print('Epoch {}/{}'.format(epoch, num_epochs))
        print('-' * 10)
        batchsummary = {a: [0] for a in fieldnames}
        for phase in ['Train']:
            if phase == 'Train':
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode
            for sample in tqdm(iter(dataloader)):
                n_iter += 1
                inputs = sample['image'].to(device)
                masks = sample['mask'].to(device)
                optimizer.zero_grad()
                with torch.set_grad_enabled(phase == 'Train'):
                    outputs = model(inputs)
                    loss = criterion(outputs, masks)
                    # Sigmoid over raw logits for thresholded F1 computation.
                    y_pred = 1.0/(1+np.exp(-outputs.data.cpu().numpy().ravel()))
                    y_true = masks.data.cpu().numpy().ravel()
                    for name, metric in metrics.items():
                        if name == 'f1_score':
                            score = metric(y_true > 0, y_pred > 0.5)
                            batchsummary[f'{phase}_{name}'].append(score)
                            writer.add_scalar('Accuracy/f1_score',score,n_iter)
                    if phase == 'Train':
                        loss.backward()
                        optimizer.step()
                        writer.add_scalar('Loss/Train',loss,n_iter)
                        # NOTE(review): best single-batch loss, not an epoch average.
                        best_loss = loss if loss < best_loss else best_loss
            batchsummary['epoch'] = epoch
            # Last batch's loss stands in for the epoch loss.
            epoch_loss = loss
            batchsummary[f'{phase}_loss'] = epoch_loss.item()
            print('{} Loss: {:.4f}'.format(phase,loss))
        for field in fieldnames[3:]:
            batchsummary[field] = np.mean(batchsummary[field])
        print(batchsummary)
        #grid = make_grid(outputs['out'])
        #writer.add_image('masks', grid, epoch)
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Lowest Loss: {:4f}'.format(best_loss))
    writer.close()
    return model
def show_results(model, dataloader, number):
    """Plot input image, thresholded prediction and ground-truth mask for
    roughly `number` samples (matplotlib, blocking windows).

    Expects the model to return a dict with key 'out' holding logits.
    """
    model.eval()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    counter = 0
    # Number of batches needed to show about `number` samples.
    max_count = int(number/dataloader.batch_size)
    for sample in dataloader:
        inputs = sample['image'].to(device)
        masks = sample['mask'].to(device)
        with torch.set_grad_enabled(False):
            outputs = model(inputs)
            y_pred = outputs['out'].data.cpu().numpy()
            y_true = masks.data.cpu().numpy()
            images = inputs.cpu().numpy()
            for i in range(dataloader.batch_size):
                image = images[i]
                mask_pred = y_pred[i][0]
                mask = y_true[i][0]
                print(np.max(image[1,:,:]), np.min(mask_pred))
                print(np.max(mask), np.min(mask))
                print(image.shape,mask_pred.shape,mask.shape)
                plt.subplot(3,1,1)
                plt.imshow(image.transpose(1,2,0))
                plt.subplot(3,1,2)
                # Logits -> probabilities, then threshold at 0.5.
                mask_pred = 1.0/(1+np.exp(-mask_pred))
                plt.imshow(mask_pred>0.5)
                plt.subplot(3,1,3)
                plt.imshow(mask>0)
                plt.show(block=True)
            counter += 1
            if counter >= max_count:
                return
def show_results_img(model, dataloader, number):
    """Plot prediction and ground truth as colour overlays on the input image
    for roughly `number` samples (matplotlib, blocking windows).

    Expects the model to return a dict with key 'out' holding logits.
    """
    model.eval()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    counter = 0
    # Number of batches needed to show about `number` samples.
    max_count = int(number/dataloader.batch_size)
    for sample in dataloader:
        inputs = sample['image'].to(device)
        masks = sample['mask'].to(device)
        with torch.set_grad_enabled(False):
            outputs = model(inputs)
            y_pred = outputs['out'].data.cpu().numpy()
            y_true = masks.data.cpu().numpy()
            images = inputs.cpu().numpy()
            for i in range(dataloader.batch_size):
                image = images[i]
                mask_pred = y_pred[i][0]
                mask = y_true[i][:]
                # Collapse possible multi-channel GT masks into one map.
                mask = np.squeeze(np.sum(mask, axis=0))
                # Tint channel 0 up and the others down where the mask is set;
                # same idea below for the thresholded sigmoid prediction.
                gt_img = image + np.asarray([mask, -mask, -mask],dtype=float)*0.3
                image += np.asarray([1/(1+np.exp(-mask_pred))>0.5, -1*(1/(1+np.exp(-mask_pred))>0.5), -1*(1/(1+np.exp(-mask_pred))>0.5)],dtype=float)*0.2
                #print(np.max(image[0,:,:]), np.min(mask_pred))
                #print(np.max(mask), np.min(mask))
                print(image.shape,mask_pred.shape,mask.shape)
                plt.subplot(2,1,1)
                plt.imshow(image.transpose(1,2,0))
                plt.subplot(2,1,2)
                #mask_pred = 1.0/(1+np.exp(-mask_pred))
                plt.imshow(gt_img.transpose(1,2,0))
                #plt.subplot(3,1,3)
                #plt.imshow(mask>0)
                plt.show(block=True)
            counter += 1
            if counter >= max_count:
                return
def show_results_graph(model, dataloader, number):
    """Plot input, two intermediate node-state predictions, final prediction
    and ground truth for graph-model outputs (matplotlib, blocking windows).

    Assumes 5-D batches (batch, frames, C, H, W) with frames == batch_size,
    and that the model exposes `node_states` — TODO confirm against the
    dataloader/model used.
    NOTE(review): the 3x5 subplot grid only accommodates j < 3.
    """
    model.eval()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    counter = 0
    max_count = int(number/dataloader.batch_size)
    for sample in dataloader:
        inputs = sample['image'].to(device)
        masks = sample['mask'].to(device)
        with torch.set_grad_enabled(False):
            outputs = model(inputs)
            y_pred = outputs.data.cpu().numpy()
            # Intermediate per-iteration node states kept by the model.
            y_pred_1 = model.node_states[0].cpu().numpy()
            y_pred_2 = model.node_states[1].cpu().numpy()
            y_true = masks.data.cpu().numpy()
            images = inputs.cpu().numpy()
            for i in range(dataloader.batch_size):
                for j in range(dataloader.batch_size):
                    image = images[i,j]
                    mask_pred = y_pred[i,j]
                    mask_pred_1 = y_pred_1[i,j]
                    mask_pred_2 = y_pred_2[i,j]
                    mask = y_true[i,j]
                    print(np.max(image[1,:,:]), np.min(mask_pred))
                    print(np.max(mask), np.min(mask))
                    print(image.shape,mask_pred.shape,mask.shape)
                    plt.subplot(3,5,1+j*5)
                    plt.imshow(image.transpose(1,2,0))
                    plt.subplot(3,5,2+j*5)
                    # Sigmoid + 0.5 threshold for every predicted map below.
                    mask_pred_1 = 1.0/(1+np.exp(-mask_pred_1))
                    plt.imshow(mask_pred_1>0.5)
                    plt.subplot(3,5,3+j*5)
                    mask_pred_2 = 1.0/(1+np.exp(-mask_pred_2))
                    plt.imshow(mask_pred_2>0.5)
                    plt.subplot(3,5,4+j*5)
                    mask_pred = 1.0/(1+np.exp(-mask_pred))
                    plt.imshow(mask_pred>0.5)
                    plt.subplot(3,5,5+j*5)
                    plt.imshow(mask>0)
                plt.show(block=True)
            counter += 1
            if counter >= max_count:
                return
63,962 | lupries/TAGN2 | refs/heads/master | /datasets/__init__.py | from .dataloader import SegDataset, create_dataloader | {"/datasets/__init__.py": ["/datasets/dataloader.py"], "/models/graphnet/AttentiveGraphNeuralNetwork.py": ["/models/convgru/__init__.py", "/models/attention/__init__.py"], "/models/TAGNN.py": ["/models/graphnet/__init__.py"], "/datasets/dataloader_AGNN.py": ["/datasets/dataloader.py"], "/models/__init__.py": ["/models/TAGNN.py"], "/models/graphnet/__init__.py": ["/models/graphnet/AttentiveGraphNeuralNetwork.py"], "/models/attention/__init__.py": ["/models/attention/attention.py"]} |
63,963 | lupries/TAGN2 | refs/heads/master | /models/attention/attention.py | import torch
import torch.nn as nn
class SelfAttention(nn.Module):
    """Intra-node (self) attention from the AGNN framework
    ("Zero-Shot Video Object Segmentation via Attentive Graph Neural
    Networks"). Computes the residual non-local response

        m_ii = alpha * softmax((W_f x)(W_h x)^T) (W_l x) + x
    """

    def __init__(self, input_channels):
        super(SelfAttention, self).__init__()
        # Query/key projections reduce channels 4x to keep the HWxHW
        # attention computation cheap.
        self.W_f = nn.Conv2d(input_channels, input_channels//4, kernel_size=1, stride=1)
        self.W_h = nn.Conv2d(input_channels, input_channels//4, kernel_size=1, stride=1)
        self.activation = nn.Softmax(dim=-1)
        self.W_l = nn.Conv2d(input_channels, input_channels, kernel_size=1, stride=1)
        # Learnable residual gate; zero-init makes the block start as identity.
        self.alpha = nn.Parameter(torch.zeros(1))

    def forward(self, x):
        """Return the self-attention message m_ii for node embedding *x*.

        :param x: tensor of shape (batch, channels, height, width)
        :return: tensor of the same shape
        """
        n, c, h, w = x.size()
        query = self.W_f(x).view(n, -1, h * w).permute(0, 2, 1)  # (n, HW, c/4)
        key = self.W_h(x).view(n, -1, h * w)                     # (n, c/4, HW)
        attn = self.activation(torch.bmm(query, key))            # (n, HW, HW)
        value = self.W_l(x).view(n, -1, h * w)                   # (n, c, HW)
        out = torch.bmm(value, attn.permute(0, 2, 1)).view(n, c, h, w)
        return self.alpha * out + x
class InterAttention(nn.Module):
    """Pairwise (inter-node) attention from the AGNN framework
    ("Zero-Shot Video Object Segmentation via Attentive Graph Neural
    Networks"). Produces the message node j sends to node i:

        e_ij = h_i W_c h_j^T,   m_ij = softmax(e_ij) h_j
    """

    def __init__(self, input_features, output_features):
        super(InterAttention, self).__init__()
        self.W_c = nn.Linear(input_features, output_features, bias=False)
        self.activation = nn.Softmax(dim=-1)

    def forward(self, node1, node2):
        """Return message m_ij computed between two node embeddings.

        :param node1: h_i, shape (batch, C, H, W)
        :param node2: h_j, shape (batch, C, H, W)
        :return: tensor of shape (batch, C, H, W)
        """
        n, _, h, w = node1.size()
        flat_i = node1.view(n, -1, h * w).permute(0, 2, 1)  # (n, HW, C)
        flat_j = node2.view(n, -1, h * w)                   # (n, C, HW)
        energy = torch.bmm(self.W_c(flat_i), flat_j)        # e_ij: (n, HW, HW)
        weights = self.activation(energy)
        message = torch.bmm(flat_j, weights.permute(0, 2, 1))
        return message.view(n, -1, h, w)
class GAP(nn.Module):
    """Gated average pooling: per-channel confidences in (0, 1) computed as
    sigmoid(GlobalAvgPool(W_g conv x + b_g))."""

    def __init__(self, input_channels, output_channels):
        super(GAP, self).__init__()
        self.W_g = nn.Conv2d(input_channels, output_channels, kernel_size=1, stride=1, bias=True)
        self.pooling = nn.AdaptiveAvgPool2d(1)
        self.activation = nn.Sigmoid()

    def forward(self, x):
        """Return the channel-wise confidence g_ji for message *x*.

        :param x: input message of shape (batch, C_in, H, W)
        :return: tensor of shape (batch, C_out, 1, 1), values in (0, 1)
        """
        gate = self.pooling(self.W_g(x))
        return self.activation(gate)
63,964 | lupries/TAGN2 | refs/heads/master | /scripts/utils/utils.py | import yaml
import glob
import os
import numpy as np
import torch
import cv2
from TAGN2.datasets.dataloader_AGNN import create_dataloader
from TAGN2.datasets.dataloader import Resize, ToTensor, Normalize
from torchvision import transforms
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
import copy
import matplotlib.pyplot as plt
def generate_masks(model, root_dir, target_dir, imageFolder, img_size, batch_size, step):
    """Run `model` over every 2016 'val' sequence listed in db_info.yaml
    (DAVIS-style layout) and write the predicted masks — resized to 854x480
    and thresholded at 0.5 — to target_dir/<sequence>/<frame>.png.

    Frames are grouped into windows of `batch_size` frames spaced `step`
    apart, wrapping around at the end of a sequence.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)
    model.eval()
    transform = transforms.Compose([Resize(img_size, img_size), ToTensor(), Normalize()])
    # NOTE(review): yaml.load without an explicit Loader is deprecated/unsafe.
    info_file = open(root_dir + 'db_info.yaml', 'r')
    seq_list = yaml.load(info_file)['sequences']
    for seq in seq_list:
        if seq['year'] == 2016 and seq['set'] == 'val':
            seq_name = seq['name']
            os.mkdir(os.path.join(target_dir, seq_name))
            image_names = sorted(glob.glob(os.path.join(root_dir, imageFolder, seq_name, '*')))
            # Expand the frame list: for every start frame, append its whole
            # window of batch_size frames `step` apart (wrapping at the end).
            new_image_names = []
            for elem in range(len(image_names)):
                if elem + step * (batch_size - 1) < len(image_names):
                    for frame in range(batch_size):
                        new_image_names.append(image_names[elem + frame * step])
                else:
                    for frame in range(batch_size):
                        new_image_names.append(image_names[elem + frame * step - len(image_names)])
            image_names = new_image_names
            for i in range(int(len(image_names)/batch_size)):
                batch = torch.zeros(1, batch_size, 3, img_size[1], img_size[0])
                # File name of the window's first frame, e.g. '00000.jpg'.
                img = image_names[i*batch_size][-9:]
                for j in range(batch_size):
                    if i + j > int(len(image_names) / batch_size):
                        break
                    image = cv2.imread(image_names[i*batch_size + j], 1).transpose(2, 0, 1)
                    image_dict = {'image': image, 'mask': image}
                    image_dict = transform(image_dict)
                    batch[0, j] = image_dict['image']
                with torch.set_grad_enabled(False):
                    inputs = batch.to(device)
                    outputs = model(inputs)
                # Only the first frame's prediction of the window is saved.
                mask_img = outputs[0][0].data.cpu().numpy()
                mask_img = 1.0 / (1.0 + np.exp(-mask_img))
                # Resize back to the DAVIS evaluation resolution.
                retransform = Resize((854, 480), (854, 480))
                mask_img = retransform({'image': mask_img, 'mask': mask_img})
                mask_img = np.asarray(mask_img['image'] > 0.5, dtype=np.uint8)
                img_png = img[:-4] + '.png'
                print(img_png)
                cv2.imwrite(os.path.join(target_dir, seq_name, img_png), mask_img)
                #plt.subplot(1, 1, 1)
                #plt.imshow(mask_img)
                #plt.show(block=True)
def oneshot_baseline_validation(model, criterion, optimizer, metrics, root_dir, target_dir, iterations, imageFolder,
                                maskFolder,
                                img_size, batch_size):
    """One-shot validation for the DeepLab baseline on DAVIS-2016 'val'.

    Per sequence: fine-tune a copy of the initial weights on the first
    annotated frame (`online_baseline_loop`), then run the refined model over
    the whole sequence, saving thresholded masks and logging refined vs.
    unrefined loss/F1 to TensorBoard. The model's initial weights are restored
    before each sequence and again at the end.
    """
    writer = SummaryWriter('drive/My Drive/oneshot/runs/SGD_1e-4_10iter_last_layer')
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)
    n_iter = 0
    # cache initial model weights
    initial_state = copy.deepcopy(model.state_dict())
    transform = transforms.Compose([Resize(img_size, img_size), ToTensor(), Normalize()])
    info_file = open(root_dir + 'db_info.yaml', 'r')
    seq_list = yaml.load(info_file)['sequences']
    for seq in seq_list:
        if seq['year'] == 2016 and seq['set'] == 'val':
            seq_name = seq['name']
            print(seq_name)
            os.mkdir(os.path.join(target_dir, seq_name))
            # load Image names and create batch of first frame
            image_names = sorted(glob.glob(os.path.join(root_dir, imageFolder, seq_name, '*')))
            mask_names = sorted(glob.glob(os.path.join(root_dir, maskFolder, seq_name, '*')))
            img_batch = torch.zeros(batch_size, 3, img_size[1], img_size[0])
            mask_batch = torch.zeros(batch_size, 1, img_size[1], img_size[0])
            # The online-training batch is the first frame repeated batch_size times.
            for j in range(batch_size):
                image = cv2.imread(image_names[0], 1).transpose(2, 0, 1)
                mask = cv2.imread(mask_names[0], 0)
                mask = np.asarray(mask > 0).astype(float)  # binarize annotation
                image_dict = {'image': image, 'mask': mask}
                image_dict = transform(image_dict)
                img_batch[j] = image_dict['image']
                mask_batch[j] = image_dict['mask']
            inputs = img_batch.to(device)
            mask = mask_batch.to(device)
            # do online learning on first frame
            model.train()
            model.load_state_dict(initial_state)
            new_model_state = online_baseline_loop(inputs, mask, iterations, model, criterion, optimizer)
            model.load_state_dict(new_model_state)
            new_state = copy.deepcopy(model.state_dict())
            # generate outputs for the whole sequence
            model.eval()
            for i in range(int(len(image_names) / batch_size) + 1):
                n_iter += 1
                img = []
                img_batch = torch.zeros(batch_size, 3, img_size[1], img_size[0])
                mask_batch = torch.zeros(batch_size, 1, img_size[1], img_size[0])
                for j in range(batch_size):
                    # Last partial batch: stop filling once frames run out
                    # (remaining slots stay zero).
                    if i * batch_size + j == int(len(image_names)):
                        break
                    img.append(image_names[i * batch_size + j][-9:])
                    image = cv2.imread(image_names[i * batch_size + j], 1).transpose(2, 0, 1)
                    mask = cv2.imread(mask_names[i * batch_size + j], 0)
                    mask = np.asarray(mask > 0).astype(float)
                    image_dict = {'image': image, 'mask': mask}
                    image_dict = transform(image_dict)
                    img_batch[j] = image_dict['image']
                    mask_batch[j] = image_dict['mask']
                inputs = img_batch.to(device)
                masks = mask_batch.to(device)
                model.load_state_dict(new_state)
                with torch.set_grad_enabled(False):
                    outputs = model(inputs)['out']
                    for j in range(int(outputs.shape[0])):
                        if i * batch_size + j == int(len(image_names)):
                            break
                        mask_img = outputs[j].data.cpu().numpy()
                        mask_img = 1.0 / (1.0 + np.exp(-mask_img))  # sigmoid
                        retransform = Resize((854, 480), (854, 480))
                        mask_img = retransform({'image': mask_img, 'mask': mask_img})
                        mask_img = np.asarray(mask_img['image'] > 0.5, dtype=np.uint8)
                        img_png = img[j][:-4] + '.png'
                        cv2.imwrite(os.path.join(target_dir, seq_name, img_png), mask_img)
                # classify batch with refined model
                loss_refined, score_refined = classify_baseline_sequence(inputs, masks, model, criterion, metrics)
                # classify batch with unrefined model (comparison)
                model.load_state_dict(initial_state)
                loss_unrefined, score_unrefined = classify_baseline_sequence(inputs, masks, model, criterion, metrics)
                # log to tensorboard
                writer.add_scalars('Accuracy/f1_score', {'refined': score_refined,
                                                         'unrefined': score_unrefined,
                                                         'gain': score_refined - score_unrefined}, n_iter)
                writer.add_scalars('Loss', {'refined': loss_refined,
                                            'unrefined': loss_unrefined,
                                            'gain': loss_refined - loss_unrefined}, n_iter)
    writer.close()
    model.load_state_dict(initial_state)
def online_baseline_loop(inputs, masks, iterations, model, criterion, optimizer):
    """Fine-tune `model` on one (inputs, masks) pair for `iterations` steps.

    The per-step loss is printed and logged to the 'online_plots' TensorBoard
    run; the refined state dict is returned.
    """
    writer = SummaryWriter('online_plots')
    for step in range(iterations):
        optimizer.zero_grad()
        with torch.set_grad_enabled(True):
            logits = model(inputs)['out']
            step_loss = criterion(logits, masks)
            print('online loss:' + str(step_loss))
            step_loss.backward()
            optimizer.step()
            writer.add_scalars('Loss', {'loss': step_loss}, step)
    writer.close()
    return model.state_dict()
def classify_baseline_sequence(frames, masks, model, criterion, metrics):
    """Evaluate a DeepLab-style model on one batch without gradients.

    Args:
        frames: input batch, passed straight to `model`.
        model: callable returning a dict with key 'out' holding raw logits.
        masks: ground-truth masks in whatever layout `criterion` expects.
        criterion: loss applied to the raw logits.
        metrics: mapping of metric name -> callable(y_true, y_pred); only the
            'f1_score' entry is evaluated, on sigmoid outputs thresholded at 0.5.

    Returns:
        (loss, score); score is None when `metrics` has no 'f1_score' entry.
        (Fix: the original left `score` unbound in that case, raising
        NameError at the return.)
    """
    score = None
    with torch.set_grad_enabled(False):
        outputs = model(frames)['out']
        loss = criterion(outputs, masks)
        # Sigmoid on flattened logits; masks are flattened to match.
        y_pred = 1.0 / (1 + np.exp(-outputs.data.cpu().numpy().ravel()))
        y_true = masks.data.cpu().numpy().ravel()
        for name, metric in metrics.items():
            if name == 'f1_score':
                score = metric(y_true > 0, y_pred > 0.5)
    return loss, score
def standard_oneshot_validation(model, criterion, optimizer, metrics, root_dir, target_dir, iterations, imageFolder,
                                maskFolder, img_size, batch_size, step):
    """One-shot validation for the TAGNN model on DAVIS-2016 'val'.

    Frames are regrouped into strided groups {x, x+step, x+2*step, ...}; the
    model is fine-tuned per sequence on groups built from the first annotated
    frame (`standard_online_loop`), then the refined model segments the rest
    of the sequence. Masks for the first element of each group are written to
    `target_dir/<seq_name>/`; refined vs. unrefined loss/F1 are logged to
    TensorBoard. Initial weights are restored per sequence and at the end.
    """
    writer = SummaryWriter('drive/My Drive/oneshot/runs/TAGNN_SGD_1e-5_5iter_ASPP')
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)
    n_iter = 0
    # cache initial model weights
    initial_state = copy.deepcopy(model.state_dict())
    transform = transforms.Compose([Resize(img_size, img_size), ToTensor(), Normalize()])
    info_file = open(root_dir + 'db_info.yaml', 'r')
    seq_list = yaml.load(info_file)['sequences']
    for seq in seq_list:
        if seq['year'] == 2016 and seq['set'] == 'val':
            seq_name = seq['name']
            print(seq_name)
            os.mkdir(os.path.join(target_dir, seq_name))
            image_names = sorted(glob.glob(os.path.join(root_dir, imageFolder, seq_name, '*')))
            new_image_names = []
            mask_names = sorted(glob.glob(os.path.join(root_dir, maskFolder, seq_name, '*')))
            new_mask_names = []
            # create sequence dataset {{x, x+10, x+20},{x+1, x+1+10, x+1+20},...}
            for elem in range(len(image_names)):
                if elem + step * (batch_size - 1) < len(image_names):
                    for frame in range(batch_size):
                        new_image_names.append(image_names[elem + frame * step])
                        new_mask_names.append(mask_names[elem + frame*step])
                else:
                    # Indices past the end wrap around to the sequence start.
                    for frame in range(batch_size):
                        new_image_names.append(image_names[elem + frame * step - len(image_names)])
                        new_mask_names.append(mask_names[elem + frame * step - len(image_names)])
            image_names = new_image_names
            mask_names = new_mask_names
            # create batch for online training, frames: {{0,10,20},{0,10,20},{0,10,20}}
            img_batch = torch.zeros(3, batch_size, 3, img_size[1], img_size[0])
            mask_batch = torch.zeros(3, batch_size, 1, img_size[1], img_size[0])
            for i in range(img_batch.shape[0]):
                for j in range(batch_size):
                    image = cv2.imread(image_names[j], 1).transpose(2, 0, 1)
                    # All targets come from the first annotated frame.
                    mask = cv2.imread(mask_names[0], 0)
                    mask = np.asarray(mask > 0).astype(float)
                    image_dict = {'image': image, 'mask': mask}
                    image_dict = transform(image_dict)
                    img_batch[i, j] = image_dict['image']
                    mask_batch[i, j] = image_dict['mask']
            inputs = img_batch.to(device)
            mask = mask_batch.to(device)
            # do oneshot training
            model.train()
            model.load_state_dict(initial_state)
            new_model_state = standard_online_loop(inputs, mask, iterations, model, criterion, optimizer)
            model.load_state_dict(new_model_state)
            new_state = copy.deepcopy(model.state_dict())
            model.eval()
            # classify the rest of the sequence
            for i in range(int(len(image_names) / (batch_size*batch_size)) + 1):
                n_iter += 1
                img = []
                # create batch of data
                img_batch = torch.zeros(3, batch_size, 3, img_size[1], img_size[0])
                mask_batch = torch.zeros(3, batch_size, 1, img_size[1], img_size[0])
                for k in range(img_batch.shape[0]):
                    for j in range(batch_size):
                        # Stop at the end of the regrouped frame list.
                        if i * batch_size*batch_size + k * batch_size + j >= int(len(image_names)):
                            break
                        img.append(image_names[i * batch_size*batch_size + k*batch_size + j][-9:])
                        image = cv2.imread(image_names[i * batch_size*batch_size + k*batch_size + j], 1).transpose(2, 0, 1)
                        mask = cv2.imread(mask_names[i * batch_size*batch_size + k*batch_size + j], 0)
                        mask = np.asarray(mask > 0).astype(float)
                        image_dict = {'image': image, 'mask': mask}
                        image_dict = transform(image_dict)
                        img_batch[k, j] = image_dict['image']
                        mask_batch[k, j] = image_dict['mask']
                inputs = img_batch.to(device)
                masks = mask_batch.to(device)
                model.load_state_dict(new_state)
                # generate mask for each of the first elements in the batch
                with torch.set_grad_enabled(False):
                    outputs = model(inputs)
                    for j in range(int(outputs.shape[0])):
                        if j*batch_size >= int(len(img)):
                            break
                        mask_img = outputs[j][0].data.cpu().numpy()
                        mask_img = 1.0 / (1.0 + np.exp(-mask_img))  # sigmoid
                        retransform = Resize((854, 480), (854, 480))
                        mask_img = retransform({'image': mask_img, 'mask': mask_img})
                        mask_img = np.asarray(mask_img['image'] > 0.5, dtype=np.uint8)
                        img_png = img[j*batch_size][:-4] + '.png'
                        print(img_png)
                        cv2.imwrite(os.path.join(target_dir, seq_name, img_png), mask_img)
                        #plt.subplot(1, 1, 1)
                        #plt.imshow(mask_img)
                        #plt.show(block=True)
                # classify batch with refined model
                loss_refined, score_refined = classify_baseline_sequence(inputs, masks, model, criterion, metrics)
                # classify batch with unrefined model (comparison)
                model.load_state_dict(initial_state)
                loss_unrefined, score_unrefined = classify_baseline_sequence(inputs, masks, model, criterion, metrics)
                # log to tensorboard
                writer.add_scalars('Accuracy/f1_score', {'refined': score_refined,
                                                         'unrefined': score_unrefined,
                                                         'gain': score_refined - score_unrefined}, n_iter)
                writer.add_scalars('Loss', {'refined': loss_refined,
                                            'unrefined': loss_unrefined,
                                            'gain': loss_refined - loss_unrefined}, n_iter)
    writer.close()
    model.load_state_dict(initial_state)
def standard_online_loop(inputs, masks, iterations, model, criterion, optimizer):
    """Run `iterations` optimization steps on the first frame of each group.

    Only channel 0 of the model output (the group's first frame) contributes
    to the loss. Per-step loss goes to the 'online_plots' TensorBoard run;
    the refined state dict is returned.
    """
    writer = SummaryWriter('online_plots')
    for step in range(iterations):
        optimizer.zero_grad()
        with torch.set_grad_enabled(True):
            first_frame_logits = model(inputs)[:, 0]
            step_loss = criterion(first_frame_logits.unsqueeze(1), masks[:, 0])
            print('online loss:' + str(step_loss))
            step_loss.backward()
            optimizer.step()
            writer.add_scalars('Loss', {'loss': step_loss}, step)
    writer.close()
    return model.state_dict()
def classify_sequence(frames, masks, model, criterion, metrics):
    """Evaluate a sequence model (raw-tensor output) on one batch, no gradients.

    Args:
        frames: input batch, passed straight to `model`.
        model: callable returning logits directly (not a dict).
        masks: ground-truth masks; criterion compares them against
            `outputs.unsqueeze(2)` (a channel dim is inserted).
        criterion: loss applied to raw logits.
        metrics: mapping of metric name -> callable(y_true, y_pred); only
            'f1_score' is evaluated, on sigmoid outputs thresholded at 0.5.

    Returns:
        (loss, score); score is None when `metrics` has no 'f1_score' entry.
        (Fix: the original left `score` unbound in that case, raising
        NameError at the return.)
    """
    score = None
    with torch.set_grad_enabled(False):
        outputs = model(frames)
        loss = criterion(outputs.unsqueeze(2), masks)
        # Sigmoid on flattened logits; masks are flattened to match.
        y_pred = 1.0 / (1 + np.exp(-outputs.data.cpu().numpy().ravel()))
        y_true = masks.data.cpu().numpy().ravel()
        for name, metric in metrics.items():
            if name == 'f1_score':
                score = metric(y_true > 0, y_pred > 0.5)
    return loss, score
def repeat_oneshot_validation(model, criterion, optimizer, metrics, root_dir, target_dir, iterations, imageFolder,
                              maskFolder, img_size, batch_size, step):
    """One-shot validation that re-runs online training for EVERY batch.

    Unlike `standard_oneshot_validation`, the model is reset to its initial
    weights and fine-tuned again on the first annotated frame (injected as the
    last element of every group) before classifying each batch. Predicted
    masks for the first element of each group are written to
    `target_dir/<seq_name>/`; refined vs. unrefined loss/F1 go to TensorBoard.
    """
    writer = SummaryWriter('drive/My Drive/oneshot/runs/TAGNN_SGD_1e-5_2iter')
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)
    n_iter = 0
    # cache initial model weights
    initial_state = copy.deepcopy(model.state_dict())
    transform = transforms.Compose([Resize(img_size, img_size), ToTensor(), Normalize()])
    info_file = open(root_dir + 'db_info.yaml', 'r')
    seq_list = yaml.load(info_file)['sequences']
    for seq in seq_list:
        if seq['year'] == 2016 and seq['set'] == 'val':
            seq_name = seq['name']
            print(seq_name)
            os.mkdir(os.path.join(target_dir, seq_name))
            image_names = sorted(glob.glob(os.path.join(root_dir, imageFolder, seq_name, '*')))
            new_image_names = []
            mask_names = sorted(glob.glob(os.path.join(root_dir, maskFolder, seq_name, '*')))
            new_mask_names = []
            # create sequence dataset {{x, x+10, x+20},{x+1, x+1+10, x+1+20},...}
            for elem in range(len(image_names)):
                if elem + step * (batch_size - 1) < len(image_names):
                    for frame in range(batch_size):
                        new_image_names.append(image_names[elem + frame * step])
                        new_mask_names.append(mask_names[elem + frame*step])
                else:
                    # Indices past the end wrap around to the sequence start.
                    for frame in range(batch_size):
                        new_image_names.append(image_names[elem + frame * step - len(image_names)])
                        new_mask_names.append(mask_names[elem + frame * step - len(image_names)])
            image_names = new_image_names
            mask_names = new_mask_names
            # iterate through sequence
            for i in range(int(len(image_names) / (batch_size*batch_size)) + 1):
                n_iter += 1
                img = []
                # create batch
                img_batch = torch.zeros(3, batch_size, 3, img_size[1], img_size[0])
                mask_batch = torch.zeros(3, batch_size, 1, img_size[1], img_size[0])
                for k in range(img_batch.shape[0]):
                    for j in range(batch_size):
                        if i * batch_size*batch_size + k * batch_size + j >= int(len(image_names)):
                            break
                        img.append(image_names[i * batch_size*batch_size + k*batch_size + j][-9:])
                        image = cv2.imread(image_names[i * batch_size*batch_size + k*batch_size + j], 1).transpose(2, 0, 1)
                        mask = cv2.imread(mask_names[i * batch_size*batch_size + k*batch_size + j], 0)
                        mask = np.asarray(mask > 0).astype(float)
                        image_dict = {'image': image, 'mask': mask}
                        image_dict = transform(image_dict)
                        img_batch[k, j] = image_dict['image']
                        mask_batch[k, j] = image_dict['mask']
                # change every last element to the first frame
                for j in range(batch_size):
                    image = cv2.imread(image_names[0], 1).transpose(2, 0, 1)
                    mask = cv2.imread(mask_names[0], 0)
                    mask = np.asarray(mask > 0).astype(float)
                    image_dict = {'image': image, 'mask': mask}
                    image_dict = transform(image_dict)
                    img_batch[j, 2] = image_dict['image']
                    mask_batch[j, 2] = image_dict['mask']
                inputs = img_batch.to(device)
                masks = mask_batch.to(device)
                # do online training
                model.train()
                model.load_state_dict(initial_state)
                new_model_state = repeat_online_loop(inputs, masks, iterations, model, criterion, optimizer)
                model.load_state_dict(new_model_state)
                new_state = copy.deepcopy(model.state_dict())
                model.load_state_dict(new_state)
                # generate output masks for first frame for every element in the batch
                model.eval()
                with torch.set_grad_enabled(False):
                    outputs = model(inputs)
                    for j in range(int(outputs.shape[0])):
                        if j*batch_size >= int(len(img)):
                            break
                        mask_img = outputs[j][0].data.cpu().numpy()
                        mask_img = 1.0 / (1.0 + np.exp(-mask_img))  # sigmoid
                        retransform = Resize((854, 480), (854, 480))
                        mask_img = retransform({'image': mask_img, 'mask': mask_img})
                        mask_img = np.asarray(mask_img['image'] > 0.5, dtype=np.uint8)
                        img_png = img[j*batch_size][:-4] + '.png'
                        print(img_png)
                        cv2.imwrite(os.path.join(target_dir, seq_name, img_png), mask_img)
                        #plt.subplot(1, 1, 1)
                        #plt.imshow(mask_img)
                        #plt.show(block=True)
                # classify batch with refined model
                loss_refined, score_refined = classify_baseline_sequence(inputs, masks, model, criterion, metrics)
                # classify batch with unrefined model (comparison)
                model.load_state_dict(initial_state)
                loss_unrefined, score_unrefined = classify_baseline_sequence(inputs, masks, model, criterion, metrics)
                # log to tensorboard
                writer.add_scalars('Accuracy/f1_score', {'refined': score_refined,
                                                         'unrefined': score_unrefined,
                                                         'gain': score_refined - score_unrefined}, n_iter)
                writer.add_scalars('Loss', {'refined': loss_refined,
                                            'unrefined': loss_unrefined,
                                            'gain': loss_refined - loss_unrefined}, n_iter)
    writer.close()
    model.load_state_dict(initial_state)
def repeat_online_loop(inputs, masks, iterations, model, criterion, optimizer):
    """Run `iterations` optimization steps on frame index 2 of each group.

    The loss is computed only on output channel 2 — the slot that the caller
    fills with the annotated first frame. Per-step loss is logged to the
    'online_plots' TensorBoard run; the refined state dict is returned.
    """
    writer = SummaryWriter('online_plots')
    for step in range(iterations):
        optimizer.zero_grad()
        with torch.set_grad_enabled(True):
            anchor_logits = model(inputs)[:, 2]
            step_loss = criterion(anchor_logits.unsqueeze(1), masks[:, 2])
            print('online loss:' + str(step_loss))
            step_loss.backward()
            optimizer.step()
            writer.add_scalars('Loss', {'loss': step_loss}, step)
    writer.close()
    return model.state_dict()
def past_oneshot_validation(model, criterion, optimizer, metrics, root_dir, target_dir, iterations, imageFolder,
                            maskFolder, img_size, batch_size, step):
    """One-shot validation with online training on the annotated first frame
    AND the previous batch's predicted masks.

    Like `repeat_oneshot_validation`, the model is reset and fine-tuned for
    every batch, but here the middle element (index 1) of each group is
    replaced with a past frame whose target is the mask predicted in the
    previous iteration (pseudo-labeling); element 2 stays the annotated first
    frame. Predicted masks are written to `target_dir/<seq_name>/` and
    refined vs. unrefined loss/F1 are logged to TensorBoard.
    """
    writer = SummaryWriter('drive/My Drive/oneshot/runs/TAGNN_SGD_1e-5_2iter')
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)
    n_iter = 0
    # cache initial model weights
    initial_state = copy.deepcopy(model.state_dict())
    transform = transforms.Compose([Resize(img_size, img_size), ToTensor(), Normalize()])
    info_file = open(root_dir + 'db_info.yaml', 'r')
    seq_list = yaml.load(info_file)['sequences']
    for seq in seq_list:
        if seq['year'] == 2016 and seq['set'] == 'val':
            seq_name = seq['name']
            print(seq_name)
            os.mkdir(os.path.join(target_dir, seq_name))
            image_names = sorted(glob.glob(os.path.join(root_dir, imageFolder, seq_name, '*')))
            new_image_names = []
            mask_names = sorted(glob.glob(os.path.join(root_dir, maskFolder, seq_name, '*')))
            new_mask_names = []
            # create sequence dataset {{x, x+10, x+20},{x+1, x+1+10, x+1+20},...}
            for elem in range(len(image_names)):
                if elem + step * (batch_size - 1) < len(image_names):
                    for frame in range(batch_size):
                        new_image_names.append(image_names[elem + frame * step])
                        new_mask_names.append(mask_names[elem + frame * step])
                else:
                    # Indices past the end wrap around to the sequence start.
                    for frame in range(batch_size):
                        new_image_names.append(image_names[elem + frame * step - len(image_names)])
                        new_mask_names.append(mask_names[elem + frame * step - len(image_names)])
            image_names = new_image_names
            mask_names = new_mask_names
            # iterate through sequence
            for i in range(int(len(image_names) / (batch_size * batch_size)) + 1):
                n_iter += 1
                img = []
                # create batch
                img_batch = torch.zeros(3, batch_size, 3, img_size[1], img_size[0])
                mask_batch = torch.zeros(3, batch_size, 1, img_size[1], img_size[0])
                for k in range(img_batch.shape[0]):
                    for j in range(batch_size):
                        if i * batch_size * batch_size + k * batch_size + j >= int(len(image_names)):
                            break
                        img.append(image_names[i * batch_size * batch_size + k * batch_size + j][-9:])
                        image = cv2.imread(image_names[i * batch_size * batch_size + k * batch_size + j], 1).transpose(
                            2, 0, 1)
                        mask = cv2.imread(mask_names[i * batch_size * batch_size + k * batch_size + j], 0)
                        mask = np.asarray(mask > 0).astype(float)
                        image_dict = {'image': image, 'mask': mask}
                        image_dict = transform(image_dict)
                        img_batch[k, j] = image_dict['image']
                        mask_batch[k, j] = image_dict['mask']
                # change every last element to the first frame
                for j in range(batch_size):
                    image = cv2.imread(image_names[0], 1).transpose(2, 0, 1)
                    mask = cv2.imread(mask_names[0], 0)
                    mask = np.asarray(mask > 0).astype(float)
                    image_dict = {'image': image, 'mask': mask}
                    image_dict = transform(image_dict)
                    img_batch[j, 2] = image_dict['image']
                    mask_batch[j, 2] = image_dict['mask']
                # for every batch except the first change the middle element to the past frame and its label to its
                # predicted mask
                if i != 0:
                    # `input_masks` was produced in the previous loop iteration
                    # (assigned below); the i != 0 guard prevents a
                    # use-before-definition on the first batch.
                    for j in range(batch_size):
                        image = cv2.imread(
                            image_names[i * batch_size * batch_size + j * batch_size - batch_size * batch_size],
                            1).transpose(2, 0, 1)
                        image_dict = {'image': image, 'mask': image}
                        image_dict = transform(image_dict)
                        img_batch[j, 1] = image_dict['image']
                        mask_batch[j, 1] = input_masks[j]
                else:
                    # First batch: fall back to the annotated first frame.
                    for j in range(batch_size):
                        image = cv2.imread(image_names[0], 1).transpose(2, 0, 1)
                        mask = cv2.imread(mask_names[0], 0)
                        mask = np.asarray(mask > 0).astype(float)
                        image_dict = {'image': image, 'mask': mask}
                        image_dict = transform(image_dict)
                        img_batch[j, 1] = image_dict['image']
                        mask_batch[j, 1] = image_dict['mask']
                inputs = img_batch.to(device)
                masks = mask_batch.to(device)
                # do online training
                model.train()
                model.load_state_dict(initial_state)
                new_model_state = past_online_loop(inputs, masks, iterations, model, criterion, optimizer)
                model.load_state_dict(new_model_state)
                new_state = copy.deepcopy(model.state_dict())
                model.load_state_dict(new_state)
                #save past masks
                input_masks = torch.zeros(3, 1, img_size[1], img_size[0])
                # generate output masks for first frame for every element in the batch
                model.eval()
                with torch.set_grad_enabled(False):
                    outputs = model(inputs)
                    for j in range(int(outputs.shape[0])):
                        if j * batch_size >= int(len(img)):
                            break
                        # Keep the thresholded prediction of channel 1 as the
                        # pseudo-label for the next iteration.
                        input_masks[j] = 1.0 / (1.0 + torch.exp(-outputs[j][1].unsqueeze(0))) > 0.5
                        mask_img = outputs[j][0].data.cpu().numpy()
                        mask_img = 1.0 / (1.0 + np.exp(-mask_img))  # sigmoid
                        retransform = Resize((854, 480), (854, 480))
                        mask_img = retransform({'image': mask_img, 'mask': mask_img})
                        mask_img = np.asarray(mask_img['image'] > 0.5, dtype=np.uint8)
                        img_png = img[j * batch_size][:-4] + '.png'
                        print(img_png)
                        cv2.imwrite(os.path.join(target_dir, seq_name, img_png), mask_img)
                        #plt.subplot(1, 1, 1)
                        #plt.imshow(mask_img)
                        #plt.show(block=True)
                # classify batch with refined model
                loss_refined, score_refined = classify_baseline_sequence(inputs, masks, model, criterion, metrics)
                # classify batch with unrefined model (comparison)
                model.load_state_dict(initial_state)
                loss_unrefined, score_unrefined = classify_baseline_sequence(inputs, masks, model, criterion, metrics)
                # log to tensorboard
                writer.add_scalars('Accuracy/f1_score', {'refined': score_refined,
                                                         'unrefined': score_unrefined,
                                                         'gain': score_refined - score_unrefined}, n_iter)
                writer.add_scalars('Loss', {'refined': loss_refined,
                                            'unrefined': loss_unrefined,
                                            'gain': loss_refined - loss_unrefined}, n_iter)
    writer.close()
    model.load_state_dict(initial_state)
def past_online_loop(inputs, masks, iterations, model, criterion, optimizer):
    """Run `iterations` optimization steps on frames 1 and 2 of each group.

    Channels 1:3 carry the pseudo-labeled past frame and the annotated first
    frame; only they contribute to the loss. Per-step loss is logged to the
    'online_plots' TensorBoard run; the refined state dict is returned.
    """
    writer = SummaryWriter('online_plots')
    for step in range(iterations):
        optimizer.zero_grad()
        with torch.set_grad_enabled(True):
            supervised_logits = model(inputs)[:, 1:3]
            step_loss = criterion(supervised_logits.unsqueeze(2), masks[:, 1:3])
            print('online loss:' + str(step_loss))
            step_loss.backward()
            optimizer.step()
            writer.add_scalars('Loss', {'loss': step_loss}, step)
    writer.close()
    return model.state_dict()
def optimize_attention(image, mask, model, seq):
    """Build a normalized C x C weight matrix from feature self-similarity
    inside the mask of sequence element `seq`.

    Uses `model.get_features` (TAGNN_batch) to obtain flattened per-pixel
    features and the downsampled mask, keeps only the pairwise similarities
    between in-mask pixels, and pools them into a dense channel-sized matrix
    that sums to 1. CUDA-only (explicit `.cuda()` calls below).
    """
    model.eval()
    # NOTE(review): F, C, H, W from the unpack are unused and C/N are
    # re-bound below; the names only document the expected 5-D input.
    B, F, C, H, W = image.shape
    mask = mask.squeeze()
    out = model.get_features(image, mask)
    mask = out['mask'][seq]
    X = out['feature'][seq]
    N, C = X.shape  # N pixels, C feature channels
    X_X = torch.mm(X, X.T)  # pairwise feature similarities, [N, N]
    # Keep only rows/columns belonging to in-mask pixels.
    X_X = X_X[mask > 0]
    X_X = X_X[:, mask > 0]
    N, _ = X_X.shape  # N now counts in-mask pixels only
    # X_X[XX_mask<=1] *= 0 # similarity between feature in and out mask should be low
    # X_X[XX_mask==0] = 0 # similarity between background features should be neglegted
    # Every entry of W is the total in-mask similarity mass, broadcast to C x C.
    W = torch.mm(torch.mm(torch.ones(C, N).cuda(), X_X), torch.ones(N, C).cuda())
    W = W / torch.sum(W)  # normalize so W sums to 1
    return W
63,965 | lupries/TAGN2 | refs/heads/master | /models/graphnet/AttentiveGraphNeuralNetwork.py | import torch
import torch.nn as nn
from torch_geometric.nn import MessagePassing
from torch_geometric.utils import add_self_loops, degree
import numpy as np
from ..convgru import ConvGRU, ConvGRUCell
from ..attention import SelfAttention, InterAttention, GAP
class AGNN(MessagePassing):
    """
    Graph Neural Network with Attention Modules for Message Passing and Convolutional GRU for Node Update
    as described in Zero-Shot Video Object Segmentation via Attentive Graph Neural Networks
    """
    def __init__(self, loops, channels, num_nodes, edge_index=None):
        """
        Args:
            loops: number of message-passing rounds per forward pass.
            channels: feature channels per node (attention + GRU width).
            num_nodes: nodes in the default fully-connected graph.
            edge_index: optional precomputed [2, E] edge index; when None a
                fully-connected graph (with self-loops) is built and moved to
                CUDA if available.
        """
        super(AGNN, self).__init__(aggr='mean')  # mean-aggregate incoming messages
        self.loops = loops
        if edge_index is None:
            edge_index = create_fully_connected(num_nodes)
            if torch.cuda.is_available():
                edge_index = edge_index.cuda()
        self.edge_index = edge_index
        # Attention Modules
        self.intraAttention = SelfAttention(channels)
        self.interAttention = InterAttention(channels, channels)
        # Per-message scalar gate in [0, 1]; note the hard-coded 256 input
        # channels — assumes channels == 256 here (TODO confirm).
        self.gate = nn.Sequential(
            nn.Conv2d(256, 1, kernel_size=1, bias=False),
            nn.Sigmoid()
        )
        #GAP(channels, channels)
        # Convolutional Gated Recurrent Unit
        self.convGRU = ConvGRUCell(channels, channels, 3)
        # Current node embedding state; caller may seed it before forward().
        self.hidden = None
        # Snapshots of `hidden` before each update (one per loop).
        self.hidden_states = []

    def forward(self, x):
        # x has shape [N, W, H, C]
        self.hidden_states = []
        # Propagate messages
        for itr in range(self.loops):
            x = self.propagate(edge_index=self.edge_index, x=x)
        return x

    def message(self, x_i, x_j, edge_index):
        """Compute per-edge messages: intra-attention on self-loops,
        inter-attention on cross edges, then a learned sigmoid gate."""
        # x_j and x_j have shape [E, C, W, H]
        # edge_index has shape [2, E]
        mask_selfAtt = edge_index[0] == edge_index[1]
        x_i_selfAtt, x_j_selfAtt = x_i[mask_selfAtt], x_j[mask_selfAtt]
        x_i_interAtt, x_j_interAtt = x_i[mask_selfAtt==False], x_j[mask_selfAtt==False]
        # Self-loop endpoints must carry identical features.
        assert (x_i_selfAtt == x_j_selfAtt).all()
        msg = torch.zeros_like(x_i)
        # Intra-Attention messages (residual form: attention output minus input)
        msg[mask_selfAtt] = self.intraAttention(x_i_selfAtt) - x_i_selfAtt
        # Inter-Attention messages
        msg[mask_selfAtt==False] = self.interAttention(x_i_interAtt, x_j_interAtt)
        # Gate
        gate_multiplier = self.gate(msg)
        msg = msg * gate_multiplier
        return msg

    def update(self, aggr_out):
        """GRU node update; the pre-update hidden state is archived first."""
        # aggr_out has shape [N, C, H, W]
        self.hidden_states.append(self.hidden)
        self.hidden = self.convGRU.forward(aggr_out, self.hidden)
        # Return new node embeddings.
        return self.hidden
def create_fully_connected(num_nodes=3):
    """Return the [2, num_nodes**2] edge index of a fully-connected graph,
    self-loops included, as a long tensor.

    Row 0 holds source nodes ([0,0,...,1,1,...]), row 1 holds targets
    ([0,1,...,0,1,...]).
    """
    nodes = torch.arange(num_nodes)
    sources = nodes.repeat_interleave(num_nodes)
    targets = nodes.repeat(num_nodes)
    return torch.stack([sources, targets]).long()
63,966 | lupries/TAGN2 | refs/heads/master | /models/TAGNN.py | import torch.nn as nn
from torch.nn import functional as F
from torchvision import models
import torch
from torchvision import transforms
from .graphnet import AGNN
from .graphnet import create_fully_connected
class TAGNN(nn.Module):
    """DeepLabV3-ResNet50 backbone + attentive graph module + DeepLab readout.

    The graph operates on the backbone's 2048-channel feature map; the
    readout head produces single-channel logits which are upsampled back to
    the input resolution.
    """

    def __init__(self, loops, num_nodes):
        super(TAGNN, self).__init__()
        deeplab = models.segmentation.deeplabv3_resnet50(pretrained=False)
        self.backbone = deeplab.backbone
        self.graph = AGNN(loops=loops, channels=2048, num_nodes=num_nodes)
        self.readout = models.segmentation.deeplabv3.DeepLabHead(2048, num_classes=1)

    def forward(self, x):
        # Clear the graph's GRU hidden state so every forward starts fresh.
        self.graph.hidden = None
        out_size = x.shape[-2:]
        feats = self.backbone(x)['out']
        feats = self.graph(feats)
        logits = self.readout(feats)
        return F.interpolate(logits, size=out_size, mode='bilinear', align_corners=False)
class TAGNN_batch(nn.Module):
    """Batched TAGNN: per-frame DeepLab encoder + ASPP, an AGNN operating on
    all frames of all batch elements as one flat node set, and a residual
    readout head. CUDA-only (explicit `.cuda()` calls throughout).
    """
    def __init__(self, loops, frames, batch_size, backbone=None):
        """
        Args:
            loops: message-passing rounds for the AGNN.
            frames: frames per batch element (nodes per sub-graph).
            batch_size: batch elements; the edge index is a block-diagonal
                stack of `batch_size` fully-connected `frames`-node graphs.
            backbone: optional pre-built DeepLab model; when None a fresh
                deeplabv3_resnet50 with a single-class head is created.
        """
        super(TAGNN_batch, self).__init__()
        edge_index = create_fully_connected(frames)
        new_edge_index = edge_index
        # Offset node ids per batch element so sub-graphs stay disconnected.
        for i in range(1,batch_size):
            new_edge_index = torch.cat((new_edge_index,edge_index + torch.ones_like(edge_index) * i * frames),dim=1)
        if backbone is not None:
            deeplab = backbone
        else:
            deeplab = models.segmentation.deeplabv3_resnet50(pretrained=False)
            deeplab.classifier = models.segmentation.deeplabv3.DeepLabHead(2048, num_classes=1)
        self.backbone = deeplab.backbone
        self.deeplabhead = deeplab.classifier
        # First four DeepLabHead layers = ASPP feature extraction (256 ch).
        self.ASPP = nn.Sequential(
            self.deeplabhead[0],
            self.deeplabhead[1],
            self.deeplabhead[2],
            self.deeplabhead[3]
        )
        self.graph = AGNN(loops=loops, channels=256, num_nodes=frames, edge_index=new_edge_index.cuda())
        # Readout: concat(graph output, ASPP features) -> 256 -> final head.
        self.classifier = nn.Sequential(
            nn.Dropout2d(0.1),
            nn.Conv2d(256*2, 256, kernel_size=1, padding=0, bias = False),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            self.deeplabhead[4]
        )
        # Readouts of intermediate graph states, filled each forward pass.
        self.node_states = []

    def forward(self, x):
        """x: [batch, frames, 3, H, W] -> logits [batch, frames, H, W]."""
        input_shape = x.shape[-2:]
        frames = x.shape[1]
        # backbone (feature extraction)
        features = self.encode(x,frames)
        # flatten batches for graph
        batch, frames, channel, height, width = features.shape
        x = features.view(-1, channel, height, width)
        # set node states
        self.graph.hidden = x
        # graphnet (attention mechanism)
        x = self.graph(x)
        # list intermediate node states and pass through readout
        self.node_states = []
        for state in self.graph.hidden_states:
            self.node_states.append(self.readout(state.view(features.shape),features,input_shape,frames))
        # reshape (unflatten batches)
        x = x.view(features.shape)
        # readout (pixelwise classification)
        out = self.readout(x, features, input_shape, frames)
        return out

    def encode(self, x, frames):
        """Run backbone + ASPP per frame; returns [batch, frames, 256, h, w]."""
        features = torch.Tensor().cuda()
        for frame in range(frames):
            frame_feature = self.backbone(x[:,frame])['out']
            frame_feature = self.ASPP(frame_feature)
            frame_feature = frame_feature.unsqueeze(1)
            features = torch.cat((features, frame_feature),dim=1)
        return features

    def readout(self, x, res_x, input_shape, frames):
        """Classify each frame from concat(graph features, residual ASPP
        features) and upsample to `input_shape`."""
        x = torch.cat((x,res_x),dim=2)
        out = torch.Tensor().cuda()
        for frame in range(frames):
            frame_out = self.classifier(x[:,frame])
            frame_out = F.interpolate(frame_out, size=input_shape, mode='bilinear', align_corners=False)
            out = torch.cat((out,frame_out),dim=1)
        return out

    def get_attention_map(self, x, mask):
        """Debug helper: visualize inter-attention from two hand-picked
        in-mask pixels (indices 100:102) of frame 0 onto frame 1.

        Returns upsampled 'attention' (activated), 'soft_attention' (raw)
        and 'focus' (the probed pixels) maps.
        """
        input_shape = x.shape[-2:]
        frames = x.shape[1]
        # backbone (feature extraction)
        features = self.encode(x,frames)
        # flatten batches for graph
        batch, frames, channel, height, width = features.shape
        node1 = features[:,0]
        node2 = features[:,1]
        # Downsample the mask to feature resolution via PIL round-trip.
        resize = transforms.Resize((height,width))
        toPIL = transforms.ToPILImage()
        toTensor = transforms.ToTensor()
        mask1 = toTensor(resize(toPIL(mask[:,0].cpu()))).cuda()
        print(mask1.shape)
        node1_flat = node1.view(batch, -1, width*height).permute(0,2,1)
        node2_flat = node2.view(batch, -1, width*height)
        mask1_flat = mask1.view(batch, width*height)
        print("node: "+str(node1_flat.shape)+" mask: "+str(mask1_flat.shape))
        # Inter-attention affinity between every pixel pair of the two frames.
        x = self.graph.interAttention.W_c(node1_flat)
        x = torch.bmm(x, node2_flat)
        soft_att = x
        x = self.graph.interAttention.activation(x)
        out = torch.zeros(batch,width*height)
        out_soft = torch.zeros(batch,width*height)
        for i in range(batch):
            #out[i] = torch.sum(x[i][mask1_flat[i]>0],dim=0)
            # Sum attention rows of just two in-mask pixels (100:102).
            out[i] = torch.sum((x[i][mask1_flat[i]>0])[100:102],dim=0)
            out_soft[i] = torch.sum((soft_att[i][mask1_flat[i]>0])[100:102],dim=0)
            # Mark the probed pixels in the focus map.
            mask_add = torch.zeros_like(mask1_flat[i][mask1_flat[i]>0])
            mask_add[100:102] = torch.ones_like(mask_add[100:102])
            mask1_flat[i][mask1_flat[i]>0] = mask_add
        return {'attention': F.interpolate(out.view(batch, height, width).unsqueeze(1), size=input_shape, mode='bilinear', align_corners=False).squeeze(1), 'focus': F.interpolate(mask1_flat.view(batch, height, width).unsqueeze(1), size=input_shape, mode='bilinear', align_corners=False).squeeze(1), 'soft_attention': F.interpolate(out_soft.view(batch, height, width).unsqueeze(1), size=input_shape, mode='bilinear', align_corners=False).squeeze(1)}

    def get_features(self, x, mask):
        """Return flattened per-pixel features of frame 0 ('feature',
        [batch, h*w, C]) and the feature-resolution mask ('mask',
        [batch, h*w]) for external analysis (see optimize_attention)."""
        input_shape = x.shape[-2:]
        frames = x.shape[1]
        # backbone (feature extraction)
        features = self.encode(x,frames)
        # flatten batches for graph
        batch, frames, channel, height, width = features.shape
        node1 = features[:,0]
        node2 = features[:,1]
        resize = transforms.Resize((height,width))
        toPIL = transforms.ToPILImage()
        toTensor = transforms.ToTensor()
        mask1 = toTensor(resize(toPIL(mask[:,0].cpu()))).cuda()
        node1_flat = node1.view(batch, -1, width*height).permute(0,2,1)
        node2_flat = node2.view(batch, -1, width*height)
        mask1_flat = mask1.view(batch, width*height)
        return {'feature': node1_flat, 'mask': mask1_flat}
63,967 | lupries/TAGN2 | refs/heads/master | /datasets/dataloader_AGNN.py | import glob
import yaml
import os
from torch.utils.data import Dataset, DataLoader, SequentialSampler
import cv2
import torch
from torchvision import transforms, utils
import numpy as np
from .dataloader import SegDataset
class SegDataset_AGNN(SegDataset):
    """SegDataset variant that yields groups of `batch_size` frames per item.

    Item `idx` covers frames [idx*batch_size, (idx+1)*batch_size); images and
    masks are stacked along a leading frame axis before the transform runs.
    """

    def __len__(self):
        # Number of complete frame groups available.
        return int(len(self.image_names) / self.batch_size)

    def __getitem__(self, idx):
        start = idx * self.batch_size
        stop = start + self.batch_size
        images = []
        for img_name in self.image_names[start:stop]:
            if self.imagecolorflag:
                # Color image: reorder HWC -> CHW.
                images.append(cv2.imread(img_name, self.imagecolorflag).transpose(2, 0, 1))
            else:
                images.append(cv2.imread(img_name, self.imagecolorflag))
        masks = []
        for msk_name in self.mask_names[start:stop]:
            if self.maskcolorflag:
                masks.append(cv2.imread(msk_name, self.maskcolorflag).transpose(2, 0, 1))
            else:
                # Grayscale masks are binarized to {0.0, 1.0}.
                masks.append(np.asarray(cv2.imread(msk_name, self.maskcolorflag) > 0).astype(float))
        sample = {'image': np.asarray(images), 'mask': np.asarray(masks)}
        if self.transform:
            sample = self.transform(sample)
        return sample
class Resize(object):
    """Resize every image and mask in a batched sample.

    Sizes are (width, height) tuples as expected by cv2.resize. Images are
    assumed batched with channels first, i.e. (N, C, H, W); masks resize to
    a channel-less (N, H, W) layout.
    """

    def __init__(self, imageresize, maskresize):
        self.imageresize = imageresize
        self.maskresize = maskresize

    def __call__(self, sample):
        images, masks = sample['image'], sample['mask']

        # cv2.resize wants channels last, so flip NCHW -> NHWC first.
        if images.ndim == 4:
            images = images.transpose(0, 2, 3, 1)
        if masks.ndim == 4:
            masks = masks.transpose(0, 2, 3, 1)

        out_masks = np.zeros((masks.shape[0], self.maskresize[1], self.maskresize[0]))
        out_images = np.zeros(
            (images.shape[0], self.imageresize[1], self.imageresize[0], images.shape[3]))
        for idx in range(images.shape[0]):
            out_masks[idx] = cv2.resize(masks[idx], self.maskresize, cv2.INTER_AREA)
            out_images[idx] = cv2.resize(images[idx], self.imageresize, cv2.INTER_AREA)

        # Restore channels-first layout where applicable.
        if out_images.ndim == 4:
            out_images = out_images.transpose(0, 3, 1, 2)
        if out_masks.ndim == 4:
            out_masks = out_masks.transpose(0, 3, 1, 2)

        return {'image': out_images, 'mask': out_masks}
class ToTensor(object):
    """Convert the numpy arrays of a sample into torch tensors.

    2-D (H, W) arrays are promoted to single-channel (1, H, W) first so
    downstream code always sees a channel axis.
    """

    def __call__(self, sample, maskresize=None, imageresize=None):
        image, mask = sample['image'], sample['mask']
        if mask.ndim == 2:
            mask = mask[np.newaxis, ...]
        if image.ndim == 2:
            image = image[np.newaxis, ...]
        return {'image': torch.from_numpy(image),
                'mask': torch.from_numpy(mask)}
class Normalize(object):
    """Scale image tensors into [0, 1] floats; cast masks to float tensors."""

    def __call__(self, sample):
        return {
            'image': sample['image'].type(torch.FloatTensor) / 255,
            'mask': sample['mask'].type(torch.FloatTensor),
        }
def create_dataloader(data_dir, imageFolder, maskFolder, size = (256,256), fraction=None, subset='train', batch_size=4, step=5):
    """Build a sequential DataLoader over SegDataset_AGNN.

    The dataset pre-groups frames `step` apart, so sampling must stay
    sequential for groups to remain intact. ``fraction`` is accepted for
    interface parity with the non-AGNN loader but is not used here.
    """
    pipeline = transforms.Compose([Resize(size, size), ToTensor(), Normalize()])
    dataset = SegDataset_AGNN(
        data_dir,
        transform=pipeline,
        imageFolder=imageFolder,
        maskFolder=maskFolder,
        subset=subset,
        batch_size=batch_size,
        step=step,
    )
    return DataLoader(dataset,
                      sampler=SequentialSampler(dataset),
                      batch_size=batch_size,
                      num_workers=8)
| {"/datasets/__init__.py": ["/datasets/dataloader.py"], "/models/graphnet/AttentiveGraphNeuralNetwork.py": ["/models/convgru/__init__.py", "/models/attention/__init__.py"], "/models/TAGNN.py": ["/models/graphnet/__init__.py"], "/datasets/dataloader_AGNN.py": ["/datasets/dataloader.py"], "/models/__init__.py": ["/models/TAGNN.py"], "/models/graphnet/__init__.py": ["/models/graphnet/AttentiveGraphNeuralNetwork.py"], "/models/attention/__init__.py": ["/models/attention/attention.py"]} |
63,968 | lupries/TAGN2 | refs/heads/master | /datasets/dataloader.py | import glob
import yaml
import os
from torch.utils.data import Dataset, DataLoader, SequentialSampler
import cv2
import torch
from torchvision import transforms, utils
import numpy as np
class SegDataset(Dataset):
    """Segmentation dataset over DAVIS-style sequence folders.

    Items are single frames; the constructor pre-orders ``image_names`` /
    ``mask_names`` into groups of ``batch_size`` frames spaced ``step``
    frames apart, so a SequentialSampler with the same batch size yields
    temporally related frames together.
    """
    def __init__(self, root_dir, imageFolder, maskFolder, transform=None, seed=None, fraction=None, subset=None, imagecolormode='rgb', maskcolormode='grayscale', batch_size=3, step=5):
        """
        Args:
            root_dir (string): Directory with all the images and should have the following structure.
            root
            --Images
            -----Img 1
            -----Img N
            --Mask
            -----Mask 1
            -----Mask N
            imageFolder (string) = 'Images' : Name of the folder which contains the Images.
            maskFolder (string) = 'Masks' : Name of the folder which contains the Masks.
            transform (callable, optional): Optional transform to be applied on a sample.
            seed: Specify a seed for the train and test split
            fraction: A float value from 0 to 1 which specifies the validation split fraction
            subset: 'Train' or 'Test' to select the appropriate set.
            imagecolormode: 'rgb' or 'grayscale'
            maskcolormode: 'rgb' or 'grayscale'
            batch_size: number of temporally related frames per group
            step: frame distance between members of a group
        """
        self.color_dict = {'rgb': 1, 'grayscale': 0}
        assert(imagecolormode in ['rgb', 'grayscale'])
        assert(maskcolormode in ['rgb', 'grayscale'])
        # cv2.imread flags: 1 = color, 0 = grayscale.
        self.imagecolorflag = self.color_dict[imagecolormode]
        self.maskcolorflag = self.color_dict[maskcolormode]
        self.root_dir = root_dir
        self.transform = transform
        self.batch_size = batch_size
        # FIX: close the yaml file deterministically, and use safe_load —
        # plain yaml.load can execute arbitrary code from the file.
        with open(root_dir + 'db_info.yaml', 'r') as seq_file:
            sequences = yaml.safe_load(seq_file)['sequences']
        self.sequences = [seq['name'] for seq in sequences if (seq['year'] == 2016 and subset == seq['set'])]
        print(self.sequences)
        self.image_names = []
        self.mask_names = []
        if not fraction:
            for seq in self.sequences:
                image_names = sorted(
                    glob.glob(os.path.join(self.root_dir, imageFolder, seq, '*')))
                mask_names = sorted(
                    glob.glob(os.path.join(self.root_dir, maskFolder, seq, '*')))
                if len(mask_names) < len(image_names):
                    # Sequence with fewer masks than frames: repeat the first
                    # mask for every frame of the sequence.
                    print(mask_names)
                    mask = mask_names[0]
                    mask_names = [mask for i in range(len(image_names))]
                    print(mask_names)
                # Re-order paths so that every run of batch_size entries holds
                # frames `step` apart, wrapping around the sequence end.
                new_image_names = []
                new_mask_names = []
                for elem in range(len(image_names)):
                    if elem + step * (batch_size - 1) < len(image_names):
                        for frame in range(batch_size):
                            new_image_names.append(image_names[elem + frame * step])
                            new_mask_names.append(mask_names[elem + frame * step])
                    else:
                        for frame in range(batch_size):
                            new_image_names.append(image_names[elem + frame * step - len(image_names)])
                            new_mask_names.append(mask_names[elem + frame * step - len(image_names)])
                self.image_names += new_image_names
                self.mask_names += new_mask_names
            # BUG FIX: was `subset is not 'val'` — `is` compares object
            # identity, not value, and is unreliable for string literals.
            if subset != 'val':
                # Shuffle at group granularity so each batch stays intact.
                new_image_names = []
                new_mask_names = []
                indices = np.arange(0, len(self.image_names), batch_size)
                np.random.shuffle(indices)
                for elem in indices:
                    for i in range(0, batch_size):
                        if elem + i < len(self.image_names):
                            new_image_names.append(self.image_names[elem + i])
                            new_mask_names.append(self.mask_names[elem + i])
                self.image_names = new_image_names
                self.mask_names = new_mask_names
        else:
            assert(subset in ['Train', 'Test'])
            self.fraction = fraction
            self.image_list = []
            self.mask_list = []
            for seq in self.sequences:
                image_list = np.array(
                    sorted(glob.glob(os.path.join(self.root_dir, imageFolder, seq, '*'))))
                mask_list = np.array(
                    sorted(glob.glob(os.path.join(self.root_dir, maskFolder, seq, '*'))))
                self.image_list += image_list
                self.mask_list += mask_list
            # BUG FIX: the accumulators above are plain lists, but the fancy
            # indexing below requires ndarrays (a list raises TypeError).
            self.image_list = np.asarray(self.image_list)
            self.mask_list = np.asarray(self.mask_list)
            if seed:
                np.random.seed(seed)
            indices = np.arange(len(self.image_list))
            np.random.shuffle(indices)
            self.image_list = self.image_list[indices]
            self.mask_list = self.mask_list[indices]
            if subset == 'Train':
                self.image_names = self.image_list[:int(
                    np.ceil(len(self.image_list) * (1 - self.fraction)))]
                self.mask_names = self.mask_list[:int(
                    np.ceil(len(self.mask_list) * (1 - self.fraction)))]
            else:
                self.image_names = self.image_list[int(
                    np.ceil(len(self.image_list) * (1 - self.fraction))):]
                self.mask_names = self.mask_list[int(
                    np.ceil(len(self.mask_list) * (1 - self.fraction))):]

    def __len__(self):
        # One item per frame path (already grouped/ordered by __init__).
        return len(self.image_names)

    def __getitem__(self, idx):
        """Load one (image, mask) pair; masks are binarized to {0.0, 1.0}."""
        img_name = self.image_names[idx]
        if self.imagecolorflag:
            # Color images come back HWC from cv2; convert to CHW.
            image = cv2.imread(
                img_name, self.imagecolorflag).transpose(2, 0, 1)
        else:
            image = cv2.imread(img_name, self.imagecolorflag)
        msk_name = self.mask_names[idx]
        if self.maskcolorflag:
            mask = cv2.imread(msk_name, self.maskcolorflag).transpose(2, 0, 1)
        else:
            mask = cv2.imread(msk_name, self.maskcolorflag)
        # Any non-zero pixel counts as foreground.
        mask = np.asarray(mask > 0).astype(float)
        sample = {'image': image, 'mask': mask}
        if self.transform:
            sample = self.transform(sample)
        return sample
class SegDataset_Baseline(Dataset):
    """Per-frame segmentation dataset (no temporal grouping).

    Identical to SegDataset except frames are kept in plain sorted order,
    suitable for a shuffling DataLoader.
    """
    def __init__(self, root_dir, imageFolder, maskFolder, transform=None, seed=None, fraction=None, subset=None, imagecolormode='rgb', maskcolormode='grayscale'):
        """
        Args:
            root_dir (string): Directory with all the images and should have the following structure.
            root
            --Images
            -----Img 1
            -----Img N
            --Mask
            -----Mask 1
            -----Mask N
            imageFolder (string) = 'Images' : Name of the folder which contains the Images.
            maskFolder (string) = 'Masks' : Name of the folder which contains the Masks.
            transform (callable, optional): Optional transform to be applied on a sample.
            seed: Specify a seed for the train and test split
            fraction: A float value from 0 to 1 which specifies the validation split fraction
            subset: 'Train' or 'Test' to select the appropriate set.
            imagecolormode: 'rgb' or 'grayscale'
            maskcolormode: 'rgb' or 'grayscale'
        """
        self.color_dict = {'rgb': 1, 'grayscale': 0}
        assert(imagecolormode in ['rgb', 'grayscale'])
        assert(maskcolormode in ['rgb', 'grayscale'])
        self.imagecolorflag = self.color_dict[imagecolormode]
        self.maskcolorflag = self.color_dict[maskcolormode]
        self.root_dir = root_dir
        self.transform = transform
        # FIX: close the yaml file deterministically; safe_load avoids the
        # arbitrary-code-execution risk of plain yaml.load.
        with open(root_dir + 'db_info.yaml', 'r') as seq_file:
            sequences = yaml.safe_load(seq_file)['sequences']
        self.sequences = [seq['name'] for seq in sequences if (seq['year'] == 2016 and subset == seq['set'])]
        print(self.sequences)
        self.image_names = []
        self.mask_names = []
        if not fraction:
            for seq in self.sequences:
                image_names = sorted(
                    glob.glob(os.path.join(self.root_dir, imageFolder, seq, '*')))
                mask_names = sorted(
                    glob.glob(os.path.join(self.root_dir, maskFolder, seq, '*')))
                if len(mask_names) < len(image_names):
                    # Sequence with fewer masks than frames: repeat the first
                    # mask for every frame of the sequence.
                    print(mask_names)
                    mask = mask_names[0]
                    mask_names = [mask for i in range(len(image_names))]
                    print(mask_names)
                self.image_names += image_names
                self.mask_names += mask_names
        else:
            assert(subset in ['Train', 'Test'])
            self.fraction = fraction
            self.image_list = []
            self.mask_list = []
            for seq in self.sequences:
                image_list = np.array(
                    sorted(glob.glob(os.path.join(self.root_dir, imageFolder, seq, '*'))))
                mask_list = np.array(
                    sorted(glob.glob(os.path.join(self.root_dir, maskFolder, seq, '*'))))
                self.image_list += image_list
                self.mask_list += mask_list
            # BUG FIX: fancy indexing below requires ndarrays; the
            # accumulators above are plain lists (TypeError otherwise).
            self.image_list = np.asarray(self.image_list)
            self.mask_list = np.asarray(self.mask_list)
            if seed:
                np.random.seed(seed)
            indices = np.arange(len(self.image_list))
            np.random.shuffle(indices)
            self.image_list = self.image_list[indices]
            self.mask_list = self.mask_list[indices]
            if subset == 'Train':
                self.image_names = self.image_list[:int(
                    np.ceil(len(self.image_list) * (1 - self.fraction)))]
                self.mask_names = self.mask_list[:int(
                    np.ceil(len(self.mask_list) * (1 - self.fraction)))]
            else:
                self.image_names = self.image_list[int(
                    np.ceil(len(self.image_list) * (1 - self.fraction))):]
                self.mask_names = self.mask_list[int(
                    np.ceil(len(self.mask_list) * (1 - self.fraction))):]

    def __len__(self):
        return len(self.image_names)

    def __getitem__(self, idx):
        """Load one (image, mask) pair; masks are binarized to {0.0, 1.0}."""
        img_name = self.image_names[idx]
        if self.imagecolorflag:
            # Color images come back HWC from cv2; convert to CHW.
            image = cv2.imread(
                img_name, self.imagecolorflag).transpose(2, 0, 1)
        else:
            image = cv2.imread(img_name, self.imagecolorflag)
        msk_name = self.mask_names[idx]
        if self.maskcolorflag:
            mask = cv2.imread(msk_name, self.maskcolorflag).transpose(2, 0, 1)
        else:
            mask = cv2.imread(msk_name, self.maskcolorflag)
        mask = np.asarray(mask > 0).astype(float)
        sample = {'image': image, 'mask': mask}
        if self.transform:
            sample = self.transform(sample)
        return sample
class Resize(object):
    """Resize a single image/mask pair (sizes are cv2-style (width, height))."""

    def __init__(self, imageresize, maskresize):
        self.imageresize = imageresize
        self.maskresize = maskresize

    def __call__(self, sample):
        image, mask = sample['image'], sample['mask']
        # cv2.resize expects channels last, so flip CHW -> HWC first.
        if image.ndim == 3:
            image = image.transpose(1, 2, 0)
        if mask.ndim == 3:
            mask = mask.transpose(1, 2, 0)
        mask = cv2.resize(mask, self.maskresize, cv2.INTER_AREA)
        image = cv2.resize(image, self.imageresize, cv2.INTER_AREA)
        # Restore channels-first layout where applicable.
        if image.ndim == 3:
            image = image.transpose(2, 0, 1)
        if mask.ndim == 3:
            mask = mask.transpose(2, 0, 1)
        return {'image': image, 'mask': mask}
class ToTensor(object):
    """Convert the numpy arrays in a sample to torch tensors.

    2-D arrays gain a leading channel axis so every tensor is at least
    (1, H, W).
    """

    def __call__(self, sample, maskresize=None, imageresize=None):
        converted = {}
        for key in ('image', 'mask'):
            arr = sample[key]
            if arr.ndim == 2:
                arr = arr[None, :, :]
            converted[key] = torch.from_numpy(arr)
        return converted
class Normalize(object):
    """Map uint8-range image tensors into [0, 1] floats; cast masks to float."""

    def __call__(self, sample):
        image = sample['image'].type(torch.FloatTensor) / 255
        mask = sample['mask'].type(torch.FloatTensor)
        return {'image': image, 'mask': mask}
def create_dataloader(data_dir, imageFolder, maskFolder, size = (256,256), fraction=None, subset='train', baseline=False, batch_size=4, step=5):
    """Assemble transforms, dataset, and DataLoader for either dataset flavour.

    baseline=True uses the per-frame SegDataset_Baseline with free shuffling;
    otherwise the temporally grouped SegDataset is sampled sequentially so
    its pre-built frame groups stay intact.
    """
    pipeline = transforms.Compose([Resize(size, size), ToTensor(), Normalize()])
    if baseline:
        dataset = SegDataset_Baseline(data_dir, transform=pipeline, imageFolder=imageFolder, maskFolder=maskFolder, subset=subset)
        return DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=8)
    dataset = SegDataset(data_dir, transform=pipeline, imageFolder=imageFolder, maskFolder=maskFolder, subset=subset, batch_size=batch_size, step=step)
    return DataLoader(dataset,
                      sampler=SequentialSampler(dataset),
                      batch_size=batch_size,
                      num_workers=8)
| {"/datasets/__init__.py": ["/datasets/dataloader.py"], "/models/graphnet/AttentiveGraphNeuralNetwork.py": ["/models/convgru/__init__.py", "/models/attention/__init__.py"], "/models/TAGNN.py": ["/models/graphnet/__init__.py"], "/datasets/dataloader_AGNN.py": ["/datasets/dataloader.py"], "/models/__init__.py": ["/models/TAGNN.py"], "/models/graphnet/__init__.py": ["/models/graphnet/AttentiveGraphNeuralNetwork.py"], "/models/attention/__init__.py": ["/models/attention/attention.py"]} |
63,969 | lupries/TAGN2 | refs/heads/master | /datasets/utils.py | import yaml
def sequence_names(root='davis-2017/data/'):
    """Return the names of all 2016 sequences listed in ``db_info.yaml``.

    Args:
        root: dataset root directory containing ``db_info.yaml``.

    Returns:
        list[str]: sequence names with ``year == 2016``.
    """
    # FIX: context manager closes the file; safe_load avoids the arbitrary
    # code execution that plain yaml.load permits on untrusted input.
    with open(root + 'db_info.yaml', 'r') as sequence_file:
        sequences = yaml.safe_load(sequence_file)['sequences']
    return [seq['name'] for seq in sequences if seq['year'] == 2016]
def get_sequences(root='davis-2017/data/'):
    """Return the 2016 sequence names (alias of ``sequence_names``).

    The original body was a byte-for-byte duplicate of ``sequence_names``;
    delegating keeps the behavior in one place. Kept for backward
    compatibility with callers using this name.
    """
    return sequence_names(root)
63,970 | lupries/TAGN2 | refs/heads/master | /models/__init__.py | from .TAGNN import TAGNN, TAGNN_batch | {"/datasets/__init__.py": ["/datasets/dataloader.py"], "/models/graphnet/AttentiveGraphNeuralNetwork.py": ["/models/convgru/__init__.py", "/models/attention/__init__.py"], "/models/TAGNN.py": ["/models/graphnet/__init__.py"], "/datasets/dataloader_AGNN.py": ["/datasets/dataloader.py"], "/models/__init__.py": ["/models/TAGNN.py"], "/models/graphnet/__init__.py": ["/models/graphnet/AttentiveGraphNeuralNetwork.py"], "/models/attention/__init__.py": ["/models/attention/attention.py"]} |
63,971 | lupries/TAGN2 | refs/heads/master | /models/graphnet/__init__.py | from .AttentiveGraphNeuralNetwork import AGNN, create_fully_connected | {"/datasets/__init__.py": ["/datasets/dataloader.py"], "/models/graphnet/AttentiveGraphNeuralNetwork.py": ["/models/convgru/__init__.py", "/models/attention/__init__.py"], "/models/TAGNN.py": ["/models/graphnet/__init__.py"], "/datasets/dataloader_AGNN.py": ["/datasets/dataloader.py"], "/models/__init__.py": ["/models/TAGNN.py"], "/models/graphnet/__init__.py": ["/models/graphnet/AttentiveGraphNeuralNetwork.py"], "/models/attention/__init__.py": ["/models/attention/attention.py"]} |
63,972 | lupries/TAGN2 | refs/heads/master | /models/attention/__init__.py | from .attention import SelfAttention, InterAttention, GAP | {"/datasets/__init__.py": ["/datasets/dataloader.py"], "/models/graphnet/AttentiveGraphNeuralNetwork.py": ["/models/convgru/__init__.py", "/models/attention/__init__.py"], "/models/TAGNN.py": ["/models/graphnet/__init__.py"], "/datasets/dataloader_AGNN.py": ["/datasets/dataloader.py"], "/models/__init__.py": ["/models/TAGNN.py"], "/models/graphnet/__init__.py": ["/models/graphnet/AttentiveGraphNeuralNetwork.py"], "/models/attention/__init__.py": ["/models/attention/attention.py"]} |
63,973 | lupries/TAGN2 | refs/heads/master | /models/convgru/__init__.py | from .convgru import ConvGRU, ConvGRUCell
| {"/datasets/__init__.py": ["/datasets/dataloader.py"], "/models/graphnet/AttentiveGraphNeuralNetwork.py": ["/models/convgru/__init__.py", "/models/attention/__init__.py"], "/models/TAGNN.py": ["/models/graphnet/__init__.py"], "/datasets/dataloader_AGNN.py": ["/datasets/dataloader.py"], "/models/__init__.py": ["/models/TAGNN.py"], "/models/graphnet/__init__.py": ["/models/graphnet/AttentiveGraphNeuralNetwork.py"], "/models/attention/__init__.py": ["/models/attention/attention.py"]} |
64,023 | Kaifat1/test2 | refs/heads/master | /migrations/versions/962ddbc3429d_.py | """empty message
Revision ID: 962ddbc3429d
Revises: None
Create Date: 2016-04-27 21:26:57.380238
"""
# revision identifiers, used by Alembic.
revision = '962ddbc3429d'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the initial schema (auto-generated by Alembic).

    Tables use transliterated Russian names: izdeliya = products,
    klienty = clients, postavschiki = suppliers, roly = roles,
    postavki = deliveries, sotrudniki = employees, materialy = materials,
    zayavki = orders, zagotovki = blanks; sostav_m / sostav_z are
    association tables linking products to materials / blanks.

    NOTE(review): Numeric(precision=8.3) passes a float where an integer
    precision (plus a separate scale) is expected — presumably meant
    Numeric(8, 3); confirm before altering a deployed schema.
    NOTE(review): sostav_m's column 'aterial_id' looks like a typo for
    'material_id'; it matches the ORM table definition, so fixing it
    requires a coordinated follow-up migration.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('izdeliya',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('Imya_iz', sa.String(length=60), nullable=True),
        sa.Column('Kol_vo', sa.Integer(), nullable=True),
        sa.Column('Razmer_kubm', sa.Integer(), nullable=True),
        sa.Column('Cena_iz', sa.Numeric(precision=8.3), nullable=True),
        sa.Column('Chertez', sa.String(length=255), nullable=True),
        sa.Column('Eskiz', sa.String(length=255), nullable=True),
        sa.Column('Sert', sa.String(length=255), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_izdeliya_Cena_iz'), 'izdeliya', ['Cena_iz'], unique=False)
    op.create_index(op.f('ix_izdeliya_Imya_iz'), 'izdeliya', ['Imya_iz'], unique=False)
    op.create_index(op.f('ix_izdeliya_Kol_vo'), 'izdeliya', ['Kol_vo'], unique=False)
    op.create_index(op.f('ix_izdeliya_Razmer_kubm'), 'izdeliya', ['Razmer_kubm'], unique=False)
    op.create_table('klienty',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('FIO', sa.String(length=120), nullable=True),
        sa.Column('Vozrast', sa.Integer(), nullable=True),
        sa.Column('Adres', sa.String(length=64), nullable=True),
        sa.Column('Email', sa.String(length=120), nullable=True),
        sa.Column('Telefon', sa.String(length=20), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_klienty_Adres'), 'klienty', ['Adres'], unique=True)
    op.create_index(op.f('ix_klienty_Email'), 'klienty', ['Email'], unique=True)
    op.create_index(op.f('ix_klienty_FIO'), 'klienty', ['FIO'], unique=True)
    op.create_index(op.f('ix_klienty_Telefon'), 'klienty', ['Telefon'], unique=False)
    op.create_index(op.f('ix_klienty_Vozrast'), 'klienty', ['Vozrast'], unique=False)
    op.create_table('postavschiki',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('Imya', sa.String(length=60), nullable=True),
        sa.Column('Telefon', sa.String(length=20), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_postavschiki_Imya'), 'postavschiki', ['Imya'], unique=False)
    op.create_index(op.f('ix_postavschiki_Telefon'), 'postavschiki', ['Telefon'], unique=False)
    op.create_table('roly',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('Imya', sa.String(length=64), nullable=True),
        sa.Column('access_level', sa.Integer(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('Imya')
    )
    # Tables below reference the ones above via foreign keys, so order matters.
    op.create_table('postavki',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('Postavschik', sa.String(length=60), nullable=True),
        sa.Column('Material', sa.String(length=60), nullable=True),
        sa.Column('Cena_dost', sa.Numeric(precision=8.3), nullable=True),
        sa.Column('Data', sa.Date(), nullable=True),
        sa.Column('Kol_vo', sa.Integer(), nullable=True),
        sa.Column('postavschik_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['postavschik_id'], ['postavschiki.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_postavki_Cena_dost'), 'postavki', ['Cena_dost'], unique=False)
    op.create_index(op.f('ix_postavki_Data'), 'postavki', ['Data'], unique=False)
    op.create_index(op.f('ix_postavki_Kol_vo'), 'postavki', ['Kol_vo'], unique=False)
    op.create_index(op.f('ix_postavki_Material'), 'postavki', ['Material'], unique=False)
    op.create_index(op.f('ix_postavki_Postavschik'), 'postavki', ['Postavschik'], unique=False)
    op.create_table('sotrudniki',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('FIO', sa.String(length=120), nullable=True),
        sa.Column('Dolznost', sa.String(length=64), nullable=True),
        sa.Column('Vozrast', sa.Integer(), nullable=True),
        sa.Column('Telefon', sa.String(length=20), nullable=True),
        sa.Column('Otdel', sa.String(length=40), nullable=True),
        sa.Column('email', sa.String(length=120), nullable=True),
        sa.Column('password_hash', sa.String(length=120), nullable=True),
        sa.Column('rol_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['rol_id'], ['roly.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_sotrudniki_Dolznost'), 'sotrudniki', ['Dolznost'], unique=True)
    op.create_index(op.f('ix_sotrudniki_FIO'), 'sotrudniki', ['FIO'], unique=True)
    op.create_index(op.f('ix_sotrudniki_Otdel'), 'sotrudniki', ['Otdel'], unique=False)
    op.create_index(op.f('ix_sotrudniki_Telefon'), 'sotrudniki', ['Telefon'], unique=False)
    op.create_index(op.f('ix_sotrudniki_Vozrast'), 'sotrudniki', ['Vozrast'], unique=False)
    op.create_index(op.f('ix_sotrudniki_email'), 'sotrudniki', ['email'], unique=True)
    op.create_table('materialy',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('Imya_m', sa.String(length=60), nullable=True),
        sa.Column('Kol_vo', sa.Integer(), nullable=True),
        sa.Column('Cena_m', sa.Numeric(precision=8.3), nullable=True),
        sa.Column('postavka_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['postavka_id'], ['postavki.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_materialy_Cena_m'), 'materialy', ['Cena_m'], unique=False)
    op.create_index(op.f('ix_materialy_Imya_m'), 'materialy', ['Imya_m'], unique=False)
    op.create_index(op.f('ix_materialy_Kol_vo'), 'materialy', ['Kol_vo'], unique=False)
    op.create_table('zayavki',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('FIO_sotr', sa.String(length=120), nullable=True),
        sa.Column('FIO_kl', sa.String(length=120), nullable=True),
        sa.Column('Nazv_izd', sa.String(length=60), nullable=True),
        sa.Column('Data', sa.Date(), nullable=True),
        sa.Column('Avans', sa.Numeric(precision=8.3), nullable=True),
        sa.Column('klient_id', sa.Integer(), nullable=True),
        sa.Column('sotrudnik_id', sa.Integer(), nullable=True),
        sa.Column('izdelie_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['izdelie_id'], ['izdeliya.id'], ),
        sa.ForeignKeyConstraint(['klient_id'], ['klienty.id'], ),
        sa.ForeignKeyConstraint(['sotrudnik_id'], ['sotrudniki.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_zayavki_Avans'), 'zayavki', ['Avans'], unique=False)
    op.create_index(op.f('ix_zayavki_Data'), 'zayavki', ['Data'], unique=False)
    op.create_index(op.f('ix_zayavki_FIO_kl'), 'zayavki', ['FIO_kl'], unique=True)
    op.create_index(op.f('ix_zayavki_FIO_sotr'), 'zayavki', ['FIO_sotr'], unique=True)
    op.create_index(op.f('ix_zayavki_Nazv_izd'), 'zayavki', ['Nazv_izd'], unique=False)
    op.create_table('sostav_m',
        sa.Column('izdelie_id', sa.Integer(), nullable=True),
        sa.Column('aterial_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['aterial_id'], ['materialy.id'], ),
        sa.ForeignKeyConstraint(['izdelie_id'], ['izdeliya.id'], )
    )
    op.create_table('zagotovki',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('Imya_z', sa.String(length=60), nullable=True),
        sa.Column('Kol_vo', sa.Integer(), nullable=True),
        sa.Column('Dlina', sa.Integer(), nullable=True),
        sa.Column('Shirina', sa.Integer(), nullable=True),
        sa.Column('Vysota', sa.Integer(), nullable=True),
        sa.Column('material_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['material_id'], ['materialy.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_zagotovki_Dlina'), 'zagotovki', ['Dlina'], unique=False)
    op.create_index(op.f('ix_zagotovki_Imya_z'), 'zagotovki', ['Imya_z'], unique=False)
    op.create_index(op.f('ix_zagotovki_Kol_vo'), 'zagotovki', ['Kol_vo'], unique=False)
    op.create_index(op.f('ix_zagotovki_Shirina'), 'zagotovki', ['Shirina'], unique=False)
    op.create_index(op.f('ix_zagotovki_Vysota'), 'zagotovki', ['Vysota'], unique=False)
    op.create_table('sostav_z',
        sa.Column('izdelie_id', sa.Integer(), nullable=True),
        sa.Column('polufabricat_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['izdelie_id'], ['izdeliya.id'], ),
        sa.ForeignKeyConstraint(['polufabricat_id'], ['zagotovki.id'], )
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the full schema in reverse dependency order (auto-generated).

    Association/dependent tables and their indexes are dropped first so no
    foreign-key constraint blocks the removal of a referenced table.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('sostav_z')
    op.drop_index(op.f('ix_zagotovki_Vysota'), table_name='zagotovki')
    op.drop_index(op.f('ix_zagotovki_Shirina'), table_name='zagotovki')
    op.drop_index(op.f('ix_zagotovki_Kol_vo'), table_name='zagotovki')
    op.drop_index(op.f('ix_zagotovki_Imya_z'), table_name='zagotovki')
    op.drop_index(op.f('ix_zagotovki_Dlina'), table_name='zagotovki')
    op.drop_table('zagotovki')
    op.drop_table('sostav_m')
    op.drop_index(op.f('ix_zayavki_Nazv_izd'), table_name='zayavki')
    op.drop_index(op.f('ix_zayavki_FIO_sotr'), table_name='zayavki')
    op.drop_index(op.f('ix_zayavki_FIO_kl'), table_name='zayavki')
    op.drop_index(op.f('ix_zayavki_Data'), table_name='zayavki')
    op.drop_index(op.f('ix_zayavki_Avans'), table_name='zayavki')
    op.drop_table('zayavki')
    op.drop_index(op.f('ix_materialy_Kol_vo'), table_name='materialy')
    op.drop_index(op.f('ix_materialy_Imya_m'), table_name='materialy')
    op.drop_index(op.f('ix_materialy_Cena_m'), table_name='materialy')
    op.drop_table('materialy')
    op.drop_index(op.f('ix_sotrudniki_email'), table_name='sotrudniki')
    op.drop_index(op.f('ix_sotrudniki_Vozrast'), table_name='sotrudniki')
    op.drop_index(op.f('ix_sotrudniki_Telefon'), table_name='sotrudniki')
    op.drop_index(op.f('ix_sotrudniki_Otdel'), table_name='sotrudniki')
    op.drop_index(op.f('ix_sotrudniki_FIO'), table_name='sotrudniki')
    op.drop_index(op.f('ix_sotrudniki_Dolznost'), table_name='sotrudniki')
    op.drop_table('sotrudniki')
    op.drop_index(op.f('ix_postavki_Postavschik'), table_name='postavki')
    op.drop_index(op.f('ix_postavki_Material'), table_name='postavki')
    op.drop_index(op.f('ix_postavki_Kol_vo'), table_name='postavki')
    op.drop_index(op.f('ix_postavki_Data'), table_name='postavki')
    op.drop_index(op.f('ix_postavki_Cena_dost'), table_name='postavki')
    op.drop_table('postavki')
    op.drop_table('roly')
    op.drop_index(op.f('ix_postavschiki_Telefon'), table_name='postavschiki')
    op.drop_index(op.f('ix_postavschiki_Imya'), table_name='postavschiki')
    op.drop_table('postavschiki')
    op.drop_index(op.f('ix_klienty_Vozrast'), table_name='klienty')
    op.drop_index(op.f('ix_klienty_Telefon'), table_name='klienty')
    op.drop_index(op.f('ix_klienty_FIO'), table_name='klienty')
    op.drop_index(op.f('ix_klienty_Email'), table_name='klienty')
    op.drop_index(op.f('ix_klienty_Adres'), table_name='klienty')
    op.drop_table('klienty')
    op.drop_index(op.f('ix_izdeliya_Razmer_kubm'), table_name='izdeliya')
    op.drop_index(op.f('ix_izdeliya_Kol_vo'), table_name='izdeliya')
    op.drop_index(op.f('ix_izdeliya_Imya_iz'), table_name='izdeliya')
    op.drop_index(op.f('ix_izdeliya_Cena_iz'), table_name='izdeliya')
    op.drop_table('izdeliya')
    ### end Alembic commands ###
| {"/run.py": ["/app/__init__.py"], "/app/views.py": ["/app/__init__.py", "/app/forms.py", "/app/models.py"], "/app/models.py": ["/app/__init__.py"]} |
64,024 | Kaifat1/test2 | refs/heads/master | /config.py | import os
# Enable CSRF protection for Flask-WTF forms.
WTF_CSRF_ENABLED = True
# NOTE(review): the secret key and DB credentials are hard-coded; move them
# to environment variables before deploying.
SECRET_KEY = 'qwerty'

# BUG FIX: Flask-SQLAlchemy reads SQLALCHEMY_DATABASE_URI — with only the
# misspelled *_URL key the extension would fall back to its default database.
SQLALCHEMY_DATABASE_URI = 'mysql://misha:qwerty@localhost/mydb'
# Old misspelled name kept for backward compatibility with any code reading it.
SQLALCHEMY_DATABASE_URL = SQLALCHEMY_DATABASE_URI
SQLALCHEMY_TRACK_MODIFICATIONS = True
64,025 | Kaifat1/test2 | refs/heads/master | /run.py | from app import manager, app
# Start the Flask-Script manager CLI only when executed directly, so that
# importing this module (e.g. from a WSGI server) has no side effects.
if __name__ == '__main__':
    manager.run()
64,026 | Kaifat1/test2 | refs/heads/master | /app/forms.py | from flask_wtf import Form
from wtforms import StringField, BooleanField, PasswordField, SubmitField
from wtforms.validators import DataRequired, Email
class LoginForm(Form):
    """Login form: e-mail-style login, password, and a remember-me flag.

    Field labels are user-facing Russian strings: 'Логин' = login,
    'Пароль' = password, 'Запомнить' = remember me, 'Вход' = sign in.
    """
    # The login is validated as an e-mail address (see views: it is matched
    # against Sotrudnik.email).
    login = StringField('Логин', validators=[DataRequired(), Email()])
    password = PasswordField('Пароль', validators=[DataRequired()])
    remember_me = BooleanField('Запомнить', default=False)
    submit = SubmitField('Вход')
64,027 | Kaifat1/test2 | refs/heads/master | /app/views.py | from flask import render_template, flash, redirect, abort, session, url_for, request, g
from flask_login import login_user, logout_user, current_user, login_required
from app import app, db, lm
from .forms import LoginForm
from .models import Sotrudnik
@app.route('/')
@app.route ('/index')
def index():
    """Render the landing page with a hard-coded demo user and demo posts."""
    demo_user = {'nickname': 'Mishanya'}
    demo_posts = [
        {'author': {'nickname': 'John'}, 'body': 'Beautiful day in Portland!'},
        {'author': {'nickname': 'Susan'}, 'body': 'The Avengers movie was so cool!'},
    ]
    return render_template("index.html",
                           title='Home',
                           user=demo_user,
                           posts=demo_posts)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a Sotrudnik by e-mail and password.

    On valid credentials, logs the user in (honouring "remember me") and
    redirects to the requested `next` page or the index; otherwise the form
    is re-rendered with a flash message.
    """
    # TODO: redirect already-authenticated users straight to the index
    # (the original left this commented out with `#?` markers).
    form = LoginForm()
    if form.validate_on_submit():
        user = Sotrudnik.query.filter_by(email=form.login.data).first()
        if user is not None and user.verify_password(form.password.data):
            # BUG FIX: removed debug prints of user.nickname — the Sotrudnik
            # model defines no `nickname` attribute, so they raised
            # AttributeError on every successful login.
            login_user(user, form.remember_me.data)
            return redirect(request.args.get('next') or url_for('index'))
        flash('Invalid username or/and password')
    # NOTE(review): assumes 'OPENID_PROVIDERS' exists in app.config;
    # otherwise this raises KeyError — confirm against the app setup.
    return render_template('login.html',
                           title='Sign In',
                           form=form,
                           providers=app.config['OPENID_PROVIDERS'])
@app.route('/logout')
def logout():
    """Log out the current user (if any) and return to the index page."""
    if current_user.is_authenticated:
        logout_user()
        # Flash only makes sense after an actual logout.
        flash('You have been logged out')
    return redirect(url_for('index'))
@app.before_request
def before_request():
    # Expose the logged-in user on flask.g so views/templates can read g.user.
    g.user = current_user
@lm.user_loader
def load_user(id):
    """Flask-Login callback: fetch a Sotrudnik by its primary key.

    NOTE(review): app/models.py registers another @lm.user_loader with the
    same behavior; whichever module runs last wins — confirm only one
    registration is intended.
    """
    return Sotrudnik.query.get(int(id))
| {"/run.py": ["/app/__init__.py"], "/app/views.py": ["/app/__init__.py", "/app/forms.py", "/app/models.py"], "/app/models.py": ["/app/__init__.py"]} |
64,028 | Kaifat1/test2 | refs/heads/master | /app/models.py | from app import db
from . import lm
from werkzeug.security import generate_password_hash, check_password_hash
from hashlib import md5
@lm.user_loader
def load_user(sotrudnik_id):
    """Flask-Login callback: load a Sotrudnik by its (string) primary key.

    NOTE(review): app/views.py registers a second @lm.user_loader; whichever
    module is imported last wins — confirm only one registration is intended.
    """
    return Sotrudnik.query.get(int(sotrudnik_id))
class Sotrudnik(db.Model):
    """Employee model, doubling as the Flask-Login user class.

    Columns use transliterated Russian names: FIO = full name,
    Dolznost = job title, Vozrast = age, Telefon = phone, Otdel = department.
    """
    __tablename__ = 'sotrudniki'
    id = db.Column(db.Integer, primary_key=True)
    FIO = db.Column(db.String(120), index=True, unique=True)
    Dolznost = db.Column(db.String(64), index=True, unique=True)
    Vozrast = db.Column(db.Integer, index=True)
    Telefon = db.Column(db.String(20), index=True)
    Otdel = db.Column(db.String(40), index=True,)
    email = db.Column(db.String(120), index=True, unique=True)
    # Werkzeug hash only; the raw password is never persisted.
    password_hash = db.Column(db.String(120))
    rol_id = db.Column(db.Integer, db.ForeignKey('roly.id'))

    # --- Flask-Login protocol -----------------------------------------
    @property
    def is_authenticated(self):
        # Any loaded Sotrudnik counts as authenticated.
        return True

    @property
    def is_active(self):
        return True

    @property
    def is_anonymous(self):
        return False

    # --- Password handling --------------------------------------------
    @property
    def password(self):
        # Write-only attribute: reading the plain password is forbidden.
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        """Return True when `password` matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    def get_id(self):
        # Flask-Login requires a text id; on Python 3 str is already unicode.
        return str(self.id)  # python 3

    def avatar(self, size):
        """Gravatar URL for this employee's e-mail at the given pixel size.

        NOTE(review): served over plain http; consider the https endpoint.
        """
        return 'http://www.gravatar.com/avatar/{}?d=mm&s={}'.\
            format(md5(self.email.encode('utf-8')).hexdigest(), size)

    def __repr__(self):
        return '<Sotrudnik {}>'.format(self.FIO)
class Rol (db.Model):
    """Access-control role; `access_level` is a numeric permission tier."""
    __tablename__ = 'roly'
    id = db.Column(db.Integer, primary_key=True)
    # Role name (Imya = name).
    Imya = db.Column(db.String(64), unique=True)
    access_level = db.Column(db.Integer)
    # One role can be shared by many employees (backref gives Sotrudnik.rol).
    sotrudniki = db.relationship('Sotrudnik', backref = 'rol')
class Klient(db.Model):
    """Customer record (name, age, address, contact details)."""
    __tablename__ = 'klienty'
    id = db.Column(db.Integer, primary_key=True)
    FIO = db.Column(db.String(120), index=True, unique=True)
    Vozrast = db.Column(db.Integer, index=True)
    Adres = db.Column(db.String(64), index=True, unique=True)
    Email = db.Column(db.String(120), index=True, unique=True)
    Telefon = db.Column(db.String(20), index=True)
    def __repr__(self):
        return '<Klient {}>'.format(self.FIO)
class Zayavka(db.Model):
    """Order linking an employee, a client and a product, with a down payment."""
    __tablename__ = 'zayavki'
    id = db.Column(db.Integer, primary_key=True)
    FIO_sotr = db.Column(db.String(120), index=True, unique=True)
    FIO_kl = db.Column(db.String(120), index=True, unique=True)
    Nazv_izd = db.Column(db.String(60), index=True)
    Data = db.Column(db.Date, index=True)
    # NOTE(review): Numeric(precision=8.3) passes a *float* as precision;
    # Numeric(8, 3) was probably intended -- confirm against the DB schema.
    Avans = db.Column(db.Numeric(precision=8.3), index=True)
    klient_id = db.Column(db.Integer, db.ForeignKey('klienty.id'))
    sotrudnik_id = db.Column(db.Integer, db.ForeignKey('sotrudniki.id'))
    izdelie_id = db.Column(db.Integer, db.ForeignKey('izdeliya.id'))
# Association tables for the Izdelie <-> Material / Zagotovka many-to-many links.
sostav_m = db.Table('sostav_m',
    db.Column('izdelie_id', db.Integer, db.ForeignKey('izdeliya.id')),
    # Bug fix: the column was misspelled 'aterial_id'.  If a database was
    # already created from the old definition, a rename migration is needed.
    db.Column('material_id', db.Integer, db.ForeignKey('materialy.id'))
)
sostav_z = db.Table('sostav_z',
    db.Column('izdelie_id', db.Integer, db.ForeignKey('izdeliya.id')),
    db.Column('polufabricat_id', db.Integer, db.ForeignKey('zagotovki.id')),
)
class Izdelie(db.Model):
    """Manufactured product, built from materials and blanks (zagotovki)."""
    __tablename__ = 'izdeliya'
    id = db.Column(db.Integer, primary_key=True)
    Imya_iz = db.Column(db.String(60), index=True)
    Kol_vo = db.Column(db.Integer, index=True)
    Razmer_kubm = db.Column(db.Integer, index=True)
    Cena_iz = db.Column(db.Numeric(precision=8.3), index=True)
    # Paths/identifiers of the drawing, sketch and certificate documents.
    Chertez = db.Column(db.String(255))
    Eskiz = db.Column(db.String(255))
    Sert = db.Column(db.String(255))
    rabota_id = db.Column(db.Integer, db.ForeignKey('raboty.id'))
    # Bug fix: relationship() resolves its string target by *class* name,
    # case-sensitively; 'zagotovka' / 'material' would raise at mapper
    # configuration time.  The mapped classes are Zagotovka and Material.
    Imya_z = db.relationship('Zagotovka',
        secondary=sostav_z,
        backref=db.backref('Imya_iz', lazy='dynamic'),
        lazy='dynamic')
    Imya_m = db.relationship('Material',
        secondary=sostav_m,
        backref=db.backref('Imya_iz', lazy='dynamic'),
        lazy='dynamic')
    def __repr__(self):
        return '<Izdelie {}>'.format(self.Imya_iz)
class Zagotovka(db.Model):
    """Semi-finished part (blank / polufabricat) used to build products."""
    __tablename__ = 'zagotovki'
    id = db.Column(db.Integer, primary_key=True)
    Imya_z = db.Column(db.String(60), index=True)
    Tip_proiz = db.Column(db.String(60), index=True)
    Kol_vo = db.Column(db.Integer, index=True)
    # Dimensions: length, width, height.
    Dlina = db.Column(db.Integer, index=True)
    Shirina = db.Column(db.Integer, index=True)
    Vysota = db.Column(db.Integer, index=True)
    material_id = db.Column(db.Integer, db.ForeignKey('materialy.id'))
    rabota_id = db.Column(db.Integer, db.ForeignKey('raboty.id'))
    def __repr__(self):
        # Bug fix: self.Imya_p does not exist, so repr() raised
        # AttributeError; the name column is Imya_z.
        return '<Polufabricat {}>'.format(self.Imya_z)
class Material(db.Model):
    """Raw material; linked many-to-many to Izdelie via sostav_m."""
    __tablename__ = 'materialy'
    id = db.Column(db.Integer, primary_key=True)
    Imya_m = db.Column(db.String(60), index=True)
    Kol_vo = db.Column(db.Integer, index=True)
    # NOTE(review): Numeric(precision=8.3) passes a float precision;
    # Numeric(8, 3) was probably intended -- confirm against the DB schema.
    Cena_m = db.Column(db.Numeric(precision=8.3), index=True)
    postavka_id = db.Column(db.Integer, db.ForeignKey('postavki.id'))
    def __repr__(self):
        return '<Material {}>'.format(self.Imya_m)
class Rabota(db.Model):
    """Work/operation applied to products (Izdelie) and blanks (Zagotovka)."""
    __tablename__ = 'raboty'
    id = db.Column(db.Integer, primary_key=True)
    Imya_r = db.Column(db.String(60), index=True)
    Koeff = db.Column(db.Integer, index=True)
    Proiz = db.Column(db.Integer, index=True)
    Cena_r = db.Column(db.Numeric(precision=8.3), index=True)
    Ed_izm = db.Column(db.Numeric(precision=8.3), index=True)
    # One-to-many collections; note the attribute names mirror the related
    # class names, which is legal but easy to misread.
    Izdelie = db.relationship('Izdelie', backref='rabota', lazy='dynamic')
    Zagotovka = db.relationship('Zagotovka', backref='rabota', lazy='dynamic')
class Postavschik(db.Model):
    """Supplier; one supplier has many deliveries (Postavka)."""
    __tablename__ = 'postavschiki'
    id = db.Column(db.Integer, primary_key=True)
    Imya = db.Column(db.String(60), index=True)
    Telefon = db.Column(db.String(20), index=True)
    Postavka = db.relationship('Postavka', backref='postavschik', lazy='dynamic')
    def __repr__(self):
        return '<Postavschik {}>'.format(self.Imya)
class Postavka(db.Model):
    """Delivery of material from a supplier (date, quantity, delivery cost)."""
    __tablename__ = 'postavki'
    id = db.Column(db.Integer, primary_key=True)
    # Denormalized copies of the supplier/material names alongside the FK below.
    Postavschik = db.Column(db.String(60), index=True)
    Material = db.Column(db.String(60), index=True)
    Cena_dost = db.Column(db.Numeric(precision=8.3), index=True)
    Data = db.Column(db.Date, index=True)
    Kol_vo = db.Column(db.Integer, index=True)
    postavschik_id = db.Column(db.Integer, db.ForeignKey('postavschiki.id'))
64,029 | Kaifat1/test2 | refs/heads/master | /migrations/versions/64c19cc5124c_.py | """empty message
Revision ID: 64c19cc5124c
Revises: 962ddbc3429d
Create Date: 2016-04-29 23:24:13.396602
"""
# revision identifiers, used by Alembic.
revision = '64c19cc5124c'
down_revision = '962ddbc3429d'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply revision 64c19cc5124c: create the 'raboty' table (with its
    indexes) and add rabota_id foreign keys to 'izdeliya' and 'zagotovki'."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('raboty',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('Imya_r', sa.String(length=60), nullable=True),
        sa.Column('Koeff', sa.Integer(), nullable=True),
        sa.Column('Proiz', sa.Integer(), nullable=True),
        sa.Column('Cena_r', sa.Numeric(precision=8.3), nullable=True),
        sa.Column('Ed_izm', sa.Numeric(precision=8.3), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_raboty_Cena_r'), 'raboty', ['Cena_r'], unique=False)
    op.create_index(op.f('ix_raboty_Ed_izm'), 'raboty', ['Ed_izm'], unique=False)
    op.create_index(op.f('ix_raboty_Imya_r'), 'raboty', ['Imya_r'], unique=False)
    op.create_index(op.f('ix_raboty_Koeff'), 'raboty', ['Koeff'], unique=False)
    op.create_index(op.f('ix_raboty_Proiz'), 'raboty', ['Proiz'], unique=False)
    op.add_column('izdeliya', sa.Column('rabota_id', sa.Integer(), nullable=True))
    op.create_foreign_key(None, 'izdeliya', 'raboty', ['rabota_id'], ['id'])
    op.add_column('zagotovki', sa.Column('Tip_proiz', sa.String(length=60), nullable=True))
    op.add_column('zagotovki', sa.Column('rabota_id', sa.Integer(), nullable=True))
    op.create_index(op.f('ix_zagotovki_Tip_proiz'), 'zagotovki', ['Tip_proiz'], unique=False)
    op.create_foreign_key(None, 'zagotovki', 'raboty', ['rabota_id'], ['id'])
    ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop the added columns/indexes and 'raboty'.

    NOTE(review): drop_constraint(None, ...) assumes the backend can find
    an unnamed FK; on some backends this fails -- confirm before relying
    on downgrade in production.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(None, 'zagotovki', type_='foreignkey')
    op.drop_index(op.f('ix_zagotovki_Tip_proiz'), table_name='zagotovki')
    op.drop_column('zagotovki', 'rabota_id')
    op.drop_column('zagotovki', 'Tip_proiz')
    op.drop_constraint(None, 'izdeliya', type_='foreignkey')
    op.drop_column('izdeliya', 'rabota_id')
    op.drop_index(op.f('ix_raboty_Proiz'), table_name='raboty')
    op.drop_index(op.f('ix_raboty_Koeff'), table_name='raboty')
    op.drop_index(op.f('ix_raboty_Imya_r'), table_name='raboty')
    op.drop_index(op.f('ix_raboty_Ed_izm'), table_name='raboty')
    op.drop_index(op.f('ix_raboty_Cena_r'), table_name='raboty')
    op.drop_table('raboty')
    ### end Alembic commands ###
| {"/run.py": ["/app/__init__.py"], "/app/views.py": ["/app/__init__.py", "/app/forms.py", "/app/models.py"], "/app/models.py": ["/app/__init__.py"]} |
64,030 | Kaifat1/test2 | refs/heads/master | /app/__init__.py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager, Shell
from flask_migrate import Migrate, MigrateCommand
from flask_bootstrap import Bootstrap
import os
from flask_login import LoginManager
#from config import basedir
# Module-level Flask application setup (no application factory).
app = Flask(__name__)
app.config.from_object('config')
lm = LoginManager()
lm.session_protection = 'strong'
bootstrap = Bootstrap(app)
# NOTE(review): this hard-coded URI overrides whatever config.py set above;
# credentials in source should move into the config module.
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root@localhost/mydb'
db = SQLAlchemy(app)
migrate = Migrate(app, db)
# Imported late on purpose: app.views needs `app` and `db` from this module.
from app import views
manager = Manager(app)
manager.add_command('db', MigrateCommand)
lm.init_app(app)
# Endpoint anonymous users are redirected to by Flask-Login.
lm.login_view = 'login'
64,031 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/scripts/dirtree.py |
# sample app showing how to use TreeOps
#
# this is a simple file lister, like 'ls -lR', demonstrating
# that it takes no special code to implement all the standard
# tree options (-r, -R, -x, -i, -h)
#
# frankm@hiwaay.net
#
# make sure I can import disthelper
import sys
sys.path.insert(0,'.')
from . import grab_disthelper
# now the real code begins ...
from disthelper.treeops.treeops import *
import os
from stat import *
from time import strftime, localtime
class TreeLister(TreeOps):
    """Example of a TreeOp that lists files recursively (like 'ls -lR')."""
    def __init__(self):
        TreeOps.__init__(self)
        # count of files successfully stat()'ed
        self.nr_files = 0
    def run(self, argv):
        """Parse argv and list every matched file under the given roots."""
        # parse standard tree options (-r, -R, -x, etc.)
        p = TreeOptParser('dirtree.py','Show file/directory listing.')
        opts,args = p.parse_argv(argv)
        if len(args) == 0:
            print("** Must give a directory to list.")
            p.show_usage()
            sys.exit(1)
        # remember which files/dirs we couldn't access
        self.nofile = []
        self.nodir = []
        # walk the tree with globbing, etc.
        self.runtree(opts,args)
        # tell user which files/dirs I couldn't access
        if len(self.nofile):
            print("I could not access these files:")
            for f in self.nofile:
                print(" %s" % f)
        if len(self.nodir):
            print("I could not access these directories:")
            for d in self.nodir:
                print(" %s" % d)
    # - internal API - called as the tree is walked -
    def process_one_file(self,fullname,opts):
        """Print one matched file's name, size and modification date."""
        try:
            st = os.stat(fullname)
            self.nr_files += 1
        except:
            # stat failed (permissions, broken link, ...): record and move on
            self.nofile.append(fullname)
            return
        print("%-30s %8d %s" % (fullname,st[ST_SIZE],
            strftime('%Y-%m-%d',localtime(st[ST_MTIME]))))
    def process_one_dir(self,fullname):
        """Print a banner when entering each directory."""
        print("\nDIRECTORY %s" % fullname)
        print("-------------------------------------------------")
    def dir_noaccess(self,fullname):
        # strictly informational; there is no provision to retry
        self.nodir.append(fullname)
# Script entry point: list the tree(s) named on the command line.
t = TreeLister()
t.run(sys.argv)
print("Listed %d files." % t.nr_files)
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,032 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/attic/textify.py |
# Obsoleted by scripts in disthelper/scripts
# ADD
# auto-guess indentation on .py files
# other files require -w arg to 'tabify'
#
# make it work under python 2.0+
#======================================================================
#
# textify.py - Perform various text transformations on a
# set of files.
#
# Can do the following:
#
# tabify file
# untabify file
# convert file to portable text format (\n)
# convert file to platform-specific text format
#
# Examples:
# Recursively tabify all .py, .pl, and .c files under /home/frankm:
# textify.py -t -r -x py,pl,c /home/frankm
#
# Recursively untabify all .py, .pl, and .c files under /home/frankm
# and convert to portable text format:
# textify.py -t --to-portable -r -x py,pl,c /home/frankm
#
# Recursively convert all README* files to platform-specific
# text format:
# textify.py --to-platform -R "README*"
#
# Requires Python 2.3+
# (Would only be run by developers, so this seems OK to me.)
#
# [The core tab/untabification logic was adapted from IDLE.]
#
#======================================================================
# There is nothing here that couldn't be done with simple shell
# commands and tools under a POSIXy operating system. However, this
# program is meant for cross-platform use where those common tools
# may not exist.
#
# written by Frank McIngvale <frankm@hiwaay.net>
#======================================================================
from optparse import OptionParser
import os, re
from tempfile import mkstemp
from disthelper.treeops.lineops import *
# default tab width, settable with -w
TABWIDTH = 4
def transform_thing( thing, opts, regexlist ):
    """Apply the transforms selected in opts to a file or directory tree.

    thing     -- path to a file or directory
    opts      -- parsed optparse options (tabify/untabify/to_platform/...)
    regexlist -- compiled regexes; tree files must match one to be changed

    Raises Exception if thing is neither a file nor a recursable directory.
    """
    # build the ordered list of line transforms
    lineops = []
    if opts.tabify:
        lineops.append( tabify_line )
    if opts.untabify:
        lineops.append( untabify_line )
    if opts.to_platform:
        lineops.append( to_platform_text )
    if opts.to_portable:
        lineops.append( to_portable_text )
    # transform the thing
    if os.path.isfile( thing ):
        transform_file( lineops, thing )
    elif os.path.isdir( thing ) and \
         (opts.glob or opts.recursive ):
        # bug fix: this branch referenced the undefined name 'root'
        # (NameError at runtime); recurse on 'thing' itself
        transform_tree( lineops, thing, regexlist )
    else:
        raise Exception("Unable to transform '%s'" % thing)
def transform_tree( lineops, root, regexlist=[] ):
    """Run list of transforms (lineops) on a directory
    tree at 'root', on files matching the regexlist."""
    for path, dnames, fnames in os.walk( root ):
        for name in fnames:
            for r in regexlist:
                if r.match( name ):
                    # bug fix: join against the directory currently being
                    # walked ('path'), not the tree root -- otherwise files
                    # in subdirectories get nonexistent paths
                    full = os.path.join(path, name)
                    transform_file(lineops, full)
                    break
def transform_file( lineops, filename ):
    """Run list of transforms (lineops) on a single file, in place.

    Each op takes a bytes line and returns the transformed bytes.  Output
    goes to a temp file which then replaces the original via os.rename
    (NOTE: assumes the temp dir and the file are on the same filesystem).
    """
    hout, tname = mkstemp()
    # the with-block guarantees the input file is closed before unlinking
    with open( filename, 'rb' ) as fin:
        for line in fin:
            # bug fix: chain the ops -- each op must see the previous op's
            # output; the old code applied every op to the raw line and
            # kept only the last op's result
            buf = line
            for op in lineops:
                buf = op(buf)
            # os.write may write fewer bytes than asked; loop until done
            while len(buf):
                nr = os.write(hout, buf)
                buf = buf[nr:]
    os.close(hout)
    os.unlink(filename)
    os.rename(tname, filename)
# parse cmdline
o = OptionParser(usage="%prog [opts] filename ...\n\nPerform text transformations on a set of files.")
o.add_option("-t","--tabify",
    action='store_true', dest='tabify', default=False,
    help='Perform tabification (replace spaces with tabs).')
o.add_option("-u","--untabify",
    action='store_true', dest='untabify', default=False,
    help='Perform untabification (replace tabs with spaces).')
o.add_option('','--to-platform',
    action='store_true', dest='to_platform', default=False,
    help='Convert to platform-specific text format')
o.add_option('','--to-portable',
    action='store_true', dest='to_portable', default=False,
    help='Convert to portable text format (\\n line endings)')
o.add_option('-w','--tab-width', dest='tabwidth', default=TABWIDTH,
    type='int', help='Set TAB width (default=%d)' % TABWIDTH)
o.add_option("-r","--recursive",
    action='store_true', dest='recursive', default=False,
    help='Include subdirectories.')
o.add_option("-R","--recursive-glob",
    action='store', dest='glob', type='string', default=None,
    help='Recurse (like -r) but use GLOB recursively to match files to transform')
o.add_option("-x","--extension",type="string",
    action="append", dest="extlist", default=[],
    help="Transform only files with the given file extension(s) [seperate multiple extensions with commas, or use -x multiple times]")
o.add_option('-i','--ignore-case',
    action='store_true', dest='nocase', default=False,
    help='Ignore case when matching filenames with -x or -R')
opts, args = o.parse_args()
# note that "-x py,pl -x c" will result in: ['py,pl', 'c'], so
# normalize to one extension per string. assume that, if the
# user forces extra spaces into the string by using quotes,
# that they really intended it, so don't worry about stripping them
extlist = []
if opts.extlist is not None:
    for ext in opts.extlist:
        extlist += ext.lower().split(',')
globlist = []
# First, make a regex out of each extension match
for ext in extlist:
    globlist.append( r'^.+\.%s$' % ext )
# Now add any glob pattern the user specified with -R
if opts.glob:
    # turn the shell-style glob into a regex
    g = opts.glob.replace('.',r'\.').replace('*','.*')
    g = '^' + g + '$'
    globlist.append(g)
# compile them all
reflags = 0
if opts.nocase:
    reflags = re.I
globlist = [re.compile(x,reflags) for x in globlist]
# set user-selected tabwidth
# NOTE(review): rebinding this module's TABWIDTH does not affect the
# already-imported lineops functions -- confirm how lineops reads its width
TABWIDTH = opts.tabwidth
for arg in args:
    # bug fix: the original called the undefined name 'tab_or_untab_thing';
    # the function defined above is transform_thing
    transform_thing( arg, opts, globlist )
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,033 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/scripts/plattext.py |
#
# Convert a tree of files into platform-specific text format,
# with filename matching.
#
#
# frankm@hiwaay.net
#
# make sure I can import disthelper
import sys
sys.path.insert(0,'.')
from . import grab_disthelper
# now the real code begins ...
from disthelper.treeops.treeops import *
from disthelper.treeops.fileops import *
from disthelper.treeops.lineops import to_platform_text
# Parse the standard disthelper tree options (-r, -R, -x, -i, -h).
p = TreeOptParser('plattext.py',
    'Convert a tree to platform-specific text format, with filename matching.')
opts,args = p.parse_argv(sys.argv)
if len(args) == 0:
    if len(opts.regexlist) > 0:
        # user gave a glob but no targets - add cwd
        args.append('.')
    else:
        # don't know what user wants
        p.show_usage()
        sys.exit(1)
# make a file transform from the lineop
fileop = FileTransformFromLineOp( to_platform_text )
# now make into a tree operation
treeop = TreeOpFromFileTransform( fileop )
# and run the tree
treeop.runtree(opts, args)
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,034 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/scripts/test/test_tabify.py |
# test tabify.py and untabify.py
#
# raises exception on error
import sys, os
def test_untabify( txt_tabbed, txt_untabbed ):
open('a.txt','wb').write(txt_tabbed)
os.system('%s %s -w 4 a.txt' % (sys.executable, '../untabtree.py'))
buf = open('a.txt','rb').read()
if buf != txt_untabbed:
raise Exception("untabify FAILED")
else:
print("untabify: OK")
def test_tabify( txt_untabbed, txt_tabbed ):
open('a.txt','wb').write(txt_untabbed)
os.system('%s %s a.txt' % (sys.executable, '../tabtree.py'))
buf = open('a.txt','rb').read()
if buf != txt_tabbed:
raise Exception("tabify FAILED")
else:
print("tabify: OK")
# test 1
tabbed_text_1 = """
def foo( a, b, c ):
\tj = 0
\tfor i in range(10):
\t\tj += i
\treturn j
"""
untabbed_text_1 = """
def foo( a, b, c ):
\x20\x20\x20\x20j = 0
\x20\x20\x20\x20for i in range(10):
\x20\x20\x20\x20\x20\x20\x20\x20j += i
\x20\x20\x20\x20return j
"""
# test 2
tabbed_text_2 = """
def foo( a, b, c ):
\tj = 0
\tfor i in range(10):
\t\tfor k in range(20):
\t\t\tj += i*k
\treturn j
"""
untabbed_text_2 = """
def foo( a, b, c ):
\x20\x20\x20\x20j = 0
\x20\x20\x20\x20for i in range(10):
\x20\x20\x20\x20\x20\x20\x20\x20for k in range(20):
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20j += i*k
\x20\x20\x20\x20return j
"""
# test 3
tabbed_text_3 = """
def foo( a, b, c ):
\tj = 0
\tfor i in range(10):
\t\tfor k in range(20):
\t\t\t j += i*k # 2 extra spaces in this line
\treturn j
"""
untabbed_text_3 = """
def foo( a, b, c ):
\x20\x20\x20\x20j = 0
\x20\x20\x20\x20for i in range(10):
\x20\x20\x20\x20\x20\x20\x20\x20for k in range(20):
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20j += i*k # 2 extra spaces in this line
\x20\x20\x20\x20return j
"""
print(tabbed_text_1)
test_untabify(tabbed_text_1,untabbed_text_1)
test_tabify(untabbed_text_1,tabbed_text_1)
print(tabbed_text_2)
test_untabify(tabbed_text_2,untabbed_text_2)
test_tabify(untabbed_text_2,tabbed_text_2)
print(tabbed_text_3)
test_untabify(tabbed_text_3,untabbed_text_3)
test_tabify(untabbed_text_3,tabbed_text_3)
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,035 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/scripts/rmfind.py |
#
# Recursively remove specific files.
#
# This is a portable replacement for simple uses of rm `find ..`.
#
# Example:
# POSIX:
# rm `find src -name "*.pyc"`
#
# rmfind:
# python rmfind.py -R "*.pyc" src
# -or-
# python rmfind.py -x pyc src
#
# Use -h for help.
#
#
# frankm@hiwaay.net
#
# make sure I can import disthelper
import sys
sys.path.insert(0,'.')
from . import grab_disthelper
# now the real code begins ...
from disthelper.treeops.treeops import *
from disthelper.misc import unlink
class TreeOpRmFind(TreeOps):
    """TreeOps worker that deletes every matched file (portable `rm find`)."""
    # - internal API - called by TreeOps -
    def process_one_file(self,fullname,opts):
        """Called for each matched file: delete it (announce if verbose)."""
        if opts.verbose:
            print("rm",fullname)
        unlink(fullname)
    def process_one_dir(self,fullname):
        """Called for each directory along the way; nothing to do here."""
        pass
    def dir_noaccess(self,fullname):
        """Called when access is denied to a directory
        (strictly informational, there is no provision to
        retry the operation)."""
        pass
# Parse the standard disthelper tree options (-r, -R, -x, -i, -h).
p = TreeOptParser('rmfind.py',
    'Recursively remove files matching a pattern.')
opts,args = p.parse_argv(sys.argv)
if len(args) == 0:
    # do NOT automatically add '.' as a target, since this
    # is such a destructive command
    p.show_usage()
    sys.exit(1)
if len(opts.regexlist) == 0:
    # safety ... running with no args means "rm *".
    print("***")
    print("*** No pattern given - I'm assuming you DON'T really mean `rm *` or `rm -rf *`")
    print("*** If you really want to do `rm -rf *`, then use -R \"*\"")
    print("***")
    sys.exit(1)
# Run TreeOpRmFind on the tree
treeop = TreeOpRmFind()
treeop.runtree(opts,args)
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,036 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/scripts/test/test_tabs_2.py | import sys, os
def test_untabify( txt_tabbed, txt_untabbed ):
open('a.txt','wb').write(txt_tabbed)
os.system('%s %s -w 4 a.txt' % (sys.executable, '../untabtree.py'))
buf = open('a.txt','rb').read()
if buf != txt_untabbed:
raise Exception("untabify FAILED")
else:
print("untabify: OK")
def test_tabify( txt_untabbed, txt_tabbed ):
open('a.txt','wb').write(txt_untabbed)
os.system('%s %s a.txt' % (sys.executable, '../tabtree.py'))
buf = open('a.txt','rb').read()
if buf != txt_tabbed:
raise Exception("tabify FAILED")
else:
print("tabify: OK")
untabbed_1 = """
\"\"\"
Two spaces inside a comment.
\"\"\"
def aaa():
\x20\x20\x20\x20i = 1
"""
tabbed_1 = """
\"\"\"
Two spaces inside a comment.
\"\"\"
def aaa():
\ti = 1
"""
test_tabify(untabbed_1, tabbed_1)
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,037 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/util/cmdline.py | #
# BasicOptParser - minimal, portable, optparse substitute.
#
# Works with Python 1.5.2 and up.
#
# written by frankm@hiwaay.net
#
# public API
__all__ = ['BasicOptParser']
# Unfortunately, gnu_getopt is only available on Python 2.2+,
# so have to stick with basic getopt (otherwise, you'd confuse
# users since option parsing would change depending on which
# Python they used to run their scripts). I guess I could do
# the parsing here too, but getopt is 200+ lines of debugged
# code, so use it.
from getopt import getopt
import string
# option argument types
OPT_STRING = 0   # takes a string argument
OPT_BOOL = 1     # boolean flag, no argument
OPT_LIST = 2     # takes a comma-separated list argument
OPT_INT = 3      # takes an integer argument

class BasicOptParser:
    """Minimal optparse substitute, since optparse requires Python 2.3+.

    The API is similar to optparse, but fewer types/options,
    and subject to the limitations of getopt (i.e. no multiple instances
    of options).

    Like optparse, options are set as attributes of an object returned
    from process() [see process() below]. The 'attr' argument of
    add_stropt/add_boolopt specifies the named attribute (analogous
    to the 'dest' argument in optparse.add_option)."""
    def __init__(self,prog_name,prog_info):
        """
        prog_name & prog_info are used for the help text.
        prog_name is the actual program name.
        prog_info is a one-liner description.
        """
        self.opts = []    # keep a linear list, for help generation
        self.optmap = {}  # keep a mapping as well for fast lookup
        self.prog_name = prog_name
        self.prog_info = prog_info
    def add_stropt(self, shortopt, longopt, attr, help=''):
        """Add an option taking a string argument, i.e. '-R value'.
        Defaults to None if option not used."""
        self.add_typeopt( OPT_STRING, shortopt, longopt, attr, help )
    def add_listopt(self, shortopt, longopt, attr, help=''):
        """Add an option taking a list of comma-separated strings,
        i.e. '-R foo,bar,baz'. Strings are stored as a list.
        Defaults to [] if option not used."""
        self.add_typeopt( OPT_LIST, shortopt, longopt, attr, help )
    def add_boolopt(self, shortopt, longopt, attr, help=''):
        """Add a boolean (on/off) option, i.e. '-r'.
        Stores 1 if option used, 0 if not."""
        self.add_typeopt( OPT_BOOL, shortopt, longopt, attr, help )
    def add_intopt(self, shortopt, longopt, attr, help=''):
        """Add an integer option, i.e. '-r NN'.
        Stores None if option not used."""
        self.add_typeopt( OPT_INT, shortopt, longopt, attr, help )
    def process(self, argv):
        """Returns (opts,args) where attributes are set in 'opts' for
        each option (like with optparse), and args is the list of
        non-option strings."""
        # convert opts to getopt args & call getopt
        sshort, llong = self.make_getopt_args()
        opts, args = getopt(argv, sshort, llong)
        # process getopt results
        ropt = BasicOptDataVal(self.opts)
        for opt, val in opts:
            tup = self.optmap[opt]
            if tup[0] == OPT_STRING:
                setattr(ropt,tup[3],val)
            elif tup[0] == OPT_LIST:
                # str.split() replaces string.split(val, ','), which was
                # removed along with the other string-module function
                # forms in Python 3 (AttributeError at runtime).
                setattr(ropt,tup[3],val.split(','))
            elif tup[0] == OPT_BOOL:
                setattr(ropt,tup[3],1)
            elif tup[0] == OPT_INT:
                setattr(ropt,tup[3],int(val))
            else:
                raise Exception("* internal error *")
        return ropt,args
    def show_usage(self):
        """Print usage/help information"""
        print("%s - %s\n\nUsage: %s [options] arg, ...\n" % \
              (self.prog_name,self.prog_info,self.prog_name))
        for otype, s, l, attr, help in self.opts:
            if otype in [OPT_STRING,OPT_INT]:
                arg = ' arg'
            elif otype == OPT_LIST:
                arg = ' arg,...'
            else:
                arg = ''
            hs = ''
            if len(s):
                hs = hs + '-%s%s, ' % (s,arg)
            if len(l):
                hs = hs + '--%s%s' % (l,arg)
            if len(help):
                hs = hs + ':\n\t' + help
            print(hs)
    # -*- internal API below -*-
    def add_typeopt(self, otype, shortopt, longopt, attr, help=''):
        """Register one option of the given OPT_* type (internal)."""
        if shortopt != '' and len(shortopt) != 1:
            raise Exception("shortopt must be a single char, or ''")
        tup = (otype, shortopt, longopt, attr, help)
        # add to linear list (for help)
        self.opts.append( tup )
        # add to map, prepending '-' or '--'
        # (getopt leaves the -/-- on the options it returns)
        if len(shortopt):
            self.optmap['-'+shortopt] = tup
        if len(longopt):
            self.optmap['--'+longopt] = tup
    def make_getopt_args(self):
        """Translate the registered options into getopt()'s short-string
        and long-list argument formats (internal)."""
        sshort = ''  # getopt() short-arg string
        llong = []   # getopt() long-arg list
        # what we add to arg, based on type
        shortadd = {OPT_STRING: ':', OPT_LIST: ':', OPT_BOOL: '',
                    OPT_INT: ':'}
        longadd = {OPT_STRING: '=', OPT_LIST: '=', OPT_BOOL: '',
                   OPT_INT: '='}
        for otype, shortopt, longopt, attr, help in self.opts:
            if len(shortopt):
                sshort = sshort + shortopt + shortadd[otype]
            if len(longopt):
                llong.append( longopt + longadd[otype] )
        return (sshort, llong)

# this is the datatype returned as the 'opts' object from
# BasicOptParser.process(). Basically just an empty object
# to hold attributes, but performs initialization of defaults.
class BasicOptDataVal:
    def __init__(self,opts):
        """opts -- the (otype, short, long, attr, help) tuples collected
        by the parser; each attr is initialized to its type's default."""
        for otype,s,l,attr,h in opts:
            # string & int default to None
            if otype in [OPT_STRING,OPT_INT]:
                setattr(self,attr,None)
            # string list defaults to []
            elif otype == OPT_LIST:
                setattr(self,attr,[])
            # bool defaults to 0 (off)
            elif otype == OPT_BOOL:
                setattr(self,attr,0)
            else:
                raise Exception("* internal error *")
    def __str__(self):
        s = 'BasicOptDataVal:\n'
        for k,v in list(self.__dict__.items()):
            s = s + ' %s: %s\n' % (str(k),str(v))
        return s
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,038 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/scripts/grab_disthelper.py | import sys
# Enable scripts to run in place without disthelper installed
# Note that I ALWAYS want to find the copy that is in the
# directory above me. The user might have installed disthelper
# in site-packages, and it could be out of date.
import os
# get my directory
p = os.path.split(sys.argv[0])[0]
if len(p): p = os.path.abspath(p)
else: p = os.path.abspath('.')
# insert my directory into sys.path so I can grab other
# modules that live here
sys.path.insert(0,p)
# find disthelper/ parent directory & insert in sys.path
while not os.path.isdir(os.path.join(p,'disthelper')):
p = os.path.split(p)[0]
sys.path.insert(0,p)
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,039 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/scripts/test/test_textfmt.py |
# test plattext.py and porttext.py
#
# raises exception on error
# shared fixture: five short "lines" joined with various separators below
lines_1 = ['aaa','bbb','ccc','ddd','eee']
import sys, os
def test_plattext( txt_port, txt_plat ):
    """Run plattext.py over a scratch file and check the exact result.

    txt_port -- portable (LF-terminated) input text (str)
    txt_plat -- expected platform-native rendering

    Raises Exception on mismatch.  Binary mode plus explicit UTF-8
    encode/decode replaces the original str-to-binary-file writes,
    which fail on Python 3, while still comparing line endings
    byte-for-byte.
    """
    with open('a.txt', 'wb') as f:
        f.write(txt_port.encode('utf-8'))
    os.system('%s %s a.txt' % (sys.executable, '../plattext.py'))
    with open('a.txt', 'rb') as f:
        buf = f.read().decode('utf-8')
    if buf != txt_plat:
        print("GOT ",repr(buf))
        print("EXPECT ",repr(txt_plat))
        raise Exception("plattext FAILED")
    else:
        print("plattext: OK")
def test_porttext( txt_plat, txt_port ):
    """Run porttext.py over a scratch file and check the exact result.

    txt_plat -- platform-native (or foreign) input text (str)
    txt_port -- expected portable (LF-terminated) rendering

    Raises Exception on mismatch.  Two fixes vs. the original: str is
    now encoded/decoded around the binary-mode file I/O (a TypeError on
    Python 3), and the messages say "porttext" -- they were copy-pasted
    from test_plattext above.
    """
    with open('a.txt', 'wb') as f:
        f.write(txt_plat.encode('utf-8'))
    os.system('%s %s a.txt' % (sys.executable, '../porttext.py'))
    with open('a.txt', 'rb') as f:
        buf = f.read().decode('utf-8')
    if buf != txt_port:
        print("GOT ",repr(buf))
        print("EXPECT ",repr(txt_port))
        raise Exception("porttext FAILED")
    else:
        print("porttext: OK")
# Build the portable (LF) and platform-native renderings of the same text.
txt_port = '\n'.join(lines_1) + '\n' # make sure last line has '\n'
txt_plat = os.linesep.join(lines_1) + os.linesep # make sure last line as separator
print("TEST %s, %s" % (repr(txt_port),repr(txt_plat)))
test_plattext( txt_port, txt_plat )
test_porttext( txt_plat, txt_port )
# show that 'foreign' line endings are converted to portable
# format correctly
# NOTE(review): this print shows the *previous* fixtures -- the new
# ones are assigned just below it.  Also, '\n\r' is LF-CR (not Windows
# CRLF); confirm that is the intended "foreign" ending.
print("TEST %s, %s" % (repr(txt_port),repr(txt_plat)))
txt_port = '\n'.join(lines_1) + '\n'
txt_plat = '\n\r'.join(lines_1)
test_porttext( txt_plat, txt_port )
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,040 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/scripts/__init__.py |
# this exists so that other packages can use grab_disthelper if needed
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,041 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/setuplib.py | #
# I found myself writing the same little functions over
# and over in setup.py files, so I've collected them here.
#
# (See also disthelper.misc for more generally useful functions.)
#
# frankm@hiwaay.net
#
from distutils.core import setup, Extension
import os, re
from disthelper.misc import *
from glob import glob
import shutil
from disthelper.treeops import TreeOps, TreeOptParser
# make 'import *' safe
# make 'import *' safe
__all__ = ['run','gen_all_swigs','clean_all','C_SWIG','CPP_SWIG',
           'zip_current_dir','increment_build_nr']
# file extensions for C/C++ files that SWIG generates
SWIG_c_ext = "c"      # swig -python emits name_wrap.c
SWIG_cpp_ext = "cxx"  # swig -c++ emits name_wrap.cxx
def run(cmd,ignore_err=0):
    """Echo *cmd*, execute it via os.system, and abort the build on failure.

    A nonzero exit status terminates the process with sys.exit(1),
    unless ignore_err is true.
    """
    print("Command: ", cmd, ", cwd: ",os.getcwd())
    status = os.system(cmd)
    if status == 0 or ignore_err:
        return
    print("ERROR")
    sys.exit(1)
def fix_wrapper(file,modname):
    """Append a C-linkage init<modname> trampoline to a SWIG wrapper file.

    Per the original comments, SWIG defines "init_modname" but Python
    looks for "initmodname" on win32, so a trampoline forwarding to
    SWIG_init() is appended.  No-op on POSIX.

    Fix vs. original: the wrapper file is now closed deterministically
    via a context manager instead of being left to garbage collection.
    """
    if os.name == 'posix': # fix only required on win32
        return
    print("Fixing %s ..." % file)
    # for some reason, SWIG is defining "init_modname", but python
    # is looking for "initmodname" ... weird. so, add trampoline
    # entry point
    with open(file,'at') as fout:
        fout.write('#ifdef __cplusplus\n')
        fout.write('extern "C"\n')
        fout.write('#endif\n')
        fout.write("SWIGEXPORT(void)init%s(void){SWIG_init();}\n"%modname)
def gen_swig(swig_obj,swig_prog,wrap_ext,swig_opts):
    """Run SWIG for one extension module if its outputs are stale.

    swig_obj  -- SWIG_Extension instance (supplies swig_basename)
    swig_prog -- name/path of the swig executable
    wrap_ext  -- extension of the generated wrapper ('c' or 'cxx')
    swig_opts -- option string passed verbatim to swig

    Regenerates name.py and name_wrap.<ext> whenever either is missing
    or older than name.h / name.i.  mtime() comes from disthelper.misc
    (star-imported above).
    """
    basename = swig_obj.swig_basename
    hfile = '%s.h' % basename
    ifile = '%s.i' % basename
    pyfile = '%s.py' % basename
    wrapfile = '%s_wrap.%s' % (basename, wrap_ext)
    # if mod .h/.i newer than .py/_wrap, regenerate
    if not os.path.isfile(pyfile) or \
       not os.path.isfile(wrapfile) or \
       mtime(hfile) > mtime(pyfile) or \
       mtime(ifile) > mtime(pyfile) or \
       mtime(hfile) > mtime(wrapfile) or \
       mtime(ifile) > mtime(wrapfile):
        print("Creating %s & %s" % (pyfile,wrapfile))
        run('%s %s %s' % (swig_prog,swig_opts,ifile))
        # patch the win32 entry-point name (no-op on posix)
        fix_wrapper(wrapfile,basename)
def gen_c_swig(swig_exe, mod):
    """Generate the C wrapper + .py module for one C_SWIG extension.

    Fix vs. original: a space after '-python' -- without it any
    swig_extra_args were fused onto the flag ('-python-I...'),
    cf. the trailing space in gen_cpp_swig's option string.
    """
    gen_swig(mod, swig_exe, SWIG_c_ext,
             '-python ' + ' '.join(mod.swig_extra_args))
def gen_cpp_swig(swig_exe, mod):
    """Generate the C++ wrapper + shadow class for one CPP_SWIG extension."""
    gen_swig(mod, swig_exe, SWIG_cpp_ext,
             '-c++ -shadow -python ' + ' '.join(mod.swig_extra_args))
def gen_all_swigs(swig_exe, modlist):
    """Run SWIG (via *swig_exe*) over every extension in *modlist*.

    Each entry must be a C_SWIG or CPP_SWIG instance; anything else is
    a programming error.  Fix vs. original: the exception now carries a
    description of the offending entry instead of being a bare
    Exception() with no message.
    """
    for mod in modlist:
        if isinstance(mod, C_SWIG):
            gen_c_swig(swig_exe, mod)
        elif isinstance(mod, CPP_SWIG):
            gen_cpp_swig(swig_exe, mod)
        else:
            print("********* UNKNOWN SWIG TYPE *************")
            raise Exception("unknown SWIG extension type: %r" % (mod,))
def clean_all_swigs(modlist):
    """Delete the SWIG-generated .py and _wrap.* files for each module."""
    for mod in modlist:
        for name in ['%s.py' % mod.swig_basename,
                     '%s_wrap.%s' % (mod.swig_basename,SWIG_c_ext),
                     '%s_wrap.%s' % (mod.swig_basename,SWIG_cpp_ext)]:
            if os.path.isfile(name):
                print('del %s' % name)
                # unlink comes from disthelper.misc (star import above)
                unlink(name)
def clean_all(ext_list, extra_patt=[]):
    """Remove generated build artifacts.

    Deletes the SWIG wrappers for *ext_list*, the distutils build/
    tree, and every file matching the default junk patterns plus the
    *extra_patt* glob patterns.  (extra_patt is only read, never
    mutated, so the mutable default is harmless here.)

    Fix vs. original: glob() returns a list, and the original
    rmfiles.append(glob(patt)) produced a list of lists, handing
    whole lists to unlink(); extend() collects individual filenames.
    """
    clean_all_swigs(ext_list)
    shutil.rmtree('build',1)
    rmfiles = []
    for patt in ['*.pyc','*~','*.so','*.pyd',
                 '*.o','core','core.*'] + extra_patt:
        rmfiles.extend( glob(patt) )
    for name in rmfiles:
        unlink(name)
class SWIG_Extension(Extension):
    """distutils Extension that also records SWIG metadata.

    swig_basename   -- base name shared by name.i / name.h / name_wrap.*
    swig_extra_args -- extra command-line arguments for swig
    """
    def __init__(self,name,sources,libs=[],incdirs=[],libdirs=[],
                 define_macros=[],swig_args=[]):
        # the mutable defaults are only passed along, never mutated here
        Extension.__init__(self,name=name,sources=sources,
                           libraries=libs,include_dirs=incdirs,
                           library_dirs=libdirs,define_macros=define_macros)
        self.swig_basename = name
        self.swig_extra_args = swig_args
class C_SWIG(SWIG_Extension):
    """A C extension module using SWIG.
    Expects three files in dir:
       name.c = Module source
       name.h = Module header
       name.i = SWIG interface for module.
    extra_sources is a list of filenames to include in
    the compilation."""
    def __init__(self,name,extra_sources=[],define_macros=[],
                 swig_args=[]):
        # the mutable defaults are only read (list concatenation /
        # pass-through), never mutated, so sharing them is harmless
        SWIG_Extension.__init__(self,name=name,
                                sources=['%s.c' % name,
                                         '%s_wrap.%s' % \
                                         (name,SWIG_c_ext)] + extra_sources,
                                define_macros=define_macros,
                                swig_args=swig_args)
class CPP_SWIG(SWIG_Extension):
    """A C++ extension module using SWIG.
    Expects three files in dir:
       name.cpp = Module source
       name.h = Module header
       name.i = SWIG interface for module.
    extra_sources is a list of filenames to include in
    the compilation."""
    def __init__(self,name,extra_sources=[],extra_libs=[],define_macros=[],
                 incdirs=[],libdirs=[],swig_args=[]):
        # Copy before appending: the original did libs = extra_libs and
        # then libs.append('stdc++'), mutating the shared mutable
        # default [] so every posix instantiation accumulated another
        # 'stdc++' entry (and callers' lists were modified in place).
        libs = list(extra_libs)
        if os.name == 'posix':
            libs.append('stdc++')  # link libstdc++ explicitly on posix
        SWIG_Extension.__init__(self,name=name,
                                sources=['%s.cpp' % name,
                                         '%s_wrap.%s' % \
                                         (name,SWIG_cpp_ext)] + extra_sources,
                                incdirs=incdirs,
                                libs=libs,
                                libdirs=libdirs,
                                define_macros=define_macros,
                                swig_args=swig_args)
# I don't use gnosis.pyconfig here because I want disthelper
# to be able to be used completely independently
# Capability detection: very old Pythons may lack zip/tar support, so
# probe for the modules and record the result in HAVE_* flags.
try:
    import zipfile, zlib
    HAVE_ZIPSTUFF = 1
except: HAVE_ZIPSTUFF = 0
try:
    import tarfile
    HAVE_TAR = 1
except: HAVE_TAR = 0

def _error_no_zip():
    # stand-in bound to zip_current_dir when zipfile/zlib are missing
    print("** Sorry, this version of Python cannot create .zip files.")

def _error_no_tar():
    # stand-in bound to tar_current_dir when tarfile is missing
    print("** Sorry, this version of Python cannot create .tar files.")
class zip_tree(TreeOps):
    "Worker class for 'zip -r' functionality."
    def __init__(self, outname, add_prefix=None, exclude_re_list=[],
                 overwrite=1):
        # outname         -- zipfile to create (overwrite) or append to
        # add_prefix      -- optional prefix joined onto archive names
        # exclude_re_list -- compiled regexes; matching paths are skipped
        # overwrite       -- if true, delete any existing outname first
        TreeOps.__init__(self)
        if overwrite:
            if os.path.isfile(outname):
                unlink(outname)
            mode = 'w'
        else:
            mode = 'a'
        self.zipname = outname
        # zlib.DEFLATED has the same value as zipfile.ZIP_DEFLATED (8)
        self.zf = zipfile.ZipFile(outname, mode, zlib.DEFLATED)
        self.add_prefix = add_prefix
        self.exclude_re_list = exclude_re_list
    def process_one_file(self,name,opts):
        """TreeOps callback: add one file to the archive (or skip it)."""
        #print "Adding",name
        # don't add zipfile to self
        if samepath(name, self.zipname):
            return
        for r in self.exclude_re_list:
            if r.match(name):
                #print "*** EXCLUDE ",name
                return # matched exclusion list
        if self.add_prefix is None:
            self.zf.write(name)
        else:
            self.zf.write(name,os.path.normpath(os.path.join(self.add_prefix,name)))
    def process_one_dir(self,name):
        """TreeOps callback: no explicit archive entry for directories."""
        #print "DIR ",name
        pass
    def on_end_processing(self):
        """TreeOps callback: flush and close the archive."""
        self.zf.close()
class tar_tree(TreeOps):
    "Worker class for tar functionality."
    def __init__(self, outname, mode, add_prefix=None, exclude_re_list=[],
                 overwrite=1):
        # outname         -- tarfile to create
        # mode            -- tarfile.open mode, e.g. 'w:gz' or 'w:bz2'
        # add_prefix      -- optional prefix joined onto archive names
        # exclude_re_list -- compiled regexes; matching paths are skipped
        # overwrite       -- if true, delete any existing outname first
        TreeOps.__init__(self)
        if overwrite:
            if os.path.isfile(outname):
                unlink(outname)
        self.tarname = outname
        self.tarfile = tarfile.open(outname, mode)
        self.add_prefix = add_prefix
        self.exclude_re_list = exclude_re_list
    def process_one_file(self,name,opts):
        """TreeOps callback: add one file to the archive (or skip it)."""
        #print "Adding",name
        # don't add tarfile to self
        if samepath(name, self.tarname):
            return
        for r in self.exclude_re_list:
            if r.match(name):
                #print "** EXCLUDE",name
                return # matched exclusion list
        if self.add_prefix is None:
            self.tarfile.add(name)
        else:
            self.tarfile.add(name, os.path.normpath(
                os.path.join(self.add_prefix,name)))
    def process_one_dir(self,name):
        """TreeOps callback: no explicit archive entry for directories."""
        pass
    def on_end_processing(self):
        """TreeOps callback: flush and close the archive."""
        self.tarfile.close()
def _zip_current_dir( zfilename, add_prefix=None, exclude_re_list=[] ):
    """
    Zip up the current directory, just as if you had
    done 'zip -r zfilename *' (except it will grab all
    .dotfiles as well).

    Does NOT require a 'zip' program to be installed.

    zfilename = Output filename.
    add_prefix = Prefix to prepend to all filenames added.
        If None, names will not be modified.
        (This is useful when you want a toplevel directory
        to appear in the zipfile, without having to
        chdir('..') and zipping the directory from there.
        add_prefix will be os.path.joined with each filename.)
    exclude_re_list = Files will be excluded if they match one of these
        (compiled) regexes.
    """
    top = TreeOptParser('dummy','dummy')
    # call as if used passed args 'prog -R"*,.*" .'
    # [must glob with '*' and '.*' at each level]
    opts,args = top.parse_argv(['dummy','-R*,.*','.'])
    # zip_tree (above) does the actual adding as the tree is walked
    zt = zip_tree(zfilename,add_prefix,exclude_re_list)
    zt.runtree(opts,args)
# bind the public name to the real implementation, or to the error stub
# when zip support is unavailable
# NOTE(review): the stub takes no arguments, so calling
# zip_current_dir(name) without zip support raises TypeError rather
# than printing the message -- confirm that is acceptable.
if HAVE_ZIPSTUFF:
    zip_current_dir = _zip_current_dir
else:
    zip_current_dir = _error_no_zip
# convenience
def tar_bz2_current_dir( bz2filename, add_prefix=None,
                         exclude_re_list=[] ):
    """tar_current_dir with bzip2 compression ('w:bz2')."""
    tar_current_dir( bz2filename, 'w:bz2', add_prefix, exclude_re_list )
def tar_gz_current_dir( gzfilename, add_prefix=None,
                        exclude_re_list=[] ):
    """tar_current_dir with gzip compression ('w:gz')."""
    tar_current_dir( gzfilename, 'w:gz', add_prefix, exclude_re_list )
def _tar_current_dir( tarfilename, mode, add_prefix=None,
                      exclude_re_list=[] ):
    """
    Tar up the current directory, just as if you had
    done 'tar . - |(bzip2|gzip) -9 > zfilename' (except it will grab all
    .dotfiles as well).

    Does NOT require tar, gzip, or bzip2 program to be installed.

    tarfilename = Output filename.
    mode = 'w:gz' or 'w:bz2', to specify gzip/bzip2
    add_prefix = Prefix to prepend to all filenames added.
        If None, names will not be modified.
        (This is useful when you want a toplevel directory
        to appear in the tarfile, without having to
        chdir('..') and tarring the directory from there.
        add_prefix will be os.path.joined with each filename.)
    exclude_re_list = Files will be excluded if they match one of these
        (compiled) regexes.
    """
    top = TreeOptParser('dummy','dummy')
    # call as if used passed args 'prog -R"*,.*" .'
    # [must glob with '*' and '.*' at each level]
    opts,args = top.parse_argv(['dummy','-R*,.*','.'])
    # tar_tree (above) does the actual adding as the tree is walked
    t = tar_tree(tarfilename,mode,add_prefix,exclude_re_list)
    t.runtree(opts,args)
# bind the public tar name to the real implementation, or to the
# (zero-argument) error stub when tarfile is unavailable
if HAVE_TAR:
    tar_current_dir = _tar_current_dir
else:
    tar_current_dir = _error_no_tar
def increment_build_nr():
    """Bump BUILD_NR in ./version.py by one.

    Copies version.py line-by-line to a temp file, rewriting the
    'BUILD_NR = n' line as n+1 and normalizing every other line to
    unix (LF) endings, then replaces version.py with the result.

    Fix vs. original: files are closed via context managers instead of
    'del fin/fout' (which relied on CPython refcounting to flush the
    output before it was copied back).  make_tempdir/unlink come from
    disthelper.misc (star import above).
    """
    tempdir = make_tempdir()
    temppath = os.path.join(tempdir,'v.out')
    with open('version.py','r') as fin, open(temppath,'w') as fout:
        for line in fin:
            m = re.match(r'BUILD_NR\s*=\s*([0-9]+)',line)
            if m:
                fout.write('BUILD_NR = %d\n' % (int(m.group(1)) + 1))
            else:
                # keep version.py in unix format
                fout.write('%s\n' % line.rstrip('\r\n'))
    unlink('version.py')
    shutil.copy(temppath,'version.py')
    unlink(temppath)
    os.rmdir(tempdir)
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,042 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/attic/test-tabber.py |
#
# Test program for tabber.py
#
# Note, it is very important NOT to tabify/untabify/reformat
# the input files (t*.txt) -- the test code expects them
# to have either tabs or spaces, and changing them will
# give false failures.
#
#
# frankm@hiwaay.net
#
from difflib import Differ, unified_diff
import string
import os, sys
from shutil import copy as filecopy
def test_tabber( filename, test1, test2 ):
    """Round-trip *filename* through tab.py and verify losslessness.

    Runs 'tab.py <test1>' (e.g. --tabify) on a copy, checks that only
    leading whitespace changed, then runs the inverse 'tab.py <test2>'
    and checks the result is byte-identical to the original.  Exits
    the process on hard failures.

    Fixes vs. original: string.lstrip() was removed in Python 3 (the
    lines are bytes here anyway), so bytes.lstrip() is used; plain
    unified_diff raises TypeError on differing bytes lines, so the
    bytes-safe difflib.diff_bytes wrapper is used; and tab.py is run
    with sys.executable (like the other disthelper test scripts)
    instead of whatever 'python' is on PATH.
    """
    from difflib import diff_bytes  # bytes-safe wrapper for unified_diff
    name2 = '%s.a' % filename
    name3 = '%s.b' % filename
    # copy name -> name.a and run test1 on name.a
    filecopy( filename, name2 )
    cmd = '%s tab.py %s %s' % (sys.executable, test1, name2)
    print("Command: ",cmd)
    os.system(cmd)
    # load both files (binary mode: line endings must be preserved)
    lines_a = open( filename, 'rb' ).readlines()
    lines_b = open( name2, 'rb' ).readlines()
    # now, a straight diff should show changes
    df = list(diff_bytes(unified_diff,lines_a,lines_b))
    if len(df) == 0:
        print("*** ERROR - expected a change when tabifying %s" % filename)
        sys.exit(1)
    # however, after lstripping each line, they should be
    # equal again
    lines_a = [ln.lstrip() for ln in lines_a]
    lines_b = [ln.lstrip() for ln in lines_b]
    df = list(diff_bytes(unified_diff,lines_a,lines_b))
    if len(df) != 0:
        print("*** ERROR - not the same after %s %s" % (test1,filename))
    else:
        print('OK - leading whitespace changed as expected.')
    # copy name.a -> name.b and run test2 on name.b
    filecopy(name2, name3)
    cmd = '%s tab.py %s %s' % (sys.executable, test2, name3)
    print("Command: ",cmd)
    os.system(cmd)
    # now original & name.b should be identical
    lines_a = open( filename, 'rb' ).readlines()
    lines_b = open( name3, 'rb' ).readlines()
    df = list(diff_bytes(unified_diff,lines_a,lines_b))
    if len(df) != 0:
        print("*** ERROR - lossage after %s %s" % (test2,filename))
        sys.exit(1)
    else:
        print("OK - no changes after %s -> %s" % (test1,test2))
# Round-trip each fixture.  Direction matters: start with whichever
# transform actually changes the file, per the header warning that the
# t*.txt fixtures must never be re-tabbed/untabbed by an editor.
# t1 has spaces, so run --tabify first
test_tabber('t1.txt','--tabify','--untabify')
# t2 has tabs, so run --untabify first
test_tabber('t2.txt','--untabify','--tabify')
# t3 has spaces, so run --tabify first
test_tabber('t3.txt','--tabify','--untabify')
# t4 has spaces, so run --tabify first
test_tabber('t4.txt','--tabify','--untabify')
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,043 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/scripts/difftree.py |
# diff two directories, using TreeOps
#
# Should give the same output as "diff -r", although the
# ordering of files might be different.
#
# frankm@hiwaay.net
# make sure I can import disthelper
import sys
sys.path.insert(0,'.')
from . import grab_disthelper
# now the real code begins ...
from disthelper.treeops.treeops import *
import os, re
from stat import *
from difflib import unified_diff
from disthelper.misc import mtime
import time
class TreeDiffer(TreeOps):
    """Diff two directory trees using TreeOps.

    Walks the first directory (via TreeOps) and compares every file and
    directory entry against the corresponding path under the second
    directory: unified diffs for text files, a one-line notice for
    differing binary files, and "Only in ..." lines for entries present
    on one side only (same spirit as "diff -r").
    """
    def __init__(self):
        TreeOps.__init__(self)
    def run(self, argv):
        """Parse the command line, chdir into the first tree and walk it.

        argv -- full command line (argv[0] is the program name); the
                positional arguments must be exactly two existing dirs.
        Exits with status 1 on bad usage.
        """
        # parse standard tree options (-r, -R, -x, etc.)
        p = TreeOptParser('difftree.py','Diff two directories.')
        opts,args = p.parse_argv(argv)
        if len(args) != 2 or \
               not os.path.isdir(args[0]) or \
               not os.path.isdir(args[1]):
            print("** Must give two directories to diff.")
            p.show_usage()
            sys.exit(1)
        # remember which files/dirs I couldn't access
        # (self.nofile is reported below but never appended to here;
        # presumably a TreeOps callback fills it -- TODO confirm)
        self.nofile = []
        self.nodir = []
        # I'm going to chdir() to the first dir - this way
        # I know that all paths I get will be of the form './ ...'
        # which I can just tack on to the second directory root.
        # However, I need to remember the relative paths, for
        # display purposes
        self.first_dir_rel = args[0]
        self.second_dir_rel = args[1]
        # remember the absolute path of the second dir
        self.second_dir_abs = os.path.abspath(args[1])
        # save current dir so I can restore at the end
        self.start_dir = os.getcwd()
        os.chdir(self.first_dir_rel)
        # now args will just be '.'
        # walk the tree with globbing, etc.
        self.runtree(opts,['.'])
        # tell user which files/dirs I couldn't access
        if len(self.nofile):
            print("I could not access these files:")
            for f in self.nofile:
                print("   %s" % f)
        if len(self.nodir):
            print("I could not access these directories:")
            for d in self.nodir:
                print("   %s" % d)
    # - internal API - called as the tree is walked -
    def process_one_file(self,fullname,opts):
        """Diff one file against its counterpart in the second tree.

        fullname is always of the form './...' (run() chdir'ed into the
        first tree), so fullname[2:] is the tree-relative path.
        """
        # name for open()
        name_open1 = fullname
        # name for display
        name_disp1 = os.path.join(self.first_dir_rel,fullname[2:])
        # name for open()
        name_open2 = os.path.join(self.second_dir_abs,fullname[2:])
        # name for display
        name_disp2 = os.path.join(self.second_dir_rel,fullname[2:])
        if not os.path.isfile(name_open1) or \
               not os.path.isfile(name_open2):
            return # missing file - will be caught in process_one_dir()
        # BUGFIX: always read raw bytes. The old code opened with 'r' on
        # posix, which under Python 3 decodes as text and blows up
        # (UnicodeDecodeError) on binary files; and on Windows it read
        # bytes but searched them with a str regex (TypeError).
        with open(name_open1,'rb') as f1:
            buf_1 = f1.read()
        with open(name_open2,'rb') as f2:
            buf_2 = f2.read()
        # anything outside TAB/LF/CR/printable-ASCII marks a binary file
        binary_regex = rb'[^\x09\x0a\x0d\x20-\x7f]'
        # is either file binary?
        if re.search(binary_regex,buf_1) or \
               re.search(binary_regex,buf_2):
            # yes, just display if it differs
            if buf_1 != buf_2:
                print("Files %s and %s differ." % \
                      (name_disp1,name_disp2))
        else:
            # text files: both are pure ASCII (they passed the regex
            # above), so decoding cannot fail
            lines_1 = buf_1.decode('ascii').splitlines()
            if not len(lines_1):
                lines_1 = ['']
            lines_2 = buf_2.decode('ascii').splitlines()
            if not len(lines_2):
                lines_2 = ['']
            diffs = list(unified_diff(lines_1,lines_2,
                                      name_disp1,name_disp2,
                                      time.ctime(mtime(name_open1)),
                                      time.ctime(mtime(name_open2))))
            for line in diffs:
                # BUGFIX: guard against empty strings before peeking
                # at line[-1] (used to raise IndexError)
                while line and line[-1] in '\r\n':
                    line = line[:-1]
                print(line)
    def process_one_dir(self,fullname):
        """Report entries that exist in only one of the two trees."""
        # at each dir, compare a list of names to see if
        # any are unique to either
        names1 = os.listdir(fullname)
        names2 = os.listdir(os.path.join(self.second_dir_abs,fullname[2:]))
        for name in names1:
            if name not in names2:
                print("Only in %s: %s" % (fullname,name))
        for name in names2:
            if name not in names1:
                print("Only in %s: %s" % (os.path.join(self.second_dir_abs,
                                                       fullname[2:]), name))
    def dir_noaccess(self,fullname):
        # remember unreadable dirs; reported at the end of run()
        self.nodir.append(fullname)
    def on_end_processing(self):
        # restore initial directory
        os.chdir(self.start_dir)
# script entry point: diff the two directories named on the command line
t = TreeDiffer()
t.run(sys.argv)
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,044 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/attic/make_mingw_pylibs.py |
#
# In order to use mingw (or cygwin with 'gcc -mno-cygwin')
# to create native win32 extension modules, you must create
# import libraries for the Python DLLs that gcc can use.
#
# This script automates the procedure found at:
# http://sebsauvage.net/python/mingw.html
#
# [excluding the parts about modifying distutils]
#
#
# frankm@hiwaay.net
#
# This is compatible back to Python 1.5.2, hence the
# old syntax here and there.
from disthelper.find_python import *
import disthelper.find_python as find_python
from disthelper.misc import *
import sys, string, os
from shutil import copy2
# Probe for optional Windows-only modules; each probe records its
# result in a module-level availability flag checked later.
try:
    # _winreg requires Python 2.0+, so make it optional
    from winreg import OpenKey, HKEY_LOCAL_MACHINE, EnumKey, \
         QueryInfoKey, QueryValueEx
    # 1 = registry access available (used by get_winroot_from_registry)
    HAVE_WIN32_REGISTRY = 1
except:
    HAVE_WIN32_REGISTRY = 0
try:
    # use win32all, if installed
    import win32api
    # 1 = win32api available (used by get_dll_search_path)
    HAVE_WIN32_API = 1
except:
    HAVE_WIN32_API = 0
def make_mingw_lib_from_dll( destname, dllname ):
    """Convert a win32 DLL into a lib*.a import library for mingw.

    dllname  -- full pathname of the .DLL to convert.
    destname -- pathname for the resulting mingw library.
    Automates the procedure described at:
    http://sebsauvage.net/python/mingw.html
    """
    # locate the required tools up front (each helper exits on failure)
    dlltool = find_dlltool_or_bail()
    pexports = find_pexports_or_bail()
    print("Converting %s -> %s" % (dllname,destname))
    dll_base = os.path.basename(dllname)
    # work inside a scratch directory; only the .a lands at destname
    orig_dir = os.getcwd()
    work_dir = make_tempdir()
    os.chdir(work_dir)
    copy2( dllname, work_dir )
    # dump the DLL's exported symbols into a .def file
    os.system("%s %s > temp.def" % (pexports, dll_base))
    # build the import library (.a) from the .def file
    os.system("%s --dllname %s --def temp.def --output-lib %s" % \
              (dlltool, dll_base, destname))
    # remove scratch files, then the scratch directory itself
    unlink('temp.def')
    unlink(dll_base)
    os.chdir(orig_dir)
    os.rmdir(work_dir)
def get_winroot_from_registry():
    """Return the Windows directory (e.g. c:\\WINDOWS) as recorded in
    the registry, or None if it cannot be determined."""
    if HAVE_WIN32_REGISTRY == 0:
        return None
    # win9x keeps SystemRoot under 'Windows'; NT/2k+ under 'Windows NT'
    candidate_keys = (
        "SOFTWARE\\Microsoft\\Windows\\CurrentVersion",
        "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion",
        )
    for subkey in candidate_keys:
        try:
            topkey = OpenKey(HKEY_LOCAL_MACHINE, subkey)
            path, typ = QueryValueEx(topkey, 'SystemRoot')
            return path
        except:
            # key/value may be absent on some win32 versions; try next
            pass
    # not present under either location
    return None
def get_dll_search_path():
    """Generate a list of all the places where the pythonNN.dll
    files might be stored. This roughly duplicates the standard
    win32 rules. There may be a lot of duplicates, but this routine
    attempts to be comprehensive.

    Returns a list of directory paths (duplicates possible)."""
    # searchlist contains full paths
    searchlist = []
    # check with win32api, if present
    if HAVE_WIN32_API:
        # p will be e.g. c:\windows\system
        p = win32api.GetSystemDirectory()
        searchlist.append(p)
        # on NT, p will contain SYSTEM32, so add SYSTEM as well
        p = os.path.join(os.path.dirname(p), 'SYSTEM')
        searchlist.append(p)
        # add, e.g. c:\windows
        searchlist.append( win32api.GetWindowsDirectory() )
    # generate some root paths, then add SYSTEM & SYSTEM32 to each
    rootlist = []
    # check the registry
    path = get_winroot_from_registry()
    if path is not None:
        rootlist.append( path )
    # BUGFIX: string.split() was removed in Python 3 -- use the str
    # method instead (and tolerate a missing PATH variable).
    rootlist.extend( os.environ.get('PATH', '').split(os.pathsep) )
    # now, for each, add SYSTEM & SYSTEM32, in the hope
    # that one of the paths is, e.g. c:\windows
    for path in rootlist:
        searchlist.append( path )
        searchlist.append( os.path.join(path,'SYSTEM') )
        searchlist.append( os.path.join(path,'SYSTEM32') )
    # add the .exe directory
    searchlist.append( os.path.dirname( os.path.abspath( sys.executable )))
    # add the cwd
    searchlist.append( os.getcwd() )
    return searchlist
def find_dlltool_or_bail():
    "Find dlltool.exe, or bail out."
    exe = find_exe_in_path('dlltool')
    if exe is not None:
        return exe
    # not found: explain how to get it, then abort
    print("***")
    print("*** ERROR - dlltool.exe not found in PATH.")
    print("***")
    print("*** Make sure you have installed gcc from either")
    print("*** cygwin or mingw.")
    print("***")
    sys.exit(1)
def find_pexports_or_bail():
    "Find pexports.exe or bail out."
    exe = find_exe_in_path('pexports')
    if exe is not None:
        return exe
    # not found: explain how to get it, then abort
    print("***")
    print("*** ERROR - pexports.exe not found in PATH.")
    print("*** Please download it from:")
    print("*** http://starship.python.net/crew/kernr/mingw32/pexports-0.42h.zip")
    print("***")
    print("*** And place 'pexports.exe' in your PATH.")
    print("***")
    sys.exit(1)
def find_python_win32_dll( ver ):
    """Locate the win32 .dll matching a given Python version.

    ver -- 3-element version sequence like (2,1,3); only the first two
           elements contribute to the DLL name (pythonNN.dll).
    Returns the full path to the .dll, or None if not found."""
    dll_name = 'python%d%d.dll' % (ver[0], ver[1])
    candidates = (os.path.join(directory, dll_name)
                  for directory in get_dll_search_path())
    for candidate in candidates:
        if os.path.isfile(candidate):
            return candidate
    return None
# --- command-line driver ---
# usage message when no arguments are given
if len(sys.argv) < 2:
    print("\nUsage: make_mingw_pylibs.py [--all] [version]")
    print("")
    print(" Creates MinGW libraries (.a) from the Python .DLLs.")
    print(" This allows you to use MinGW (or cygwin, with 'gcc -mno-cygwin')")
    print(" to create Python extension modules that will run on any win32")
    print(" system (i.e. cygwin not needed).")
    print("")
    print(" The generated .a files are automatically installed into the")
    print(" correct directory (i.e. PYTHONROOT\\libs)")
    print("")
    print("You can pass either:")
    print("")
    print(" --all: Find and convert all Python DLLs on the host system.")
    print("")
    print(" version: convert DLLs for a specific version only (e.g. 2.3)")
    print("")
    sys.exit(1)
# build 'thelist' of (exe, version) pairs to convert
if sys.argv[1] == '--all':
    thelist = get_python_verlist()
else:
    try:
        pyver = find_python.parse_version_string(sys.argv[1])
    except:
        print("")
        print("** Sorry, can't parse version string '%s'." % sys.argv[1])
        print("** You must pass a string like 2, 2.1, 2.1.3.")
        print("")
        sys.exit(1)
    # the DLLs are only specified to minor version (i.e. 2.1)
    # so only need to match that much of the string
    exe = find_py_minor(pyver)
    if exe is None:
        print("")
        print("** Sorry, no such Python version %s" % sys.argv[1])
        print("")
        sys.exit(1)
    thelist = [(exe,pyver[:2]+[0])]
# convert each DLL, remembering whether anything failed
had_failure = 0
for exe,ver in thelist:
    dll = find_python_win32_dll( ver )
    print("%s, version = %d.%d.%d, dll = %s" % \
          (exe,ver[0],ver[1],ver[2],dll))
    # the .a file will be placed in $PYTHONDIR\libs
    libdir = os.path.join( os.path.dirname(exe), 'libs' )
    if os.access(libdir, os.W_OK) == 0:
        print("**** WARNING ****")
        print("**** Can't write to %s, skipping!" % libdir)
        had_failure = 1
    else:
        libname = os.path.splitext(os.path.basename(dll))[0]
        libname = os.path.join( libdir,"lib%s.a" % libname)
        make_mingw_lib_from_dll(libname,dll)
# final report: warn on any failure, otherwise show usage hints
if had_failure:
    print("*** WARNING ***")
    print("*** There were one or more errors in the conversion process.")
    print("*** Recommend you correct the errors and try again.")
else:
    print("**")
    print("** Conversion complete")
    print("**")
    print("** Now, to build native win32 extension modules, all you")
    print("** have to do is call your setup.py like this:")
    print("**")
    print("**     python setup.py -cmingw32")
    print("**")
    print("** [This works for both MinGW & cygwin with 'gcc -mno-cygwin']")
    print("**")
    print("** NOTE: If you let distutils run SWIG for you, and you")
    print("**       are using Python <= 2.2, read the notes at:")
    print("**")
    print("**          http://sebsauvage.net/python/mingw.html")
    print("**")
    print("**       On minor changes you'll need to make to distutils.")
    print("**")
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,045 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/attic/cyg4win32.py | # this is cyg4win32.py from gnosis.disthelper
#
# Do NOT edit/delete the first line of this file!! (it is used
# as a sanity check to prevent accidentally overwriting files)
#
# This is a lightly modified version of the Mingw32CCompiler.
#
# It is meant for using cygwin's 'gcc -mno-cygwin' to compile
# Python extensions that will run under a win32-native (VC++
# compiled) version of Python [note: I believe that is SUPPOSED
# to work out of the box using -cmingw32, but it must be somewhat
# out of date, since I had to make the hacks here.]
#
# To use it, call your setup.py like this:
#
# python setup.py build -ccyg4win32
#
# This file must be installed in distutils (alongside ccompiler.py)
# or distutils can't find it. ugh.
# There are some hacks here that seem out of place, however, since
# this class is meant for a specific compiler on a specific platform,
# having the hacks here prevents ugliness in setup.py
#
# frankm@hiwaay.net
#
from distutils.cygwinccompiler import Mingw32CCompiler
import re
class Cygwin4Win32Compiler(Mingw32CCompiler):
    """Mingw32CCompiler variant that uses cygwin's 'gcc -mno-cygwin'
    to build extensions for a win32-native (VC++-compiled) Python.

    Selected via: python setup.py build -ccyg4win32
    """
    compiler_type = 'cyg4win32'
    def __init__(self, verbose=0, dry_run=0, force=0 ):
        Mingw32CCompiler.__init__(self,verbose,dry_run,force)
    def check_for_gcc(self, where):
        """Warn (but don't fail) if any compiler/linker slot isn't gcc.

        where -- label naming the caller, used in the warning text.
        """
        # ensure gcc has been set for these attributes ...
        checklist = ['compiler_cxx','compiler_so','linker_exe',
                     'linker_so','compiler']
        for k in checklist:
            if getattr(self,k)[0] != 'gcc':
                print("**** WARNING in %s: Expecting gcc for '%s', got '%s'" % \
                      (where,k,getattr(self,k)))
    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        # make sure gcc is being used
        self.check_for_gcc('_compile')
        Mingw32CCompiler._compile(self, obj, src, ext, cc_args, extra_postargs,
                                  pp_opts)
    def link (self, target_desc, objects, output_filename, output_dir=None,
              libraries=None, library_dirs=None, runtime_library_dirs=None,
              export_symbols=None, debug=0, extra_preargs=None,
              extra_postargs=None, build_temp=None, target_lang=None):
        # for some reason, this is being set to 'cc', which is wrong
        self.compiler_cxx = ['gcc','-mno-cygwin']
        # make sure gcc is being used
        self.check_for_gcc('_link')
        # BUGFIX: distutils frequently passes target_lang=None, which
        # crashed re.search(); also tolerate libraries=None when we
        # need to append to it.
        if target_lang and re.search(r'c\+\+',target_lang,re.I):
            # stdc++ isn't being included since platform != posix
            if libraries is None:
                libraries = []
            if 'stdc++' not in libraries:
                libraries.append('stdc++')
        Mingw32CCompiler.link(self,target_desc,objects,output_filename,
                              output_dir,libraries,library_dirs,
                              runtime_library_dirs,export_symbols,
                              debug,extra_preargs,extra_postargs,
                              build_temp,target_lang)
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,046 | MEERQAT/Gnosis_Utils | refs/heads/master | /setup.py | #
# Installing Gnosis_Utils:
#
# python setup.py install
#
# Or, if you have multiple versions of Python installed,
# you can install for all of them at once using:
#
# python setup.py install_all
#
# Other commands available (for maintainers):
#
# # build but don't install
# python setup.py build
#
# # create source distribution (placed in dist/)
# python setup.py makedist
#
# # clean out junk files
# python setup.py clean
#
# You should *not* run any other distutils commands, since
# there is extra magic that needs to be done.
# [FYI, this file is called 'setup.py' because that
# is what users traditionally expect, but also because
# that name is hardcoded a few places in distutils.]
#
# --frankm@hiwaay.net
# put disthelper & gnosis in path so they're usable before installation
import sys
sys.path.insert(0,'.')
from distutils.core import setup
import os, re, shutil
from glob import glob
from disthelper.misc import unlink, rmtree, make_tempdir
from disthelper.setuplib import zip_current_dir, tar_bz2_current_dir, \
tar_gz_current_dir
# use string functions instead of string methods, to maintain
# compatibility w/Python 1.5 (even though PyXML has dropped support
# for Python < 2.0, so the XML portions of gnosis won't work, unless
# you find an old PyXML)
import string
import gnosis.pyconfig
# sanity check for botched Python installations
# (probably means that pyexpat wasn't built)
def is_DOM_okay():
    """Sanity-check xml.dom.minidom; return 1 if usable, 0 otherwise.

    Guards against botched Python installations (typically a missing
    pyexpat) and against DOMs whose getAttribute() misbehaves."""
    sample = '<?xml version="1.0"?><P hello="1"></P>'
    import io
    try:
        from xml.dom import minidom
        # test 1: will it parse at all?
        doc = minidom.parse(io.StringIO(sample))
        # test 2: getAttribute on a missing attribute must quietly
        # return '' (xml.pickle's DOM parser depends on that)
        doc.firstChild.getAttribute('bogus')
        return 1
    except:
        # broken/ancient XML support (e.g. need a newer PyXML)
        return 0
def remake_MANIFEST():
    """Regenerate the MANIFEST file via 'setup.py sdist --manifest-only'."""
    print("Creating MANIFEST ...")
    # start from an empty MANIFEST
    unlink('MANIFEST')
    with open('MANIFEST','w') as f:
        f.write('')
    # create real MANIFEST, including MANIFEST in listing
    # (yes, it's weird, but works best this way)
    os.system('%s setup.py sdist --manifest-only' % sys.executable)
# regenerate MANIFEST on first run (setup() complains if it's missing)
if not os.path.exists('MANIFEST'):
    # should only happen in the dev environment if you do a 'make clean'
    # without remaking the sdist
    remake_MANIFEST()
def ensure_at_toplevel():
    "Make kinda sure I'm at the toplevel."
    # these must all exist in the cwd if we're at the source-tree root
    for required in ('gnosis', 'MANIFEST', 'disthelper'):
        if not os.path.exists(required):
            print("*** Hey, you're supposed to run this from the toplevel.")
            sys.exit(1)
# just check globally for now
# (exits with status 1 unless run from the source tree's toplevel)
ensure_at_toplevel()
# ensure that an entire path exists
# os.makedirs doesn't seem to work as I expected ... :-(
# [I think this was a bug in earlier Python versions, but anyways
# it's well tested now and doesn't hurt anything. --fpm]
def makepath(path):
    """Create every directory along 'path' (best effort, errors ignored).

    path -- a MANIFEST-style path, so it always uses forward slashes
            (see distutils docs)."""
    # BUGFIX: string.split() was removed in Python 3 -- use the str method
    parts = path.split('/')
    partial = ''
    for part in parts:
        partial = os.path.join(partial,part)
        # ignore failures (dir already exists, permissions, etc.) --
        # callers only care that the leaf ends up usable
        try: os.mkdir(partial)
        except OSError: pass
# try running setup(), returning 1 on success, 0 on failure
def do_setup():
    """Run distutils setup() with the Gnosis_Utils package metadata.

    Returns 1 on success, 0 on failure (the error is printed, not raised).
    """
    import gnosis.version # only import when needed!
    try:
        pkglist = ["gnosis",
                   "gnosis.anon",
                   "gnosis.magic",
                   "gnosis.util",
                   "gnosis.util.convert",
                   "gnosis.xml",
                   "gnosis.xml.objectify",
                   "gnosis.xml.objectify.doc",
                   "gnosis.xml.objectify.test",
                   "gnosis.xml.pickle",
                   "gnosis.xml.pickle.ext",
                   "gnosis.xml.pickle.util",
                   "gnosis.xml.pickle.doc",
                   "gnosis.xml.pickle.test",
                   "gnosis.xml.pickle.parsers"]
        # see if this Python has the capability to run certain
        # packages.
        # (this is really weird ... I have to remove them
        # from the pkglist *AND* delete them in copy_extra_files.
        # I don't understand distutils sometimes ...)
        if gnosis.pyconfig.Have_Generators():
            # need generators for relax
            pkglist.append( "gnosis.xml.relax" )
        if gnosis.pyconfig.Have_ListComprehensions():
            # need list comprehensions here
            pkglist.append( "gnosis.xml.validity" )
        kwargs = {
            # Note: PKG-INFO is autogenerated & placed in the sdist
            # using info from this dictionary, as follows:
            #
            #    name -> Name
            #    version -> Version
            #    description -> Summary
            #    url -> Home-page
            #    author -> Author
            #    author_email -> Author-email
            #    license -> License
            #    long_description -> Description
            #    platforms -> Platform (multiple lines if needed)
            'name' : gnosis.version.PKGNAME,
            'version': gnosis.version.VSTRING,
            'description': "Modules and Utilities for XML Documents and Other Miscellany",
            # BUGFIX: this dict literal used to contain 'long_description'
            # twice; the first entry was dead code, only this one survives
            'long_description': "Include modules: xml.pickle, xml.objectify, "\
                                "indexer, xml.indexer, xml.validity, gnosis.anon, "\
                                "multimethods, magic, and friends.",
            'author': "Gnosis Software",
            'author_email': "mertz@gnosis.cx",
            'url': "http://gnosis.cx/download/",
            'packages': pkglist,
            'license': "Public Domain"
            }
        # Python 2.0 dies if you pass a 'platforms' arg, but if we
        # leave it out, we get UNKNOWN in PKG-INFO. Our sdist will
        # always be built with Python >= 2.1, so it's not a problem.
        # This keeps us from bombing when installing on Python 2.0.
        # BUGFIX: use tuple comparison -- the old per-element test
        # (major >= 2 and minor >= 1) misclassified e.g. 3.0
        if not hasattr(sys,'version_info') or \
               tuple(sys.version_info[:2]) >= (2,1):
            kwargs['platforms'] = 'Any'
        # Python <= 2.2 misspells 'license' (deprecation warning in 2.3+)
        if not hasattr(sys,'version_info') or \
               tuple(sys.version_info[:2]) < (2,3):
            kwargs['licence'] = kwargs['license']
            del kwargs['license']
        setup(**kwargs)
        rval = 1
    except Exception as exc:
        print("*** ERROR: %s" % str(exc))
        rval = 0
    del gnosis.version
    return rval
# by default, we copy EVERYTHING from MANIFEST to build/lib*.
# this has the nice side-effect of converting all text files
# to the platform text format. setting this to zero means we
# only copy files NOT already under build/lib*
# (read by copy_extra_files below)
copy_all_files = 1
def copy_extra_files():
    """Copy every file listed in MANIFEST into the build/lib tree.

    Reading/writing in text mode deliberately converts line endings
    to the host platform's format. Honors the module-level
    copy_all_files flag; 'setup.py' itself is never copied.
    """
    # NOTE(review): the glob pattern has no wildcard, so this assumes
    # the build dir is exactly build/lib -- confirm for non-pure builds
    destroot = glob(os.path.join('build','lib'))[0]
    # go through MANIFEST to see what is supposed to be under build directory
    print("Copying extra files to %s ..." % destroot)
    # BUGFIX: string.rstrip() was removed in Python 3 -- use the str
    # method; also close file handles deterministically
    with open('MANIFEST','r') as f:
        manifest_lines = f.readlines()
    for srcfile in manifest_lines:
        srcfile = srcfile.rstrip() # remove newline char(s)
        dest = os.path.join(destroot,os.path.normpath(srcfile))
        if not os.path.exists(dest) or copy_all_files:
            destdir,fname = os.path.split(dest)
            if fname == 'setup.py':
                continue # skip
            makepath(destdir)
            with open(srcfile,'r') as src:
                data = src.read()
            with open(dest,'w') as out:
                out.write(data)
    if not gnosis.pyconfig.Have_Generators():
        # see note about pkglist in do_setup() ... have to delete it here too
        rmtree(os.path.join(destroot,'gnosis','xml','relax'))
    if not gnosis.pyconfig.Have_ListComprehensions():
        # ditto re: do_setup()
        rmtree(os.path.join(destroot,'gnosis','xml','validity'))
# --- no arguments: show usage and exit ---
if len(sys.argv) < 2:
    import gnosis.version # import only when needed!
    print("\nGnosis Utilities %s installation." % gnosis.version.VSTRING)
    print("")
    print("Usage:")
    print("")
    print(" # Install, for a single version of Python")
    print(" python setup.py install")
    print("")
    print(" # Install for ALL Python versions on this machine")
    print(" python setup.py install_all")
    print("")
    print("** Maintainer commands **")
    print("")
    print(" # create source distribution (in dist/)")
    print(" python setup.py makedist")
    print("")
    print(" # clean out junk files")
    print(" python setup.py clean")
    print("")
    print("** DO NOT run any other distutils commands **")
    print("")
    del gnosis.version
    sys.exit(1)
# --- 'build': wipe stale build/ then let do_setup() rebuild it ---
if 'build' in sys.argv:
    # when building for multiple Pythons, it isn't smart
    # enough to seperate the builds, so manually delete
    # the build/ tree before beginning
    rmtree('build')
    # fall through to do_setup() below
# --- 'clean': remove generated/junk files, then regenerate MANIFEST ---
if 'clean' in sys.argv:
    print("Cleaning tree ...")
    # remove .pyc, *~, coredumps, .md5envelope files
    patts = "*.pyc,*~,core,*.md5envelope"
    os.system('%s disthelper/scripts/rmfind.py -v -R "%s" .' % \
              (sys.executable,patts))
    # rm -rf build & dist
    rmtree('build')
    rmtree('dist')
    # remove some test output files that may be around
    gxpt = os.path.join('gnosis','xml','pickle','test')
    names = glob(os.path.join(gxpt,'TESTS.OUT-*'))
    for name in names:
        unlink(name)
    unlink(os.path.join(gxpt,'aaa.xml'))
    unlink('MANIFEST')
    unlink('PKG-INFO')
    # setup will complain if MANIFEST missing, so just regenerate
    remake_MANIFEST()
    sys.exit(0)
# --- 'taball': disabled maintainer command (kept for reference) ---
if 'taball' in sys.argv:
    "Convert spaces->tabs in all .py files"
    #os.system('%s disthelper/scripts/tabtree.py -v -x py -r .' % \
    #    sys.executable)
    sys.exit(0)
# --- 'formatdist': normalize whitespace/text format before packaging ---
if 'formatdist' in sys.argv:
    "Format files for distribution."
    # convert tabs to spaces in all .py files to be politically correct :-)
    print("Untabifying tree ...")
    os.system('%s disthelper/scripts/untabtree.py -w 4 -x py -r .' % \
              sys.executable)
    # put sources in unix textfile format (\n), so auto-conversion
    # to platform format works (in setup.py)
    print("Converting text format ...")
    os.system('%s disthelper/scripts/porttext.py -x py -r .' % sys.executable)
    sys.exit(0)
# --- 'makedist': clean, format, grab PKG-INFO from a throwaway sdist,
# --- then delegate archive creation to the 'master' command ---
if 'makedist' in sys.argv:
    "Create source distribution"
    # clean tree
    os.system('%s setup.py clean' % sys.executable)
    # format text files
    os.system('%s setup.py formatdist' % sys.executable)
    # create MANIFEST
    remake_MANIFEST()
    rmtree('dist')
    # actually, we don't want the sdist anymore -- the 'master'
    # archive has everything (primarily due to the fact that
    # we DON'T want to install disthelper, but it still needs
    # to be in the sdist, and make sdist won't do that
    # the ONLY think needed from the sdist is PKG-INFO.
    # Could just write it ourselves, but lets do the cheesy
    # thing and grab it from the sdist :-)
    os.system('%s setup.py sdist --keep-temp' % sys.executable)
    import gnosis.version # only import when needed!
    rawdir = os.path.join('%s-%s' % (gnosis.version.PKGNAME,
                                     gnosis.version.VSTRING))
    if not os.path.isdir(rawdir):
        print("*****************************************")
        print("* AACK! Where is sdist rawdir?")
        print("*****************************************")
        raise Exception("Punt")
    buf = open(os.path.join(rawdir,'PKG-INFO'),'r').read()
    del gnosis.version
    open('PKG-INFO','w').write(buf)
    print("Deleting %s" % rawdir)
    rmtree( rawdir )
    # start clean again ... (sdist has same name as the .tgz
    # I'm going to create!)
    print("Deleting dist/")
    rmtree('dist')
    # create dist (which *includes* MANIFEST just created)
    # (yes, its gross, but required as far as I can tell)
    rmtree('dist')
    os.system('%s setup.py master' % sys.executable)
    sys.exit(0)
# --- 'master': build the release archives (.zip/.tar.gz/.tar.bz2)
# --- in a tempdir, then move them into dist/ ---
if 'master' in sys.argv:
    import gnosis.version # only as needed!
    myver = gnosis.version.VSTRING
    # -- create archives, multiple formats for user convenience --
    tempdir = make_tempdir()
    excludes = [re.compile('.+\.pyc$')]
    # .zip file
    zipname = os.path.join(tempdir,'Gnosis_Utils-%s.zip' % myver)
    print("Creating %s ..." % os.path.basename(zipname))
    zip_current_dir( zipname, 'Gnosis_Utils-%s' % myver, excludes )
    # .tar.gz
    targz_name = os.path.join(tempdir,'Gnosis_Utils-%s.tar.gz' % myver)
    print("Creating %s ..." % os.path.basename(targz_name))
    tar_gz_current_dir( targz_name, 'Gnosis_Utils-%s' % myver, excludes )
    # .tar.bz2
    tarbz2_name = os.path.join(tempdir,'Gnosis_Utils-%s.tar.bz2' % myver)
    print("Creating %s ..." % os.path.basename(tarbz2_name))
    tar_bz2_current_dir( tarbz2_name, 'Gnosis_Utils-%s' % myver, excludes )
    # place in dist/
    if not os.path.isdir('dist'):
        os.mkdir('dist')
    # shutil.move() doesn't exist in older Pythons
    shutil.copy2(zipname,'dist')
    unlink(zipname)
    shutil.copy2(targz_name,'dist')
    unlink(targz_name)
    shutil.copy2(tarbz2_name,'dist')
    unlink(tarbz2_name)
    os.rmdir(tempdir)
    sys.exit(0)
# --- 'install': force a clean rebuild, then fall through to do_setup() ---
if 'install' in sys.argv:
    #if not os.path.isdir('build'):
    #print "** Please run build command first **"
    #sys.exit(1)
    # os.system('%s setup.py build' % sys.executable)
    # make sure it's a clean build!
    os.system('%s setup.py build' % sys.executable)
    # fall through to do_setup()
# --- 'install_all': rerun this script under every installed Python ---
if 'install_all' in sys.argv:
    from disthelper.find_python import get_python_verlist
    # rebuild command line, replacing 'install_all' with 'install'
    args = ''
    for arg in sys.argv:
        if arg == 'install_all':
            args = args + ' install '
        else:
            args = args + ' ' + arg + ' '
    print("Searching for all installed Pythons ...")
    pylist = get_python_verlist()
    for exe,info in pylist:
        print('%s %s' % (exe,args))
        os.system('%s %s' % (exe,args))
    sys.exit(0)
# --- run distutils setup() for the remaining commands ---
if do_setup() != 1:
    print("** ERROR: setup() failed")
    sys.exit(1)
# post-setup steps: extra files for 'build', sanity notice for 'install'
if 'build' in sys.argv:
    copy_extra_files()
    if not is_DOM_okay():
        print("")
        print("** WARNING: xml.dom.minidom is not working.")
        print("** Some portions of the package will not work.")
        print("** If you are using Python 2.0, you should install PyXML:")
        print("**    http://pyxml.sourceforge.net/topics/download.html")
        print("")
elif 'install' in sys.argv:
    print("\n*** Gnosis_Utils - Installed OK ***\n")
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,047 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/util/__init__.py | # ancient WinZips can't handle 0-length files
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,048 | MEERQAT/Gnosis_Utils | refs/heads/master | /gnosis/xml/pickle/test/test_fail_raise_1.py | # used for sanity checking the test harness
# "fail" a test with an explicity raise, oldstyle
raise Exception("AAA")
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,049 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/treeops/__init__.py | # make some things available at the toplevel
from .treeops import TreeOps, TreeOptParser
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,050 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/misc.py |
#
# More handy functions I found myself reinventing every
# time I made a setup.py
#
# This should be compatible back to Python 1.5.2.
#
# frankm@hiwaay.net
#
import os
import shutil
import string
import sys
from stat import *
# public interface
__all__ = ['mtime','find_in_path','find_exe_in_path','unlink','make_tempdir',
'samepath']
def mtime(file): # convenience
    "Return the modification time of *file*, in seconds since the epoch."
    info = os.stat(file)
    return info[ST_MTIME]
def __normpath_for_comp(path):
    """Return *path* in canonical textual form for comparison.

    Absolute first, then case-normalized; os.path.abspath implies an
    os.path.normpath as well, so separators and '.' segments collapse."""
    absolute = os.path.abspath(path)
    return os.path.normcase(absolute)
def samepath(path1,path2):
    """Test for equality of path names. Neither path has to exist.

    The comparison is purely textual (absolute + case-normalized):
    a device:inode check would only work for paths that exist on
    posix/mac, and this helper must also handle non-existent paths."""
    normalize = lambda p: os.path.normcase(os.path.abspath(p))
    return normalize(path1) == normalize(path2)
def find_in_path( filename ):
    """Search PATH for the named file.
    Returns full pathname if found, None if not."""
    # str.split, not string.split(): the function form was removed in
    # Python 3 and raised AttributeError here.
    pathlist = os.environ['PATH'].split(os.pathsep)
    for path in filter( os.path.isdir, pathlist ):
        name = os.path.join( path,filename )
        if os.path.isfile(name):
            return name
    return None
def find_exe_in_path( basename ):
    """Find executable in PATH (after adding extension to
    basename, as appropriate).
    Returns full pathname, or None if not found."""
    # NOTE: requires the module-level `import sys` (it was missing
    # originally, so the cygwin check raised NameError on posix).
    if os.name == 'posix' and sys.platform[:6] == 'cygwin':
        # cygwin: binaries carry .exe even with a POSIX-style PATH
        return find_in_path( basename+'.exe')
    elif os.name == 'posix':
        return find_in_path( basename )
    elif os.name in ['nt','os2']:
        # prefer the .exe form, fall back to the bare name
        name = find_in_path( basename+'.exe' )
        if name is None:
            name = find_in_path( basename )
        return name
    else:
        # unknown platform: try the bare name
        return find_in_path( basename )
def unlink(filename):
    """An unlink() wrapper to work around win32 problems
    in some Python versions. This is like 'rm -f' - the file
    doesn't have to exist."""
    if not os.path.isfile(filename):
        return  # rm -f semantics: silently ignore a missing file
    try:
        os.unlink(filename)
    except OSError:
        # narrowed from a bare except: only filesystem errors should
        # trigger the os.remove() fallback for the win32 quirk
        os.remove(filename)
def rmtree(path):
    "Like 'rm -rf path'"
    # a non-directory (including a non-existent path) is silently ignored
    if not os.path.isdir(path):
        return
    shutil.rmtree(path)
try:
    # use secure mkdtemp if available (tempfile.mkdtemp: Python 2.3+)
    from tempfile import mkdtemp
    HAVE_MKDTEMP = 1
except ImportError:
    # narrowed from a bare except: only a missing name should trigger
    # the legacy fallback to the race-prone mktemp()
    from tempfile import mktemp
    HAVE_MKDTEMP = 0
def make_tempdir():
    """Make a temporary directory, securely if possible.
    Creates and returns the full pathname. Caller is
    responsible for deleting dir when finished."""
    if not HAVE_MKDTEMP:
        # legacy path: mktemp() only picks a name, we must create it
        dirname = mktemp()
        os.mkdir(dirname)
        return dirname
    return mkdtemp()
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,051 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/find_python.py | #---------------------------------------------------------------
# find_python.py
#
# Routines for enumerating the Pythons available on a
# machine, and finding a Python matching certain criteria.
#
# Can be used as a module or standalone script.
#
# frankm@hiwaay.net
#---------------------------------------------------------------
#
# This works under all Pythons since 1.5.2, so some of the
# syntax looks a bit outdated :-)
#
# Note that Python 2.0+ is recommended under win32 so that
# the registry can be accessed.
#
#---------------------------------------------------------------
# TODO: match os.name and/or sys.platform in matching routines
import sys
# make disthelper accessible
sys.path.insert(0,'.')
# now the real code begins ...
import os, re, string
from stat import *
from disthelper.misc import make_tempdir, unlink
# this can be used as a module or script. here are the
# public functions ...
__all__ = ['get_python_verlist','find_py_atleast','find_py_between',
'find_py_minor']
try:
# using _winreg requires Python 2.0+, however we can detect
# Python version back to (at least) 1.5.2 since they all
# follow the same registration scheme.
from winreg import OpenKey, HKEY_LOCAL_MACHINE, EnumKey, \
QueryInfoKey, QueryValueEx
HAVE_WIN32_REGISTRY = 1
except:
HAVE_WIN32_REGISTRY = 0
# to make version comparisons easy, I make the 'bold' assumption that
# no digit of the version # will be greater than 255, and calculate
# a linear value here.
def canon_ver( ver ):
    """Pack a (major, minor, micro) version into a single comparable
    integer, assuming each component fits in a byte (0..255)."""
    major, minor, micro = ver[0], ver[1], ver[2]
    return (major << 16) + (minor << 8) + micro
def get_pythons_from_registry():
    """Search the win32 registry for installed Pythons.
    Returns a list of Python executables for all the Pythons
    installed on the host machine.

    Returns an empty list on platforms without the winreg module."""
    if HAVE_WIN32_REGISTRY == 0:
        return []
    # get the toplevel key
    topkey = OpenKey(HKEY_LOCAL_MACHINE,"SOFTWARE\\Python\\PythonCore")
    # under PythonCore will be subkeys like '2.0', '2.1', etc.
    nr_vers = QueryInfoKey(topkey)[0]  # element 0 = number of subkeys
    namelist = []
    # for each of those keys, get the InstallPath
    for i in range(nr_vers):
        verkey = OpenKey(topkey, "%s\\InstallPath" % EnumKey(topkey,i))
        # QueryValueEx(key, None) reads the key's default (unnamed) value
        path,typ = QueryValueEx(verkey,None)
        name = os.path.join(path,'python.exe')
        # skip stale registry entries whose python.exe is gone
        if os.path.isfile(name):
            namelist.append(name)
    return namelist
def find_pythons_in_dir( dirname ):
    """
    Find everything that looks like a Python executable
    in the given directory. Returns a list containing
    the fullpath to each executable.
    """
    # I could be more strict here, and change the pattern
    # based on OS, but that gets messy, and there is no way
    # around the underlying flaw that a malicious file could
    # sneak in here. But that's a generic flaw, and the reason
    # why you aren't supposed to put '.' in your PATH on POSIXy
    # systems.
    #
    # raw string: '\.' in a plain literal is an invalid escape
    # sequence and warns on modern Pythons; '.' needs no escape
    # inside a character class.
    patt = re.compile(r'^python[0-9.]*(\.exe)?$', re.I)
    found = []
    for name in os.listdir(dirname):
        if patt.match( name ):
            found.append( os.path.join(dirname, name) )
    return found
def find_all_pythons():
    """Search system for all Python executables. The returned
    list may contain duplicates."""
    allpys = []
    # split PATH according to platform rules; str.split replaces
    # string.split(), which was removed in Python 3 and raised
    # AttributeError here.
    pathlist = os.environ['PATH'].split(os.pathsep)
    # search PATH, excluding nonexistant dirs
    for path in filter( os.path.isdir, pathlist ):
        allpys.extend( find_pythons_in_dir( path ) )
    # check the win32 registry, as appropriate (no-op elsewhere)
    allpys.extend( get_pythons_from_registry() )
    # and of course I'm running under a Python, in case
    # no others were found
    allpys.append( os.path.abspath(sys.executable) )
    return allpys
def get_pyver_from_exe( exename ):
    """
    Given a python executable, find out its version.
    Returns version as a 3-item tuple:
       (os.name, sys.platform, version)
    Where version is a 3-element list, e.g. [2,1,3]
    Returns None if can't get version.

    Works by running *exename* on a small generated script that writes
    its own version to a file, then reading that file back.
    """
    # hack: when running a win32-native Python from a cygwin shell,
    # with cygwin Python installed, you'll see cygwin symlinks to the
    # real cygwin Python, but NTVDM.exe will crash when trying to run
    # the symlinks. Of course, to win32, they aren't links, so I can't
    # just filter them with islink(). Instead, I check if the binary
    # looks too small to be real.
    if os.stat(exename)[ST_SIZE] < 1000:
        return None
    # this is required to work on Python 1.5.2
    # note that splitting sys.version doesn't work on .0 releases, so
    # I try sys.version_info first, but it isn't available on 1.5.2.
    # Don't insert any lefthand spaces!
    pycmd = """
import sys, string, os
try: v = sys.version_info[0],sys.version_info[1],sys.version_info[2]
except: v = map(int,string.split(string.split(sys.version)[0],'.'))
open('lineout','w').write('%s %s %s %s %s\\n' % (os.name,sys.platform,v[0],v[1],v[2]))
"""
    # the most portable thing to do is write pycmd to a file, and
    # have pycmd write its results to a file as well. so make
    # a temp directory to run from.
    savedir = os.getcwd()
    tempdir = make_tempdir()
    os.chdir(tempdir)
    f = open('test.py','w')
    f.write( pycmd )
    del f # explicit close seems to be needed under win32 (i.e. open().write() fails)
    os.system('%s test.py' % exename)
    if not os.path.isfile('lineout'):
        # NOTE(review): this early return leaves the process chdir'ed
        # into tempdir and leaves test.py/tempdir behind -- confirm
        # whether that leak is acceptable for this one-shot tool
        return None # failed to run
    f = open('lineout','r')
    line = f.readline()
    del f # explicitly, for win32
    unlink('lineout')
    unlink('test.py')
    os.chdir(savedir)
    os.rmdir(tempdir)
    # lineout format: "os_name sys_platform major minor micro"
    p = line.split()
    return (p[0], p[1], list(map( int, p[2:] )))
def get_python_verlist():
    """
    Returns a list of all Pythons available on the host system.
    The list is guaranteed to not contain duplicates (i.e. if
    the host system has 'python' symlinked to 'python2.3', that
    will be caught and only one will be entered in the list; however
    there is no guarantee which particular one will be removed).
    Returns list of tuples:
       (exe_fullpath, info)
    Where info is the tuple from get_pyver_from_exe()
    """
    results = []
    seen = []
    for exe in find_all_pythons():
        info = get_pyver_from_exe(exe)
        # drop executables we couldn't probe, and version duplicates
        if info is None or info in seen:
            continue
        seen.append(info)
        results.append((exe, info))
    return results
def find_py_atleast( minver ):
    """
    Find a Python executable in the local machine PATH of
    at least minver.

    minver is a tuple giving the minimum version. i.e. to
    search for 2.0.x, pass (2,0,0).

    Note: There is no guarantee the returned version
    will be the highest (or lowest) version that satisfies
    the criteria.  Returns None if no Python qualifies.
    """
    threshold = canon_ver(minver)
    for exe, info in get_python_verlist():
        # info[2] is the [major, minor, micro] list
        if canon_ver(info[2]) >= threshold:
            return exe
    return None
def find_py_between( minver, maxver ):
    """
    Find a Python executable in the local machine PATH of
    at least minver, and at most maxver (both inclusive).

    Note: There is no guarantee the returned version
    will be the highest (or lowest) version that satisfies
    the criteria.  Returns None if no Python qualifies.
    """
    lo = canon_ver(minver)
    hi = canon_ver(maxver)
    for exe, info in get_python_verlist():
        if lo <= canon_ver(info[2]) <= hi:
            return exe
    return None
def find_py_minor( ver ):
    """Find a Python matching *ver*'s major.minor series (any micro)."""
    # same byte-per-component assumption as canon_ver: micro <= 255,
    # so [maj, min, 0] .. [maj, min, 255] spans the whole minor series
    lo = [ver[0], ver[1], 0]
    hi = [ver[0], ver[1], 255]
    return find_py_between(lo, hi)
def usage():
    """Print the command-line help text, then exit with status 1."""
    help_lines = (
        "Usage: find_python args",
        "",
        "Find a Python executable on the local machine satisfying",
        "user-specified criteria.",
        "",
        "args can be one of:",
        "",
        " show",
        " List all Pythons in a human-readable way.",
        "",
        " atleast version",
        " Find a Python of at least the given version number.",
        " (version can be like '2', '2.1', '2.2.1')",
        "",
        " between ver1 ver2",
        " Find a Python at least ver1 and not higher than ver2",
        "",
        " match-minor version",
        " Find a Python matching the minor version",
        " (i.e. '2.0' matches 2.0.0, 2.0.1, .. , but NOT 2.1+)",
        "",
    )
    for line in help_lines:
        print(line)
    sys.exit(1)
def parse_version_string( ver ):
    """Parse a dotted version string like '2', '2.1' or '2.2.1' into a
    3-element [major, minor, micro] list, zero-padded on the right.

    Raises ValueError for non-numeric components (via int()) and
    Exception for strings with more than three components."""
    # str.split replaces string.split(), which was removed in Python 3
    # and raised AttributeError here.
    parts = list(map(int, ver.split('.')))
    if len(parts) > 3:
        raise Exception("Version string too long")
    # pad with 0's
    return parts + [0] * (3 - len(parts))
if __name__ == '__main__':
    # command-line front end; see usage() for the sub-commands
    if len(sys.argv) < 2:
        usage()
    if sys.argv[1] == 'show':
        # human-readable dump: every raw hit first, then the
        # deduplicated set with version numbers
        pylist = find_all_pythons()
        print("Here are all the Pythons I found:")
        for name in pylist:
            print("\t%s" % name)
        print("Here is the minimum set, with version numbers:")
        verlist = get_python_verlist()
        for exe,ver in verlist:
            # ver = (os.name, sys.platform, [major, minor, micro])
            print("\t%s = Python %s, %s, %d.%d.%d" % (exe,ver[0],ver[1],
                            ver[2][0],ver[2][1],ver[2][2]))
    if sys.argv[1] == 'atleast':
        if len(sys.argv) != 3:
            usage()
        print(find_py_atleast( parse_version_string(sys.argv[2]) ))
    elif sys.argv[1] == 'between':
        if len(sys.argv) != 4:
            usage()
        print(find_py_between( parse_version_string(sys.argv[2]),
                               parse_version_string(sys.argv[3]) ))
    elif sys.argv[1] == 'match-minor':
        if len(sys.argv) != 3:
            usage()
        print(find_py_minor( parse_version_string(sys.argv[2]) ))
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,052 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/attic/dll_to_mingw.py |
#
# In order to use mingw (or cygwin with 'gcc -mno-cygwin')
# to create native win32 extension modules, you must create
# import libraries for the Python DLLs that gcc can use.
#
# This script automates the procedure found at:
# http://sebsauvage.net/python/mingw.html
#
# [excluding the parts about modifying distutils]
#
#
# frankm@hiwaay.net
#
# This is compatible back to Python 1.5.2, hence the
# old syntax here and there.
#from disthelper.find_python import *
#import disthelper.find_python as find_python
#from disthelper.misc import *
import sys, string, os
from shutil import copy2
try:
# _winreg requires Python 2.0+, so make it optional
from winreg import OpenKey, HKEY_LOCAL_MACHINE, EnumKey, \
QueryInfoKey, QueryValueEx
HAVE_WIN32_REGISTRY = 1
except:
HAVE_WIN32_REGISTRY = 0
try:
# use win32all, if installed
import win32api
HAVE_WIN32_API = 1
except:
HAVE_WIN32_API = 0
def find_in_path( filename ):
    """Search PATH for the named file.
    Returns full pathname if found, None if not."""
    # str.split, not string.split(): the function form was removed in
    # Python 3 and raised AttributeError here.
    pathlist = os.environ['PATH'].split(os.pathsep)
    for path in filter( os.path.isdir, pathlist ):
        name = os.path.join( path,filename )
        if os.path.isfile(name):
            return name
    return None
def find_exe_in_path( basename ):
    """Find executable in PATH (after adding extension to
    basename, as appropriate).
    Returns full pathname, or None if not found."""
    on_cygwin = os.name == 'posix' and sys.platform[:6] == 'cygwin'
    if on_cygwin or os.name in ['nt','os2']:
        # windows-ish platforms: executables carry the .exe suffix
        return find_in_path( basename+'.exe' )
    # plain posix and anything unrecognized: bare name
    return find_in_path( basename )
def make_mingw_lib_from_dll( destname, dllname ):
    """Take a win32 DLL and create a lib*.a file suitable
    for linking with mingw.

    dllname is the full pathname of the .DLL to convert.
    destname is the name for the converted mingw library.

    Requires dlltool.exe and pexports.exe in PATH (bails out via
    sys.exit otherwise).  This is an automation of the procedure
    found at: http://sebsauvage.net/python/mingw.html"""
    # make destname absolute, since I chdir() below ...
    destname = os.path.abspath(destname)
    # make sure necessary progs are available (these exit on failure)
    dlltool = find_dlltool_or_bail()
    pexports = find_pexports_or_bail()
    print("Converting %s -> %s" % (dllname,destname))
    savedir = os.getcwd()
    # do the work in tempdir and copy resulting .a to correct place
    tempdir = make_tempdir()
    os.chdir(tempdir)
    copy2( dllname, tempdir )
    # step 1: dump the DLL's exported symbols into a .def file
    cmd = "%s %s > temp.def" % \
          (pexports, os.path.basename(dllname))
    print("CMD: ",cmd)
    os.system(cmd)
    # step 2: build the import library from DLL + .def
    cmd = "%s --dllname %s --def temp.def --output-lib %s" % \
          (dlltool, os.path.basename(dllname), destname)
    print("CMD: ",cmd)
    os.system(cmd)
    # remove temporary files & tempdir
    unlink('temp.def')
    unlink(os.path.basename(dllname))
    os.chdir(savedir)
    os.rmdir(tempdir)
def find_dlltool_or_bail():
    "Find dlltool.exe, or bail out."
    name = find_exe_in_path('dlltool')
    if name is not None:
        return name
    # not found: explain how to get it, then abort the whole script
    for msg in ("***",
                "*** ERROR - dlltool.exe not found in PATH.",
                "***",
                "*** Make sure you have installed gcc from either",
                "*** cygwin or mingw.",
                "***"):
        print(msg)
    sys.exit(1)
def find_pexports_or_bail():
    "Find pexports.exe or bail out."
    name = find_exe_in_path('pexports')
    if name is not None:
        return name
    # not found: explain how to get it, then abort the whole script
    for msg in ("***",
                "*** ERROR - pexports.exe not found in PATH.",
                "*** Please download it from:",
                "*** http://starship.python.net/crew/kernr/mingw32/pexports-0.42h.zip",
                "***",
                "*** And place 'pexports.exe' in your PATH.",
                "***"):
        print(msg)
    sys.exit(1)
try:
    # use secure mkdtemp if available (tempfile.mkdtemp: Python 2.3+)
    from tempfile import mkdtemp
    HAVE_MKDTEMP = 1
except:
    # ancient Pythons only: fall back to the race-prone mktemp()
    from tempfile import mktemp
    HAVE_MKDTEMP = 0
def make_tempdir():
    """Make a temporary directory, securely if possible.
    Creates and returns the full pathname. Caller is
    responsible for deleting dir when finished."""
    if not HAVE_MKDTEMP:
        # legacy path: mktemp() only picks a name, we must create it
        dirname = mktemp()
        os.mkdir(dirname)
        return dirname
    return mkdtemp()
def unlink(filename):
    """An unlink() wrapper to work around win32 problems
    in some Python versions.

    NOTE: unlike the disthelper.misc variant, the file must exist;
    a missing file propagates OSError (FileNotFoundError)."""
    try:
        os.unlink(filename)
    except OSError:
        # narrowed from a bare except: retry via os.remove only for
        # the filesystem quirk this wrapper exists to paper over
        os.remove(filename)
# guard: this tool only makes sense under a win32-native Python
if os.name != 'nt':
    print("*** ERROR, not running under win32.")
    print("*** Make sure you're running with a win32 native Python,")
    print("*** not a cygwin version.")
    sys.exit(1)
#print find_pexports_or_bail()
#print find_dlltool_or_bail()
if len(sys.argv) < 3:
    print("Usage: dll_to_mingw.py DLL_NAME OUT_NAME")
    print("Where:")
    print(" DLL_NAME = .DLL file to convert")
    print(" OUT_NAME = Name for lib to create (typically libNNN.a)")
    sys.exit(1)
# arg order is intentionally swapped: the function signature is
# (destname, dllname), so argv[2]=OUT_NAME maps to destname and
# argv[1]=DLL_NAME maps to dllname
make_mingw_lib_from_dll(sys.argv[2],sys.argv[1])
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,053 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/scripts/untabtree.py |
#
# Untabify an entire tree, with filename matching.
#
# frankm@hiwaay.net
#
# make sure I can import disthelper
import sys
sys.path.insert(0,'.')
from . import grab_disthelper
# now the real code begins ...
from disthelper.treeops.treeops import *
from disthelper.treeops.fileops import *
from disthelper.treeops.lineops import untabify_line, copy_line
from .indentcheck import *
# define the command-line arg parser
class argv_parser(TreeOptParser):
    """Option parser for untabtree.py: the generic TreeOptParser plus
    the required -w/--tabwidth integer option."""
    def __init__(self):
        TreeOptParser.__init__(
            self, 'untabtree.py',
            'Untabify a directory tree, with filename matching.')
        # tab width is mandatory; the script checks for it after parsing
        self.add_intopt('w', 'tabwidth', 'tabwidth',
                        "Set the tab width (REQUIRED)")
# build the parser and process the command line
p = argv_parser()
opts,args = p.parse_argv(sys.argv)
# -w is mandatory: bail out with usage text if it wasn't given
if opts.tabwidth is None:
    print("** ERROR: You must specify a tab-width with -w")
    p.show_usage()
    sys.exit(1)
if len(args) == 0:
    if len(opts.regexlist) > 0:
        # user gave a glob but no targets - add cwd
        args.append('.')
    else:
        # don't know what user wants
        p.show_usage()
        sys.exit(1)
class UntabifyFileTransform(FileTransformFromLineOp):
    """FileTransform that untabifies every line of a file, skipping
    (copying verbatim) any file whose lines mix tabs and spaces."""
    def __init__(self,tabwidth):
        # register my_untabify as the default per-line operation
        FileTransformFromLineOp.__init__(self, self.my_untabify)
        self.tabwidth = tabwidth  # expansion width for untabify_line
    def my_untabify(self,line):
        # per-line operation: expand tabs to self.tabwidth spaces
        return untabify_line(line, self.tabwidth)
    def process(self, file_out, file_in):
        # chosen per file: copy verbatim when indentation is ambiguous
        if has_tab_space_mixing( file_in ):
            # too dangerous to try untabifying with mixed line beginnings
            print("ERROR: Skipping file '%s' - has mixed tabs & spaces" % file_in.name)
            self.set_lineop( copy_line )
        else:
            self.set_lineop( self.my_untabify )
        FileTransformFromLineOp.process(self, file_out, file_in)
# make a file operation ...
fileop = UntabifyFileTransform(opts.tabwidth)
# ... wrap it into a tree operation ...
treeop = TreeOpFromFileTransform( fileop )
# ... and run it over the target trees in args, honoring opts filters
treeop.runtree(opts, args)
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,054 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/treeops/fileops.py |
#
# A FileTransform takes an input filename, does some
# processing on it, and writes the processed file
# to the original filename.
#
# frankm@hiwaay.net
#
from disthelper.misc import make_tempdir, unlink
from disthelper.treeops.treeops import *
import os
from shutil import copy2
class FileTransform:
    """Base class: read a file, transform it, and write the result
    back to the same filename (via a temp file in a fresh tempdir).

    Subclasses override process() to do the actual work."""
    def __init__(self):
        pass
    def run(self, filename):
        """Run processing on a given file, with results
        being written back to same filename."""
        # make a tempdir so I can use a file-like object
        # instead of an OS-level handle (like with mkstemp)
        tdir = make_tempdir()
        tname = os.path.join(tdir,'process.out')
        #print "TEMPDIR = ",tdir
        #print "TEMPFILE = ",tname
        # binary mode so line endings pass through untouched
        f_out = open(tname,'wb')
        f_in = open(filename,'rb')
        # process in->out
        self.process(f_out, f_in)
        # del forces the files closed before the copy below
        del f_out
        del f_in
        # copy tempfile -> filename
        #print "COPY %s -> %s" % (tname,filename)
        # I think this is secure ... since caller owns filename
        # there isn't a race, right? (unlike writing into a tempdir
        # which could have malicious symlinks in it)
        copy2( tname, filename )
        #print "RMDIR %s" % tname
        # clean up tempdir
        unlink(tname)
        os.rmdir(tdir)
    # -*- internal API -*-
    def process(self,fileobj_out,fileobj_in):
        """Subclasses override this to do their processing.
        Inputs:
           fileobj_out - a file-like object to write to.
           fileobj_in - a file-like object to read from."""
        pass
class TreeOpFromFileTransform(TreeOps):
    """Adapter exposing a FileTransform as a TreeOps, so a per-file
    transform can be applied across a whole directory tree."""
    def __init__(self, filetransform):
        "filetransform is the FileTransform instance to apply"
        TreeOps.__init__(self)
        self.filetransform = filetransform
    # - internal API - invoked by the TreeOps walker -
    def process_one_file(self, fullname, opts):
        "Apply the wrapped transform to one matched file."
        if opts.verbose:
            print(fullname)
        self.filetransform.run(fullname)
    def process_one_dir(self, fullname):
        "Directories need no work here."
        pass
    def dir_noaccess(self, fullname):
        """Informational callback for unreadable directories;
        nothing is retried."""
        pass
class FileTransformFromLineOp(FileTransform):
    """Adapter lifting a LineOp (a line -> line callable) into a
    FileTransform that streams a whole file line by line."""
    def __init__(self, lineop):
        self.set_lineop(lineop)
    # - internal API -
    def set_lineop(self, op):
        # remembered so process() can apply it per line
        self.lineop = op
    def process(self, fileobj_out, fileobj_in):
        # stream one line at a time until readline() returns empty
        while True:
            line = fileobj_in.readline()
            if not line:
                break
            fileobj_out.write(self.lineop(line))
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,055 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/scripts/indentcheck.py | #
# Detect indentation style of Python code
#
# frankm@hiwaay.net
#
__all__ = ['guess_indentation','has_tab_space_mixing']
# make sure I can import disthelper
import sys
sys.path.insert(0,'.')
from . import grab_disthelper
from tokenize import tokenize, INDENT, ERRORTOKEN
class IndentDetector:
    """
    Detect the indentation style of a Python source file.
    Sometimes works on other file types, but no guarantee.
    This class hides the ugly details of using the Python 1.5.2-compatible
    tokenize() function.

    NOTE(review): this is a 'tokeneater' callback object for the legacy
    two-argument tokenize(readline, tokeneater) API; Python 3's
    tokenize.tokenize takes only a readline -- confirm the target runtime.
    """
    def __init__(self, fileobj):
        # source stream; readline() below proxies to it
        self.fileobj = fileobj
        # flag: set once the first INDENT token has been classified
        # (attributes uses_tabs/tabwidth are created lazily at that point)
        self.found_indent = 0
    def readline(self):
        # at first, I would return EOF here after finding the INDENT.
        # however, that leads to an exception if you EOF in the middle
        # of a multi-line statement, so skip the optimization.
        return self.fileobj.readline()
    def token_eater(self, ttype, tokstr, start, end, line):
        # classify only the FIRST indent seen in the file
        if ttype == INDENT:
            if self.found_indent:
                return # only count the first one!
            self.found_indent = 1
            if line[0] == '\t':
                # file uses tabs; tabwidth is meaningless in that case
                self.uses_tabs = 1
                self.tabwidth = 0
            else:
                # file uses spaces; width = span of the INDENT token
                self.uses_tabs = 0
                self.tabwidth = end[1] - start[1]
        elif ttype == ERRORTOKEN:
            # tokenizer choked: signal "unknown" with the (-1,-1) pair
            self.uses_tabs = -1
            self.tabwidth = -1
class MixedIndentDetector:
    """
    Detect if a Python file has mixed tabs & spaces for indentation.
    Sometimes works on other file types, but no guarantee.
    This class hides the ugly details of using the Python 1.5.2-compatible
    tokenize() function.

    NOTE(review): like IndentDetector, this is a 'tokeneater' callback
    for the legacy tokenize(readline, tokeneater) API -- confirm the
    target runtime supports that form.
    """
    def __init__(self, fileobj):
        # source stream; readline() below proxies to it
        self.fileobj = fileobj
        # tri-state: -1 = nothing seen yet, 1 = tabs seen, 0 = spaces seen
        self.found_tab = -1
        # offending lines, each prefixed with a <TAG> describing the problem
        self.errlist = []
    def readline(self):
        return self.fileobj.readline()
    def token_eater(self, ttype, tokstr, start, end, line):
        if ttype == INDENT:
            if line[0] == '\t':
                if self.found_tab == 0:
                    # already found indent w/space - error
                    self.errlist.append('<TAB> ' + line)
                else:
                    self.found_tab = 1
            elif line[0] == '\x20':
                if self.found_tab == 1:
                    # already found indent w/tab - error
                    self.errlist.append('<SPACE> ' + line)
                else:
                    self.found_tab = 0
        elif ttype == ERRORTOKEN:
            # tokenizer failure is reported as an error line too
            self.errlist.append('<COMPILATION ERROR> ' + line)
def get_tab_space_mixing( fileobj ):
    """Tokenize fileobj and return the list of offending lines
    collected by MixedIndentDetector (empty list = no mixing seen).
    The file offset of fileobj is restored before returning."""
    # need to restore ofs when finished
    ofs = fileobj.tell()
    o = MixedIndentDetector(fileobj)
    # NOTE(review): two-argument tokenize(readline, tokeneater) is the
    # Python 1.5/2.x callback API; Python 3's tokenize() takes only a
    # readline and returns an iterator -- confirm the target runtime.
    tokenize( o.readline, o.token_eater )
    # restore position
    fileobj.seek(ofs)
    return o.errlist
def has_tab_space_mixing( fileobj ):
    """
    Check if fileobj has mixed tabs & spaces.
    If so, you should NOT run guess_indentation, or do any sort
    of tab/untab on the file as you'd likely screw it up.
    """
    problems = get_tab_space_mixing(fileobj)
    return len(problems) > 0
def guess_indentation( fileobj ):
    """
    Given a file-like object, guess its indentation style.
    This is intended for use on Python source code, but
    works (somewhat) on random text files as well.
    Returns:
       (uses_tabs, tabwidth)
    If uses_tabs == 1, the file uses tab characters for
    indentation, and tabwidth isn't used.
    If uses_tabs == 0, the file uses spaces for indentation
    and tabwidth is the number of spaces per indent.
    The file offset of fileobj is preserved.  It is recommended
    that you pass fileobj with its offset set to zero, but
    it is not required.
    If the tokenizer hit an error, the return value is (-1,-1).
    If the file contains no indentation at all, a harmless default
    of (0,4) is returned (the values don't matter in that case).
    """
    # need to restore ofs when finished
    ofs = fileobj.tell()
    o = IndentDetector(fileobj)
    # NOTE(review): legacy tokenize(readline, tokeneater) callback API;
    # Python 3's tokenize() takes only a readline -- confirm runtime.
    tokenize( o.readline, o.token_eater )
    # restore position
    fileobj.seek(ofs)
    if not hasattr(o,'uses_tabs'):
        # file had no indentations, so it doesn't matter
        # what values I use
        return (0,4)
    else:
        # (-1,-1) here means the tokenizer reported an ERRORTOKEN
        return (o.uses_tabs, o.tabwidth)
#
# Can also run a standalone script to show problems over
# a tree.
#
# now the real code begins ...
from disthelper.treeops.treeops import *
import os
from stat import *
class TreeChecker(TreeOps):
    """TreeOps subclass that reports files mixing tab and space
    indentation, walking a tree per the standard tree options."""
    def __init__(self):
        TreeOps.__init__(self)
    def run(self, argv):
        """Parse argv (sys.argv conventions: argv[0] is the program
        name) and check every matched file; exits with status 1 if no
        file/dir argument was given."""
        # parse standard tree options (-r, -R, -x, etc.)
        p = TreeOptParser('indentcheck.py','Check indentation of a directory tree.')
        opts,args = p.parse_argv(argv)
        if len(args) == 0:
            print("** Must give a directory and/or file to check.")
            p.show_usage()
            sys.exit(1)
        # remember which files/dirs we couldn't access
        self.nofile = []
        self.nodir = []
        # walk the tree with globbing, etc.
        self.runtree(opts,args)
        # tell user which files/dirs I couldn't access
        if len(self.nofile):
            print("I could not access these files:")
            for f in self.nofile:
                print(" %s" % f)
        if len(self.nodir):
            print("I could not access these directories:")
            for d in self.nodir:
                print(" %s" % d)
    # - internal API - called as the tree is walked -
    def process_one_file(self,fullname,opts):
        "Report tab/space mixing in one matched file."
        # bugfix: use a context manager so the handle is closed
        # deterministically (the original leaked the open file object)
        with open(fullname,'r') as fileobj:
            errs = get_tab_space_mixing(fileobj)
        if len(errs):
            s = "ERROR: '%s' has mixed tab/space line beginnings." % fullname
            if not opts.verbose:
                s += ' (use -v for details)'
            print(s)
        if opts.verbose and len(errs):
            print("Bad lines are shown below:")
            for line in errs:
                print(' '+line)
    def process_one_dir(self,fullname):
        "Directories need no per-dir work."
        pass
    def dir_noaccess(self,fullname):
        "Record unreadable directories for the summary in run()."
        self.nodir.append(fullname)
if __name__ == '__main__':
    # command-line driver (guarded, so importing this module is safe)
    t = TreeChecker()
    # special case, if user gave me no switches (except
    # possibly -v), but includes dirnames, add the standard
    # options that make sense (-r -x py) for convenience.
    alldirs = 1
    for arg in sys.argv[1:]:
        if arg != '-v' and not os.path.isdir(arg):
            alldirs = 0
    if len(sys.argv) > 1 and alldirs:
        # all args are directories -> recurse them for .py files
        t.run(['dummy','-r','-x','py'] + sys.argv[1:])
    else:
        # otherwise pass argv through untouched
        t.run(sys.argv)
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,056 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/treeops/lineops.py |
#
# a lineop takes a single line of text and performs
# a transformation on it, returning the new text.
#
# frankm@hiwaay.net
#
# public interface
__all__ = ['to_platform_text','to_portable_text',
'tabify_line', 'untabify_line', 'copy_line']
import os, re
# a null transform for convenience
def copy_line(line):
    """Identity transform: return the line unchanged."""
    return line
def strip_line_ending( line ):
    """Strip every '\\r' and '\\n' from BOTH ends of line.

    Handles all ending combinations - this cleans up lines in the case
    of editing a text file on the 'wrong' platform and ending up with
    weird endings.  The front is checked too since 'foreign' line
    endings can be split strangely.  Interior newlines are untouched.
    """
    # str.strip with a character set does exactly what the two old
    # character-at-a-time while loops did, in one C-level pass
    # (the old slicing loop was O(n^2) in the worst case)
    return line.strip('\r\n')
#
# I define '\n' endings as 'portable' text since you can
# take a file in '\n' format and do this:
#
# open( out, 'w' ).write( open( in, 'r' ) )
#
# ... and end up with the correct line endings on all
# platforms. The same is NOT true if you take a DOS file
# and do the above on Unix - it'll have wrong line endings.
# Therefore, '\n' is the defacto 'portable' format.
#
def to_platform_text( line ):
    """Convert line to platform-specific format, replacing
    whatever line ending it currently has with os.linesep."""
    # empty input stays empty -- don't invent an ending
    if not line:
        return ''
    return strip_line_ending(line) + os.linesep
def to_portable_text( line ):
    """Convert line to the portable (newline-terminated) format,
    replacing whatever line ending it currently has."""
    # empty input stays empty -- don't invent an ending
    if not line:
        return ''
    return strip_line_ending(line) + '\n'
#-----------------------------------------------------
# start of code adapted from IDLE
#-----------------------------------------------------
# modified from tabify/untabify_region_event to
# just do a single line
def tabify_line( line, TABWIDTH=4 ):
    "Tabify a line (replace leading spaces with tabs)"
    # measure the leading whitespace, then rebuild it as
    # tabs plus a remainder of spaces
    raw, effective = classifyws(line, TABWIDTH)
    ntabs, nspaces = divmod(effective, TABWIDTH)
    prefix = '\t' * ntabs + ' ' * nspaces
    return prefix + line[raw:]
def untabify_line( line, TABWIDTH=4 ):
    """Untabify a line (replace LEADING tabs with spaces).

    Embedded tabs are preserved -- line.expandtabs() would replace
    those as well, which is why it isn't used here.
    """
    # bugfix: the old 'while line[i] == "\t"' indexed past the end of
    # an empty or all-tab line (IndexError); counting via lstrip avoids
    # any indexing entirely
    body = line.lstrip('\t')
    ntabs = len(line) - len(body)
    return ' ' * (ntabs * TABWIDTH) + body
def classifyws(s, tabwidth):
    """Examine the leading whitespace of s.

    Returns the pair (raw, effective): raw is the number of leading
    whitespace characters, effective is the equivalent number of
    blanks once each tab expands to the next multiple of tabwidth.
    """
    raw = 0
    effective = 0
    for ch in s:
        if ch == ' ':
            raw += 1
            effective += 1
        elif ch == '\t':
            # a tab advances to the next tab stop
            raw += 1
            effective = (effective // tabwidth + 1) * tabwidth
        else:
            break
    return raw, effective
#-----------------------------------------------------
# end of code adapted from IDLE
#-----------------------------------------------------
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,057 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/attic/frankcc.py | #
# Obsolete
#
#
# frankm@hiwaay.net
#
from distutils.unixccompiler import UnixCCompiler
class FrankCompiler(UnixCCompiler):
    """Tracing wrapper around distutils' UnixCCompiler: prints each
    stage (preprocess/_compile/link) and delegates to the base class.

    (Marked obsolete in the original source; kept for reference.)
    """
    compiler_type = 'frankcc'
    def preprocess(self, source,
                   output_file=None, macros=None, include_dirs=None,
                   extra_preargs=None, extra_postargs=None):
        print("PREPROCESS")
        # bugfix: originally forwarded the misspelled name
        # 'exta_postargs' -- a guaranteed NameError on every call
        UnixCCompiler.preprocess(self, source, output_file, macros,
                                 include_dirs, extra_preargs, extra_postargs)
    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        print("_compile")
        # bugfix: originally delegated to the nonexistent
        # 'UnixCCompiler._compiler' (AttributeError)
        UnixCCompiler._compile(self, obj, src, ext, cc_args,
                               extra_postargs, pp_opts)
    def link(self, target_desc, objects,
             output_filename, output_dir=None, libraries=None,
             library_dirs=None, runtime_library_dirs=None,
             export_symbols=None, debug=0, extra_preargs=None,
             extra_postargs=None, build_temp=None, target_lang=None):
        print("LINK")
        UnixCCompiler.link(self, target_desc, objects,
                           output_filename, output_dir, libraries,
                           library_dirs, runtime_library_dirs,
                           export_symbols, debug, extra_preargs,
                           extra_postargs, build_temp, target_lang)
    def library_dir_option(self, dir):
        "Delegate unchanged to the base class."
        return UnixCCompiler.library_dir_option(self, dir)
    def find_library_file(self, dirs, lib, debug=0):
        "Delegate unchanged to the base class."
        return UnixCCompiler.find_library_file(self, dirs, lib, debug)
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,058 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/scripts/comptree.py |
# Sample app showing how to use TreeOps
#
# This byte-compiles all matched files.
#
# frankm@hiwaay.net
#
# make sure I can import disthelper
import sys
sys.path.insert(0,'.')
from . import grab_disthelper
# now the real code begins ...
from disthelper.treeops.treeops import *
import os
from stat import *
from time import strftime, localtime
import compiler
class TreeCompiler(TreeOps):
    """Example of a TreeOp that byte-compiles matched files."""
    def __init__(self):
        TreeOps.__init__(self)
    def run(self, argv):
        """Parse argv (sys.argv conventions: argv[0] is the program
        name) and byte-compile everything matched; exits with status 1
        if no file/dir argument was given."""
        # parse standard tree options (-r, -R, -x, etc.)
        p = TreeOptParser('comptree.py','Byte-compile a tree of files.')
        opts,args = p.parse_argv(argv)
        if len(args) == 0:
            print("** Must give a directory and/or file to byte-compile.")
            p.show_usage()
            sys.exit(1)
        # remember which files/dirs we couldn't access
        self.nofile = []
        self.nodir = []
        # walk the tree with globbing, etc.
        self.runtree(opts,args)
        # tell user which files/dirs I couldn't access
        if len(self.nofile):
            print("I could not access these files:")
            for f in self.nofile:
                print(" %s" % f)
        if len(self.nodir):
            print("I could not access these directories:")
            for d in self.nodir:
                print(" %s" % d)
    # - internal API - called as the tree is walked -
    def process_one_file(self,fullname,opts):
        "Byte-compile one matched file."
        # bugfix: the 'compiler' module (compiler.compileFile) was
        # removed in Python 3; py_compile is the supported stdlib
        # equivalent.  Imported locally to keep this block standalone.
        import py_compile
        print("Byte-compiling %s ..." % fullname)
        py_compile.compile(fullname)
    def process_one_dir(self,fullname):
        "Directories need no per-dir work."
        pass
    def dir_noaccess(self,fullname):
        "Record unreadable directories for the summary in run()."
        self.nodir.append(fullname)
# Command-line driver.
# NOTE(review): unlike indentcheck.py there is no __main__ guard here,
# so this runs on *import* as well -- confirm that is intended.
t = TreeCompiler()
# special case, if user gave me no switches, but
# includes dirnames, add the standard options that
# make sense (-r -x py) for convenience.
alldirs = 1
for arg in sys.argv[1:]:
    if not os.path.isdir(arg):
        alldirs = 0
if len(sys.argv) > 1 and alldirs:
    # all args are directories -> recurse them for .py files
    t.run(['dummy','-r','-x','py'] + sys.argv[1:])
else:
    # otherwise pass argv through untouched
    t.run(sys.argv)
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,059 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/treeops/treeops.py |
#
# TreeOps encompasses common code for working with
# directory trees - walking the tree, matching the
# filenames vs. a set of regexes, etc.
#
# To do something useful, derive a class from TreeOps and
# override the private API (process_one_file, process_one_dir, etc.)
#
# Note this module should be compatible to Python 1.5.2
#
# frankm@hiwaay.net
#
import os, sys, re
from disthelper.util.cmdline import *
from types import *
from glob import glob
# public API
__all__ = ['TreeOps','TreeOptParser']
class TreeOps:
    """Generic directory-tree walker.

    Subclasses override process_one_file / process_one_dir /
    dir_noaccess (and optionally on_end_processing); runtree() handles
    recursion, include/exclude pattern matching and symlink skipping.
    """
    def __init__(self):
        # recursion-depth counter so on_end_processing() fires exactly
        # once, when the outermost runtree() call finishes
        self.__recurse_level = 0
        pass
    def runtree(self,opts,things):
        """Run using opts and a list of things to process
        (typically these are the (opts,args) returned from
        TreeOptParser.parse_argv)."""
        self.__recurse_level = self.__recurse_level + 1
        if type(things) not in [list,tuple]:
            self.on_end_processing() # let caller cleanup before bailing
            raise Exception("'things' must be a sequence")
        # this could be more compact, but have to maintain 1.5.2 compat
        descend = []
        for thing in things:
            # exclusion patterns win over everything else
            matched = 0
            for r in opts.exclude_list:
                if r.match(thing):
                    matched = 1
                    break
            if matched:
                #print "EXCLUDE ",thing
                continue # skip name in exclusion list
            # symlinks are skipped entirely, never followed
            if os.path.islink(thing):
                continue
            if os.path.isfile(thing):
                # match on filename
                matched = 0
                for r in opts.regexlist:
                    if r.match(thing):
                        matched = 1
                        break
                # an empty regexlist means "match everything"
                if matched or len(opts.regexlist) == 0:
                    # this is still a relative path, i.e. relative
                    # to 'thing', so it is correct to pass it here
                    self.process_one_file(thing,opts)
            elif os.path.isdir(thing):
                self.process_one_dir(thing)
                try:
                    names = os.listdir(thing)
                except:
                    # any listdir failure is treated as "no access"
                    # (bare except kept for 1.5.2 compatibility)
                    self.dir_noaccess(thing)
                    continue
                for name in names:
                    full = os.path.join(thing,name)
                    # always descend dir, no matching
                    if os.path.isdir( full ) and opts.recursive:
                        # finish processing all the files in this
                        # directory before descending subdirectories.
                        # this keeps things nice and simple for subclasses,
                        # knowing that we'll finish a directory completely
                        # before switching to another one
                        descend.append( full )
                    elif os.path.isfile( full ):
                        # this is still a relative path, i.e. relative
                        # to 'thing', so it is correct to pass it here
                        # (to be checked in block above)
                        self.runtree(opts, [full])
        # process subdirectories found
        if len(descend):
            self.runtree( opts, descend )
        self.__recurse_level = self.__recurse_level - 1
        if self.__recurse_level == 0:
            self.on_end_processing()
    # - internal API - subclasses do their work here -
    # General note: Why relative paths here?
    # Think e.g. 'zip -r' -- you need to know the relative
    # path that the user intended, since you don't want to
    # store the entire path from / (or c:).
    def process_one_file(self, name, opts):
        """
        Called for each matched file (name is relative path).
        'opts' are the same as were passed to process().
        """
        pass
    def process_one_dir(self, name):
        "Called for each directory (name is relative path)."
        pass
    def dir_noaccess(self, name):
        """Called when access is denied to a directory (name is relative path).
        Strictly informational, there is no provision to
        retry the operation."""
        pass
    def on_end_processing(self):
        """Called after all processing has completed, so
        subclasses can do any necessary cleanup."""
        pass
class TreeOptParser(BasicOptParser):
    """A specialization of BasicOptParser, which adds options
    common to programs that need to recurse a tree, selecting
    files by certain options."""
    def __init__(self,name,info):
        """
        name: Program name (for help text)
        info: One-liner description (for help text)
        """
        BasicOptParser.__init__(self,name,info)
        # options common to all TreeOpts
        self.add_boolopt( 'h', 'help', 'help', 'Show this help screen' )
        self.add_boolopt( 'r', 'recursive', 'recursive',
                          'Recurse subdirectories')
        self.add_listopt( 'R', 'recursive-glob', 'globlist',
                          'Like -r, but match filenames to pattern(s).\n\t(Seperate multiple patterns with commas.)')
        self.add_listopt( 'x', 'extension', 'extlist',
                          'Give a list of file extensions to match.\n\t(Separate multiple extensions with commas.)')
        # NOTE(review): the trailing comma on the next statement makes
        # it a harmless one-element tuple expression -- looks accidental
        self.add_boolopt( 'i', 'ignore-case', 'nocase',
                          'Ignore case when matching filenames.'),
        self.add_boolopt( 'v', 'verbose', 'verbose',
                          'Be verbose while running')
        self.add_listopt( '', 'exclude', 'exclude',
                          'Regular expression list of names to exclude')
    def parse_argv(self, argv, glob_args=1):
        """Parse command line args (typically you'll pass sys.argv,
        though any list of strings will do).  Note that if you pass
        a list of strings, the first one must be the program name.
        Returns (opts,args), just like BasicOptParser.process(),
        with these specializations:
           1. extlist & globlist are converted to regexes and
              stored in attr 'regexlist'.
           2. If -R given, -r is turned on as well.
           3. If glob_args == 1, args will be glob-expanded before
              returning.
           4. if --exclude given, turns patterns into regex list
              and stored in attr 'exclude_list'
        """
        # argv[0] (program name) is dropped before handing to the base
        opts,args = self.process(argv[1:])
        if opts.help:
            # -h/--help short-circuits everything else
            self.show_usage()
            sys.exit(0)
        regexlist = []
        # First, make a regex out of each extension match
        for ext in opts.extlist:
            regexlist.append( r'^.+\.%s$' % ext )
        # Now add any glob pattern the user specified with -R
        for glob_arg in opts.globlist:
            # turn the shell-style glob into a regex
            g = glob_arg.replace('.',r'\.').replace('*','.*')
            g = '^' + g + '$'
            regexlist.append(g)
        # compile them all
        if opts.nocase:
            regexlist = [re.compile(x,re.I) for x in regexlist]
        else:
            regexlist = [re.compile(x) for x in regexlist]
        # save into opts
        setattr(opts, 'regexlist', regexlist)
        # -R implies -r (simplifies checks later)
        if len(opts.globlist):
            setattr(opts, 'recursive', 1)
        # turn --exclude into regexes
        exclude_list = []
        for excl_arg in opts.exclude:
            # turn the shell-style glob into a regex
            g = excl_arg.replace('.',r'\.').replace('*','.*')
            g = '^' + g + '$'
            exclude_list.append(g)
        # compile them all
        if opts.nocase:
            exclude_list = [re.compile(x,re.I) for x in exclude_list]
        else:
            exclude_list = [re.compile(x) for x in exclude_list]
        # save into opts
        setattr(opts, 'exclude_list', exclude_list)
        # hm, initially I was appending '.' if args was empty,
        # but that needs to be a program-specific decision,
        # since it's not always desired
        if glob_args:
            # expand shell wildcards in the remaining args
            newargs = []
            for arg in args:
                newargs.extend( glob(arg) )
            args = newargs
        return opts,args
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,060 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/attic/iall.py |
#
# shortcut script to install Gnosis for all Python versions
# installed on the local machine
#
# Two ways to run:
#
# 1. If sitting in toplevel dir (above gnosis/), will build
# and install Gnosis from gnosis/ for all python versions,
# first removing any existing Gnosis installation.
#
# 2. If sitting in dist/, unpack the built sdist and install
# from it for all Python versions, first removing any
# existing Gnosis installation.
#
# Requires: tar, rm
#
import os, sys, re
from glob import glob
# so I don't have to do this everywhere below ..
# (runs at import time: bail out early on platforms where we don't
# know the PATH separator / executable naming conventions)
if os.name not in ['posix','os2','nt']:
    # need to know pathsep in PATH, .exe extension (see below)
    print("***")
    print("*** SORRY - don't know how to run on this platform.")
    print("***")
    sys.exit(1)
def run(cmd):
    """Echo cmd, execute it via the shell, and abort the whole
    script (exit status 1) if it fails."""
    print("%s" % cmd)
    status = os.system(cmd)
    if status != 0:
        print("ERROR")
        sys.exit(1)
def exe_in_path( name ):
    """Return 1 if a file named *name* exists in some PATH directory,
    else 0.

    Improvements over the original: the split uses os.pathsep (the old
    code hard-wired ':' for posix and ';' otherwise), and an unset
    PATH no longer crashes (None.split would raise AttributeError).
    """
    path_env = os.getenv('PATH')
    if path_env is None:
        # no PATH at all -> nothing can be found
        return 0
    for directory in path_env.split(os.pathsep):
        if os.path.isfile(os.path.join(directory, name)):
            return 1
    return 0
def enum_pythons():
    """Return the versioned python executables found on the PATH.

    When no versioned binary exists, falls back to the plain
    'python' / 'python.exe' name if that is present.
    """
    # check for versioned binaries first
    vers = ['2.0', '2.1', '2.2', '2.3']
    if os.name == 'posix':
        patt = ['python%s'] * len(vers)
    else:
        patt = ['python%s.exe'] * len(vers)
    allpyvers = [template % ver for template, ver in zip(patt, vers)]
    existpy = [exe for exe in allpyvers if exe_in_path(exe)]
    if not existpy:
        # on a system w/out versioned names, use default
        default = patt[0] % ''
        if exe_in_path(default):
            existpy = [default]
    return existpy
# Build & install Gnosis once per interpreter found on the PATH.  Two modes:
# sitting in dist/ (exactly one sdist tarball present) unpack-and-install;
# otherwise build/install straight from the source tree.
for py in enum_pythons():
    if os.name == 'posix':
        # remove previous version (only on posix, where it's
        # in a standard location)
        run('rm -rf /usr/lib/%s/site-packages/gnosis' % py)
    l = glob('Gnosis_Utils-*.tar.gz')
    if len(l) == 2:
        print("** Hey, delete the -master first, if you're sitting in dist/")
        sys.exit(1)
    if len(l) == 1:
        # I'm sitting in dist/ - unpack the sdist & install
        # NOTE(review): non-raw regex relies on '\.' passing through the
        # string literal unchanged; works, but a raw string would be cleaner
        m = re.match('(Gnosis_Utils-[0-9\.]+)\.tar\.gz',l[0])
        if not m:
            raise Exception("Yikes - what happened?")
        run('tar zxvf Gnosis_Utils*.tar.gz')
        os.chdir(m.group(1))
        run('%s setup.py build' % py)
        run('%s setup.py install' % py)
        os.chdir('..')
        run('rm -rf %s' % m.group(1))
    else:
        # I'm sitting above gnosis/ - build & install directly
        run('rm -rf build')
        run('%s setup.py build' % py)
        run('%s setup.py install' % py)
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,061 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/scripts/test/mega_tabber_test.py |
#
# run an untab/tab/untab cycle over a copy of the entire
# gnosis & disthelper trees and diff the results
#
import os,shutil,sys
from stat import *
# Sanity guards: run from the project toplevel only, and never clobber a
# scratch tree ('ttt') left over from a previous run.
if not os.path.isfile('setup.py'):
    raise Exception("You must run this from the toplevel")
if os.path.isdir('ttt'):
    raise Exception("ttt already exists - bailing out!")
os.mkdir('ttt')
os.chdir('ttt')
shutil.copytree('../gnosis','gnosis')
shutil.copytree('../disthelper','disthelper')
# untabbing is the most foolproof, so use untabbed sources as the reference
print("Untab first copy ...")
os.system('%s disthelper/scripts/untabtree.py -w 4 -r -x py disthelper gnosis' % sys.executable)
os.rename('gnosis','UT-gnosis')
os.rename('disthelper','UT-disthelper')
shutil.copytree('../gnosis','gnosis')
shutil.copytree('../disthelper','disthelper')
# untab copy
print("Untab second copy ...")
os.system('%s disthelper/scripts/untabtree.py -w 4 -r -x py disthelper gnosis' % sys.executable)
# then tab
print("Tab second copy ...")
os.system('%s disthelper/scripts/tabtree.py -r -x py disthelper gnosis' % sys.executable)
# and untab again, hopefully to original condition :-)
print("Untab second copy ...")
os.system('%s disthelper/scripts/untabtree.py -w 4 -r -x py disthelper gnosis' % sys.executable)
print("Diff first and second copies ...")
os.system('%s disthelper/scripts/difftree.py --exclude="*.pyc" -r UT-disthelper disthelper > diff.disthelper' % sys.executable)
os.system('%s disthelper/scripts/difftree.py --exclude="*pyc" -r UT-gnosis gnosis > diff.gnosis' % sys.executable)
# a round trip is lossless iff both diff files are empty
err = 0
if os.stat('diff.disthelper')[ST_SIZE] != 0:
    print("******* WARNING: ttt/diff.disthelper not 0 bytes,")
    err = 1
if os.stat('diff.gnosis')[ST_SIZE] != 0:
    # BUG FIX: this warning previously named diff.disthelper (copy-paste error)
    print("******* WARNING: ttt/diff.gnosis not 0 bytes,")
    err = 1
if err == 0:
    print("** SUCCESS!! Both diffs were zero bytes! **\n")
print("** You should `rm -rf ttt` after you inspect the results.")
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,062 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/attic/example-setup.py | #
# Obsolete
#
#
# frankm@hiwaay.net
#
from distutils.core import setup, Extension
import os, sys, re
from stat import *
from glob import glob
import distutils.ccompiler
from disthelper.misc import *
# interpreter version as "major.minor", used to find a matching swig binary
PYVER = "%d.%d" % (sys.version_info[0],sys.version_info[1])
def find_swig():
    """Locate a SWIG executable, preferring one suffixed with the Python version."""
    # first check for SWIG tagged with the Python version#
    # (I'm probably the only one who sets their system up like this :-)
    versioned = find_exe_in_path('swig%s' % PYVER)
    if versioned is not None:
        return versioned
    # else, find regular swig in path
    return find_exe_in_path('swig')
# locate SWIG once at import time; the generators below reuse this path
SWIG_PROG = find_swig()
# wrapper-file extensions SWIG emits for C vs C++ modules
swig_c_ext = "c"
swig_cpp_ext = "cxx"
# register a customized cygwin "-mno-cygwin" compiler variant with distutils
distutils.ccompiler.compiler_class['cyg4win32'] = \
    ('cyg4win32',
    'Cygwin4Win32Compiler',
    'Customized "cygwin -mno-cygwin"')
def run(cmd,ignore_err=0):
    """Run *cmd* via the shell; exit the script on failure unless *ignore_err*."""
    print("Command: ", cmd, ", cwd: ",os.getcwd())
    status = os.system(cmd)
    if status != 0 and not ignore_err:
        print("ERROR")
        sys.exit(1)
def mtime(file):
    """Return *file*'s last-modification time (integer seconds since epoch)."""
    info = os.stat(file)
    return info[ST_MTIME]
def fix_wrapper(file,modname):
    """Append a C trampoline entry point to a SWIG wrapper file (win32 only).

    SWIG defines "init_modname" but Python looks for "initmodname", so on
    non-posix platforms a tiny forwarding init function is appended.

    :param file: path of the generated *_wrap.c / *_wrap.cxx file
    :param modname: extension module name (without the leading underscore)
    """
    if os.name == 'posix': # fix only required on win32
        return
    print("Fixing %s ..." % file)
    # BUG FIX: the original opened the file in append mode and never
    # closed it; a context manager guarantees flush + close.
    with open(file, 'at') as fout:
        # for some reason, SWIG is defining "init_modname", but python
        # is looking for "initmodname" ... weird. so, add trampoline
        # entry point
        fout.write('#ifdef __cplusplus\n')
        fout.write('extern "C"\n')
        fout.write('#endif\n')
        fout.write("SWIGEXPORT(void)init%s(void){SWIG_init();}\n"%modname)
def gen_swig(basename,swig_prog,wrap_ext,swig_opts):
    """Regenerate the SWIG wrapper and shadow module for *basename* when stale.

    Rebuilds when either output (.py / _wrap.*) is missing or older than
    either input (.h / .i).
    """
    hfile = '%s.h' % basename
    ifile = '%s.i' % basename
    pyfile = '%s.py' % basename
    wrapfile = '%s_wrap.%s' % (basename, wrap_ext)
    outputs = (pyfile, wrapfile)
    stale = not all(os.path.isfile(out) for out in outputs)
    if not stale:
        newest_input = max(mtime(hfile), mtime(ifile))
        stale = any(newest_input > mtime(out) for out in outputs)
    if stale:
        print("Creating %s & %s" % (pyfile,wrapfile))
        run('%s %s %s' % (swig_prog,swig_opts,ifile))
        fix_wrapper(wrapfile,basename)
def gen_c_swigs(modlist):
    """Run SWIG in plain-C mode over every module basename in *modlist*."""
    for basename in modlist:
        gen_swig(basename, SWIG_PROG, swig_c_ext, '-python')
def gen_cpp_swigs(modlist,extra_swig_args=''):
    """Run SWIG in C++/shadow mode over every module basename in *modlist*.

    NOTE(review): *extra_swig_args* is accepted for interface
    compatibility but is not currently used by the body.
    """
    for basename in modlist:
        gen_swig(basename, SWIG_PROG, swig_cpp_ext, '-c++ -shadow -python')
class SWIG_Extension(Extension):
    """Common base for the SWIG-generated distutils Extensions below.

    BUG FIX: *libs* previously defaulted to a shared mutable list
    (classic mutable-default pitfall); None is now the sentinel and a
    fresh empty list is substituted per call.
    """
    def __init__(self,name,sources,libs=None):
        Extension.__init__(self,name=name,sources=sources,
                           libraries=[] if libs is None else libs)
class C_SWIG(SWIG_Extension):
    """A C extension module using SWIG.
    Expects three files in dir:
       name.c = Module source
       name.h = Module header
       name.i = SWIG interface for module.
    extra_sources is a list of filenames to include in
    the compilation."""
    def __init__(self,name,extra_sources=None):
        # BUG FIX: extra_sources=[] was a shared mutable default
        extra = [] if extra_sources is None else list(extra_sources)
        SWIG_Extension.__init__(self,name=name,
                                sources=['%s.c' % name,
                                         '%s_wrap.%s' % \
                                         (name,swig_c_ext)] + extra)
class CPP_SWIG(SWIG_Extension):
    """A C++ extension module using SWIG.
    Expects three files in dir:
       name.cpp = Module source
       name.h = Module header
       name.i = SWIG interface for module.
    extra_sources is a list of filenames to include in
    the compilation."""
    def __init__(self,name,extra_sources=None):
        # BUG FIX: extra_sources=[] was a shared mutable default
        extra = [] if extra_sources is None else list(extra_sources)
        libs = []
        if os.name == 'posix':
            # g++-built modules need the C++ runtime linked explicitly
            libs.append('stdc++')
        SWIG_Extension.__init__(self,name=name,
                                sources=['%s.cpp' % name,
                                         '%s_wrap.%s' % \
                                         (name,swig_cpp_ext)] + extra,
                                libs=libs)
# module name lists and the Extension instances handed to setup()
C_MODS = ['test_c']
CPP_MODS = ['test_cpp']
ext_list = []
ext_list.append(C_SWIG('test_c'))
ext_list.append(CPP_SWIG('test_cpp'))
def do_setup():
    """Invoke distutils setup() with this test project's metadata."""
    #try:
    setup ( name = 'test',
            version = '0.1',
            description = "testing extension modules",
            author = "Frank McIngvale",
            author_email= "frankm@hiwaay.net",
            url = 'localhost',
            ext_modules = ext_list,
            license = "GPL",
            )
    # return 1
    #except:
    # return 0
def copy_libs_to_cwd(modlist):
    """Copy the built extension modules from build/lib.* into the cwd,
    renaming each mod -> _mod (the SWIG shadow-module convention)."""
    # g_patt is where setup placed the built extensions
    if os.name == 'posix':
        pattern = 'build/lib.linux*'
    else:
        pattern = 'build/lib.win32*'
    matches = glob(pattern)
    if len(matches) != 1:
        print("Can't find libdir!")
        sys.exit(1)
    libdir = matches[0]
    for mod in modlist:
        # under Linux/posix, rename mod.so to _mod.so.
        # under win32, rename mod.pyd to _mod.pyd
        if os.name == 'posix':
            cmd = "cp %s/%s.so _%s.so" % (libdir, mod, mod)
        else:
            cmd = "copy %s\\%s.pyd _%s.pyd" % (libdir, mod, mod)
        print(cmd)
        run(cmd)
# crude command dispatch: "build" / "info" / "clean" taken from sys.argv
if 'build' in sys.argv:
    gen_c_swigs(C_MODS)
    gen_cpp_swigs(CPP_MODS)
    #do_setup()
    #copy_libs_to_cwd(C_MODS+CPP_MODS)
elif 'info' in sys.argv:
    print("Installed programs:")
    print(" SWIG = %s" % SWIG_PROG)
    print(distutils.ccompiler.new_compiler(compiler='mingw32'))
elif 'clean' in sys.argv:
    os.system('rm -rf build *~ *.so *.pyd *_wrap.%s *_wrap.%s' % (swig_c_ext,swig_cpp_ext))
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,063 | MEERQAT/Gnosis_Utils | refs/heads/master | /disthelper/scripts/tabtree.py |
#
# Tabify an entire directory tree, with filename matching.
#
# frankm@hiwaay.net
#
# make sure I can import disthelper
# make sure I can import disthelper
import sys
sys.path.insert(0,'.')
from . import grab_disthelper
# now the real code begins ...
from disthelper.treeops.treeops import *
from disthelper.treeops.fileops import *
# NOTE: the imported copy_line is deliberately shadowed by a local
# null transform further down in this script
from disthelper.treeops.lineops import tabify_line, copy_line
from .indentcheck import *
# define the command-line arg parser
class argv_parser(TreeOptParser):
    """Command-line parser for tabtree.py: standard tree options plus -w."""
    def __init__(self):
        TreeOptParser.__init__(self,'tabtree.py',
            'Tabify a directory tree, with filename matching.')
        # add my specific options
        self.add_intopt('w', 'tabwidth', 'tabwidth',
            "Set the tab width. It is recommended you DON'T use this if you're\n\ttabifying Python code. However, for non-Python code, it is\n\tbetter to use this option.")
# parse the command line; with a glob but no targets, default to cwd
p = argv_parser()
opts,args = p.parse_argv(sys.argv)
if len(args) == 0:
    if len(opts.regexlist) > 0:
        # user gave a glob but no targets - add cwd
        args.append('.')
    else:
        # don't know what user wants
        p.show_usage()
        sys.exit(1)
# need a null transform
def copy_line(line):
    """Null line transform: hand *line* back untouched.

    Deliberately shadows the copy_line imported above so a plain local
    function serves as the no-op lineop.
    """
    return line
class TabifyFileTransform(FileTransformFromLineOp):
    """File transform that tabifies each line, guessing the tab width per
    file when none is supplied and refusing files with mixed indentation."""
    def __init__(self,tabwidth=None):
        FileTransformFromLineOp.__init__(self, self.my_tabify)
        # None means "guess per-file inside process()"
        self.TABWIDTH = tabwidth
    def my_tabify(self,line):
        return tabify_line(line, self.TABWIDTH)
    def process(self, file_out, file_in):
        # save original value to restore at end (a per-file guess may
        # temporarily override TABWIDTH)
        tw = self.TABWIDTH
        # set lineop, might change it below
        self.set_lineop( self.my_tabify )
        if has_tab_space_mixing( file_in ):
            # too dangerous to try tabifying with mixed line beginnings
            print("ERROR: Skipping file '%s' - has mixed tabs & spaces" % file_in.name)
            self.set_lineop( copy_line )
        elif self.TABWIDTH is None:
            # -w not given, try and guess tabwidth
            (uses_tabs, tabwidth) = guess_indentation(file_in)
            if uses_tabs == 1:
                print("** NOTE ** %s is already tabified - not changing." % \
                      file_in.name)
                self.set_lineop( copy_line )
            elif uses_tabs < 0 and tabwidth < 0:
                # BUG FIX: the original also tested "self.TABWIDTH < 0"
                # here, but TABWIDTH is always None on this branch and
                # "None < 0" raises TypeError on Python 3.
                print("** NOTE ** Can't determine tab settings for %s - not changing." % \
                      file_in.name)
                self.set_lineop( copy_line )
            elif uses_tabs == 0:
                self.TABWIDTH = tabwidth
        FileTransformFromLineOp.process(self, file_out, file_in)
        self.TABWIDTH = tw
self.TABWIDTH = tw
# make a file operation
fileop = TabifyFileTransform(opts.tabwidth)
# ... into a tree operation
treeop = TreeOpFromFileTransform( fileop )
# run the tree
treeop.runtree(opts, args)
| {"/disthelper/scripts/dirtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/attic/textify.py": ["/disthelper/treeops/lineops.py"], "/disthelper/scripts/plattext.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py"], "/disthelper/scripts/rmfind.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/setuplib.py": ["/disthelper/misc.py", "/disthelper/treeops/__init__.py"], "/disthelper/scripts/difftree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/misc.py"], "/disthelper/attic/make_mingw_pylibs.py": ["/disthelper/find_python.py", "/disthelper/misc.py"], "/setup.py": ["/disthelper/misc.py", "/disthelper/setuplib.py", "/disthelper/find_python.py"], "/disthelper/treeops/__init__.py": ["/disthelper/treeops/treeops.py"], "/disthelper/find_python.py": ["/disthelper/misc.py"], "/disthelper/scripts/untabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"], "/disthelper/treeops/fileops.py": ["/disthelper/misc.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/indentcheck.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/scripts/comptree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py"], "/disthelper/treeops/treeops.py": ["/disthelper/util/cmdline.py"], "/disthelper/attic/example-setup.py": ["/disthelper/misc.py"], "/disthelper/scripts/tabtree.py": ["/disthelper/scripts/__init__.py", "/disthelper/treeops/treeops.py", "/disthelper/treeops/fileops.py", "/disthelper/treeops/lineops.py", "/disthelper/scripts/indentcheck.py"]} |
64,133 | MhYao2014/MNIST | refs/heads/master | /SGD_Train.py | import numpy as np
import datetime
from Train import Train_hyperparam
from NN import NN
from data_process import load_train_labels
from data_process import load_train_images
def one_hot_transformer(labels_numpy):
    """Convert integer class labels to one-hot rows.

    Generalized: works for any number of samples (the original
    hard-coded 60000 rows), and no longer uses np.int, which was removed
    in NumPy 1.24.

    :param labels_numpy: array of shape (n_samples,) with values in 0..9
    :return: float array of shape (n_samples, 10)
    """
    n_samples = labels_numpy.shape[0]
    y_truth = np.zeros([n_samples, 10])
    y_truth[np.arange(n_samples), labels_numpy.astype(np.int64)] = 1
    return y_truth
class SGD_Train(Train_hyperparam):
    """Mini-batch SGD driver: trains until the smoothed accuracy reaches
    100 - stop_criterion, evaluating once per "epoch" of batches."""
    def __init__(self):
        pass
    def sgd_train(self, fnn, images_vectors, labels_numpy):
        '''
        This method will organize the training process.
        :param fnn: A instance of FNN class.
        :param images_vectors: A numpy array with shape (60000, 784)
        :param labels_numpy: A numpy array with shape (60000, )
            (NOTE(review): __main__ passes one-hot labels of shape
            (60000, 10) - confirm which shape is intended)
        :return: None
        '''
        accuracy = 0
        Accuracy = 0  # exponentially smoothed accuracy (0.9 old / 0.1 new)
        wrong_persent = 100  # NOTE(review): never used after this assignment
        iteration = 0
        starttime = datetime.datetime.now()
        while Accuracy < 100 - self.train_hyperparam['stop_criterion']:
            images_batch, labels_batch = fnn.batcher(images_vectors, labels_numpy)
            #pred_category, wrong_persent = fnn.forward(images_batch, labels_batch, if_train=True)
            fnn.forward(images_batch, labels_batch, if_train=True)
            fnn.backward(labels_batch,self.train_hyperparam['step_size'])
            iteration += 1
            if iteration > self.train_hyperparam['max_iteration']:
                print ("The training process may failed, we have trained for %d iterations.\n" % self.train_hyperparam['max_iteration'])
                break
            # once per epoch: evaluate with dropout disabled, then restore it
            if iteration % (60000 // fnn.model_hyperparam['batch_size']) == 0:
                fnn.model_hyperparam['dropout_percent'] = 0
                accuracy = self.evaluate(fnn, images_vectors, labels_numpy)
                Accuracy = Accuracy * 0.9 + accuracy * 0.1
                print('This is the %dth iterations, and the accuracy on the test data set is: %f%%' %
                      (iteration // (60000 // fnn.model_hyperparam['batch_size']),Accuracy))
                fnn.model_hyperparam['dropout_percent'] = 0.05
            # NOTE(review): this smoothing also runs every iteration, so an
            # evaluation epoch updates Accuracy twice - confirm intended
            Accuracy = Accuracy*0.9 + accuracy * 0.1
        endtime = datetime.datetime.now()
        print ("\nThe iterations take about %d seconds\n" % (endtime - starttime).seconds)
        print ('\nThe training process finished !\n')
    def evaluate(self, fnn, images_vectors, labels_numpy):
        # accuracy over the first 50000 samples, taken in batch_size chunks;
        # labels are one-hot, so nonzero(...)[1] recovers class indices
        account = 0
        for i in range(50000 // fnn.model_hyperparam['batch_size']):
            pred_labels, _ = fnn.forward(images_vectors[i * fnn.model_hyperparam['batch_size']:
                                         (i + 1) *fnn.model_hyperparam['batch_size']],
                                         labels_numpy[i * fnn.model_hyperparam['batch_size']:
                                         (i + 1) *fnn.model_hyperparam['batch_size']],
                                         if_train=True)
            account += np.nonzero(pred_labels - np.nonzero(labels_numpy[i * fnn.model_hyperparam['batch_size']:
                                  (i + 1) *fnn.model_hyperparam['batch_size']])[1])[0].shape[0]
        accuracy = (100 - 100 * (account / 50000))
        return accuracy
if __name__ == '__main__':
    ###################
    # path to the training data's images and labels
    ###################
    train_images_idx3_ubyte_file = './train-images.idx3-ubyte'
    train_labels_idx1_ubyte_file = './train-labels.idx1-ubyte'
    ##################
    # Here we go
    ##################
    images_numpy = load_train_images(idx3_ubyte_file=train_images_idx3_ubyte_file)
    labels_numpy = load_train_labels(idx1_ubyte_file=train_labels_idx1_ubyte_file)
    labels_numpy = one_hot_transformer(labels_numpy)
    print('\nThe shape of all data images are:', images_numpy.shape)
    print('\nThe shape of all data labels are:', labels_numpy.shape)
    # flatten 28x28 images to 784-vectors and scale pixels into [0, 1]
    images_vectors = images_numpy.reshape((60000, -1)) / 255
    fnn = NN()
    train = SGD_Train()
    # NOTE(review): these item assignments mutate the class-level dicts
    # (train_hyperparam / model_hyperparam) shared by all instances
    train.train_hyperparam['stop_criterion'] = 2
    fnn.model_hyperparam['batch_size'] = 128
    fnn.model_hyperparam['layer1_dim'] = 134
    fnn.model_hyperparam['layer2_dim'] = 34
    fnn.model_hyperparam['layer4_dim'] = 10
    fnn.model_hyperparam['dropout_percent'] = 0.05
    print('\nThe hyperparameters of this fully connected neuron network are:\n',fnn.model_hyperparam)
    print('The hyperparameters of training process are:\n',train.train_hyperparam)
    train.sgd_train(fnn=fnn ,images_vectors=images_vectors[0:60000] ,labels_numpy=labels_numpy[0:60000])
    # final evaluation on the last 10000 samples with dropout off
    Accuracy = 0
    account = 0
    fnn.model_hyperparam['dropout_percent'] = 0
    for i in range(10000 // fnn.model_hyperparam['batch_size']):
        pred_labels, _ = fnn.forward(images_vectors[50000 + i * fnn.model_hyperparam['batch_size']:50000 + (i + 1) * fnn.model_hyperparam['batch_size']],
                                     labels_numpy[50000 + i * fnn.model_hyperparam['batch_size']:50000 + (i + 1) * fnn.model_hyperparam['batch_size']], if_train=True)
        account += np.nonzero(pred_labels - np.nonzero(labels_numpy[50000 + i * fnn.model_hyperparam['batch_size']:
                              50000 + (i + 1) * fnn.model_hyperparam['batch_size']])[1])[0].shape[0]
    print('The accuracy on the whole data set is %f %%:\n' % (100 - 100 * (account / 10000)))
64,134 | MhYao2014/MNIST | refs/heads/master | /FNN.py | import numpy as np
from data_process import load_train_labels
from data_process import load_train_images
class FNN_hyperparam():
    """Namespace for the network-architecture hyperparameters.

    NOTE: model_hyperparam is a class-level dict; item assignments on any
    instance mutate the shared dict (SGD_Train's __main__ relies on this).
    """
    def __init__(self):
        pass
    # layer widths and batch size; extended at runtime with keys such as
    # 'dropout_percent' by the callers
    model_hyperparam = {
        'vari_dim': 784,
        'layer1_dim': 128,
        'layer2_dim': 64,
        'layer3_dim': 30,
        'layer4_dim': 10,
        'batch_size': 512,
    }
class FNN(FNN_hyperparam):
    """Four-layer fully connected network implemented with raw numpy.

    Weights/biases live in self.model_param (list, input->layer4 order);
    forward() caches every activation in self.middle_results for backprop.

    BUG FIX: all uses of np.int (removed in NumPy 1.24) replaced with the
    builtin int, which np.int merely aliased.
    """
    def __init__(self):
        self.model_param, self.middle_results = self.initialize()
    def initialize(self):
        '''
        Initialize all the parameters (weight matrices and bias vectors)
        of the network with standard-normal random numbers.
        :return: (model_param list, empty middle-results dict)
        '''
        W_input2layer1 = np.random.randn(784,
                                         self.model_hyperparam['layer1_dim'])
        b_input2layer1 = np.random.randn(1,self.model_hyperparam['layer1_dim'])
        W_layer12layer2 = np.random.randn(self.model_hyperparam['layer1_dim'],
                                          self.model_hyperparam['layer2_dim'])
        b_layer12layer2 = np.random.randn(1,self.model_hyperparam['layer2_dim'])
        W_layer22layer3 = np.random.randn(self.model_hyperparam['layer2_dim'],
                                          self.model_hyperparam['layer3_dim'])
        b_layer22layer3 = np.random.randn(1,self.model_hyperparam['layer3_dim'])
        W_layer32layer4 = np.random.randn(self.model_hyperparam['layer3_dim'],
                                          self.model_hyperparam['layer4_dim'])
        b_layer32layer4 = np.random.randn(1,self.model_hyperparam['layer4_dim'])
        return ([W_input2layer1, b_input2layer1,
                 W_layer12layer2, b_layer12layer2,
                 W_layer22layer3, b_layer22layer3,
                 W_layer32layer4, b_layer32layer4], {})
    def batcher(self, images_vectors, labels_numpy):
        '''
        Randomly take "batch_size" samples out of the training data.
        NOTE(review): indices are drawn from the first 50000 samples only,
        presumably reserving the last 10000 as a held-out set - confirm.
        :param images_vectors: A numpy array with shape (60000, 784)
        :param labels_numpy: A numpy array with shape (60000, )
        :return images_batch: A numpy array with shape (batch_size, 784)
        :return labels_batch: A numpy array with shape (batch_size, )
        '''
        index = np.random.choice(50000, self.model_hyperparam['batch_size'])
        images_batch = images_vectors[index]
        labels_batch = labels_numpy[index]
        return images_batch, labels_batch
    def sigmoid(self, input):
        '''
        Compute the sigmoid function for the input here.
        :param input: A scalar or numpy array.
        :return output: A scalar or numpy array sigmoid(input)
        '''
        output = 1.0 / (1.0 + np.exp(- input))
        return output
    def softmax(self,input):
        """
        Compute the softmax function for each row of the input x.
        :param input: A D dimensional vector or N X D dimensional numpy matrix.
        :return input: Softmax(input)
        """
        orig_shape = input.shape
        if len(input.shape) > 1:
            # subtract the row max first for numerical stability
            minus_max_row = lambda a: a - np.max(a)
            input = np.apply_along_axis(minus_max_row, 1, input)
            input = np.exp(input)
            denomi_row = lambda a: 1.0 / np.sum(a)
            denomi = np.apply_along_axis(denomi_row, 1, input)
            input = input * denomi.reshape(-1,1)
        else:
            input_max = np.max(input)
            input = input - input_max
            numerator = np.exp(input)
            denomi = 1.0 / np.sum(numerator)
            input = numerator.dot(denomi)
        assert input.shape == orig_shape
        return input
    def forward(self,images_batch, labels_batch=None, if_train=False):
        '''
        Run the forward pass, caching activations in self.middle_results.
        :param images_batch: A numpy array with shape (batch_size, 784)
        :param labels_batch: A numpy array with shape (batch_size, );
            required when if_train is True
        :param if_train: when True, also record per-sample losses in
            middle_results['loss'] and compute the wrong-prediction rate
        :return: (pred_category, wrong_percent) - NOTE: the second element
            is a percentage of wrong predictions, not a loss value
        '''
        layer1_before = np.matmul(images_batch, self.model_param[0]) + self.model_param[1]
        self.middle_results['layer1_before'] = layer1_before
        layer1 = self.sigmoid(layer1_before)
        self.middle_results['layer1'] = layer1
        layer2_before = np.matmul(layer1, self.model_param[2]) + self.model_param[3]
        self.middle_results['layer2_before'] = layer2_before
        layer2 = self.sigmoid(layer2_before)
        self.middle_results['layer2'] = layer2
        layer3_before = np.matmul(layer2, self.model_param[4]) + self.model_param[5]
        self.middle_results['layer3_before'] = layer3_before
        layer3 = self.sigmoid(layer3_before)
        self.middle_results['layer3'] = layer3
        layer4_before = np.matmul(layer3, self.model_param[6]) + self.model_param[7]
        self.middle_results['layer4_before'] = layer4_before
        layer4 = self.softmax(layer4_before)
        self.middle_results['layer4'] = layer4
        if if_train:
            # per-sample cross-entropy, kept for backward()
            logit_pred = -np.log(layer4)
            self.middle_results['loss'] = np.choose(labels_batch.astype(int), logit_pred.T)
            pred_category = np.argmax(layer4, axis=1)
            right = np.where((pred_category - labels_batch.astype(int)) == 0)[0].shape[0]
            wrong_percent = 100 - (right / labels_batch.shape[0] * 100)
        else:
            pred_category = np.argmax(layer4, axis=1)
            wrong_percent = 0
        return (pred_category, wrong_percent)
    def update_param(self, model_param):
        '''
        Take the new model parameters and store them in fnn.model_param.
        :param model_param: list of weight/bias arrays in the same order
            as produced by initialize()
        :return: None
        '''
        self.model_param = model_param
        return
if __name__ == '__main__':
    ###################
    # path to the training data's images and labels
    ###################
    train_images_idx3_ubyte_file = './train-images.idx3-ubyte'
    train_labels_idx1_ubyte_file = './train-labels.idx1-ubyte'
    ##################
    # Here we go
    ##################
    images_numpy = load_train_images(idx3_ubyte_file=train_images_idx3_ubyte_file)
    labels_numpy = load_train_labels(idx1_ubyte_file=train_labels_idx1_ubyte_file)
    print('\nThe shape of all data images are:', images_numpy.shape)
    print('\nThe shape of all data labels are:', labels_numpy.shape)
    images_vectors = images_numpy.reshape((60000, -1))
    fnn = FNN()
    images_batch, labels_batch = fnn.batcher(images_vectors=images_vectors,
                                             labels_numpy=labels_numpy)
    # NOTE(review): forward() returns (pred_category, wrong_percent), so
    # "loss" below is actually the wrong-prediction percentage
    pred_cate, loss = fnn.forward(images_batch, labels_batch, if_train=True)
    print('\nThe predicted category are:\n', pred_cate)
    # NOTE(review): np.int was removed in NumPy 1.24; use int instead
    print('\nThe grand-truth category are:\n', labels_batch.astype(np.int))
    print('\n The average loss is:', loss)
    print('\n',fnn.middle_results['loss'].shape)
64,135 | MhYao2014/MNIST | refs/heads/master | /NN.py | import numpy as np
from FNN import FNN
from Neuron import Neuron
class NN(FNN):
    """Three-layer network assembled from Neuron building blocks.

    Overrides FNN's forward/backward to route through the Neuron objects
    held in self.net instead of raw weight matrices.
    """
    def __init__(self):
        self.net = self.build_net()
    def build_net(self):
        """Wire input -> hidden -> softmax-output Neuron layers using the
        dimensions declared in model_hyperparam."""
        h_input = Neuron(name='h_input',
                         variable_dim=self.model_hyperparam['vari_dim'],
                         hidden_dim=self.model_hyperparam['layer1_dim'],
                         acti_function='sigmoid')
        h_1 = Neuron(name='h_1',
                     variable_dim=self.model_hyperparam['layer1_dim'],
                     hidden_dim=self.model_hyperparam['layer2_dim'],
                     acti_function='sigmoid')
        h_output = Neuron(name='h_output',
                          variable_dim=self.model_hyperparam['layer2_dim'],
                          hidden_dim=self.model_hyperparam['layer4_dim'],
                          acti_function='softmax')
        return {
            'h_input': h_input,
            'h_1': h_1,
            'h_output': h_output,
        }
    def forward(self, images_batch, labels_batch, if_train=False):
        """Run one forward pass through the three Neuron layers.

        :param images_batch: (batch_size, 784) input vectors
        :param labels_batch: one-hot labels, (batch_size, 10); only used
            when if_train is True
        :return: (pred_category, wrong_percent)
        """
        dropout = self.model_hyperparam['dropout_percent']
        h_input_state = self.net['h_input'].forward(vari=images_batch, dropout_percent=dropout)
        h_1_state = self.net['h_1'].forward(vari=h_input_state, dropout_percent=dropout)
        h_output_state = self.net['h_output'].forward(vari=h_1_state, dropout_percent=dropout)
        pred_category = np.argmax(h_output_state, axis=1)
        if if_train:
            # labels are one-hot; nonzero(...)[1] recovers class indices
            truth = np.nonzero(labels_batch)[1]
            wrong_count = np.nonzero(pred_category - truth)[0].shape[0]
            wrong_percent = wrong_count / labels_batch.shape[0] * 100
        else:
            wrong_percent = 0
        return pred_category, wrong_percent
    def backward(self, labels_batch, step_size):
        """Backpropagate the cross-entropy gradient through all layers,
        then apply one SGD update of size *step_size*."""
        y_truth = labels_batch
        # d(-sum y*log p)/dp = -y/p at the softmax output
        grade_for_h_output = -y_truth * (1 / self.net['h_output'].middle_result['hidden_state_h_output'])
        grade_for_h_1 = self.net['h_output'].backward(grade_by_before=grade_for_h_output)
        grade_for_h_input = self.net['h_1'].backward(grade_by_before=grade_for_h_1)
        # the input-layer backward is invoked for its internal gradient
        # accumulation; its return value (gradient w.r.t. the raw input)
        # is not needed here
        self.net['h_input'].backward(grade_by_before=grade_for_h_input)
        self.net['h_output'].update_grad(step_size=step_size)
        self.net['h_1'].update_grad(step_size=step_size)
        self.net['h_input'].update_grad(step_size=step_size)
if __name__ == '__main__':
    # smoke test: construct a single softmax Neuron, then a full NN
    h_input = Neuron(name='h_input', variable_dim=784, hidden_dim=30, acti_function='softmax')
    print(h_input)
    fnn = NN()
64,136 | MhYao2014/MNIST | refs/heads/master | /temp.py | import numpy as np
# Scratch pad experimenting with numpy broadcasting, apply_along_axis and
# tensordot (the per-row softmax-Jacobian form diag(p) - p p^T).
a = np.array([
    [3,4],
    [1,-1],
    [7,3]
])
print()
musk = np.zeros(a[0].shape)
print(a*musk)
# per-row: diag(row) - outer(row, row), i.e. the softmax Jacobian shape
one_sample = lambda row: np.diag(row) - np.matmul(row.reshape(-1,1),row.reshape(1,-1))
Print = lambda row: print (row)
b = np.array([
    [[1,1],
    [0,1]],
    [[3,4],
    [5,1]],
    [[1,2],
    [2,1]]
])
c = np.array([[3,7],
    [5,9],
    [11,8]]).reshape(3,1,2)
print(a)
print(np.sum(a,axis=0))
print(np.apply_along_axis(one_sample,axis=1,arr=a))
print(b)
print(c)
print(c*b)
print(np.sum(c*b,axis=2))
# batched row-vector x matrix product via tensordot, taking the diagonal
print(np.tensordot(c,b,axes=([2],[2]))[range(c.shape[0]),0,range(c.shape[0]),:])
print(np.tensordot(c,b,axes=([2],[2])).shape)
64,137 | MhYao2014/MNIST | refs/heads/master | /Train.py | import datetime
import numpy as np
from data_process import load_train_labels
from data_process import load_train_images
from FNN import FNN
class Train_hyperparam():
    """Holder for training hyper-parameters shared via inheritance.

    ``train_hyperparam`` is a class attribute, so ``Train`` (which subclasses
    this) reads and mutates the same shared dict.
    """

    def __init__(self):
        # Fixed: the original defined ``__int__`` (the int-conversion hook)
        # instead of the constructor ``__init__``.
        pass

    train_hyperparam = {
        'step_size': 0.3,            # fixed SGD learning rate
        'stop_criterion': 7,         # stop once the smoothed loss drops below this
        'max_iteration': 100000000,  # hard cap on SGD iterations
    }
class Train(Train_hyperparam):
    """SGD trainer for the four-layer FNN with hand-derived backprop."""

    def __init__(self):
        # Fixed: the original defined ``__int__`` instead of ``__init__``.
        pass

    def get_grade(self, middle_result, model_param, labels_batch, images_batch):
        '''
        This method will calculate the grade of all parameters in fnn.
        :param middle_result: A dictionary and each value is a numpy array. The middle result of fnn's forward process.
        :param model_param: A list and each entry of this list is a numpy array. All the parameters in fnn.
        :param labels_batch: A numpy array with shape (batch_size, ).
        :param images_batch: A numpy array holding the raw inputs of this batch.
        :return grade_param: A list and each entry of this list is a numpy array. All the gradient of all parameters in fnn.
        '''
        # One-hot encode the integer labels to match the output layer shape.
        # (``np.int`` was removed in NumPy 1.24; plain ``int`` is equivalent.)
        y_truth = np.zeros(middle_result['layer4'].shape)
        for row, position in enumerate(labels_batch.astype(int)):
            y_truth[row, position] = 1
        # Softmax + cross-entropy gradient w.r.t. the pre-activation: p - y.
        layer4_before_grad = middle_result['layer4'] - y_truth
        W_layer32layer4_grad = np.matmul(middle_result['layer3'].T, layer4_before_grad) / labels_batch.shape[0]
        b_layer32layer4_grad = np.sum(layer4_before_grad, axis=0) / labels_batch.shape[0]
        # Sigmoid derivative expressed via the activation s: s - s**2.
        layer3_grad = np.matmul(layer4_before_grad, model_param[6].T)
        layer3_before_grad = (layer3_grad * (middle_result['layer3'] - middle_result['layer3']**2))
        W_layer22layer3_grad = np.matmul(middle_result['layer2'].T, layer3_before_grad) / labels_batch.shape[0]
        b_layer22layer3_grad = np.sum(layer3_before_grad, axis=0) / labels_batch.shape[0]
        layer2_grad = np.matmul(layer3_before_grad, model_param[4].T)
        layer2_before_grad = (layer2_grad * (middle_result['layer2'] - middle_result['layer2'] ** 2))
        W_layer12layer2_grad = np.matmul(middle_result['layer1'].T, layer2_before_grad) / labels_batch.shape[0]
        b_layer12layer2_grad = np.sum(layer2_before_grad, axis=0) / labels_batch.shape[0]
        layer1_grad = np.matmul(layer2_before_grad, model_param[2].T)
        layer1_before_grad = (layer1_grad * (middle_result['layer1'] - middle_result['layer1'] ** 2))
        W_input2layer1_grad = np.matmul(images_batch.T, layer1_before_grad) / labels_batch.shape[0]
        b_input2layer1_grad = np.sum(layer1_before_grad, axis=0) / labels_batch.shape[0]
        # Order matches fnn.model_param: (W, b) per layer, input to output.
        grade_param = [W_input2layer1_grad, b_input2layer1_grad,
                       W_layer12layer2_grad, b_layer12layer2_grad,
                       W_layer22layer3_grad, b_layer22layer3_grad,
                       W_layer32layer4_grad, b_layer32layer4_grad]
        return grade_param

    def update_grade(self, model_param, grade_param):
        '''
        This method will perform the fixed step size gradient descent
        :param model_param: A list and each entry of this list is a numpy array. All the parameters in fnn.
        :param grade_param: A list and each entry of this list is a numpy array. All the gradient of all parameters in fnn.
        :return: model_param: A list and each entry of this list is a numpy array. All the parameters in fnn.
        '''
        for index, param in enumerate(model_param):
            model_param[index] = model_param[index] - self.train_hyperparam['step_size'] * grade_param[index]
        return model_param

    def sgd_training(self, fnn, images_vectors, labels_numpy):
        '''
        This method will organize the training process.
        :param fnn: A instance of FNN class.
        :param images_vectors: A numpy array with shape (60000, 784)
        :param labels_numpy: A numpy array with shape (60000, )
        :return: None
        '''
        loss_emperical = 100  # exponentially smoothed loss / error estimate
        iteration = 0
        starttime = datetime.datetime.now()
        while loss_emperical > self.train_hyperparam['stop_criterion']:
            images_batch, labels_batch = fnn.batcher(images_vectors, labels_numpy)
            pred_category, loss_batch_average = fnn.forward(images_batch, labels_batch, if_train=True)
            grade_param = self.get_grade(fnn.middle_results, fnn.model_param, labels_batch, images_batch)
            new_model_param = self.update_grade(fnn.model_param, grade_param)
            fnn.update_param(new_model_param)
            # Exponential moving average keeps the stop criterion stable.
            loss_emperical = 0.9 * loss_emperical + 0.1 * loss_batch_average
            iteration += 1
            if iteration > self.train_hyperparam['max_iteration']:
                print("The training process may failed, we have trained for %d iterations.\n" % self.train_hyperparam['max_iteration'])
                break
            # Roughly once per pass over 50000 samples: evaluate on the
            # 50000-60000 hold-out slice and drive the crude LR schedule.
            if iteration % (50000 // fnn.model_hyperparam['batch_size']) == 0:
                account = 0
                for i in range(10000 // fnn.model_hyperparam['batch_size']):
                    _, accuracy = fnn.forward(images_vectors[50000 + i * fnn.model_hyperparam['batch_size']:50000 + (i + 1) * fnn.model_hyperparam['batch_size']],
                                              labels_numpy[50000 + i * fnn.model_hyperparam['batch_size']:50000 + (i + 1) * fnn.model_hyperparam['batch_size']], if_train=True)
                    account += np.nonzero(_ - labels_numpy[50000 + i * fnn.model_hyperparam['batch_size']:50000 + (i + 1) * fnn.model_hyperparam['batch_size']])[0].shape[0]
                print('This is the %dth iterations, and the accuracy on the test data set is: %f%%' % (iteration // (50000 // fnn.model_hyperparam['batch_size']), (100 - 100 * (account / 10000))))
                loss_emperical = 100 * (account / 10000)
                if loss_emperical < 8:
                    fnn.model_hyperparam['step_size'] = 0.015
                else:
                    fnn.model_hyperparam['step_size'] = 0.2
        endtime = datetime.datetime.now()
        print("\nThe iterations take about %d seconds\n" % (endtime - starttime).seconds)
        print('\nThe training process finished !\n')
if __name__ == '__main__':
    ###################
    # path to the training data's images and labels
    ###################
    train_images_idx3_ubyte_file = './train-images.idx3-ubyte'
    train_labels_idx1_ubyte_file = './train-labels.idx1-ubyte'
    ##################
    # Here we go
    ##################
    images_numpy = load_train_images(idx3_ubyte_file=train_images_idx3_ubyte_file)
    labels_numpy = load_train_labels(idx1_ubyte_file=train_labels_idx1_ubyte_file)
    print('\nThe shape of all data images are:', images_numpy.shape)
    print('\nThe shape of all data labels are:', labels_numpy.shape)
    # Flatten each image into a row vector for the fully-connected net.
    images_vectors = images_numpy.reshape((60000, -1))
    fnn = FNN()
    train = Train()
    train.sgd_training(fnn=fnn, images_vectors=images_vectors[0:60000], labels_numpy=labels_numpy[0:60000])
    # Final evaluation on the 50000-60000 slice in 20 batches of 500.
    Accuracy = 0
    account = 0
    for i in range(20):
        _, accuracy = fnn.forward(images_vectors[50000 + i * 500:50000 + (i + 1) * 500],
                                  labels_numpy[50000 + i * 500:50000 + (i + 1) * 500], if_train=True)
        # Count mispredictions: nonzero entries of (prediction - truth).
        account += np.nonzero(_ - labels_numpy[50000 + i * 500:50000 + (i + 1) * 500])[0].shape[0]
    print('The accuracy on the whole data set is %f %%:\n' % (100 - 100 * (account / 10000)))
| {"/SGD_Train.py": ["/Train.py", "/NN.py", "/data_process.py"], "/FNN.py": ["/data_process.py"], "/NN.py": ["/FNN.py", "/Neuron.py"], "/Train.py": ["/data_process.py", "/FNN.py"], "/CNN.py": ["/NN.py"]} |
64,138 | MhYao2014/MNIST | refs/heads/master | /CNN.py | import numpy as np
from NN import NN
class CNN(NN):
    """Placeholder for a convolutional network built on the NN base class."""

    def __init__(self):
        # NOTE(review): super().__init__() is not called, so any setup done by
        # NN's constructor is skipped — confirm this is intentional.
        pass
| {"/SGD_Train.py": ["/Train.py", "/NN.py", "/data_process.py"], "/FNN.py": ["/data_process.py"], "/NN.py": ["/FNN.py", "/Neuron.py"], "/Train.py": ["/data_process.py", "/FNN.py"], "/CNN.py": ["/NN.py"]} |
64,139 | MhYao2014/MNIST | refs/heads/master | /data_process.py | import numpy as np
import struct
def decode_idx3_ubyte(idx3_ubyte_file):
    ''' Turn the binary file of training data into numpy array
    :param idx3_ubyte_file: The path to training data of ubyte format
    :return images: The training data of numpy array format
    '''
    raw = open(file=idx3_ubyte_file, mode='rb').read()
    # IDX3 header: magic, image count, rows, columns (big-endian uint32).
    header_fmt = '>IIII'
    magic_number, images_number, num_rows, num_cols = struct.unpack_from(header_fmt, buffer=raw, offset=0)
    print("\nmagic:%d, count:%d, size:%dX%d" % (magic_number, images_number, num_rows, num_cols))
    pixel_fmt = '>' + str(num_rows * num_cols) + 'B'
    cursor = struct.calcsize(header_fmt)
    stride = struct.calcsize(pixel_fmt)
    images = np.empty(shape=(images_number, num_rows, num_cols))
    for idx in range(images_number):
        if (idx + 1) % 10000 == 0:
            print("done %d" % (idx + 1) + " pictures")
        pixels = struct.unpack_from(pixel_fmt, buffer=raw, offset=cursor)
        images[idx] = np.array(pixels).reshape((num_rows, num_cols))
        cursor += stride
    return images
def decode_idx1_ubyte(idx1_ubyte_file):
    '''Turn the binary file of training data's label into numpy array
    :param idx1_ubyte_file: The path to training data's label of ubyte format
    :return labels: The training data's label of numpy array
    '''
    bin_data = open(file=idx1_ubyte_file, mode='rb').read()
    offset = 0
    # Fixed for consistency with decode_idx3_ubyte: header fields are read as
    # unsigned ('>II'); the original used signed '>ii'.
    fmt_header = '>II'
    magic_number, images_number = struct.unpack_from(fmt_header, buffer=bin_data, offset=offset)
    print("\nmagic: %d, count: %d" % (magic_number, images_number))
    offset += struct.calcsize(fmt_header)
    fmt_image = 'B'  # one unsigned byte per label
    labels = np.empty(images_number)
    for i in range(images_number):
        if (i + 1) % 10000 == 0:
            print("done %d" % (i + 1) + " labels")
        labels[i] = struct.unpack_from(fmt_image, buffer=bin_data, offset=offset)[0]
        offset += struct.calcsize(fmt_image)
    return labels
def load_train_images(idx3_ubyte_file):
    """Load the training images from *idx3_ubyte_file*.

    Thin convenience wrapper around decode_idx3_ubyte.
    """
    return decode_idx3_ubyte(idx3_ubyte_file)
def load_train_labels(idx1_ubyte_file):
    """Load the training labels from *idx1_ubyte_file*.

    Thin convenience wrapper around decode_idx1_ubyte.
    """
    return decode_idx1_ubyte(idx1_ubyte_file)
if __name__ == '__main__':
    # Smoke test: decode both MNIST training files and print their shapes.
    train_images_idx3_ubyte_file = './train-images.idx3-ubyte'
    train_labels_idx1_ubyte_file = './train-labels.idx1-ubyte'
    images = load_train_images(idx3_ubyte_file=train_images_idx3_ubyte_file)
    print(images.shape, '\n')
    labels = load_train_labels(idx1_ubyte_file=train_labels_idx1_ubyte_file)
    print(labels.shape)
| {"/SGD_Train.py": ["/Train.py", "/NN.py", "/data_process.py"], "/FNN.py": ["/data_process.py"], "/NN.py": ["/FNN.py", "/Neuron.py"], "/Train.py": ["/data_process.py", "/FNN.py"], "/CNN.py": ["/NN.py"]} |
64,140 | MhYao2014/MNIST | refs/heads/master | /Neuron.py | import numpy as np
class Neuron(object):
    """One fully-connected layer: affine transform followed by an activation.

    Each instance owns its parameters ('W_<name>', 'b_<name>'), matching
    gradient buffers, and the intermediate tensors cached by forward() that
    backward() consumes.
    """

    def __init__(self, name, variable_dim, hidden_dim, acti_function):
        self.name = name
        self.acti_function = acti_function  # key into self.function_dict
        self.model_param, self.param_grad = self.initialize(variable_dim, hidden_dim)
        self.middle_result = {}  # forward-pass cache used by backward()
        self.function_dict = self.build_function_dict()

    def build_function_dict(self):
        """Return the activation-name -> callable map.

        Every ``*_grad`` function takes the *activated* output (not the
        pre-activation Z) as its argument.
        """
        function_dict = {}

        def sigmoid(input):
            '''
            Compute the sigmoid function for the input here.
            :param input: A scalar or numpy array.
            :return output: A scalar or numpy array sigmoid(input)
            '''
            output = 1.0 / (1.0 + np.exp(-input))
            return output

        def sigmoid_grad(input):
            # d(sigmoid)/dZ expressed via the activation s: s - s**2.
            output = input - input**2
            return output

        def softmax(input):
            """
            Compute the softmax function for each row of the input x.
            :param input: A D dimensional vector or N X D dimensional numpy matrix.
            :return input: Softmax(input)
            """
            orig_shape = input.shape
            if len(input.shape) > 1:
                # Subtract each row's max for numerical stability.
                minus_max_row = lambda a: a - np.max(a)
                input = np.apply_along_axis(minus_max_row, 1, input)
                input = np.exp(input)
                denomi_row = lambda a: 1.0 / np.sum(a)
                denomi = np.apply_along_axis(denomi_row, 1, input)
                input = input * denomi.reshape(-1, 1)
            else:
                input_max = np.max(input)
                input = input - input_max
                numerator = np.exp(input)
                denomi = 1.0 / np.sum(numerator)
                input = numerator.dot(denomi)
            assert input.shape == orig_shape
            return input

        def softmax_grad(input):
            # Per-sample Jacobian: diag(p) - p p^T, stacked along axis 0.
            one_sample_jocabian = lambda row: np.diag(row) - np.matmul(row.reshape(-1, 1), row.reshape(1, -1))
            output = np.apply_along_axis(func1d=one_sample_jocabian, axis=1, arr=input)
            return output

        def relu(input):
            # Fixed: the original body called np.where with an invalid
            # signature and returned an undefined name (NameError if used).
            return np.maximum(input, 0)

        def relu_grad(input):
            # Derivative w.r.t. the activated output: 1 where active, else 0.
            return (input > 0).astype(float)

        function_dict['sigmoid'] = sigmoid
        function_dict['sigmoid_grad'] = sigmoid_grad
        function_dict['softmax'] = softmax
        function_dict['softmax_grad'] = softmax_grad
        # Newly registered so acti_function='relu' works end to end.
        function_dict['relu'] = relu
        function_dict['relu_grad'] = relu_grad
        return function_dict

    def forward(self, vari, dropout_percent):
        """Affine transform + activation, with inverted dropout except for softmax.

        Caches the input, the pre-activation Z and the activation for backward().
        """
        self.middle_result['vari_' + self.name] = vari
        Z = np.matmul(vari, self.model_param['W_' + self.name]) + self.model_param['b_' + self.name]
        self.middle_result['Z_' + self.name] = Z
        hidden_state = self.function_dict[self.acti_function](Z)
        if self.acti_function != 'softmax':
            # Inverted dropout: scale kept units so the expected activation
            # matches the no-dropout case.
            dropout_mask = np.random.binomial([np.ones(hidden_state[0].shape)], 1 - dropout_percent)[0] * \
                           (1.0 / (1 - dropout_percent))
            hidden_state *= dropout_mask
        self.middle_result['hidden_state_' + self.name] = hidden_state
        return hidden_state

    def backward(self, grade_by_before):
        """Accumulate parameter gradients and return the gradient w.r.t. the input.

        :param grade_by_before: gradient flowing back from the next layer.
        """
        if self.acti_function == 'softmax':
            # Batched vector-Jacobian product; the fancy index selects the
            # (sample i, jacobian i) diagonal of the tensordot result.
            grade_for_Z = np.tensordot(grade_by_before.reshape(grade_by_before.shape[0], 1, -1),
                                       self.function_dict[self.acti_function + '_grad'](self.middle_result['hidden_state_' + self.name]),
                                       axes=([2], [2]))[range(grade_by_before.shape[0]), 0, range(grade_by_before.shape[0]), :]
        else:
            grade_for_Z = grade_by_before * self.function_dict[self.acti_function + '_grad'](self.middle_result['hidden_state_' + self.name])
        # Parameter gradients are averaged over the batch.
        self.param_grad['W_grad_' + self.name] = np.matmul(self.middle_result['vari_' + self.name].T, grade_for_Z) / grade_for_Z.shape[0]
        self.param_grad['b_grad_' + self.name] = np.sum(grade_for_Z, axis=0) / grade_for_Z.shape[0]
        grade_for_vari = np.matmul(grade_for_Z, self.model_param['W_' + self.name].T)
        return grade_for_vari

    def update_grad(self, step_size):
        """Apply one fixed-step gradient-descent update to W and b."""
        self.model_param['W_' + self.name] = self.model_param['W_' + self.name] - step_size * self.param_grad['W_grad_' + self.name]
        self.model_param['b_' + self.name] = self.model_param['b_' + self.name] - step_size * self.param_grad['b_grad_' + self.name]

    def initialize(self, variable_dim, hidden_dim):
        """Create randomly-initialised parameters and zeroed gradient buffers."""
        model_param = {
            'W_' + self.name: np.random.randn(variable_dim, hidden_dim),
            'b_' + self.name: np.random.randn(1, hidden_dim),
        }
        param_grad = {
            'W_grad_' + self.name: np.zeros(model_param['W_' + self.name].shape),
            'b_grad_' + self.name: np.zeros(model_param['b_' + self.name].shape),
        }
        return model_param, param_grad
64,141 | khatangatao/khatangatao | refs/heads/master | /blog/tests/test_models.py | from django.test import TestCase
from django.contrib.auth import get_user_model
from ..models import Entry, Comment
class EntryModelTest(TestCase):
    """Unit tests for the Entry model."""

    def test_entry_create(self):
        pass

    def test_string_representation(self):
        post = Entry(title="My entry title")
        self.assertEqual(str(post), post.title)

    def test_verbose_name_plural(self):
        plural = str(Entry._meta.verbose_name_plural)
        self.assertEqual(plural, "entries")

    def test_get_absolute_url(self):
        author = get_user_model().objects.create(username='some_user')
        post = Entry.objects.create(title="My entry title", author=author)
        self.assertIsNotNone(post.get_absolute_url())
class CommentModelTest(TestCase):
    """Unit tests for the Comment model."""

    def test_string_representation(self):
        note = Comment(body="My comment body")
        self.assertEqual(str(note), note.body)
64,142 | khatangatao/khatangatao | refs/heads/master | /khatangatao/routing.py | from channels.routing import route
# No channel routes registered yet; channels consumers would be listed here.
channel_routing = [
]
| {"/blog/tests/test_models.py": ["/blog/models.py"], "/blog/tests/test_views.py": ["/blog/models.py"], "/blog/tests/test_forms.py": ["/blog/forms.py", "/blog/models.py"], "/blog/forms.py": ["/blog/models.py"], "/blog/views.py": ["/blog/forms.py", "/blog/models.py"], "/blog/api/urls.py": ["/blog/views.py"]} |
64,143 | khatangatao/khatangatao | refs/heads/master | /client/cli_client.py | import sys
import requests
# Earlier attempt to take the server name from argv (note: sys.argv[1:] is a
# list, so the concatenation below would raise) — kept commented for reference.
# try:
#     servername = 'http://' + sys.argv[1:]
# except:
#     servername = 'http://khatangatao.com'
#
# print(servername)

# Fetch the entry list from the blog API and dump the raw response.
target = 'http://khatangatao.com/api/entry/'
# NOTE(review): the bearer token is hard-coded and committed to the repo; it
# should be loaded from an environment variable or config file instead.
headers = {"Authorization": "Bearer 56a4f78b7f4a6990d3ae6c223a73249b8765c0c1"}
r = requests.get(target, headers=headers)
print(r.status_code)
print(r.text)
64,144 | khatangatao/khatangatao | refs/heads/master | /blog/tests/test_views.py | from django.test import TestCase
from django.contrib.auth import get_user_model
from blog.models import Entry, Comment
from django_webtest import WebTest
class HomePageTests(TestCase):
    """Test whether our blog entries show up on the homepage"""

    def setUp(self):
        # Author shared by every entry created in these tests.
        self.user = get_user_model().objects.create(username='some_user')

    def test_one_entry(self):
        Entry.objects.create(title='1-title', body='1-body', author=self.user)
        response = self.client.get('/')
        self.assertContains(response, '1-title')
        self.assertContains(response, '1-body')

    def test_two_entries(self):
        Entry.objects.create(title='1-title', body='1-body', author=self.user)
        Entry.objects.create(title='2-title', body='2-body', author=self.user)
        response = self.client.get('/')
        self.assertContains(response, '1-title')
        self.assertContains(response, '1-body')
        self.assertContains(response, '2-title')

    def test_no_entries(self):
        # The empty-state message is Russian ("nothing here yet").
        response = self.client.get('/')
        self.assertContains(response, 'Пока ничего нет.')

    def test_homepage(self):
        """
        Tests main page
        :return: boolean
        """
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)
class EntryViewTest(WebTest):
    """Integration tests for the entry detail page (django-webtest)."""

    def setUp(self):
        # One author and one entry shared by every test in this class.
        self.user = get_user_model().objects.create(username='some_user')
        self.entry = Entry.objects.create(title='1-title', body='1-body', author=self.user)

    def test_basic_view(self):
        response = self.client.get(self.entry.get_absolute_url())
        self.assertEqual(response.status_code, 200)

    def test_title_in_entry(self):
        response = self.client.get(self.entry.get_absolute_url())
        self.assertContains(response, self.entry.title)

    def test_body_in_entry(self):
        response = self.client.get(self.entry.get_absolute_url())
        self.assertContains(response, self.entry.body)

    def test_no_comments_in_entry(self):
        # Empty-state message is Russian ("no comments yet").
        response = self.client.get(self.entry.get_absolute_url())
        self.assertContains(response, 'Комментариев пока нет.')

    def test_one_comment_in_entry(self):
        self.comment = Comment.objects.create(entry=self.entry, body='comments body', name=self.user)
        response = self.client.get(self.entry.get_absolute_url())
        self.assertContains(response, 'comments body')

    def test_view_page(self):
        # The detail page should render exactly one form (the comment form).
        page = self.app.get(self.entry.get_absolute_url())
        self.assertEqual(len(page.forms), 1)
64,145 | khatangatao/khatangatao | refs/heads/master | /khatangatao/urls.py | from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic.base import TemplateView
# static files
from django.conf import settings
from django.conf.urls.static import static
import blog.urls
from blog import views
urlpatterns = [
    url(r'^$', views.HomeView.as_view(), name='home'),
    url(r'^', include(blog.urls)),
    url(r'^authors/', views.AuthorList.as_view(), name='authors'),
    url(r'^frontend/', views.index, name='frontend'),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^api/', include('blog.api.urls', namespace='api')),
]

# Catalog app routes, appended separately.
urlpatterns += [
    url(r'^catalog/', include('catalog.urls')),
]

# Serve collected static files from STATIC_ROOT.
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
64,146 | khatangatao/khatangatao | refs/heads/master | /client/gui_client.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Implement a GUI for viewing and updating blog
"""
from tkinter import *
from tkinter.messagebox import showerror, showinfo
import threading
import logging
# Field order mirrors the attributes exchanged with the blog entry API.
fieldnames = ('title', 'author', 'body', 'created_at', 'modified_at')


def makeWidgets():
    """Build the main Tk window: one labelled Entry per field plus action
    buttons. Populates the module-global ``entries`` widget map."""
    global entries
    window = Tk()
    window.title('khatangatao blog')
    form = Frame(window)
    form.pack()
    entries = {}
    # 'key' (the record id) gets a row in addition to the data fields.
    for (ix, label) in enumerate(('key',) + fieldnames):
        print('Параметр ix: {} Параметр label: {}'.format(ix, label))
        lab = Label(form, text=label)
        ent = Entry(form)
        lab.grid(row=ix, column=0)
        ent.grid(row=ix, column=1)
        entries[label] = ent
    Button(window, text='Проверить доступность', command=checkAvaibility).pack(side=LEFT)
    Button(window, text="Найти статью", command=fetchRecord).pack(side=LEFT)
    Button(window, text="Обновить", command=updateRecord).pack(side=LEFT)
    Button(window, text="Выход", command=window.quit).pack(side=RIGHT)
    return window
def fetchRecord():
    """GET one entry by the id typed into the 'key' field and fill the form."""
    import requests
    # TODO: the user-entered fields need validation.
    recordNumber = entries['key'].get()
    target = 'http://khatangatao.com/api/entry/' + recordNumber
    # NOTE(review): hard-coded bearer token — should come from config/env.
    headers = {"Authorization": "Bearer 56a4f78b7f4a6990d3ae6c223a73249b8765c0c1"}
    r = requests.get(target, headers=headers)
    print(r.status_code)
    print(r.json())
    if r.status_code == 200:
        # Replace every form field with the fetched record's values.
        for field in fieldnames:
            print(field)
            entries[field].delete(0, END)
            entries[field].insert(0, r.json().get(field, ''))
            print(entries[field].get())
    elif r.status_code == 404:
        # Russian: "No such article!"
        showerror(title='Error', message='Такой статьи нет!')
def updateRecord():
    """POST the current form contents to the entry API as a JSON payload."""
    import requests
    import json
    data = {}
    target = 'http://khatangatao.com/api/entry/'
    # NOTE(review): hard-coded bearer token — should come from config/env.
    headers = {"Authorization": "Bearer 56a4f78b7f4a6990d3ae6c223a73249b8765c0c1",
               "Content-Type": "application/json"}
    # Collect every data field from the form widgets.
    for field in fieldnames:
        data[field] = entries[field].get()
    data = json.dumps(data)
    print(data)
    r = requests.post(url=target, data=data, headers=headers)
    print(r.status_code)
def checkAvaibility():
    """Run the availability ping check on a background thread.

    Fixed: the original passed ``pingCheck()`` — calling it immediately on
    the GUI thread (blocking the UI) and handing the thread a ``None``
    target — instead of the function object itself.
    """
    executeInNewThread(pingCheck)
def checkHomePage():
    """Request the site's home page and print the HTTP status (diagnostic)."""
    import requests
    target = 'http://khatangatao.com/'
    # NOTE(review): hard-coded bearer token — should come from config/env.
    headers = {"Authorization": "Bearer 56a4f78b7f4a6990d3ae6c223a73249b8765c0c1"}
    r = requests.get(target, headers=headers)
    print(r.status_code)
def pingCheck():
    """Ping the site three times and pop up a reachability message box.

    Intended to run on a worker thread (see executeInNewThread); thread
    start/exit are logged at DEBUG level.
    """
    import subprocess
    logging.debug('Starting')
    # NOTE(review): '-c 3' is the count flag on Linux/macOS, where '-n' means
    # numeric output; on Windows '-n' is the count and '-c' is invalid —
    # confirm the intended target platform for this flag combination.
    reply = subprocess.run(['ping', '-c', '3', '-n', 'khatangatao.com'],
                           stdout=subprocess.PIPE,
                           stdin=subprocess.PIPE,
                           )
    if reply.returncode == 0:
        # Russian: "It works!"
        showinfo(title='popup', message='Работает!')
    else:
        # Russian: "It does not work!"
        showinfo(title='popup', message='Не работает!')
    logging.debug('Exiting')
def executeInNewThread(function):
    """Start *function* on a background thread so the GUI stays responsive."""
    worker = threading.Thread(name='pingCheck', target=function)
    worker.start()
    print(worker)
    # worker.join()
# Thread-aware log format so pingCheck start/exit messages can be traced.
logging.basicConfig(
    level=logging.DEBUG,
    format='[%(levelname)s] (%(threadName)-10s) %(message)s',
)
# Build the GUI and enter the Tk event loop (blocks until the window closes).
window = makeWidgets()
window.mainloop()
| {"/blog/tests/test_models.py": ["/blog/models.py"], "/blog/tests/test_views.py": ["/blog/models.py"], "/blog/tests/test_forms.py": ["/blog/forms.py", "/blog/models.py"], "/blog/forms.py": ["/blog/models.py"], "/blog/views.py": ["/blog/forms.py", "/blog/models.py"], "/blog/api/urls.py": ["/blog/views.py"]} |
64,147 | khatangatao/khatangatao | refs/heads/master | /blog/tests/test_forms.py | from django.test import TestCase
from django.contrib.auth import get_user_model
from django_webtest import WebTest
from ..forms import CommentForm
from ..models import Entry, Comment
class CommentFormTest(WebTest):
    """Tests for CommentForm: construction, validation, and in-page posting."""

    def setUp(self):
        # Every comment form needs a target entry.
        user = get_user_model().objects.create_user('zoidberg')
        self.entry = Entry.objects.create(author=user, title="My entry title")

    def test_init(self):
        CommentForm(entry=self.entry)

    def test_init_without_entry(self):
        # The 'entry' kwarg is mandatory; omitting it must raise.
        with self.assertRaises(KeyError):
            CommentForm()

    def test_valid_data(self):
        form = CommentForm({
            'name': "Turanga Leela",
            'email': "leela@example.com",
            'body': "Hi there",
        }, entry=self.entry)
        self.assertTrue(form.is_valid())
        # save() must attach the comment to the entry given at construction.
        comment = form.save()
        self.assertEqual(comment.name, "Turanga Leela")
        self.assertEqual(comment.email, "leela@example.com")
        self.assertEqual(comment.body, "Hi there")
        self.assertEqual(comment.entry, self.entry)

    def test_blank_data(self):
        form = CommentForm({}, entry=self.entry)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors, {'name': ['This field is required.'],
                                       'email': ['This field is required.'],
                                       'body': ['This field is required.']})

    def test_form_error(self):
        # Submitting the empty in-page form shows the required-field error.
        page = self.app.get(self.entry.get_absolute_url())
        page = page.form.submit()
        self.assertContains(page, "This field is required.")

    def test_form_success(self):
        # A filled-in form redirects back to the entry detail page.
        page = self.app.get(self.entry.get_absolute_url())
        page.form['name'] = "Phillip"
        page.form['email'] = "phillip@example.com"
        page.form['body'] = "Test comment body."
        page = page.form.submit()
        self.assertRedirects(page, self.entry.get_absolute_url())
64,148 | khatangatao/khatangatao | refs/heads/master | /blog/forms.py | from django import forms
from .models import Comment
class CommentForm(forms.ModelForm):
    """ModelForm for Comment that binds each comment to a fixed Entry.

    The target entry is supplied at construction time via the required
    ``entry=`` keyword rather than exposed as a form field.
    """

    def __init__(self, *args, **kwargs):
        # Required keyword: the blog Entry this comment belongs to.
        # Raises KeyError when omitted (relied on by the tests).
        self.entry = kwargs.pop('entry')
        super().__init__(*args, **kwargs)

    def save(self, commit=True):
        """Attach the bound entry and save.

        Fixed: the override now honours ModelForm.save()'s ``commit``
        contract; previously it always wrote to the database, silently
        ignoring callers that passed commit=False.
        """
        comment = super().save(commit=False)
        comment.entry = self.entry
        if commit:
            comment.save()
        return comment

    class Meta:
        model = Comment
        fields = ('name', 'email', 'body')
        widgets = {
            'name': forms.Textarea(attrs={'placeholder': 'Введите ваше имя', 'cols': 50, 'rows': 1}),
            'email': forms.Textarea(attrs={'placeholder': 'email', 'cols': 50, 'rows': 1}),
            'body': forms.Textarea(attrs={'placeholder': 'Введите текст комментария', 'cols': 50, 'rows': 10}),
        }
| {"/blog/tests/test_models.py": ["/blog/models.py"], "/blog/tests/test_views.py": ["/blog/models.py"], "/blog/tests/test_forms.py": ["/blog/forms.py", "/blog/models.py"], "/blog/forms.py": ["/blog/models.py"], "/blog/views.py": ["/blog/forms.py", "/blog/models.py"], "/blog/api/urls.py": ["/blog/views.py"]} |
64,149 | khatangatao/khatangatao | refs/heads/master | /blog/views.py | from django.shortcuts import render
from django.views.generic import ListView
from django.views.generic import CreateView
from django.contrib.auth.models import User
from .forms import CommentForm
from .models import Entry
class HomeView(ListView):
    """Home page: paginated list of blog entries, newest first."""
    template_name = 'home.html'
    queryset = Entry.objects.order_by('-created_at')
    paginate_by = 10

    def get_context_data(self, **kwargs):
        """
        Add a per-session visit counter to the template context.
        """
        d = super().get_context_data(**kwargs)
        num_visits = self.request.session.get('num_visits', 0)
        self.request.session['num_visits'] = num_visits + 1
        d['num_visits'] = self.request.session['num_visits']
        return d
class AuthorList(ListView):
    """Paginated list of all registered users, ordered by primary key."""
    template_name = 'authors.html'
    queryset = User.objects.order_by('pk')
    paginate_by = 10
class EntryDetail(CreateView):
    """Entry detail page that also hosts the comment form (via CreateView)."""
    model = Entry
    template_name = 'blog/entry_detail.html'
    form_class = CommentForm

    def get_form_kwargs(self):
        """
        Returns the keyword arguments for instantiating the form.
        """
        kwargs = super().get_form_kwargs()
        # CommentForm requires the target entry as a constructor kwarg.
        kwargs['entry'] = self.get_object()
        return kwargs

    def get_context_data(self, **kwargs):
        """
        Add the entry and a per-session visit counter to the context.
        """
        d = super().get_context_data(**kwargs)
        d['entry'] = self.get_object()
        # Same session counter as HomeView; candidate for a shared mixin.
        num_visits = self.request.session.get('num_visits', 0)
        self.request.session['num_visits'] = num_visits + 1
        d['num_visits'] = self.request.session['num_visits']
        return d

    def get_success_url(self):
        """
        Redirect back to the entry page after a successful comment post.
        """
        return self.get_object().get_absolute_url()
# TODO: temporary location for the API view — move it alongside blog/api/.
from .serializers import EntrySerializer
from rest_framework.permissions import IsAuthenticated
from rest_framework.viewsets import ModelViewSet
from blog.api.auth.bearer import BearerTokenAuthentication
class EntryApi(ModelViewSet):
    """CRUD API for Entry objects; requires bearer-token authentication."""
    queryset = Entry.objects.all()
    serializer_class = EntrySerializer
    authentication_classes = (BearerTokenAuthentication,)
    permission_classes = (
        IsAuthenticated,
    )
# TODO: temporary location for the Angular application view.
def index(request):
    """Serve the frontend single-page-app shell template."""
    return render(request, 'index.html')
| {"/blog/tests/test_models.py": ["/blog/models.py"], "/blog/tests/test_views.py": ["/blog/models.py"], "/blog/tests/test_forms.py": ["/blog/forms.py", "/blog/models.py"], "/blog/forms.py": ["/blog/models.py"], "/blog/views.py": ["/blog/forms.py", "/blog/models.py"], "/blog/api/urls.py": ["/blog/views.py"]} |
64,150 | khatangatao/khatangatao | refs/heads/master | /blog/api/urls.py | from django.conf.urls import include, url
from ..views import EntryApi
from rest_framework import routers
# DRF router auto-generates the CRUD routes for the Entry API.
router = routers.DefaultRouter()
# NOTE(review): ``base_name`` was renamed to ``basename`` in newer DRF
# releases — confirm the pinned DRF version before upgrading.
router.register('entry', EntryApi, base_name='entry')

urlpatterns = [
    # API
    url(r'^', include(router.urls)),
]
64,151 | khatangatao/khatangatao | refs/heads/master | /blog/models.py | from django.db import models
from django.core.urlresolvers import reverse
class Entry(models.Model):
    """A single blog post."""
    title = models.CharField(max_length=500)
    author = models.ForeignKey('auth.User')  # string ref avoids importing User
    body = models.TextField()
    # Timestamps are set automatically and hidden from forms.
    created_at = models.DateTimeField(auto_now_add=True, editable=False)
    modified_at = models.DateTimeField(auto_now=True, editable=False)

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        # Canonical URL used by views, tests and post-comment redirects.
        return reverse('entry_detail', kwargs={'pk': self.pk})

    class Meta:
        verbose_name_plural = "entries"  # default pluralisation would be wrong
class Comment(models.Model):
    """A reader comment attached to one Entry."""
    entry = models.ForeignKey(Entry)
    name = models.CharField(max_length=100, verbose_name='Имя')
    email = models.EmailField(max_length=200)
    body = models.TextField(max_length=1000, verbose_name='Комментарий')
    # NOTE(review): created_at is editable=True here but editable=False on
    # Entry — confirm whether that asymmetry is intended.
    created_at = models.DateTimeField(auto_now_add=True, editable=True)
    modified_at = models.DateTimeField(auto_now=True, editable=False)

    def __str__(self):
        return self.body
64,168 | fbdp1202/analysisIITP | refs/heads/master | /method.py | # method.py
def getScore(numVlnTest, numNonVlnTest, numHitVln, numHitNonVln):
    """Weighted hit ratio: non-violation hits and tests count at 1/10 weight."""
    weighted_hits = numHitVln + numHitNonVln / 10.
    weighted_total = float(numVlnTest + numNonVlnTest / 10)
    return weighted_hits / weighted_total
def getHitnum(prams, thres, score):
    """Invert the score formula to recover the hit count at threshold *thres*."""
    per_vln_hit = prams['HitOneVlnOne']
    per_nonvln_hit = prams['HitOneNonVln']
    # Only the complement of the threshold enters the formula.
    remaining = prams['totalNumTest'] - thres
    numerator = score + prams['numVlnTest'] * per_nonvln_hit - per_nonvln_hit * remaining
    return numerator / (per_vln_hit + per_nonvln_hit)
def nonVlnFunc(x, inc, bias):
    """Linear non-violation loss in percent: inc * (xi - bias) * 100 per point."""
    return [inc * (point - bias) * 100 for point in x]
def basicVlnLossFunc(x, bias):
    """Constant baseline violation loss (percent), one value per point in x."""
    return [bias * 100 for _ in x]
def maxScoreFunc(nonVlnLoss, basicVlnLoss):
    """Best achievable score: 100 minus both losses, elementwise."""
    return [100 - nonVlnLoss[i] - basicVlnLoss[i] for i in range(len(nonVlnLoss))]
def additionLossFunc(x, inc):
    """Additional loss in percent: inc * xi * 100 per point."""
    return [inc * point * 100 for point in x]
def getLoss(factor, score, data, prams):
    """Recover the loss term from the inverted score equation.

    ``data`` is accepted for signature compatibility but is not used.
    """
    per_nonvln = prams['HitOneNonVln']
    per_vln = prams['HitOneVln']
    alpha = prams['AvgOneHitOne']
    num_vln = prams['numVlnTest']
    numerator = (1 - per_nonvln * (factor - num_vln)
                 - num_vln * per_vln + alpha * num_vln * per_vln - score)
    return numerator / float(alpha * per_vln + per_nonvln)
# Import-time marker confirming this module was loaded.
print('method.py')
64,169 | fbdp1202/analysisIITP | refs/heads/master | /canvas.py | # canvas.py
import matplotlib.pyplot as plt
def drawPlot(x, y, shape, pltNum, pltTitle, pltlabel, pltXlabel, pltYlabel, color):
    """Add one labelled (x, y) series to matplotlib figure `pltNum`.

    shape: matplotlib format string (e.g. 'o' for markers, '-' for a line).
    Repeated calls with the same pltNum accumulate series on one figure;
    nothing is displayed until showPlot() is called.
    """
    plt.figure(pltNum)  # select (or lazily create) the target figure
    plt.title(label=pltTitle, loc='center', pad=10.)
    plt.plot(x, y, shape, color=color, label=pltlabel)
    plt.xlabel(pltXlabel)
    plt.ylabel(pltYlabel)
    plt.legend()  # rebuild the legend so the new series' label appears
def showPlot():
    """Display all figures accumulated via drawPlot (blocks until closed)."""
    plt.show()
# Module-load marker.
print('canvas.py')
64,170 | fbdp1202/analysisIITP | refs/heads/master | /analyzer.py | # analyzer.py
import numpy as np
import math
from scipy.interpolate import interp1d
from method import *
import canvas
def drawKmeans(data, prams):
    """Plot the implied miss count for each Kmeans (factor, score) pair.

    Returns a cubic interpolator mapping factor -> loss, reused by the
    prediction plot.
    """
    pairs = data['Kmeans']
    factors = [factor for factor, _ in pairs]
    losses = [getLoss(factor, score, data, prams) for factor, score in pairs]
    loss_fit = interp1d(factors, losses, kind='cubic', fill_value="extrapolate")
    grid = np.linspace(54, 250, num=205, endpoint=True)
    canvas.drawPlot(factors, losses, 'o', 2, 'Kmeans Loss num', 'score', 'factor', 'num', 'blue')
    canvas.drawPlot(grid, loss_fit(grid), '-', 2, 'Kmeans Loss num', 'cubic', 'factor', 'num', 'red')
    return loss_fit
def drawPredictKmeans(data, prams, f1):
    """Overlay measured Kmeans scores with the model's predicted curves.

    f1: interpolator from drawKmeans giving the implied miss count per factor.
    All series are drawn on figure 1.
    """
    pairs = data['Kmeans']
    factors = [float(factor) for factor, _ in pairs]
    scores = [float(score) * 100 for _, score in pairs]
    score_fit = interp1d(factors, scores, kind='cubic', fill_value="extrapolate")
    grid = np.linspace(54, 250, num=205, endpoint=True)
    # Component curves of the scoring model over the factor grid.
    non_vln_loss = nonVlnFunc(grid, prams['HitOneNonVln'], prams['numVlnTest'])
    basic_vln_loss = basicVlnLossFunc(grid, prams['HitAllVlnZeroNonVln'] * (1 - prams['AvgOneHitOne']))
    addition_loss = additionLossFunc(f1(grid), prams['HitOneVln'] * prams['AvgOneHitOne'] + prams['HitOneNonVln'])
    max_score = maxScoreFunc(non_vln_loss, basic_vln_loss)
    canvas.drawPlot(factors, scores, 'o', 1, 'Kmeans Score', 'score', 'factor', 'score', 'blue')
    canvas.drawPlot(grid, score_fit(grid), '-', 1, 'Kmeans Score', 'cubic', 'factor', 'score', 'red')
    canvas.drawPlot(grid, non_vln_loss, '-', 1, 'Kmeans Score', 'NonVlnLoss', 'factor', 'score', 'green')
    canvas.drawPlot(grid, basic_vln_loss, '-', 1, 'Kmeans Score', 'basicVlnLoss', 'factor', 'score', 'purple')
    canvas.drawPlot(grid, addition_loss, '-', 1, 'Kmeans Score', 'additionLoss', 'factor', 'score', 'orange')
    canvas.drawPlot(grid, max_score, '-', 1, 'Kmeans Score', 'maxScore', 'factor', 'score', 'black')
def analysisKmeans(data, prams):
    """Run the full Kmeans analysis: fit the loss curve, then plot predictions."""
    drawPredictKmeans(data, prams, drawKmeans(data, prams))
def analysisHitNum(data, prams):
    """Print the estimated hit count and hit rate for every (factor, score) row."""
    print('\n'+"="*18+" analysisHitNum "+"="*18)
    # Sentinel values marking spreadsheet header rows or degenerate factors;
    # tuple membership replaces the original chained `== None` comparisons.
    skip_factors = (None, 'x', 0, 'factor')
    for name in data:
        for factor, score in data[name]:
            if factor in skip_factors:
                continue
            hitNum = getHitnum(prams, factor, score)
            print('{:18s} : Hit Number is ({:2.3f} / {:>3d}) %{:2.3f}'.format(name, hitNum, factor, float(hitNum/factor*100)))
# Module-load marker.
print('analyzer.py')
64,171 | fbdp1202/analysisIITP | refs/heads/master | /main.py | import model
import canvas
import analyzer
if __name__=="__main__":
    # Entry point: load the scores spreadsheet, run both analyses, then show
    # every figure the analyzers queued up on the shared canvas.
    excelFileName = 'scores.xlsx'  # expected next to this script — TODO confirm path
    data, prams = model.makeDataSet(excelFileName)
    analyzer.analysisKmeans(data, prams)
    analyzer.analysisHitNum(data, prams)
    canvas.showPlot()  # blocks until all figure windows are closed
| {"/analyzer.py": ["/method.py", "/canvas.py"], "/main.py": ["/model.py", "/canvas.py", "/analyzer.py"], "/model.py": ["/method.py"]} |
64,172 | fbdp1202/analysisIITP | refs/heads/master | /model.py | # model.py
from openpyxl import load_workbook
from openpyxl import Workbook
import method
def makeDataRow(data, row):
    """Append columns 1..2 of `row` (factor, score) under the series named in row[0].

    Creates the series list on first sight of a name; mutates `data` in place.
    """
    # setdefault replaces the original `if not row[0] in data` init-then-append.
    data.setdefault(row[0], []).append(row[1:3])
def load_excel_data(data, fileName):
    """Read Sheet1 of the workbook into a dict of name -> list of [factor, score] rows.

    NOTE(review): the `data` argument is immediately rebound to a fresh dict,
    so whatever the caller passes in is ignored — confirm this is intended.
    """
    # data_only=True returns computed cell values rather than formulas.
    load_wb = load_workbook(fileName, data_only=True)
    load_ws = load_wb['Sheet1']
    data = {}
    for row in load_ws.rows:
        row_value = []
        for cell in row:
            row_value.append(cell.value)
        makeDataRow(data, row_value)
    # Sort the Kmeans series by factor; downstream cubic interpolation
    # presumably expects ordered x values — confirm.
    data['Kmeans'].sort()
    return data
def setPrams(data):
    """Derive the scoring-model parameters from the fixed test-set sizes.

    data: parsed spreadsheet dict; only data['AllOne'][0][1] is read here.
    Returns a dict of named parameters used by the analyzer.
    """
    prams = {}
    totalNumTest = 300  # total number of test cases in the benchmark
    numVlnTest = 54     # of which this many are vulnerable
    numNonVlnTest = totalNumTest - numVlnTest
    prams['type'] = 'value'
    # Reuse the locals (the original re-typed the literals 300 and 54 here,
    # which risked the counts drifting out of sync).
    prams['totalNumTest'] = totalNumTest
    prams['numVlnTest'] = numVlnTest
    prams['numNonVlnTest'] = numNonVlnTest
    # Reference scores for the four corner cases of the detector.
    prams['HitZeroVlnAllNonVln'] = method.getScore(numVlnTest,numNonVlnTest,0,numNonVlnTest)
    prams['HitOneNonVln'] = method.getScore(numVlnTest,numNonVlnTest, 0, 1)
    prams['HitOneVln'] = method.getScore(numVlnTest,numNonVlnTest, 1, 0)
    prams['HitAllVlnZeroNonVln'] = method.getScore(numVlnTest,numNonVlnTest,numVlnTest,0)
    # Average per-hit contribution measured from the 'AllOne' run.
    prams['AvgOneHitOne'] = float(data['AllOne'][0][1])/float(prams['HitAllVlnZeroNonVln'])
    prams['HitOneVlnOne'] = prams['HitOneVln'] * prams['AvgOneHitOne']
    return prams
def checkDataSet(data, prams):
    """Dump the parsed data rows and derived parameters to stdout for inspection."""
    print('\n'+"="*20+" Print data "+"="*20)
    for series_name, rows in data.items():
        for factor, score in rows:
            print("{0:22s} {1:10s} {2:10s}".format(series_name, str(factor), str(score)))
    print('\n'+"="*20+" Print prams "+"="*20)
    for param_name, param_value in prams.items():
        print("{0:33s} {1:25s}".format(param_name, str(param_value)))
def makeDataSet(fileName):
    """Load the spreadsheet, derive model parameters, echo both, and return them."""
    data = load_excel_data({}, fileName)
    prams = setPrams(data)
    checkDataSet(data, prams)
    return data, prams
# Module-load marker.
print('Load model.py')
| {"/analyzer.py": ["/method.py", "/canvas.py"], "/main.py": ["/model.py", "/canvas.py", "/analyzer.py"], "/model.py": ["/method.py"]} |
64,185 | SUPEngineer/webAuto | refs/heads/master | /testing/test_TouchAction.py | import time
from selenium import webdriver
from selenium.webdriver import TouchActions
from selenium.webdriver.common.by import By
class TestTouchAction:
    """Selenium demo: drive a Baidu search via TouchActions (tap + scroll)."""
    def setup(self):
        # When initializing the webdriver this option must be added:
        # chromedriver enables the W3C protocol by default, but TouchAction-style
        # gestures are not part of the W3C standard protocol, so W3C mode is
        # switched off here.
        option = webdriver.ChromeOptions()
        option.add_experimental_option("w3c", False)
        self.driver = webdriver.Chrome(options=option)
        self.driver.maximize_window()
        self.driver.implicitly_wait(3)  # seconds to wait for elements to appear
    def teardown(self):
        # Close the browser after each test.
        self.driver.quit()
    def test_scroll(self):
        """Search for 'selenium' via a tap, then scroll far down the results."""
        self.driver.get('https://www.baidu.com')
        actions = TouchActions(self.driver)
        elem = self.driver.find_element(By.ID, "kw")          # search input box
        search_elem = self.driver.find_element(By.ID, "su")   # search button
        elem.send_keys('selenium')
        actions.tap(search_elem)
        actions.perform()
        time.sleep(3)
        # Scroll 10000px down from the search box toward the page bottom.
        actions.scroll_from_element(elem, 0, 10000)
        actions.perform()
        time.sleep(3)
| {"/testing/test_file.py": ["/testing/base.py"], "/testing/test_js.py": ["/testing/base.py"], "/testing/test_alert.py": ["/testing/base.py"], "/testing/test_frame.py": ["/testing/base.py"], "/testing/addStuff/page/main_page.py": ["/testing/addStuff/page/address_page.py"], "/testing/addStuff/testCase/test_addStuff.py": ["/testing/addStuff/page/main_page.py"], "/testing/test_windows.py": ["/testing/base.py"]} |
64,186 | SUPEngineer/webAuto | refs/heads/master | /testing/test_file.py | # @Time : 2021/2/24 19:46
# @Author : qiulingfeng
# @File : test_file.py
from time import sleep
from selenium.webdriver.common.by import By
from testing.base import Base
class TestFile(Base):
    """Selenium demo: upload a local image through Baidu image search."""
    def test_file(self):
        self.driver.get("https://image.baidu.com/")
        # Open the search-by-image panel (camera icon).
        self.driver.find_element(By.XPATH, "//*[@id='sttb']/img[1]").click()
        # send_keys on a file <input> element uploads the file at this path.
        # NOTE(review): hardcoded user-specific Windows path — fails on any
        # other machine; consider making it configurable.
        self.driver.find_element(By.ID, 'uploadImg').send_keys('C:/Users/qiulingfeng01/Desktop/测试上传文档/测试图片')
        sleep(3)
| {"/testing/test_file.py": ["/testing/base.py"], "/testing/test_js.py": ["/testing/base.py"], "/testing/test_alert.py": ["/testing/base.py"], "/testing/test_frame.py": ["/testing/base.py"], "/testing/addStuff/page/main_page.py": ["/testing/addStuff/page/address_page.py"], "/testing/addStuff/testCase/test_addStuff.py": ["/testing/addStuff/page/main_page.py"], "/testing/test_windows.py": ["/testing/base.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.