index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
70,620 | pncnmnp/Movie-Recommendation | refs/heads/master | /recommendation.py | import pandas as pd
from file_paths import *
from ast import literal_eval
import sys
class Recommendation:
    """Movie recommendations via the weighted IMDB rating formula.

    Loads the TMDB movies dataset on construction and provides helpers to
    normalise the JSON-encoded columns plus genre-aware "top movies" queries.
    """

    def __init__(self):
        # PATH_MOVIES is provided by `from file_paths import *` at module level.
        self.md = pd.read_csv(PATH_MOVIES)

    def filter_productions(self):
        """
        Replaces the JSON-encoded 'production_companies' column with a
        plain list of company names for every movie.
        """
        self.md["production_companies"] = (
            self.md["production_companies"]
            .apply(literal_eval)
            .apply(
                lambda companies: [company["name"] for company in companies]
                if isinstance(companies, list)
                else list()
            )
        )

    def filter_genres(self):
        """
        Maps each genre to each movie title: replaces the JSON-encoded
        'genres' column with a plain list of genre names.
        """
        self.md["genres"] = (
            self.md["genres"]
            .apply(literal_eval)
            .apply(
                lambda genres: [genre["name"] for genre in genres]
                if isinstance(genres, list)
                else list()
            )
        )

    def imdb_rating(self, film, C, m):
        """
        param: film - DataFrame row of the particular film
                      (should contain vote_count and vote_average)
               C - mean of the average ratings
               m - minimum votes required to be listed
        return: rating of the film (float)
        """
        vote_count = film["vote_count"]
        vote_average = film["vote_average"]
        # Weighted rating: the film's own average dominates once its vote
        # count is well above the qualification threshold m.
        return ((vote_count / (vote_count + m)) * vote_average) + (
            (m / (vote_count + m)) * C
        )

    def top_stats(self, md, percentile, genre=None):
        """
        param: md - movie pandas DataFrame
               percentile - cutoff percentile (0.0 to 1.0)
               genre - genre based recommendation (default = None)
                       (Genres in TMDB dataset are: Action, Adventure, Fantasy,
                       Science Fiction, Crime, Drama, Thriller, Animation, Family,
                       Western, Comedy, Romance, Horror, Mystery, History, War,
                       Music, Documentary, Foreign, TV Movie)
                       If genre is set to None, overall ranking will be returned.
        return: top_filtered - pandas DataFrame filtered by percentile threshold
                               and genres.  The attributes of the DataFrame are -
                               original_title, id, vote_average, vote_count,
                               popularity, release_date.
                imdb_C - mean value of all the vote_averages
                imdb_m - minimum votes required to be listed
        """
        if genre is not None:  # (was `genre != None`)
            # Explode the per-movie genre lists into one row per (movie, genre).
            genre_md = (
                md.apply(lambda movie: pd.Series(movie["genres"]), axis=1)
                .stack()
                .reset_index(level=1, drop=True)
            )
            genre_md.name = "genre"
            md = md.drop("genres", axis=1).join(genre_md)
            genre_list = md["genre"].fillna("").unique().tolist()
            if genre in genre_list:
                md = md[md["genre"] == genre]
            else:
                categories = ", ".join(genre_list)
                raise ValueError(CATEGORY_ERROR + categories)
        filter_count = md[md["vote_count"].notnull()]["vote_count"].astype("float")
        filter_average = md[md["vote_average"].notnull()]["vote_average"].astype("float")
        imdb_C = filter_average.mean()
        imdb_m = filter_count.quantile(percentile)
        top_filtered = md[
            (md["vote_count"] >= imdb_m)
            & (md["vote_count"].notnull())
            & (md["vote_average"].notnull())
        ][
            [
                "original_title",
                "id",
                "vote_average",
                "vote_count",
                "popularity",
                "release_date",
            ]
        ].copy()  # copy so callers can add columns without pandas view warnings
        top_filtered["vote_count"] = top_filtered["vote_count"].astype("float")
        top_filtered["vote_average"] = top_filtered["vote_average"].astype("float")
        return top_filtered, imdb_C, imdb_m

    def top_movies(self, md, percentile, limit, offset, genre=None):
        """
        param: md - movie pandas DataFrame
               percentile - cutoff percentile (0.0 to 1.0)
               limit - no. of movies to display
               offset - base value
               genre - genre based recommendation (default = None)
                       If genre is set to None, overall ranking will be returned.
        return: pandas DataFrame of top movies calculated using the imdb
                formula, sorted by descending rating, sliced to
                [offset, offset + limit).
        """
        top_filtered, imdb_C, imdb_m = self.top_stats(md, percentile, genre)
        top_filtered["rating"] = top_filtered.apply(
            self.imdb_rating, args=(imdb_C, imdb_m), axis=1
        )
        top_filtered = top_filtered.sort_values("rating", ascending=False)
        return top_filtered.iloc[offset : offset + limit]
if __name__ == "__main__":
    # CLI entry point: `python recommendation.py [genre]`.
    obj = Recommendation()
    obj.filter_genres()
    # Optional genre argument; None means overall ranking.
    # (was a bare `except:` around sys.argv[1])
    genre = sys.argv[1] if len(sys.argv) > 1 else None
    movies = obj.top_movies(obj.md, percentile=0.85, limit=10, offset=0, genre=genre)
    print(movies)
| {"/collaborative_filtering.py": ["/file_paths.py", "/content_based.py"], "/fetch_posters.py": ["/file_paths.py"], "/recommendation.py": ["/file_paths.py"], "/get_posters.py": ["/file_paths.py"], "/server.py": ["/get_posters.py", "/content_based.py", "/file_paths.py", "/recommendation.py"], "/content_based.py": ["/recommendation.py", "/file_paths.py"], "/hybrid.py": ["/content_based.py", "/collaborative_filtering.py"]} |
70,621 | pncnmnp/Movie-Recommendation | refs/heads/master | /get_posters.py | import pandas as pd
from file_paths import *
POSTER_BASE_URL = "https://image.tmdb.org/t/p/w500"        # full-size posters
POSTER_BASE_URL_SMALL = "https://image.tmdb.org/t/p/w185"  # thumbnail posters
# Shown whenever a movie has no usable poster entry in the dataset.
POSTER_NOT_FOUND_URL = "https://upload.wikimedia.org/wikipedia/commons/6/64/Poster_not_available.jpg"


def get_poster_paths(movie_ids, movie_titles, small=False):
    """
    param: movie_ids - iterable of TMDB movie ids
           movie_titles - iterable of titles, parallel to movie_ids
           small - True for the w185 thumbnail size, False for w500
    return: dict mapping each title to a poster URL (a placeholder image
            is used when the movie id has no poster in the dataset)
    """
    df = pd.read_csv(PATH_POSTERS)
    base_url = POSTER_BASE_URL_SMALL if small else POSTER_BASE_URL
    paths = dict()
    # zip replaces the original manually-maintained index counter.
    for m_id, title in zip(movie_ids, movie_titles):
        try:
            row = df.index[df["id"] == str(m_id)][0]
            paths[title] = base_url + df["poster_path"][row]
        except Exception:
            # No matching id (IndexError) or a NaN poster_path (TypeError):
            # fall back to the generic "poster not available" image.
            # (The original's small/large branches here were identical.)
            paths[title] = POSTER_NOT_FOUND_URL
    return paths
70,622 | pncnmnp/Movie-Recommendation | refs/heads/master | /file_paths.py | # file paths
# Raw TMDB / MovieLens CSV exports.
PATH_MOVIES = "./movies_meta/tmdb_5000_movies.csv"
PATH_RATINGS = "./movies_meta/ratings_small.csv"        # MovieLens "small" ratings sample
PATH_RATINGS_FULL = "./movies_meta/ratings.csv"         # full MovieLens ratings dump
PATH_INDIAN_MOVIES = "./movies_meta/indian_movies.csv"
PATH_CREDITS = "./movies_meta/tmdb_5000_credits.csv"
PATH_MOVIELENS_TO_TMDB = "./movies_meta/movielens_to_tmdb.csv"  # id mapping between the two datasets
PATH_POSTERS = "./movies_meta/posters.csv"
# Pickled intermediate results so recommenders can skip recomputation.
PATH_PICKLE_KEYWORDS = "./movies_meta/keywords.pkl"
PATH_PICKLE_DESC = "./movies_meta/desc.pkl"
PATH_MOVIE_QUOTES = "./movies_meta/movie_quotes.json"
PATH_COLL_FILTERING_CACHE = "./movies_meta/trainset.pkl"
# errors
CATEGORY_ERROR = "Category not found! Here are the genres: "
| {"/collaborative_filtering.py": ["/file_paths.py", "/content_based.py"], "/fetch_posters.py": ["/file_paths.py"], "/recommendation.py": ["/file_paths.py"], "/get_posters.py": ["/file_paths.py"], "/server.py": ["/get_posters.py", "/content_based.py", "/file_paths.py", "/recommendation.py"], "/content_based.py": ["/recommendation.py", "/file_paths.py"], "/hybrid.py": ["/content_based.py", "/collaborative_filtering.py"]} |
70,623 | pncnmnp/Movie-Recommendation | refs/heads/master | /server.py | from flask import Flask, render_template, request, redirect, abort
import pandas as pd
from get_posters import get_poster_paths
from content_based import ContentBased
from file_paths import *
from recommendation import Recommendation
from ast import literal_eval
from json import load
from random import randint
# Flask app serving the recommendation UI (templates/static live under ./flask).
app = Flask(__name__, template_folder="./flask/templates/", static_folder="./flask/static/")
DEFAULT_LIMIT = 21  # movies shown per results page
IMDB_ID_LEN = 7     # IMDB title ids are zero-padded to 7 digits
def get_meta(title, m_id):
    """Collect display metadata for a single movie.

    param: title - movie title (as in the TMDB dataset)
           m_id - TMDB movie id (may be None; the IMDB link then falls
                  back to a title search)
    return: (df_title, cast, crew, imdb_link) where df_title is a pandas
            Series of display attributes, cast holds the top five actor
            names, crew the director name(s), and imdb_link a best-effort
            IMDB URL.
    """
    rec = Recommendation()
    rec.filter_genres()
    rec.filter_productions()
    df_movies = rec.md
    df_credits = pd.read_csv(PATH_CREDITS)
    df_imdb_link = pd.read_csv(PATH_MOVIELENS_TO_TMDB)
    attributes = ["id", "original_title", "genres", "homepage",
                  "overview", "release_date", "production_companies",
                  "runtime", "tagline", "vote_average", "vote_count"]
    df_title = df_movies.iloc[df_movies.index[
        df_movies["original_title"] == title][0]][attributes]
    df_crew = df_credits.iloc[df_credits.index[df_credits["title"] == title][0]][["cast", "crew"]]
    cast = [cast["name"] for cast in literal_eval(df_crew["cast"])[0:5]]
    crew = [crew["name"] for crew in literal_eval(df_crew["crew"]) if crew["job"] in ["Director"]]
    try:
        # The mapping stores the IMDB id as a float-ish string; strip the
        # trailing ".0" and zero-pad to the canonical length.
        imdb_link = str(df_imdb_link.iloc[df_imdb_link.index[df_imdb_link["tmdbId"] == int(m_id)][0]]["imdbId"])[:-2]
        imdb_link = ("https://www.imdb.com/title/tt" + "0"*(IMDB_ID_LEN - len(imdb_link)) + imdb_link)
    except (IndexError, ValueError, TypeError):
        # Unknown id, unparsable id, or m_id is None: fall back to an IMDB
        # title search.  (was a bare `except:`)
        imdb_link = "https://www.imdb.com/search/title/?title=" + title
    return df_title, cast, crew, imdb_link
@app.route('/', methods=["GET"])
def home():
    """Landing page plus title-based and genre-based recommendation views.

    Dispatches on query args: 'recommend' -> content-based results for a
    title; 'genres' (+ 'offset') -> paginated top movies per genre;
    neither -> the plain homepage.
    """
    if "recommend" in request.args:
        try:
            title = request.args["recommend"]
            rec = ContentBased()
            did_you_mean = False
            df = rec.recommend(title, DEFAULT_LIMIT, full_search=True, keywords_and_desc=False, critics=False)
            poster_paths = get_poster_paths(df["id"].tolist(), df["original_title"].tolist())
            # The recommender may have fuzzy-matched the requested title to
            # the closest one present in the dataset.
            if rec.changed_title != title and rec.changed_title != str():
                did_you_mean = True
            else:
                rec.changed_title = title
            rec_title_meta = get_meta(rec.changed_title, None)
            rec_id = rec_title_meta[0]["id"]
            return render_template('recommendations.html',
                                   titles=df["original_title"].tolist(),
                                   images=poster_paths,
                                   votes=df["vote_average"].tolist(),
                                   m_id=df["id"].tolist(),
                                   rec_title=rec.changed_title,
                                   rec_id=rec_id,
                                   did_you_mean=did_you_mean)
        except Exception:
            # Unknown titles / recommendation failures surface as a 404.
            # (was a bare `except:`, which would also trap SystemExit and
            # KeyboardInterrupt)
            abort(404)
    elif "genres" in request.args:
        genre = request.args["genres"]
        if genre == "All":
            genre = None  # None means "overall ranking" downstream
        offset = int(request.args["offset"])
        gen_rec = Recommendation()
        gen_rec.filter_genres()
        df = gen_rec.top_movies(gen_rec.md, percentile=0.85, limit=DEFAULT_LIMIT, offset=offset, genre=genre)
        poster_paths = get_poster_paths(df["id"].tolist(), df["original_title"].tolist())
        return render_template('recommendations.html',
                               titles=df["original_title"].tolist(),
                               images=poster_paths,
                               votes=df["vote_average"].tolist(),
                               m_id=df["id"].tolist(),
                               rec_title=request.args["genres"],
                               offset=offset,
                               next_offset=offset+DEFAULT_LIMIT,
                               prev_offset=offset-DEFAULT_LIMIT,
                               rec_id=None,
                               did_you_mean=None)
    else:
        return render_template('homepage.html')
@app.route('/movie', methods=["GET"])
def movie_meta():
    """Detail page for one movie: metadata, poster and five similar titles."""
    if "title" in request.args:
        try:
            title = request.args["title"]
            m_id = request.args["id"]
            df_meta = get_meta(title, m_id)
            poster_path = get_poster_paths([int(m_id)], [title])[title]
            rec = ContentBased()
            df_rec = rec.recommend(title, 5, full_search=True, keywords_and_desc=False, critics=False)
            rec_poster_paths = get_poster_paths(df_rec["id"].tolist(), df_rec["original_title"].tolist(), small=True)
            return render_template('meta.html',
                                   title=df_meta[0]["original_title"],
                                   genres=df_meta[0]["genres"],
                                   homepage=df_meta[0]["homepage"],
                                   overview=df_meta[0]["overview"],
                                   release=df_meta[0]["release_date"],
                                   production=df_meta[0]["production_companies"],
                                   runtime=df_meta[0]["runtime"],
                                   tagline=df_meta[0]["tagline"],
                                   vote_average=df_meta[0]["vote_average"],
                                   vote_count=df_meta[0]["vote_count"],
                                   cast=df_meta[1],
                                   director=df_meta[2],
                                   poster_path=poster_path,
                                   rec_posters=rec_poster_paths,
                                   rec_titles=df_rec["original_title"].tolist(),
                                   rec_m_ids=df_rec["id"].tolist(),
                                   imdb_id=df_meta[3])
        except Exception:
            # Unknown movie / malformed id -> 404 (was a bare `except:`).
            abort(404)
    else:
        # The title query argument is mandatory for this page.
        abort(404)
@app.errorhandler(404)
def page_not_found(error):
    """Custom 404 page showing a random movie quote."""
    exception = 'Error 404: The page does not exist!'
    quotes = load(open(PATH_MOVIE_QUOTES))
    # randint is inclusive on both ends; the original randint(0, len(quotes))
    # could index one past the end and raise IndexError.
    quote = quotes[randint(0, len(quotes) - 1)]
    return render_template('error.html', exception=exception, quote=quote["quote"], quote_movie=quote["movie"]), 404
@app.after_request
def apply_caching(response):
    """Attach the standard security headers to every outgoing response."""
    security_headers = {
        'Strict-Transport-Security': 'max-age=31536000; includeSubDomains',
        'X-Content-Type-Options': 'nosniff',
        'X-Frame-Options': 'SAMEORIGIN',
        'X-XSS-Protection': '1; mode=block',
    }
    for header_name, header_value in security_headers.items():
        response.headers[header_name] = header_value
    return response
| {"/collaborative_filtering.py": ["/file_paths.py", "/content_based.py"], "/fetch_posters.py": ["/file_paths.py"], "/recommendation.py": ["/file_paths.py"], "/get_posters.py": ["/file_paths.py"], "/server.py": ["/get_posters.py", "/content_based.py", "/file_paths.py", "/recommendation.py"], "/content_based.py": ["/recommendation.py", "/file_paths.py"], "/hybrid.py": ["/content_based.py", "/collaborative_filtering.py"]} |
70,624 | pncnmnp/Movie-Recommendation | refs/heads/master | /content_based.py | from recommendation import Recommendation
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics.pairwise import linear_kernel, cosine_similarity
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
import pandas as pd
from file_paths import *
from numpy import argsort
from ast import literal_eval
from os.path import isfile
from difflib import get_close_matches
import sys
SCAN_SIZE = 30       # candidate pool size taken from the similarity matrix
ACTOR_LIMIT = 5      # only the top-billed actors contribute keywords
CREW = ["Director"]  # crew jobs that contribute to the keyword soup
CREW_WT = 2          # directors are repeated twice (heavier weight)
CAST_WT = 1          # actors appear once
class ContentBased:
    """Content-based movie recommender.

    Similarity is computed either from free text (overview + tagline via
    TF-IDF) or from a weighted "keyword soup" of movie keywords, cast,
    crew and genres (via a count vectorizer).
    """

    def __init__(self):
        self.md_credits = pd.read_csv(PATH_CREDITS)
        # Set when fuzzy matching replaced the requested title with the
        # closest title found in the dataset (see verify_title).
        self.changed_title = str()

    def make_desc(self, df):
        """
        param: df - movies pandas DataFrame
        return: pandas DataFrame with overview and tagline combined into
                the 'overview' column
        """
        df["tagline"] = df["tagline"].fillna("")
        df["overview"] = df["overview"] + df["tagline"]
        df["overview"] = df["overview"].fillna("")
        return df

    def make_keywords(self, df):
        """
        param: df - movies pandas DataFrame
        return: pandas DataFrame with attribute 'all_keys',
                which combines crew and cast members, movie-keywords and
                genres into one space-separated string per movie.
        """
        stemmer = SnowballStemmer("english")
        # Stem the raw keyword names so spelling variants count as one token.
        df["keywords"] = (
            df["keywords"]
            .apply(literal_eval)
            .apply(
                lambda keywords: [stemmer.stem(k["name"]) for k in keywords]
                if isinstance(keywords, list)
                else list()
            )
        )
        df = df.merge(self.md_credits, on="id")
        df["cast"] = (
            df["cast"]
            .apply(literal_eval)
            .apply(
                lambda actors: [
                    # To count actor name as one word like 'tomcruise'
                    actor["name"].lower().replace(" ", "")
                    for actor in actors[:ACTOR_LIMIT]
                ]
                if isinstance(actors, list)
                else list()
            )
        )
        df["crew"] = (
            df["crew"]
            .apply(literal_eval)
            .apply(
                lambda crews: [
                    # To count director name as one word like "stanleykubrick"
                    crew["name"].lower().replace(" ", "")
                    for crew in crews
                    if crew["job"] in CREW
                ]
                if isinstance(crews, list)
                else list()
            )
        )
        # List repetition weights the tokens: CREW_WT copies per director,
        # CAST_WT per actor.
        df["all_keys"] = (
            df["keywords"] + df["cast"] * CAST_WT + df["crew"] * CREW_WT + df["genres"]
        )
        df["all_keys"] = df["all_keys"].apply(
            lambda keywords: " ".join(keywords) if isinstance(keywords, list) else str()
        )
        return df

    def tfidf(self, df):
        """
        param: df - movies pandas DataFrame
        return: cosine similarity matrix based on overview and description
        """
        tfidf = TfidfVectorizer(
            analyzer="word", stop_words=stopwords.words("english"), ngram_range=(1, 2)
        )
        tfidf_mat = tfidf.fit_transform(df["overview"])
        # linear_kernel equals cosine similarity here since TF-IDF rows are
        # L2-normalised, and it is cheaper to compute.
        cosine_sim = linear_kernel(tfidf_mat, tfidf_mat)
        return cosine_sim

    def countvectorize(self, df):
        """
        param: df - movies pandas DataFrame
        return: cosine similarity matrix based on crew, cast, keywords and genre
        """
        count = CountVectorizer(
            analyzer="word", ngram_range=(1, 2), stop_words=stopwords.words("english")
        )
        count_matrix = count.fit_transform(df["all_keys"])
        cosine_sim = cosine_similarity(count_matrix, count_matrix)
        return cosine_sim

    def verify_title(self, df, title):
        """
        param: df - movies pandas DataFrame
               title - movie title (as in TMDB dataset)
        return: if title found - returns index value of the title from df
                else - raises ValueError
        """
        try:
            return df.index[df["title"] == title][0]
        except (IndexError, KeyError):  # was a bare `except:`
            # No exact match: fall back to the closest title by difflib's
            # similarity measure and remember the substitution.
            try:
                title = get_close_matches(title, df["title"].tolist())[0]
                self.changed_title = title
                return df.index[df["title"] == title][0]
            except (IndexError, KeyError):
                raise ValueError("No film : " + title + " found!")

    def recommend(
        self, title, limit, critics=False, full_search=False, use_pickle=True, keywords_and_desc=False
    ):
        """
        param: title - movie title (as in TMDB dataset)
               limit - no. of movies to display
               critics - True - will display critically acclaimed movies
                         False - will not sort movies on basis of their imdb rankings
                         (DEFAULT - False)
               full_search - True - will search using cast, crew, keywords
                                    and genre as metadata
                             False - will search using overview and tagline
                                     as metadata
                             (DEFAULT - False)
               use_pickle - True - will use pickled results
                            False - will compute the results from scratch
                            (DEFAULT - True)
               keywords_and_desc - True - will merge results of keywords
                                          and description
                                   False - will not merge results of keywords
                                           and description
        return: pandas DataFrame object with attributes -
                original_title, id, vote_average, vote_count, popularity, release_date
        """
        rec = Recommendation()
        rec.filter_genres()
        title_index = self.verify_title(rec.md, title)
        if keywords_and_desc:
            # NOTE(review): when either pickle file is missing, neither `df`
            # nor `rec_matrix` gets assigned and a NameError follows below —
            # the pickles must have been generated (by the full_search /
            # default modes) before this combined mode is used; confirm.
            if isfile(PATH_PICKLE_KEYWORDS) and isfile(PATH_PICKLE_DESC) and use_pickle:
                df_keywords = pd.read_pickle(PATH_PICKLE_KEYWORDS)
                df_desc = pd.read_pickle(PATH_PICKLE_DESC)
                rec_matrix_keywords = self.countvectorize(df_keywords)
                rec_matrix_desc = self.tfidf(df_desc)
                # Element-wise sum merges the two similarity signals.
                rec_matrix = rec_matrix_keywords + rec_matrix_desc
                df = df_keywords
        elif full_search:
            if isfile(PATH_PICKLE_KEYWORDS) and use_pickle:
                df = pd.read_pickle(PATH_PICKLE_KEYWORDS)
            else:
                df = self.make_keywords(rec.md)
                df.to_pickle(PATH_PICKLE_KEYWORDS)
            rec_matrix = self.countvectorize(df)
        else:
            if isfile(PATH_PICKLE_DESC) and use_pickle:
                df = pd.read_pickle(PATH_PICKLE_DESC)
            else:
                df = self.make_desc(rec.md)
                df.to_pickle(PATH_PICKLE_DESC)
            rec_matrix = self.tfidf(df)
        # Similarity row of the requested title; take the SCAN_SIZE most
        # similar movies, skipping the first entry (the movie itself).
        rec_movie = rec_matrix[title_index]
        ids = rec_movie.argsort()[::-1][1 : SCAN_SIZE + 1]
        if critics:
            # Re-rank the candidate pool by the weighted IMDB formula.
            return rec.top_movies(df.iloc[ids], percentile=0.50, limit=limit, offset=0)
        else:
            return df.iloc[ids[:limit]][
                [
                    "original_title",
                    "id",
                    "vote_average",
                    "vote_count",
                    "popularity",
                    "release_date",
                ]
            ]
if __name__ == "__main__":
    # Usage: python content_based.py "<movie title>"
    engine = ContentBased()
    recommendations = engine.recommend(
        sys.argv[1], 14, critics=True, full_search=False, use_pickle=False, keywords_and_desc=False
    )
    print(recommendations)
| {"/collaborative_filtering.py": ["/file_paths.py", "/content_based.py"], "/fetch_posters.py": ["/file_paths.py"], "/recommendation.py": ["/file_paths.py"], "/get_posters.py": ["/file_paths.py"], "/server.py": ["/get_posters.py", "/content_based.py", "/file_paths.py", "/recommendation.py"], "/content_based.py": ["/recommendation.py", "/file_paths.py"], "/hybrid.py": ["/content_based.py", "/collaborative_filtering.py"]} |
70,625 | pncnmnp/Movie-Recommendation | refs/heads/master | /hybrid.py | from content_based import ContentBased
from collaborative_filtering import CollaborativeFiltering
from collections import Counter
import pandas as pd
import sys
from numpy import array
from ast import literal_eval
class Hybrid:
    """Blends ContentBased and CollaborativeFiltering recommendations."""

    def __init__(self):
        # Number of movies returned by get_recommendation().
        self.LIMIT = 20

    def convert_literal_eval(self, json_str):
        """
        param: json_str - pandas DataFrame converted to JSON format
                          (orient='records', lines=True: one object per line)
        return: Literal Eval of the JSON string in List format
        """
        # Turn the newline-separated records into one list literal.
        return literal_eval("[" + json_str.replace("\n", ",") + "]")

    def get_movie_json(self, title, rec_coll, rec_content):
        """
        param: title - movie title to search (as mentioned in TMDB dataset)
               rec_coll - movies recommended by Collaborative Filtering
               rec_content - movies recommended by ContentBased
        NOTE: Parse both the results (rec_coll and rec_content)
              in convert_literal_eval before passing the parameters.
              See get_recommendation() for example.
        return: movie data in JSON format (implicitly None when the title is
                in neither result set)
        """
        # Collaborative results key the title as 'title'; content-based
        # results use 'original_title'.
        for movie in rec_coll:
            if movie["title"] == title:
                return movie
        for movie in rec_content:
            if movie["original_title"] == title:
                return movie

    def get_recommendation(
        self, movie, review, critics=False, full_search=False, use_pickle=True
    ):
        """
        For hybrid recommendations: LIMIT (instance var) determines no. of movies outputted
        param: movie - title of the movie (as mentioned in DB)
               review - rating of the movie on the scale of 1-5
               critics - (True or False type) Critically acclaimed recommendations
               full_search - True: Recommendations generated using keywords, cast, crew and genre
                             False: Recommendations generated on basis of tagline and overview
        return: pandas DataFrame object with attributes -
                title, id, vote_average, vote_count, popularity, release_date
                Recommendations which have frequency greater than 1 in both
                collaborative and content based filtering results are chosen
                as result.  If the total result found are less than limit then
                the difference is divided into a ratio of 2:1, for content based
                and collaborative results, i.e. out of the remaining results,
                2x of them will be content based and 1x collaborative based.
        """
        rec_content_obj, rec_coll_obj = ContentBased(), CollaborativeFiltering()
        rec_content = rec_content_obj.recommend(
            movie, self.LIMIT, critics, full_search, use_pickle
        )
        # Normalise both result sets to plain lists of dicts.
        rec_content = self.convert_literal_eval(
            rec_content.to_json(orient="records", lines=True)
        )
        print("Content Filtering completed.....")
        rec_coll_obj.LIMIT = 1000
        rec_coll = rec_coll_obj.user_model({movie: review})
        rec_coll = self.convert_literal_eval(
            rec_coll.to_json(orient="records", lines=True)
        )
        print("Collaborative Filtering completed.....")
        # Count how often each title appears across both result sets.
        movies_freq = Counter(
            list([movie["title"] for movie in rec_coll])
            + list([movie["original_title"] for movie in rec_content])
        ).most_common(self.LIMIT)
        # accepting movies whose frequency is greater than 1 from collaborative and content based results
        total_movies_rec = [movie[0] for movie in movies_freq if movie[1] > 1]
        movie_df = pd.DataFrame(
            columns=[
                "title",
                "id",
                "vote_average",
                "vote_count",
                "popularity",
                "release_date",
            ]
        )
        index = 0
        # First take every movie both recommenders agreed on.
        for movie in total_movies_rec:
            movie_json = self.get_movie_json(movie, rec_coll, rec_content)
            movie_df.loc[index] = array(list(movie_json.values()))
            index += 1
        if len(total_movies_rec) < self.LIMIT:
            # Fill the remainder 2:1 (content based : collaborative).
            rec_content_cutoff = ((self.LIMIT - len(total_movies_rec)) * 2) // 3
            start_index = index
            rec_title_name = {0: "original_title", 1: "title"}
            curr_rec = 0  # as we start with content based results
            for rec in [rec_content, rec_coll]:
                for movie in rec:
                    # Skip titles that are already in the result frame.
                    if (
                        movie[rec_title_name[curr_rec]]
                        not in movie_df["title"].tolist()
                    ):
                        movie_df.loc[index] = array(list(movie.values())[:6])
                        index += 1
                    # Switch to collaborative results once the content-based
                    # share of the remainder is filled.
                    if start_index + rec_content_cutoff == index and rec == rec_content:
                        curr_rec = 1
                        break
                    elif index == self.LIMIT:
                        break
                if index == self.LIMIT:
                    break
        print("Hybrid Filtering completed.....")
        return movie_df
if __name__ == "__main__":
    # Usage: python hybrid.py "<movie title>"
    hybrid = Hybrid()
    result = hybrid.get_recommendation(sys.argv[1], 5, True, True, True)
    print(result)
| {"/collaborative_filtering.py": ["/file_paths.py", "/content_based.py"], "/fetch_posters.py": ["/file_paths.py"], "/recommendation.py": ["/file_paths.py"], "/get_posters.py": ["/file_paths.py"], "/server.py": ["/get_posters.py", "/content_based.py", "/file_paths.py", "/recommendation.py"], "/content_based.py": ["/recommendation.py", "/file_paths.py"], "/hybrid.py": ["/content_based.py", "/collaborative_filtering.py"]} |
70,626 | yowmamasita/lastrun | refs/heads/master | /lastrun/__init__.py | from __future__ import print_function
import datetime
import pickle
def when():
    """Return the datetime recorded by the last call to record().

    return: datetime.datetime of the last run, or None when no run has
            been recorded yet (or the record file is unreadable)
    """
    try:
        # Pickle data is binary: the file must be opened in 'rb' mode.
        # (The original opened it in text mode 'r', which fails on
        # Python 3 — the bare except then masked the error as "never run".)
        with open('.lastrun', 'rb') as f:
            return pickle.load(f)
    except Exception:
        # Missing or corrupt record file means "never run".
        return None
def record():
    """Persist the current datetime as the last-run timestamp.

    return: the datetime.datetime that was recorded
    """
    # 'wb' because pickle writes binary data (the original used text mode
    # 'w', which raises TypeError on Python 3).
    with open('.lastrun', 'wb') as f:
        now = datetime.datetime.now()
        pickle.dump(now, f)
        return now
def recorder(first_run_log="It hasn't been run yet",
             last_run_log="%s since last run",
             printer=print):
    """Print how long it has been since the last run, then record this run.

    param: first_run_log - message printed when no previous run exists
           last_run_log - %-format string receiving the timedelta
           printer - output callable (default: print)
    return: datetime.timedelta since the previous run, or None (implicitly)
            on the first run
    """
    try:
        now = datetime.datetime.now()
        last = when()
        diff = now - last  # raises TypeError when last is None (first run)
        printer(last_run_log % diff)
        return diff
    except TypeError:
        # Narrowed from `except Exception as e` (with `e` unused): only the
        # "no previous timestamp" case is expected here.
        printer(first_run_log)
    finally:
        # Always stamp the current run, even on the first one.
        record()
| {"/test.py": ["/lastrun/__init__.py"]} |
70,627 | yowmamasita/lastrun | refs/heads/master | /test.py | import unittest
from lastrun import when, record, recorder
import datetime
import os
class TestLastrun(unittest.TestCase):
    """End-to-end checks for the lastrun module.

    NOTE(review): the two tests depend on unittest's alphabetical execution
    order ('test_first_run' before 'test_next_run') and share the on-disk
    '.lastrun' file written by recorder() — confirm before renaming them.
    """

    def test_first_run(self):
        # Before anything is recorded, both helpers report "no previous run".
        self.assertEqual(when(), None)
        self.assertEqual(recorder(), None)

    def test_next_run(self):
        # recorder() in the previous test stamped a timestamp, so both
        # helpers now return real values.
        self.assertIsInstance(when(), datetime.datetime)
        self.assertIsInstance(recorder(), datetime.timedelta)

    @classmethod
    def tearDownClass(cls):
        # Remove the record file created during the run.
        os.remove('.lastrun')
# Run the suite directly: python test.py
if __name__ == '__main__':
    unittest.main()
| {"/test.py": ["/lastrun/__init__.py"]} |
70,628 | yowmamasita/lastrun | refs/heads/master | /setup.py | from setuptools import setup
# Packaging metadata for the 'lastrun' distribution.
setup(name='lastrun',
      version='0.1',
      description='Record the date and time a program was last run',
      url='http://github.com/yowmamasita/lastrun',
      author='Ben Sarmiento',
      author_email='me@bensarmiento.com',
      license='MIT',
      packages=['lastrun'],
      zip_safe=True)
| {"/test.py": ["/lastrun/__init__.py"]} |
70,637 | WanjohiWanjohi/djangoprac | refs/heads/master | /vehicles/admin.py | from django.contrib import admin
from .models import Vehicle
# Register your models here.
class VehicleAdmin(admin.ModelAdmin):
    """Admin change-list configuration for Vehicle rows."""
    list_display = [
        'vehicle_registration_number',
        'vehicle_image',
        'vehicle_brand',
        'vehicle_manufactured',
    ]


admin.site.register(Vehicle, VehicleAdmin)
70,638 | WanjohiWanjohi/djangoprac | refs/heads/master | /vehicles/views.py | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
    """Render the landing page.

    param: request - incoming HttpRequest
    return: rendered 'index.html' HttpResponse
    """
    # The template context must be a dict; the original passed a tuple (),
    # which Django's context handling rejects with a TypeError.
    data = {}
    return render(request, 'index.html', data)
def login(request):
    """Placeholder login view returning a static HTML snippet."""
    message = "<pre> Learning django views with HTTP Response</pre>"
    return HttpResponse(message)
70,639 | WanjohiWanjohi/djangoprac | refs/heads/master | /vehicles/models.py | from django.db import models
# Create your models here.
# (code, human-readable label) choices for Vehicle.vehicle_brand.
BRANDS = [
    ('HN', 'Honda'),
    ('TY', 'Toyota'),
    ('MD', 'Mercedes'),  # fixed typo: label was 'Merecedes'
    ('BMW', 'BMW'),
]
class Vehicle(models.Model):
    """A vehicle record keyed by its registration number."""

    # Registration plate (required: blank=False).
    vehicle_registration_number = models.CharField(max_length=16 , blank=False)
    vehicle_color = models.CharField(max_length=128)
    # Uploaded photo, stored under media/.
    vehicle_image = models.ImageField(upload_to='media/')
    # One of the BRANDS choice codes.
    vehicle_brand = models.CharField(max_length=32 , choices=BRANDS)
    # Stamped once when the row is created (auto_now_add).
    vehicle_manufactured = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        # Shown in the admin and shell representations.
        return self.vehicle_registration_number
70,642 | tolgagokmen/newsp | refs/heads/master | /core/app/news/handlers.py | import datetime
import uuid
from tornado import gen
from ..common import handlers
from ..common.request import application_json, get_json
from ..common.validation import schema
from ..common.utils import paginated_response, slugify
from ..search.config import ESIndex, ESNewsDocType
from .entities import PostEntity
class PostsHandler(handlers.JSONHandler):
    """Create/list/fetch handlers for news 'Post' documents.

    Posts are persisted in MongoDB (via the Motor client on
    self.application.db) and mirrored into Elasticsearch for search.
    """

    @application_json
    @schema('/news/create.json')
    @gen.coroutine
    def post(self, *args, **kwargs):
        """
        Creates a new 'Post'.
        """
        body = get_json(self.request)
        # Server-generated fields: a uuid primary key, timestamps, and a
        # URL slug derived from the subject.
        body.update({
            'uuid': str(uuid.uuid4()),
            'created_at': datetime.datetime.utcnow(),
            'updated_at': datetime.datetime.utcnow(),
            'slug': slugify(body['subject'])
        })
        result = yield self.application.db.news.insert_one(body, callback=None)
        if result:
            # Mirror the document into Elasticsearch under the same uuid so
            # the search API can find it.
            self.application.es.index(
                index=ESIndex.news.value, doc_type=ESNewsDocType.post.value,
                body=PostEntity(**body).to_es_payload(), id=body['uuid']
            )
        # NOTE(review): this line runs even when `result` is falsy, which
        # would raise AttributeError — confirm insert_one always yields a
        # result object here.
        self.write_json({'_id': str(result.inserted_id)})

    def get(self, id=None, *args, **kwargs):
        # Dispatch: no id -> list endpoint, id -> detail endpoint.  Both
        # targets are coroutines and their future is returned for Tornado
        # to resolve — presumably the framework awaits it; verify against
        # the Tornado version in use.
        if not id:
            return self._get_all_posts()
        return self._get_specific_post(id)

    @gen.coroutine
    def _get_all_posts(self):
        """
        Retrieves all 'Posts'.
        :return:
        :rtype: dict
        """
        # Pagination controls arrive as strings on the query string.
        offset = self.get_query_argument('offset', 0)
        limit = self.get_query_argument('limit', 10)
        result = []
        cursor = self.application.db.news.find({}).skip(int(offset)).limit(int(limit))
        count = yield self.application.db.news.find({}).count()
        while (yield cursor.fetch_next):
            post = cursor.next_object()
            result.append(PostEntity.from_persisted(post).to_dict())
        self.write_json(paginated_response(result, count, offset, limit))
        self.finish()

    @gen.coroutine
    def _get_specific_post(self, id):
        """
        Retrieves a specific 'Post'.
        :param id: 'uuid' of the 'Post' to be retrieved
        :return:
        :rtype: dict
        """
        post = yield self.application.db.news.find_one({'uuid': id})
        self.write_json(PostEntity.from_persisted(post).to_dict())
| {"/core/app/news/handlers.py": ["/core/app/common/request.py", "/core/app/common/validation.py", "/core/app/common/utils.py", "/core/app/search/config.py", "/core/app/news/entities.py"], "/core/app/common/validation.py": ["/core/app/common/request.py"], "/core/app/search/handlers.py": ["/core/app/common/handlers.py", "/core/app/common/utils.py", "/core/app/common/validation.py", "/core/app/search/config.py", "/core/app/search/service.py"], "/core/app/__init__.py": ["/core/app/common/database.py", "/core/app/search/service.py"]} |
70,643 | tolgagokmen/newsp | refs/heads/master | /core/run.py | import json
import tornado.ioloop
import tornado.web
from app import Application
class JSONHandler(tornado.web.RequestHandler):
    """Request handler that serialises responses as pretty-printed JSON."""

    def jsonify(self, data):
        # Stable key order + indentation keeps responses diff-friendly.
        payload = json.dumps(data, sort_keys=True, indent=4)
        self.add_header('Content-Type', 'application/json')
        self.write(payload)
if __name__ == "__main__":
    # Bind on all interfaces, port 5000, and run the IOLoop forever.
    server = Application()
    server.listen(5000, '0.0.0.0')
    loop = tornado.ioloop.IOLoop.current()
    loop.start()
| {"/core/app/news/handlers.py": ["/core/app/common/request.py", "/core/app/common/validation.py", "/core/app/common/utils.py", "/core/app/search/config.py", "/core/app/news/entities.py"], "/core/app/common/validation.py": ["/core/app/common/request.py"], "/core/app/search/handlers.py": ["/core/app/common/handlers.py", "/core/app/common/utils.py", "/core/app/common/validation.py", "/core/app/search/config.py", "/core/app/search/service.py"], "/core/app/__init__.py": ["/core/app/common/database.py", "/core/app/search/service.py"]} |
70,644 | tolgagokmen/newsp | refs/heads/master | /core/app/common/validation.py | import os
import json
from functools import wraps
import jsonschema
from .request import get_request_payload
SCHEMAS_DIR = 'app/specifications/schemas'
SCHEMAS_PATH = os.path.join(os.getcwd(), SCHEMAS_DIR)
def validate_schema(payload, schema):
    """
    Validates the payload against the defined json schema.
    :param payload: incoming request data
    :param schema: json schema, the payload should be validated against
    :return: list of error messages (empty when the payload is valid)
    """
    validator = jsonschema.Draft4Validator(schema, format_checker=jsonschema.FormatChecker())
    # Sort by string form so the error order is deterministic.
    return [error.message for error in sorted(validator.iter_errors(payload), key=str)]
def get_schema(path):
    """
    Read the .json schema and returns its content.
    :param path: path to schema
    :return: schema content
    :rtype: dict
    """
    # JSON files are UTF-8 by spec; be explicit instead of relying on the
    # platform's locale-dependent default encoding.
    with open(path, 'r', encoding='utf-8') as f:
        return json.load(f)
def schema(path=None):
    """
    Validate request data against a json schema.

    Decorator specifying the schema file (relative to SCHEMAS_PATH) that
    the endpoint's request payload must satisfy.
    :param path: path to schema file
    """
    def decorator(func):
        @wraps(func)
        def wrapped(self, *args, **kwargs):
            schema_file = os.path.join(SCHEMAS_PATH, path.lstrip('/'))
            payload = get_request_payload(self.request)
            errors = validate_schema(payload, get_schema(schema_file))
            if errors:
                raise Exception(str(errors))
            return func(self, *args, **kwargs)
        return wrapped
    return decorator
| {"/core/app/news/handlers.py": ["/core/app/common/request.py", "/core/app/common/validation.py", "/core/app/common/utils.py", "/core/app/search/config.py", "/core/app/news/entities.py"], "/core/app/common/validation.py": ["/core/app/common/request.py"], "/core/app/search/handlers.py": ["/core/app/common/handlers.py", "/core/app/common/utils.py", "/core/app/common/validation.py", "/core/app/search/config.py", "/core/app/search/service.py"], "/core/app/__init__.py": ["/core/app/common/database.py", "/core/app/search/service.py"]} |
70,645 | tolgagokmen/newsp | refs/heads/master | /core/app/search/handlers.py | from ..common.handlers import JSONHandler
from ..common.utils import paginated_response
from ..common.validation import schema
from .config import ESIndex, ESNewsDocType
from .service import perform_search
class NewsSearchHandler(JSONHandler):
    """Full-text search endpoint over news posts in Elasticsearch."""

    def _news_query(self, query_str):
        # query_string search over subject + content, wrapped in wildcards.
        # NOTE(review): query_str is interpolated verbatim into ES
        # query_string syntax, so reserved characters (':', '/', '(' ...)
        # in user input can break or alter the query — consider escaping or
        # a match query; confirm intended behaviour.
        return {
            'query': {
                'query_string': {
                    'fields': ['subject', 'content'],
                    'query': '**{}**'.format(query_str)
                }
            }
        }

    @schema('/search/news.json')
    def get(self, *args, **kwargs):
        # 'query' is required (schema-validated); pagination is optional.
        query = self.get_query_argument('query')
        limit = self.get_query_argument('limit', None)
        offset = self.get_query_argument('offset', 0)
        hits, total = perform_search(
            client=self.application.es, query=self._news_query(query),
            indexes=ESIndex.news.value, types=ESNewsDocType.post.value,
            offset=offset, limit=limit
        )
        # Strip the ES envelope down to the stored documents.
        result = [hit['_source'] for hit in hits]
        self.write_json(paginated_response(result, total, offset, limit))
| {"/core/app/news/handlers.py": ["/core/app/common/request.py", "/core/app/common/validation.py", "/core/app/common/utils.py", "/core/app/search/config.py", "/core/app/news/entities.py"], "/core/app/common/validation.py": ["/core/app/common/request.py"], "/core/app/search/handlers.py": ["/core/app/common/handlers.py", "/core/app/common/utils.py", "/core/app/common/validation.py", "/core/app/search/config.py", "/core/app/search/service.py"], "/core/app/__init__.py": ["/core/app/common/database.py", "/core/app/search/service.py"]} |
70,646 | tolgagokmen/newsp | refs/heads/master | /core/app/common/utils.py | from unidecode import unidecode
def slugify(string):
    """
    Slugify a string: transliterate to ASCII, collapse whitespace,
    lower-case, and join words with hyphens.
    :param string: string to be slugified
    :return: slugified string
    :rtype: str
    """
    normalized = ' '.join(unidecode(string).split())
    return normalized.lower().replace(' ', '-')
def paginated_response(items, total, offset, limit):
    """
    Build the standard paginated response envelope.
    :param items: items for the current page
    :param total: total count of items across all pages
    :param offset: pagination offset
    :param limit: pagination limit
    :return: dict with 'items' and a 'pagination' sub-dict
    """
    pagination = {
        'limit': limit,
        'offset': offset,
        'total': total
    }
    return {'items': items, 'pagination': pagination}
| {"/core/app/news/handlers.py": ["/core/app/common/request.py", "/core/app/common/validation.py", "/core/app/common/utils.py", "/core/app/search/config.py", "/core/app/news/entities.py"], "/core/app/common/validation.py": ["/core/app/common/request.py"], "/core/app/search/handlers.py": ["/core/app/common/handlers.py", "/core/app/common/utils.py", "/core/app/common/validation.py", "/core/app/search/config.py", "/core/app/search/service.py"], "/core/app/__init__.py": ["/core/app/common/database.py", "/core/app/search/service.py"]} |
70,647 | tolgagokmen/newsp | refs/heads/master | /core/app/common/database.py | import motor.motor_tornado
def init_motor_client(host='mongo', port=27017):
    """
    Initializes an asynchronous Motor client.
    It is recommended to use this function only once,
    thus reusing the same client for every request.
    :param host: MongoDB host (defaults to the docker-compose service name)
    :param port: MongoDB port
    :return: the `newsp` Motor database handle
    """
    # Parameterized so tests/deployments can point elsewhere; the no-arg
    # call is byte-for-byte compatible with the previous behaviour.
    conn = motor.motor_tornado.MotorClient(host, port)
    return conn.newsp
| {"/core/app/news/handlers.py": ["/core/app/common/request.py", "/core/app/common/validation.py", "/core/app/common/utils.py", "/core/app/search/config.py", "/core/app/news/entities.py"], "/core/app/common/validation.py": ["/core/app/common/request.py"], "/core/app/search/handlers.py": ["/core/app/common/handlers.py", "/core/app/common/utils.py", "/core/app/common/validation.py", "/core/app/search/config.py", "/core/app/search/service.py"], "/core/app/__init__.py": ["/core/app/common/database.py", "/core/app/search/service.py"]} |
70,648 | tolgagokmen/newsp | refs/heads/master | /core/app/__init__.py | import tornado.web
from .common.database import init_motor_client
from .news import handlers as news_handlers
from .search import handlers as search_handlers
from .search.service import init_es_client
class Application(tornado.web.Application):
    """Tornado application: URL routing plus shared, long-lived DB/ES clients."""
    def __init__(self):
        # Create the shared clients once per process and reuse them per request.
        self.db = init_motor_client()
        self.es = init_es_client()
        routes = [
            (r'/news', news_handlers.PostsHandler),
            (r'/news/([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})', news_handlers.PostsHandler),
            (r'/search', search_handlers.NewsSearchHandler)
        ]
        settings = {
            'debug': True
        }
        tornado.web.Application.__init__(self, routes, **settings)
| {"/core/app/news/handlers.py": ["/core/app/common/request.py", "/core/app/common/validation.py", "/core/app/common/utils.py", "/core/app/search/config.py", "/core/app/news/entities.py"], "/core/app/common/validation.py": ["/core/app/common/request.py"], "/core/app/search/handlers.py": ["/core/app/common/handlers.py", "/core/app/common/utils.py", "/core/app/common/validation.py", "/core/app/search/config.py", "/core/app/search/service.py"], "/core/app/__init__.py": ["/core/app/common/database.py", "/core/app/search/service.py"]} |
class PostEntity():
    """In-memory representation of a news post (MongoDB document / ES payload)."""
    def __init__(self, **kwargs):
        """
        Build a post from keyword arguments.
        :raises KeyError: when any of the expected fields is missing
        """
        allowed_keys = [
            '_id', 'content', 'created_at', 'image',
            'slug', 'subject', 'updated_at', 'uuid'
        ]
        for key in allowed_keys:
            self.__dict__[key] = kwargs[key]
    @classmethod
    def from_persisted(cls, persisted):
        """
        Alternate constructor from a persisted MongoDB document; ObjectId,
        datetimes and UUID are stringified for JSON serialization.
        """
        return cls(
            _id=str(persisted['_id']),
            content=persisted['content'],
            created_at=str(persisted['created_at']),
            image=persisted['image'],
            slug=persisted['slug'],
            subject=persisted['subject'],
            updated_at=str(persisted['updated_at']),
            uuid=str(persisted['uuid'])
        )
    def to_dict(self):
        """Return the post's attributes as a dict (the live __dict__, as before)."""
        return self.__dict__
    def to_es_payload(self):
        """
        Return the post as an Elasticsearch payload: all fields except '_id'.
        BUG FIX: this used to pop '_id' from the instance's own __dict__,
        permanently mutating the entity (a later to_dict() or a second
        to_es_payload() call would silently be missing '_id'). Work on a
        shallow copy instead.
        """
        payload = dict(self.__dict__)
        payload.pop('_id', None)
        return payload
| {"/core/app/news/handlers.py": ["/core/app/common/request.py", "/core/app/common/validation.py", "/core/app/common/utils.py", "/core/app/search/config.py", "/core/app/news/entities.py"], "/core/app/common/validation.py": ["/core/app/common/request.py"], "/core/app/search/handlers.py": ["/core/app/common/handlers.py", "/core/app/common/utils.py", "/core/app/common/validation.py", "/core/app/search/config.py", "/core/app/search/service.py"], "/core/app/__init__.py": ["/core/app/common/database.py", "/core/app/search/service.py"]} |
70,650 | tolgagokmen/newsp | refs/heads/master | /core/app/search/service.py | import os
import json
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import NotFoundError, RequestError
from . import config
# Directory holding the per-index mapping JSON files. Resolved from the
# process working directory, so the service must be started from the
# project root. NOTE: no trailing slash - always join with os.path.join.
MAPPINGS_DIR = os.getcwd() + '/app/search/mappings'
# Default index settings: lower-case and ASCII-fold every token so that
# accented queries match unaccented text (and vice versa).
ASCII_FOLDING_TOKEN_FILTER = {
    "analysis": {
        "analyzer": {
            "default": {
                "tokenizer": "standard",
                "filter": ["lowercase", "asciifolding"]
            }
        }
    }
}
def get_index_mappings(index):
    """
    Returns mappings for a specific index, loaded from MAPPINGS_DIR.
    :param index: index name
    :return: index mappings
    :rtype: dict
    :raises FileNotFoundError: when no mapping file exists for the index
    """
    # BUG FIX: the previous concatenation (MAPPINGS_DIR + index + '.json')
    # produced e.g. '.../mappingsnews.json' because MAPPINGS_DIR has no
    # trailing slash; os.path.join inserts the separator correctly.
    with open(os.path.join(MAPPINGS_DIR, index + '.json'), 'r') as f:
        return json.load(f)
def set_index_settings(mappings, settings=None):
    """
    Wrap mappings (and optional settings) into an index-creation body.
    Falls back to the ASCII-folding analyzer when settings are not given.
    """
    effective_settings = settings if settings else ASCII_FOLDING_TOKEN_FILTER
    return {'settings': effective_settings, 'mappings': mappings}
def _setup_indexes_settings(client):
    """Create the news index with default settings/mappings if it is absent."""
    index_name = config.ESIndex.news.value
    if client.indices.exists(index=index_name):
        return
    body = set_index_settings(mappings=get_index_mappings(index_name))
    client.indices.create(index=index_name, body=body)
def init_es_client(hosts=None, port=9200, http_auth=('elastic', 'changeme')):
    """
    Initializes an Elasticsearch client and ensures the indexes exist.
    It is recommended to use this function only once,
    thus reusing the same client for every request.
    :param hosts: list of ES hosts (defaults to the docker-compose 'elasticsearch' host)
    :param port: ES HTTP port
    :param http_auth: (user, password) tuple
    :return: Elasticsearch client
    """
    # SECURITY NOTE: the defaults are Elasticsearch's well-known demo
    # credentials; override them (e.g. from the environment) outside local
    # development rather than shipping them hard-coded.
    client = Elasticsearch(
        hosts or ['elasticsearch'], port=port,
        http_auth=http_auth
    )
    _setup_indexes_settings(client)
    return client
def perform_search(client, query, indexes=None, types=None, offset=None, limit=10):
    """
    Returns hits and the hit count for a specific query.
    :param client: Elasticsearch client
    :param query: query body (dict)
    :param indexes: index name(s) to search, or None for all
    :param types: document type(s) to search, or None for all
    :param offset: pagination offset (passed as ES `from_`)
    :param limit: page size (passed as ES `size`)
    :return: (hits, total) tuple
    :raises Exception: when the index is missing or the query is malformed
    """
    try:
        res = client.search(
            index=indexes,
            doc_type=types,
            from_=offset,
            size=limit,
            body=query
        )
    except (NotFoundError, RequestError) as e:
        # BUG FIX: chain the original ES error (`from e`) so tracebacks keep
        # the root cause instead of discarding it.
        raise Exception(str(e)) from e
    return res['hits']['hits'], res['hits']['total']
| {"/core/app/news/handlers.py": ["/core/app/common/request.py", "/core/app/common/validation.py", "/core/app/common/utils.py", "/core/app/search/config.py", "/core/app/news/entities.py"], "/core/app/common/validation.py": ["/core/app/common/request.py"], "/core/app/search/handlers.py": ["/core/app/common/handlers.py", "/core/app/common/utils.py", "/core/app/common/validation.py", "/core/app/search/config.py", "/core/app/search/service.py"], "/core/app/__init__.py": ["/core/app/common/database.py", "/core/app/search/service.py"]} |
70,651 | tolgagokmen/newsp | refs/heads/master | /core/app/search/config.py | from enum import Enum
class ESIndex(Enum):
    # Elasticsearch index names used by the search service.
    news = 'news'
class ESNewsDocType(Enum):
    # Document types stored in the news index.
    post = 'post'
| {"/core/app/news/handlers.py": ["/core/app/common/request.py", "/core/app/common/validation.py", "/core/app/common/utils.py", "/core/app/search/config.py", "/core/app/news/entities.py"], "/core/app/common/validation.py": ["/core/app/common/request.py"], "/core/app/search/handlers.py": ["/core/app/common/handlers.py", "/core/app/common/utils.py", "/core/app/common/validation.py", "/core/app/search/config.py", "/core/app/search/service.py"], "/core/app/__init__.py": ["/core/app/common/database.py", "/core/app/search/service.py"]} |
70,652 | tolgagokmen/newsp | refs/heads/master | /core/app/common/request.py | from tornado.escape import json_decode, to_unicode
def get_content_type(request):
    """
    Return the Content-Type header of the current request.
    :param request: tornado request object
    :return: header value, or None when the header is absent
    :rtype: str or None
    """
    headers = request.headers
    return headers.get('Content-Type')
def application_json(func):
    """
    Ensures that the decorated request handler only runs when the
    'Content-Type' header is set to 'application/json'.
    :param func: tornado request method handler
    :raises Exception: when the Content-Type header does not match
    """
    from functools import wraps

    # BUG FIX: without functools.wraps the wrapper hid the handler's
    # __name__/__doc__, breaking introspection and debugging output.
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        if get_content_type(self.request) != 'application/json':
            raise Exception('Content-Type is not application/json')
        return func(self, *args, **kwargs)
    return wrapper
def get_json(request):
    """
    Decode the request body and return it as a dict.
    :param request: tornado request object
    :return: posted request body
    :rtype: dict
    """
    body = request.body
    return json_decode(body)
def get_query_params(request):
    """
    Return the query parameters of a request as a dict.
    Only the first value of each parameter is kept, and numeric-looking
    values are converted via serialize_number.
    :param request: request object
    :rtype: dict
    """
    return {
        name: serialize_number(to_unicode(values[0]))
        for name, values in request.arguments.items()
    }
def get_request_params(request):
    """
    Combine URL query parameters with the JSON body.
    Query-string values take precedence on key clashes.
    """
    query_params = get_query_params(request)
    body_params = get_json(request)
    merged = {**body_params}
    merged.update(query_params)
    return merged
def get_request_payload(request):
    """
    Returns the request payload: query and/or body parameters,
    depending on the request method.
    :param request: request object
    :return: combined parameters as a dict
    :raises KeyError: for unsupported HTTP methods
    """
    # BUG FIX: the docstring promised GET/PUT/POST/DELETE but only GET and
    # POST were mapped, so PUT/DELETE requests crashed with a KeyError.
    dispatch = {
        'GET': get_query_params,
        'POST': get_request_params,
        'PUT': get_request_params,    # body + query, like POST
        'DELETE': get_query_params,   # query string only, like GET
    }
    return dispatch[request.method](request)
def serialize_number(value):
    """
    Convert a string to int if possible, otherwise to float; when neither
    conversion succeeds, return the original string unchanged.
    """
    for cast in (int, float):
        try:
            return cast(value)
        except ValueError:
            pass
    return value
| {"/core/app/news/handlers.py": ["/core/app/common/request.py", "/core/app/common/validation.py", "/core/app/common/utils.py", "/core/app/search/config.py", "/core/app/news/entities.py"], "/core/app/common/validation.py": ["/core/app/common/request.py"], "/core/app/search/handlers.py": ["/core/app/common/handlers.py", "/core/app/common/utils.py", "/core/app/common/validation.py", "/core/app/search/config.py", "/core/app/search/service.py"], "/core/app/__init__.py": ["/core/app/common/database.py", "/core/app/search/service.py"]} |
70,653 | tolgagokmen/newsp | refs/heads/master | /core/app/common/handlers.py | import json
import tornado.web
class JSONHandler(tornado.web.RequestHandler):
    """
    Base request handler; every other handler should inherit from it.
    Guarantees that responses are served as pretty-printed JSON.
    """
    def set_default_headers(self):
        """Advertise JSON as the response content type."""
        self.set_header('Content-Type', 'application/json')
    def write_json(self, data):
        """Serialize `data` deterministically (sorted keys, indented) and write it."""
        payload = json.dumps(data, sort_keys=True, indent=4)
        self.write(payload)
| {"/core/app/news/handlers.py": ["/core/app/common/request.py", "/core/app/common/validation.py", "/core/app/common/utils.py", "/core/app/search/config.py", "/core/app/news/entities.py"], "/core/app/common/validation.py": ["/core/app/common/request.py"], "/core/app/search/handlers.py": ["/core/app/common/handlers.py", "/core/app/common/utils.py", "/core/app/common/validation.py", "/core/app/search/config.py", "/core/app/search/service.py"], "/core/app/__init__.py": ["/core/app/common/database.py", "/core/app/search/service.py"]} |
70,654 | vickyvishal/analytics4App | refs/heads/main | /app.py | from flask import Flask, request, jsonify
from bertKeras import getScores
app = Flask(__name__)

@app.route('/get_bert_result', methods=['GET', 'POST'])
def checkSimilarity():
    """
    Score similarity between two texts posted as form fields.

    Expects a POST with form fields `sentence1` and `sentence2`; responds
    with `{"ok": true, "data": [label, confidence]}`.
    """
    if request.method == 'POST' and request.form:
        sentence1 = request.form.getlist("sentence1")
        sentence2 = request.form.getlist("sentence2")
        score = getScores(sentence1, sentence2)
        return jsonify({'ok': True, 'data': score}), 200
    # BUG FIX: GET requests (and POSTs without form data) used to fall
    # through and return None, which Flask turns into a 500 error.
    return jsonify({
        'ok': False,
        'error': 'POST form fields sentence1 and sentence2 are required'
    }), 400
if __name__ == "__main__":
    # Development server only; use a production WSGI server for deployment.
    app.run(debug=True, port=7898)
| {"/app.py": ["/bertKeras.py"]} |
70,655 | vickyvishal/analytics4App | refs/heads/main | /bertKeras.py | import numpy as np
import pandas as pd
import tensorflow as tf
import transformers
import os
import sys
max_length = 128  # Maximum length of input sentence to the model.
batch_size = 32  # Samples per training batch.
epochs = 2  # Epochs for the frozen-BERT training phase.
def getScores(sentence1, sentence2):
    """
    Train a BERT-based similarity classifier and score one sentence pair.

    NOTE(review): this retrains the entire model on every call, which makes
    each request extremely slow; training should happen once offline and the
    saved model be loaded here instead.

    :param sentence1: first text (e.g. resume snippet)
    :param sentence2: second text (e.g. job-description snippet)
    :return: (label, confidence) tuple such as ("Five", " 87.23%")
    """
    # Labels in our dataset, ordered to match the softmax output indices.
    labels = ["Five", "Four", "Three", "Two", "One"]
    # Resolve data paths relative to this file, not the current working dir.
    __location__ = os.path.realpath(
        os.path.join(os.getcwd(), os.path.dirname(__file__)))
    df = pd.read_excel(os.path.join(
        __location__, 'labeled_score/MALSTM_V3-1.xlsx'))
    # 60/20/20 train/validation/test split after a seeded shuffle.
    train_df, valid_df, test_df = np.split(df.sample(frac=1, random_state=42), [
        int(.6*len(df)), int(.8*len(df))])
    # Map the textual score names onto class indices 0..4.
    train_df["label"] = train_df["Score_name"].apply(
        lambda x: 0 if x == "One" else 1 if x == "Two" else 2 if x == "Three" else 3 if x == "Four" else 4
    )
    y_train = tf.keras.utils.to_categorical(train_df.label, num_classes=5)
    valid_df["label"] = valid_df["Score_name"].apply(
        lambda x: 0 if x == "One" else 1 if x == "Two" else 2 if x == "Three" else 3 if x == "Four" else 4
    )
    y_val = tf.keras.utils.to_categorical(valid_df.label, num_classes=5)
    test_df["label"] = test_df["Score_name"].apply(
        lambda x: 0 if x == "One" else 1 if x == "Two" else 2 if x == "Three" else 3 if x == "Four" else 4
    )
    y_test = tf.keras.utils.to_categorical(test_df.label, num_classes=5)
    print(y_test)

    class BertSemanticDataGenerator(tf.keras.utils.Sequence):
        """Generates batches of data.
        Args:
            sentence_pairs: Array of premise and hypothesis input sentences.
            labels: Array of labels.
            batch_size: Integer batch size.
            shuffle: boolean, whether to shuffle the data.
            include_targets: boolean, whether to include the labels.
        Returns:
            Tuples `([input_ids, attention_mask, token_type_ids], labels)`
            (or just `[input_ids, attention_mask, token_type_ids]`
            if `include_targets=False`)
        """
        def __init__(
            self,
            sentence_pairs,
            labels,
            batch_size=batch_size,
            shuffle=True,
            include_targets=True,
        ):
            self.sentence_pairs = sentence_pairs
            self.labels = labels
            self.shuffle = shuffle
            self.batch_size = batch_size
            self.include_targets = include_targets
            # Load our BERT tokenizer to encode the text; uses the
            # bert-base-uncased pretrained vocabulary.
            self.tokenizer = transformers.BertTokenizer.from_pretrained(
                "bert-base-uncased", do_lower_case=True
            )
            self.indexes = np.arange(len(self.sentence_pairs))
            self.on_epoch_end()
        def __len__(self):
            # Denotes the number of batches per epoch (drops a partial batch).
            return len(self.sentence_pairs) // self.batch_size
        def __getitem__(self, idx):
            # Retrieves the batch of index.
            indexes = self.indexes[idx * self.batch_size : (idx + 1) * self.batch_size]
            sentence_pairs = self.sentence_pairs[indexes]
            # With BERT tokenizer's batch_encode_plus batch of both the sentences are
            # encoded together and separated by [SEP] token.
            encoded = self.tokenizer.batch_encode_plus(
                sentence_pairs.tolist(),
                add_special_tokens=True,
                max_length=max_length,
                return_attention_mask=True,
                return_token_type_ids=True,
                pad_to_max_length=True,
                return_tensors="tf",
            )
            # Convert batch of encoded features to numpy array.
            input_ids = np.array(encoded["input_ids"], dtype="int32")
            attention_masks = np.array(encoded["attention_mask"], dtype="int32")
            token_type_ids = np.array(encoded["token_type_ids"], dtype="int32")
            # Set to true if data generator is used for training/validation.
            if self.include_targets:
                labels = np.array(self.labels[indexes], dtype="int32")
                return [input_ids, attention_masks, token_type_ids], labels
            else:
                return [input_ids, attention_masks, token_type_ids]
        def on_epoch_end(self):
            # Shuffle indexes after each epoch if shuffle is set to True.
            if self.shuffle:
                np.random.RandomState(42).shuffle(self.indexes)

    # Create the model under a distribution strategy scope.
    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():
        # Encoded token ids from BERT tokenizer.
        input_ids = tf.keras.layers.Input(
            shape=(max_length,), dtype=tf.int32, name="input_ids"
        )
        # Attention masks indicate to the model which tokens should be attended to.
        attention_masks = tf.keras.layers.Input(
            shape=(max_length,), dtype=tf.int32, name="attention_masks"
        )
        # Token type ids are binary masks identifying different sequences in the model.
        token_type_ids = tf.keras.layers.Input(
            shape=(max_length,), dtype=tf.int32, name="token_type_ids"
        )
        # Loading pretrained BERT model.
        bert_model = transformers.TFBertModel.from_pretrained("bert-base-uncased")
        # Freeze the BERT model to reuse the pretrained features without modifying them.
        bert_model.trainable = False
        sequence_output, pooled_output = bert_model(
            input_ids, attention_mask=attention_masks, token_type_ids=token_type_ids
        )
        # Add trainable layers on top of frozen layers to adapt the pretrained features on the new data.
        bi_lstm = tf.keras.layers.Bidirectional(
            tf.keras.layers.LSTM(64, return_sequences=True)
        )(sequence_output)
        # Applying hybrid pooling approach to bi_lstm sequence output.
        avg_pool = tf.keras.layers.GlobalAveragePooling1D()(bi_lstm)
        max_pool = tf.keras.layers.GlobalMaxPooling1D()(bi_lstm)
        concat = tf.keras.layers.concatenate([avg_pool, max_pool])
        dropout = tf.keras.layers.Dropout(0.3)(concat)
        output = tf.keras.layers.Dense(5, activation="softmax")(dropout)
        model = tf.keras.models.Model(
            inputs=[input_ids, attention_masks, token_type_ids], outputs=output
        )
        model.compile(
            optimizer=tf.keras.optimizers.Adam(),
            loss="categorical_crossentropy",
            metrics=["acc"],
        )
    model.summary()
    train_data = BertSemanticDataGenerator(
        train_df[["Resume", "JD"]].values.astype("str"),
        y_train,
        batch_size=batch_size,
        shuffle=True,
    )
    valid_data = BertSemanticDataGenerator(
        valid_df[["Resume", "JD"]].values.astype("str"),
        y_val,
        batch_size=batch_size,
        shuffle=False,
    )
    # Phase 1: train only the new head layers while BERT stays frozen.
    history = model.fit(
        train_data,
        validation_data=valid_data,
        epochs=epochs,
        use_multiprocessing=True,
        workers=-1,
    )
    # Phase 2: unfreeze the bert_model for fine-tuning.
    bert_model.trainable = True
    # Recompile the model to make the change effective.
    # NOTE(review): no further fit() is run after this recompile, so the
    # fine-tuning phase never actually trains - confirm whether that is intended.
    model.compile(
        optimizer=tf.keras.optimizers.Adam(1e-5),
        loss="categorical_crossentropy",
        metrics=["accuracy"],
    )
    model.summary()
    # BUG FIX: the closing parenthesis was misplaced -
    # `model.save(os.path.join(__location__), 'trained')` saved into the
    # source directory and passed 'trained' as save()'s second positional
    # (overwrite) argument. Save into the intended 'trained' sub-directory.
    model.save(os.path.join(__location__, 'trained'))
    sentence_pairs = np.array([[str(sentence1), str(sentence2)]])
    test_data = BertSemanticDataGenerator(
        sentence_pairs, labels=None, batch_size=1, shuffle=False, include_targets=False,
    )
    proba = model.predict(test_data)[0]
    idx = np.argmax(proba)
    # BUG FIX: softmax outputs are fractions in [0, 1]; scale by 100 before
    # appending the percent sign (87% used to be reported as " 0.87%").
    proba = f"{proba[idx] * 100: .2f}%"
    pred = labels[idx]
    return pred, proba
# sentence1 = "Involved with problem-solving, developing design specifications and providingsystem solutions"
# sentence2 = "Strong communication skills and ability to articulate complex solutions to customer(s)"
# print(check_similarity(sentence1, sentence2)) | {"/app.py": ["/bertKeras.py"]} |
70,656 | vickyvishal/analytics4App | refs/heads/main | /bert-trained.py | import numpy as np
import pandas as pd
import tensorflow as tf
import transformers
import os
import sys
max_length = 128  # Maximum length of input sentence to the model.
batch_size = 32  # Samples per batch when batching inputs.
# Resolve paths relative to this file so the script works from any CWD.
__location__ = os.path.realpath(
    os.path.join(os.getcwd(), os.path.dirname(__file__)))
# Class labels, ordered to match the model's softmax output indices.
labels = ["Five", "Four", "Three", "Two", "One"]
class BertSemanticDataGenerator(tf.keras.utils.Sequence):
    """Generates batches of BERT-ready features from sentence pairs.
    Args:
        sentence_pairs: Array of premise and hypothesis input sentences.
        labels: Array of labels (ignored when include_targets is False).
        batch_size: Integer batch size.
        shuffle: boolean, whether to shuffle the data after each epoch.
        include_targets: boolean, whether to include the labels.
    Returns:
        Tuples `([input_ids, attention_mask, token_type_ids], labels)`
        (or just `[input_ids, attention_mask, token_type_ids]`
        if `include_targets=False`)
    """
    def __init__(
        self,
        sentence_pairs,
        labels,
        batch_size=batch_size,
        shuffle=True,
        include_targets=True,
    ):
        self.sentence_pairs = sentence_pairs
        self.labels = labels
        self.shuffle = shuffle
        self.batch_size = batch_size
        self.include_targets = include_targets
        # Load our BERT tokenizer to encode the text; uses the
        # bert-base-uncased pretrained vocabulary (downloads on first use).
        self.tokenizer = transformers.BertTokenizer.from_pretrained(
            "bert-base-uncased", do_lower_case=True
        )
        self.indexes = np.arange(len(self.sentence_pairs))
        self.on_epoch_end()
    def __len__(self):
        # Denotes the number of batches per epoch.
        # NOTE(review): integer division silently drops a trailing partial
        # batch (anything smaller than batch_size is never yielded).
        return len(self.sentence_pairs) // self.batch_size
    def __getitem__(self, idx):
        # Retrieves the batch of index.
        indexes = self.indexes[idx * self.batch_size : (idx + 1) * self.batch_size]
        sentence_pairs = self.sentence_pairs[indexes]
        # With BERT tokenizer's batch_encode_plus batch of both the sentences are
        # encoded together and separated by [SEP] token.
        # NOTE(review): `pad_to_max_length` is deprecated in newer transformers
        # releases in favour of `padding='max_length'` - confirm library version.
        encoded = self.tokenizer.batch_encode_plus(
            sentence_pairs.tolist(),
            add_special_tokens=True,
            max_length=max_length,
            return_attention_mask=True,
            return_token_type_ids=True,
            pad_to_max_length=True,
            return_tensors="tf",
        )
        # Convert batch of encoded features to numpy array.
        input_ids = np.array(encoded["input_ids"], dtype="int32")
        attention_masks = np.array(encoded["attention_mask"], dtype="int32")
        token_type_ids = np.array(encoded["token_type_ids"], dtype="int32")
        # Set to true if data generator is used for training/validation.
        if self.include_targets:
            labels = np.array(self.labels[indexes], dtype="int32")
            return [input_ids, attention_masks, token_type_ids], labels
        else:
            return [input_ids, attention_masks, token_type_ids]
    def on_epoch_end(self):
        # Shuffle indexes after each epoch if shuffle is set to True.
        # The fixed seed keeps the shuffle order reproducible across runs.
        if self.shuffle:
            np.random.RandomState(42).shuffle(self.indexes)
def check_similarity(sentence1, sentence2):
    """
    Score how strongly `sentence1` matches `sentence2` with the persisted model.

    :param sentence1: first text (e.g. resume snippet)
    :param sentence2: second text (e.g. job-description snippet)
    :return: (label, confidence) tuple such as ("Five", " 87.23%")
    """
    # BUG FIX: tf.keras.models.load_model expects the SavedModel *directory*
    # (the folder containing saved_model.pb), not the .pb file itself;
    # pointing it at the file raises a load error.
    model = tf.keras.models.load_model(__location__)
    model.summary()
    sentence_pairs = np.array([[str(sentence1), str(sentence2)]])
    test_data = BertSemanticDataGenerator(
        sentence_pairs, labels=None, batch_size=1, shuffle=False, include_targets=False,
    )
    proba = model.predict(test_data)[0]
    idx = np.argmax(proba)
    # BUG FIX: softmax outputs are fractions in [0, 1]; scale by 100 before
    # appending the percent sign (87% used to be reported as " 0.87%").
    confidence = f"{proba[idx] * 100: .2f}%"
    pred = labels[idx]
    return pred, confidence
# Ad-hoc smoke test: score a pair of related technology names.
sentence1 = "JavaScript"
sentence2 = "TypeScript"
print(check_similarity(sentence1, sentence2))
70,664 | KarlMarkFuncion/Portfolio_1 | refs/heads/main | /animations.py | import pygame
from Assets import *
class playerAnimations():
    """Pre-loaded player sprite surfaces, grouped by movement direction."""

    # Default idle sprite (facing down).
    standingMC = pygame.image.load('Assets/CharSprites/MC_walkDown1.png')
    # Idle frames indexed as 0=Down, 1=Up, 2=Left, 3=Right.
    standingMCseries = [
        pygame.image.load(f'Assets/CharSprites/MC_walk{direction}1.png')
        for direction in ('Down', 'Up', 'Left', 'Right')
    ]
    # Four-frame walk cycles, one list per direction.
    walkMCRight = [
        pygame.image.load(f'Assets/CharSprites/MC_walkRight{frame}.png')
        for frame in range(1, 5)
    ]
    walkMCLeft = [
        pygame.image.load(f'Assets/CharSprites/MC_walkLeft{frame}.png')
        for frame in range(1, 5)
    ]
    walkMCUp = [
        pygame.image.load(f'Assets/CharSprites/MC_walkUp{frame}.png')
        for frame in range(1, 5)
    ]
    walkMCDown = [
        pygame.image.load(f'Assets/CharSprites/MC_walkDown{frame}.png')
        for frame in range(1, 5)
    ]
# Static background surface used by the main render loop.
bg1 = pygame.image.load('Assets/Bgs/bg.png')
| {"/main.py": ["/objectPlayers.py", "/animations.py"], "/objectPlayers.py": ["/animations.py"]} |
70,665 | KarlMarkFuncion/Portfolio_1 | refs/heads/main | /main.py | import pygame
from objectPlayers import *
from animations import *
pygame.init()
# Window dimensions in pixels.
winSizeX = 480
winSizeY = 480
win = pygame.display.set_mode((winSizeX, winSizeY))
clock = pygame.time.Clock()  # used to cap the frame rate in the main loop
player = trainer(300, 400, 64, 64)  # start at (300, 400) with a 64x64 sprite
def winDrawChanger():
    """Redraw one frame: background first, then the player, then flip the display."""
    win.blit(bg1, (0, 0))
    player.draw(win)
    pygame.display.update()
run = True
while run:
    clock.tick(30)  # cap the game at 30 FPS
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
    keys = pygame.key.get_pressed()
    # WASD movement, clamped to the window; exactly one direction flag is
    # set at a time so draw() picks the matching walk animation.
    # BUG FIX: the right-edge clamp used player.height for a horizontal
    # bound; use player.width (identical here since the sprite is square).
    if keys[pygame.K_d] and player.x < winSizeX - player.width - player.vel:
        player.x += player.vel
        player.right = True
        player.left = False
        player.up = False
        player.down = False
    elif keys[pygame.K_a] and player.x > player.vel:
        player.x -= player.vel
        player.left = True
        player.right = False
        player.up = False
        player.down = False
    elif keys[pygame.K_w] and player.y > player.vel:
        player.y -= player.vel
        player.up = True
        player.down = False
        player.left = False
        player.right = False
    elif keys[pygame.K_s] and player.y < winSizeY - player.height - player.vel:
        player.y += player.vel
        player.up = False
        player.down = True
        player.left = False
        player.right = False
    else:
        # Idle: clear the horizontal flags and restart the walk cycle.
        # (The original cleared left/right twice each - redundant.)
        player.right = False
        player.left = False
        player.walkCount = 0
    winDrawChanger()
# BUG FIX: `pygame.quit` was referenced without parentheses, so pygame was
# never actually shut down on exit.
pygame.quit()
70,666 | KarlMarkFuncion/Portfolio_1 | refs/heads/main | /objectPlayers.py | from pygame import *
from animations import *
class trainer():
    """Player character: position, movement state, and sprite animation."""

    def __init__(self, x, y, width, height):
        # Position / size attributes
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.vel = 5  # movement speed in pixels per frame
        # Walk-direction flags; at most one is True at a time
        self.left = False
        self.right = False
        self.up = False
        self.down = False
        self.walkCount = 0  # frame counter driving the 4-sprite walk cycle

    def draw(self, win):
        """Blit the sprite matching the current movement state onto `win`."""
        walkComplement = 3  # frames each walk sprite stays on screen
        # BUG FIX: the counter reset used to be the first branch of the
        # if/elif chain, so on the reset frame *nothing* was blitted and the
        # sprite flickered once per walk cycle. Reset first, then always draw.
        if self.walkCount >= 12:
            self.walkCount = 0
        if self.right:
            win.blit(playerAnimations.walkMCRight[self.walkCount // walkComplement], (self.x, self.y))
            self.walkCount += 1
        elif self.left:
            win.blit(playerAnimations.walkMCLeft[self.walkCount // walkComplement], (self.x, self.y))
            self.walkCount += 1
        elif self.up:
            win.blit(playerAnimations.walkMCUp[self.walkCount // walkComplement], (self.x, self.y))
            self.walkCount += 1
        elif self.down:
            win.blit(playerAnimations.walkMCDown[self.walkCount // walkComplement], (self.x, self.y))
            self.walkCount += 1
        else:
            # NOTE(review): every direction flag is False by the time we get
            # here, so only the final branch can ever run and the character
            # always idles facing left. Fixing that needs a "last direction"
            # attribute (a behaviour change), so it is left as-is.
            if self.right:
                win.blit(playerAnimations.standingMCseries[3], (self.x, self.y))
            elif self.up:
                win.blit(playerAnimations.standingMCseries[1], (self.x, self.y))
            elif self.down:
                win.blit(playerAnimations.standingMCseries[0], (self.x, self.y))
            else:
                win.blit(playerAnimations.standingMCseries[2], (self.x, self.y))
70,669 | tossowski/CSCI1460Final | refs/heads/master | /preprocess.py | from torch.utils.data import Dataset
import torch
import argparse
import os
from tqdm import tqdm
from transformers import GPT2Tokenizer
class SummaryDataset(Dataset):
    '''
    Processes the CNN/DailyMail extractive-summarization dataset.
    Inside each document file, sections are split by "\n\n" and each
    sentence is separated from its label by "\t\t\t".
    The GPT2 tokenizer is used because its BPE vocabulary was trained on a
    large corpus, so entity names need no UNKing.
    '''
    def __init__(self, input_folder, tokenizer, max_sentence_size):
        """
        :param input_folder: directory containing one raw document per file
        :param tokenizer: tokenizer exposing `encode` (e.g. GPT2Tokenizer)
        :param max_sentence_size: sentences longer than this many tokens are dropped
        """
        self.tokenizer = tokenizer
        self.documents = []  # one padded (num_sentences, max_sentence_size) tensor per document
        self.labels = []     # one label tensor per document
        self.lengths = []    # unpadded per-sentence token counts, per document
        self.max_len = 0     # NOTE(review): never updated after init - appears unused
        # 200 threshold has a 97% retention rate and prevents misparsed documents
        # with 200+ line words from getting added to the dataset
        self.max_sentence_size = max_sentence_size
        files = os.listdir(input_folder)
        for file in tqdm(files):
            try:
                with open(input_folder + '/' + file) as f:
                    raw = f.read()
                document, label, lengths, skipped = self.parse_document(raw)
                # Drop empty documents and those where more than 10% of the
                # sentences exceeded the length cap.
                if len(document) < 1 or skipped > 0.1:
                    continue
                self.documents.append(pad_sequence(document, batch_first=True, max_len=self.max_sentence_size))
                self.labels.append(label)
                self.lengths.append(lengths)
                # print(len(self.documents))
            except Exception as e:
                # Best-effort ingestion: malformed files are skipped silently.
                # print("failed to read", file, e)
                continue
    def parse_document(self, document):
        """
        Split one raw file into tokenized sentences plus labels.
        :return: (sentence tensors, label tensor, length tensor, fraction of sentences skipped)
        """
        document = document.split('\n\n')
        # link = document[0]
        # Section 1 holds the labelled sentences, section 3 the entity->name
        # mapping (presumably per the neuralsum file layout - TODO confirm).
        document = self.insert_names(document[1], document[3])
        lines = []
        lengths = []
        labels = []
        max_len = 0
        skipped = 0
        for line_label in document:
            line_label = line_label.split('\t\t\t')
            line = line_label[0]
            encoded = self.tokenizer.encode(line, add_special_tokens=False)
            # Over-long sentences are skipped and counted toward the retention check.
            if len(encoded) > self.max_sentence_size:
                skipped += 1
                continue
            lengths.append(len(encoded))
            max_len = len(encoded) if len(encoded) > max_len else max_len
            lines.append(torch.tensor(encoded))
            label = int(line_label[1])
            # Label value 2 is mapped to 0.5; labels 0 and 1 are kept as-is.
            labels.append(0.5 if label == 2 else label)
        return lines, torch.tensor(labels), torch.tensor(lengths), skipped / len(document)
    def insert_names(self, document, names):
        '''
        This reinserts the names back into the texts. We can use the names without
        unking them because we are using GPT2's bpe
        '''
        parsed_doc = []
        document = document.split('\n')
        names = names.split('\n')
        entity_2_name = dict()
        # Each mapping line is split on ':' into an entity token and its name.
        for name in names:
            names = name.split(':')
            entity = names[0]
            name = ''.join(names[1:])
            entity_2_name[entity] = name
        # Replace every whitespace-delimited entity token with its real name.
        for line in document:
            parsed_line = []
            for token in line.split(' '):
                if token in entity_2_name:
                    parsed_line.append(entity_2_name[token])
                else:
                    parsed_line.append(token)
            parsed_doc.append(' '.join(parsed_line))
        return parsed_doc
    def __len__(self):
        # Number of successfully parsed documents.
        return len(self.documents)
    def __getitem__(self, idx):
        # TODO: Convert to tensor once we know what the model looks like
        item = {
            "paragraph_length": self.documents[idx].size()[0],
            "sentence_length": self.lengths[idx],
            "document": self.documents[idx],
            "labels": self.labels[idx],
        }
        return item
def pad_sequence(sequences, batch_first=False, padding_value=0, max_len=None):
    r"""Stack variable-length Tensors into one padded Tensor.

    Works like ``torch.nn.utils.rnn.pad_sequence`` but additionally accepts
    ``max_len`` to pad out to a fixed length instead of the longest sequence.

    Arguments:
        sequences (list[Tensor]): variable length sequences of size ``L x *``,
            all sharing the same trailing dimensions and dtype.
        batch_first (bool, optional): output is ``B x T x *`` if True,
            ``T x B x *`` otherwise.
        padding_value (float, optional): fill value for padded positions.
        max_len (int, optional): pad to this length ``T``; defaults to the
            longest sequence in the batch.

    Returns:
        Tensor of size ``B x T x *`` when ``batch_first`` else ``T x B x *``.

    Example:
        >>> a = torch.ones(25, 300)
        >>> b = torch.ones(22, 300)
        >>> pad_sequence([a, b]).size()
        torch.Size([25, 2, 300])
    """
    # Trailing dims are taken from the first sequence (assumed uniform).
    trailing_dims = sequences[0].size()[1:]
    if max_len is None:
        max_len = max(seq.size(0) for seq in sequences)
    if batch_first:
        out_dims = (len(sequences), max_len) + trailing_dims
    else:
        out_dims = (max_len, len(sequences)) + trailing_dims
    # Same dtype/device as the inputs, pre-filled with the padding value.
    out_tensor = sequences[0].new_full(out_dims, padding_value)
    for i, seq in enumerate(sequences):
        seq_len = seq.size(0)
        # Index notation avoids duplicate references to the source tensor.
        if batch_first:
            out_tensor[i, :seq_len, ...] = seq
        else:
            out_tensor[:seq_len, i, ...] = seq
    return out_tensor
if __name__ == "__main__":
    # Usage: python preprocess.py neuralsum/dailymail/test
    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
    parser = argparse.ArgumentParser()
    parser.add_argument("folder")  # directory of raw CNN/DailyMail documents
    args = parser.parse_args()
    train_dataset = SummaryDataset(args.folder, tokenizer, 75)  # 75-token sentence cap
| {"/main.py": ["/model.py", "/preprocess.py"]} |
70,670 | tossowski/CSCI1460Final | refs/heads/master | /model.py | from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class SummaryModel(nn.Module):
    """SummaRuNNer-style extractive summarizer.

    A word-level bidirectional GRU builds a fixed-size representation for
    each sentence; a sentence-level bidirectional GRU then scores every
    sentence, and a sigmoid head emits a per-sentence inclusion probability.
    """

    def __init__(self, vocab_size, word_rnn_size, sentence_rnn_size, embedding_size, max_sentence_size):
        """
        The Model class for SummaRuNNer
        :param vocab_size: The number of unique tokens in the data
        :param word_rnn_size: The size of hidden cells in the word level GRU
        :param sentence_rnn_size: The size of hidden cells in the sentence level GRU
        :param embedding_size: The dimension of embedding space
        :param max_sentence_size: The maximum sentence size (tokens per sentence)
        """
        super().__init__()
        self.num_layers = 1
        self.num_directions = 2
        self.vocab_size = vocab_size
        self.word_rnn_size = word_rnn_size
        self.sentence_rnn_size = sentence_rnn_size
        self.embedding_size = embedding_size
        self.max_sentence_size = max_sentence_size
        self.embedding = torch.nn.Embedding(vocab_size, self.embedding_size)
        self.word_gru = torch.nn.GRU(self.embedding_size, word_rnn_size, num_layers=self.num_layers, bidirectional=True, batch_first=True)
        # Input size is word_rnn_size * max_sentence_size: the word GRU's
        # bidirectional output (2 * word_rnn_size per position) is average-pooled
        # by 2 in word_level_gru and then flattened across max_sentence_size
        # positions.
        self.sentence_gru = torch.nn.GRU(
            self.word_rnn_size * max_sentence_size, sentence_rnn_size,
            num_layers=self.num_layers,
            bidirectional=True, batch_first=True
        )
        # Pools the two GRU directions back down to a single hidden size.
        self.AvgPool1 = torch.nn.AvgPool1d(2)
        # NOTE(review): AvgPool2 is never referenced in this class; AvgPool1 is
        # reused after both GRU stages. Possibly dead code — confirm.
        self.AvgPool2 = torch.nn.AvgPool1d(2)
        self.linear = torch.nn.Linear(sentence_rnn_size, 1)
        self.sigmoid = nn.Sigmoid()

    def word_level_gru(self, paragraph, sentence_lengths):
        """
        Given a list of sentences, run the word level bidirectional RNN to compute the sentence level representations
        for each sentence. The sentence level representation for a sentence is obtained by computing the hidden state
        representations at each word position sequentially, based on the current word embeddings and the previous
        hidden state. These hidden states are then concatenated together and average pooled to form the sentence
        level representation.
        :param paragraph: A tensor (paragraph_size, sentence_size) of sentences. Each sentence is a list of token ids.
        :param sentence_lengths: A tensor (paragraph_size,) representing the actual lengths (no padding) of each sentence in the paragraph
        :return: tensor of shape (paragraph_size, word_rnn_size * max_sentence_size)
        """
        # Word level GRU applied to each sentence in paragraph
        paragraph_size = paragraph.size()[0]
        embeddings = self.embedding(paragraph) # Should have shape (max paragraph size, max sentence size, embedding size)
        # Packing lets the GRU skip padded positions; enforce_sorted=False
        # avoids having to pre-sort sentences by length.
        packed_seq = pack_padded_sequence(embeddings, sentence_lengths, batch_first=True, enforce_sorted=False)
        # (paragraph_size, sentences_len, num_directions * hidden_size)
        word_level_representations, _ = self.word_gru(packed_seq)
        # total_length pins the unpacked time dimension to max_sentence_size so
        # the downstream .view() flattening is shape-stable across batches.
        word_level_representations, lens_unpacked = pad_packed_sequence(
            word_level_representations,
            batch_first=True,
            total_length=self.max_sentence_size
        )
        # Average pool results (pools the 2 directions: 2*hidden -> hidden)
        word_level_representations = self.AvgPool1(word_level_representations)
        # Concatenate results (paragraph_size, sentences_len * hidden_size)
        # Gives 1 representation per sentence to give to GRU
        word_level_representations = word_level_representations.view(paragraph_size, -1)
        return word_level_representations

    def sentence_level_gru(self, paragraphs, paragraph_lengths):
        """
        Given a batch of sentence representations, run the sentence level
        bidirectional RNN to compute per-sentence scores in document context.
        :param paragraphs: A tensor (batch_size, paragraph_size, sentence_representation_size)
        :param paragraph_lengths: A tensor (batch_size,) of true paragraph lengths.
            NOTE(review): currently unused — padded sentence positions are fed
            through the GRU as-is. Confirm whether packing was intended here.
        :return: pooled GRU output (batch_size, paragraph_size, sentence_rnn_size)
        """
        # (batch_size, paragraph_len, num_directions * hidden_size)
        out, _ = self.sentence_gru(paragraphs)
        return self.AvgPool1(out)

    def forward(self, inputs, paragraph_lengths, sentence_lengths):
        """
        Runs the forward pass of the model.
        :param inputs: word ids (tokens) of shape (batch_size, sentence_window_size, word_window_size)
        :param paragraph_lengths: an array of shape (batch_size) representing the number of sentences in the paragraph
        :param sentence_lengths: array of shape (batch_size, sentence_window_size) representing
            the actual lengths (no padding) of each sentence in the paragraph
        :return: per-sentence inclusion probabilities (after sigmoid), a tensor
            of shape (batch_size, paragraph_size)
        """
        # TODO: Parallelize the model
        # To parallelize model we need to pass all sentences into a single call word_level_gru
        # and group their embeddings by paragraph. Then pass that to sentence_level_gru
        # Compute word level representations over all sentences, one document
        # at a time (the Python loop is the current bottleneck).
        # Result should have shape (batch_size, paragraph_size, word_rnn_size)
        word_level_outputs = [self.word_level_gru(inputs[i], sentence_lengths[i]) for i in range(len(inputs))]
        word_level_outputs = torch.stack(word_level_outputs)
        # Parallelization attempt
        # inputs = inputs.view(-1, self.max_sentence_size)
        # sentence_lengths = sentence_lengths.view(-1)
        # word_level_outputs = self.word_level_gru(inputs, sentence_lengths)
        # have to recombine to paragraphs
        # index = 0
        # paragraphs = []
        # for length in paragraph_lengths:
        #     paragraphs.append(torch.narrow(word_level_outputs, 0, index, index + length))
        #     index += length
        #
        # word_level_outputs = torch.stack(paragraphs)
        # print(paragraphs.size())
        # Run sentence level bidirectional GRU over the resulting hidden states
        # Should have shape (batch_size, paragraph_size, sentence_rnn_size)
        sentence_level_outputs = self.sentence_level_gru(word_level_outputs, paragraph_lengths)
        # Logistic layer: one logit per sentence, squeezed to (batch, paragraph).
        l1 = self.linear(sentence_level_outputs).squeeze(2)
        return self.sigmoid(l1)
| {"/main.py": ["/model.py", "/preprocess.py"]} |
70,671 | tossowski/CSCI1460Final | refs/heads/master | /main.py | from comet_ml import Experiment
from model import SummaryModel
from torch.utils.data import DataLoader, random_split
from torch import nn, optim
from torch.nn import functional as F
import torch
import argparse
import numpy as np
from preprocess import SummaryDataset, pad_sequence
from tqdm import tqdm # optional progress bar
from transformers import GPT2Tokenizer
import nltk
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# TODO: If we want more than a batch size of 1, we need to pad whole documents, so each document is the same size
# bc sizes of dataloader tensors must match except in dimension 0
hyperparams = {
    "rnn_size": 128,  # hidden size for both word- and sentence-level GRUs
    "embedding_size": 512,
    "num_epochs": 1,
    "batch_size": 1,  # see TODO above: >1 requires padding whole documents
    "learning_rate": 0.0005,
    "max_sentence_size": 75,  # max tokens per sentence
}
# Get summary for ith example in a dataset
def generate_summary(idx, dataset, model, tokenizer):
    """Print the idx-th document of `dataset` followed by the sentences the
    model selects for the summary (inclusion probability > 0.5).

    :param idx: index of the example within `dataset`
    :param dataset: dataset whose items provide 'paragraph_length',
        'sentence_length' and 'document'
    :param model: trained SummaryModel
    :param tokenizer: tokenizer whose .decode() reverses the dataset encoding
    """
    batch = dataset[idx]
    paragraph_length = batch["paragraph_length"]
    sentence_length = batch["sentence_length"]
    x = batch["document"]
    # [None, :] adds a leading batch dimension of 1 before moving to device.
    paragraph_lengths = torch.LongTensor(np.array([paragraph_length])[None,:]).to(device)
    sentence_lengths = torch.LongTensor(np.array(sentence_length)[None,:]).to(device)
    x = torch.LongTensor(np.array(x)[None,:]).to(device)
    # squeeze drops the batch dim -> one probability per sentence.
    y_pred = torch.squeeze(model(x, paragraph_lengths, sentence_lengths))
    print("ORIGINAL:\n")
    for i in range(paragraph_length):
        sentence = x[0,i]
        # Decode only the real tokens; sentence_length[i] strips the padding.
        print(tokenizer.decode(sentence[0:sentence_length[i]].detach().cpu().numpy()) + "\n")
    print("-----------------------------------------------")
    print("SUMMARY:\n")
    for i in range(paragraph_length):
        sentence = x[0,i]
        if (y_pred[i] > 0.5):
            print(tokenizer.decode(sentence[0:sentence_length[i]].detach().cpu().numpy()) + " [CONFIDENCE: {}]\n".format(y_pred[i]))
def train(model, train_loader, hyperparams):
    """Train the model with BCE loss and checkpoint it to ./model.pt.

    :param model: SummaryModel to optimize
    :param train_loader: DataLoader yielding dicts with keys
        'paragraph_length', 'sentence_length', 'document', 'labels'
    :param hyperparams: dict providing 'learning_rate' and 'num_epochs'
    """
    criterion = nn.BCELoss()
    opt = optim.Adam(model.parameters(), hyperparams['learning_rate'])
    model = model.train()
    for _ in range(hyperparams['num_epochs']):
        for batch in tqdm(train_loader):
            # Move every tensor in the batch onto the active device.
            n_sentences = batch["paragraph_length"].to(device)
            sent_lens = batch["sentence_length"].to(device)
            docs = batch["document"].to(device)
            targets = batch['labels'].to(device)
            opt.zero_grad()
            probs = model(docs, n_sentences, sent_lens)
            loss = criterion(probs.float(), targets.float())
            loss.backward()  # calculate gradients
            opt.step()  # update model weights
        print("loss:", loss.item())
    torch.save(model.state_dict(), './model.pt')
def test(model, test_loader):
    """Evaluate the model on the test split, printing per-batch BCE loss."""
    model = model.eval()
    criterion = nn.BCELoss()
    for batch in tqdm(test_loader):
        n_sentences = batch["paragraph_length"].to(device)
        sent_lens = batch["sentence_length"].to(device)
        docs = batch["document"].to(device)
        targets = batch['labels'].to(device)
        probs = model(docs, n_sentences, sent_lens)
        loss = criterion(probs.float(), targets.float())
        print("loss", loss.item())
def validate(model, test_loader):
    """Evaluate the model on the validation split, printing per-batch BCE loss.

    NOTE(review): this is currently identical to test(); consider merging.
    """
    model = model.eval()
    criterion = nn.BCELoss()
    for batch in tqdm(test_loader):
        n_sentences = batch["paragraph_length"].to(device)
        sent_lens = batch["sentence_length"].to(device)
        docs = batch["document"].to(device)
        targets = batch['labels'].to(device)
        probs = model(docs, n_sentences, sent_lens)
        loss = criterion(probs.float(), targets.float())
        print("loss", loss.item())
def summarize_file(model, file, tokenizer, max_sentence_size):
    """Summarize a plain-text file, keeping sentences scored above 0.7.

    :return: (summary_sentences, len(summary_sentences), total_sentences)
    """
    model = model.eval()
    with open(file) as fh:
        raw = fh.read().replace('“', '"').replace('”', '"')
    sentences = nltk.tokenize.sent_tokenize(raw)
    print(sentences)
    document, sentence_length = parse_file(sentences, tokenizer, max_sentence_size)
    n_sentences = document.size()[0]
    sentence_length = sentence_length.unsqueeze(0).to(device)
    paragraph_length = torch.tensor(n_sentences).unsqueeze(0).to(device)
    document = document.unsqueeze(0).to(device)
    scores = model(document, paragraph_length, sentence_length)
    scores = scores.detach().cpu()[0].numpy()
    # NOTE(review): parse_file may skip over-long sentences, making scores
    # shorter than sentences; indexing would then raise IndexError — confirm.
    summary = [s for i, s in enumerate(sentences) if scores[i] > 0.7]
    return summary, len(summary), len(sentences)
def parse_file(document, tokenizer, max_sentence_size):
    """Encode each sentence with `tokenizer`, dropping empty or over-long
    ones, and return (padded id tensor, tensor of true lengths)."""
    encoded_sentences = []
    true_lengths = []
    for sentence in document:
        token_ids = tokenizer.encode(sentence, add_special_tokens=False)
        # Drop sentences that are empty or exceed the padding budget.
        if not 1 <= len(token_ids) <= max_sentence_size:
            print("SKIP", sentence)
            continue
        true_lengths.append(len(token_ids))
        encoded_sentences.append(torch.tensor(token_ids))
    return pad_sequence(encoded_sentences, batch_first=True, max_len=max_sentence_size), torch.tensor(true_lengths)
# python main.py -l -T data/dailymail/train
# python main.py -l -t data/dailymail/test
# python main.py -l -F data/examples/apclimate.txt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-l", "--load", action="store_true",
                        help="load model.pt")
    parser.add_argument("-s", "--save", action="store_true",
                        help="save model.pt")
    parser.add_argument("-T", "--train", action="store",
                        help="run training loop")
    parser.add_argument("-F", "--file", action="store",
                        help="summarize the given plain-text file")
    parser.add_argument("-V", "--validate", action="store",
                        help="run validation loop")
    parser.add_argument("-t", "--test", action="store",
                        help="run testing loop")
    parser.add_argument("-S", "--summary", action="store_true",
                        help="generate summaries")
    args = parser.parse_args()
    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
    print("GATHERING DATA")
    # Build a dataset for each requested split.
    train_dataset = None
    if args.train:
        train_dataset = SummaryDataset(
            input_folder=args.train,
            tokenizer=tokenizer,
            max_sentence_size=hyperparams['max_sentence_size']
        )
    validate_dataset = None
    if args.validate:
        validate_dataset = SummaryDataset(
            input_folder=args.validate,
            tokenizer=tokenizer,
            max_sentence_size=hyperparams['max_sentence_size']
        )
    test_dataset = None
    if args.test:
        test_dataset = SummaryDataset(
            input_folder=args.test,
            tokenizer=tokenizer,
            max_sentence_size=hyperparams['max_sentence_size']
        )
    train_loader = None
    if args.train:
        train_loader = DataLoader(
            train_dataset, batch_size=hyperparams['batch_size'], shuffle=True)
    validate_loader = None
    if args.validate:
        # Bug fix: this previously assigned to train_loader, which left
        # validate_loader as None and clobbered the training loader.
        validate_loader = DataLoader(
            validate_dataset, batch_size=hyperparams['batch_size'])
    test_loader = None
    if args.test:
        test_loader = DataLoader(test_dataset, batch_size=hyperparams['batch_size'])
    model = SummaryModel(
        tokenizer.vocab_size + 1,
        hyperparams['rnn_size'],
        hyperparams['rnn_size'],
        hyperparams['embedding_size'],
        hyperparams['max_sentence_size']
    ).to(device)
    if args.load:
        print("loading saved model...")
        model.load_state_dict(torch.load('./model.pt', map_location=device))
    if args.file:
        print("summarizing file...")
        summary, sum_len, doc_len = summarize_file(model, args.file, tokenizer, hyperparams['max_sentence_size'])
        print("Used", sum_len, "of", doc_len, "lines")
        print(' '.join(summary))
    if args.summary:
        if test_dataset is None:
            # Guard: previously crashed inside generate_summary when -S was
            # supplied without -t.
            parser.error("--summary requires --test to supply a dataset")
        print("summarizing test data...")
        generate_summary(0, test_dataset, model, tokenizer)
    if args.train:
        print("running training loop...")
        train(model, train_loader, hyperparams)
    if args.save:
        print("saving model...")
        torch.save(model.state_dict(), './model.pt')
    if args.validate:
        # Bug fix: message previously said "running training loop...".
        print("running validation loop...")
        validate(model, validate_loader)
    if args.test:
        print("testing models...")
        test(model, test_loader)
    # TODO: wire up comet.ml experiment logging (`.comet.config`) if desired.
| {"/main.py": ["/model.py", "/preprocess.py"]} |
70,683 | SebStrug/quotes-index | refs/heads/main | /tests/test_api.py | from src.api import split_quote, Quote
def test_split_quote_lead_in():
    """A quote with a lead-in line splits into all three fields."""
    # `raw` rather than `input`: avoid shadowing the builtin.
    raw = """On chess...
'[to succeed] you must study the endgame before everything else.'
Jose Raul Capablanca"""
    res = split_quote(raw)
    expected_res = Quote(
        lead_in="On chess...",
        content="'[to succeed] you must study the endgame before everything else.'",
        source="Jose Raul Capablanca",
    )
    assert res == expected_res
def test_split_quote_source():
    """A quote without a lead-in splits into content and source only."""
    # `raw` rather than `input`: avoid shadowing the builtin.
    raw = """'Sometimes even good Homer nods off.'
Horace, Ars Poetica"""
    res = split_quote(raw)
    expected_res = Quote(
        lead_in="",
        content="'Sometimes even good Homer nods off.'",
        source="Horace, Ars Poetica",
    )
    assert res == expected_res
| {"/tests/test_api.py": ["/src/api.py"], "/tests/test_handler.py": ["/src/handler.py"], "/src/local_index.py": ["/src/index.py", "/src/handler.py"], "/src/api.py": ["/src/handler.py"]} |
70,684 | SebStrug/quotes-index | refs/heads/main | /tests/test_handler.py | from pathlib import Path
import json
import os
import pytest
import boto3
from moto import mock_s3
from src.handler import LocalHandler, AWSHandler, form_quote
def test_form_quote():
    """form_quote renders content + source and drops a None lead_in."""
    # `payload` rather than `input`: avoid shadowing the builtin.
    payload = {
        "content": "some other quote",
        "lead_in": None,
        "source": "Seb, This test",
    }
    expected = "'some other quote'\nSeb, This test"
    # Pop content first, explicitly, instead of relying on argument
    # evaluation order inside the call.
    content = payload.pop("content")
    assert expected == form_quote(content, **payload)
def test_local_iterate_text_pairs(tmp_path):
    """iterate_text_pairs yields every (file-id, line) pair exactly once."""
    path_1 = tmp_path / "1.txt"
    quote_1 = "'Some quote'"
    origin_1 = "Author"
    path_1.write_text("\n".join((quote_1, origin_1)))
    path_2 = tmp_path / "2.txt"
    quote_2 = "'Some other quote'"
    origin_2 = "Musician"
    path_2.write_text("\n".join((quote_2, origin_2)))
    index_handler = LocalHandler(tmp_path)
    # list() exhausts the generator; the manual next()/StopIteration loop
    # was an unidiomatic re-implementation of it.
    res = list(index_handler.iterate_text_pairs())
    expected_res = [(1, quote_1), (1, origin_1), (2, quote_2), (2, origin_2)]
    assert set(expected_res) == set(res)
def test_local_write_index(tmp_path):
    """write_index produces exactly one timestamped JSON file round-tripping the dict."""
    handler = LocalHandler(tmp_path)
    index = {"a": 1, "b": 2}
    handler.write_index("index", index)
    written = list(Path(tmp_path).glob("index*.json"))
    assert len(written) == 1
    with written[0].open("r") as f:
        assert json.load(f) == index
def test_local_load_index(tmp_path):
    """load_index reads back a JSON file matching the given prefix."""
    expected = {"a": 1, "b": 2}
    (Path(tmp_path) / "test_file.json").write_text(json.dumps(expected))
    handler = LocalHandler(tmp_path)
    assert handler.load_index("test") == expected
def test_local_add_quote(tmp_path):
    """add_quote writes the new quote to the next numbered .txt file."""
    initial_quote = "'Some quote\nSeb, This Test'"
    (Path(tmp_path) / "1.txt").write_text(initial_quote)
    quote_to_add = {
        "content": "some other quote",
        "lead_in": None,
        "source": "Seb, This test",
    }
    handler = LocalHandler(tmp_path)
    handler.add_quote(**quote_to_add)
    # Leftover debug print of the directory listing removed.
    expected_path = Path(tmp_path / "2.txt")
    assert expected_path.exists()
    with open(expected_path, "r") as f:
        assert f.read() == "'some other quote'\nSeb, This test\n"
@pytest.fixture
def s3_resource():
    # moto intercepts all boto3 S3 calls for the duration of the fixture,
    # so no real AWS traffic occurs.
    with mock_s3():
        s3 = boto3.resource("s3")
        yield s3
@pytest.fixture
def bucket_name():
    # Name of the mocked bucket used throughout the AWS tests.
    return "sebstrug-test"
@pytest.fixture
def region():
    # Region used when creating the mocked bucket.
    return "eu-west-1"
@pytest.fixture
def aws_credentials(bucket_name, region):
    """Mocked AWS Credentials for moto."""
    # NOTE(review): this sets the bucket/region env vars read by the code
    # under test, not actual AWS credentials — the name is a bit misleading.
    os.environ["QUOTES_INDEX_S3_BUCKET"] = bucket_name
    os.environ["S3_BUCKET"] = bucket_name
    os.environ["AWS_REGION"] = region
@pytest.fixture
def s3_test(s3_resource, bucket_name, region, aws_credentials):
    # Create the mocked bucket in the configured region before each test.
    bucket = s3_resource.Bucket(bucket_name)
    bucket.create(CreateBucketConfiguration={"LocationConstraint": region})
    yield
@pytest.fixture
def s3_index(s3_resource, bucket_name, s3_test):
    """Preload two index files onto the mocked S3 bucket."""
    # `obj` rather than `object`: avoid shadowing the builtin.
    obj = s3_resource.Object(bucket_name, "index_test.json")
    obj.put(Body=json.dumps({"1": ["1", "2", "3"], "2": ["2", "3"]}))
    obj = s3_resource.Object(bucket_name, "index_zzz.json")
    obj.put(Body=json.dumps({"1": ["1", "2", "3"], "2": ["2", "3"]}))
    yield
def test_aws_iterate_text_pairs(s3_resource, bucket_name, s3_test):
    """iterate_text_pairs yields (file-id, line) pairs and then terminates."""
    # `obj` rather than `object`: avoid shadowing the builtin.
    obj = s3_resource.Object(bucket_name, "1.txt")
    obj.put(Body="foo\nbar")
    obj = s3_resource.Object(bucket_name, "2.txt")
    obj.put(Body="baz")
    index_handler = AWSHandler(s3_resource)
    it = index_handler.iterate_text_pairs()
    assert (1, b"foo") == next(it)
    assert (1, b"bar") == next(it)
    assert (2, b"baz") == next(it)
    # Make sure we don't keep returning s3 keys with no content!
    with pytest.raises(StopIteration):
        next(it)
def test_aws_write_index(s3_resource, bucket_name, s3_test):
    """write_index stores the dict as JSON under the given key."""
    expected = {"1": ["1", "2", "3"], "2": ["2", "3"]}
    AWSHandler(s3_resource).write_index("index.json", expected)
    stored = s3_resource.Object(bucket_name, "index.json").get()["Body"]
    assert json.load(stored) == expected
def test_aws_load_index(s3_resource, bucket_name, s3_test, s3_index):
    """load_index round-trips the JSON stored by the s3_index fixture."""
    handler = AWSHandler(s3_resource)
    assert handler.load_index("index_test.json") == {"1": ["1", "2", "3"], "2": ["2", "3"]}
def test_aws_load_latest_index(s3_resource, bucket_name, s3_test, s3_index):
    """Passing a bare prefix loads the last matching index object."""
    handler = AWSHandler(s3_resource)
    assert handler.load_index("index") == {"1": ["1", "2", "3"], "2": ["2", "3"]}
def test_aws_add_quote(s3_resource, bucket_name, s3_test):
    """add_quote uploads a formatted quote with the default source."""
    AWSHandler(s3_resource).add_quote(content="Test quote")
    bucket = s3_resource.Bucket(bucket_name)
    for stored in bucket.objects.all():
        body = stored.get()["Body"].read().decode()
        assert body == "'Test quote'\nAnonymous"
| {"/tests/test_api.py": ["/src/api.py"], "/tests/test_handler.py": ["/src/handler.py"], "/src/local_index.py": ["/src/index.py", "/src/handler.py"], "/src/api.py": ["/src/handler.py"]} |
70,685 | SebStrug/quotes-index | refs/heads/main | /src/local_index.py | """Generate inverted index using localhost"""
from pathlib import Path
from src.index import create_inverted_index, WORD_ID_MAP
from src.handler import LocalHandler
def main():
    """Build the inverted index from local quote files and persist it
    together with the word-ID map."""
    quotes_dir = Path(__file__).parent.parent / "quotes"
    handler = LocalHandler(quotes_dir)
    index = create_inverted_index(handler.iterate_text_pairs())
    handler.write_index("index", index)
    handler.write_index("word-ids", WORD_ID_MAP)


if __name__ == "__main__":
    main()
| {"/tests/test_api.py": ["/src/api.py"], "/tests/test_handler.py": ["/src/handler.py"], "/src/local_index.py": ["/src/index.py", "/src/handler.py"], "/src/api.py": ["/src/handler.py"]} |
70,686 | SebStrug/quotes-index | refs/heads/main | /src/index.py | """Index.py contais functions for creating an inverted index."""
from collections import defaultdict
from typing import Iterator, Tuple, List, Dict, Union
import itertools
import string
# Create a mapping from words to unique IDs: the defaultdict factory assigns
# each first-seen word the next integer from a shared counter.
# ~470,000 words in English, should be fine to store in memory
WORD_ID_MAP: Dict[str, int] = defaultdict(itertools.count().__next__)
# Type aliases for clarity
WordFilePair = Tuple[int, int]  # (word-id, file-id)
WordLinePair = Tuple[int, str]  # (file-id, line-of-text)
def get_text_word_ids(text: str) -> Iterator[int]:
    """Get all the word IDs from a given text.

    Punctuation is removed and case is ignored. Stopwords are not removed,
    this was not mentioned in the spec.

    Args:
        text: Some string to get word IDs for

    Returns:
        iterator of all word IDs in text
    """
    # Hoist the (loop-invariant) translation table out of the loop.
    strip_punctuation = str.maketrans("", "", string.punctuation)
    for raw_word in text.split():
        # Remove punctuation, make string uniform
        word = raw_word.translate(strip_punctuation).lower()
        # Bug fix: skip empties AFTER normalization — previously a
        # punctuation-only token (e.g. "--" or "...") collapsed to "" and
        # was assigned a word ID for the empty string. (str.split() never
        # yields "", so the old pre-normalization check was dead code.)
        if not word:
            continue
        yield WORD_ID_MAP[word]
def get_word_file_pairs(
    file_id: int, text: Union[str, bytes]
) -> Iterator[WordFilePair]:
    """Pair every word ID found in *text* with *file_id*.

    Args:
        file_id: identifier for file
        text: text contents of file (str, or bytes as served by S3)

    Returns:
        iterator of (word-id, file-id) pairs
    """
    # S3 serves lines as bytes; values that are already str (AttributeError)
    # or cannot be decoded pass through untouched.
    try:
        text = text.decode()
    except (UnicodeDecodeError, AttributeError):
        pass
    return zip(get_text_word_ids(text), itertools.repeat(file_id))
def create_inverted_index(
    file_line_it: Iterator[WordLinePair],
) -> Dict[int, List[int]]:
    """Build an inverted index from (<file-id>, <line-from-file>) pairs.

    Args:
        file_line_it: Iterator producing consecutive pairs of
            (<file-id>, <line-from-file>)

    Returns:
        mapping of word IDs to the sorted list of all file IDs whose text
        contains that word, i.e. <word-id>: <all-associated-file-ids>
    """
    index = defaultdict(list)
    for file_id, line in file_line_it:
        for word_id, fid in get_word_file_pairs(file_id, line):
            index[word_id].append(fid)
    # Sorting once complete is faster than maintaining a sorted list with bisect
    for postings in index.values():
        postings.sort()
    return index
| {"/tests/test_api.py": ["/src/api.py"], "/tests/test_handler.py": ["/src/handler.py"], "/src/local_index.py": ["/src/index.py", "/src/handler.py"], "/src/api.py": ["/src/handler.py"]} |
70,687 | SebStrug/quotes-index | refs/heads/main | /manual/upload_quotes.py | from pathlib import Path
import os
import logging
import sys
import boto3
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger(__name__)
def main():
    """Upload every local quotes/*.txt file to the configured S3 bucket,
    logging progress every 10 uploads."""
    bucket = os.getenv("QUOTES_INDEX_S3_BUCKET")
    client = boto3.client("s3")
    quote_paths = (Path(__file__).parent.parent / "quotes").glob("*txt")
    for count, path in enumerate(quote_paths, start=1):
        client.upload_file(path.as_posix(), bucket, path.name)
        if count % 10 == 0:
            logger.info(f"Uploaded {count} files")


if __name__ == "__main__":
    main()
| {"/tests/test_api.py": ["/src/api.py"], "/tests/test_handler.py": ["/src/handler.py"], "/src/local_index.py": ["/src/index.py", "/src/handler.py"], "/src/api.py": ["/src/handler.py"]} |
70,688 | SebStrug/quotes-index | refs/heads/main | /manual/split_quotes.py | from pathlib import Path
import itertools
COUNTER = itertools.count()
def main():
    """Split main.txt into one file per quote under quotes/.

    Quotes in main.txt are separated by blank lines; each quote is written
    (whitespace-stripped) to quotes/<n>.txt with consecutive numbering.
    """
    Path("quotes").mkdir(parents=True, exist_ok=True)
    source = Path(__file__).parent / "main.txt"
    quote = ""
    # `src` rather than rebinding `f` inside the loop: the original inner
    # `with open(...) as f` shadowed the outer file handle.
    with open(source, "r") as src:
        for line in src:
            quote += line
            if quote.strip() == "":
                continue
            if line == "\n":
                _write_quote(quote)
                quote = ""
    # Bug fix: the last quote was silently dropped when main.txt did not
    # end with a blank line — flush any remaining text.
    if quote.strip():
        _write_quote(quote)


def _write_quote(quote: str) -> None:
    """Write one stripped quote to the next numbered file in quotes/."""
    out_path = Path("quotes") / f"{next(COUNTER)}.txt"
    with open(out_path, "w") as out:
        out.write(quote.strip())


if __name__ == "__main__":
    main()
| {"/tests/test_api.py": ["/src/api.py"], "/tests/test_handler.py": ["/src/handler.py"], "/src/local_index.py": ["/src/index.py", "/src/handler.py"], "/src/api.py": ["/src/handler.py"]} |
70,689 | SebStrug/quotes-index | refs/heads/main | /src/handler.py | from abc import abstractmethod
from typing import Any, Iterator, Tuple, Dict, Optional
from datetime import datetime
from pathlib import Path
import json
import itertools
import re
import os
import logging
import sys
from botocore.exceptions import BotoCoreError
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)
def form_quote(
    content: str,
    lead_in: Optional[str] = None,
    source: str = "Anonymous",
) -> str:
    """Create a quote string out of the provided fields.

    Args:
        content: body of the quote (required, non-empty)
        lead_in: initial/background info on the quote, e.g.
            'On the meaning of life'. Rendered with a trailing ellipsis;
            omitted entirely when falsy.
        source: who said/wrote the quote. Defaults to 'Anonymous'.

    Returns:
        Quote of the form::

            <lead_in>...
            '<content>'
            <source>

    Raises:
        ValueError: if content is empty.
    """
    if not content:
        raise ValueError("No quote provided")
    lead_in = lead_in + "..." if lead_in else ""
    return "\n".join((lead_in, f"'{content}'", source)).strip()
class Handler(ABC):
    """Interface for index storage backends (local filesystem or S3).

    Bug fix: @abstractmethod is a no-op unless the class's metaclass is
    ABCMeta — the class previously did not inherit from abc.ABC, so
    subclasses missing a method only failed at call time. Deriving from
    ABC enforces the contract at instantiation.
    """

    @abstractmethod
    def iterate_text_pairs(self, *args: Any) -> Iterator[Tuple[int, str]]:
        """Generate successive pairs of (<file id>, <line from file>) tuples
        where the file ID is a consecutive integer
        """
        raise NotImplementedError

    @abstractmethod
    def write_index(self, *args: Any):
        """Write a dictionary containing an inverted index to path"""
        raise NotImplementedError

    @abstractmethod
    def load_object(self, *args: Any):
        """Load in any object from the environment"""
        raise NotImplementedError

    @abstractmethod
    def load_index(self, *args: Any):
        """Load in a dictionary containing an inverted index from path"""
        raise NotImplementedError

    @abstractmethod
    def add_quote(self, *args: Any, **kwargs: Any):
        """Add a quote; does NOT update the index"""
        raise NotImplementedError
class LocalHandler(Handler):
    """Interact with local files to handle index"""

    def __init__(self, local_path: Path):
        """
        Args:
            local_path: Path containing quotes as consecutive text files.
                Index will also be saved to local path
        """
        self.local_path = local_path

    def iterate_text_pairs(self) -> Iterator[Tuple[int, str]]:
        """Yield (<file-id>, <line>) pairs for every *.txt quote file."""
        for fname in Path(self.local_path).glob("*.txt"):
            file_id = int(fname.stem)
            logger.debug(f"Found file ID: {file_id}")
            file_id_iterator = itertools.repeat(file_id)
            with fname.open("r") as f:
                yield from zip(file_id_iterator, f.read().splitlines())

    def write_index(self, prefix: str, index: Dict):
        """Write a dictionary to a file with the filename as
        <path>/<prefix>-YYYY-MM-DD--HH:MM.json
        """
        dt_str = datetime.now().strftime("%Y-%m-%d--%H:%M")
        path = Path(self.local_path) / f"{prefix}-{dt_str}.json"
        with path.open("w") as f:
            json.dump(index, f)
        logger.info(f"Wrote dictionary to path: {path}")

    def load_object(self, prefix: str) -> str:
        """Load an object by prefix from the local path.

        If several objects share the prefix, the last one in glob order is
        loaded. Returns "" when no object matches.
        """
        # Bug fix: the original left the loop variable possibly unbound and
        # relied on catching NameError; track the last match explicitly.
        last_match = None
        for obj in Path(self.local_path).glob(f"{prefix}*"):
            last_match = obj
        if last_match is None:
            logging.error(f"No object with prefix: {prefix}")
            return ""
        with last_match.open("r") as f:
            return f.read()

    def load_index(self, prefix: str) -> Dict:
        """Searches the local path for a prefix, loads in the latest
        associated object from JSON as a dictionary.
        """
        return json.loads(self.load_object(prefix))

    def add_quote(self, **kwargs):
        """Write a new quote to the next numbered .txt file.

        Bug fix: the original took the last element of an *unsorted* glob
        (arbitrary filesystem order, and lexically '10.txt' < '2.txt') and
        then stripped characters with rstrip(".txt"); it also crashed with
        IndexError on an empty directory. Use the numeric maximum instead.
        """
        existing_ids = [
            int(p.stem)
            for p in self.local_path.glob("*.txt")
            if p.stem.isdigit()
        ]
        next_quote_index = max(existing_ids, default=0) + 1
        next_quote_fname = self.local_path / f"{next_quote_index}.txt"
        quote = form_quote(kwargs.pop("content"), **kwargs)
        with open(next_quote_fname, "w") as f:
            f.write(quote + "\n")
class AWSHandler(Handler):
    """Interact with an S3 bucket to handle the index."""

    def __init__(self, s3_res: Any):
        """
        Args:
            s3_res: instantiated boto3 s3 resource object
        """
        self.bucket = os.getenv("QUOTES_INDEX_S3_BUCKET")
        self.region = os.getenv("QUOTES_INDEX_AWS_REGION", "eu-west-1")
        self.s3_res = s3_res

    def iterate_text_pairs(self) -> Iterator[Tuple[int, str]]:
        """Generate successive pairs of (<s3-key>, <line-from-s3-file>) tuples from files
        in a s3 bucket which are labeled by their order in the bucket.
        i.e. '1.txt', '2.txt', ...

        Returns:
            iterator yielding (<file-id>, <line-from-file>) pairs
        """
        bucket = self.s3_res.Bucket(self.bucket)
        logger.info(f"Iterating through items in bucket: {bucket}")
        for f in bucket.objects.all():
            if not re.match(r"\d+.txt", f.key):
                continue
            # Assume s3 keys are named by order in bucket, as per spec
            file_id = int(Path(f.key).stem)
            logger.debug(f"Found file ID: {file_id}")
            file_id_iterator = itertools.repeat(file_id)
            try:
                obj = f.get()
            except BotoCoreError:
                logger.error(f"Failed to get s3 object: {f.key}")
                # Bug fix: the original fell through and referenced the
                # unbound `obj`, raising NameError; skip the failed key.
                continue
            yield from zip(file_id_iterator, obj["Body"].iter_lines())

    def write_index(self, s3_key: str, index: Dict):
        """Write a dictionary as JSON to an s3 key.

        Args:
            s3_key: key to s3 path to write to, including filename
                e.g. `index_20210527.json`
            index: dictionary to write to S3 (no-op when empty/falsy)
        """
        if not index:
            return
        try:
            # `obj` rather than `object`: avoid shadowing the builtin.
            obj = self.s3_res.Object(self.bucket, s3_key)
            obj.put(Body=json.dumps(index))
            logger.info(f"Wrote dictionary to key: {s3_key}")
        except BotoCoreError:
            logger.error(f"Failed to upload dictionary to key: {s3_key}", exc_info=True)
            raise

    def load_object(self, s3_key: str) -> str:
        """Serve the data in an S3 object.

        Args:
            s3_key: s3 key (or prefix) to read from. If a prefix matches
                several objects, the last one listed is loaded.

        Raises:
            FileNotFoundError: if no object matches the key/prefix.
        """
        bucket = self.s3_res.Bucket(self.bucket)
        # Bug fix: with no matching object the original referenced the
        # unbound name `object`, raising NameError; fail with a clear error.
        target = None
        for obj in bucket.objects.filter(Prefix=s3_key):
            target = obj
        if target is None:
            raise FileNotFoundError(f"No s3 object with prefix: {s3_key}")
        try:
            logger.info(f"Loaded object: {target}")
            response = target.get()
            data = response["Body"].read()
        except BotoCoreError:
            logger.error(f"Failed to load object from key: {s3_key}", exc_info=True)
            raise
        # Handle returning bytes not string
        try:
            return data.decode()
        except (UnicodeDecodeError, AttributeError):
            return data

    def load_index(self, s3_key: str) -> Dict:
        """Load an index (any JSON dict) from an S3 object.

        Args:
            s3_key: s3 key or prefix to read from. With a prefix, the
                last matching object is loaded.
        """
        try:
            return json.loads(self.load_object(s3_key))
        except BotoCoreError:
            logger.error(f"Failed to load dictionary from key: {s3_key}", exc_info=True)
            raise

    def add_quote(self, **kwargs):
        """Upload a new quote as <unix-timestamp>.txt to the bucket.

        NOTE(review): timestamped keys do match the `\\d+.txt` pattern used
        by iterate_text_pairs, but produce very large file IDs — confirm
        this interacts correctly with the index numbering convention.
        """
        tstamp = int(datetime.now().timestamp())
        s3_key = f"{tstamp}.txt"
        quote = form_quote(kwargs.pop("content"), **kwargs)
        obj = self.s3_res.Object(self.bucket, s3_key)
        obj.put(Body=quote)
| {"/tests/test_api.py": ["/src/api.py"], "/tests/test_handler.py": ["/src/handler.py"], "/src/local_index.py": ["/src/index.py", "/src/handler.py"], "/src/api.py": ["/src/handler.py"]} |
70,690 | SebStrug/quotes-index | refs/heads/main | /src/lambda_function.py | """Generate inverted index using AWS"""
from datetime import datetime
import boto3
from index import create_inverted_index, WORD_ID_MAP
from handler import AWSHandler
def lambda_handler(event: None, context):
    """AWS Lambda entry point: rebuild the inverted index from the S3
    quote files and write timestamped index + word-id map objects back."""
    handler = AWSHandler(boto3.resource("s3"))
    inverted_index = create_inverted_index(handler.iterate_text_pairs())
    stamp = datetime.now().strftime("%Y-%m-%d--%H:%M")
    handler.write_index(f"index-{stamp}", inverted_index)
    handler.write_index(f"word-ids-{stamp}", WORD_ID_MAP)
| {"/tests/test_api.py": ["/src/api.py"], "/tests/test_handler.py": ["/src/handler.py"], "/src/local_index.py": ["/src/index.py", "/src/handler.py"], "/src/api.py": ["/src/handler.py"]} |
70,691 | SebStrug/quotes-index | refs/heads/main | /src/api.py | from typing import List, Tuple, Dict
from pathlib import Path
from random import choices, choice
import logging
import re
import os
from itertools import chain
from pydantic import BaseModel
from starlette.exceptions import HTTPException as StarletteHTTPException
from fastapi.staticfiles import StaticFiles
from fastapi.responses import HTMLResponse
from fastapi.templating import Jinja2Templates
from fastapi import FastAPI, Request, Form, HTTPException
from cachetools import TTLCache, cached
import boto3
from src.handler import LocalHandler, AWSHandler, Handler
@cached(cache=TTLCache(maxsize=5000, ttl=3 * 60 * 60))
def get_handler() -> Handler:
    """Return the storage handler (AWS or local), cached for 3 hours.

    Falls back to the local handler when the QUOTES_ENV environment
    variable is not set to "aws".
    """
    if os.getenv("QUOTES_ENV") == "aws":
        return AWSHandler(boto3.resource("s3"))
    return LocalHandler(Path("quotes"))
@cached(cache=TTLCache(maxsize=5000, ttl=3 * 60 * 60))
def get_indexes() -> Tuple[Dict, Dict]:
    """Return (inverted index, word-id map), cached for 3 hours."""
    storage = get_handler()
    return storage.load_index("index"), storage.load_index("word-ids")
# Load both indexes once at import time (the loader itself caches for 3 h).
INVERTED_INDEX, WORD_ID_MAP = get_indexes()
app = FastAPI()
# Static assets and Jinja2 templates live under src/.
app.mount("/static", StaticFiles(directory="src/static"), name="static")
templates = Jinja2Templates(directory="src/templates")
class Quote(BaseModel):
    """A quote with an optional lead-in line and an attributed source."""

    lead_in: str = ""  # optional introductory line (ends with "..." in raw form)
    content: str  # the quote text itself
    source: str = "Anonymous"  # attribution; defaults to Anonymous
@app.get("/", response_class=HTMLResponse)
async def serve_home(request: Request):
    """Render the search / home page."""
    return templates.TemplateResponse("home.html", {"request": request})
@app.post("/", response_class=HTMLResponse)
async def search_word(request: Request, word: str = Form(...)):
    """Render one randomly chosen quote containing *word*; 404 when none."""
    matches = get_quotes(word)
    if not matches:
        raise HTTPException(status_code=404, detail=f"No quote with word '{word}'")
    chosen = split_quote(choice(matches))
    context = {
        "request": request,
        "lead_in": chosen.lead_in,
        "content": chosen.content,
        "source": chosen.source,
    }
    return templates.TemplateResponse("quote.html", context)
@app.exception_handler(StarletteHTTPException)
async def custom_http_exception_handler(request: Request, exc: StarletteHTTPException):
    """Handle HTTP errors by rendering the 404 page with a random quote.

    By default FastAPI returns a plain JSON dictionary for HTTP errors;
    here a friendly page is rendered instead.
    """
    quote_str = get_random_quote()
    quote = split_quote(quote_str)
    return templates.TemplateResponse(
        "404.html",
        {
            "request": request,
            "lead_in": quote.lead_in,
            "content": quote.content,
            "source": quote.source,
        },
        # BUG FIX: TemplateResponse defaults to HTTP 200, so error pages
        # previously reported success. Propagate the real status code.
        status_code=exc.status_code,
    )
@app.get("/add", response_class=HTMLResponse)
async def add_quote(request: Request):
    """Render the quote-submission form."""
    return templates.TemplateResponse("add_quote.html", {"request": request})
@app.post("/add", response_model=Quote)
def add_quotes_post(
    request: Request,
    lead_in: str = Form(...),
    content: str = Form(...),
    source: str = Form(...),
):
    """Echo a submitted quote back as a rendered page.

    NOTE(review): the submitted quote is never persisted — nothing here
    calls the storage handler's add_quote. Confirm whether saving was
    intended.
    """
    quote = Quote(lead_in=lead_in, content=content, source=source)
    return templates.TemplateResponse(
        "quote.html",
        {
            "request": request,
            "lead_in": quote.lead_in,
            "content": quote.content,
            "source": quote.source,
        },
    )
def get_random_quote() -> str:
    """Pick a random file id from the whole inverted index and load its quote."""
    every_file_id = list(chain.from_iterable(INVERTED_INDEX.values()))
    picked = choice(every_file_id)
    return get_handler().load_object(str(picked))
def get_quotes(word: str) -> List[str]:
    """Given a word, search the inverted index and retrieve quotes
    containing that word. Case insensitive.

    Args:
        word: word to search the inverted index for.

    Returns:
        list of quotes containing the given word (currently at most one —
        see the sampling note below); empty when the word is unknown.
    """
    word_id = WORD_ID_MAP.get(word.lower())
    # BUG FIX: word ids start at 0 (they come from itertools.count), so
    # `if not word_id` wrongly treated the first indexed word as missing.
    if word_id is None:
        logging.debug(f"Word: {word} not in inverted index.")
        return []
    # Index keys are strings after the JSON round-trip; guard against a
    # word id with no posting list so choices() never sees None.
    all_file_ids = INVERTED_INDEX.get(str(word_id))
    if not all_file_ids:
        return []
    handler = get_handler()
    quotes = set()
    # NOTE(review): choices(..., k=1) samples a single quote, presumably
    # to bound object-store reads — the original docstring promised *all*
    # quotes; confirm which is intended.
    for file_id in choices(all_file_ids, k=1):
        quote = handler.load_object(str(file_id))
        quotes.add(quote)
    return list(quotes)
def split_quote(quote: str) -> Quote:
    """Turn a quote from raw string form into a Quote model instance."""
    raw_lines = quote.split("\n")
    parts = {}
    last_index = len(raw_lines) - 1
    for position, line in enumerate(raw_lines):
        # Lead-in is only recognised on the first line, ending in "...".
        if position == 0 and line.endswith("..."):
            parts["lead_in"] = line
        # The quote body is wrapped in single quotes (may span newlines).
        if re.match(r"\'.+\'", line, flags=re.DOTALL):
            parts["content"] = line
        # The final line carries the attribution.
        if position == last_index:
            parts["source"] = line
    return Quote(
        lead_in=parts.get("lead_in", ""),
        content=parts.get("content", ""),
        source=parts.get("source", ""),
    )
| {"/tests/test_api.py": ["/src/api.py"], "/tests/test_handler.py": ["/src/handler.py"], "/src/local_index.py": ["/src/index.py", "/src/handler.py"], "/src/api.py": ["/src/handler.py"]} |
70,692 | SebStrug/quotes-index | refs/heads/main | /tests/test_index.py | import unittest
from collections import defaultdict
import itertools
from src import index
class BasicTestCase(unittest.TestCase):
    """Base test class to override the module level variable
    WORD_ID_MAP for each test
    """

    def setUp(self):
        # Fresh word->id mapping per test; ids are handed out 0, 1, 2, ...
        # on first access thanks to defaultdict + itertools.count.
        index.WORD_ID_MAP = defaultdict(itertools.count().__next__)
class TestWordToId(BasicTestCase):
    """Ids are handed out in first-access order and are stable on repeats."""

    def test(self):
        accesses = [("foo", 0), ("bar", 1), ("foo", 0), ("foo", 0), ("baz", 2)]
        for word, expected_id in accesses:
            self.assertEqual(expected_id, index.WORD_ID_MAP[word])
class TestGetTextWordIds(BasicTestCase):
    """get_text_word_ids yields one id per token, reusing ids for repeats."""

    def test_empty(self):
        ids = index.get_text_word_ids("")
        with self.assertRaises(StopIteration):
            next(ids)

    def test(self):
        ids = index.get_text_word_ids("this is a line index this line")
        for expected_id in range(0, 5):
            self.assertEqual(expected_id, next(ids))

    def test_dirty(self):
        # Punctuation, case and surrounding quotes must not affect ids.
        ids = index.get_text_word_ids('This is a line. Index this "line"')
        for expected_id in range(0, 5):
            self.assertEqual(expected_id, next(ids))
class TestGetWordFilePairs(BasicTestCase):
    """get_word_file_pairs yields (word_id, file_id) pairs in text order."""

    def test_empty(self):
        it = index.get_word_file_pairs(1, "")
        with self.assertRaises(StopIteration):
            next(it)

    def test(self):
        it = index.get_word_file_pairs(1, "this is a line")
        # Four distinct words -> ids 0..3, all paired with file id 1.
        for ind in range(4):
            self.assertEqual((ind, 1), next(it))

    def test_multiline(self):
        it = index.get_word_file_pairs(
            2,
            """this is a line
            this is another line""",
        )
        # Second line reuses ids for "this", "is", "line"; only
        # "another" gets the new id 4.
        self.assertEqual((0, 2), next(it))
        self.assertEqual((1, 2), next(it))
        self.assertEqual((2, 2), next(it))
        self.assertEqual((3, 2), next(it))
        self.assertEqual((0, 2), next(it))
        self.assertEqual((1, 2), next(it))
        self.assertEqual((4, 2), next(it))
        self.assertEqual((3, 2), next(it))
class TestCreateInvertedIndex(BasicTestCase):
    """create_inverted_index maps each word id to a sorted, de-duplicated
    list of file ids containing that word."""

    def test(self):
        file_line_it = iter(
            (
                (4, "this is a file containing some lines"),
                (1, "this is a file containing some lines"),
                (2, "yet more lines here"),
                (3, "here is another file"),
            )
        )
        # Word ids are assigned in first-appearance order across files:
        # this=0, is=1, a=2, file=3, containing=4, some=5, lines=6,
        # yet=7, more=8, here=9, another=10.
        expected_output = {
            0: [1, 4],
            1: [1, 3, 4],
            2: [1, 4],
            3: [1, 3, 4],
            4: [1, 4],
            5: [1, 4],
            6: [1, 2, 4],
            7: [2],
            8: [2],
            9: [2, 3],
            10: [3],
        }
        self.assertDictEqual(
            expected_output, dict(index.create_inverted_index(file_line_it))
        )
| {"/tests/test_api.py": ["/src/api.py"], "/tests/test_handler.py": ["/src/handler.py"], "/src/local_index.py": ["/src/index.py", "/src/handler.py"], "/src/api.py": ["/src/handler.py"]} |
70,694 | Abdelrhman-Gamal99/Music-Recognizer- | refs/heads/main | /starter_file.py | from PyQt5 import QtWidgets ,QtCore, QtGui
from PyQt5.QtWidgets import QMainWindow, QApplication, QLabel, QFileDialog, QAction,QTableWidget
from GUI import Ui_MainWindow
from sound_class import sound
import os
import sys
import matplotlib.pyplot as plot
import librosa
from pydub import AudioSegment
from tempfile import mktemp
import librosa.display
import numpy as np
from PIL import Image
import imagehash
import pylab
import textdistance
import pandas as pd
from math import floor, ceil
import operator
import sounddevice as sd
from difflib import SequenceMatcher
class ApplicationWindow(QtWidgets.QMainWindow):
    """Main window: load one or two songs, optionally mix them with the
    slider, fingerprint the result, and rank database songs by similarity.
    """

    def __init__(self):
        super(ApplicationWindow, self).__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.two_song = False  # becomes True once a second song is loaded
        self.outputSong = None  # mixed (or single) signal that gets fingerprinted
        self.Hashes = pd.read_csv('Hashes.csv')  # precomputed per-song hash codes
        self.ui.open_song1.clicked.connect(lambda: self.getfile(1))
        self.ui.open_song2.clicked.connect(lambda: self.getfile(2))
        self.ui.open_song2.setDisabled(True)  # song 2 selectable only after song 1
        self.ui.Mixing_Slider.sliderReleased.connect(lambda: self.mixer())

    def getfile(self, songNumber):
        """Prompt for an mp3 and load it as song 1 or song 2."""
        fname = QFileDialog.getOpenFileName(self, 'choose the signal', os.getenv('HOME'), "mp3(*.mp3)")
        self.path = fname[0]
        # NOTE(review): the file is loaded *before* the empty-path check, so
        # cancelling the dialog makes librosa.load receive "" — confirm.
        Audio = sound(self.path)
        if self.path == "":
            return
        else:
            if songNumber == 1:
                self.ui.song1_label.setText(os.path.splitext(os.path.basename(self.path))[0])
                self.ui.open_song2.setDisabled(False)
                self.mp3_audio1 = Audio.mp3_audio
                self.samplingFrequency1 = Audio.samplingFrequency
            elif songNumber == 2:
                self.ui.song2_label.setText(os.path.splitext(os.path.basename(self.path))[0])
                self.mp3_audio2 = Audio.mp3_audio
                self.samplingFrequency2 = Audio.samplingFrequency
                self.two_song = True

    def mixer(self):
        """Mix the loaded songs by the slider ratio (or pass song 1 through),
        fingerprint the result, and refresh the similarity table."""
        if self.two_song:
            sliderRatio = self.ui.Mixing_Slider.value() / 100
            self.outputSong = self.mp3_audio1 * sliderRatio + self.mp3_audio2 * (1 - sliderRatio)
        else:
            self.outputSong = self.mp3_audio1
        self.spectrogram_code = sound.spectrogram(self.outputSong)
        self.features_code = sound.spectrogram_features(self.outputSong)
        self.compare()

    def jaro_distance(self, s1, s2):
        """Jaro similarity of two strings in [0, 1] (1.0 = identical)."""
        # If the s are equal
        if (s1 == s2):
            return 1.0
        # Length of two s
        len1 = len(s1)
        len2 = len(s2)
        # Matching window: characters further apart than this don't match.
        max_dist = floor(max(len1, len2) / 2) - 1
        # Count of matches
        match = 0
        # Hash for matches
        hash_s1 = [0] * len(s1)
        hash_s2 = [0] * len(s2)
        # Traverse through the first
        for i in range(len1):
            # Check if there is any matches
            for j in range(max(0, i - max_dist),
                           min(len2, i + max_dist + 1)):
                # If there is a match
                if (s1[i] == s2[j] and hash_s2[j] == 0):
                    hash_s1[i] = 1
                    hash_s2[j] = 1
                    match += 1
                    break
        # If there is no match
        if (match == 0):
            return 0.0
        # Number of transpositions
        t = 0
        point = 0
        for i in range(len1):
            if (hash_s1[i]):
                # Find the next matched character
                # in second
                while (hash_s2[point] == 0):
                    point += 1
                if (s1[i] != s2[point]):
                    point += 1
                    t += 1
        t = t//2
        # Return the Jaro Similarity
        return (match / len1 + match / len2 +
                (match - t + 1) / match) / 3.0

    def compare(self):
        """Score every database song against the current fingerprints and
        fill the results table with the best matches."""
        self.ui.tableWidget.clearContents()
        songs_similarity = dict()
        for i in range(0, len(self.Hashes)):
            Name = self.Hashes.iloc[i, 0]
            # Average the similarity of the three hash codes (spectrogram,
            # mel-spectrogram, MFCC) into a single percentage.
            feature1_similarity = self.jaro_distance(self.spectrogram_code, self.Hashes.iloc[i, 1])
            feature2_similarity = self.jaro_distance(self.features_code[0], self.Hashes.iloc[i, 2])
            feature3_similarity = self.jaro_distance(self.features_code[1], self.Hashes.iloc[i, 3])
            similarityindex = ((feature1_similarity + feature2_similarity + feature3_similarity) / 3) * 100
            songs_similarity.update({Name: similarityindex})
        sorted_dict = sorted(songs_similarity.items(), key=operator.itemgetter(1), reverse=True)
        top_ten = sorted_dict[0:11]
        # NOTE(review): rows 1..10 are written but the table has rows 0..9 —
        # row 0 stays empty and the setItem at row 10 is silently a no-op,
        # so only 9 of the "top ten" are visible. Confirm intended.
        for row in range(1, 11):
            name = QtWidgets.QTableWidgetItem(str(top_ten[row - 1][0]))
            similarity_index = QtWidgets.QTableWidgetItem(str(top_ten[row - 1][1]) + '%')
            self.ui.tableWidget.setItem(row, 0, name)
            self.ui.tableWidget.setItem(row, 1, similarity_index)
def main():
    """Create the Qt application and run the recognizer window."""
    app = QtWidgets.QApplication(sys.argv)
    application = ApplicationWindow()
    application.show()
    app.exec_()


if __name__ == "__main__":
    main()
| {"/starter_file.py": ["/GUI.py", "/sound_class.py"]} |
70,695 | Abdelrhman-Gamal99/Music-Recognizer- | refs/heads/main | /sound_class.py | from PIL import Image
import PIL
import librosa
import numpy as np
import matplotlib.pyplot as plt
from pydub import AudioSegment
from tempfile import mktemp
import librosa.display
import imagehash
import pylab
import os
class sound:
    """Load a song with librosa and compute perceptual-hash fingerprints
    of its spectrogram representations."""

    def __init__(self, path):
        # First 60 s only, at librosa's default sample rate.
        self.mp3_audio, self.samplingFrequency = librosa.load(path, duration=60)

    # FIX: these were defined without `self` and without @staticmethod, so
    # they only worked when called on the class. Marking them static keeps
    # existing `sound.spectrogram(x)` callers working and also makes
    # instance access safe.
    @staticmethod
    def spectrogram(song):
        """Render the dB-scaled STFT spectrogram of *song* to a PNG and
        return its 16x16 perceptual hash as a hex string."""
        Spectro_Path = 'Spectrogram.png'
        spectrogram = librosa.amplitude_to_db(np.abs(librosa.stft(song)), ref=np.max)
        pylab.axis('off')
        pylab.axes([0., 0., 1., 1.], frameon=False, xticks=[], yticks=[])
        librosa.display.specshow(spectrogram, y_axis='linear')
        pylab.savefig(Spectro_Path, bbox_inches=None, pad_inches=0)
        pylab.close()
        spectrogram_code = str(imagehash.phash(Image.open(Spectro_Path), hash_size=16))
        return spectrogram_code

    @staticmethod
    def spectrogram_features(song):
        """Return perceptual-hash strings for the mel-spectrogram and MFCC
        features of *song*, in that order."""
        features_code = []
        for spectrogram_feature in ['melspectrogram', 'mfcc']:
            # getattr is the idiomatic dynamic-attribute lookup
            # (was librosa.feature.__getattribute__(...)).
            feature = getattr(librosa.feature, spectrogram_feature)(y=song, sr=22050)
            Image_array = Image.fromarray(feature)
            feature_code = str(imagehash.phash(Image_array, hash_size=16))
            features_code.append(feature_code)
        return features_code
| {"/starter_file.py": ["/GUI.py", "/sound_class.py"]} |
70,696 | Abdelrhman-Gamal99/Music-Recognizer- | refs/heads/main | /GUI.py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'gui.ui'
#
# Created by: PyQt5 UI code generator 5.14.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Auto-generated by pyuic5 from gui.ui — do not edit by hand."""

    def setupUi(self, MainWindow):
        """Build the widget tree: two song-picker rows, a mixing slider
        with an LCD readout, and a 10-row / 2-column results table."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(968, 737)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        spacerItem = QtWidgets.QSpacerItem(943, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.verticalLayout_2.addItem(spacerItem)
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.open_song1 = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.open_song1.sizePolicy().hasHeightForWidth())
        self.open_song1.setSizePolicy(sizePolicy)
        self.open_song1.setText("")
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("images/add-song.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.open_song1.setIcon(icon)
        self.open_song1.setIconSize(QtCore.QSize(40, 40))
        self.open_song1.setFlat(True)
        self.open_song1.setObjectName("open_song1")
        self.horizontalLayout.addWidget(self.open_song1)
        self.song1_label = QtWidgets.QLabel(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.song1_label.sizePolicy().hasHeightForWidth())
        self.song1_label.setSizePolicy(sizePolicy)
        self.song1_label.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.song1_label.setText("")
        self.song1_label.setObjectName("song1_label")
        self.horizontalLayout.addWidget(self.song1_label)
        self.open_song2 = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.open_song2.sizePolicy().hasHeightForWidth())
        self.open_song2.setSizePolicy(sizePolicy)
        self.open_song2.setText("")
        self.open_song2.setIcon(icon)
        self.open_song2.setIconSize(QtCore.QSize(40, 40))
        self.open_song2.setFlat(True)
        self.open_song2.setObjectName("open_song2")
        self.horizontalLayout.addWidget(self.open_song2)
        self.song2_label = QtWidgets.QLabel(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.song2_label.sizePolicy().hasHeightForWidth())
        self.song2_label.setSizePolicy(sizePolicy)
        self.song2_label.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.song2_label.setText("")
        self.song2_label.setObjectName("song2_label")
        self.horizontalLayout.addWidget(self.song2_label)
        self.verticalLayout_2.addLayout(self.horizontalLayout)
        self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_4.setObjectName("horizontalLayout_4")
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_4.addItem(spacerItem1)
        self.lcdNumber = QtWidgets.QLCDNumber(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Minimum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.lcdNumber.sizePolicy().hasHeightForWidth())
        self.lcdNumber.setSizePolicy(sizePolicy)
        self.lcdNumber.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.lcdNumber.setFrameShadow(QtWidgets.QFrame.Plain)
        self.lcdNumber.setSegmentStyle(QtWidgets.QLCDNumber.Flat)
        self.lcdNumber.setObjectName("lcdNumber")
        self.horizontalLayout_4.addWidget(self.lcdNumber)
        self.Mixing_Slider = QtWidgets.QSlider(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.Mixing_Slider.sizePolicy().hasHeightForWidth())
        self.Mixing_Slider.setSizePolicy(sizePolicy)
        self.Mixing_Slider.setMaximum(100)
        self.Mixing_Slider.setPageStep(1)
        self.Mixing_Slider.setTracking(True)
        self.Mixing_Slider.setOrientation(QtCore.Qt.Horizontal)
        self.Mixing_Slider.setTickPosition(QtWidgets.QSlider.NoTicks)
        self.Mixing_Slider.setObjectName("Mixing_Slider")
        self.horizontalLayout_4.addWidget(self.Mixing_Slider)
        spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_4.addItem(spacerItem2)
        self.verticalLayout_2.addLayout(self.horizontalLayout_4)
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.tableWidget = QtWidgets.QTableWidget(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.tableWidget.sizePolicy().hasHeightForWidth())
        self.tableWidget.setSizePolicy(sizePolicy)
        self.tableWidget.setLineWidth(1)
        self.tableWidget.setMidLineWidth(1)
        self.tableWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
        self.tableWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
        self.tableWidget.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)
        self.tableWidget.setAutoScroll(True)
        self.tableWidget.setAutoScrollMargin(16)
        self.tableWidget.setDragEnabled(False)
        self.tableWidget.setTextElideMode(QtCore.Qt.ElideNone)
        self.tableWidget.setGridStyle(QtCore.Qt.SolidLine)
        self.tableWidget.setWordWrap(True)
        self.tableWidget.setCornerButtonEnabled(False)
        self.tableWidget.setObjectName("tableWidget")
        self.tableWidget.setColumnCount(2)
        self.tableWidget.setRowCount(10)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setVerticalHeaderItem(0, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setVerticalHeaderItem(1, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setVerticalHeaderItem(2, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setVerticalHeaderItem(3, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setVerticalHeaderItem(4, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setVerticalHeaderItem(5, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setVerticalHeaderItem(6, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setVerticalHeaderItem(7, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setVerticalHeaderItem(8, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setVerticalHeaderItem(9, item)
        item = QtWidgets.QTableWidgetItem()
        item.setTextAlignment(QtCore.Qt.AlignCenter)
        font = QtGui.QFont()
        font.setFamily("Arial")
        font.setPointSize(10)
        font.setBold(True)
        font.setItalic(False)
        font.setWeight(75)
        item.setFont(font)
        self.tableWidget.setHorizontalHeaderItem(0, item)
        item = QtWidgets.QTableWidgetItem()
        item.setTextAlignment(QtCore.Qt.AlignCenter)
        font = QtGui.QFont()
        font.setFamily("Arial")
        font.setPointSize(9)
        font.setBold(True)
        font.setWeight(75)
        item.setFont(font)
        self.tableWidget.setHorizontalHeaderItem(1, item)
        self.tableWidget.horizontalHeader().setCascadingSectionResizes(False)
        self.tableWidget.horizontalHeader().setDefaultSectionSize(304)
        self.tableWidget.horizontalHeader().setHighlightSections(True)
        self.tableWidget.horizontalHeader().setMinimumSectionSize(55)
        self.tableWidget.horizontalHeader().setSortIndicatorShown(True)
        self.tableWidget.horizontalHeader().setStretchLastSection(True)
        self.tableWidget.verticalHeader().setVisible(True)
        self.tableWidget.verticalHeader().setCascadingSectionResizes(True)
        self.tableWidget.verticalHeader().setDefaultSectionSize(39)
        self.tableWidget.verticalHeader().setSortIndicatorShown(True)
        self.tableWidget.verticalHeader().setStretchLastSection(False)
        self.verticalLayout.addWidget(self.tableWidget)
        spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.verticalLayout.addItem(spacerItem3)
        self.verticalLayout_2.addLayout(self.verticalLayout)
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 968, 26))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        self.Mixing_Slider.valueChanged['int'].connect(self.lcdNumber.display)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Apply all translatable strings (window title, table headers)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.tableWidget.setSortingEnabled(True)
        # NOTE(review): vertical header item 0 is never labelled and item 9
        # is labelled "10" (label "9" is skipped) — confirm intended.
        item = self.tableWidget.verticalHeaderItem(1)
        item.setText(_translate("MainWindow", "1"))
        item = self.tableWidget.verticalHeaderItem(2)
        item.setText(_translate("MainWindow", "2"))
        item = self.tableWidget.verticalHeaderItem(3)
        item.setText(_translate("MainWindow", "3"))
        item = self.tableWidget.verticalHeaderItem(4)
        item.setText(_translate("MainWindow", "4"))
        item = self.tableWidget.verticalHeaderItem(5)
        item.setText(_translate("MainWindow", "5"))
        item = self.tableWidget.verticalHeaderItem(6)
        item.setText(_translate("MainWindow", "6"))
        item = self.tableWidget.verticalHeaderItem(7)
        item.setText(_translate("MainWindow", "7"))
        item = self.tableWidget.verticalHeaderItem(8)
        item.setText(_translate("MainWindow", "8"))
        item = self.tableWidget.verticalHeaderItem(9)
        item.setText(_translate("MainWindow", "10"))
        item = self.tableWidget.horizontalHeaderItem(0)
        item.setText(_translate("MainWindow", "Song_Name"))
        item = self.tableWidget.horizontalHeaderItem(1)
        item.setText(_translate("MainWindow", "Similarity_index"))
if __name__ == "__main__":
    # Standalone preview of the generated UI (not used by starter_file.py).
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
| {"/starter_file.py": ["/GUI.py", "/sound_class.py"]} |
70,697 | Abdelrhman-Gamal99/Music-Recognizer- | refs/heads/main | /songs/Group02/database.py | import librosa
from librosa.feature.spectral import melspectrogram, spectral_bandwidth, spectral_rolloff
from matplotlib.pyplot import specgram
from pydub import AudioSegment
from tempfile import mktemp
import librosa.display
import numpy as np
import os
import pylab
from PIL import Image
import imagehash
import os
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import csv
from imagededup.methods import PHash
# Fingerprint every mp3 in the working directory and write the hash codes
# (name, spectrogram, mel-spectrogram, MFCC) to Hashes2.csv.
features = ['melspectrogram', 'MFCC']
header = ['name', 'spectrogram_code', 'melspectrogram_code', 'MFCC_code']
with open('Hashes2.csv', 'w', encoding='UTF8', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(header)
    for filename in os.listdir():
        if filename.endswith(".mp3"):
            # First 60 s at librosa's default sample rate.
            mp3_audio, samplingFrequency = librosa.load(filename, duration=60)
            ##spectrogram
            Spectro_Path = 'spectrograms/' + os.path.splitext(os.path.basename(filename))[0] + '.png'
            pylab.axis('off')
            pylab.axes([0., 0., 1., 1.], frameon=False, xticks=[], yticks=[])
            spectrogram = librosa.power_to_db((np.abs(librosa.stft(mp3_audio)))**2, ref=np.max)
            librosa.display.specshow(spectrogram, y_axis='linear')
            pylab.savefig(Spectro_Path, bbox_inches=None, pad_inches=0)
            pylab.close()
            Image_array = Image.fromarray(spectrogram)
            # Hash is taken from the raw dB array, not the saved PNG.
            # NOTE(review): sound_class.spectrogram hashes the PNG instead,
            # so the two fingerprints may not be comparable — confirm.
            spectrogram_code = imagehash.phash(Image_array, hash_size=16)
            #spectrogram_code = imagehash.phash(Image.open(Spectro_Path),hash_size=16)
            ##extract features
            for feature in features:
                if feature == 'melspectrogram':
                    # NOTE(review): S here is the dB-scaled spectrogram, but
                    # librosa.feature.melspectrogram documents S as a *power*
                    # spectrogram — confirm this is intended.
                    melspectrogram = librosa.feature.melspectrogram(y=mp3_audio, S=spectrogram, sr=samplingFrequency)
                    Image_array = Image.fromarray(melspectrogram)
                    melspectrogram_code = imagehash.phash(Image_array, hash_size=16)
                elif feature == 'MFCC':
                    MFCC = librosa.feature.mfcc(y=mp3_audio, sr=samplingFrequency)
                    Image_array = Image.fromarray(MFCC)
                    MFCC_code = imagehash.phash(Image_array, hash_size=16)
            data = [str(filename), str(spectrogram_code), str(melspectrogram_code), str(MFCC_code)]
            writer.writerow(data)
| {"/starter_file.py": ["/GUI.py", "/sound_class.py"]} |
70,871 | Ramalingasamy012/AccountFinderBot | refs/heads/main | /main.py | import constants as keys
from telegram.ext import *
import Responses as R
import requests
from bs4 import BeautifulSoup
print("Bot Started...")
def start_command(update, context):
    """Handle /start: describe what the bot can answer."""
    update.message.reply_text("Ask Questions like who are you?,creator?")
def help_command(update, context):
    """Handle /help: explain the /info_<username> search command."""
    update.message.reply_text("Type /info_{username} to search for the person")
def info(update, context):
    """Handle /info_<username>: google the username and reply with any
    social-media profile links and PDF documents found.

    On any failure the exception is printed and the raw username is
    echoed back to the chat.
    """
    # Renamed from `id`, which shadowed the builtin.
    username = update.message.text.replace('/info_', '')
    headers = {
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Methods': 'GET',
        'Access-Control-Allow-Headers': 'Content-Type',
        'Access-Control-Max-Age': '3600',
        'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'
    }
    try:
        url = "https://www.google.com/search?q=" + username
        response = requests.get(url, headers=headers)
        socialmedia = ["instagram", "facebook", "twitter", "linkedin", "github",
                       "scholar", "hackerearth", "hackerrank", "hackerone",
                       "tiktok", "youtube", "books", "researchgate", "publons",
                       "orcid", "maps"]
        # Collect the first link of every result block on the page.
        linklist = []
        soup = BeautifulSoup(response.content, 'html.parser')
        for g in soup.find_all('div', class_='g'):
            anchors = g.find_all('a')
            if 'href' in str(anchors[0]):
                linklist.append(anchors[0]['href'])
        # Reply with every result link that mentions a known platform.
        # (Dead `foundedlinks` accumulator and unused `f=0` removed.)
        found = 0
        update.message.reply_text("Social Media Links")
        for platform in socialmedia:
            for link in linklist:
                if platform in str(link):
                    found += 1
                    update.message.reply_text(link)
        update.message.reply_text("[-] Checking for any pdf documents associated with this name .....")
        url = "https://www.google.com/search?q=%22" + username + "%22+filetype%3Apdf&oq=%22" + username + "%22+filetype%3Apdf"
        response = requests.get(url, headers=headers)
        soup = BeautifulSoup(response.content, 'html.parser')
        for g in soup.find_all('div', class_='g'):
            links = g.find_all('a')
            if 'href' in str(links[0]):
                update.message.reply_text(links[0]['href'])
        if found == 0:
            update.message.reply_text("No Info about this person")
    except Exception as e:
        # Best-effort fallback: log the error and echo the username back.
        print(e)
        update.message.reply_text(username, parse_mode='Markdown')
def handle_message(update, context):
    """Reply to free-form text with a canned response."""
    incoming = str(update.message.text).lower()
    update.message.reply_text(R.sample_response(incoming))
def error(update, context):
    """Log any error raised while processing an update."""
    print(f"Update {update} caused error {context.error}")
def main():
    """Wire up all handlers and start long-polling.

    NOTE(review): RegexHandler was deprecated in python-telegram-bot v12+
    (replaced by MessageHandler(Filters.regex(...))) — confirm the pinned
    library version still supports it.
    """
    updater = Updater(keys.API_KEY, use_context=True)
    dp = updater.dispatcher
    dp.add_handler(CommandHandler("start", start_command))
    # Order matters: the /info_ regex must be registered before the
    # catch-all text handler below.
    dp.add_handler(RegexHandler("^(/info_[\w]+)$", info))
    dp.add_handler(CommandHandler("help", help_command))
    dp.add_handler(MessageHandler(Filters.text, handle_message))
    dp.add_error_handler(error)
    updater.start_polling()
    updater.idle()
main() | {"/main.py": ["/Responses.py"]} |
70,872 | Ramalingasamy012/AccountFinderBot | refs/heads/main | /Responses.py | def sample_response(input_text):
user_message=str(input_text).lower()
if user_message in ("hello","hi"):
return "Hey! Hello"
if user_message in ("who are you?","who are you"):
return "I am an BOT created By Ramalingasamy M K (Security Researcher)"
if user_message in ("creator?"):
return "Ramalingsamy M K"
| {"/main.py": ["/Responses.py"]} |
71,053 | ankit-kumar-cs/Falling-Fruits-Major-Project | refs/heads/master | /customer/migrations/0004_auto_20200625_1646.py | # Generated by Django 3.0.7 on 2020-06-25 11:16
import autoslug.fields
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: rebuild Product.slug (AutoSlugField, default=9)."""

    dependencies = [
        ('customer', '0003_auto_20200625_1315'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='slug',
            field=autoslug.fields.AutoSlugField(default=9, editable=False, populate_from='farm', unique=True),
        ),
    ]
| {"/customer/admin.py": ["/customer/models.py"], "/customer/urls.py": ["/customer/views.py"], "/customer/views.py": ["/customer/models.py"]} |
71,054 | ankit-kumar-cs/Falling-Fruits-Major-Project | refs/heads/master | /customer/admin.py | from django.contrib import admin
from .models import Account,Profile,Product,Farm,Order,OrderItem,Address
# Register your models here.
# Register each customer-app model with the default admin site
# (Address is registered just below; registration order is preserved).
for _model in (Account, Profile, Product, Farm, Order, OrderItem):
    admin.site.register(_model)
admin.site.register(Address) | {"/customer/admin.py": ["/customer/models.py"], "/customer/urls.py": ["/customer/views.py"], "/customer/views.py": ["/customer/models.py"]} |
71,055 | ankit-kumar-cs/Falling-Fruits-Major-Project | refs/heads/master | /customer/migrations/0003_auto_20200625_1315.py | # Generated by Django 3.0.7 on 2020-06-25 07:45
import autoslug.fields
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: rebuild Product.slug (AutoSlugField, default=12)."""

    dependencies = [
        ('customer', '0002_auto_20200623_0932'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='slug',
            field=autoslug.fields.AutoSlugField(default=12, editable=False, populate_from='farm', unique=True),
        ),
    ]
| {"/customer/admin.py": ["/customer/models.py"], "/customer/urls.py": ["/customer/views.py"], "/customer/views.py": ["/customer/models.py"]} |
71,056 | ankit-kumar-cs/Falling-Fruits-Major-Project | refs/heads/master | /customer/migrations/0001_initial.py | # Generated by Django 3.0.7 on 2020-06-21 05:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Account, Farm,
    Profile and Product tables for the customer app."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Account',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('email', models.EmailField(max_length=60, unique=True, verbose_name='email')),
                ('username', models.CharField(max_length=30, unique=True)),
                ('date_joined', models.DateTimeField(auto_now_add=True, verbose_name='date joined')),
                ('last_login', models.DateTimeField(auto_now=True, verbose_name='last login')),
                ('is_admin', models.BooleanField(default=False)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('is_superuser', models.BooleanField(default=False)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Farm',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('farm_name', models.CharField(max_length=50, unique=True)),
                ('pin_code', models.IntegerField()),
                ('address', models.TextField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=30)),
                ('last_name', models.CharField(max_length=30)),
                ('contact', models.IntegerField(null=True)),
                ('image', models.ImageField(default='default.jpg', upload_to='profile_pics')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('price', models.FloatField()),
                ('category', models.CharField(choices=[('Fr_Ap', 'Fruits:Apple'), ('Fr_Mn', 'Fruits:Mango'), ('Fr_Or', 'Fruits:Orange'), ('Fr_Co', 'Fruits:Cocumber'), ('Vg_Ld', 'Vegetable:LadyFinger'), ('Vg_Tt', 'Vegetable:Tomato'), ('Vg_Pt', 'Vegetable:Patato'), ('gr', 'Grains'), ('ct', 'Cotton'), ('rw', 'Raw Material'), ('sp', 'spices')], max_length=6)),
                ('farm', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='customer.Farm')),
            ],
        ),
    ]
| {"/customer/admin.py": ["/customer/models.py"], "/customer/urls.py": ["/customer/views.py"], "/customer/views.py": ["/customer/models.py"]} |
71,057 | ankit-kumar-cs/Falling-Fruits-Major-Project | refs/heads/master | /customer/urls.py | from django.urls import path
from . import views
from .views import ProductView,ProductDetailView,CartView,OrderDetailView
# URL configuration for the customer app.
# app_name enables the 'customer:' namespace used by reverse()/redirect()
# calls throughout views.py (e.g. redirect('customer:home')); without it,
# namespaced reversal raises ImproperlyConfigured on Django 2+.
app_name = 'customer'

urlpatterns = [
    path('', views.home_page, name="home"),
    path('working/', views.working, name="working"),
    path('register/', views.registration_view, name="register"),
    path('logout/', views.logout_view, name="logout"),
    path('login/', views.login_view, name="log-in"),
    # Django's default LOGIN_URL redirect target.
    path('accounts/login/', views.login_view, name="log-in"),
    path('profile/', views.profile_view, name='profile'),
    path('update-profile/', views.update_profile, name="update-profile"),
    path('product/', ProductView.as_view(), name="product"),
    path('product/<slug:slug>/', ProductDetailView.as_view(), name="product-detail"),
    path('cart/', views.CartView, name="cart"),
    path('add-to-cart/<slug:slug>/', views.add_to_cart, name='add-to-cart'),
    path('remove-from-cart/<slug:slug>/', views.remove_from_cart, name='remove-from-cart'),
    path('remove-single-item-from-cart/<slug:slug>/', views.remove_single_item_from_cart, name='remove-single-from-cart'),
    path('checkout/', views.CheckoutView, name='checkout'),
    path('order/', OrderDetailView.as_view(), name='order-detail-view'),
    path('contact/', views.contact_view, name='contact-view'),
    # path('profile/update-profile/', views.update_profile, name="update_profile"),
]
| {"/customer/admin.py": ["/customer/models.py"], "/customer/urls.py": ["/customer/views.py"], "/customer/views.py": ["/customer/models.py"]} |
71,058 | ankit-kumar-cs/Falling-Fruits-Major-Project | refs/heads/master | /customer/migrations/0005_auto_20200630_0635.py | # Generated by Django 3.0.7 on 2020-06-30 01:05
import autoslug.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (makemigrations) schema migration.

    Adds the Address, Coupon, OrderItem and Order models and removes the
    temporary default from Product.slug.  Avoid hand-editing applied
    migrations; the recorded history must stay reproducible.
    """

    dependencies = [
        ('customer', '0004_auto_20200625_1646'),
    ]

    operations = [
        # Shipping address, linked to the custom user model.
        migrations.CreateModel(
            name='Address',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('street_address', models.CharField(max_length=100)),
                ('village_name', models.CharField(max_length=100)),
                ('pincode', models.IntegerField()),
                ('default', models.BooleanField(default=False)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'Addresses',
            },
        ),
        # Flat-amount discount codes.
        migrations.CreateModel(
            name='Coupon',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=15)),
                ('amount', models.FloatField()),
            ],
        ),
        migrations.AlterField(
            model_name='product',
            name='slug',
            field=autoslug.fields.AutoSlugField(editable=False, populate_from='farm', unique=True),
        ),
        # One product line (with quantity) inside a cart/order.
        migrations.CreateModel(
            name='OrderItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ordered', models.BooleanField(default=False)),
                ('quantity', models.IntegerField(default=1)),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='customer.Product')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Cart / completed order container.
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ref_code', models.CharField(blank=True, max_length=20, null=True)),
                ('start_date', models.DateTimeField(auto_now_add=True)),
                ('ordered_date', models.DateTimeField()),
                ('ordered', models.BooleanField(default=False)),
                ('being_delivered', models.BooleanField(default=False)),
                ('received', models.BooleanField(default=False)),
                ('address', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='shipping_address', to='customer.Address')),
                ('coupon', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='customer.Coupon')),
                ('items', models.ManyToManyField(to='customer.OrderItem')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| {"/customer/admin.py": ["/customer/models.py"], "/customer/urls.py": ["/customer/views.py"], "/customer/views.py": ["/customer/models.py"]} |
71,059 | ankit-kumar-cs/Falling-Fruits-Major-Project | refs/heads/master | /customer/migrations/0002_auto_20200623_0932.py | # Generated by Django 3.0.7 on 2020-06-23 04:02
import autoslug.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (makemigrations) schema migration.

    Adds Product.slug, tightens Farm.farm_name to 15 chars and switches
    Product.category to human-readable choice values.  Avoid hand-editing
    applied migrations.
    """

    dependencies = [
        ('customer', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='product',
            name='slug',
            # default=88 was a one-off value for populating existing rows.
            field=autoslug.fields.AutoSlugField(default=88, editable=False, populate_from='farm', unique=True),
        ),
        migrations.AlterField(
            model_name='farm',
            name='farm_name',
            field=models.CharField(max_length=15, unique=True),
        ),
        migrations.AlterField(
            model_name='product',
            name='category',
            field=models.CharField(choices=[('Fruit:Apple', 'Fruit:Apple'), ('Fruit:Mango', 'Fruit:Mango'), ('Fruit:Orange', 'Fruit:Orange'), ('Fruit:Cocumber', 'Fruit:Cocumber'), ('Vegetable:LadyFinger', 'Vegetable:LadyFinger'), ('Vegetable:Tomato', 'Vegetable:Tomato'), ('Vegetable:Patato', 'Vegetable:Patato'), ('Grain', 'Grain'), ('Cotton', 'Cotton'), ('Raw Material', 'Raw Material'), ('Spices', 'Spices')], max_length=20),
        ),
    ]
| {"/customer/admin.py": ["/customer/models.py"], "/customer/urls.py": ["/customer/views.py"], "/customer/views.py": ["/customer/models.py"]} |
71,060 | ankit-kumar-cs/Falling-Fruits-Major-Project | refs/heads/master | /customer/models.py | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
from autoslug import AutoSlugField
from random import randint as rd
from django.urls import reverse
import falling_fruits.settings as settings
class MyAccountManager(BaseUserManager):
    """Manager for the custom email-login Account model."""

    def create_user(self, email, username, password=None):
        """Create and persist a regular account; email and username are required."""
        if not email:
            raise ValueError('Users must have an email address')
        if not username:
            raise ValueError('Users must have a username')
        user = self.model(email=self.normalize_email(email),
                          username=username,)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, email, username, password):
        """Create an account with the admin/staff/superuser flags set."""
        user = self.create_user(email=self.normalize_email(email),
                                password=password,
                                username=username,)
        user.is_admin = True
        user.is_staff = True
        user.is_superuser = True
        user.save(using=self._db)
        return user
class Account(AbstractBaseUser):
    """Custom user model: the email address is the login identifier."""
    email = models.EmailField(verbose_name="email", max_length=60, unique=True)
    username = models.CharField(max_length=30, unique=True)
    date_joined = models.DateTimeField(verbose_name='date joined', auto_now_add=True)
    last_login = models.DateTimeField(verbose_name='last login', auto_now=True)
    is_admin = models.BooleanField(default=False)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    is_superuser = models.BooleanField(default=False)

    USERNAME_FIELD = 'email'        # log in with email, not username
    REQUIRED_FIELDS = ['username']  # still prompted for by createsuperuser
    objects = MyAccountManager()

    def __str__(self):
        return self.email

    # For checking permissions. To keep it simple, all admins have ALL permissions.
    def has_perm(self, perm, obj=None):
        return self.is_admin

    # Does this user have permission to view this app? (ALWAYS YES FOR
    # SIMPLICITY)
    def has_module_perms(self, app_label):
        return True
class Profile(models.Model):
    """Extra per-user data (name, contact, avatar), linked 1:1 to the user."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=30)
    # NOTE(review): storing phone numbers as int drops leading zeros — verify.
    contact = models.IntegerField(null=True)
    image = models.ImageField(default='default.jpg', upload_to='profile_pics')

    def __str__(self):
        return f'{self.user.username} Profile'
class Farm(models.Model):
    """A producer location; every Product belongs to one Farm."""
    farm_name = models.CharField(max_length=15, unique=True, null=False)
    pin_code = models.IntegerField()
    address = models.TextField(max_length=50)

    def __str__(self):
        return f'{self.farm_name}'
# (db_value, display) pairs for Product.category.  Template filters parse the
# 'Type:Name' convention; entries without a colon (Grain, Cotton, ...) exist too.
# NOTE(review): 'Cocumber' and 'Patato' are misspelled, but they are stored DB
# values — fixing them needs a data migration, not just an edit here.
CATEGORY_CHOICES = (('Fruit:Apple', 'Fruit:Apple'),
                    ('Fruit:Mango', 'Fruit:Mango'),
                    ('Fruit:Orange', 'Fruit:Orange'),
                    ('Fruit:Cocumber', 'Fruit:Cocumber'),
                    ('Vegetable:LadyFinger', 'Vegetable:LadyFinger'),
                    ('Vegetable:Tomato', 'Vegetable:Tomato'),
                    ('Vegetable:Patato', 'Vegetable:Patato'),
                    ('Grain', 'Grain'),
                    ('Cotton', 'Cotton'),
                    ('Raw Material', 'Raw Material'),
                    ('Spices', 'Spices'),)
class Product(models.Model):
    """A sellable item offered by a Farm, addressed by an auto-generated slug."""
    price = models.FloatField()
    category = models.CharField(max_length=20, choices=CATEGORY_CHOICES)
    farm = models.ForeignKey(Farm, on_delete=models.CASCADE)
    # Slug derives from the farm name; AutoSlugField de-duplicates.
    slug = AutoSlugField(populate_from='farm', unique=True)

    # get_absolute_url is used to take us to the product detail page.
    def get_absolute_url(self):
        return reverse("customer:product-detail", kwargs={
            'slug': self.slug
        })

    def get_add_to_cart_url(self):
        return reverse("customer:add-to-cart", kwargs={
            'slug': self.slug
        })

    def get_remove_from_cart_url(self):
        return reverse("customer:remove-from-cart", kwargs={
            'slug': self.slug
        })
class OrderItem(models.Model):
    """A quantity of one Product inside a user's cart/order."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL,
                             on_delete=models.CASCADE)
    ordered = models.BooleanField(default=False)  # True once the order is placed
    product = models.ForeignKey(Product, on_delete=models.CASCADE)
    quantity = models.IntegerField(default=1)

    def __str__(self):
        return f"{self.quantity} of {self.product.category}"

    def get_total_item_price(self):
        # Line subtotal: quantity x unit price.
        return self.quantity * self.product.price
class Order(models.Model):
    """A cart (ordered=False) or a placed order (ordered=True) of OrderItems."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL,
                             on_delete=models.CASCADE)
    ref_code = models.CharField(max_length=20, blank=True, null=True)
    items = models.ManyToManyField(OrderItem)
    start_date = models.DateTimeField(auto_now_add=True)
    ordered_date = models.DateTimeField()
    ordered = models.BooleanField(default=False)
    address = models.OneToOneField('Address', related_name='shipping_address', on_delete=models.SET_NULL, blank=True, null=True)
    #payment = models.ForeignKey('Payment', on_delete=models.SET_NULL, blank=True, null=True)
    being_delivered = models.BooleanField(default=False)
    received = models.BooleanField(default=False)
    coupon = models.ForeignKey('Coupon', on_delete=models.SET_NULL, blank=True, null=True)

    def __str__(self):
        return self.user.username

    def get_total(self):
        # Sum of item subtotals, minus the coupon discount if one is attached.
        total = 0
        for order_item in self.items.all():
            total += order_item.get_total_item_price()
        if self.coupon:
            total -= self.coupon.amount
        return total

    def set_order(self):
        # NOTE(review): only mutates the in-memory flag; the caller must call
        # save() afterwards or the change is lost.
        self.ordered = True
# (db_value, display) choices for Address.delivery_time_slot.
DELIVERY_SLOTS = (
    ('Morning', 'Morning(6AM-10AM)'),
    ('Evening', 'Evening(4PM-8PM)')
)
class Address(models.Model):
    """A shipping address plus preferred delivery slot for a user."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL,
                             on_delete=models.CASCADE)
    name = models.CharField(max_length=100)
    street_address = models.CharField(max_length=100)
    village_name = models.CharField(max_length=100)
    pincode = models.IntegerField()
    default = models.BooleanField(verbose_name="Set as Default Address", default=False)
    # NOTE(review): int storage drops leading zeros in phone numbers — verify.
    contact_number = models.IntegerField()
    delivery_time_slot = models.CharField(max_length=20, choices=DELIVERY_SLOTS)
    slug = AutoSlugField(populate_from='village_name', unique=True)

    def __str__(self):
        return self.slug

    class Meta:
        verbose_name_plural = 'Addresses'
class Coupon(models.Model):
    """A flat-amount discount code applicable to an Order."""
    code = models.CharField(max_length=15)
    amount = models.FloatField()  # discount subtracted in Order.get_total()

    def __str__(self):
        return self.code
| {"/customer/admin.py": ["/customer/models.py"], "/customer/urls.py": ["/customer/views.py"], "/customer/views.py": ["/customer/models.py"]} |
71,061 | ankit-kumar-cs/Falling-Fruits-Major-Project | refs/heads/master | /customer/migrations/0007_auto_20200706_1703.py | # Generated by Django 3.0.7 on 2020-07-06 11:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: switch Order.address from FK to OneToOne.

    makemigrations output — avoid hand-editing applied migrations.
    """

    dependencies = [
        ('customer', '0006_auto_20200705_1748'),
    ]

    operations = [
        migrations.AlterField(
            model_name='order',
            name='address',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='shipping_address', to='customer.Address'),
        ),
    ]
| {"/customer/admin.py": ["/customer/models.py"], "/customer/urls.py": ["/customer/views.py"], "/customer/views.py": ["/customer/models.py"]} |
71,062 | ankit-kumar-cs/Falling-Fruits-Major-Project | refs/heads/master | /customer/migrations/0006_auto_20200705_1748.py | # Generated by Django 3.0.7 on 2020-07-05 12:18
import autoslug.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: extend Address with contact, name and slug fields.

    makemigrations output — avoid hand-editing applied migrations.  The
    hard-coded defaults (123, 'Ankit', 121233) were one-off values used to
    back-fill existing rows (preserve_default=False drops them afterwards).
    """

    dependencies = [
        ('customer', '0005_auto_20200630_0635'),
    ]

    operations = [
        migrations.AddField(
            model_name='address',
            name='contact_number',
            field=models.IntegerField(default=123),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='address',
            name='name',
            field=models.CharField(default='Ankit', max_length=100),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='address',
            name='slug',
            field=autoslug.fields.AutoSlugField(default=121233, editable=False, populate_from='village_name', unique=True),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='address',
            name='default',
            field=models.BooleanField(default=False, verbose_name='Set as Default Address'),
        ),
    ]
| {"/customer/admin.py": ["/customer/models.py"], "/customer/urls.py": ["/customer/views.py"], "/customer/views.py": ["/customer/models.py"]} |
71,063 | ankit-kumar-cs/Falling-Fruits-Major-Project | refs/heads/master | /customer/views.py | from django.shortcuts import render, redirect,get_object_or_404
from django.urls import reverse
from django.contrib.auth import login, authenticate, logout
from django.contrib.auth.decorators import login_required
from customer.forms import RegistrationForm, AccountAuthenticationForm,UserUpdateForm,ProfileUpdateForm,AddressForm
from django.contrib import messages
from .models import Product,OrderItem,Order,Address
from django.views.generic import ListView, DetailView
from django.utils import timezone
from django.contrib.auth.mixins import LoginRequiredMixin
def home_page(request):
    """Render the landing page."""
    template = 'customer/home.html'
    return render(request, template)
def contact_view(request):
    """Render the static contact page."""
    template = 'customer/contact.html'
    return render(request, template)
def working(request):
    """Render the 'how it works' page."""
    template = 'customer/working-model.html'
    return render(request, template)
def registration_view(request):
    """Handle new-user sign-up.

    GET renders an empty RegistrationForm; POST validates it, creates the
    account and shows the success page, or re-renders the form with errors.
    (The original also extracted email/password1 after save but never used
    them — dead locals removed.)
    """
    context = {}
    if request.POST:
        form = RegistrationForm(request.POST)
        if form.is_valid():
            form.save()
            return render(request, 'customer/register-success.html')
        # Invalid: send the bound form (with errors) back to the template.
        context['registration_form'] = form
    else:
        form = RegistrationForm()
        context['registration_form'] = form
    return render(request, 'customer/register.html', context)
def logout_view(request):
    """End the current session and send the visitor back home."""
    logout(request)
    return redirect('customer:home')
def login_view(request):
    """Authenticate a user by email/password.

    Bug fix: the original only put ``login_form`` in the context on GET, so a
    failed POST (invalid form or wrong credentials) rendered the template with
    no form and no errors.  The form is now always handed back.
    """
    if request.user.is_authenticated:
        return redirect("customer:home")
    if request.POST:
        form = AccountAuthenticationForm(request.POST)
        if form.is_valid():
            email = request.POST['email']
            password = request.POST['password']
            user = authenticate(email=email, password=password)
            if user:
                login(request, user)
                return redirect("customer:home")
    else:
        form = AccountAuthenticationForm()
    context = {'login_form': form}
    return render(request, "customer/login.html", context)
def must_authenticate_view(request):
    """Tell an anonymous visitor that the page requires logging in."""
    return render(request, 'account/must_authenticate.html', {})
@login_required
def profile_view(request):
    """Display the logged-in user's account and profile details."""
    profile = request.user.profile
    context = {
        'email': request.user.email,
        'username': request.user.username,
        'f_name': profile.first_name,
        'l_name': profile.last_name,
        'contact': profile.contact,
        'image': profile.image,
    }
    return render(request, "customer/profile.html", context)
@login_required
def update_profile(request):
    """Let the logged-in user edit their account and profile data.

    POST validates both forms together and redirects to the profile page on
    success (re-rendering the bound forms with errors otherwise); GET shows
    the forms pre-filled from the current user.
    """
    if request.method == 'POST':
        u_form = UserUpdateForm(request.POST, instance=request.user)
        p_form = ProfileUpdateForm(request.POST,
                                   request.FILES,
                                   instance=request.user.profile)
        if u_form.is_valid() and p_form.is_valid():
            u_form.save()
            p_form.save()
            # Plain string: the original used an f-string without placeholders.
            messages.success(request, 'Your account has been updated!')
            return redirect('customer:profile')
    else:
        u_form = UserUpdateForm(instance=request.user)
        p_form = ProfileUpdateForm(instance=request.user.profile)
    context = {
        'u_form': u_form,
        'p_form': p_form
    }
    return render(request, 'customer/update-profile.html', context)
@login_required
def CartView(request):
    """Show the current user's open (un-ordered) cart, if it has items.

    The template receives 'orders' only when an open order with at least one
    item exists; otherwise the context is empty.
    """
    context = {}
    order_qs = Order.objects.filter(user=request.user, ordered=False)
    if order_qs.exists():
        order = order_qs[0]
        if order.items.exists():
            context['orders'] = order
    # Debug print() calls removed — they leaked cart contents to stdout.
    return render(request, 'customer/cart.html', context)
@login_required
def CheckoutView(request):
    """Collect a shipping address for the open cart and finalize the order.

    GET shows the address form alongside the cart contents; POST saves the
    address, attaches it to the order, marks the items and the order as
    ordered, and redirects to the order history.
    """
    context = {}
    order_qs = Order.objects.filter(user=request.user, ordered=False)
    if order_qs.exists():
        order = order_qs[0]
        context['orders'] = order
        if request.POST:
            form = AddressForm(request.POST)
            if form.is_valid():
                c_name = form.cleaned_data.get('name')
                street_addr = form.cleaned_data.get('street_address')
                village = form.cleaned_data.get('village_name')
                pin_code = form.cleaned_data.get('pincode')
                contact = form.cleaned_data.get('contact_number')
                default_addr = form.cleaned_data.get('default')
                time_slot = form.cleaned_data.get('delivery_time_slot')
                address_obj = Address(
                    user=request.user,
                    name=c_name,
                    street_address=street_addr,
                    village_name=village,
                    contact_number=contact,
                    pincode=pin_code,
                    default=default_addr,
                    delivery_time_slot=time_slot
                )
                address_obj.save()
                # Attach the new address to the still-open order.
                Order.objects.filter(user=request.user, ordered=False).update(address=address_obj)
                order_items = order.items.all()
                order_items.update(ordered=True)
                # NOTE(review): save() after the bulk update() above looks
                # redundant — update() already persisted the flag; confirm.
                for item in order_items:
                    item.save()
                # Close the order itself.
                Order.objects.filter(user=request.user, ordered=False).update(ordered=True)
                return redirect('customer:order-detail-view',)
        else:
            form = AddressForm()
            context['form'] = form
    else:
        # NOTE(review): with no open order the template gets an empty context
        # (no cart, no form) — confirm checkout.html handles this gracefully.
        context = {}
    return render(request, 'customer/checkout.html', context)
class OrderDetailView(ListView):
    """List all placed (ordered=True) orders of the current user."""
    template_name = 'customer/order-detail.html'
    context_object_name = 'orders'
    model = Order

    def get_queryset(self):
        # Restrict to this user's finalized orders.
        return Order.objects.filter(user=self.request.user, ordered=True)
class ProductView(ListView):
    """Paginated listing (12 per page) of all products."""
    template_name = 'customer/product-list.html'
    context_object_name = 'product'
    paginate_by = 12
    model = Product
class ProductDetailView(DetailView):
    """Detail page for a single product, looked up by URL slug."""
    model = Product
    template_name = 'customer/product-detail.html'
    context_object_name = 'product'

    def get_object(self, **kwargs):
        # get_object_or_404 turns an unknown slug into a 404; the original
        # Product.objects.get raised DoesNotExist, i.e. a server error (500).
        return get_object_or_404(Product, slug=self.kwargs['slug'])
@login_required
def add_to_cart(request, slug):
    """Add one unit of the product identified by *slug* to the user's cart.

    Creates the open Order on the first add; increments the line quantity on
    repeat adds of the same product.
    """
    item = get_object_or_404(Product, slug=slug)
    # Reuse the user's pending (un-ordered) line for this product, if any.
    order_item, created = OrderItem.objects.get_or_create(
        product=item,
        user=request.user,
        ordered=False
    )
    order_qs = Order.objects.filter(user=request.user, ordered=False)
    if order_qs.exists():
        order = order_qs[0]
        # check if the order item is in the order
        if order.items.filter(product__slug=item.slug).exists():
            order_item.quantity += 1
            order_item.save()
            #messages.info(request, "This item quantity was updated.")
            return redirect("customer:cart")
        else:
            order.items.add(order_item)
            messages.info(request, "This item was added to your cart.")
            return redirect("customer:product")
    else:
        # No open order yet: create one and attach the item.
        ordered_date = timezone.now()
        order = Order.objects.create(
            user=request.user, ordered_date=ordered_date)
        order.items.add(order_item)
        messages.info(request, "This item was added to your cart.")
        return redirect("customer:product")
@login_required
def remove_from_cart(request, slug):
    """Remove a product line (whatever its quantity) from the open cart."""
    product = get_object_or_404(Product, slug=slug)
    open_orders = Order.objects.filter(
        user=request.user,
        ordered=False
    )
    # Guard clauses: no open order, or product not in the cart.
    if not open_orders.exists():
        messages.info(request, "You do not have an active order")
        return redirect("customer:cart")
    cart = open_orders[0]
    if not cart.items.filter(product__slug=product.slug).exists():
        #messages.info(request, "This item was not in your cart")
        return redirect("customer:cart")
    # Detach the pending line from the order and delete it outright.
    line = OrderItem.objects.filter(
        product=product,
        user=request.user,
        ordered=False
    )[0]
    cart.items.remove(line)
    line.delete()
    #messages.info(request, "This item was removed from your cart.")
    return redirect("customer:cart")
@login_required
def remove_single_item_from_cart(request, slug):
    """Decrement the quantity of one product in the cart (detach at qty 1)."""
    item = get_object_or_404(Product, slug=slug)
    order_qs = Order.objects.filter(
        user=request.user,
        ordered=False
    )
    if order_qs.exists():
        order = order_qs[0]
        # check if the order item is in the order
        if order.items.filter(product__slug=item.slug).exists():
            order_item = OrderItem.objects.filter(
                product=item,
                user=request.user,
                ordered=False
            )[0]
            if order_item.quantity > 1:
                order_item.quantity -= 1
                order_item.save()
            else:
                order.items.remove(order_item)
            messages.info(request, "This item quantity was updated.")
            return redirect("customer:cart")
        else:
            messages.info(request, "This item was not in your cart")
            # Bug fix: the 'cart' URL takes no arguments; the original passed
            # slug=slug, which raised NoReverseMatch.
            return redirect("customer:cart")
    else:
        messages.info(request, "You do not have an active order")
        return redirect("customer:cart")
| {"/customer/admin.py": ["/customer/models.py"], "/customer/urls.py": ["/customer/views.py"], "/customer/views.py": ["/customer/models.py"]} |
71,064 | ankit-kumar-cs/Falling-Fruits-Major-Project | refs/heads/master | /customer/templatetags/cust_filters.py | from django import template
register = template.Library()


@register.filter(name='splitcat')
def splitcateogory(category):
    """Map a product category to its image filename.

    'Fruit:Apple' -> 'Apple.jpg'.  Uses the last ':'-separated segment, so
    colon-free categories such as 'Grain' yield 'Grain.jpg' instead of
    raising IndexError (the original split(":")[1] crashed on them).
    """
    return str(category.split(":")[-1]) + ".jpg"
@register.filter(name='getcategory')
def getcategory(string):
    """Return [category-type, item-name], both capitalized.

    'Fruit:Apple' -> ['Fruit', 'Apple'].  For colon-free categories such as
    'Grain' both entries are the category itself (the original split(':')[1]
    raised IndexError on them).
    """
    parts = string.split(':')
    return [str(parts[0]).capitalize(), str(parts[-1]).capitalize()]
@register.filter(name='get_item_name')
def get_item_name(string):
    """Return the item-name half of a 'Type:Name' category, capitalized.

    Falls back to the whole string for colon-free categories (the original
    split(':')[1] raised IndexError on e.g. 'Grain').
    """
    return str(string.split(':')[-1]).capitalize()
@register.filter(name='delivery_status')
def delivery_status(string):
    """Render a truthy 'received' flag as a human-readable delivery status."""
    return "Delivered" if string else "On the Way"
71,065 | ankit-kumar-cs/Falling-Fruits-Major-Project | refs/heads/master | /customer/migrations/0008_address_delivery_time_slot.py | # Generated by Django 3.0.7 on 2020-07-07 17:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('customer', '0007_auto_20200706_1703'),
]
operations = [
migrations.AddField(
model_name='address',
name='delivery_time_slot',
field=models.CharField(choices=[('Morning', 'Morning(6AM-10AM)'), ('Evening', 'Evening(4PM-8PM)')], default='Morning(6AM-10AM)', max_length=20),
preserve_default=False,
),
]
| {"/customer/admin.py": ["/customer/models.py"], "/customer/urls.py": ["/customer/views.py"], "/customer/views.py": ["/customer/models.py"]} |
71,066 | Xujan24/Cifar-10-Classifier | refs/heads/master | /test_model.py | import torch
import numpy as np
from utils import unpickle
from model import CNNModel
def main():
    """Evaluate the trained CNN on the CIFAR-10 test batch and print accuracy.

    Loads ./trained_model.pth, runs one forward pass over the whole test
    batch and reports the top-1 accuracy.
    """
    trained_model = './trained_model.pth'
    test_batch_dir = './cifar-10/test_batch'
    # Run on GPU when available instead of unconditionally requiring CUDA
    # (the original crashed on CPU-only machines).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    classifier = CNNModel()
    classifier.load_state_dict(torch.load(trained_model, map_location=device))
    classifier.to(device)
    classifier.eval()
    test_x, test_y = unpickle(test_batch_dir)
    test_x = torch.tensor(np.reshape(test_x, (len(test_x), 3, 32, 32))).to(device, dtype=torch.float)
    test_y = torch.tensor(test_y).to(device)
    # CIFAR-10 label order, kept for reference (not used below).
    classes = ['Airplane', 'Automobile', 'Bird', 'Cat', 'Deer', 'Dog', 'Frog', 'Horse', 'Ship', 'Truck']
    # calculating the accuracy of our classifier;
    print("Calculating accuracy...")
    correct = 0
    total = len(test_x)
    with torch.no_grad():
        out = classifier(test_x)
        _, predicted = torch.max(out, 1)
        # calculate the total accuracy
        correct += (predicted == test_y).sum().item()
    # %.2f keeps the fractional part ('%5d' silently truncated it).
    print('Accuracy: %.2f %%' % (correct / total * 100))
# Script entry point: evaluate the saved model when run directly.
if __name__ == '__main__':
    main()
| {"/test_model.py": ["/utils.py", "/model.py"], "/load_data.py": ["/utils.py"], "/train_model.py": ["/load_data.py", "/model.py"]} |
71,067 | Xujan24/Cifar-10-Classifier | refs/heads/master | /utils.py | import pickle
# Function to unpack the cifar-10 dataset.
# Function to unpack the cifar-10 dataset.
def unpickle(file):
    """Load one pickled CIFAR-10 batch and return its (data, labels) pair."""
    with open(file, 'rb') as fh:
        batch = pickle.load(fh, encoding='bytes')
    return batch[b'data'], batch[b'labels']
71,068 | Xujan24/Cifar-10-Classifier | refs/heads/master | /load_data.py | import os
from torch.utils.data import Dataset
from utils import unpickle
class LoadTrainingData(Dataset):
    """Dataset over all CIFAR-10 training batch files found on disk."""

    def __init__(self):
        # Accumulate images and labels from every batch file in the data dir.
        data_dir = './cifar-10/training batches'
        self.trainX = []
        self.trainY = []
        for batch_file in os.listdir(data_dir):
            images, labels = unpickle(os.path.join(data_dir, batch_file))
            self.trainX.extend(images)
            self.trainY.extend(labels)

    def __getitem__(self, item):
        return self.trainX[item], self.trainY[item]

    def __len__(self):
        return len(self.trainX)
| {"/test_model.py": ["/utils.py", "/model.py"], "/load_data.py": ["/utils.py"], "/train_model.py": ["/load_data.py", "/model.py"]} |
71,069 | Xujan24/Cifar-10-Classifier | refs/heads/master | /model.py | import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
class CNNModel(nn.Module):
    """Two conv blocks + one FC layer for 10-class CIFAR classification.

    forward() returns raw logits of shape (N, 10).  The original applied
    F.softmax here, but train_model.py feeds the output to
    nn.CrossEntropyLoss, which expects logits and applies log-softmax
    internally — softmax-ing twice flattens the gradients.  Argmax-based
    evaluation (test_model.py) is unaffected, since softmax preserves order.
    The module layout is unchanged, so existing state_dicts still load.
    """

    def __init__(self):
        super(CNNModel, self).__init__()
        self.layers = nn.Sequential(OrderedDict([
            # 3x32x32 -> 32x16x16
            ('layer1', nn.Sequential(
                nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1),
                nn.ReLU(),
                nn.BatchNorm2d(num_features=32),
                nn.MaxPool2d(kernel_size=2, stride=2)
            )),
            # 32x16x16 -> 64x8x8
            ('layer2', nn.Sequential(
                nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1),
                nn.ReLU(),
                nn.BatchNorm2d(num_features=64),
                nn.MaxPool2d(kernel_size=2, stride=2)
            )),
            # flattened 64*8*8 -> 1024
            ('fc1', nn.Sequential(
                nn.Linear(in_features=8 * 8 * 64, out_features=1024),
                nn.ReLU()
            ))
        ]))
        self.out = nn.Linear(in_features=1024, out_features=10)

    def forward(self, x):
        # Explicit pipeline replaces the original named_children() loop.
        x = self.layers.layer1(x)
        x = self.layers.layer2(x)
        x = x.view(x.size(0), -1)   # flatten before the fully-connected head
        x = self.layers.fc1(x)
        return self.out(x)          # logits; apply softmax externally if needed
| {"/test_model.py": ["/utils.py", "/model.py"], "/load_data.py": ["/utils.py"], "/train_model.py": ["/load_data.py", "/model.py"]} |
71,070 | Xujan24/Cifar-10-Classifier | refs/heads/master | /train_model.py | import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from load_data import LoadTrainingData
from model import CNNModel
def train_model(model, data, epoch, batch_size):
    """Train *model* on batches from *data* with SGD + step LR decay.

    param: model - the network to train (its current device is used)
           data - iterable of (inputs, labels) batches
           epoch - number of passes over *data*
           batch_size - samples per batch; inputs are reshaped to
                        (batch_size, 3, 32, 32)

    Fixes vs. the original: tensors follow the model's device instead of a
    hard-coded '.to("cuda")' (which crashed on CPU-only machines), and the
    redundant second .cuda() transfer is gone.
    """
    device = next(model.parameters()).device
    # define the loss function and back propagation algorithm
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.2, momentum=0.9)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.01)
    for e in range(epoch):
        lr = optimizer.param_groups[0]['lr']
        print('[EPOCH: %d, Learning Rate: %f]' % (e + 1, lr))
        print()
        for i, (inputs, lbl) in enumerate(data):
            inputs = inputs.view(batch_size, 3, 32, 32).to(device, dtype=torch.float)
            lbl = lbl.view(-1).to(device)
            # set the gradient for each parameter to zero
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, lbl)
            loss.backward()
            optimizer.step()
            print('-[step: %d, loss: %f]' % (i + 1, loss.item()))
        scheduler.step()
    print('Finished Training')
if __name__ == '__main__':
    # Build the network, stream the CIFAR-10 training set in large batches,
    # train for 40 epochs, and save the weights next to this script.
    cnn = CNNModel()
    batch = 2000
    if torch.cuda.is_available():
        cnn.cuda()
    trainingDataset = LoadTrainingData()
    dataLoader = DataLoader(
        dataset=trainingDataset,
        batch_size=batch,
        shuffle=True,
        num_workers=2
    )
    train_model(cnn, dataLoader, epoch=40, batch_size=batch)
    # save model
    torch.save(cnn.state_dict(), './trained_model.pth')
| {"/test_model.py": ["/utils.py", "/model.py"], "/load_data.py": ["/utils.py"], "/train_model.py": ["/load_data.py", "/model.py"]} |
import os
from datetime import date

from keras.callbacks import ModelCheckpoint, CSVLogger, EarlyStopping
from keras.optimizers import *

import generator
import processing
from models import unet
# Gather image/mask paths.
# Bug fix: the original unpacked into 'mask_pahts' (typo) but used
# 'mask_paths' below, which raised NameError at runtime.
# NOTE(review): processing.get_paths is not defined in processing.py as
# shipped — confirm which module actually provides it.
image_paths, mask_paths = processing.get_paths()

# train / validate / test (evaluate) split
t, v, e = 0.7, 0.2, 0.1
tstart, tstop = 0, int(len(image_paths) * t)
vstart, vstop = tstop, int(len(image_paths) * (t + v))
estart, estop = vstop, len(image_paths)

# define train and val data generators
n_classes = 1
(ih, iw) = (256, 256)  # desired input shape
batch_size = 16
n_epochs = 100
# Bug fix: 'generator' is the imported module; the callable inside it is
# generator.generator — calling the module object raised TypeError.
train_gen = generator.generator(image_paths, mask_paths, ih, iw, start=tstart, stop=tstop, bs=batch_size)
val_gen = generator.generator(image_paths, mask_paths, ih, iw, start=vstart, stop=vstop, bs=batch_size)

# define model parameters
opt = Adam(lr=1e-5)
# NOTE(review): dice_coef / dice_coef_loss live in dice.py in this repo —
# confirm that processing re-exports them.
loss = processing.dice_coef_loss
metrics = processing.dice_coef

# make a directory to save results (requires 'import os' at the top)
d = str(date.today())
experiment_name = 'experiment name'  # set experiment name
save_folder = os.path.join('results', d, experiment_name)
if not os.path.exists(save_folder):
    os.makedirs(save_folder)

# define callbacks
checkpointer = ModelCheckpoint(os.path.join(save_folder, 'best_model.h5'),
                               monitor='val_loss', verbose=1, save_best_only=True)
csv_logger = CSVLogger(os.path.join(save_folder, 'log.csv'), append=True, separator=';')
early_stopper = EarlyStopping(monitor='val_loss', patience=10)
callbacks_list = [checkpointer, csv_logger, early_stopper]

# build model and compile
model = unet(input_shape=(ih, iw, 3))
# Bug fix: 'metrics' was defined but never passed to compile().
model.compile(loss=loss, optimizer=opt, metrics=[metrics])
history = model.fit(train_gen, epochs=n_epochs,
                    steps_per_epoch=(tstop // batch_size),
                    validation_data=val_gen,
                    validation_steps=((vstop - vstart) // batch_size),
                    callbacks=callbacks_list, verbose=1)
| {"/train.py": ["/processing.py", "/generator.py", "/models.py"]} |
71,097 | yousofaly/Water-Body-Segmentation | refs/heads/main | /processing.py | import cv2
import os
import keras.backend as K
# define cropping function
def crop(path, mode='dims'):
    """Locate the largest non-black region of the image at *path*.

    param: path - image file path
           mode - 'dims' returns the region's bounding box (x, y, w, h);
                  any other value returns the cropped BGR image itself.
    """
    # Read the image, convert it into grayscale, and make in binary image for threshold value of 1.
    img = cv2.imread(path, 0)
    # use binary threshold, all pixel that are beyond 3 are made white
    _, thresh = cv2.threshold(img, 3, 255, cv2.THRESH_BINARY)
    # Now find contours in it.
    # NOTE(review): findContours returns 3 values on OpenCV 3.x and 2 on 4.x —
    # this 2-value unpacking assumes OpenCV 4.x; confirm the pinned version.
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # get contours with highest height
    lst_contours = []
    for cnt in contours:
        ctr = cv2.boundingRect(cnt)
        lst_contours.append(ctr)
    # bounding boxes sorted ascending by height; take the tallest
    x, y, w, h = sorted(lst_contours, key=lambda coef: coef[3])[-1]
    # get new image shape
    if mode == 'dims':
        return (x, y, w, h)
    # get cropped image
    else:
        return (cv2.imread(path)[y:y + h, x:x + w, :])
| {"/train.py": ["/processing.py", "/generator.py", "/models.py"]} |
71,098 | yousofaly/Water-Body-Segmentation | refs/heads/main | /dice.py | import keras.backend as K
# Additive smoothing: avoids 0/0 when both masks are empty.
smooth = 1.


# dice coefficient
def dice_coef(y_true, y_pred):
    """Soft Dice coefficient between (batched) binary masks, in (0, 1]."""
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
# dice loss
def dice_coef_loss(y_true, y_pred):
    """Dice loss = 1 - Dice coefficient (minimized when masks overlap fully).

    Bug fix: the original line was missing its closing parenthesis, which made
    the whole module a SyntaxError.
    """
    return 1 - dice_coef(y_true, y_pred)
| {"/train.py": ["/processing.py", "/generator.py", "/models.py"]} |
71,099 | yousofaly/Water-Body-Segmentation | refs/heads/main | /generator.py | import cv2
import numpy as np
# image generator from files df
def generator(images, masks, ih=1024, iw=1024,
              start=0, stop=4, bs=32, aug=None,
              interp=cv2.INTER_NEAREST, rs=255):
    """Endlessly yield (image_batch, mask_batch) numpy arrays of size *bs*.

    Images and masks are read with cv2, resized to (ih, iw) and scaled by
    1/rs.  Only indices [start, stop) are used, so train/val splits share
    the same path lists.
    NOTE(review): masks are read as 3-channel BGR (no flag to cv2.imread) —
    confirm the model expects 3-channel masks.  A partial batch left over at
    the end of [start, stop) is carried into the next pass rather than
    yielded or dropped.
    """
    image_array = []
    mask_array = []
    while True:
        for i in range(start, stop):
            image_array.append(cv2.resize(cv2.imread(images[i]), (ih, iw), interpolation=interp) / rs)
            mask_array.append(cv2.resize(cv2.imread(masks[i]), (ih, iw), interpolation=interp) / rs)
            if len(mask_array) == bs:
                if aug is not None:
                    # to do: implement augmentation
                    pass
                yield np.array(image_array), np.array(mask_array)
                image_array = []
                mask_array = []
| {"/train.py": ["/processing.py", "/generator.py", "/models.py"]} |
71,100 | yousofaly/Water-Body-Segmentation | refs/heads/main | /models.py | from keras.models import Model
from keras.layers import *
def unet(n_calsses = 1, input_shape = (512,512,3)):
    """Build a classic U-Net for binary segmentation (sigmoid output).

    NOTE(review): `n_calsses` (sic) is accepted but never used -- the output
    layer is hard-coded to a single sigmoid channel. Confirm before relying
    on it for multi-class segmentation.
    """
    inputs = Input(input_shape)
    # --- Contracting path: two 3x3 convs then 2x2 max-pool per level ---
    conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
    conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
    conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
    conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
    conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
    # --- Bottleneck: deepest level, with dropout on both sides ---
    conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)
    conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)
    drop5 = Dropout(0.5)(conv5)
    # --- Expanding path: upsample + 2x2 conv, concat skip connection, two 3x3 convs ---
    up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))
    merge6 = concatenate([drop4,up6], axis = 3)
    conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)
    conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)
    up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))
    merge7 = concatenate([conv3,up7], axis = 3)
    conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)
    conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)
    up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))
    merge8 = concatenate([conv2,up8], axis = 3)
    conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)
    conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
    up9 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))
    merge9 = concatenate([conv1,up9], axis = 3)
    conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)
    conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
    conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
    # Single-channel sigmoid head: per-pixel foreground probability.
    conv10 = Conv2D(1, 1, activation = 'sigmoid')(conv9)
    model = Model(inputs = inputs, outputs = conv10)
    return model
def unet_skinny(img_size, num_classes):
    """Build a lightweight U-Net-style model with residual connections.

    Uses separable convolutions and conv-transposes instead of the full
    U-Net's paired 3x3 convs; ends in a per-pixel softmax over
    `num_classes`. `img_size` is (height, width); 3 channels are assumed.
    """
    inputs = Input(shape=img_size + (3,))
    ### [First half of the network: downsampling inputs] ###
    # Entry block
    x = Conv2D(32, 3, strides=2, padding="same")(inputs)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    previous_block_activation = x  # Set aside residual
    # Blocks 1, 2, 3 are identical apart from the feature depth.
    for filters in [64, 128, 256]:
        x = Activation("relu")(x)
        x = SeparableConv2D(filters, 3, padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
        x = SeparableConv2D(filters, 3, padding="same")(x)
        x = BatchNormalization()(x)
        x = MaxPooling2D(3, strides=2, padding="same")(x)
        # Project residual (1x1 strided conv matches shape/depth of x)
        residual = Conv2D(filters, 1, strides=2, padding="same")(previous_block_activation)
        x = add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual
    ### [Second half of the network: upsampling inputs] ###
    for filters in [256, 128, 64, 32]:
        x = Activation("relu")(x)
        x = Conv2DTranspose(filters, 3, padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
        x = Conv2DTranspose(filters, 3, padding="same")(x)
        x = BatchNormalization()(x)
        x = UpSampling2D(2)(x)
        # Project residual (upsample then 1x1 conv to match depth)
        residual = UpSampling2D(2)(previous_block_activation)
        residual = Conv2D(filters, 1, padding="same")(residual)
        x = add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual
    # Add a per-pixel classification layer
    outputs = Conv2D(num_classes, 3, activation="softmax", padding="same")(x)
    # Define the model
    model = Model(inputs, outputs)
    return model
| {"/train.py": ["/processing.py", "/generator.py", "/models.py"]} |
71,101 | yousofaly/Water-Body-Segmentation | refs/heads/main | /predict.py | import os
import matplotlib.pyplot as plt
import cv2
# Default data layout: Data/Images holds the inputs, Data/Masks the
# ground-truth masks (same file names in both folders).
image_folder = os.path.join('Data','Images')
mask_folder = os.path.join('Data','Masks')
def plot(array, title):
    """Show `array` in a fresh matplotlib figure titled `title`, axes hidden."""
    plt.figure()
    plt.imshow(array)
    plt.axis('off')
    plt.title(title)
def evaluate_prediction(image_name, model, ih = 256, iw = 256,
                        mask_folder = mask_folder, image_folder = image_folder):
    """Predict a mask for one image and plot image / true mask / prediction.

    The image is resized to (ih, iw) for the model; the predicted mask is
    resized back to the true mask's dimensions for display.
    """
    # np was used below but never imported at this file's top level -- import
    # locally so the function is self-contained.
    import numpy as np
    # define image array to predict and true mask
    test_image = cv2.resize(cv2.imread(os.path.join(image_folder, image_name)), (ih, iw), interpolation=cv2.INTER_NEAREST)
    test_mask = cv2.imread(os.path.join(mask_folder, image_name))[:, :, 0]
    # cv2.resize takes (width, height), hence the reversal of shape
    output_shape = test_mask.shape[::-1]
    # predict on a single-image batch
    prediction = model.predict(np.expand_dims(test_image, axis=0))
    # display image, true mask and prediction
    plot(cv2.imread(os.path.join(image_folder, image_name)), 'Image')
    plot(test_mask, 'True Mask')
    # Fixed: the original call was missing its closing parenthesis.
    plot(cv2.resize(prediction[0, :, :, 0], output_shape), 'Prediction')
| {"/train.py": ["/processing.py", "/generator.py", "/models.py"]} |
71,105 | ErwinJunge/system-monitor | refs/heads/master | /measure.py | #!/usr/bin/env python3
from sqlalchemy.orm import sessionmaker
from models import (
Base,
Measurement,
CPUUtilization,
CPUStats,
MemoryStats,
SwapStats,
DiskStats,
Process,
ProcessMemory,
ProcessIONice,
ProcessIOCounters,
ProcessContextSwitches,
)
from engine import engine
import psutil
from time import sleep
def processes(measurement):
    """Snapshot every running process into ORM rows attached to `measurement`.

    Child rows (memory, ionice, io counters, ctx switches) link back to each
    Process row via their `process` relationship.
    """
    # Prime the per-process CPU counters: the first cpu_percent reading needs
    # a prior sample, hence the throwaway iteration and the 1s wait.
    for _ in psutil.process_iter(attrs=['cpu_percent']):
        pass
    sleep(1)
    wanted = [
        'pid', 'name', 'exe', 'cmdline', 'username', 'nice', 'ionice',
        'io_counters', 'num_ctx_switches', 'num_fds', 'num_threads',
        'cpu_percent', 'memory_full_info', 'status',
    ]
    for proc in psutil.process_iter(attrs=wanted):
        info = proc.info
        process = Process(
            measurement=measurement,
            pid=info['pid'],
            name=info['name'],
            exe=info['exe'],
            cmdline=' '.join(info['cmdline']),
            username=info['username'],
            nice=info['nice'],
            num_fds=info['num_fds'],
            num_threads=info['num_threads'],
            cpu_percent=info['cpu_percent'],
            status=info['status'],
        )
        mem = info['memory_full_info']
        if mem is not None:
            ProcessMemory(
                process=process,
                rss=mem.rss,
                vms=mem.vms,
                shared=mem.shared,
                text=mem.text,
                lib=mem.lib,
                data=mem.data,
                dirty=mem.dirty,
                uss=mem.uss,
                pss=mem.pss,
                swap=mem.swap,
            )
        ProcessIONice(
            process=process,
            ioclass=int(info['ionice'].ioclass),
            value=info['ionice'].value,
        )
        io = info['io_counters']
        if io is not None:
            ProcessIOCounters(
                process=process,
                read_count=io.read_count,
                write_count=io.write_count,
                read_bytes=io.read_bytes,
                write_bytes=io.write_bytes,
                read_chars=io.read_chars,
                write_chars=io.write_chars,
            )
        ctx = info['num_ctx_switches']
        ProcessContextSwitches(
            process=process,
            voluntary=ctx.voluntary,
            involuntary=ctx.involuntary,
        )
def disk_stats(measurement):
    """Record per-disk I/O counters; returns the list of DiskStats rows."""
    rows = []
    for disk_name, counters in psutil.disk_io_counters(perdisk=True).items():
        rows.append(DiskStats(
            measurement=measurement,
            partition_id=disk_name,
            read_count=counters.read_count,
            write_count=counters.write_count,
            read_bytes=counters.read_bytes,
            write_bytes=counters.write_bytes,
            read_time=counters.read_time,
            write_time=counters.write_time,
            read_merged_count=counters.read_merged_count,
            write_merged_count=counters.write_merged_count,
            busy_time=counters.busy_time,
        ))
    return rows
def swap_stats(measurement):
    """Record system swap usage as a single SwapStats row (returned in a list)."""
    swap = psutil.swap_memory()
    row = SwapStats(
        measurement=measurement,
        total=swap.total,
        used=swap.used,
        sin=swap.sin,
        sout=swap.sout,
    )
    return [row]
def memory_stats(measurement):
    """Record virtual-memory totals as a single MemoryStats row (in a list)."""
    vm = psutil.virtual_memory()
    return [MemoryStats(measurement=measurement, total=vm.total, available=vm.available)]
def cpu_stats(measurement):
    """Record cumulative CPU counters as a single CPUStats row (in a list)."""
    counters = psutil.cpu_stats()
    row = CPUStats(
        measurement=measurement,
        ctx_switches=counters.ctx_switches,
        interrupts=counters.interrupts,
        soft_interrupts=counters.soft_interrupts,
        syscalls=counters.syscalls,
    )
    return [row]
def cpu_utilization(measurement):
    """Sample per-CPU utilization over a ~1s window into CPUUtilization rows.

    The first pair of psutil calls resets the interval baselines; after the
    sleep, the second pair returns percentages for the elapsed second.
    """
    psutil.cpu_percent(interval=None, percpu=True)
    psutil.cpu_times_percent(interval=None, percpu=True)
    sleep(1)
    totals = psutil.cpu_percent(interval=None, percpu=True)
    details = psutil.cpu_times_percent(interval=None, percpu=True)
    rows = []
    for cpu_index, (total, pct) in enumerate(zip(totals, details)):
        rows.append(CPUUtilization(
            measurement=measurement,
            index=cpu_index,
            total=total,
            user=pct.user,
            nice=pct.nice,
            system=pct.system,
            idle=pct.idle,
            iowait=pct.iowait,
            irq=pct.irq,
            softirq=pct.softirq,
            steal=pct.steal,
            guest=pct.guest,
            guest_nice=pct.guest_nice,
        ))
    return rows
def create_measurement(Session):
    """Collect one full system snapshot and persist it in a new session.

    Each collector attaches its rows to the Measurement through the ORM
    relationships, so adding the measurement alone persists the whole graph
    (SQLAlchemy's default save-update cascade).
    """
    session = Session()
    measurement = Measurement()
    for collect in (cpu_utilization, cpu_stats, memory_stats,
                    swap_stats, disk_stats, processes):
        collect(measurement)
    session.add(measurement)
    session.commit()
def show_measurements(Session):
    """Dump every stored measurement (and its child rows) to stdout, by id."""
    session = Session()
    for m in session.query(Measurement).order_by(Measurement.id):
        fields = (
            m.id,
            m.created_at,
            m.cpu_utilizations,
            m.cpu_stats,
            m.memory_stats,
            m.swap_stats,
            m.disk_stats,
            m.processes,
        )
        print(*fields)
def run():
    """Create the schema if needed, then snapshot the system every ~5 seconds."""
    Base.metadata.create_all(engine)
    session_factory = sessionmaker(bind=engine)
    while True:
        create_measurement(session_factory)
        sleep(5)
# Entry point: start the endless measurement loop when run as a script.
if __name__ == '__main__':
    run()
| {"/measure.py": ["/models.py", "/engine.py"], "/charts.py": ["/models.py", "/engine.py"]} |
71,106 | ErwinJunge/system-monitor | refs/heads/master | /models.py | from sqlalchemy import (
Column,
Integer,
DateTime,
ForeignKey,
String,
Numeric,
)
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy.sql import func
class Base(object):
    """Declarative mixin: lowercase-classname table names + integer PK."""
    @declared_attr
    def __tablename__(cls):
        # Derive the table name automatically from the model class name.
        return cls.__name__.lower()
    id = Column(Integer, primary_key=True)
# Rebind Base to the declarative base built on the mixin above; every model
# below inherits the table-name convention and the `id` column from it.
Base = declarative_base(cls=Base)
class Measurement(Base):
    """One point-in-time system snapshot; parent of all stats rows."""
    created_at = Column(DateTime, default=func.now())
    cpu_utilizations = relationship('CPUUtilization', back_populates='measurement')
    cpu_stats = relationship('CPUStats', uselist=False, back_populates='measurement')
    memory_stats = relationship('MemoryStats', uselist=False, back_populates='measurement')
    swap_stats = relationship('SwapStats', uselist=False, back_populates='measurement')
    disk_stats = relationship('DiskStats', back_populates='measurement')
    processes = relationship('Process', back_populates='measurement')
class CPUUtilization(Base):
    """Per-CPU utilization percentages for one Measurement (one row per core)."""
    measurement_id = Column(Integer, ForeignKey('measurement.id'))
    measurement = relationship('Measurement', back_populates='cpu_utilizations')
    index = Column(Integer)  # CPU core number
    total = Column(Integer)
    user = Column(Integer)
    nice = Column(Integer)
    system = Column(Integer)
    idle = Column(Integer)
    iowait = Column(Integer)
    irq = Column(Integer)
    softirq = Column(Integer)
    steal = Column(Integer)
    guest = Column(Integer)
    guest_nice = Column(Integer)
    def __repr__(self):
        return '{}: {}, {}'.format(self.index, self.total, self.idle)
class CPUStats(Base):
    """Cumulative CPU event counters for one Measurement."""
    measurement_id = Column(Integer, ForeignKey('measurement.id'))
    measurement = relationship('Measurement', back_populates='cpu_stats')
    ctx_switches = Column(Integer)
    interrupts = Column(Integer)
    soft_interrupts = Column(Integer)
    syscalls = Column(Integer)
    def __repr__(self):
        return 'ctx_switches: {}, interrupts: {}, soft_interrupts: {}, syscalls: {}'.format(
            self.ctx_switches,
            self.interrupts,
            self.soft_interrupts,
            self.syscalls,
        )
class MemoryStats(Base):
    """Virtual-memory totals (bytes) for one Measurement."""
    measurement_id = Column(Integer, ForeignKey('measurement.id'))
    measurement = relationship('Measurement', back_populates='memory_stats')
    total = Column(Integer)
    available = Column(Integer)
    def __repr__(self):
        return 'memory available: {}'.format(
            self.available,
        )
class SwapStats(Base):
    """Swap usage for one Measurement; sin/sout are cumulative swap-in/out."""
    measurement_id = Column(Integer, ForeignKey('measurement.id'))
    measurement = relationship('Measurement', back_populates='swap_stats')
    total = Column(Integer)
    used = Column(Integer)
    sin = Column(Integer)
    sout = Column(Integer)
    def __repr__(self):
        return 'swap used: {}, sin: {}, sout: {}'.format(
            self.used,
            self.sin,
            self.sout,
        )
class DiskStats(Base):
    """Per-disk I/O counters for one Measurement (one row per disk)."""
    measurement_id = Column(Integer, ForeignKey('measurement.id'))
    measurement = relationship('Measurement', back_populates='disk_stats')
    partition_id = Column(String)  # disk/partition name key from psutil
    read_count = Column(Integer)
    write_count = Column(Integer)
    read_bytes = Column(Integer)
    write_bytes = Column(Integer)
    read_time = Column(Integer)
    write_time = Column(Integer)
    read_merged_count = Column(Integer)
    write_merged_count = Column(Integer)
    busy_time = Column(Integer)
    def __repr__(self):
        return '{} busy: {}'.format(
            self.partition_id,
            self.busy_time,
        )
class Process(Base):
    """One observed OS process; parent of its memory/ionice/io/ctx rows."""
    measurement_id = Column(Integer, ForeignKey('measurement.id'))
    measurement = relationship('Measurement', back_populates='processes')
    pid = Column(Integer)
    name = Column(String)
    exe = Column(String)
    cmdline = Column(String)  # argv joined with spaces
    username = Column(String)
    nice = Column(Integer)
    num_fds = Column(Integer)
    num_threads = Column(Integer)
    cpu_percent = Column(Numeric(3, 1))
    status = Column(String)
    memory = relationship('ProcessMemory', uselist=False, back_populates='process')
    ionice = relationship('ProcessIONice', uselist=False, back_populates='process')
    iocounters = relationship('ProcessIOCounters', uselist=False, back_populates='process')
    ctx_switches = relationship('ProcessContextSwitches', uselist=False, back_populates='process')
    def __repr__(self):
        # memory may be absent (collection can fail per-process), hence '?'
        return '{}, {}, {}, {}, mem: {}, swap: {}'.format(
            self.cpu_percent,
            self.name,
            self.exe,
            self.cmdline,
            self.memory.rss if self.memory else '?',
            self.memory.swap if self.memory else '?',
        )
class ProcessMemory(Base):
    """Detailed memory breakdown (bytes) for one Process."""
    process_id = Column(Integer, ForeignKey('process.id'))
    process = relationship('Process', back_populates='memory')
    rss = Column(Integer)
    vms = Column(Integer)
    shared = Column(Integer)
    text = Column(Integer)
    lib = Column(Integer)
    data = Column(Integer)
    dirty = Column(Integer)
    uss = Column(Integer)
    pss = Column(Integer)
    swap = Column(Integer)
class ProcessIONice(Base):
    """I/O scheduling class and priority value for one Process."""
    process_id = Column(Integer, ForeignKey('process.id'))
    process = relationship('Process', back_populates='ionice')
    ioclass = Column(Integer)
    value = Column(Integer)
class ProcessIOCounters(Base):
    """Cumulative I/O counters for one Process."""
    process_id = Column(Integer, ForeignKey('process.id'))
    process = relationship('Process', back_populates='iocounters')
    read_count = Column(Integer)
    write_count = Column(Integer)
    read_bytes = Column(Integer)
    write_bytes = Column(Integer)
    read_chars = Column(Integer)
    write_chars = Column(Integer)
class ProcessContextSwitches(Base):
    """Voluntary/involuntary context-switch counts for one Process."""
    process_id = Column(Integer, ForeignKey('process.id'))
    process = relationship('Process', back_populates='ctx_switches')
    voluntary = Column(Integer)
    involuntary = Column(Integer)
| {"/measure.py": ["/models.py", "/engine.py"], "/charts.py": ["/models.py", "/engine.py"]} |
71,107 | ErwinJunge/system-monitor | refs/heads/master | /engine.py | from sqlalchemy import create_engine
# Shared SQLAlchemy engine; data.db is created in the current working directory.
engine = create_engine('sqlite:///data.db')
| {"/measure.py": ["/models.py", "/engine.py"], "/charts.py": ["/models.py", "/engine.py"]} |
71,108 | ErwinJunge/system-monitor | refs/heads/master | /charts.py | #!/usr/bin/env python3
from sqlalchemy import desc
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import func
from models import (
Measurement,
CPUUtilization,
Process,
)
from engine import engine
import pygal
from collections import Counter
def round_to_interval(number, interval):
    """Snap `number` to the midpoint of its `interval`-wide bin.

    Note: int() truncates toward zero, so negative inputs bin symmetrically
    around zero rather than on a continuous grid.
    """
    bin_start = interval * int(number / interval)
    return bin_start + interval / 2
def get_mode(list_like, bin_size):
    """Return the most common bin midpoint after quantizing to `bin_size`."""
    counts = Counter(round_to_interval(value, bin_size) for value in list_like)
    return counts.most_common(1)[0][0]
def chop_list(list_like, step_sizes):
    """Split `list_like` into consecutive chunks of the given sizes."""
    chunks = []
    offset = 0
    for size in step_sizes:
        chunks.append(list_like[offset:offset + size])
        offset += size
    return chunks
def get_step_sizes(total_length, number_of_steps):
    """Split `total_length` into `number_of_steps` near-equal integer parts.

    Earlier steps absorb the remainder, so sizes differ by at most one and
    sum to exactly `total_length`.
    """
    base, remainder = divmod(total_length, number_of_steps)
    return [base + 1 if i < remainder else base
            for i in range(number_of_steps)]
def get_measurement_times(session):
    """Return all measurement timestamps in ascending order."""
    rows = (
        session
        .query(Measurement.created_at)
        .order_by(Measurement.created_at)
    )
    return [row.created_at for row in rows]
def create_cpu_chart(session, measurement_times, steps):
    """Render per-CPU utilization over time to cpu_chart.svg/.png.

    Samples are condensed into `steps` time buckets; each bucket plots the
    mode (2%-binned) of that CPU's utilization within the bucket. Missing
    samples count as 0.
    """
    query = (
        session
        .query(
            Measurement.created_at,
            CPUUtilization.index,
            CPUUtilization.total,
        )
        .filter(
            Measurement.id == CPUUtilization.measurement_id,
        )
    )
    # measurements: cpu index -> {timestamp: total utilization}
    measurements = {}
    for measurement_time, cpu_index, total in query:
        cpu_measurements = measurements.setdefault(cpu_index, {})
        cpu_measurements[measurement_time] = total
    chart = pygal.Line(
        x_label_rotation=20,
        x_labels_major_count=10,
        show_minor_x_labels=False,
        show_dots=False,
    )
    step_sizes = get_step_sizes(len(measurement_times), steps)
    # Label each bucket by the timestamp at its midpoint.
    mid_points = [
        sum(step_sizes[:i]) + int(step_sizes[i]/2)
        for i in range(len(step_sizes))
    ]
    chart.x_labels = [
        measurement_times[i].isoformat()
        for i in mid_points
    ]
    for cpu_index, cpu_measurements in measurements.items():
        # Dense series over all timestamps; gaps become 0.
        data = [
            cpu_measurements.get(measurement_time, 0)
            for measurement_time
            in measurement_times
        ]
        chopped_data = chop_list(data, step_sizes)
        modes = [
            get_mode(part, 2)
            for part in chopped_data
        ]
        chart.add(
            '{}'.format(cpu_index),
            modes,
            allow_interruptions=True,
        )
    chart.render_to_file('./cpu_chart.svg')
    chart.render_to_png('./cpu_chart.png')
def create_process_chart(session, measurement_times, steps):
    """Render CPU usage of the 5 busiest command lines to process_chart.svg/.png.

    "Busiest" means highest total cpu_percent summed over all measurements.
    Same bucketing scheme as create_cpu_chart: `steps` buckets, each showing
    the 2%-binned mode; missing samples count as 0.
    """
    # Subquery: top 5 cmdlines by cumulative CPU usage.
    interesting_cmdlines = (
        session
        .query(Process.cmdline)
        .group_by(Process.cmdline)
        .order_by(desc(func.sum(Process.cpu_percent)))
        .slice(0, 5)
    )
    process_query = (
        session
        .query(
            Measurement.created_at,
            Process.cmdline,
            Process.cpu_percent,
        )
        .filter(
            Process.cmdline.in_(interesting_cmdlines),
            Measurement.id == Process.measurement_id,
        )
    )
    # measurements: cmdline -> {timestamp: cpu_percent}
    measurements = {}
    for measurement_time, cmdline, cpu_percent in process_query:
        cpu_measurements = measurements.setdefault(cmdline, {})
        cpu_measurements[measurement_time] = cpu_percent
    chart = pygal.Line(
        x_label_rotation=20,
        x_labels_major_count=10,
        show_minor_x_labels=False,
        show_dots=False,
        legend_at_bottom=True,
        legend_at_bottom_columns=1,
    )
    step_sizes = get_step_sizes(len(measurement_times), steps)
    # Label each bucket by the timestamp at its midpoint.
    mid_points = [
        sum(step_sizes[:i]) + int(step_sizes[i]/2)
        for i in range(len(step_sizes))
    ]
    chart.x_labels = [
        measurement_times[i].isoformat()
        for i in mid_points
    ]
    for cmdline, cpu_measurements in measurements.items():
        # Dense series over all timestamps; gaps become 0.
        data = [
            cpu_measurements.get(measurement_time, 0)
            for measurement_time
            in measurement_times
        ]
        chopped_data = chop_list(data, step_sizes)
        modes = [
            get_mode(part, 2)
            for part in chopped_data
        ]
        chart.add(
            '{}'.format(cmdline),
            modes,
            allow_interruptions=True,
        )
    chart.render_to_file('./process_chart.svg')
    chart.render_to_png('./process_chart.png')
def run():
    """Build the CPU and per-process charts from all stored measurements."""
    session = sessionmaker(bind=engine)()
    measurement_times = get_measurement_times(session)
    steps = 400  # number of x-axis buckets the samples are condensed into
    create_cpu_chart(session, measurement_times, steps)
    create_process_chart(session, measurement_times, steps)
# Entry point: regenerate both charts when run as a script.
if __name__ == '__main__':
    run()
| {"/measure.py": ["/models.py", "/engine.py"], "/charts.py": ["/models.py", "/engine.py"]} |
71,126 | ColinMcNeil/DailyProgrammingChallenges | refs/heads/master | /main.py | __name__='Daily Program'
__author__ = 'Colin'
import easygui as gui
import June
#Debug Line 1 to skip menus, 0 for normal
debug=0
if debug==1:
    #Put in specific day to skip menu to in format: Month.DayCode()
    June.J8()
if debug ==0:
    #Menu using easygui to select the day you want to run
    # NOTE(review): choicebox presumably returns None when cancelled, in
    # which case the guards below simply fall through and nothing runs.
    year=gui.choicebox('Pick Year',__name__,['2015'])
    month='Not Defined'
    day='Not Defined'
    if year == '2015':
        month=gui.choicebox('Pick Month',__name__,['June'])
        # NOTE(review): the day prompt is shown regardless of which month
        # was picked -- fine while June is the only option.
        day=gui.choicebox('Pick Day',__name__,['8'])
    if month=='June':
June.June(day) | {"/main.py": ["/June.py"]} |
71,127 | ColinMcNeil/DailyProgrammingChallenges | refs/heads/master | /June.py | __author__ = 'Colin'
__name__='Daily Programs for June'
import easygui as gui
def June(day):
    """Dispatch to the June challenge matching `day` (a string); no-op otherwise."""
    challenges = {'8': J8}
    handler = challenges.get(day)
    if handler is not None:
        handler()
#--------JUNE 8TH: PALINDROMES----------#
def J8():
#Make any number into a palendrome.
#URL: http://www.reddit.com/r/dailyprogrammer/comments/38yy9s/20150608_challenge_218_easy_making_numbers/
number=gui.enterbox(title=__name__,msg='Input a number and I will make it a palindrome')
invnumber=int(number[::-1])
number=int(number)
i=1
while True:
number=invnumber+number
invnumber=int(str(number)[::-1])
if invnumber==number:
gui.textbox(title='Output',text=('It took '+str(i)+' iterations. Final #: '+number))
break
i=i+1 | {"/main.py": ["/June.py"]} |
71,393 | gestiweb/llampex-mini | refs/heads/master | /qt4client/masterform.py | # encoding: UTF-8
import os.path, traceback
import logging, imp
from PyQt4 import QtGui, QtCore, uic
def load_module(name, path):
    """Import module `name` from directory `path` via imp; returns None on failure.

    Any exception is logged rather than raised so a broken form script
    cannot take down the client.
    """
    fp = None
    module = None
    try:
        fp, pathname, description = imp.find_module(name,[path])
        module = imp.load_module(name, fp, pathname, description)
    except Exception:
        logging.exception("FATAL: Error trying to load module %s" % (repr(name)))
    finally:
        # imp.find_module returns an open file handle which the caller must
        # close; a finally block guarantees release even if logging raises.
        if fp:
            fp.close()
    return module
class LlampexMasterForm(QtGui.QWidget):
    """Master-form widget: loads a .ui layout and its optional MasterScript.

    On UI load failure the widget replaces itself with an error panel showing
    the traceback; on script load failure it pops a critical message box.
    """
    def __init__(self, project, windowkey, actionobj, prjconn):
        QtGui.QWidget.__init__(self)
        self.project = project
        self.windowkey = windowkey
        self.actionobj = actionobj
        self.prjconn = prjconn
        try:
            # Resolve and load the Qt Designer .ui file named by the action.
            ui_filepath = self.actionobj.filedir(self.actionobj.master["form"])
            self.ui = uic.loadUi(ui_filepath,self)
        except Exception:
            # Fallback UI: show the traceback inside the widget itself.
            self.layout = QtGui.QVBoxLayout()
            self.layout.addStretch()
            label = QtGui.QLabel("FATAL: An error ocurred trying to load the master form:")
            self.layout.addWidget(label)
            text = QtGui.QTextBrowser()
            text.setText(traceback.format_exc())
            self.layout.addWidget(text)
            self.layout.addStretch()
            self.setLayout(self.layout)
            return
        try:
            # Optionally load the action's Python script and instantiate its
            # MasterScript controller bound to this form.
            if "script" in self.actionobj.master:
                source_filepath = self.actionobj.filedir(self.actionobj.master["script"])
                pathname , sourcename = os.path.split(source_filepath)
                self.sourcemodule = load_module(sourcename, pathname)
                self.masterscript = self.sourcemodule.MasterScript(self.project, self)
        except Exception:
            msgBox = QtGui.QMessageBox()
            msgBox.setText("FATAL: An error ocurred trying to load the master script:\n" + traceback.format_exc())
            msgBox.setIcon(QtGui.QMessageBox.Critical)
            msgBox.exec_()
| {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
71,394 | gestiweb/llampex-mini | refs/heads/master | /projects/erp/generic/master/scripts/masterbasic4.py | # encoding: UTF-8
import os.path, traceback
from PyQt4 import QtGui, QtCore, uic
from PyQt4 import QtSql
from masterform import LlampexMasterForm
from recordform import loadActionFormRecord #LlampexRecordForm, LlampexQDialog
import time
import re
import qsqlrpcdriver.qtdriver as qtdriver
import threading
import traceback
from projectloader import LlampexTable
from qsqlmetadatamodel import QSqlMetadataModel, QMetadataModel
from llitemview import LlItemView1 as MyItemView
def h(*args):
    """Resolve a path relative to this script's directory (realpath'd)."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.realpath(os.path.join(here, *args))
class MyWidget(QtGui.QWidget):
    """Widget that paints a single model cell via a QStyledItemDelegate."""
    def setup(self):
        """Initialize delegate, style option and empty item index (call once)."""
        self.itemindex = None
        S = QtGui.QStyle
        self.option = QtGui.QStyleOptionViewItemV4()
        self.option.rect = QtCore.QRect(0,0,300,24)
        self.option.state = S.State_Active | S.State_Enabled
        self.delegate = QtGui.QStyledItemDelegate(self)
    def mouseDoubleClickEvent(self, event):
        # Flag the editing state and repaint; no real editor is opened here.
        S = QtGui.QStyle
        self.option.state |= S.State_Editing
        self.update()
    def sizeHint(self):
        """Delegate-provided size when an index is set; 120x24 otherwise."""
        if self.itemindex:
            return self.delegate.sizeHint(self.option, self.itemindex)
        else:
            sz = QtCore.QSize(120,24)
            return sz
    def setItemIndex(self, itemindex):
        """Point the widget at a model index and schedule a repaint."""
        self.itemindex = itemindex
        self.update()
    def paintEvent(self, pEvent):
        # Paint nothing until an index has been assigned.
        painter = QtGui.QPainter(self);
        if self.itemindex:
            self.delegate.paint(painter, self.option, self.itemindex)
self.delegate.paint(painter, self.option, self.itemindex)
class MasterScript(object):
    """Controller for the generic master (table browser) form.

    Wires up the table view, header context menu, search box/combo and the
    New/Edit/Browse/Copy buttons against a QSqlMetadataModel built from the
    action's table metadata.
    """
    def __init__(self, project, form):
        self.project = project
        self.form = form
        self.rpc = self.form.prjconn
        self.db = self.rpc.qtdb
        self.table = self.form.actionobj.table
        self.model = None
        # Last cell the user activated; None until a click happens.
        self.row = None
        self.col = None
        print
        try:
            # Debug dump of the table metadata for this action's table.
            tmd=LlampexTable.tableindex[self.table]
            self.tmd = tmd
            print tmd
            print "Code:", tmd.code
            print "Nombre:", tmd.name
            print "PKey:", tmd.primarykey
            print tmd.fieldlist
            print tmd.fields
            print "f0:", tmd.field[0]
            print "f1:", tmd.field[1]
        except Exception, e:
            print "Error loading table metadata:"
            print traceback.format_exc()
        print
        table = self.form.ui.table
        table.setSortingEnabled( True )
        try:
            # Header setup: sort indicator plus a right-click context menu
            # with filter / column-visibility actions.
            tableheader = table.horizontalHeader()
            tableheader.setSortIndicator(0,0)
            tableheader.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
            self.headerMenu = QtGui.QMenu(tableheader)
            action_addfilter = QtGui.QAction(
                QtGui.QIcon(h("../../icons/page-zoom.png")),
                "Add &Filter...", tableheader)
            action_showcolumns = QtGui.QAction(
                QtGui.QIcon(h("../../icons/preferences-actions.png")),
                "Show/Hide &Columns...", tableheader)
            action_hidecolumn = QtGui.QAction("&Hide this Column", tableheader)
            action_addfilter.setIconVisibleInMenu(True)
            action_showcolumns.setIconVisibleInMenu(True)
            self.headerMenu.addAction(action_addfilter)
            self.headerMenu.addAction(action_showcolumns)
            self.headerMenu.addAction(action_hidecolumn)
            # NOTE(review): action_showcolumns has no triggered handler wired
            # below -- the menu entry currently does nothing. Confirm intent.
            tableheader.setStretchLastSection(True)
            self.form.connect(tableheader, QtCore.SIGNAL("sortIndicatorChanged(int,Qt::SortOrder)"), self.table_sortIndicatorChanged)
            self.form.connect(tableheader, QtCore.SIGNAL("customContextMenuRequested(const QPoint&)"),self.table_headerCustomContextMenuRequested)
            self.form.connect(action_addfilter, QtCore.SIGNAL("triggered(bool)"), self.action_addfilter_triggered)
            self.form.connect(action_hidecolumn, QtCore.SIGNAL("triggered(bool)"), self.action_hidecolumn_triggered)
        except Exception, e:
            print e
        # set search invisible
        self.form.ui.searchFrame.setVisible(False)
        self.form.connect(table, QtCore.SIGNAL("activated(QModelIndex)"),self.table_cellActivated)
        self.form.connect(table, QtCore.SIGNAL("clicked(QModelIndex)"),self.table_cellActivated)
        self.form.connect(self.form.ui.btnNew, QtCore.SIGNAL("clicked()"), self.btnNew_clicked)
        self.form.connect(self.form.ui.btnEdit, QtCore.SIGNAL("clicked()"), self.btnEdit_clicked)
        self.form.connect(self.form.ui.btnBrowse, QtCore.SIGNAL("clicked()"), self.btnBrowse_clicked)
        self.form.connect(self.form.ui.btnCopy, QtCore.SIGNAL("clicked()"), self.btnCopy_clicked)
        self.form.connect(self.form.ui.searchBox, QtCore.SIGNAL("textChanged(const QString&)"), self.searchBox_changed)
        self.form.connect(self.form.ui.searchCombo, QtCore.SIGNAL("currentIndexChanged(const QString&)"), self.searchCombo_changed)
        #self.model = QMetadataModel(None,self.db, tmd)
        self.model = QSqlMetadataModel(None,self.db, tmd)
        # Icons used to render NULL/True/False cell values.
        self.model.decorations[None] = QtGui.QIcon(h("../../icons/null.png"))
        self.model.decorations[True] = QtGui.QIcon(h("../../icons/true.png"))
        self.model.decorations[False] = QtGui.QIcon(h("../../icons/false.png"))
        # Add fields to combobox
        self.form.ui.searchCombo.addItems(self.model.getHeaderAlias())
        self.modelReady = threading.Event()
        self.modelSet = threading.Event()
        self.reload_data()
        self.select_data()
        self.settablemodel()
        """
        layout = self.form.ui.layout()
        self.fieldlayout = QtGui.QHBoxLayout()
        self.fieldlayout.setSpacing(1)
        self.fieldviews = []
        for i, name in enumerate(self.tmd.fieldlist):
            myitemview = MyItemView(self.form.ui)
            myitemview.setup()
            myitemview.setModel(self.model)
            myitemview.setCol(i)
            self.fieldlayout.addWidget(myitemview)
            self.fieldviews.append(myitemview)
        layout.addLayout( self.fieldlayout )
        """
    def table_cellActivated(self, itemindex):
        """Remember the last clicked/activated cell as (self.row, self.col)."""
        self.row, self.col = itemindex.row(), itemindex.column()
        print "Cell:", self.row, self.col
        """
        for fieldview in self.fieldviews:
            fieldview.setRow(self.row)
        """
    def btnNew_clicked(self):
        """Open the record form in INSERT mode."""
        print "Button New clicked"
        load = loadActionFormRecord(self.project, self.form, 'INSERT', self.form.actionobj, self.rpc, self.tmd, self.model, self.row)
        """
        print "Button New clicked"
        dialog = QtGui.QDialog(self.form)
        dialog.setWindowTitle("Insert new record")
        ret = dialog.exec_()
        print ret
        """
    def btnEdit_clicked(self):
        """Open the record form in EDIT mode for the currently selected row."""
        print "Button Edit clicked --> Row: ", self.row
        load = loadActionFormRecord(self.project, self.form, 'EDIT', self.form.actionobj, self.rpc, self.tmd, self.model, self.row)
    def btnBrowse_clicked(self):
        """Toggle visibility of the search frame."""
        print "Button Browse clicked"
        #change visibility of searchFrame
        self.form.ui.searchFrame.setVisible(not self.form.ui.searchFrame.isVisible())
    def btnCopy_clicked(self):
        """Commit the selected row's pending changes; no-op without a selection."""
        print "Button Copy clicked"
        if self.row is None: return False
        self.model.commitDirtyRow(self.row)
    def searchBox_changed(self,text):
        """Re-filter the model on the current search field as text changes."""
        print "Search Box changed to ", unicode(text)
        self.model.setBasicFilter(self.form.ui.searchCombo.currentText(),text)
        self.model.refresh()
    def searchCombo_changed(self,alias):
        """Re-apply the active search text against the newly selected field."""
        if self.form.ui.searchBox.text():
            print "Search Combo changed to ", unicode(alias)
            self.model.setBasicFilter(alias,self.form.ui.searchBox.text())
            self.model.refresh()
    def action_addfilter_triggered(self, checked):
        """Prompt for a raw WHERE expression and apply it as the model filter."""
        print "Add Filter triggered:", checked
        rettext, ok = QtGui.QInputDialog.getText(self.form, "Add New Filter",
            "Write New WHERE expression:", QtGui.QLineEdit.Normal, self.model.getFilter())
        if ok:
            # A manual filter replaces any basic search-box filter.
            self.form.ui.searchBox.setText("")
            self.model.setFilter(rettext)
            self.model.refresh()
    def action_hidecolumn_triggered(self, checked):
        """Remove the column under the last header right-click from the view."""
        print "Hide Column triggered:", checked
        self.model.tmd.fieldlist.pop(self.lastColumnClicked)
        self.model.refresh()
        self.form.ui.searchCombo.clear()
        self.form.ui.searchCombo.addItems(self.model.getHeaderAlias())
    def table_headerCustomContextMenuRequested(self, pos):
        """Record which column was right-clicked and pop up the header menu."""
        print pos
        self.lastColumnClicked = self.form.ui.table.horizontalHeader().logicalIndexAt(pos)
        print "We are in column: " + str(self.lastColumnClicked)
        self.headerMenu.exec_( self.form.ui.table.horizontalHeader().mapToGlobal(pos) )
    def table_sortIndicatorChanged(self, column, order):
        """Re-sort the model when the user clicks a header sort indicator."""
        print column, order
        self.model.setSort(column,order)
        self.model.refresh()
    def reload_data(self):
        # Reset sorting to first column, ascending.
        self.model.setSort(0,0)
    def select_data(self):
        self.model.select()
    def settablemodel(self):
        """Attach the model to the table view and install its delegates."""
        self.form.ui.table.setModel(self.model)
        self.model.autoDelegate(self.form.ui.table)
| {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
71,395 | gestiweb/llampex-mini | refs/heads/master | /engine/manage_projects.py | # encoding: UTF-8
from bjsonrpc.exceptions import ServerError
from bjsonrpc.handlers import BaseHandler
import model
from model import RowProject, RowUser, RowProjectUser
from project_manager import compute_password
class ManageProjects(BaseHandler):
    def __init__(self, rpc):
        # Plain pass-through; all handler state lives in BaseHandler.
        BaseHandler.__init__(self,rpc)
def do_Commit(self):
try:
model.session.commit()
except:
model.session.rollback()
return False
else:
return True
########### USERS ###########
def getUsers(self):
userslist = []
query = model.session.query(RowUser).order_by(RowUser.id)
for rowuser in query:
userslist.append([rowuser.username,rowuser.active,rowuser.admin])
return userslist
def modifyUser(self, user_row):
user = model.session.query(RowUser).filter(RowUser.username == user_row[3]).first()
user.active = user_row[1]
user.admin = user_row[2]
if user_row[0] != user_row[3]:
user.username = user_row[0]
return self.do_Commit()
def modifyUserPass(self, username, newPass):
user = model.session.query(RowUser).filter(RowUser.username == username).first()
user.password = compute_password(newPass)
return self.do_Commit()
def newUser(self, username, password, active, admin):
user = RowUser()
user.username = username
user.password = compute_password(password)
user.active = active
user.admin = admin
model.session.add(user)
return self.do_Commit()
def delUser(self, username):
user = model.session.query(RowUser).filter(RowUser.username == username).first()
#TODO implementate cascade
query = model.session.query(RowProjectUser).filter(RowProjectUser.user == user)
for row in query:
model.session.delete(row)
model.session.delete(user)
return self.do_Commit()
########### PROJECTS ###########
def getProjects(self):
projectslist = []
query = model.session.query(RowProject).order_by(RowProject.id)
for rowproj in query:
projectslist.append([rowproj.code,rowproj.description,rowproj.db,
rowproj.path,rowproj.host,rowproj.port,
rowproj.user,rowproj.active])
return projectslist
def modifyProject(self, proj_row):
proj = model.session.query(RowProject).filter(RowProject.code == proj_row[8]).first()
proj.description = proj_row[1]
proj.db = proj_row[2]
proj.path = proj_row[3]
proj.host = None if (proj_row[4] == "") else proj_row[4]
proj.port = None if (proj_row[5] == "") else proj_row[5]
proj.user = None if (proj_row[6] == "") else proj_row[6]
proj.active = proj_row[7]
if proj_row[0] != proj_row[8]:
proj.code = proj_row[0]
return self.do_Commit()
def modifyProjPass(self, code, newPass, encrypt):
#TODO: Implementate encryption
project = model.session.query(RowProject).filter(RowProject.code == code).first()
if encrypt == "None":
project.password = newPass
project.passwdcipher = None
return self.do_Commit()
def newProject(self, code, desc, db, path, host, port, user, password, encrypt, active):
project = RowProject()
project.code = code
project.description = desc
project.db = db
project.path = path
project.host = None if (host == "") else host
project.port = None if (port == "") else port
project.user = None if (user == "") else user
project.active = active
#TODO: Implementate Encryption
if encrypt == "None":
project.password = None if (password == "") else password
project.passwdcipher = None
model.session.add(project)
return self.do_Commit()
def delProject(self, code):
project = model.session.query(RowProject).filter(RowProject.code == code).first()
#TODO implementate cascade
query = model.session.query(RowProjectUser).filter(RowProjectUser.project == project)
for row in query:
model.session.delete(row)
model.session.delete(project)
return self.do_Commit()
########### USERS/PROJECTS ###########
def getActiveUsers(self,project):
userslist = []
query = model.session.query(RowUser).filter(
RowUser.id.in_(model.session.query(RowProjectUser.user_id).filter(
RowProjectUser.project_id.in_(model.session.query(RowProject.id).filter(RowProject.code == project)))))
for row in query:
userslist.append(row.username)
return userslist
def getInactiveUsers(self,project):
userslist = []
query = model.session.query(RowUser).filter(
~RowUser.id.in_(model.session.query(RowProjectUser.user_id).filter(
RowProjectUser.project_id.in_(model.session.query(RowProject.id).filter(RowProject.code == project)))))
for row in query:
userslist.append(row.username)
return userslist
def addUserToProject(self,user,project):
userProj = RowProjectUser()
userProj.project = model.session.query(RowProject).filter(RowProject.code == project).one()
userProj.user = model.session.query(RowUser).filter(RowUser.username == user).one()
model.session.add(userProj)
return self.do_Commit()
def delUserFromProject(self,user,project):
usr = model.session.query(RowUser).filter(RowUser.username == user).one()
proj = model.session.query(RowProject).filter(RowProject.code == project).one()
userProj = model.session.query(RowProjectUser).filter(RowProjectUser.user == usr).filter(RowProjectUser.project == proj).one()
model.session.delete(userProj)
return self.do_Commit()
def getManageProjects(rpc):
    """Factory used by the RPC server: build a ManageProjects handler bound to *rpc*."""
    return ManageProjects(rpc)
71,396 | gestiweb/llampex-mini | refs/heads/master | /projects/erp/billing/warehouse/warehouse/record/scripts/articulos.py | # encoding: UTF-8
from llampexwidgets import LlItemView
from PyQt4 import QtGui, QtCore, uic
import time
class RecordScript(object):
    """Record-form script: caches references to the form's plumbing objects."""
    def __init__(self, form):
        self.form = form
        self.rpc = self.form.prjconn        # project connection (RPC proxy)
        self.db = self.rpc.qtdb             # Qt database driver over the RPC
        self.table = self.form.actionobj.table
        self.ui = form.ui
        self.model = form.model
        self.tmd = form.tmd                 # presumably table metadata -- confirm against masterform
        self.row = form.row
| {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
71,397 | gestiweb/llampex-mini | refs/heads/master | /engine/database.py | import model # import Project, metadata, engine, session
from model import RowUser, RowProject, RowProjectUser
from project_manager import compute_password
from getpass import getpass
from config import Config
import os.path
import yaml
def connect(dboptions, echo = True):
    """Create the global SQLAlchemy engine and session from a dboptions dict.

    dboptions must provide dbuser, dbpasswd, dbhost, dbport (int) and dbname.
    Side effect: sets model.engine, binds model.Base.metadata, sets model.session.
    """
    from sqlalchemy import create_engine
    conn_url = 'postgresql://%(dbuser)s:%(dbpasswd)s@%(dbhost)s:%(dbport)d/%(dbname)s' % dboptions
    #print conn_url
    model.engine = create_engine(conn_url, echo=echo)
    model.Base.metadata.bind = model.engine
    from sqlalchemy.orm import sessionmaker
    Session = sessionmaker(bind=model.engine)
    # create a Session
    model.session = Session()
def filepath():
    """Absolute path of the directory that contains this module."""
    return os.path.abspath(os.path.dirname(__file__))
def filedir(x):
    """Convert a path relative to this file into an absolute path.

    Absolute inputs are returned unchanged; relative ones are joined
    onto this module's directory.
    """
    if os.path.isabs(x):
        return x
    return os.path.join(filepath(), x)
def main():
    """CLI entry point: connect to the control database and dispatch a subcommand."""
    dboptions = {
        'dbname' : Config.Database.dbname,
        'dbuser' : Config.Database.dbuser,
        'dbpasswd' : Config.Database.dbpasswd,
        'dbhost' : Config.Database.dbhost,
        'dbport' : Config.Database.dbport
    }
    connect(dboptions, echo = False)
    import sys
    params = []
    # No subcommand given -> show help.
    if len(sys.argv) < 2: cmd = "help"
    else:
        cmd = sys.argv[1]
        params = sys.argv[2:]
    # Dispatch: extra argv tokens are forwarded as positional args.
    if cmd == "help": do_help(*params)
    elif cmd == "init": do_init(*params)
    elif cmd == "lsuser": do_lsuser(*params)
    elif cmd == "lsproject": do_lsproject(*params)
    elif cmd == "adduser": do_adduser(*params)
    elif cmd == "deluser": do_deluser(*params)
    elif cmd == "addproject": do_addproject(*params)
    elif cmd == "delproject": do_delproject(*params)
    elif cmd == "passwd": do_passwd(*params)
    elif cmd == "addprojectuser": do_addprojectuser(*params)
    elif cmd == "delprojectuser": do_delprojectuser(*params)
    else: print "Unknown command or not implemented:", cmd
def do_adduser(username = None, password = None):
    """Create a user; prompts for a username if missing, defaults password to it."""
    if username is None:
        username = raw_input("Please give an username: ")
        if not username: return
    if password is None:
        print "No password given, placing the same username as password."
        password = username
    user1 = RowUser()
    user1.username = username
    user1.password = compute_password(password)
    model.session.add(user1)
    model.session.commit()
    print "Username %s added. To change the password use 'passwd' command" % (
        repr(user1.username))
def do_deluser(username = None):
    """Delete a user after an explicit 'Yes' confirmation on stdin."""
    if username is None:
        username = raw_input("Please give an username: ")
        if not username: return
    user1 = model.session.query(RowUser).filter(RowUser.username == username).first()
    if user1 is None:
        print "No user found with that name. Giving up."
        return
    verify = raw_input("Are you sure you want to delete the user "+repr(user1.username)+"? [Yes|No]: ")
    if verify != "Yes":
        print "Giving up."
        return
    model.session.delete(user1) #TODO deberia eliminarse en cascada
    model.session.commit()
    print "Username %s deleted." % (repr(user1.username))
def do_passwd(username = None, newpassword = None):
    """Change a user's password; prompts (without echo) when not supplied."""
    if username is None:
        username = raw_input("Please give an username: ")
        if not username: return
    user1 = model.session.query(RowUser).filter(RowUser.username == username).first()
    if user1 is None:
        print "No user found with that name. Giving up."
        return
    if newpassword is None:
        newpassword = getpass()
    user1.password = compute_password(newpassword)
    model.session.commit()
    print "Password changed."
def do_lsproject(project = None):
    """List all projects, or dump every column of one project code."""
    if project is None: # list projects
        print "Project List:"
        for p in model.session.query(RowProject):
            print "-", p.code,":", p
    else:
        project1 = model.session.query(RowProject).filter(RowProject.code == project).first()
        if project1 is None:
            print "No project found with that name. Giving up."
            return
        for k in "id,code,description,db,path,host,port,user,password,active".split(","):
            print "- %s:" % k, repr(getattr(project1,k,None))
        print
def do_lsuser():
    """Print every user row."""
    print "User List:"
    for u in model.session.query(RowUser):
        print "-", u
def do_addprojectuser(username = None, project = None):
    """Grant a user access to a project; prompts for missing arguments."""
    if username is None:
        username = raw_input("Please give an username: ")
        if not username: return
    user1 = model.session.query(RowUser).filter(RowUser.username == username).first()
    if user1 is None:
        print "No user found with that name. Giving up."
        return
    if project is None:
        # Show the available projects before asking for a code.
        do_lsproject()
        project = raw_input("Give a project code: ")
        if not project: return
    project1 = model.session.query(RowProject).filter(RowProject.code == project).first()
    if project1 is None:
        print "No project found with that name. Giving up."
        return
    projectuser1 = model.session.query(RowProjectUser).filter(RowProjectUser.user == user1).filter(RowProjectUser.project == project1).first()
    if projectuser1 is not None:
        print "User is already granted on that project."
        return
    projectuser1 = RowProjectUser()
    projectuser1.project = project1
    projectuser1.user = user1
    model.session.add(projectuser1)
    model.session.commit()
    print "User %s added to the project %s" % (username,project)
def do_delprojectuser(username = None, project = None):
    """Revoke a user's access to a project after a 'Yes' confirmation."""
    if username is None:
        username = raw_input("Please give an username: ")
        if not username: return
    user1 = model.session.query(RowUser).filter(RowUser.username == username).first()
    if user1 is None:
        print "No user found with that name. Giving up."
        return
    if project is None:
        do_lsproject()
        project = raw_input("Give a project code: ")
        if not project: return
    project1 = model.session.query(RowProject).filter(RowProject.code == project).first()
    if project1 is None:
        print "No project found with that name. Giving up."
        return
    userproject1 = model.session.query(RowProjectUser).filter(RowProjectUser.project_id == project1.id).filter(RowProjectUser.user_id == user1.id).first()
    if userproject1 is None:
        print "No user and project relation found with that name. Giving up."
        return
    verify = raw_input("Are you sure you want to delete this user-project relation? [Yes|No]: ")
    if verify != "Yes":
        print "Giving up."
        return
    model.session.delete(userproject1)
    model.session.commit()
    print "User-project relation deleted."
def do_addproject(code = None, description = None, db = None, path = None, host = None, port = None, user = None, password = None, active = None):
if code is None:
code = raw_input("Please give a code: ")
if not code: return
if model.session.query(RowProject).filter(RowProject.code == code).first():
print "This project already exists. Giving up."
return
if description is None:
description = raw_input("Please give a description: ")
if db is None:
db = raw_input("Please give a database: ")
if not db: return
if path is None:
path = raw_input("Please give a path: ")
if not path: return
if host is None:
host = raw_input("Please give a host: ")
if not host: host = "127.0.0.1" # default is localhost
if port is None:
port = raw_input("Please give a port: ")
if not port: port = "5432" # default is 5432
if user is None:
user = raw_input("Please give a user: ")
if not user: user = "llampexuser" # default
if password is None:
password = raw_input("Please give a password: ")
if not password: password = "llampexpasswd" # default
if active is None:
active = raw_input("Project is actived? [True|False]: ")
if active != "False":
active = "True" # default is true
project1 = RowProject()
project1.code = code
project1.description = description
project1.db = db
project1.path = path
project1.host = host
project1.port = port
project1.user = user
project1.password = password
project1.active = active
model.session.add(project1)
model.session.commit()
print "Project %s added." % (repr(project1.code))
def do_delproject(code = None):
    """Delete a project after an explicit 'Yes' confirmation on stdin."""
    if code is None:
        code = raw_input("Please give a code: ")
        if not code: return
    project1 = model.session.query(RowProject).filter(RowProject.code == code).first()
    if project1 is None:
        print "No project found with that name. Giving up."
        return
    verify = raw_input("Are you sure you want to delete the project "+repr(project1.code)+"? [Yes|No]: ")
    if verify != "Yes":
        print "Giving up."
        return
    model.session.delete(project1) #TODO deberia eliminarse en cascada
    model.session.commit()
    print "Project %s deleted." % (repr(project1.code))
def create_all():
    # Create every table known to the declarative metadata (no-op for existing tables).
    model.Base.metadata.create_all()
def do_init():
    """Create the schema and seed a default 'llampex' project and user if absent."""
    create_all()
    project1 = model.session.query(RowProject).first()
    if project1 is None:
        print "No projects found. Creating a 'llampex' project"
        project1 = RowProject()
        project1.code = "llampex"
        project1.description = "Llampex main project"
        project1.db = "llampex"
        project1.path = os.path.realpath(filedir("../"))
        project1.active = True
        model.session.add(project1)
        model.session.commit()
    user1 = model.session.query(RowUser).first()
    if user1 is None:
        print "No users found. Creating a 'llampex' user with password 'llampex'"
        user1 = RowUser()
        user1.username = "llampex"
        user1.password = compute_password("llampex")
        model.session.add(user1)
        model.session.commit()
        # Grant the freshly created user access to every existing project.
        for project in model.session.query(RowProject):
            nn = RowProjectUser()
            nn.user = user1
            nn.project = project
            model.session.add(nn)
            user1.projects.append(nn)
        model.session.commit()
def do_help():
print "python database.py (command)"
print "Commands:"
print " help - prints this same help."
print " init - creates new tables and inits new rows."
print " adduser - adds a user to llampex."
print " deluser - delete a llampex user."
print " addproject - adds a project to llampex."
print " delproject - delete a llampex project."
print " lsuser - shows the userlist."
print " lsproject - shows the project list."
print " passwd - update a user password"
print " addprojectuser - grants acces to a project for a user"
print " delprojectuser - revokes access to a project for a user"
# Run the management CLI when executed as a script.
if __name__ == "__main__":
    main()
| {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
71,398 | gestiweb/llampex-mini | refs/heads/master | /qt4client/objects/sqlcursor.py | # encoding: UTF-8
import os.path, traceback
from PyQt4 import QtGui, QtCore, uic
from PyQt4 import QtSql
from qsqlmetadatamodel import QSqlMetadataModel
import qsqlrpcdriver.qtdriver as qtdriver
"""
####### SqlCursor
Cursor de base de datos, que funciona a partir de modelos de Qt.
Gestiona un QSqlMetadataModel y almacena una posición determinada en el registro.
"""
BOF = -2
EOF = -1
class SqlCursor(QtCore.QObject):
    """Database cursor built on top of Qt models.

    Wraps a QSqlMetadataModel for one action/table and tracks the currently
    selected row, re-emitting selection changes as Qt signals. Emulates part
    of the FLSqlCursor API described in the module docstring below.
    """
    # Access-mode constants (FLSqlCursor compatible).
    Insert = 0
    Edit = 1
    Del = 2
    Browse = 3
    _model = None # MetadataModel
    _rownumber = -1 # RowNumber
    selectionChanged = QtCore.pyqtSignal(QtCore.QModelIndex,int)
    currentIndexChanged = QtCore.pyqtSignal(QtCore.QModelIndex)
    def __init__(self, metadata, prjconn, action = None):
        QtCore.QObject.__init__(self)
        self.metadata = metadata
        self.prjconn = prjconn
        self._modeAccess = None
        self._mainfilter = ""
        if action:
            self.setAction(action)
    def setAction(self, actionname):
        """Bind the cursor to an action: resolve its table and build the model."""
        # TODO: Buscar la accion en concreto.
        # TODO: Crear aqui el metadata.model
        self.action = self.metadata.action_index[actionname]
        self.table = self.metadata.table_index[self.action.table]
        self.model = QSqlMetadataModel(None, self.prjconn.qtdb, self.table)
        self.model.setSort(0,0)
    def modeAccess(self):
        """Return the current access mode (Insert/Edit/Del/Browse or None)."""
        return self._modeAccess
    def setModeAccess(self, m):
        """Set the current access mode; returns True (FLSqlCursor-style slot)."""
        self._modeAccess = m
        return True
    def setEditMode(self):
        # BUG FIX: previously called self.modeAcess(self.Mode.Edit) -- neither a
        # 'modeAcess' method nor a 'Mode' attribute exists, so this always raised
        # AttributeError. The mode constants live directly on the class.
        return self.setModeAccess(self.Edit)
    def setMainFilter(self, where_filter):
        """Install a persistent WHERE clause that is ANDed with any select() filter."""
        self._mainfilter = where_filter
    def select(self, where = ""):
        """Refresh the model. Combines *where* with the main filter.

        NOTE(review): the combined 'where' is computed but never handed to the
        model -- filtering appears unimplemented here; confirm before relying on it.
        """
        if where and self._mainfilter:
            where = "( %s ) AND ( %s )" % (where,self._mainfilter)
        elif self._mainfilter:
            where = self._mainfilter
        self.model.select()
    def refresh(self,fieldName=None):
        """Alias for select(); fieldName is currently ignored."""
        return self.select()
    def configureViewWidget(self,widget):
        """Attach the model and delegates to *widget* and wire selection signals."""
        widget.setModel(self.model)
        self.model.autoDelegate(widget)
        selection = widget.selectionModel()
        self.connect(selection, QtCore.SIGNAL("currentRowChanged(QModelIndex,QModelIndex)"), self.indexChanged)
        self.connect(self, QtCore.SIGNAL("selectionChanged(QModelIndex,int)"),
            selection, QtCore.SLOT("select(QModelIndex,SelectionFlags)"))
        self.connect(self, QtCore.SIGNAL("currentIndexChanged(QModelIndex)"),
            widget, QtCore.SLOT("setCurrentIndex(QModelIndex)"))
        # Push the cursor's current row into the freshly attached widget.
        new = self.model.index(self._rownumber, 0)
        F = QtGui.QItemSelectionModel
        flags = F.Clear | F.Select | F.Rows | F.Current
        self.selectionChanged.emit(new, flags)
        self.currentIndexChanged.emit(new)
    def indexChanged(self, new, old):
        """Track row changes coming from the view and rebroadcast them."""
        #print "*"
        newrow, oldrow = (new.row(), old.row())
        if newrow != self._rownumber:
            oldnumber = self._rownumber
            self._rownumber = newrow
            F = QtGui.QItemSelectionModel
            flags = F.Clear | F.Select | F.Rows | F.Current
            self.selectionChanged.emit(new, flags)
            self.currentIndexChanged.emit(new)
            #self.emit(QtCore.SIGNAL("selectionChanged(QModelIndex,int)"), new, flags)
            #print "changed: %d -> %d" % (oldnumber,newrow)
    def commitBuffer(self):
        # Commit changes for the current row (not implemented yet).
        pass
    def refreshBuffer(self):
        # Discard changes for the current row (not implemented yet).
        pass
"""
Slots públicos
int modeAccess () const
bool setEditMode ()
QString mainFilter () const
void setMainFilter (const QString &f)
void setModeAccess (const int m)
void setAtomicValueBuffer (const QString &fN, const QString &functionName)
void setValueBuffer (const QString &fN, const QVariant &v)
QVariant valueBuffer (const QString &fN) const
QVariant valueBufferCopy (const QString &fN) const
bool isNull (const QString &name) const
void setNull (const QString &name)
bool isCopyNull (const QString &name) const
void setCopyNull (const QString &name)
void setEdition (const bool b)
void setBrowse (const bool b)
bool fieldDisabled (const QString &fN)
bool inTransaction ()
bool transaction (bool lock)
bool rollback ()
bool commit ()
void setAskForCancelChanges (bool a)
void setActivatedCheckIntegrity (bool a)
void setActivatedCommitActions (bool a)
bool checkIntegrity (bool showError=true)
void refresh (QString fN=QString::null)
bool refreshBuffer ()
int at ()
bool seek (int i, bool relative=false, bool emite=false)
bool next (bool emite=true)
bool prev (bool emite=true)
bool first (bool emite=true)
bool last (bool emite=true)
int del (bool invalidate=true)
bool select (const QString &filter, const QSqlIndex &sort=QSqlIndex())
bool select ()
int size ()
bool commitBuffer ()
bool commitBufferCursorRelation ()
FLSqlCursorInterface * cursorRelation ()
void setContext (QObject *c)
QObject * context () const
FLSqlCursor * obj ()
void emitNewBuffer ()
void emitBufferChanged (QString v)
void emitCursorUpdated ()
void emitRecordChoosed ()
void emitCurrentChanged (int v)
void emitAutoCommit ()
void emitBufferCommited ()
QString action ()
void setAction (QString action)
void setUnLock (const QString &fN, bool v)
bool isLocked ()
void editRecord ()
void chooseRecord ()
QString table () const
const int fieldType (const QString &fN) const
QString primaryKey () const
bool isValid () const
bool isModifiedBuffer ()
Señales
void newBuffer ()
void bufferChanged (QString)
void cursorUpdated ()
void recordChoosed ()
void currentChanged (int)
void autoCommit ()
void bufferCommited ()
Métodos públicos
FLSqlCursorInterface (const QString &n)
FLSqlCursorInterface (FLSqlCursor *obj)
~FLSqlCursorInterface ()
void setObj (FLSqlCursor *obj)
"""
"""
:::Codigo de ejemplo::::
: var curArticulo:FLSqlCursor = new FLSqlCursor("articulos");
- curArticulo.select("referencia = '" + referencia + "'");
- if (curArticulo.first()) {
- curArticulo.setModeAccess(curArticulo.Edit);
- curArticulo.refreshBuffer();
- curArticulo.setValueBuffer("costemedio", cM);
- curArticulo.commitBuffer();
- }
-
""" | {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
71,399 | gestiweb/llampex-mini | refs/heads/master | /qt4client/projectloader/__init__.py | # encoding: UTF-8
import os.path, os
import yaml
import re, traceback
"""
Handling paths:
All paths are relative from the YAML file that were read from. For example:
icon: icons/customer.png
is a file inside a subfolder relative to the yaml filepath.
You can also specify the ".." directory to refer parent folders:
icon: ../../customer.png
If you want to refer to the root of project you can use either absolute paths
or :project:/ paths.
icon: /generic/icons/edit.png
script: :project:/generic/icons/edit.png
You cannot exit out of project path by using ../ or :project: aliases.
Also, you have other aliases available depending on where your yaml
file is placed (or where it is loaded from).
:project:/ -> project root
:area:/ -> area content folder
:module:/ -> module content folder
:group:/ -> group content folder
:area: -> area yaml file folder
:module: -> module yaml file folder
:group: -> group yaml file folder
:action: -> action yaml file folder
Some of them should point to the same place.
The recommended format is :folder:/
"""
class BaseLlampexObject(yaml.YAMLObject):
    """Common base for every YAML-backed llampex object.

    Serialization is controlled by: tagorder (attributes emitted first, in
    order), taghidden (never emitted) and taghidefalse (skip falsy values).
    """
    yaml_tag = u'!LlampexBASE'
    tagorder = []       # attributes emitted first, in this order
    taghidden = []      # attributes never serialized
    taghidefalse = True # when True, falsy attribute values are omitted
    def __init__(self,*args,**kwargs):
        super(BaseLlampexObject,self).__init__()
    def __getstate__(self):
        """Build the ordered (key, value) list used by YAML serialization."""
        listpairs = []
        knownkeys = self.__dict__.keys()[:]
        # First pass: emit tagorder keys in their declared order.
        for key in self.tagorder:
            if key in knownkeys:
                if not self.taghidefalse or self.__dict__[key]:
                    listpairs.append( (key,self.__dict__[key]) )
                knownkeys.remove(key)
        # Second pass: remaining keys, alphabetically, skipping private/hidden ones.
        knownkeys.sort()
        for key in knownkeys:
            if key[0] == '_': continue
            if key in self.taghidden: continue
            if not self.taghidefalse or self.__dict__[key]:
                listpairs.append( (key,self.__dict__[key]) )
        return listpairs
    def __repr__(self):
        """Compact repr showing only short, truthy tagorder attributes (capped at ~200 chars)."""
        args = ""
        for key in self.tagorder:
            if key in self.__dict__ and len(args) < 200:
                value = self.__dict__[key]
                if value and len(repr(value)) < 32:
                    args+=" " + key + "=" + repr(value)
        return "<%s%s>" % (self.__class__.__name__,args)
    def yaml_afterload(self):
        # Hook: subclasses validate/normalize attributes after YAML load.
        pass
class ProjectLoadFatalError(Exception):
    """
    Error raised whenever the project fails to load.
    """
class ProjectLoader(object):
    """Scans a project directory tree of *.yaml files and builds the object graph.

    *path* is the project root; *files* is a flat list of relative file paths.
    load() returns the LlampexProject with its area/module/... children attached.
    """
    def __init__(self, path, files):
        self.path = path
        self.files = files
        self.filetree = {}   # dirname -> [filenames]
        self.project = None
    def load(self):
        """Load the single *.project.yaml, then resolve table/action cross-references."""
        print "Loading project at" , self.path
        # Index every file by its directory for getfilelist().
        for filepath in self.files:
            dirname, filename = os.path.split(filepath)
            if dirname not in self.filetree:
                self.filetree[dirname] = []
            self.filetree[dirname].append(filename)
        """
        for dirname in sorted(self.filetree.keys()):
            print dirname, " *** "
            print repr(list(sorted(self.filetree[dirname])))
            print
        """
        projectfiles = self.getfilelist(".","project.yaml")
        try:
            assert(len(projectfiles) == 1)
        except AssertionError:
            raise ProjectLoadFatalError, "Project must have *exactly* one file *.project.yaml in the root folder!"
        filename = os.path.join(self.path, projectfiles[0])
        print "project filename:" , filename
        self.project = self.loadfile(filename)
        self.project.load(self,self.path,".")
        # Link every action to its table; drop actions whose table is missing.
        for tname, table in self.project.table_index.iteritems():
            table.action_index = []
        for aname, action in list(self.project.action_index.iteritems()):
            try:
                table = self.project.table_index[action.table]
            except KeyError:
                print "WARN: Action %s references to a table %s which does not exist." % (aname, action.table)
                del self.project.action_index[aname]
                continue
            table.action_index.append(action)
        return self.project
    def getfilelist(self, folder, ext):
        """Return filenames in *folder* ending with '.<ext>' (empty list if unknown)."""
        if folder not in self.filetree:
            print "WARN: folder %s does not exist." % repr(folder)
            return []
        return [ fname for fname in self.filetree[folder] if fname.endswith(".%s" % ext) ]
    def loadfile(self,name):
        """Parse one YAML file and run its post-load hooks."""
        ret = yaml.load(open(name).read())
        ret.yaml_afterload()
        try: ret.llampexProjectFile_afterLoad(self)
        except AttributeError, e: print "ERROR Loading project afterload in ", ret
        return ret
class LlampexBaseFile(BaseLlampexObject):
tagorder = ["code","name","version","icon","shortcut","weight"]
childtype = None
filetype = None
childrensubfolder = {}
def __init__(self,*args,**kwargs):
super(LlampexBaseFile,self).__init__()
self.dictpath = {}
def __cmp__(self, other):
if not isinstance(other,LlampexBaseFile): raise NotImplementedError
k1 = self._key()
k2 = other._key()
if k1 < k2: return -1
elif k1 > k2: return 1
elif k1 == k2: return 0
else: raise AssertionError
def _key(self):
return [self.filetype, self.weight, self.code]
def require_attribute(self, key):
if not hasattr(self,key): raise AttributeError, "%s attribute not found!" % repr(key)
def default_attribute(self, key, val):
if not hasattr(self,key): setattr(self,key,val)
def filedir(self, fname):
x = fname
if os.path.isabs(x): x = ":project:" + x
ftype = None
if x[0] == ":":
typeend = x.find(":",1)
ftype = x[1:typeend]
x = x[typeend+1:]
if os.path.isabs(x):
ftype+="/"
x = x[1:]
path = None
if ftype is None:
path = self.filepath
else:
try:
path = self.dictpath[ftype]
except KeyError:
print "ERROR: Path %s invalid at %s" % (fname,self.filepath)
if path is None:
path = self.filepath
print "WARN: rewritting path %s" % (x)
ret = os.path.join(path,x)
ret = os.path.normpath(ret)
try:
loaderpath = self.loader.path
if loaderpath[-1] != "/": loaderpath+="/"
assert(ret.startswith(loaderpath))
except AssertionError:
raise ValueError, "ERROR: Path %s invalid at %s. Path exits out of project!" % (fname,self.filepath)
return ret
def contentdir(self, x):
return os.path.join(self.fullpath,x)
def yaml_afterload(self):
super(LlampexBaseFile,self).yaml_afterload()
self.require_attribute("code")
self.require_attribute("name")
self.default_attribute("version", None)
self.default_attribute("icon", None)
self.default_attribute("shortcut", None)
self.default_attribute("weight", "zzz")
self.weight = unicode(self.weight)
self.dictpath = {}
def llampexProjectFile_afterLoad(self, loader):
project = loader.project
ftype = self.filetype # (project, module, area, table, action , ...)
attrname = "%s_index" % ftype
if not hasattr(project,attrname):
setattr(project,attrname, {})
index = getattr(project,attrname)
if self.code in index:
print "PANIC: Two files had the same code (%s) and type (%s) ::" % (self.code, self.ftype)
print " *" , index[self.code].filepath
print " *" , self.filepath
print " ... the first one will be overwritten."
print
index[self.code] = self
def load(self,loader,root,path):
if self.childtype is None: return
if self.childtype is []: return
if isinstance(self.childtype, list):
for child in self.childtype:
self.loadchildtype(loader,root,path,child)
else:
self.loadchildtype(loader,root,path,self.childtype)
def loadchildtype(self,loader,root,path,childtype):
path = os.path.normpath(path)
self.root = root
self.path = path
self.loader = loader
if childtype in self.childrensubfolder:
path = os.path.join(self.path,self.childrensubfolder[childtype])
files = self.loader.getfilelist(path,"%s.yaml" % childtype)
fullpath = os.path.join(self.root, path)
self.fullpath = fullpath
if self.filetype:
self.dictpath[self.filetype+"/"] = self.fullpath
tmplist = []
for fname in sorted(files):
fullname = os.path.join(fullpath, fname)
try:
child = self.loader.loadfile(fullname)
except yaml.YAMLError, e:
print traceback.format_exc(0)
print "FATAL: Error scanning yaml %s:" % fullname
continue
except Exception, e:
print traceback.format_exc()
print "PANIC: Unexpected error when loading %s:" % fullname
continue
child.loader = self.loader
child.dictpath = self.dictpath.copy()
child.dictpath[childtype] = self.fullpath
tmplist.append( (child.weight, child.code, child) )
self.child_list = []
self.child = {}
if not tmplist:
print "WARN: %s at %s has no childs of type %s" % (self.__class__.__name__, self.path, childtype)
for w,c,child in sorted(tmplist):
self.child_list.append(c)
self.child[c] = child
child.parent = self
child.filepath = self.fullpath
if hasattr(child,"load"):
child.load(loader, root, os.path.join(path,c))
setattr(self,"%s" % childtype, self.child)
setattr(self,"%s_list" % childtype, self.child_list)
class LlampexProject(LlampexBaseFile):
    """Root project file (*.project.yaml); its children are areas."""
    yaml_tag = u'!LlampexProject'
    tagorder = LlampexBaseFile.tagorder + []
    filetype = "project"
    childtype = "area"
    def yaml_afterload(self):
        super(LlampexProject,self).yaml_afterload()
        #self.index = {}
class LlampexArea(LlampexBaseFile):
    """Area file (*.area.yaml); its children are modules."""
    yaml_tag = u'!LlampexArea'
    tagorder = LlampexBaseFile.tagorder + []
    filetype = "area"
    childtype = "module"
    def yaml_afterload(self):
        super(LlampexArea,self).yaml_afterload()
        # Description is optional for areas.
        self.default_attribute("description","")
class LlampexModule(LlampexBaseFile):
    """Module file (*.module.yaml); children are groups, plus tables in 'tables/'."""
    yaml_tag = u'!LlampexModule'
    tagorder = LlampexBaseFile.tagorder + []
    filetype = "module"
    childtype = ["group","table"]
    childrensubfolder = { "table" : "tables" }
class LlampexGroup(LlampexBaseFile):
    """Group file (*.group.yaml); its children are actions."""
    yaml_tag = u'!LlampexGroup'
    tagorder = LlampexBaseFile.tagorder + []
    filetype = "group"
    childtype = "action"
class LlampexAction(LlampexBaseFile):
    """Action file (*.action.yaml): binds a table to master/record forms."""
    yaml_tag = u'!LlampexAction'
    tagorder = LlampexBaseFile.tagorder + []
    filetype = "action"
    def yaml_afterload(self):
        super(LlampexAction,self).yaml_afterload()
        # Actions must declare their table and both form definitions.
        self.require_attribute("table")
        self.require_attribute("record")
        self.require_attribute("master")
        self.require_attribute("description")
class EmptyTemplate(object):
    # NOTE(review): empty marker class; its purpose is not evident from this file.
    pass
class FieldsObject(object):
    """Index-or-name accessor over a table's fields.

    fields_obj[0] resolves the name through the parent's ordered ``fieldlist``;
    fields_obj["name"] looks the field up directly in the parent's ``fields``.
    """
    def __init__(self, parent):
        self.parent = parent

    def __getitem__(self, idx):
        parent = self.parent
        try:
            position = int(idx)
        except ValueError:
            # Not numeric: treat it as a field name.
            return parent.fields[idx]
        return parent.fields[parent.fieldlist[position]]
class LlampexTable(LlampexBaseFile):
    """Table metadata file (*.table.yaml).

    After load: ``field`` gives index/name access to field dicts and
    ``fieldlist`` holds field names ordered by weight (pk first).
    """
    yaml_tag = u'!LlampexTable'
    tagorder = LlampexBaseFile.tagorder + []
    tableindex = {} # TODO: Mover el TableIndex al proyecto en cuestión
    # .. aquí corre el riesgo de que se mezclen las tablas de dos proyectos
    # .. cargados a la vez o serialmente.
    filetype = "table"
    def yaml_afterload(self):
        super(LlampexTable,self).yaml_afterload()
        self.require_attribute("fields")
        self.tableindex[self.code] = self
        self.field = FieldsObject(self)
        field_ordering_list = []
        for name, field in self.fields.iteritems():
            setattr(self.field, name, field)
            field["name"] = name
            # Default ordering: pk first, then unique, then ck, then the rest.
            default_weight=9999
            if field.get("pk"): default_weight=0
            if field.get("unique"): default_weight=50
            if field.get("ck"): default_weight=100
            field_ordering_list.append( (field.get("weight",default_weight), name) )
        self.fieldlist = [ name for weight, name in sorted(field_ordering_list) ]
    @property
    def primarykey(self):
        """Name of the first field flagged as primary key, or None."""
        for name, field in self.fields.iteritems():
            # BUG FIX: was field['pk'], which raised KeyError for any field
            # without an explicit 'pk' entry; the rest of this class already
            # uses field.get("pk") for the same check.
            if field.get('pk'): return name
        return None
| {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
71,400 | gestiweb/llampex-mini | refs/heads/master | /engine/model/table_users.py | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey, Boolean
from sqlalchemy.orm import relation as relationship
from . import Base
class RowUser(Base):
    """ORM row for the ``users`` table."""
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    username = Column(String(64), nullable=False, unique=True)
    password = Column(String(255))
    active = Column(Boolean, nullable=False, default=True)
    admin = Column(Boolean, nullable=False, default=False)
    def __str__(self):
        # Bug fix: the template had three placeholders but four arguments,
        # so str(row) raised "TypeError: not all arguments converted".
        # Include the admin flag to match the argument tuple.
        return "<RowUser(%s) username=%s active=%s admin=%s>" % (
            repr(self.id),
            repr(self.username),
            repr(self.active),
            repr(self.admin)
            )
71,401 | gestiweb/llampex-mini | refs/heads/master | /projects/erp/generic/master/scripts/masterbasicangel.py | # encoding: UTF-8
import os.path, traceback
from PyQt4 import QtGui, QtCore, uic
from PyQt4 import QtSql
from masterform import LlampexMasterForm
import time
import re
import qsqlrpcdriver.qtdriver as qtdriver
import threading
import traceback
from projectloader import LlampexTable
from qsqlmetadatamodel import QSqlMetadataModel, ItemComboDelegate
def h(*args): return os.path.realpath(os.path.join(os.path.dirname(os.path.abspath( __file__ )), *args))
class MasterScript(object):
    """Controller script for the generic master (list) form.

    Wires up the table view, its header context menu (filter / hide
    columns), the search box and the metadata-backed SQL model for the
    table behind the current action.
    """
    def __init__(self, form):
        self.form = form
        self.rpc = self.form.prjconn
        self.db = self.rpc.qtdb
        self.table = self.form.actionobj.table
        self.model = None
        print
        try:
            # Table metadata registered by the project loader.
            # NOTE(review): if this lookup raises, "tmd" is left undefined
            # and the QSqlMetadataModel(...) call further down raises
            # NameError — confirm intended behaviour.
            tmd=LlampexTable.tableindex[self.table]
            print "We are in MasterBasicAngel :)"
            print tmd
            print "PKey:", tmd.primarykey
            print tmd.fieldlist
            print tmd.fields
            print "f0:", tmd.field[0]
            print "f1:", tmd.field[1]
            print "Nombre:", tmd.field.nombre
        except Exception, e:
            print "Error loading table metadata:"
            print traceback.format_exc()
        print
        table = self.form.ui.table
        table.setSortingEnabled( True )
        try:
            # Build the header context menu (add filter / show-hide / hide column).
            tableheader = table.horizontalHeader()
            tableheader.setSortIndicator(0,0)
            tableheader.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
            self.headerMenu = QtGui.QMenu(tableheader)
            action_addfilter = QtGui.QAction(
                QtGui.QIcon(h("../../icons/page-zoom.png")),
                "Add &Filter...", tableheader)
            action_showcolumns = QtGui.QAction(
                QtGui.QIcon(h("../../icons/preferences-actions.png")),
                "Show/Hide &Columns...", tableheader)
            action_hidecolumn = QtGui.QAction("&Hide this Column", tableheader)
            action_addfilter.setIconVisibleInMenu(True)
            action_showcolumns.setIconVisibleInMenu(True)
            self.headerMenu.addAction(action_addfilter)
            self.headerMenu.addAction(action_showcolumns)
            self.headerMenu.addAction(action_hidecolumn)
            tableheader.setStretchLastSection(True)
            self.form.connect(tableheader, QtCore.SIGNAL("sortIndicatorChanged(int,Qt::SortOrder)"), self.table_sortIndicatorChanged)
            self.form.connect(tableheader, QtCore.SIGNAL("customContextMenuRequested(const QPoint&)"),self.table_headerCustomContextMenuRequested)
            self.form.connect(action_addfilter, QtCore.SIGNAL("triggered(bool)"), self.action_addfilter_triggered)
            self.form.connect(action_hidecolumn, QtCore.SIGNAL("triggered(bool)"), self.action_hidecolumn_triggered)
        except Exception, e:
            print e
        # set search invisible
        self.form.ui.searchFrame.setVisible(False)
        self.form.connect(self.form.ui.btnNew, QtCore.SIGNAL("clicked()"), self.btnNew_clicked)
        self.form.connect(self.form.ui.btnBrowse, QtCore.SIGNAL("clicked()"), self.btnBrowse_clicked)
        self.form.connect(self.form.ui.searchBox, QtCore.SIGNAL("textChanged(const QString&)"), self.searchBox_changed)
        self.form.connect(self.form.ui.searchCombo, QtCore.SIGNAL("currentIndexChanged(const QString&)"), self.searchCombo_changed)
        self.model = QSqlMetadataModel(None,self.db, tmd)
        # Icons used to render NULL/True/False cell values.
        self.model.decorations[None] = QtGui.QIcon(h("../../icons/null.png"))
        self.model.decorations[True] = QtGui.QIcon(h("../../icons/true.png"))
        self.model.decorations[False] = QtGui.QIcon(h("../../icons/false.png"))
        # Add fields to combobox
        self.form.ui.searchCombo.addItems(self.model.getHeaderAlias())
        self.modelReady = threading.Event()
        self.modelSet = threading.Event()
        self.reload_data()
        self.select_data()
        self.settablemodel()
    def btnNew_clicked(self):
        """Open a (placeholder) dialog for inserting a new record."""
        print "Button New clicked"
        dialog = QtGui.QDialog(self.form)
        dialog.setWindowTitle("Insert new record")
        ret = dialog.exec_()
        print ret
    def btnBrowse_clicked(self):
        """Toggle visibility of the search frame."""
        print "Button Browse clicked"
        #change visibility of searchFrame
        self.form.ui.searchFrame.setVisible(not self.form.ui.searchFrame.isVisible())
    def searchBox_changed(self,text):
        """Re-filter the model as the user types in the search box."""
        print "Search Box changed to ", unicode(text)
        self.model.setBasicFilter(self.form.ui.searchCombo.currentText(),text)
        self.model.refresh()
    def searchCombo_changed(self,alias):
        """Re-filter the model when the search field (column) changes."""
        print "Search Combo changed to ", unicode(alias)
        self.model.setBasicFilter(alias,self.form.ui.searchBox.text())
        self.model.refresh()
    def action_addfilter_triggered(self, checked):
        """Ask for a raw WHERE expression and apply it as the model filter."""
        print "Add Filter triggered:", checked
        rettext, ok = QtGui.QInputDialog.getText(self.form, "Add New Filter",
            "Write New WHERE expression:", QtGui.QLineEdit.Normal, self.model.getFilter())
        if ok:
            self.form.ui.searchBox.setText("")
            self.model.setFilter(rettext)
            self.model.refresh()
    def action_hidecolumn_triggered(self, checked):
        """Hide the column the header context menu was opened on."""
        print "Hide Column triggered:", checked
        # Removing the field from the metadata field list hides the column.
        self.model.tmd.fieldlist.pop(self.lastColumnClicked)
        self.model.refresh()
        self.form.ui.searchCombo.clear()
        self.form.ui.searchCombo.addItems(self.model.getHeaderAlias())
    def table_headerCustomContextMenuRequested(self, pos):
        """Show the header context menu and remember the clicked column."""
        print pos
        self.lastColumnClicked = self.form.ui.table.horizontalHeader().logicalIndexAt(pos)
        print "We are in column: " + str(self.lastColumnClicked)
        self.headerMenu.exec_( self.form.ui.table.horizontalHeader().mapToGlobal(pos) )
    def table_sortIndicatorChanged(self, column, order):
        """Propagate header sort changes to the model."""
        print column, order
        self.model.setSort(column,order)
        self.model.refresh()
    def reload_data(self):
        # Default sort: first column, ascending.
        self.model.setSort(0,0)
    def select_data(self):
        self.model.select()
    def settablemodel(self):
        """Attach the model to the view and install per-column delegates."""
        self.form.ui.table.setModel(self.model)
        self.model.autoDelegate(self.form.ui.table)
| {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
71,402 | gestiweb/llampex-mini | refs/heads/master | /qt4client/test_sqlcursors.py | # encoding: UTF-8
from PyQt4 import QtCore, QtGui, QtSql
from objects.sqlcursor import SqlCursor
import sys, os, os.path
from login import ConfigSettings, SplashDialog
import bjsonrpc
import qsqlrpcdriver.qtdriver as qtdriver
import time
from datetime import timedelta, datetime
import projectloader
import math, itertools
import weakref
def apppath(): return os.path.abspath(os.path.dirname(sys.argv[0]))
def filepath(): return os.path.abspath(os.path.dirname(__file__))
def appdir(x):
    """Turn a path relative to the application into an absolute one."""
    if os.path.isabs(x):
        return x
    return os.path.join(apppath(), x)
def filedir(x):
    """Turn a path relative to this file into an absolute one."""
    if os.path.isabs(x):
        return x
    return os.path.join(filepath(), x)
def signalFactory(dest, *args):
    """Return a zero-argument callable that invokes ``dest(*args)``.

    Lets argument-less Qt signals be connected to handlers that need
    pre-bound parameters.
    """
    def proxy():
        return dest(*args)
    return proxy
class TestDialog(QtGui.QDialog):
    """Interactive test harness for SqlCursor.

    Connects to the RPC engine, loads the project, and offers one button
    per action (grouped by module) that opens a cursor test dialog.
    """
    def __init__(self):
        QtGui.QDialog.__init__(self)
        self.setWindowTitle("Test de Cursores (SqlCursor)")
        self.datetext = None
        self.notificacion = QtGui.QLabel(self)
        self.tabwidget = QtGui.QTabWidget(self)
        self.tabwidget_p1log = QtGui.QWidget(self.tabwidget)
        self.tabwidget_p2actions = QtGui.QWidget(self.tabwidget)
        self.tabwidget.addTab(self.tabwidget_p1log, "Log/Registro")
        self.tabwidget.addTab(self.tabwidget_p2actions, "Actions")
        self.layout = QtGui.QVBoxLayout(self)
        self.layout.addWidget(self.tabwidget)
        self.layout.addWidget(self.notificacion)
        self.logbox = QtGui.QTextEdit(self.tabwidget)
        self.logbox.setFrameStyle(QtGui.QFrame.StyledPanel | QtGui.QFrame.Plain)
        self.logbox.setReadOnly(True)
        self.logbox.setFocusPolicy(QtCore.Qt.NoFocus)
        self.tab1layout = QtGui.QVBoxLayout(self.tabwidget_p1log)
        #self.layout.addStretch()
        self.tab1layout.addWidget(self.logbox)
        self.tab2layout = QtGui.QGridLayout(self.tabwidget_p2actions)
        # Defer startup until the event loop is running.
        QtCore.QTimer.singleShot(50, self.iniciar)
        self.resize(400,300)
        self.notificar("Esperando . . .")
        self.date = None
        self.splash = SplashDialog()
        self.splash.finishLoad = self.fin_carga
        self.dialogs = {}
    def notificar(self, text):
        """Log *text* to the status label and the HTML log box."""
        date = datetime.today()
        datetext = unicode(date.strftime("%A, %d. %B %Y %H:%M"), "UTF8")
        # Print a bold date header only when the minute changes.
        if self.datetext != datetext:
            self.datetext = datetext
            self.logbox.insertHtml(u"<br /><b>%s</b><br />" % datetext)
        htmltext = u"(%s) %s<br />" % (unicode(date.strftime("%S.%f"),"UTF8"),text)
        self.notificacion.setText(text)
        self.notificacion.repaint()
        self.logbox.insertHtml(htmltext)
        self.forceRedraw()
    def forceRedraw(self):
        """Process pending paint events so log updates show immediately."""
        evLoop = QtCore.QEventLoop(self)
        evLoop.processEvents( QtCore.QEventLoop.ExcludeUserInputEvents, 50)
    def iniciar(self):
        """Connect and log in to the RPC engine; show the loading splash."""
        self.notificar("Inicializando . . .")
        settings = ConfigSettings.load()
        self.conn = bjsonrpc.connect(host=settings.host,port=int(settings.port))
        self.notificar("Conectado.")
        # self.conn._debug_socket = True
        logged = self.conn.call.login(settings.username,settings.password)
        assert(logged)
        self.notificar("Registrado.")
        # Connect to the first available project.
        availableprojects = self.conn.call.getAvailableProjects()
        project = availableprojects[0]['code']
        self.prj = self.conn.call.connectProject(project)
        self.notificar("Proyecto conectado.")
        self.splash.prjconn = self.prj
        self.prj.qtdriver = qtdriver.QSqlLlampexDriver(self.prj)
        self.prj.qtdb = QtSql.QSqlDatabase.addDatabase(self.prj.qtdriver, "llampex-qsqlrpcdriver")
        if not self.prj.qtdb.open("",""):
            print "ERROR: Error trying to connect Qt to RPC Database."
        self.notificar("Esperando a fin de carga.")
        self.splash.show()
    def fin_carga(self):
        """Splash-finished callback: load the project and build the
        per-action buttons, grouped by module, in the Actions tab."""
        self.notificar("Carga finalizada, cargando proyecto . . . ")
        self.projectpath = self.splash.projectpath
        self.projectfiles = self.splash.rprj.files
        self.prjloader = projectloader.ProjectLoader(self.projectpath,self.projectfiles)
        self.project = self.prjloader.load()
        tableindex = projectloader.LlampexTable.tableindex
        actions_sz = len(self.project.action_index.keys())
        # Lay the buttons out on an approximately square grid.
        rows = int(math.ceil(math.sqrt(float(actions_sz))))
        cols = int(math.ceil(rows / 2.2))
        self.notificar("Acciones: (%d) " % (actions_sz))
        # Group the actions by their owning module.
        modules = {}
        for action in self.project.action_index.values():
            module = action.parent.parent
            if module not in modules: modules[module] = []
            modules[module].append(action)
        i = itertools.count()
        def nextval(c):
            # Map the running counter onto (row, col) grid coordinates.
            i = c.next()
            col = i % cols
            row = (i - col) / cols
            i+=1
            return row,col
        for module, actions in sorted(modules.iteritems()):
            row, col = nextval(i)
            # Skip to the start of a fresh row for the module header label.
            while col != 0: row, col = nextval(i)
            widget = QtGui.QLabel("%s module (%s area)" % (module.name, module.parent.name), self.tabwidget_p2actions)
            self.notificar(" **> %s - %s " % (module.code, module.name))
            self.tab2layout.addWidget(widget, row, col)
            # Pad to the end of the row before placing the action buttons.
            while col != cols-1: row, col = nextval(i)
            for action in sorted(actions):
                row, col = nextval(i)
                widget = QtGui.QPushButton(action.name, self.tabwidget_p2actions)
                self.connect(widget, QtCore.SIGNAL("clicked()"), signalFactory(self.action_clicked,widget,action.code))
                self.tab2layout.addWidget(widget, row, col)
                self.notificar(" * %s - %s" % (action.code, action.name))
        self.notificar("Proyecto cargado.")
    def action_clicked(self, widget, actioncode):
        """Open (or re-show) the cursor test dialog for an action."""
        if actioncode not in self.dialogs:
            dialog = TestSqlCursorDialog(self, self.project, self.prj, actioncode)
            self.dialogs[actioncode] = dialog
        self.dialogs[actioncode].show()
class TestSqlCursorDialog(QtGui.QDialog):
    """Dialog showing a table view driven by a (possibly shared) SqlCursor."""
    def tbutton(self, title, action = None):
        """Create a tool button in the button row.

        *action* may be a slot, or a list/tuple of arguments that is
        unpacked into connect().
        """
        button = QtGui.QToolButton(self)
        button.setText(title)
        self.buttonLayout.addWidget(button)
        if action:
            if type(action) is list or type(action) is tuple:
                self.connect(button, QtCore.SIGNAL("clicked()"), *action)
            else:
                self.connect(button, QtCore.SIGNAL("clicked()"), action)
        return button
    def __init__(self, parent, project, rpc, actioncode, cursor = None):
        QtGui.QDialog.__init__(self)
        self.project = project
        self.parent = parent
        self.prjconn = rpc
        self.action = self.project.action_index[actioncode]
        self.table = self.project.table_index[self.action.table]
        self.cursor = cursor
        print "Loading", self.action.name
        self.setWindowTitle("(SqlCursor) %s -> %s" % (self.action.name, self.table.name))
        self.layout = QtGui.QVBoxLayout(self)
        self.title = QtGui.QLabel(self.action.name)
        self.title.setAlignment(QtCore.Qt.AlignCenter)
        font = self.title.font()
        font.setBold(True)
        self.title.setFont(font)
        # NOTE(review): self.table (the table metadata, assigned above) is
        # overwritten here with the QTableView widget — confirm intended.
        self.table = QtGui.QTableView(self)
        self.table.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
        self.table.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
        self.table.setSortingEnabled(True)
        tableheader = self.table.horizontalHeader()
        tableheader.setSortIndicator(0,0)
        self.connect(tableheader, QtCore.SIGNAL("sortIndicatorChanged(int,Qt::SortOrder)"), self.table_sortIndicatorChanged)
        self.layout.addWidget(self.title)
        self.buttonLayout = QtGui.QHBoxLayout()
        self.tbInsert = self.tbutton("Insert")
        self.tbDelete = self.tbutton("Delete")
        self.tbCommit = self.tbutton("Commit", action=self.buttonCommit_clicked)
        self.tbRevert = self.tbutton("Revert", action=self.buttonRevert_clicked)
        self.buttonLayout.addStretch()
        self.tbNewView = self.tbutton("New View", action=self.buttonNewView_clicked)
        self.layout.addLayout(self.buttonLayout)
        self.layout.addWidget(self.table)
        # Reuse the caller's cursor when given, so several views share state.
        if self.cursor is None:
            self.cursor = SqlCursor(self.project, self.prjconn, self.action.code)
            self.cursor.select()
        self.cursor.configureViewWidget(self.table)
        self.resize(600,400)
        self.child_views = []
    def table_sortIndicatorChanged(self, column, order):
        """Re-sort the underlying model when the header indicator changes."""
        print "Sorting", column, order
        self.table.model().setSort(column,order)
        self.table.model().refresh()
    def closeEvent(self,event):
        # Deregister from the parent so the action can be opened again.
        del self.parent.dialogs[self.action.code]
        event.accept()
    def buttonNewView_clicked(self):
        """Open another dialog sharing this dialog's cursor."""
        child = TestSqlCursorDialog(self.parent, self.project, self.prjconn, self.action.code, self.cursor)
        self.child_views.append(child)
        child.show()
    def buttonCommit_clicked(self):
        self.cursor.commitBuffer()
    def buttonRevert_clicked(self):
        self.cursor.refreshBuffer()
# Entry point: run the cursor test dialog as a standalone application.
app = QtGui.QApplication(sys.argv)
testdialog = TestDialog()
testdialog.show()
retval = app.exec_()
sys.exit(retval) # exit the application with the proper return code.
| {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
71,403 | gestiweb/llampex-mini | refs/heads/master | /projects/erp/generic/master/scripts/masterbasic2.py | # encoding: UTF-8
import os.path, traceback
from PyQt4 import QtGui, QtCore, uic
from PyQt4 import QtSql
from masterform import LlampexMasterForm
import time
import re
import qsqlrpcdriver.qtdriver as qtdriver
import threading
import traceback
from projectloader import LlampexTable
def h(*args): return os.path.realpath(os.path.join(os.path.dirname(os.path.abspath( __file__ )), *args))
class MasterScript(object):
def __init__(self, form):
self.form = form
self.rpc = self.form.prjconn
# This code is obsolete! >>>>
if not hasattr(self.rpc,"qtdriver"):
print "####### LOADING QT-SQL DRIVER IN PROJECT CODE !!! #####"
qtdriver.DEBUG_MODE = True
self.rpc.qtdriver = qtdriver.QSqlLlampexDriver(self.rpc)
self.rpc.qtdb = QtSql.QSqlDatabase.addDatabase(self.rpc.qtdriver, "llampex-qsqlrpcdriver")
assert(self.rpc.qtdb.open("",""))
qtdriver.DEBUG_MODE = False
# <<< This code is obsolete!
self.db = self.rpc.qtdb
self.table = self.form.actionobj.table
self.model = None
print
try:
tmd=LlampexTable.tableindex[self.table]
print tmd
print "PKey:", tmd.primarykey
print tmd.fieldlist
print tmd.fields
print "f0:", tmd.field[0]
print "f1:", tmd.field[1]
print "Nombre:", tmd.field.nombre
except Exception, e:
print "Error loading table metadata:"
print traceback.format_exc()
print
table = self.form.ui.table
table.setSortingEnabled( True )
tableheader = table.horizontalHeader()
tableheader.setSortIndicator(0,0)
tableheader.setContextMenuPolicy( QtCore.Qt.ActionsContextMenu )
action_addfilter = QtGui.QAction(
QtGui.QIcon(h("../../icons/page-zoom.png")),
"Add &Filter...", tableheader)
action_showcolumns = QtGui.QAction(
QtGui.QIcon(h("../../icons/preferences-actions.png")),
"Show/Hide &Columns...", tableheader)
action_hidecolumn = QtGui.QAction("&Hide this Column", tableheader)
action_addfilter.setIconVisibleInMenu(True)
action_showcolumns.setIconVisibleInMenu(True)
tableheader.addAction(action_addfilter)
tableheader.addAction(action_showcolumns)
tableheader.addAction(action_hidecolumn)
tableheader.setStretchLastSection(True)
self.form.connect(action_addfilter, QtCore.SIGNAL("triggered(bool)"), self.action_addfilter_triggered)
self.form.connect(tableheader, QtCore.SIGNAL("sortIndicatorChanged(int,Qt::SortOrder)"), self.table_sortIndicatorChanged)
self.form.connect(tableheader, QtCore.SIGNAL("customContextMenuRequested(QPoint &)"),self.table_headerCustomContextMenuRequested)
self.form.connect(self.form.ui.btnNew, QtCore.SIGNAL("clicked()"), self.btnNew_clicked)
self.model = QtSql.QSqlTableModel(None,self.db)
self.modelReady = threading.Event()
self.modelSet = threading.Event()
QtCore.QTimer.singleShot(5,self.settablemodel)
thread1 = threading.Thread(target=self.reload_data)
thread1.start()
def btnNew_clicked(self):
print "Button New clicked"
dialog = QtGui.QDialog(self.form)
dialog.setWindowTitle("Insert new record")
ret = dialog.exec_()
print ret
def action_addfilter_triggered(self, checked):
print "Add Filter triggered:", checked
rettext, ok = QtGui.QInputDialog.getText(self.form, "Add New Filter",
"Write New WHERE expression:", QtGui.QLineEdit.Normal, self.model.filter())
self.model.setFilter(rettext)
thread1 = threading.Thread(target=self.select_data)
thread1.start()
def table_headerCustomContextMenuRequested(self, point):
print point
def table_sortIndicatorChanged(self, column, order):
print column, order
def reload_data(self):
#table = self.form.ui.table
print "Model table:", self.table
self.model.setTable(self.table)
self.model.setSort(0,0)
print "ok"
self.model.setEditStrategy(QtSql.QSqlTableModel.OnRowChange)
self.modelReady.set()
self.modelSet.wait()
# QtCore.QTimer.singleShot(100,self.settablemodel)
self.select_data()
def select_data(self):
print "Selecting!"
self.model.select()
print "ok"
def settablemodel(self):
self.modelReady.wait()
print ">Setting Table Model!"
self.form.ui.table.setModel(self.model)
print ">ok"
self.modelSet.set()
| {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
71,404 | gestiweb/llampex-mini | refs/heads/master | /engine/model/table_projectusers.py | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey, Boolean
from sqlalchemy.orm import relation as relationship
from sqlalchemy.orm import backref
from . import Base
class RowProjectUser(Base):
    """ORM row linking a user to a project (many-to-many join table)."""
    __tablename__ = 'projectusers'
    id = Column(Integer, primary_key=True)
    project_id = Column(Integer, ForeignKey('projects.id'))
    # Backrefs expose RowProject.users and RowUser.projects respectively.
    project = relationship("RowProject", backref=backref('users', order_by=id))
    user_id = Column(Integer, ForeignKey('users.id'))
    user = relationship("RowUser", backref=backref('projects', order_by=id))
    def __str__(self):
        return "<RowProjectUser(%s) projectcode=%s username=%s>" % (
            repr(self.id),
            repr(self.project.code),
            repr(self.user.username)
            )
71,405 | gestiweb/llampex-mini | refs/heads/master | /qt4client/widgets/llampexmainmenu.py | #!/usr/bin/env python
from PyQt4 import QtCore, QtGui
import math, sys, random
class LlampexMainMenuButton(QtGui.QCommandLinkButton):
    """Fixed-height menu button that reports its *key* to a callback."""
    def __init__(self, text, key, fn = None, parent=None):
        super(LlampexMainMenuButton, self).__init__(text,parent)
        self.setMinimumHeight(36)
        self.setMaximumHeight(36)
        self._key = key
        self._callback = fn
        self.connect(self,QtCore.SIGNAL("clicked()"),self.button_clicked)
    def button_clicked(self):
        # Without a callback the click is only logged (debug aid).
        if self._callback:
            self._callback(self._key)
        else:
            print "Clicked", self._key
class LlampexMainMenuItemList(QtGui.QFrame):
    """Vertical list of LlampexMainMenuButton entries inside a frame."""
    def __init__(self, parent=None):
        super(LlampexMainMenuItemList, self).__init__(parent)
        self.llampex_layout = QtGui.QVBoxLayout()
        self.llampex_items = []
        self.setMinimumHeight(36)
        self.setMaximumHeight(5000)
        self.llampex_layout.setSizeConstraint(QtGui.QLayout.SetMinAndMaxSize)
        self.setLayout(self.llampex_layout)
        self.setFrameShadow(QtGui.QFrame.Plain)
        self.setFrameShape(QtGui.QFrame.StyledPanel)
    def llampex_addItem(self, text, key, fn):
        """Append a button for (text, key) wired to callback *fn*."""
        llampex_item = LlampexMainMenuButton(text,key,fn)
        llampex_item.setMinimumHeight(36)
        llampex_item.setMaximumHeight(36)
        self.llampex_items.append(llampex_item)
        self.llampex_layout.addWidget(llampex_item)
        #self.llampex_layout.insertWidget(self.llampex_layout.count()-1,llampex_item,1)
        return llampex_item
    addItem = llampex_addItem
class LlampexMainMenuItem(QtGui.QFrame):
    """Collapsible menu section: a toggle button plus a sub-item list."""
    def __init__(self, text="Button", parent=None):
        super(LlampexMainMenuItem, self).__init__(parent)
        self.setFrameShadow(QtGui.QFrame.Raised)
        self.setFrameShape(QtGui.QFrame.StyledPanel)
        self.llampex_layout = QtGui.QVBoxLayout()
        self.llampex_cmdbutton = QtGui.QCommandLinkButton(text)
        self.llampex_subitems = LlampexMainMenuItemList() # Replace later with a different widget.
        self.button = self.llampex_cmdbutton
        self.llampex_cmdbutton.setCheckable(True) # Checked means shown.
        self.llampex_cmdbutton.setMaximumHeight(36)
        self.llampex_cmdbutton.setMinimumHeight(36)
        self.llampex_cmdbutton.setMinimumWidth(200)
        self.llampex_subitems.setVisible(False)
        self.llampex_subitems.setMinimumHeight(30)
        self.llampex_layout.addWidget(self.llampex_cmdbutton)
        self.llampex_layout.addWidget(self.llampex_subitems)
        self.setMinimumHeight(20)
        self.setMaximumHeight(5000)
        self.llampex_layout.setSizeConstraint(QtGui.QLayout.SetMinAndMaxSize)
        self.setLayout(self.llampex_layout)
        # Toggling the header button shows/hides the sub-item list.
        self.connect( self.llampex_cmdbutton, QtCore.SIGNAL("toggled(bool)"), self.llampex_subitems.setVisible )
        self._default_callback = None
    def setDefaultCallback(self,fn):
        """Set the callback used when addItem() is called without one."""
        self._default_callback = fn
    def llampex_addItem(self, text,key=None,fn=None):
        """Add a sub-entry; key defaults to text, fn to the default callback."""
        if key is None: key = text
        if fn is None: fn = self._default_callback
        return self.llampex_subitems.llampex_addItem(text,key,fn)
    addItem = llampex_addItem
class LlampexMainMenuFrame(QtGui.QFrame):
    """Vertical container of LlampexMainMenuItem sections."""
    def __init__(self, parent=None):
        super(LlampexMainMenuFrame, self).__init__(parent)
        self.llampex_layout = QtGui.QVBoxLayout()
        self.llampex_items = []
        self.llampex_layout.addStretch()
        self.llampex_layout.setSizeConstraint(QtGui.QLayout.SetMinimumSize)
        self.setLayout(self.llampex_layout)
        #self.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised);
    def llampex_addItem(self, textobj):
        """Append a section; accepts a title string or a ready-made item.

        Raises ValueError for any other argument type.
        """
        if isinstance(textobj,basestring):
            llampex_item = LlampexMainMenuItem(textobj)
        elif isinstance(textobj,LlampexMainMenuItem):
            llampex_item = textobj
        else:
            # Call-form raise (valid on Python 2.6+ and 3) instead of the
            # legacy "raise ValueError, msg" statement form.
            raise ValueError("The 1st argument isn't either a string nor a Item Object!")
        self.llampex_items.append(llampex_item)
        # Insert before the trailing stretch so items stay packed at the top.
        self.llampex_layout.insertWidget(self.llampex_layout.count()-1,llampex_item,1)
        return llampex_item
class LlampexMainMenu(QtGui.QScrollArea):
    """Scrollable wrapper around a LlampexMainMenuFrame."""
    def __init__(self, parent=None):
        super(LlampexMainMenu, self).__init__(parent)
        self.mywidget = LlampexMainMenuFrame()
        self.setWidget(self.mywidget)
        self.setWidgetResizable(True)
        self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.setMinimumWidth(250)
class LlampexDockMainMenu(QtGui.QDockWidget):
    """Dock widget hosting the main menu."""
    def __init__(self, parent=None):
        super(LlampexDockMainMenu, self).__init__(parent)
        self.mywidget = LlampexMainMenu() # Loaded into memory
        self.setWidget(self.mywidget) # and set as the widget contained by the dock.
    def addItem(self, item):
        # Delegate to the inner frame (dock -> scrollarea -> frame).
        return self.mywidget.mywidget.llampex_addItem(item)
if __name__ == "__main__": # demo program.
    app = QtGui.QApplication(sys.argv)
    form1scroll = LlampexMainMenu()
    form1scroll.resize(350,500)
    form1 = form1scroll.mywidget
    boton = QtGui.QCommandLinkButton("pppp")
    # Build a random menu tree to exercise the widgets.
    for i in range(random.randint(3,7)):
        llampex_item = LlampexMainMenuItem("texto %d" % i)
        #llampex_item = form1.llampex_addItem("texto %d" % i)
        for j in range(random.randint(1,7)):
            llampex_item.llampex_addItem("subtexto %d.%d" % (i,j))
        #llampex_item.llampex_addItem(boton)
        form1.llampex_addItem(llampex_item)
    form1scroll.show()
    sys.exit(app.exec_())
| {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
71,406 | gestiweb/llampex-mini | refs/heads/master | /qt4client/llitemview.py | # encoding: UTF-8
import os.path, traceback
from PyQt4 import QtGui, QtCore, uic
from PyQt4 import QtSql
import time
import re
import qsqlrpcdriver.qtdriver as qtdriver
import threading
import traceback
from projectloader import LlampexTable
from qsqlmetadatamodel import QSqlMetadataModel, ItemComboDelegate
def _getAllWidgets(form):
    """Recursively collect every QWidget descendant of *form*."""
    collected = []
    for child in form.children():
        if isinstance(child, QtGui.QWidget):
            collected.append(child)
            collected.extend(_getAllWidgets(child))
    return collected
def getAllWidgets(form):
    """All QWidget descendants of *form* that have a non-empty objectName."""
    named = []
    for widget in _getAllWidgets(form):
        if widget.objectName():
            named.append(widget)
    return named
PERSISTENT_EDITOR=False # TODO: Fix editors when committing.
class LlItemView1(QtGui.QAbstractItemView):
    """Item view bound to a single model cell (self.row, self.col).

    Lets one model index be placed and edited as if it were a standalone
    form widget; Tab navigation hops between sibling views through the
    focus chain (see moveCursor()).
    """
    def setup(self):
        """One-off initialisation of the single-cell view state."""
        self.colwidth = {}
        self.row = 0
        self.col = 0
        self.margin = (0,0,0,0)
        self.item = None
        self.tabWidget = self
        self.persistentEditor = None
        self.setSizePolicy(QtGui.QSizePolicy.Preferred,QtGui.QSizePolicy.Fixed)
        self.setEditTriggers(QtGui.QAbstractItemView.DoubleClicked | QtGui.QAbstractItemView.EditKeyPressed)
        self.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
        #self.viewport().setSizePolicy(QtGui.QSizePolicy.Preferred,QtGui.QSizePolicy.Minimum)
        #self.setTabKeyNavigation(False)
        #self.setFrameStyle(QtGui.QFrame.NoFrame)
        viewport = self.viewport()
        viewport.setBackgroundRole(QtGui.QPalette.Window)
        self.setFrameStyle(QtGui.QFrame.NoFrame)
        #self.setFrameStyle(QtGui.QFrame.Box | QtGui.QFrame.Sunken)
    def setTabWidget(self, widget):
        """Set the widget used as anchor for Tab-chain navigation."""
        self.tabWidget = widget
    def minimumSizeHint(self):
        w = self.colwidth.get(self.col, 50)
        sz = QtCore.QSize(w+24,24)
        return sz
    def setPosition(self,row, col):
        """Bind the view to model cell (row, col)."""
        self.row = row
        self.col = col
        self.updatePosition()
    def setRow(self, row):
        self.row = row
        self.updatePosition()
    def setCol(self, col):
        self.col = col
        self.updatePosition()
    def focusInEvent(self, event):
        QtGui.QAbstractItemView.focusInEvent(self,event)
        if self.item and self.item.isValid():
            #print "focus IN:", self.row, self.col
            # TODO: Returns an error if the cell cannot be edited or was
            # already being edited.
            self.edit(self.item)
    def focusOutEvent(self, event):
        QtGui.QAbstractItemView.focusOutEvent(self,event)
        #if self.item:
        #    #print "focus OUT:", self.row, self.col
    def updatePosition(self):
        """Refresh self.item from the model; return False when invalid."""
        model = self.model()
        if self.persistentEditor:
            self.closePersistentEditor(self.item)
        self.item = model.index(self.row, self.col)
        if not self.item.isValid():
            #print "Item invalid::"
            return False
        fnAutoDelegate = getattr(model, "autoDelegate", None)
        if fnAutoDelegate: fnAutoDelegate(self)
        smodel = self.selectionModel()
        smodel.setCurrentIndex(self.item, QtGui.QItemSelectionModel.NoUpdate);
        if model.flags(self.item) & QtCore.Qt.ItemIsEditable:
            # A sunken frame marks the cell as editable.
            self.setFrameStyle(QtGui.QFrame.Box | QtGui.QFrame.Sunken)
            if PERSISTENT_EDITOR:
                self.openPersistentEditor(self.item)
                self.persistentEditor = True
        else:
            self.setFrameStyle(QtGui.QFrame.NoFrame)
        #self.update()
        #szh = self.sizeHint()
        #szh += QtCore.QSize(15,15)
        #self.resize(szh)
        return True
    def sizeHint(self):
        #sz = QtGui.QAbstractItemView.sizeHint(self)
        #sz.setHeight(32)
        w = self.colwidth.get(self.col, 50)
        sz = QtCore.QSize(w+64,32)
        return sz
        # NOTE(review): the code below is unreachable (dead after return).
        if self.item:
            sz = self.sizeHintForIndex(self.item)
            return sz
    def setColumnWidth(self, col, width):
        self.colwidth[col] = width
    """
    def setDelegateForColumn(self, col, delegate):
        if col != self.col: return
        self.delegate = delegate
    """
    def paintEvent(self, pEvent):
        """Paint the single bound index using its item delegate."""
        QtGui.QAbstractItemView.paintEvent(self, pEvent)
        if not self.item: return
        if not self.item.isValid(): return
        S = QtGui.QStyle
        focus = self.hasFocus()
        viewstate = self.state()
        option = self.viewOptions()
        state = option.state
        enabled = bool(state & S.State_Enabled)
        item = self.item # Element to be drawn
        if focus:
            option.state |= S.State_HasFocus
        if viewstate & S.State_Editing:
            option.state |= S.State_Editing
        if viewstate & S.State_MouseOver:
            option.state |= S.State_MouseOver
        else:
            option.state &= ~S.State_MouseOver
        painter = QtGui.QStylePainter(self.viewport())
        option.rect = self.visualRect(item)
        fwidth = self.frameWidth()
        option.rect.moveTo(fwidth,fwidth)
        option.palette.setColor(QtGui.QPalette.Base, QtGui.QColor())
        #painter.save()
        delegate = self.itemDelegate(item)
        #painter.setClipRegion(QtGui.QRegion(option.rect))
        delegate.paint(painter, option, item)
        #painter.restore()
    # virtual QModelIndex indexAt ( const QPoint & point ) const = 0
    def indexAt(self, point):
        # Only one index exists in this view, regardless of the point.
        return self.item
    # virtual void scrollTo ( const QModelIndex & index, ScrollHint hint = EnsureVisible ) = 0
    def scrollTo(self, index, hint):
        #print "scrollTo", index,hint
        return
    # virtual QRect visualRect ( const QModelIndex & index ) const = 0
    def visualRect(self, index):
        """Rect of the bound index: the whole widget minus margin/frame."""
        if index != self.item: return QtCore.QRect()
        rect = self.rect()
        margin = self.margin
        fwidth = self.frameWidth()
        rect.adjust(margin[0],margin[1],-margin[2]-fwidth*2,-margin[3]-fwidth*2)
        #szh = self.sizeHint()
        #print rect, szh
        return rect
    # *** PROTECTED *** / INTERNAL FUNCTIONS::
    # virtual int horizontalOffset () const = 0
    def horizontalOffset(self):
        "Returns the horizontal offset of the view"
        return int(self.col)
    # virtual int verticalOffset () const = 0
    def verticalOffset(self):
        "Returns the vertical offset of the view"
        return int(self.row)
    # virtual bool isIndexHidden ( const QModelIndex & index ) const = 0
    def isIndexHidden(self, index):
        """
        Returns true if the item referred to by the given index is hidden
        in the view, otherwise returns false.
        Hiding is a view specific feature. For example in TableView a column
        can be marked as hidden or a row in the TreeView.
        """
        row = index.row()
        # NOTE(review): QModelIndex provides .column(), not .col() — this
        # looks like it would raise AttributeError if ever called; confirm.
        col = index.col()
        if (row,col) == (self.row, self.col): return True
        else: return False
    # virtual QModelIndex moveCursor ( CursorAction cursorAction, Qt::KeyboardModifiers modifiers ) = 0
    def moveCursor(self, cursorAction, kbmodifiers):
        """
        Returns a QModelIndex object pointing to the next object in the
        view, based on the given cursorAction and keyboard modifiers
        specified by modifiers.
        """
        w = None
        parent = None
        thisparent = self.tabWidget.parentWidget()
        smodel = self.selectionModel()
        selectedIndex = smodel.currentIndex()
        if not selectedIndex.isValid():
            if not self.updatePosition():
                # TODO: This is a patch to allow a future redraw when we're called in the middle of a Model Reset.
                QtCore.QTimer.singleShot(200,self.updatePosition)
            return self.item
        try:
            if cursorAction == QtGui.QAbstractItemView.MoveNext:
                # Walk the focus chain forward until we leave this parent.
                w = self.tabWidget
                for i in range(10):
                    w = w.nextInFocusChain()
                    parent = w.parentWidget()
                    if parent == thisparent: break
            elif cursorAction == QtGui.QAbstractItemView.MovePrevious:
                # Same walk, backwards.
                w = self.tabWidget
                for i in range(10):
                    w = w.previousInFocusChain()
                    parent = w.parentWidget()
                    if parent == thisparent: break
            else:
                #print "moveCursor:", cursorAction, kbmodifiers
                pass
            if w:
                if not isinstance(w,self.__class__):
                    # Dive into the candidate widget looking for a sibling view.
                    for obj in getAllWidgets(w):
                        if isinstance(obj,self.__class__):
                            w = obj
                            parent = w.parentWidget()
                print "moveCursor, giving focus:", w.__class__.__name__, w.objectName()
                try: print w.row, w.col
                except Exception, e: print e
                #print parent
                #print thisparent
                QtCore.QTimer.singleShot(50,w,QtCore.SLOT("setFocus()"))
        finally:
            return self.item
    # virtual void setSelection ( const QRect & rect, QItemSelectionModel::SelectionFlags flags ) = 0
    def setSelection(self, rect, flags):
        """
        Applies the selection flags to the items in or touched by
        the rectangle, rect.
        When implementing your own itemview setSelection should
        call selectionModel()->select(selection, flags) where selection
        is either an empty QModelIndex or a QItemSelection that contains
        all items that are contained in rect.
        """
        # Does nothing.
        return
    # virtual QRegion visualRegionForSelection ( const QItemSelection & selection ) const = 0
    def visualRegionForSelection(self, selection):
        """
        Returns the region from the viewport of the items in the given selection.
        """
        return QtGui.QRegion(self.visualRect(self.item))
| {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
71,407 | gestiweb/llampex-mini | refs/heads/master | /engine/config.py | # coding: UTF8
from autoconfig.autoconfig import AutoConfigTemplate, ConfigReader
import sys, os.path
def filepath():
    """Return the absolute path of the directory containing this module."""
    here = os.path.dirname(__file__)
    return os.path.abspath(here)
def filedir(x):
    """Resolve *x* against this module's directory; absolute paths pass through."""
    if not os.path.isabs(x):
        return os.path.join(filepath(), x)
    return x
Config = None
config_filelist = ['config.ini']
class ConfigDatabase(AutoConfigTemplate):
    # [database] section schema for the engine's PostgreSQL connection.
    # NOTE: the docstring below is DATA, parsed by AutoConfigTemplate as
    # "name=type:default" lines -- do not reword or reformat it.
    """
    dbname=string:llampex
    dbuser=string:llampexuser
    dbpasswd=string:llampexpasswd
    dbhost=hostname:127.0.0.1
    dbport=int:5432
    createtables=bool:False
    """
def reloadConfig(saveTemplate = False):
    """
    (Re)build the global Config object from the files in config_filelist.

    If the primary config.ini is missing it is seeded from
    config.template.ini.  When *saveTemplate* is a filename the merged
    configuration is also written out to that file.  Returns Config.
    """
    global Config, config_filelist
    fullpath_filelist = [ filedir(x) for x in config_filelist ]
    # FIX: guard the empty-filelist case (the "savetemplate" command clears
    # config_filelist), which previously raised IndexError on [0].
    if fullpath_filelist and not os.path.isfile(fullpath_filelist[0]):
        print("INFO: config.ini not found. Creating one for *you*.")
        try:
            # FIX: context managers guarantee both files are closed even
            # when the read or write fails half-way.
            with open(filedir("config.template.ini"), "r") as f_in:
                with open(fullpath_filelist[0], "w") as f_out:
                    f_out.write(f_in.read())
        except Exception as e:
            print("WARN: Some error ocurred, try to copy manually config.template.ini to config.ini")
            print(repr(e))
    Config = ConfigReader(files=fullpath_filelist, saveConfig=saveTemplate)
    Config.Database = ConfigDatabase(Config, section="database")
    if saveTemplate:
        with open(saveTemplate, 'wb') as f1w:
            Config.configini.write(f1w)
    else:
        if Config.errors:
            print("INFO: ** La configuracion esta desactualizada, ejecute <python config.py update > para actualizarla ** ")
    return Config
def main():
    """
    Command-line entry point: "savetemplate" regenerates
    config.template.ini from the schema defaults, "update" rewrites
    config.ini merging in any newly added options; with no argument the
    config is loaded and two values are echoed as a smoke test.
    """
    if len(sys.argv) > 1:
        if sys.argv[1] == 'savetemplate':
            # Empty the file list so only schema defaults are written out.
            global config_filelist
            config_filelist = []
            reloadConfig(saveTemplate = 'config.template.ini')
        elif sys.argv[1] == 'update':
            reloadConfig(saveTemplate = 'config.ini')
    else:
        reloadConfig()
        # NOTE(review): the schema declares dbhost/dbport -- confirm
        # AutoConfigTemplate exposes them as .host/.port as used here.
        print "host:", repr(Config.Database.host), type(Config.Database.host)
        print "port:", repr(Config.Database.port), type(Config.Database.port)
if __name__ == "__main__": main()
else: reloadConfig() | {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
71,408 | gestiweb/llampex-mini | refs/heads/master | /engine/rpc_cursor.py | # encoding: UTF-8
import threading
# UNICODE HANDLING:
# In Python 2, if you want to receive uniformly all your database input in
# Unicode, you can register the related typecasters globally as soon as
# Psycopg is imported:
import psycopg2
import psycopg2.extensions
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
from bjsonrpc.exceptions import ServerError
from bjsonrpc.handlers import BaseHandler
def tuplenormalization(rows):
    """
    Normalise DB result rows for JSON transport.

    None and bool values pass through unchanged; every other value is
    coerced to unicode text.  Returns a list of lists.
    """
    def _norm(field):
        if field is None:
            return None
        if type(field) is bool:
            return field
        return unicode(field)
    return [[_norm(field) for field in row] for row in rows]
def withrlock(function):
    """
    Decorator for RPCCursor methods: refuse to run on a closed cursor and
    serialise the call under self.rlock.

    Raises ServerError if self.cur is None (cursor already closed).
    """
    def lockfn(self, *args, **kwargs):
        if self.cur is None:
            raise ServerError("Cursor not Open!")
        self.rlock.acquire()
        # BUG FIX: release in a finally block -- previously an exception in
        # the wrapped method left the lock held forever (deadlock).
        try:
            return function(self, *args, **kwargs)
        finally:
            self.rlock.release()
    # Preserve the wrapped method's name for introspection/RPC dispatch.
    lockfn.__name__ = function.__name__
    return lockfn
class RPCCursor(BaseHandler):
globaldata = { 'cursornumber' : 1 }
def __init__(self, rpc, prjmanager):
BaseHandler.__init__(self,rpc)
self.pm = prjmanager
cursornumber = self.globaldata['cursornumber']
self.globaldata['cursornumber'] += 1
self.curname = "rpccursor_%04x" % cursornumber
self.cur = self.pm.conn.cursor()
self.rlock = threading.RLock()
@withrlock
def description(self):
"Returns field properties"
return self.cur.description
@withrlock
def fields(self):
"Returns field list"
descrip = self.cur.description
if descrip is None: return None
fields = [l[0] for l in descrip]
return fields
@withrlock
def commit(self):
"Commits the current transaction."
self.pm.conn.commit()
@withrlock
def rollback(self):
"Rollbacks the changes for the current transaction."
self.pm.conn.rollback()
@withrlock
def close(self):
"Closes the cursor."
self.cur.close()
self.pm.cursors.remove(self)
self.cur = None
BaseHandler.close(self)
@withrlock
def execute(self, sql, params = None):
"Executes the specified SQL with given parameters."
if params:
return self.cur.execute(sql,params)
else:
return self.cur.execute(sql)
@withrlock
def selecttable(self, tablename, fieldlist = ["*"], wherelist = [], orderby = [], limit = 5000, offset = 0):
"""
Selects the specified table with the specified columns and filtered with the specified values
returns the field list and some other info.
"""
self._sqlparams = {
'fields' : ",".join(fieldlist),
'table' : tablename,
'limit' : limit,
'offset' : offset
}
txtwhere = []
for where1 in wherelist:
txt = ""
if type(where1) is not dict: raise ServerError, "WhereClauseNotObject"
fieldname = where1.get("fieldname")
op = where1.get("op")
value = where1.get("value")
if not op and not fieldname:
txt = self.cur.mogrify("%s", [value])
elif op in "< > = >= <= LIKE ILIKE IN ~".split():
txt = self.cur.mogrify(fieldname + " " + op + " %s", [value])
else:
raise ServerError, "WhereClauseBadFormatted: " + repr(where1)
if not txt: raise ServerError, "WhereClauseEmpty"
txtwhere.append(txt)
if not txtwhere: txtwhere = ["TRUE"]
self._sqlparams["where"] = " AND ".join(txtwhere)
self.cur.execute("""
SELECT %(fields)s
FROM %(table)s
LIMIT 0
""" % self._sqlparams)
descrip = self.cur.description
if descrip is None: raise ServerError, "UnexpectedQueryError"
self._sqlinfo = {}
self._sqlinfo["fields"] = [l[0] for l in descrip]
if not orderby:
orderby = self._sqlinfo["fields"][:1]
self._sqlparams["orderby"] = ", ".join(orderby)
return self.getmoredata(limit)
@withrlock
def getmoredata(self, amount = 5000):
self._sqlparams["limit"] = amount
sql = """
SELECT COUNT(*) as c FROM (
SELECT 0
FROM %(table)s
WHERE %(where)s
LIMIT %(limit)d OFFSET %(offset)d
) a
""" % self._sqlparams
try:
self.cur.execute(sql)
except Exception:
print "SQL::" + sql
raise
row = self.cur.fetchone()
self._sqlinfo["count"] = row[0]
def thread_moredata():
self.rlock.acquire() # Will wait until parent call finishes.
self.cur.execute("""
SELECT %(fields)s
FROM %(table)s
WHERE %(where)s
ORDER BY %(orderby)s
LIMIT %(limit)d OFFSET %(offset)d
""" % self._sqlparams)
self.rlock.release()
self._sqlparams["offset"] += self._sqlparams["limit"]
th1 = threading.Thread(target = thread_moredata)
th1.start()
return self._sqlinfo
@withrlock
def fetch(self, size=20):
"Fetches many rows. Use -1 or None for querying all available rows."
if size is None or size <= 0:
return tuplenormalization(self.cur.fetchall())
else:
return tuplenormalization(self.cur.fetchmany(size))
@withrlock
def scroll(self, value, mode = 'relative'):
"""Moves the cursor up and down specified by *value* rows. mode can be
set to 'absoulte'. """
try:
return self.cur.scroll(value, mode)
except (psycopg2.ProgrammingError, IndexError), e:
return None
@withrlock
def rowcount(self):
"Returns the count of rows for the last query executed."
return self.cur.rowcount
@withrlock
def rownumber(self):
"Return the row index where the cursor is in a zero-based index."
return self.cur.rownumber
@withrlock
def query(self):
"Return the latest SQL query sent to the backend"
return self.cur.query
@withrlock
def statusmessage(self):
"Return the latest Status message returned by the backend"
return self.cur.query
@withrlock
def copy_from(self,*args):
"Coipes a data set from a file to the server"
raise ServerError, "NotImplementedError"
@withrlock
def copy_to(self,*args):
"Dumps a data set from table to a file"
raise ServerError, "NotImplementedError"
| {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
71,409 | gestiweb/llampex-mini | refs/heads/master | /qt4client/manage_dialog.py | from PyQt4 import QtGui, QtCore, uic
import bjsonrpc
from bjsonrpc.exceptions import ServerError
class ManageDialog(QtGui.QDialog):
def __init__(self, conn, ui_filepath, np_filepath):
# Prepare Dialog
QtGui.QDialog.__init__(self)
self.ui = uic.loadUi(ui_filepath,self) # Cargamos un fichero UI externo
# Prepare Connection
self.conn = conn
# Call to the ManageProjects Class
self.manageProjects = self.conn.call.getManageProjects()
# Class attributes
self.forceNoChange = False
self.np_filepath = np_filepath
self.tableProjects = self.ui.table_projects
self.tableUsers = self.ui.table_users
self.comboProjects = self.ui.comboProjects
self.listActive = self.ui.listActive
self.listInactive = self.ui.listInactive
#fill
users = self.manageProjects.call.getUsers()
projects = self.manageProjects.call.getProjects()
self.fillTable(self.tableUsers,users)
self.fillTable(self.tableProjects,projects)
# more class attributes
self.oldUserNames = self.getCol(self.tableUsers,0)
self.oldCodes = self.getCol(self.tableProjects,0)
#Projects/Users Fill
self.fillComboBox()
# signals
self.ui.connect(self.btnExit, QtCore.SIGNAL("clicked(bool)"), self.exit_performed)
self.ui.connect(self.tableUsers, QtCore.SIGNAL("cellChanged(int,int)"), self.tableUsers_cellChanged)
self.ui.connect(self.btnAddUser, QtCore.SIGNAL("clicked(bool)"), self.add_user)
self.ui.connect(self.btnPassUser, QtCore.SIGNAL("clicked(bool)"), self.changePassUser)
self.ui.connect(self.btnDelUser, QtCore.SIGNAL("clicked(bool)"), self.del_user)
self.ui.connect(self.tableProjects, QtCore.SIGNAL("cellChanged(int,int)"), self.tableProjects_cellChanged)
self.ui.connect(self.btnAddProj, QtCore.SIGNAL("clicked(bool)"), self.add_project)
self.ui.connect(self.btnPassProj, QtCore.SIGNAL("clicked(bool)"), self.changePassProject)
self.ui.connect(self.btnDelProj, QtCore.SIGNAL("clicked(bool)"), self.del_project)
self.ui.connect(self.comboProjects, QtCore.SIGNAL("currentIndexChanged(const QString&)"), self.comboProjects_changed)
self.ui.connect(self.btnRight, QtCore.SIGNAL("clicked(bool)"), self.rightPerformed)
self.ui.connect(self.btnLeft, QtCore.SIGNAL("clicked(bool)"), self.leftPerformed)
self.ui.connect(self.btnAllRight, QtCore.SIGNAL("clicked(bool)"), self.allRightPerformed)
self.ui.connect(self.btnAllLeft, QtCore.SIGNAL("clicked(bool)"), self.allLeftPerformed)
def fillTable(self, table, rows):
# prepare space for rows
table.setRowCount(len(rows))
#fill!
i = 0
j = 0
for row in rows:
for cell in row:
if cell is not None:
item = QtGui.QTableWidgetItem(unicode(cell))
if cell == True:
item.setCheckState(QtCore.Qt.Checked)
item.setFlags(QtCore.Qt.ItemFlags(48))
item.setText("")
elif cell == False:
item.setCheckState(QtCore.Qt.Unchecked)
item.setFlags(QtCore.Qt.ItemFlags(48))
item.setText("")
table.setItem(i, j, item)
j+=1
i+=1
j=0
def showMessageBox(self,title,text,icon):
msgBox = QtGui.QMessageBox()
msgBox.setText(text)
msgBox.setWindowTitle(title)
msgBox.setIcon(icon)
msgBox.exec_()
def getRow(self,table,row):
#returns a list with the items of a row
result = []
for i in range(table.columnCount()):
item = table.item(row,i)
if QtCore.Qt.ItemFlags(48) == item.flags():
#boolean
if item.checkState() == 2:
result.append(True)
else:
result.append(False)
else:
#text
result.append(unicode(item.text()))
return result
def getCol(self,table,col):
#returns a list with the items of a column
result = []
for i in range(table.rowCount()):
item = table.item(i,col)
if QtCore.Qt.ItemFlags(48) == item.flags():
#boolean
if item.checkState() == 2:
result.append(True)
else:
result.append(False)
else:
#text
result.append(unicode(item.text()))
return result
###################### USERS ######################
def tableUsers_cellChanged(self,row,col):
if not self.forceNoChange:
#if is a name, validate
validate = True
if col == 0:
name = unicode(self.tableUsers.item(row,col).text())
listOfNames = self.getCol(self.tableUsers,col)
del listOfNames[row]
if name in listOfNames:
self.showMessageBox("Error","The name can't be repeated",QtGui.QMessageBox.Warning)
print "Error: The name can't be repeated"
validate = False
if not validate:
self.tableUsers.item(row,col).setText(self.oldUserNames[row])
else:
user = self.getRow(self.tableUsers,row)
user.append(self.oldUserNames[row])
#rpc
if not self.manageProjects.call.modifyUser(user):
self.showMessageBox("Fatal Error","There is an unexpected error in the database. Exiting...",QtGui.QMessageBox.Critical)
self.close()
# renovate the oldUserNames
self.oldUserNames = self.getCol(self.tableUsers,0)
# renovate ProjectsUsers
self.fillComboBox()
def add_user(self, b):
# Add a new User
validate = True
itemActived = QtGui.QTableWidgetItem("")
itemAdmin = QtGui.QTableWidgetItem("")
items = QtCore.QStringList()
items.append("Yes")
items.append("No")
name, ok = QtGui.QInputDialog.getText(self, 'New User', 'Enter user name:')
if ok:
if name == "":
self.showMessageBox("Error","The name is required",QtGui.QMessageBox.Warning)
print "Error: The name is required"
validate = False
else:
listOfNames = self.getCol(self.tableUsers,0)
if name in listOfNames:
self.showMessageBox("Error","This username already exists",QtGui.QMessageBox.Warning)
print "Error: The name can't be repeated"
validate = False
else:
validate = False
if validate:
password, ok = QtGui.QInputDialog.getText(self, 'New User', 'Enter password:', QtGui.QLineEdit.Password)
if ok:
if password == "":
self.showMessageBox("Error","The password is required",QtGui.QMessageBox.Warning)
print "Error: The password is required"
validate = False
else:
validate = False
if validate:
active, ok = QtGui.QInputDialog.getItem(self, "New User", "User is actived?", items, 0, False)
if ok:
itemActived.setFlags(QtCore.Qt.ItemFlags(48))
if active == "Yes":
active = True
itemActived.setCheckState(QtCore.Qt.Checked)
else:
active = False
itemActived.setCheckState(QtCore.Qt.Unchecked)
else:
validate = False
if validate:
admin, ok = QtGui.QInputDialog.getItem(self, "New User", "User is administrator?", items, 0, False)
if ok:
itemAdmin.setFlags(QtCore.Qt.ItemFlags(48))
if admin == "Yes":
admin = True
itemAdmin.setCheckState(QtCore.Qt.Checked)
else:
admin = False
itemAdmin.setCheckState(QtCore.Qt.Unchecked)
else:
validate = False
if validate: #All right!
#rpc!
if not self.manageProjects.call.newUser(unicode(name),unicode(password),active,unicode(admin)):
self.showMessageBox("Fatal Error","There is an unexpected error in the database. Exiting...",QtGui.QMessageBox.Critical)
self.close()
# renovate the oldUserNames
self.oldUserNames.append(unicode(name))
# Add to table
self.forceNoChange = True
self.tableUsers.insertRow(self.tableUsers.rowCount())
self.tableUsers.setItem(self.tableUsers.rowCount()-1,0,QtGui.QTableWidgetItem(unicode(name)))
self.tableUsers.setItem(self.tableUsers.rowCount()-1,1,itemActived)
self.tableUsers.setItem(self.tableUsers.rowCount()-1,2,itemAdmin)
self.showMessageBox("Info","User added correctly",QtGui.QMessageBox.Information)
self.forceNoChange = False
# renovate ProjectsUsers
self.fillComboBox()
def changePassUser(self, b):
row = self.tableUsers.currentRow()
if row == -1:
self.showMessageBox("Change Password","You must select a user",QtGui.QMessageBox.Warning)
else:
name = unicode(self.tableUsers.item(row,0).text())
password, ok = QtGui.QInputDialog.getText(self, 'Change Password', 'Enter new password for '+name+':', QtGui.QLineEdit.Password)
if ok and password != "":
if not self.manageProjects.call.modifyUserPass(unicode(name),unicode(password)):
self.showMessageBox("Fatal Error","There is an unexpected error in the database. Exiting...",QtGui.QMessageBox.Critical)
self.close()
self.showMessageBox("Info","Password changed correctly",QtGui.QMessageBox.Information)
def del_user(self, b):
row = self.tableUsers.currentRow()
if row == -1:
self.showMessageBox("Delete User","You must select a user",QtGui.QMessageBox.Warning)
else:
name = unicode(self.tableUsers.item(row,0).text())
ok = QtGui.QMessageBox.question(self, "Delete User", "Are you sure you want to delete "+name+"?", 1, 2)
if ok == 1:
#rpc
if not self.manageProjects.call.delUser(name):
self.showMessageBox("Fatal Error","There is an unexpected error in the database. Exiting...",QtGui.QMessageBox.Critical)
self.close()
# delete the row
self.forceNoChange = True
self.tableUsers.removeRow(row)
self.forceNoChange = False
# renovate the oldUserNames
self.oldUserNames = self.getCol(self.tableUsers,0)
self.showMessageBox("Info","User deleted correctly",QtGui.QMessageBox.Information)
# renovate ProjectsUsers
self.fillComboBox()
###################### PROJECTS ######################
def tableProjects_cellChanged(self,row,col):
if not self.forceNoChange:
validate = True
#if is a code, validate
if col == 0:
code = unicode(self.tableProjects.item(row,col).text())
listOfCodes = self.getCol(self.tableProjects,col)
del listOfCodes[row]
if code in listOfCodes:
self.showMessageBox("Error","The code can't be repeated",QtGui.QMessageBox.Warning)
print "Error: The code can't be repeated"
validate = False
self.tableProjects.item(row,col).setText(self.oldCodes[row])
#if is a port, validate Integer
if col == 5:
try:
int(self.tableProjects.item(row,col).text())
except ValueError:
self.showMessageBox("Error","The port must be a number",QtGui.QMessageBox.Warning)
validate = False
self.forceNoChange = True
self.tableProjects.item(row,col).setText("0")
self.forceNoChange = False
if validate:
proj = self.getRow(self.tableProjects,row)
proj.append(self.oldCodes[row])
#rpc
if not self.manageProjects.call.modifyProject(proj):
self.showMessageBox("Fatal Error","There is an unexpected error in the database. Exiting...",QtGui.QMessageBox.Critical)
self.close()
# renovate the oldCodes
self.oldCodes = self.getCol(self.tableProjects,0)
# renovate ProjectsUsers
self.fillComboBox()
def add_project(self, b):
newWindow = NewProjectDialog(self)
newWindow.show()
def changePassProject(self,b):
row = self.tableProjects.currentRow()
if row == -1:
self.showMessageBox("Change Password","You must select a project",QtGui.QMessageBox.Warning)
else:
code = unicode(self.tableProjects.item(row,0).text())
password, ok = QtGui.QInputDialog.getText(self, 'Change Password', 'Enter new password for '+code+' project:', QtGui.QLineEdit.Password)
if ok and password != "":
if not self.manageProjects.call.modifyProjPass(unicode(code),unicode(password),"None"):
self.showMessageBox("Fatal Error","There is an unexpected error in the database. Exiting...",QtGui.QMessageBox.Critical)
self.close()
self.showMessageBox("Info","Password changed correctly",QtGui.QMessageBox.Information)
def del_project(self,b):
row = self.tableProjects.currentRow()
if row == -1:
self.showMessageBox("Delete Project","You must select a project",QtGui.QMessageBox.Warning)
else:
code = unicode(self.tableProjects.item(row,0).text())
ok = QtGui.QMessageBox.question(self, "Delete Project", "Are you sure you want to delete "+code+"?", 1, 2)
if ok == 1:
#rpc
if not self.manageProjects.call.delProject(code):
self.showMessageBox("Fatal Error","There is an unexpected error in the database. Exiting...",QtGui.QMessageBox.Critical)
self.close()
# delete the row
self.forceNoChange = True
self.tableProjects.removeRow(row)
self.forceNoChange = False
# renovate the oldUserNames
self.oldCodes = self.getCol(self.tableProjects,0)
self.showMessageBox("Info","Project deleted correctly",QtGui.QMessageBox.Information)
# renovate ProjectsUsers
self.fillComboBox()
###################### USERS/PROJECTS ######################
def fillComboBox(self):
self.comboProjects.clear()
self.comboProjects.addItems(self.oldCodes)
# If exists any project, fill the Lists
if self.comboProjects.maxCount() != 0:
self.fillListsUsers(self.comboProjects.itemText(0))
def fillListsUsers(self, project):
self.fillActiveUsers(project)
self.fillInactiveUsers(project)
def fillActiveUsers(self, project):
self.listActive.clear()
self.listActive.addItems(self.manageProjects.call.getActiveUsers(unicode(project)))
def fillInactiveUsers(self, project):
self.listInactive.clear()
self.listInactive.addItems(self.manageProjects.call.getInactiveUsers(unicode(project)))
def comboProjects_changed(self, project):
# refill the Lists
self.fillListsUsers(project)
def addUserToProject(self,item):
user = unicode(item.text())
code = unicode(self.comboProjects.currentText())
#rpc
if not self.manageProjects.call.addUserToProject(user,code):
self.showMessageBox("Fatal Error","There is an unexpected error in the database. Exiting...",QtGui.QMessageBox.Critical)
self.close()
def delUserFromProject(self,item):
user = unicode(item.text())
code = unicode(self.comboProjects.currentText())
#rpc
if not self.manageProjects.call.delUserFromProject(user,code):
self.showMessageBox("Fatal Error","There is an unexpected error in the database. Exiting...",QtGui.QMessageBox.Critical)
self.close()
def rightPerformed(self,b):
i = 0
while i <= self.listActive.count():
if self.listActive.isItemSelected(self.listActive.item(i)):
item = self.listActive.takeItem(i)
self.delUserFromProject(item)
#move item
self.listInactive.addItem(item)
else:
i+=1
def leftPerformed(self,b):
i = 0
while i <= self.listInactive.count():
if self.listInactive.isItemSelected(self.listInactive.item(i)):
item = self.listInactive.takeItem(i)
self.addUserToProject(item)
#move item
self.listActive.addItem(item)
else:
i+=1
def allRightPerformed(self,b):
for i in range(self.listActive.count()):
item = self.listActive.takeItem(0)
if not item == None:
self.delUserFromProject(item)
#move item
self.listInactive.addItem(item)
def allLeftPerformed(self,b):
for i in range(self.listInactive.count()):
item = self.listInactive.takeItem(0)
if not item == None:
self.addUserToProject(item)
#move item
self.listActive.addItem(item)
def exit_performed(self, b):
# return to login?
self.close()
class NewProjectDialog(QtGui.QDialog):
def __init__(self, manageForm):
self.manageForm = manageForm
# Prepare Dialog
QtGui.QDialog.__init__(self)
self.ui = uic.loadUi(self.manageForm.np_filepath,self) # Cargamos un fichero UI externo
# Prepare Connection
self.manageProjects = self.manageForm.manageProjects
self.manageForm.setEnabled(False)
self.ui.connect(self.btnCancel, QtCore.SIGNAL("clicked(bool)"), self.cancel_performed)
self.ui.connect(self.btnSave, QtCore.SIGNAL("clicked(bool)"), self.save_performed)
self.ui.connect(self, QtCore.SIGNAL("finished(int)"), self.exit_performed)
def save_performed(self, b):
#reiniciem l'error
self.errorLabel.setText("")
if self.code.text() == "" or self.name.text() == "" or self.db.text() == "" or self.path.text() == "":
self.errorLabel.setText("Fields with * must be filled.")
elif self.code.text() in self.manageForm.getCol(self.manageForm.tableProjects,0):
self.errorLabel.setText("The project code already exists.")
else: #All right!
active = True if (self.active.currentText() == "Yes") else False
itemActived = QtGui.QTableWidgetItem("")
itemActived.setFlags(QtCore.Qt.ItemFlags(48))
if active == True:
itemActived.setCheckState(QtCore.Qt.Checked)
else:
itemActived.setCheckState(QtCore.Qt.Unchecked)
#rpc!
if not self.manageProjects.call.newProject(unicode(self.code.text()), unicode(self.name.text()),
unicode(self.db.text()), unicode(self.path.text()), unicode(self.host.text()), self.port.value(),
unicode(self.user.text()), unicode(self.password.text()), unicode(self.encrypt.currentText()), active):
self.showMessageBox("Fatal Error","There is an unexpected error in the database. Exiting...",QtGui.QMessageBox.Critical)
self.close()
# renovate the oldCodes
self.manageForm.oldCodes.append(unicode(self.code.text()))
# renovate ProjectsUsers
self.manageForm.fillComboBox()
# Add to table
self.manageForm.forceNoChange = True
self.manageForm.tableProjects.insertRow(self.manageForm.tableProjects.rowCount())
self.manageForm.tableProjects.setItem(self.manageForm.tableProjects.rowCount()-1,0,QtGui.QTableWidgetItem(unicode(self.code.text())))
self.manageForm.tableProjects.setItem(self.manageForm.tableProjects.rowCount()-1,1,QtGui.QTableWidgetItem(unicode(self.name.text())))
self.manageForm.tableProjects.setItem(self.manageForm.tableProjects.rowCount()-1,2,QtGui.QTableWidgetItem(unicode(self.db.text())))
self.manageForm.tableProjects.setItem(self.manageForm.tableProjects.rowCount()-1,3,QtGui.QTableWidgetItem(unicode(self.path.text())))
self.manageForm.tableProjects.setItem(self.manageForm.tableProjects.rowCount()-1,4,QtGui.QTableWidgetItem(unicode(self.host.text())))
self.manageForm.tableProjects.setItem(self.manageForm.tableProjects.rowCount()-1,5,QtGui.QTableWidgetItem(unicode(self.port.value())))
self.manageForm.tableProjects.setItem(self.manageForm.tableProjects.rowCount()-1,6,QtGui.QTableWidgetItem(unicode(self.user.text())))
self.manageForm.tableProjects.setItem(self.manageForm.tableProjects.rowCount()-1,7,itemActived)
self.manageForm.showMessageBox("Info","Project added correctly",QtGui.QMessageBox.Information)
self.manageForm.forceNoChange = False
# close addProject form
self.manageForm.setEnabled(True)
self.close()
def cancel_performed(self, b):
self.manageForm.setEnabled(True)
self.close()
def exit_performed(self, i):
self.manageForm.setEnabled(True)
self.close()
| {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
71,410 | gestiweb/llampex-mini | refs/heads/master | /engine/model/__init__.py | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import MetaData
Base = declarative_base()
from table_projects import RowProject
from table_users import RowUser
from table_projectusers import RowProjectUser
from table_userconfigs import RowUserConfig
metadata = Base.metadata
engine = None
session = None | {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
71,411 | gestiweb/llampex-mini | refs/heads/master | /qt4client/widgets/llampextable.py | #!/usr/bin/env python
# encoding: UTF-8
from PyQt4 import QtCore, QtGui
import sys, time, gc
class LlampexTable(QtGui.QTableView):
    """
    Benchmark/demo table view: pre-fills a QStandardItemModel and doubles
    the number of appended rows on each context-menu action, timing the
    insertions.  The menu entries are placeholders except "x" (close).
    """
    def __init__(self):
        QtGui.QTableView.__init__(self)
        # Placeholder context menu; every action funnels into
        # contextmenu_triggered below.
        self.contextmenu = QtGui.QMenu("Properties")
        self.contextmenu.addAction("Configure server filters")
        self.contextmenu.addAction("Configure server orderby")
        self.contextmenu.addAction("Show/hide columns")
        self.contextmenu.addAction("x")
        self.connect(self.contextmenu, QtCore.SIGNAL("triggered(QAction*)"), self.contextmenu_triggered)
        # Seed model: 15 columns, 250 rows with the first 5 cells filled.
        self.model = QtGui.QStandardItemModel()
        self.setModel(self.model)
        self.model.setColumnCount(15)
        for i in range(250):
            self.model.appendRow( [ QtGui.QStandardItem("z%d:%d" % (x,i)) for x in range(5) ] )
        # Number of rows the next benchmark run will add (doubles per run).
        self.ncount = 5000
    def contextmenu_triggered(self, action):
        """Run one insertion benchmark, or close the view on action "x"."""
        print "Activated action:", unicode(action.text())
        if unicode(action.text()) == "x":
            del self.model
            self.close()
            return
        self.ncount *= 2
        t1 = time.time()
        rows = self.model.rowCount()
        startrow = rows
        rows += self.ncount
        # Pre-size the model, then fill every cell of the new rows.
        self.model.setRowCount(rows)
        z = 0
        for i in range(self.ncount):
            for x in range(15):
                z+=1
                mytext = "x%d:%d:%d" % (i,x,z)
                item = QtGui.QStandardItem(mytext)
                self.model.setItem( i+startrow , x , item )
        print "Added %d rows in %.3fms. (%.2f cells / s)" % (self.ncount, (time.time() - t1)*1000.0, z / (time.time() - t1))
    def contextMenuEvent(self,event):
        """Show the placeholder context menu at the cursor position."""
        print "Context menu:", event.pos()
        self.contextmenu.popup(event.globalPos())
class MyDialog(QtGui.QDialog):
    """
    Host dialog for the LlampexTable benchmark.  A 3-second timer runs a
    manual gc.collect() and prints a census of live objects to watch for
    leaks (gc is disabled in main()).
    """
    def __init__(self):
        QtGui.QDialog.__init__(self)
        self.setWindowTitle("Test Llampex Table")
        self.layout = QtGui.QVBoxLayout()
        self.table = LlampexTable()
        self.layout.addWidget(self.table)
        self.table.parent = self
        self.setLayout(self.layout)
        self.resize(500,300)
        # Periodic manual garbage collection + object census.
        self.timer = QtCore.QTimer()
        self.connect(self.timer, QtCore.SIGNAL("timeout()"), self.timer_timeout)
        self.timer.start(3000)
    def timer_timeout(self):
        """Collect garbage and print any type with >1000 live instances."""
        print "collect!" , gc.collect()
        xl = gc.get_objects()
        cnt = {}
        problem = []
        for x in xl:
            tx = type(x)
            if tx not in cnt: cnt[tx] = 0
            cnt[tx] += 1
        for tx in cnt:
            if cnt[tx] > 1000:
                problem.append(tx)
                print tx, cnt[tx]
        print len(cnt), len(xl)
def main():
    """Run the standalone table demo (gc disabled; collected from a timer)."""
    gc.disable()  # MyDialog's timer drives gc.collect() manually
    application = QtGui.QApplication(sys.argv)
    dialog = MyDialog()
    dialog.show()
    exit_code = application.exec_()
    sys.exit(exit_code)
if __name__ == "__main__":
main()
| {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
71,412 | gestiweb/llampex-mini | refs/heads/master | /engine/model/table_userconfigs.py | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey, Boolean
from sqlalchemy.orm import relation as relationship
from sqlalchemy.orm import backref
from . import Base
class RowUserConfig(Base):
    """One per-user, per-project configuration entry (named key/value)."""
    __tablename__ = 'userconfigs'
    id = Column(Integer, primary_key=True)
    # NOTE(review): user/project look like plain name/code strings, not
    # foreign keys to the users/projects tables -- confirm that is intended.
    user = Column(String(64))
    project = Column(String(64))
    configname = Column(String(128))
    value = Column(String)
    def __str__(self):
        # Debug-friendly rendering; omits the (possibly large) value.
        return "<RowUserConfig(%s) projectcode=%s username=%s configname=%s>" % (
            repr(self.id),
            repr(self.project),
            repr(self.user),
            repr(self.configname)
            )
71,413 | gestiweb/llampex-mini | refs/heads/master | /engine/project_manager.py | # encoding: UTF-8
import psycopg2
import model
import binascii
import hashlib, threading
import os , os.path, bz2, zlib
import yaml
from bjsonrpc.exceptions import ServerError
from bjsonrpc.handlers import BaseHandler
from base64 import b64encode, b64decode
import rpc_cursor
import qsqlrpcdriver.servercursor as servercursor
verbose = False
b64digest_filecache = {}
b64digest_namecache = {}
def get_b64digest(text):
    """Return a 20-character base64 rendering of SHA-1(text)."""
    sha1 = hashlib.sha1(text)
    encoded = b64encode(sha1.digest())
    return encoded[:20]
def get_file_b64digest(filename, name):
    """
    Return a combined digest of (name, contents of *filename*), memoised
    per file and invalidated by mtime.

    Side effects: records the (name, content-digest) pair in
    b64digest_namecache keyed by the combined digest, and the (mtime,
    digest) pair in b64digest_filecache keyed by filename.
    """
    mtime = os.stat(filename).st_mtime
    if filename in b64digest_filecache:
        cached_mtime, cached_digest = b64digest_filecache[filename]
        if mtime == cached_mtime:
            return cached_digest
    # FIX: close the file deterministically (it was left open before).
    with open(filename) as f1:
        filetext = f1.read()
    text_b64digest = get_b64digest(filetext)
    name_b64digest = get_b64digest(name)
    b64digest = get_b64digest(name_b64digest + text_b64digest)
    b64digest_namecache[b64digest] = {'name': name, 'digest': text_b64digest}
    b64digest_filecache[filename] = (mtime, b64digest)
    return b64digest
class HashTable(BaseHandler):
    """RPC-exposed index of 20-character base64 node hashes.

    Hashes (as produced by get_b64digest) are kept both in a flat list
    and in a shallow trie (``self.index``) keyed by their first
    ``index_maxdepth`` characters, so clients can synchronize file sets
    by comparing digests of prefix buckets instead of full hash lists.
    """
    def __init__(self, rpc):
        BaseHandler.__init__(self,rpc)
        # Flat list of every hash added.
        self.hashlist = []
        # Trie: first char -> { second char -> [full hashes...] } (depth 2).
        self.index = {}
        self.index_maxdepth = 2
        # The 64-character base64 alphabet, in sorted (ASCII) order.
        self.b64tr1 = "+/0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
        # Maps each base64 char to one of 8 buckets.  Relies on Python 2
        # integer division: n/8 yields 0..7 for n in 0..63.
        self.b64tr2 = {}
        for n,c in enumerate(list(self.b64tr1)):
            self.b64tr2[c] = n/8
    def add(self,key):
        # Register a node hash in both the flat list and the prefix trie.
        self.hashlist.append(key)
        self._addindex(key, key, self.index)
    def _addindex(self, key, value, index, depth = 1):
        # Recursively insert *value* under the leading characters of *key*;
        # *key* is consumed one char per level, *value* stays the full hash.
        k = key[0]
        if depth < self.index_maxdepth:
            if k not in index:
                index[k] = {}
            self._addindex(key[1:], value, index[k], depth + 1)
        else:
            if k not in index:
                index[k] = []
            index[k].append(value)
    def _get_hash_index(self,index):
        # Flatten a trie subtree (dict of dicts/lists) into a list of hashes.
        if type(index) is list:
            return index
        if type(index) is dict:
            retlist = []
            for k,ilist in index.iteritems():
                retlist += self._get_hash_index(ilist)
            return retlist
    def _get_hash_list(self, key, index, pos = 0):
        # Return every stored hash starting with prefix *key*.
        if len(key) <= pos:
            # Prefix fully consumed: everything below this node matches.
            return self._get_hash_index(index)
        if type(index) is list:
            # Leaf reached before the prefix ran out: filter explicitly.
            return [ ilist for ilist in index if ilist.startswith(key) ]
        if type(index) is dict:
            k = key[pos]
            if k not in index: return []
            else:
                return self._get_hash_list(key,index[k],pos+1)
    def _get_hash_options(self, key, index, pos = 0):
        # Return the sorted next-character choices available after prefix *key*.
        if len(key) > pos: # advance to find the correct options.
            if type(index) is dict:
                k = key[pos]
                if k not in index: return []
                return self._get_hash_options(key,index[k],pos+1)
        # final position, or end-of-index, return possible option-letters.
        if type(index) is dict: # final position and not endofindex.
            return sorted(index.keys())
        # end of index. manually get choices or return empty.
        return []
    def _get_node_hash_list(self, keylist = [""], style = "dict", hashsize = 10, hashoffset = 0):
        # Collect the union of hashes matching any prefix in *keylist*.
        # NOTE(review): mutable default argument; harmless here because the
        # list is never mutated, but fragile if that ever changes.
        hashlist = set([])
        for key in keylist:
            hashes = set(self._get_hash_list(key, self.index))
            hashlist|= hashes
        if style == "dict":
            # hash -> {'name': ..., 'digest': ...} via the module-level cache.
            hashdict = {}
            for mhash in hashlist:
                hashdict[mhash] = b64digest_namecache[mhash]
            return hashdict
        elif style == "list":
            # Sorted substrings [hashoffset:hashoffset+hashsize] of each hash.
            hlist = [ mhash[hashoffset:hashoffset+hashsize] for mhash in sorted(list(hashlist))]
            return hlist
        else: raise ValueError, "unknown style."
    def getNodeSignature(self, parentkey = "", hashsize = 20, hashoffset = 0):
        # [key, count, digest] triple summarizing the subtree under parentkey.
        return self._get_signature([parentkey],"list",hashsize,hashoffset)[0]
    def getChild8Signature(self, parentkey = "", hashsize = 10, hashoffset = 0):
        """Digests of the child signatures grouped into 8 buckets of the
        base64 alphabet (8 consecutive letters per bucket, see b64tr2)."""
        hashoptions = self._get_hash_options(parentkey, self.index)
        keylist = [ parentkey + opt for opt in hashoptions ]
        signdict = self._get_signature(keylist,"dict",hashsize,hashoffset)
        child8 = [[],[],[],[],[],[],[],[]]
        for k, val in signdict.iteritems():
            # Bucket by the last character of the child key.
            k8 = self.b64tr2[k[-1]]
            child8[k8].append(val)
        child8_2 = []
        for v in child8:
            child8_2.append(get_b64digest("".join(v)))
        return child8_2
    def getChildSignature(self, parentkey = "", hashsize = 10, hashoffset = 0):
        # Map of child-prefix -> digest for each direct child of parentkey.
        hashoptions = self._get_hash_options(parentkey, self.index)
        keylist = [ parentkey + opt for opt in hashoptions ]
        return self._get_signature(keylist,"dict",hashsize,hashoffset)
    def getNodeHashList(self, keylist, hashsize = 10, hashoffset = 0):
        # Trimmed, sorted hash substrings for every prefix in keylist.
        return self._get_node_hash_list(keylist, "list", hashsize, hashoffset)
    def getNodeHashValue(self, keylist):
        # Full hash -> name/digest mapping; accepts one key or a list of keys.
        if isinstance(keylist,basestring):
            keylist = [keylist]
        return self._get_node_hash_list(keylist, "dict",20,0)
    def _get_signature(self, keylist = [""], style = "list", hashsize = 20, hashoffset = 0):
        # Build a per-key digest; *style* selects list/text/dict output shape.
        # NOTE(review): mutable default argument (never mutated here).
        retlist = []
        for key in keylist:
            pos = len(key)  # NOTE(review): pos and hashoptions appear unused.
            hashlist = self._get_hash_list(key, self.index)
            hashoptions = self._get_hash_options(key, self.index)
            # Digest over the concatenation of all matching hashes, trimmed.
            digest = get_b64digest("".join(hashlist))[hashoffset:hashoffset+hashsize]
            size = len(hashlist)
            retlist.append([key,size,digest])
        if style == "list":
            return retlist
        elif style == "text":
            textdigest = ""
            for key,size,digest in retlist:
                textdigest += key+digest
            return textdigest
        elif style == "dict":
            textdigest = {}
            for key,size,digest in retlist:
                textdigest[key] = digest
            return textdigest
        else: raise ValueError, "unknown style."
class ProjectManager(BaseHandler):
    """RPC handler giving one authenticated user access to one project.

    Responsibilities visible here:
      * walk the project directory in a background thread and index each
        file's content hash in a HashTable (for client-side cache sync);
      * serve file contents bz2-compressed and base64-encoded, caching
        the compressed form next to the source as ``.<name>.llampexcache``;
      * hand out SQL cursors over the project's psycopg2 connection;
      * load/store the per-user "directlinks" configuration row.
    """
    def __init__(self, rpc, prj, user, conn):
        BaseHandler.__init__(self,rpc)
        self.data = prj          # project record (code, path, db credentials)
        self.path = prj.path     # filesystem root of the project tree
        self.user = user         # login name of the connected user
        self.conn = conn         # open psycopg2 connection to the project db
        self.rpc = rpc
        self.cachehashsize = 4
        self.cachehashoffset = 0
        # Maps "relative/path" -> absolute filesystem path.
        self.filelist = {}
        #self.filehash = {}
        #self.filecache = {}
        #self.treecache = {}
        self.b64list = HashTable(self.rpc)
        self.is_loaded = False
        # Index the project tree in the background so the RPC handshake
        # is not blocked; clients poll isLoaded().
        self.load_thread = threading.Thread(target=self._load)
        self.load_thread.start()
        self.cursors = []
    def newCursor(self):
        # Create and track an RPC-exposed cursor over this project.
        newcur = rpc_cursor.RPCCursor(self.rpc, self)
        self.cursors.append(newcur)
        return newcur
    def getCursor(self):
        # Server-side SQL cursor (not tracked in self.cursors).
        return servercursor.CursorSQL(self)
    def isLoaded(self):
        # True once the background _load() walk has finished.
        return self.is_loaded
    def _load(self):
        """Walk self.path, hashing every visible file into self.b64list."""
        print "Loading . . . " , self.data
        digests = set([])  # NOTE(review): never populated; appears to be dead.
        for root, dirs, files in os.walk(self.path):
            # Path of *root* relative to the project root ("." for the top).
            relroot = root[len(self.path):]
            if relroot.startswith("/"):
                relroot = relroot[1:]
            if relroot == "": relroot = "."
            # Drop hidden files and dirs; iterate over copies while mutating
            # the originals (pruning dirs also stops os.walk descending).
            for name in files[:]:
                if name.startswith("."):
                    files.remove(name)
            for name in dirs[:]:
                if name.startswith("."):
                    dirs.remove(name)
            if files:
                for name in files:
                    # Skip our own compressed-cache artifacts.
                    if name.endswith(".llampexcache"): continue
                    fullpath = os.path.join(root,name)
                    key = relroot+"/"+name
                    self.filelist[key] = fullpath
                    b64digest = get_file_b64digest(fullpath, name = key)
                    self.b64list.add(b64digest)
        self.is_loaded = True
        print "project loaded."
        """
        print
        from bjsonrpc.jsonlib import dumps
        print
        print "** Get global signature and options :::"
        print dumps( self.b64list.getNodeSignature(), self._conn)
        print
        print "** Get signature for each 1/64 part :::"
        print dumps( self.b64list.getChildSignature() , self._conn)
        print
        print "** Get signature for each 1/8 part for '+' :::"
        print dumps( self.b64list.getChild8Signature("+") , self._conn)
        print
        print "** Get all hashes for '+%' part 0 :::"
        print dumps( self.b64list.getNodeHashList([ "+" + opt for opt in "+/012345"]) , self._conn)
        print
        print "** Get all hashes for 'zc%' 'zD%' :::"
        print dumps( self.b64list.getNodeHashValue(["zc","zD"]) , self._conn)
        print
        """
    def getFileName(self,filename):
        """Return the file's bz2-compressed content, base64-encoded.

        Returns None for paths not present in the index.  The compressed
        payload is cached alongside the source file and regenerated when
        missing or older than the source.
        """
        if filename not in self.filelist:
            return None
        fullpath = self.filelist[filename]
        pathhead, pathtail = os.path.split(fullpath)
        cachepath = os.path.join(pathhead, ".%s.llampexcache" % pathtail)
        mtime = os.stat(fullpath).st_mtime
        try:
            mtime2 = os.stat(cachepath).st_mtime
            # Deliberate control flow: a stale cache raises ValueError to
            # fall into the regeneration branch below.
            if mtime2 < mtime: raise ValueError
            f1 = open(cachepath)
            zipcontent = f1.read()
            f1.close()
        except Exception:
            print "creating cache for", filename
            # NOTE(review): the handle from open(fullpath) is never closed.
            filecontent = open(fullpath).read()
            zipcontent = bz2.compress(filecontent,9)
            f1 = open(cachepath,"w")
            f1.write(zipcontent)
            f1.close()
        b64content = b64encode(zipcontent)
        return b64content
    def getFileTree(self):
        # Expose the hash index itself as an RPC sub-handler.
        return self.b64list
    def getDirectLinks(self):
        """Return the user's saved "directlinks" list for this project.

        Falls back to an empty list when the row is missing or unreadable.
        """
        links = None
        try:
            config = model.session.query(
                model.RowUserConfig).filter(model.RowUserConfig.user==self.user).filter(
                model.RowUserConfig.project==self.data.code).filter(
                model.RowUserConfig.configname=="directlinks").one()
            # NOTE(review): yaml.load without SafeLoader on stored data;
            # prefer yaml.safe_load.
            links = yaml.load(config.value)
        except:
            # NOTE(review): bare except also hides programming errors;
            # intended only to cover "row not found" / parse failure.
            links = []
        return links
    def updateDirectLinks(self,links):
        """Persist *links* (YAML-serialized) in the user's "directlinks"
        config row, creating the row on first use."""
        config = None
        try:
            config = model.session.query(
                model.RowUserConfig).filter(model.RowUserConfig.user==self.user).filter(
                model.RowUserConfig.project==self.data.code).filter(
                model.RowUserConfig.configname=="directlinks").one()
        except:
            # Row not found (or query failed): create a fresh one.
            # NOTE(review): bare except; a narrower NoResultFound would do.
            config = model.RowUserConfig()
            config.user = self.user
            config.project = self.data.code
            config.configname = "directlinks"
            model.session.add(config)
        config.value = yaml.dump(links)
        model.session.commit()
def connect_project(rpc,project,username):
print "connecting to project", project
print "as user", username
if project.host is None: project.host = model.engine.url.host
if project.port is None: project.port = model.engine.url.port
if project.user is None: project.user = model.engine.url.username
if project.password is None: project.password = model.engine.url.password
try:
conn = psycopg2.connect(
database = project.db,
user = project.user,
password = project.password,
host = project.host,
port = project.port
)
if not conn:
raise ValueError, "connection invalid!"
except psycopg2.OperationalError, e:
print e.__class__.__name__, repr(e.args[:])
raise ServerError,"DatabaseConnectionError"
except Exception, e:
print e.__class__.__name__, repr(e.args[:])
raise ServerError, "Some unknown error ocurred trying to connect to the project. Check logs at server."
projectmanager = ProjectManager(rpc, project, username, conn)
return projectmanager
def validate_password(userpass, dbpass):
hashmethod, hashsalt, hashdigest = dbpass.split("$")
if hashmethod in ("md5","sha1"):
binsalt = binascii.a2b_hex(hashsalt)
userdigest = compute_password_hexhash(userpass, hashmethod, binsalt)
else:
print "Unknown hashmethod %s" % repr(hashmethod)
return False
if userdigest == hashdigest: return True
if verbose:
print "Password validation failed:"
print "User supplied:", userdigest
print "Database supplied:", hashdigest
return False
def compute_password(userpass, hashmethod = "sha1", saltsize = 4):
hashsalt = hashlib.sha1(userpass + hashmethod + os.urandom(32)).hexdigest()[:saltsize*2]
if hashmethod in ("md5","sha1"):
binsalt = binascii.a2b_hex(hashsalt)
userdigest = compute_password_hexhash(userpass, hashmethod, binsalt)
else:
print "Unknown hashmethod %s" % repr(hashmethod)
return False
return "$".join([hashmethod,hashsalt,userdigest])
def compute_password_hexhash(userpass, hashmethod, binsalt):
    """Return the hex digest of binsalt + userpass under *hashmethod*.

    Supported methods are "md5" and "sha1"; anything else raises
    ValueError.
    """
    saltedpass = binsalt + str(userpass)
    # Dispatch table instead of an if/elif chain.
    factories = {"md5": hashlib.md5, "sha1": hashlib.sha1}
    if hashmethod not in factories:
        raise ValueError("Unsupported hashmethod '%s' " % repr(hashmethod))
    hasher = factories[hashmethod]()
    hasher.update(saltedpass)
    return hasher.hexdigest()
| {"/engine/model/table_users.py": ["/engine/model/__init__.py"], "/engine/model/table_projectusers.py": ["/engine/model/__init__.py"], "/engine/model/table_userconfigs.py": ["/engine/model/__init__.py"], "/engine/model/table_projects.py": ["/engine/model/__init__.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.