text
stringlengths
38
1.54M
def str_together(in_string):
    """Given a list of word strings, return a single string with all the
    strings together, with ' - ' in between the words."""
    pieces = [str(item) for item in in_string]
    return " - ".join(pieces)


fruits = ['Orange', 'Lemon', 'Lime', 'Cherry', 'Peach', 'Apricot']
fruit_string = str_together(fruits)
print(fruit_string)
# NOTE(review): `commands` is a Python 2-only module and is not used in this
# function; `sys` is likewise unused here -- confirm whether other code in
# this module needs them.
import commands
import sys
from Utility import (
    wlog)

def Estimate_Cutoff_and_Annotate_Artifact(conf_dict,logfile):
    """Select the best (MQS, WCQS) cutoff pair whose false-positive rate meets
    the user's limit, then annotate every sample as PASS or Artifact.

    Reads two files under conf_dict['General']['outputdirectory']:
      - Format_False_Positive_Table.txt: rows whose first column looks like
        '(MQS,WCQS)' and whose last column is an FPR.
      - Samples_MQS_and_WeightedCombinedQualityScore.txt: per-sample scores.
    Writes 'Real_cutoff.txt' (chosen cutoff row, or a sentinel line when no
    row meets the FPR limit) and, when a cutoff exists,
    'MyQC_All_Samples_QC_information.txt' with a 'QC' column appended.

    :param conf_dict: nested config dict; uses ['General']['outputdirectory']
                      and ['General']['max_fpr'].
    :param logfile: log file handle/path passed through to wlog on error rows.
    """
    format_FPR_file = conf_dict['General']['outputdirectory']+'Format_False_Positive_Table.txt'
    user_Max_FPR_cutoff = conf_dict['General']['max_fpr']
    all_samples_MQS_WCQS_file = conf_dict['General']['outputdirectory']+'Samples_MQS_and_WeightedCombinedQualityScore.txt'
    # Rows whose FPR is within the user limit: [fraction_artifacts, raw line].
    meet_criteria_list = []
    infile = open(format_FPR_file,'r')
    header = infile.readline()
    line = infile.readline()
    while(line):
        # Last column of each row is the false-positive rate.
        FPR = float(line.split()[-1])
        if(FPR <= float(user_Max_FPR_cutoff)):
            # Third column: fraction of artifacts caught; used as sort key.
            fraction_artifacts = float(line.split()[2])
            meet_criteria_list.append([fraction_artifacts,line])
        else:
            pass
        line = infile.readline()
    infile.close()
    if(len(meet_criteria_list)>0):
        # Sort ascending then reverse: the first entry is the qualifying row
        # with the LARGEST fraction of artifacts detected.
        meet_criteria_list.sort()
        meet_criteria_list.reverse()
        outfile = open(conf_dict['General']['outputdirectory']+'Real_cutoff.txt','w')
        outfile.write(header)
        outfile.write(meet_criteria_list[0][1])
        outfile.close()
        # The first column of the chosen row has the form '(MQS,WCQS)';
        # peel the parentheses and split on the comma.
        MQS_Cutoff = float(meet_criteria_list[0][1].split()[0].split('(')[1].split(')')[0].split(',')[0])
        WCQS_Cutoff = float(meet_criteria_list[0][1].split()[0].split('(')[1].split(')')[0].split(',')[1])
        infile = open(all_samples_MQS_WCQS_file,'r')
        header = infile.readline()
        header = header.rstrip()+'\t'+'QC'+'\n'
        outfile = open(conf_dict['General']['outputdirectory']+'MyQC_All_Samples_QC_information.txt','w')
        outfile.write(header)
        line = infile.readline()
        while(line):
            outfile.write(line.rstrip()+'\t')
            # Second column: sample category label.  NOTE(review): `type`
            # shadows the builtin of the same name.
            type = line.split()[1]
            QC = ''
            if(type=='MainPopulationCell'):
                # Main-population cells always pass QC.
                QC = 'PASS'
            elif(type=='GeneExpressionOutlier'):
                # Outliers pass only when BOTH scores exceed their cutoffs.
                MQS = float(line.split()[-2])
                WCQS = float(line.split()[-1])
                if(MQS <= MQS_Cutoff or WCQS <= WCQS_Cutoff):
                    QC = 'Artifact'
                else:
                    QC = 'PASS'
            else:
                # Unknown category: log it; the QC column is left empty.
                wlog('Error',logfile)
            outfile.write(QC+'\n')
            line = infile.readline()
        infile.close()
        outfile.close()
    else:
        # No cutoff satisfied the FPR limit; record a sentinel instead.
        outfile = open(conf_dict['General']['outputdirectory']+'Real_cutoff.txt','w')
        outfile.write('None of MQS_WCQS_meet_criteria'+'\n')
        outfile.close()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse


class Formatter(argparse.HelpFormatter):
    """Help formatter honouring explicit line breaks in texts prefixed 'R|'.

    Texts starting with 'R|' are split on their embedded newlines (the
    RawTextHelpFormatter behaviour); everything else falls back to the
    default HelpFormatter wrapping.
    """

    def _split_lines(self, text, width):
        if not text.startswith('R|'):
            return argparse.HelpFormatter._split_lines(self, text, width)
        # Drop the 'R|' marker and keep the author's own line breaks.
        return text[2:].splitlines()
import helper
import gold


def parse_content(content):
    '''Convert input into a dictionary word: number.

    Each line of ``content`` is expected to hold a word and a frequency
    separated by whitespace.
    '''
    content_dict = {word: float(freq)
                    for string in content.splitlines()
                    for word, freq in [string.split()]}
    return content_dict


def make_tree(words):
    '''Creates trie-like nested dictionary.

    Each character maps to a child dict; a terminal key of '$' + word stores
    the word's frequency at the leaf.

    Bug fix: the original created child nodes as set() -- a set cannot hold
    the terminal key/value pair -- and rebound the root variable while
    descending, so the returned object was not the trie root and insertion
    raised TypeError.
    '''
    words_trie = {}
    for key, value in words.items():
        node = words_trie  # descend with a cursor; keep the root intact
        for char in key:
            node = node.setdefault(char, {})
        node['$' + key] = value
    return words_trie


def predict(tree, numbers):
    # TODO: exercise stub -- return words in `tree` matching `numbers`.
    return {}


if __name__ == '__main__':
    content = helper.read_content(filename='ngrams-10k.txt')
    # When you've finished implementing a part, remove the `gold.` prefix to check your own code.
    # PART 1: Parsing a string into a dictionary.
    words = gold.parse_content(content)
    # PART 2: Building a trie from a collection of words.
    tree = gold.make_tree(words)
    while True:
        # PART 3: Predict words that could follow
        numbers = helper.ask_for_numbers()
        predictions = gold.predict(tree, numbers)
        if not predictions:
            print('No words were found that match those numbers. :(')
        else:
            for prediction, frequency in predictions[:10]:
                print(prediction, frequency)
        response = input('Want to go again? [y/N] ')
        again = response and response[0] in ('y', 'Y')
        if not again:
            break
from bs4 import BeautifulSoup
from selenium import webdriver

stock_file = r"C:\Users\Tut10\Desktop\PSTool-Python\Final\test.txt"


def Costco_Shipping(url):
    """This opens up chrome. Scans through the webpage for the out of stock
    class, reports whether the item is in stock and appends the result to
    ``stock_file``. Pretty simple.

    :param url: Costco product page URL.
    :returns: tab-indented status string for display.
    """
    driver = webdriver.Chrome(r"C:\Users\Tut10\Desktop\PSTool-Python\chromedriver.exe")
    driver.get(url)
    file_to_write = open(stock_file, "a")
    try:
        x = driver.find_element_by_class_name("out-of-stock")
        if x:
            file_to_write.write("[-] Out of stock " + url + '\n')
            return "\t\t[-] Out of Stock"
    except Exception:
        # Bug fix: was a bare `except:` which also swallowed SystemExit /
        # KeyboardInterrupt. The element being absent (raising) is the
        # in-stock signal here.
        return "\t\t[+] In Stock"
    finally:
        # Bug fix: always release the browser and file handle, whichever
        # path we return on.
        driver.quit()
        file_to_write.close()


def Home_Depot_Shipping(url):
    """This opens up chrome. Scans through the webpage for the id buybelt
    and reads the delivery text inside it, classifying the item as free
    delivery, express delivery, or out of stock. Results other than free
    delivery are appended to ``stock_file``.

    :param url: Home Depot product page URL.
    :returns: tab-indented status string, or None when scraping fails.
    """
    driver = webdriver.Chrome(r"C:\Users\Tut10\Desktop\PSTool-Python\chromedriver.exe")
    driver.get(url)
    # open a file to write to
    file_to_write = open(stock_file, "a")
    free_delivery = driver.find_elements_by_xpath(r'//*[@id="buybelt"]/div[2]/div[2]/div/div[2]')
    try:
        text = free_delivery[0].text
        if "Free Delivery" in text:
            return "\t\t[+] Free Delivery"
        # Bug fix: the original `elif "Get it..." or "Schedule..." in text:`
        # tested the truthiness of a non-empty string literal, so the branch
        # ALWAYS matched and the out-of-stock case was unreachable.
        elif ("Get it as soon as tomorrow" in text
              or "Schedule delivery to your home or jobsite" in text):
            file_to_write.write("[*] Express Delivery " + url + "\n")
            return "\t\t[*] Express Delivery"
        else:
            file_to_write.write("[-] Out of stock %s\n" % url)
            return "\t\t[-] Out of stock!"
    except Exception as e:
        # Scrape failure (e.g. the buybelt element list is empty).
        print(e)
    finally:
        # Bug fix: the original leaked the driver/file on the exception path.
        driver.quit()
        file_to_write.close()


def Kohls_Shipping(url):
    # TODO: not implemented yet.
    pass


def BedBathBeyond_Shipping(url):
    # TODO: not implemented yet.
    pass
from django.test import TestCase
from mayerapi.models import Payment, Loan, Client
from django.db.utils import IntegrityError
from datetime import datetime
from decimal import Decimal
import os.path
from mayerapi.tests.utils import (
    create_client_from_model,
    create_loan_from_model,
    create_payment_from_model
)


class TestPaymentModel(TestCase):
    """Unit tests for the Payment model."""

    def setUp(self) -> None:
        # Bug fix: the factory functions were assigned (never called) and
        # bound to locals, so no fixtures existed on self for the tests.
        self.client = create_client_from_model()
        self.loan = create_loan_from_model()
        self.payment = create_payment_from_model()

    def test_payment_instance(self):
        expected_date = '2022-01-03'
        expected_amount = 400
        self.assertIsInstance(self.payment, Payment)
        self.assertIsInstance(self.payment.loan, Loan)
        # Bug fix: assertEqual was called with a single keyword argument
        # (invalid); compare expected and actual values positionally.
        self.assertEqual(expected_amount, self.payment.amount)
        self.assertEqual(self.loan, self.payment.loan)
        self.assertEqual(expected_date, self.payment.date)

    def test_validate_raises_exception(self):
        with self.assertRaises(ValueError):
            self.payment.validate()

    def test_payment__str__(self):
        self.assertEqual(str(self.payment), str(self.payment.id))


class TestLoanModel(TestCase):
    """Unit tests for the Loan model."""

    def setUp(self) -> None:
        # Bug fix: factories were not called (see TestPaymentModel.setUp).
        self.client = create_client_from_model()
        self.loan = create_loan_from_model()

    def test_loan_instance(self):
        expected_amount = 3000
        expected_term = 1
        expected_rate = 0.21
        expected_date = "2022-06-12"
        self.assertIsInstance(self.loan, Loan)
        # Bug fix: the loan itself is not a Client; its related client is.
        # TODO(review): confirm the relation attribute is `client`.
        self.assertIsInstance(self.loan.client, Client)
        self.assertEqual(expected_amount, self.loan.amount)
        self.assertEqual(expected_term, self.loan.term)
        self.assertEqual(expected_rate, self.loan.rate)

    def test_loan__str__(self) -> None:
        self.assertEqual(str(self.loan), str(self.loan.id))

    def test_installment(self) -> None:
        # Bug fix: `for loan in Loan:` iterated the model class, which is
        # not iterable; iterate the queryset instead.
        for loan in Loan.objects.all():
            with self.subTest(
                name=f' rate:{loan.rate},amount:{loan.amount}, term:{loan.term}'
            ):
                rate = Decimal(loan.rate)
                term = Decimal(loan.term)
                amount = Decimal(loan.amount.replace(",", ""))
                expected_installment = Decimal(loan.installment.replace(",", ""))
                actual_loan = create_loan_from_model(
                    self.client, rate=rate, term=term, amount=amount
                )
                # Bug fix: compare the computed installment, not the whole
                # Loan instance, against the expected installment.
                self.assertEqual(expected_installment, actual_loan.installment)

    def test_interest_rate(self):
        with self.assertRaises(ValueError):
            Loan.interest_rate(self.loan.client, 0.21)


class TestClientModel(TestCase):
    """Unit tests for the Client model."""

    def setUp(self) -> None:
        self.client = create_client_from_model()

    def test_client_instance(self) -> None:
        expected_name = 'Nicholas'
        expected_surnname = 'Wabera'
        expected_email = 'nicholaswabera@gmail.com'
        expected_telephone = "+254712165970"
        expected_cuid = 'MDQ2134XYWE'
        self.assertIsInstance(self.client, Client)
        self.assertIsInstance(self.client.date, datetime)
        self.assertEqual(expected_name, self.client.name)
        self.assertEqual(expected_surnname, self.client.surnname)
        self.assertEqual(expected_email, self.client.email)
        self.assertEqual(expected_telephone, self.client.telephone)
        self.assertEqual(expected_cuid, self.client.cuid)

    def test_client_instance_blank_telephone(self):
        client = create_client_from_model()
        # Bug fix: assertEqual was called with one argument; compare the
        # instance's telephone against blank.
        # TODO(review): confirm the factory's blank-telephone default is ''.
        self.assertEqual(client.telephone, "")

    def test_client_instance_unique_cuid(self):
        # Bug fix: the assertRaises block was empty (and swallowed the next
        # test definition); creating a second client with the same cuid must
        # violate the unique constraint.
        with self.assertRaises(IntegrityError):
            create_client_from_model()

    def test_client__str__(self):
        self.assertEqual(str(self.client), str(self.client.id))
import math
import numpy
import os
import config
from itertools import combinations

#file_path = '%stest_out2.txt' % config.data_directory

def get_slim_metadata(file_path):
    """Read a comma-separated key,value metadata file into a dict.

    Keys listed in ``to_int`` are parsed as int; everything else as float.
    """
    metadata_dict = {}
    to_int = ['population_size', 'genome_size', 'number_generations', 'tract_length']
    with open(file_path) as file:
        for line in file:
            line_split = line.strip().split(',')
            if line_split[0] in to_int:
                metadata_dict[line_split[0]] = int(line_split[1])
            else:
                metadata_dict[line_split[0]] = float(line_split[1])
    return metadata_dict

def read_slim_outputFull(file_path):
    """Parse a SLiM outputFull dump into mutation/individual/genome tables.

    The file lists its sections in the order Populations, Mutations,
    Individuals, Genomes; each header line flips the matching flag on, so
    the flag combination identifies which section the current line is in.

    :returns: (mutation_frequency_dict, individual_frequency_dict,
               genomes_dict, population_size) where mutation_frequency_dict
               maps mutation id -> [position, fitness, time_origin,
               frequency, set of carrier genome ids].
    """
    populations = False
    mutations = False
    individuals = False
    genomes = False
    #population_sizes_dict = {}
    mutation_dict = {}
    individual_count_dict = {}
    genomes_dict = {}
    with open(file_path) as file:
        for line in file:
            line_split = line.strip().split(' ')
            if 'Populations:' in line_split:
                populations=True
                continue
            if 'Mutations:' in line_split:
                mutations=True
                continue
            if 'Individuals:' in line_split:
                individuals=True
                continue
            if 'Genomes:' in line_split:
                genomes=True
                continue
            # Mutations section:
            if (populations == True) and (mutations == True) and (individuals == False) and (genomes == False):
                # position, fitness, time_origin, frequency
                # start with zero, add counts based on individuals and genomes data
                mutation_dict[line_split[0]] = [int(line_split[3]), float(line_split[4]), int(line_split[7]), 0]
            # Individuals section:
            if (populations == True) and (mutations == True) and (individuals == True) and (genomes == False):
                # NOTE(review): population_id is computed but never used.
                population_id = line_split[2].split(':')[0]
                # Skip individuals with a zero count.
                if int(line_split[4]) == 0:
                    continue
                individual_count_dict[line_split[2]] = int(line_split[4])
            # Genomes section:
            if (populations == True) and (mutations == True) and (individuals == True) and (genomes == True):
                # only get genomes that are present in the final sample
                if line_split[0] in individual_count_dict:
                    genome_mutations = [str(mutation) for mutation in line_split[2:] ]
                    # Accumulate carrier counts into the mutation's last slot.
                    for genome_mutation in genome_mutations:
                        mutation_dict[genome_mutation][-1] += individual_count_dict[line_split[0]]
                    genomes_dict[line_split[0]] = set( genome_mutations )
    population_size = sum(individual_count_dict.values())
    mutation_frequency_dict = {}
    individual_frequency_dict = {}
    #mutation_population_dict = {}
    for key in mutation_dict:
        # Convert the raw carrier count into a frequency in [0, 1].
        key_copy = mutation_dict[key]
        key_copy[-1] = key_copy[-1]/population_size
        # add empty list, fill with population labels later
        key_copy.append(set())
        mutation_frequency_dict[key] = key_copy
        #mutation_population_dict[key] = set()
    # NOTE(review): mutation_ids (the dict value) is unused in this loop.
    for key, mutation_ids in individual_count_dict.items():
        individual_frequency_dict[key] = individual_count_dict[key]/population_size
    # Record, for every mutation, the set of genomes that carry it.
    for genome_id, value in genomes_dict.items():
        for site_id in value:
            mutation_frequency_dict[str(site_id)][-1].add(genome_id)
    return mutation_frequency_dict, individual_frequency_dict, genomes_dict, population_size

#def filter_slim_dicts(mutation_frequency_dict, selection_coefficient=float(0)):

def calculate_unbiased_sigmasquared(mutation_frequency_dict, individual_frequency_dict, genomes_dict, population_size, genome_size, synonymous=True, delta_l=1000):
    """Bin pairs of segregating sites by distance and compute, per distance
    bin, the ratio sum((f12 - f1*f2)^2) / sum(f1(1-f1)f2(1-f2)) -- an
    unbiased sigma-squared linkage statistic.

    NOTE(review): the ``synonymous`` and ``delta_l`` parameters are currently
    unused in the body -- confirm whether filtering by them was intended.

    :returns: (distances, unbiased_sigmasquared_sums) as numpy arrays, one
              entry per distance bin with a non-zero denominator.
    """
    # identify pairs of sites within delta_l
    #sliding window of 0.2 log units?
    #genome_size = 100000
    gene_size = 1000
    # Log-spaced distance bins: coarse bins from 100 up to gene_size,
    # prepended with finer bins for 10..100 and a [1, 6) leading bin.
    gene_bins = numpy.logspace(2, numpy.log10(gene_size), num=50, base=10.0)
    gene_bins = numpy.floor(gene_bins)
    mid_gene_bins = numpy.logspace(1, 2, num=15, base=10.0, endpoint=False)
    mid_gene_bins = numpy.floor(mid_gene_bins)
    early_gene_bins = numpy.asarray([1, 6])
    gene_bins = numpy.insert(gene_bins, 0, mid_gene_bins, axis=0)
    gene_bins = numpy.insert(gene_bins, 0, early_gene_bins, axis=0)
    # Per-bin accumulators for the statistic's numerator and denominator.
    gene_bins_dict = {}
    for gene_bin_idx in range(0, len(gene_bins)-1):
        gene_bins_dict[gene_bins[gene_bin_idx]] = {}
        gene_bins_dict[gene_bins[gene_bin_idx]]['unbiased_sigmasquared_numerator'] = []
        gene_bins_dict[gene_bins[gene_bin_idx]]['unbiased_sigmasquared_denominator'] = []
    # Walk the genome one gene-sized window at a time.
    for range_i in range(0, genome_size, gene_size):
        gene_positions = range(range_i, range_i+gene_size)
        mutations_to_keep = [key for key, value in mutation_frequency_dict.items() if value[0] in gene_positions]
        # arbitrary minimum number of sites
        if len(mutations_to_keep) < 3:
            continue
        mutation_id_pairs = combinations(mutations_to_keep, 2)
        for mutation_id_pair in mutation_id_pairs:
            # Carrier-genome sets for each mutation of the pair.
            mutation_individuals_1 = mutation_frequency_dict[mutation_id_pair[0]][-1]
            mutation_individuals_2 = mutation_frequency_dict[mutation_id_pair[1]][-1]
            mutation_id_pair_distance = abs(mutation_frequency_dict[mutation_id_pair[0]][0] - mutation_frequency_dict[mutation_id_pair[1]][0])
            # Skip pairs closer than the smallest bin edge.
            if mutation_id_pair_distance < early_gene_bins[0]:
                continue
            # Joint carrier frequency of the pair.
            f_12 = len(mutation_individuals_1.intersection(mutation_individuals_2)) /population_size
            if f_12 ==0:
                continue
            f_1 = mutation_frequency_dict[mutation_id_pair[0]][-2]
            f_2 = mutation_frequency_dict[mutation_id_pair[1]][-2]
            unbiased_sigmasquared_numerator = (f_12 - (f_1 * f_2)) ** 2
            unbiased_sigmasquared_denominator = f_1 * (1-f_1) * f_2 * (1-f_2)
            # dont look at mutation pairs where both mutations occur at the same site
            if mutation_id_pair_distance == 0:
                continue
            # Locate the bin the distance falls into via the sign change of
            # (bin_edge - distance) across consecutive edges.
            sign_change_locations = (numpy.diff(numpy.sign( gene_bins - mutation_id_pair_distance )) != 0)*1
            bin_location = numpy.where(sign_change_locations==1)[0][0]
            gene_bins_dict[gene_bins[bin_location]]['unbiased_sigmasquared_numerator'].append(unbiased_sigmasquared_numerator)
            gene_bins_dict[gene_bins[bin_location]]['unbiased_sigmasquared_denominator'].append(unbiased_sigmasquared_denominator)
            #if mutation_id_pair_distance == 0:
            #    print( mutation_frequency_dict[mutation_id_pair[0]][0] , mutation_frequency_dict[mutation_id_pair[1]][0] )
            #    print( mutation_id_pair, mutation_id_pair_distance, f_12)
            #print(mutation_frequency_dict[mutation_pair[0]], mutation_frequency_dict[mutation_pair[1]])
    distances = []
    unbiased_sigmasquared_sums = []
    for key, value in gene_bins_dict.items():
        #if key < 13.0:
        #    print(key, value['unbiased_sigmasquared_denominator'])
        # Bins with no observations would divide by zero; skip them.
        if sum(value['unbiased_sigmasquared_denominator']) == 0:
            continue
        unbiased_sigmasquared = sum(value['unbiased_sigmasquared_numerator']) / sum(value['unbiased_sigmasquared_denominator'])
        distances.append(key)
        unbiased_sigmasquared_sums.append(unbiased_sigmasquared)
    distances = numpy.asarray(distances)
    unbiased_sigmasquared_sums = numpy.asarray(unbiased_sigmasquared_sums)
    return distances, unbiased_sigmasquared_sums
# Print the multiplication tables for 2 through 9.
for left in range(2, 10):
    for right in range(1, 10):
        print('{} * {} = {}'.format(left, right, left * right))
# -*- coding: utf-8 -*-
import pandas as pd

__author__ = "Asim Krticic"


class Service(object):
    """Subscription analytics over a warehouse event data frame.

    Each public method accepts the warehouse data frame plus optional
    ``utm_medium`` / ``utm_campaign`` filters and returns an aggregate
    (count or data frame) describing subscription behaviour.
    """

    @staticmethod
    def _utm_filter(df, utm_medium, utm_campaign):
        """Build the optional utm_* boolean mask.

        Fix for duplicated logic: the four-way if/elif chain combining
        utm_medium/utm_campaign conditions was copy-pasted into every public
        method; it now lives here once.

        :returns: a boolean Series, or None when neither filter is given.
        """
        mask = None
        if utm_medium is not None:
            mask = df["utm_medium"] == utm_medium
        if utm_campaign is not None:
            campaign_mask = df["utm_campaign"] == utm_campaign
            mask = campaign_mask if mask is None else (mask & campaign_mask)
        return mask

    def _users_with_both(self, df, type_a, type_b, utm_medium, utm_campaign):
        """Return user_ids having at least two events of type_a/type_b
        (after optional utm filtering) -- the shared pattern behind
        from_trial_to_paid and customer_churn."""
        cond = (df["type"] == type_a) | (df["type"] == type_b)
        extra = self._utm_filter(df, utm_medium, utm_campaign)
        if extra is not None:
            cond = cond & extra
        counts = df[cond].groupby(['user_id']).size().reset_index(name='counts')
        return list(counts[counts['counts'] >= 2]['user_id'])

    def __calculate_subscription_period(self, x):
        """
        Accepts a joined row. Calculates the subscription period for one user.

        Rows merged from both event frames use the cancel date; rows present
        only on the 'start' side use today's date as the implicit end.

        :param x: one row of the outer-joined start/cancel frame
        :returns: the row with subscription_* columns added
        """
        # user has both start and cancel events: use them directly
        if x['_merge'] == 'both':
            x['subscription_year'] = int(abs(x['year_cancel']-x['year_start']))
            x['subscription_month'] = int(
                abs(x['month_cancel']-x['month_start']))
            x['subscription'] = str(
                int(x['subscription_year'])*12
                + int(x['subscription_month'])) + ' months'
        # user only has a start event: measure against the current date
        elif x['_merge'] == 'left_only':
            x['subscription_year'] = abs(
                pd.to_datetime('today').year-x['year_start'])
            x['subscription_month'] = abs(pd.to_datetime(
                'today').month-x['month_start'])
            x['subscription'] = str(
                int(x['subscription_year'])*12
                + int(x['subscription_month'])) + ' months'
        return x

    def customer_subscriptions(self, df, utm_medium=None, utm_campaign=None):
        """
        Accepts the warehouse data frame and optional utm_medium /
        utm_campaign filters; returns a data frame with the subscription
        period per user (empty frame when nothing matches).

        :param df: warehouse data frame
        :returns: data frame with columns user_id, subscription
        """
        started = df["type"] == 'Subscription Started'
        cancelled = df["type"] == 'Subscription Cancelled'
        extra = self._utm_filter(df, utm_medium, utm_campaign)
        if extra is not None:
            started = started & extra
            cancelled = cancelled & extra

        subscription_started = df[started]
        subscription_cancelled = df[cancelled]

        # Outer-join start/cancel events per user; `indicator` marks whether
        # a cancel event exists ('both') or not ('left_only').
        subscribed_df = pd.merge(subscription_started, subscription_cancelled,
                                 how='outer', on='user_id',
                                 suffixes=('_start', '_cancel'),
                                 indicator=True)
        subscribed_df = subscribed_df.apply(
            lambda row: self.__calculate_subscription_period(row), axis=1)

        if len(subscribed_df) > 0:
            return subscribed_df[['user_id', 'subscription']]
        return pd.DataFrame(columns=['user_id', 'subscription'])

    def non_trial_customer(self, df, utm_medium=None, utm_campaign=None):
        """
        Count distinct users with either a Signup Completed or a
        Subscription Started event (after optional utm filtering).

        :param df: warehouse data frame
        :returns: int number of users
        """
        cond = ((df["type"] == 'Signup Completed')
                | (df["type"] == 'Subscription Started'))
        extra = self._utm_filter(df, utm_medium, utm_campaign)
        if extra is not None:
            cond = cond & extra
        grouped = df[cond].groupby(['user_id']).size().reset_index(name='counts')
        return len(grouped)

    def from_trial_to_paid(self, df, utm_medium=None, utm_campaign=None):
        """
        Users with both Trial Started and Subscription Started events:
        their Subscription Started events grouped by year and month.

        :param df: warehouse data frame
        :returns: data frame with columns year, month, counts
        """
        user_ids = self._users_with_both(
            df, 'Trial Started', 'Subscription Started',
            utm_medium, utm_campaign)
        return df[(df['user_id'].isin(user_ids))
                  & (df['type'] == 'Subscription Started')].groupby(
                      ['year', 'month']).size().reset_index(name='counts')

    def customer_churn(self, df, utm_medium=None, utm_campaign=None):
        """
        Users with both Subscription Started and Subscription Cancelled
        events: their cancellations grouped by year and month.

        :param df: warehouse data frame
        :returns: data frame with columns year, month, counts
        """
        user_ids = self._users_with_both(
            df, 'Subscription Started', 'Subscription Cancelled',
            utm_medium, utm_campaign)
        return df[(df['user_id'].isin(user_ids))
                  & (df['type'] == 'Subscription Cancelled')].groupby(
                      ['year', 'month']).size().reset_index(name='counts')
from collections import OrderedDict
import torch.distributed as dist
import torch
import torch.nn as nn


def parse_losses(losses):
    """Reduce a dict of loss tensors (or lists of tensors) to scalars.

    Every key containing 'loss' is summed into a total under the key
    'loss'. Under distributed training each value is all-reduced and
    averaged across workers before being converted to a Python float.

    Returns (total_loss_tensor, ordered dict of float log values).
    """
    log_vars = OrderedDict()
    for name, value in losses.items():
        if isinstance(value, torch.Tensor):
            log_vars[name] = value.mean()
        elif isinstance(value, list):
            log_vars[name] = sum(item.mean() for item in value)
        else:
            raise TypeError(
                f'{name} is not a tensor or list of tensors')

    total = sum(v for k, v in log_vars.items() if 'loss' in k)
    log_vars['loss'] = total

    for name in list(log_vars):
        value = log_vars[name]
        # Average across workers when distributed training is active.
        if dist.is_available() and dist.is_initialized():
            value = value.data.clone()
            dist.all_reduce(value.div_(dist.get_world_size()))
        log_vars[name] = value.item()

    return total, log_vars


def set_requires_grad(models, requires_grad=False):
    """Toggle gradient tracking for a model or a list of models (None
    entries are skipped)."""
    model_list = models if isinstance(models, list) else [models]
    for model in model_list:
        if model is None:
            continue
        for param in model.parameters():
            param.requires_grad = requires_grad


def build_mlp(fcs):
    """Build an MLP from a list/tuple of channel sizes: Linear layers with
    ReLU(inplace) between them and no activation after the last layer."""
    layers = []
    last_pair = len(fcs) - 2
    for idx in range(len(fcs) - 1):
        layers.append(nn.Linear(fcs[idx], fcs[idx + 1]))
        if idx < last_pair:
            layers.append(nn.ReLU(inplace=True))
    return nn.Sequential(*layers)


if __name__ == '__main__':
    print(build_mlp((2, 3, 4)))
def encode(json, schema):
    """Copy the option fields from the ``json`` dict onto a fresh
    ``schema.Main`` payload object and return it."""
    payload = schema.Main()
    for option in ('cjs', 'mainFields', 'mode', 'force', 'cache', 'sourceMap'):
        setattr(payload, option, json[option])
    return payload


def decode(payload):
    """Inverse of encode: expose the payload's attributes as a plain dict."""
    return payload.__dict__
def draw_c(size, row):
    """Print one row of the letter C, followed by a size-wide gap."""
    if row in (0, size - 1):
        print("* " * size, end="")
    else:
        print("* ", end=" " * (size - 1))
    print(end=" " * size)


def draw_o(size, row):
    """Print one row of the letter O, followed by a size-wide gap."""
    if row in (0, size - 1):
        print("* " * size, end="")
    else:
        print("* " + " " * (size - 2), end="* ")
    print(end=" " * size)


def draw_d(size, row):
    """Print one row of the letter D, followed by a size-wide gap."""
    if row in (0, size - 1):
        print("* " * (size - 1), end=" ")
    else:
        print("* " + " " * (size - 2), end="* ")
    print(end=" " * size)


def draw_e(size, row):
    """Print one row of the letter E (extra middle bar), then a gap."""
    if row in (0, size - 1):
        print("* " * size, end="")
    elif row == (size // 2):
        print("* " * (size - 1), end=" ")
    else:
        print("* ", end=" " * (size - 1))
    print(end=" " * size)


# Draw the word CODE one row at a time at an odd overall height.
n = 2 * int(input("Enter size of drawing (<=5) :")) + 1
for line in range(n):
    draw_c(n, line)
    draw_o(n, line)
    draw_d(n, line)
    draw_e(n, line)
    print()
# Task 1:
# Write a function that returns the Fibonacci series from the n-th to the
# m-th element.  The first two elements of the series are 1 1.

def fibonacci(n, m):
    pass

#TODO:
def fibonacci(n, m):
    """Return the Fibonacci numbers at 1-based positions n through m."""
    current, following = 1, 1
    result = []
    for position in range(1, m + 1):
        if position >= n:
            result.append(current)
        current, following = following, current + following
    return result

print(fibonacci(10, 20))


# Task 2:
# Write a function that sorts the given list in ascending order using any
# algorithm (e.g. bubble sort).  The built-in sort()/sorted() must not be used.

def sort_to_max(origin_list):
    pass

sort_to_max([2, 10, -12, 2.5, 20, -11, 4, 4, 0])

#TODO:
def sort_to_max(origin_list):
    """In-place bubble sort, ascending; returns the same list object."""
    pass_number = 1
    while pass_number < len(origin_list):
        # Each pass bubbles the largest remaining element to the end.
        for idx in range(len(origin_list) - pass_number):
            if origin_list[idx] > origin_list[idx + 1]:
                origin_list[idx], origin_list[idx + 1] = origin_list[idx + 1], origin_list[idx]
        pass_number += 1
    return origin_list

print(sort_to_max([2, 10, -12, 2.5, 20, -11, 4, 4, 0]))


# Task 3:
# Write your own implementation of the standard filter function.
# Naturally, the filter function itself must not be used inside.
#TODO:
lost = [99,99, 99,99,4, 2, 10, -12, 101, 2.5, 20, 7, 3, -11,4,4,4,0]

def filt(arg, obj):
    """Print obj, a separator line, and obj with every element equal to
    arg removed."""
    print(obj)
    print('=' * 60)
    kept = []
    for element in obj:
        if element != arg:
            kept.append(element)
    print(kept)

filt(4, lost)


# Task 4:
# Given four points A1(x1, y1), A2(x2, y2), A3(x3, y3), A4(x4, y4),
# determine whether they are the vertices of a parallelogram.
#TODO:
def check_parallelogramm(x1, y1, x2, y2, x3, y3, x4, y4):
    """Print whether A1A2A3A4 is a parallelogram (vector A1A2 == A4A3)."""
    delta_y_12 = y2 - y1
    delta_x_23 = x3 - x2
    delta_y_43 = y3 - y4
    delta_x_14 = x4 - x1
    if delta_y_12 == delta_y_43 and delta_x_23 == delta_x_14:
        print('Это параллелограмм')
    else:
        print('Не параллелограмм')

check_parallelogramm(1, 1, 1, 4, 4, 4, 4, 1)
# Webots youBot controller: a time/GPS-driven state machine that picks up
# repair materials, drives a fixed waypoint route and deposits the load.
from controller import Robot, Supervisor, Field, Node
from Arm import *
from Gripper import *
from Base import *
import numpy as np
import math

robot = Supervisor()
timestep = int(robot.getBasicTimeStep())

# Initialize the base, arm and gripper of the youbot robot
base = Base(robot)
arm = Arm(robot)
gripper = Gripper(robot)

# Enable compass/gps modules
compass = robot.getDevice('compass')
compass.enable(timestep)
gps = robot.getDevice('gps')
gps.enable(timestep)

# Initialize waypoints and state machine initial state
waypoints = [(22.26, 24.61), (22.2, 24.6), (22.03, 26.06), (26.0, 26.4), (28.7, 25.0)]#(25.5, 25.0),
current_waypoint = waypoints.pop(0)
state = 'lower arm'

# Establish the gains for feedback controls
x_gain = 1.5
theta_gain = 2.0

# Define function to determine if we are at the end waypoint for feedback control navigation.
def is_endpoint(x, y):#(25.5, 25.0)
    """Return True when (x, y) is within tolerance of the final waypoint
    (28.7, 25.0).  NOTE(review): the third positional argument of
    numpy.isclose is rtol -- confirm a relative (not absolute) tolerance
    of 0.01 is intended."""
    if np.isclose(x, 28.7, 0.01) and np.isclose(y, 25.0, 0.01):
        return True
    else:
        return False

# Main entry point
while (robot.step(timestep) != -1):
    # Get robot pose values
    coord = gps.getValues()
    bearing = compass.getValues()
    pose_x = coord[0]
    # World Y comes from the GPS z-axis in this simulation setup.
    pose_y = coord[2]
    # Heading derived from the compass vector, rotated a quarter turn.
    pose_theta = -math.atan2(bearing[0], bearing[2])+math.pi/2#-1.5708)

    # Initial state: robot moves into position to grab the repair materials
    if state == 'lower arm':
        gripper.release()
        arm.pick_up()
        # Timing constant: arm lowering is assumed complete after 3 s.
        if robot.getTime() > 3.0:
            state = 'grab'
    # Second state: robot grabs repair materials from the 'shelf'
    elif state == 'grab':
        pose_x = gps.getValues()[0]
        pose_y = gps.getValues()[2]
        dist_error = math.sqrt(math.pow(pose_x - current_waypoint[0], 2) + math.pow(pose_y - current_waypoint[1], 2))
        # Proportional drive toward the first waypoint.
        base.base_forwards(dist_error * x_gain)
        if dist_error <= 0.01:
            base.base_stop()
            gripper.grip()
            current_waypoint = waypoints.pop(0)
            state = 'lift'
    # Third state: robot lifts repair materials off of 'shelf'
    elif state == 'lift':
        arm.lift()
        # Timing constant: the lift is assumed finished at t = 43.8 s.
        if robot.getTime() > 43.8:
            state = 'drive'
    # Fourth state: robot drives the repair materials to desired location
    elif state == 'drive':
        # Compute bearing and distance error
        bearing_error = pose_theta + math.atan2(current_waypoint[1] - pose_y, current_waypoint[0] - pose_x)
        dist_error = math.sqrt(math.pow(pose_x - current_waypoint[0], 2) + math.pow(pose_y - current_waypoint[1], 2))
        # Advance to the next waypoint once close enough to the current one.
        if dist_error <= 0.1 and len(waypoints) != 0:
            current_waypoint = waypoints.pop(0)
        # Compute velocity for wheels
        x_prime = dist_error * x_gain
        theta_prime = abs(bearing_error) * theta_gain
        velocity = theta_prime + theta_gain
        # Feedback control
        if(bearing_error > 0.01):
            base.base_turn_left(velocity)
        elif(bearing_error < -0.01):
            base.base_turn_right(velocity)
        elif (not waypoints and dist_error < 0.1 and is_endpoint(pose_x, pose_y)):
            # All waypoints consumed and the final target reached.
            state = 'get_pos'
            base.base_stop()
        else:
            v = x_prime + x_gain
            base.base_forwards(v)
    # Fifth state: robot lines up with damaged section
    elif state == 'get_pos':
        goal_theta = 0.0
        # Rotate until roughly facing goal_theta, then creep forward to the
        # drop-off point (28.78, 25.14).
        if (pose_theta - goal_theta) > 0.04:
            base.base_turn_left(1.0)
        else:
            base.base_stop()
            current_waypoint = (28.78, 25.14)
            dist_error = math.sqrt(math.pow(pose_x - current_waypoint[0], 2) + math.pow(pose_y - current_waypoint[1], 2))
            if dist_error > 0.2:
                base.base_forwards(0.5)
            else:
                base.base_stop()
                state = 'put_down'
    # Sixth state: robot positions repair materials relative to the damaged section
    elif state == 'put_down':
        starttime = robot.getTime()
        arm.drop()
        # Timing constant: the drop motion is assumed done at t = 163.488 s.
        if starttime > 163.488:
            state = 'finally'
    # Final state: Robot releases repair materials
    elif state == 'finally':
        gripper.release()
# Generated by Django 2.1.1 on 2018-11-07 13:06 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('companies', '0008_companycategory'), ] operations = [ migrations.AlterModelOptions( name='company', options={'verbose_name': 'Company', 'verbose_name_plural': 'Companies'}, ), migrations.AlterModelOptions( name='companycategory', options={'verbose_name': 'Company Category', 'verbose_name_plural': 'Company Categories'}, ), migrations.AddField( model_name='companycategory', name='company', field=models.OneToOneField(default=1, on_delete=django.db.models.deletion.DO_NOTHING, related_name='company_category', to='companies.Company'), preserve_default=False, ), ]
import os
import pty
import time
import select

# Create two pseudo-terminal pairs and relay bytes between their master
# ends, effectively wiring the two slave devices together.
first_master, first_slave = pty.openpty()
first_master_name, first_slave_name = os.ttyname(first_master), os.ttyname(first_slave)

second_master, second_slave = pty.openpty()
second_master_name, second_slave_name = os.ttyname(second_master), os.ttyname(second_slave)

print('{} -> {}'.format(first_slave_name, second_slave_name))

while True:
    # Block until at least one master side has data to forward.
    readable, _, _ = select.select([first_master, second_master], [], [])
    for descriptor in readable:
        if descriptor == first_master:
            os.write(second_master, os.read(first_master, 1024))
        else:
            os.write(first_master, os.read(second_master, 1024))
    time.sleep(0.1)
import sounddevice as sd
import numpy as np


class AudioHandler():
    """Thin wrapper around sounddevice for grabbing short recordings."""

    def __init__(self, duration=1, sample_rate=48000, device=0):
        """Configure sounddevice defaults and remember the record length.

        duration    -- recording length in seconds
        sample_rate -- samples per second (default 48 kHz)
        device      -- sounddevice device index
        """
        sd.default.samplerate = sample_rate
        sd.default.device = device
        sd.default.channels = 2
        self.fs = sample_rate
        self.duration = duration

    def get_recording(self, channel=0):
        """Record self.duration seconds and return one channel as uint8s.

        sounddevice returns float samples, nominally in [-1.0, 1.0]; each
        is mapped to [0, 255].
        """
        recording = sd.rec(self.duration*self.fs, blocking=True)
        # BUG FIX: the original used np.uint8((sample + 1) * 128), which
        # silently wraps for a full-scale sample ((1.0 + 1) * 128 == 256
        # -> 0) and for anything outside [-1, 1].  Clamp to [0, 255]
        # instead; in-range values are unchanged (same truncation).
        array = [np.uint8(min(255, max(0, int((sample[channel] + 1) * 128))))
                 for sample in recording]
        return array

    def get_sample(self, index=0):
        """Record once and return the single converted sample at `index`."""
        recording = self.get_recording()
        return recording[index]


if __name__ == "__main__":
    audio = AudioHandler()
    print(audio.get_sample())
# Generated by Django 3.2 on 2021-07-19 11:46 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('communities', '0001_initial'), ('quizzes', '0001_initial'), ] operations = [ migrations.CreateModel( name='Tournament', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('time_created', models.DateTimeField(auto_now_add=True)), ('time_updated', models.DateTimeField(auto_now=True)), ('name', models.CharField(max_length=100)), ('is_active', models.BooleanField(default=False)), ('community', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tournaments', to='communities.community')), ], options={ 'db_table': 'tournaments', }, ), migrations.CreateModel( name='Round', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('start_time', models.DateTimeField()), ('finish_time', models.DateTimeField()), ('quiz', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='quizzes.quiz')), ('tournament', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rounds', to='tournaments.tournament')), ], options={ 'db_table': 'rounds', }, ), migrations.AddIndex( model_name='tournament', index=models.Index(fields=['time_created'], name='tournaments_time_cr_ccbf5a_idx'), ), migrations.AddIndex( model_name='round', index=models.Index(fields=['finish_time'], name='rounds_finish__8ea428_idx'), ), ]
#!/usr/bin/env python

"""
model enumerator for the SDD package

Recursively enumerates the satisfying assignments (models) of an SDD
node with respect to a vtree.  Python 2 code (xrange, it.next(), print
statements) built on the SWIG bindings of the SDD C library.
"""

import sdd

def elements_as_list(node):
    # Flatten the C array of a decomposition node's elements into a
    # Python list of alternating [prime, sub, prime, sub, ...] entries
    # (hence 2*size items).
    size = sdd.sdd_node_size(node)
    elements = sdd.sdd_node_elements(node)
    return [ sdd.sddNodeArray_getitem(elements,i) for i in xrange(2*size) ]

def models(node,vtree):
    """A generator for the models of an SDD.

    Each model is a dict mapping variable index -> 0/1.  `node` may be
    the Python literal True (meaning the constant true SDD) or an sdd
    node handle; `vtree` bounds the set of variables enumerated.
    """
    if sdd.sdd_vtree_is_leaf(vtree):
        # Base case: a single variable.
        var = sdd.sdd_vtree_var(vtree)
        if node is True or sdd.sdd_node_is_true(node):
            # True over one variable: both assignments are models.
            yield {var:0}
            yield {var:1}
        elif sdd.sdd_node_is_false(node):
            # NOTE(review): false has no models, yet this yields one
            # empty model -- confirm callers rely on this behavior.
            yield {}
        elif sdd.sdd_node_is_literal(node):
            lit = sdd.sdd_node_literal(node)
            sign = 0 if lit < 0 else 1
            yield {var:sign}
    else:
        # Internal vtree node: combine models of the left and right parts.
        left_vtree = sdd.sdd_vtree_left(vtree)
        right_vtree = sdd.sdd_vtree_right(vtree)
        if node is True or sdd.sdd_node_is_true(node): # sdd is true
            # Cross product of all left models with all right models.
            for left_model in models(True,left_vtree):
                for right_model in models(True,right_vtree):
                    yield _join_models(left_model,right_model)
        elif sdd.sdd_node_is_false(node): # sdd is false
            # NOTE(review): same empty-model question as the leaf case.
            yield {}
        elif sdd.sdd_vtree_of(node) == vtree: # enumerate prime/sub pairs
            # Decomposition node normalized for exactly this vtree:
            # models are the union over (prime, sub) element pairs.
            #elements = sdd.sdd_node_elements(node)
            elements = elements_as_list(node)
            for prime,sub in _pairs(elements):
                # A false sub contributes no models for its prime.
                if sdd.sdd_node_is_false(sub): continue
                for left_model in models(prime,left_vtree):
                    for right_model in models(sub,right_vtree):
                        yield _join_models(left_model,right_model)
        else: # gap in vtree
            # The node is normalized for a vtree strictly below this one:
            # recurse on the side that contains it and treat the other
            # side as unconstrained (True).
            if sdd.sdd_vtree_is_sub(sdd.sdd_vtree_of(node),left_vtree):
                for left_model in models(node,left_vtree):
                    for right_model in models(True,right_vtree):
                        yield _join_models(left_model,right_model)
            else:
                for left_model in models(True,left_vtree):
                    for right_model in models(node,right_vtree):
                        yield _join_models(left_model,right_model)

def _join_models(model1,model2):
    """Join two models.

    The two dicts cover disjoint variable sets (left/right vtree halves),
    so a plain copy+update merges them without conflicts.
    """
    model = model1.copy()
    model.update(model2)
    return model

def _pairs(my_list):
    """A generator for (prime,sub) pairs.

    Consumes the flat [p0, s0, p1, s1, ...] element list two items at a
    time; assumes an even number of entries (Python 2 it.next()).
    """
    if my_list is None: return
    it = iter(my_list)
    for x in it:
        y = it.next()
        yield (x,y)

def str_model(model,var_count=None):
    """Convert model to string.

    Emits the 0/1 value of variables 1..var_count separated by spaces;
    raises KeyError if the model lacks any of those variables.
    """
    if var_count is None:
        var_count = len(model)
    return " ".join( str(model[var]) for var in xrange(1,var_count+1) )

if __name__ == '__main__':
    # Smoke test: build the clause (-1 v -2 v ... v -10) and count its
    # models by enumeration (should match the SDD library's model count).
    var_count = 10
    vtree = sdd.sdd_vtree_new(var_count,"balanced")
    manager = sdd.sdd_manager_new(vtree)

    alpha = sdd.sdd_manager_false(manager)
    for var in xrange(1,var_count+1):
        lit = sdd.sdd_manager_literal(-var,manager)
        alpha = sdd.sdd_disjoin(alpha,lit,manager)

    vt = sdd.sdd_manager_vtree(manager)
    model_count = 0
    for model in models(alpha,vt):
        model_count += 1
        print str_model(model,var_count=var_count)

    #lib_mc = sdd.sdd_model_count(alpha,manager)
    print "model count: %d" % model_count

    sdd.sdd_manager_free(manager)
    sdd.sdd_vtree_free(vtree)
from django.urls import path
# NOTE(review): `url` is imported but not used in the patterns below --
# confirm nothing else in this file needs it, then drop the import.
from django.conf.urls import url

from . import views

# URL namespace, e.g. reverse('core:signup1').
app_name='core'

urlpatterns=[
    # Two-step signup flow handled by class-based views.
    path('signup1/',views.SignUp1.as_view(),name='signup1'),
    path('signup2/',views.SignUp2.as_view(),name='signup2'),
]
""" Date: December 15, 2015 Author: Laura Buchanan and Alex Simonoff This module set all the functions and helper functions used by the main program. """ import time import datetime from Visualization import * weekdays = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday'] months = ['January','February','March','April','May','June','July','August','September','October','November','December'] hours = ['Midnight - 1AM', '1AM - 2AM', '2AM - 3AM', '3AM - 4AM', '4AM - 5AM', '5AM - 6AM', '6AM - 7AM', '7AM - 8AM', '8AM - 9AM', '9AM - 10AM', '10AM - 11AM', '11AM - 12PM', '12PM - 1PM', '1PM - 2PM', '2PM - 3PM', '3PM - 4PM', '4PM - 5PM', '5PM - 6PM', '6PM - 7PM', '7PM - 8PM', '8PM - 9PM', '9PM - 10PM', '10PM - 11PM', '11PM - Midnight'] short_months = ['jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec'] short_days = ['mon','tue','wed','thu','fri','sat','sun'] short_times = range(24) valid_zips = [10001, 10002, 10003, 10004, 10005, 10007, 10009, 10010, 10011, 10012, 10013, 10014, 10016, 10017, 10018, 10019, 10021, 10022, 10023, 10024, 10028, 10036, 10038, 10069, 10103, 10119, 10128, 10167, 10168, 10199, 10278, 10280, 10282, 11101, 11201, 11205, 11206, 11211, 11213, 11216, 11217, 11221, 11222, 11233, 11238] sleep_time = .2 # This function says goodbye and ends the program def goodbye(): break_line() print "OK! Have a nice day!\n" # This function asks the user if they would like to perform another search def search_again(): break_line() print "Would you like to perform another search?" status = 0 while status == 0: again_input = raw_input("Please type 'yes' or 'no' and hit 'Enter'\n") again_input = clean_input(again_input) if again_input == 'yes' or again_input == 'y': end_program = 0 break_line() print "OK!" status = 1 elif again_input == 'no' or again_input == 'n' or again_input == 'exit' or again_input == 'quit': end_program = 1 status = 1 else: break_line() print "Sorry! I didn't understand that." 
status = 0 return end_program ## This function asks the user if they would like to save the graphs def save_check(): break_line() print "Would you like to save the results of the bike availability search?" print "Please type 'yes' or 'no' and hit 'Enter'" status = 0 while status == 0: saveVal = raw_input("Save:") saveVal = clean_input(saveVal) if saveVal == 'yes' or saveVal == 'y': saveVal = 'Y' return saveVal elif saveVal == 'no' or saveVal == 'n': saveVal = 'N' return saveVal else: print "\nOops! Didn't understand that." print "Would you like to save the results?." break_line() status = 0 # This function does the bike availibility look up and # prints the vizulization to the screen def bike_lookup(weekday,month,time,zip_code): saveVar = save_check() break_line() print "Awesome!" print "Please wait while the graphs load..." #print "\nHere are the probable number of bikes at the stations in zip code " + str(zip_code) + ":\n" visual = visualization(zip_code, month, weekday, time) visual.barPlot(saveVar) print "Plotting, please wait ..." plt.clf() visual.basemapPlot(saveVar) plt.show() # This function determines zip_code to look over def where_check(): print "Now, what is your 5-digit zip code?" status = 0 while status == 0: try: zip_code = input("Zip Code:\n") if len(str(zip_code))==5: if zip_code in valid_zips: status = status_good() return zip_code else: status = data_error() else: status = len_error() except ValueError: status = int_error() except NameError: status = int_error() except SyntaxError: status = int_error() # Add a visual division on screen def break_line(): print "\n***********************************\n" # Error message when input is an integer as needed def int_error(): break_line() print "Oops! Make sure you format your zip code as an integer." return status_error() # Error message when we don't have data requested def data_error(): break_line() print "I'm sorry! We don't have data for that zip code." 
print "Try one of these NYC zip codes with Citi Bike data:" print "e.g. 10001, 10014, 10036, 10168, or 11222" return status_error() # Error message when zip code isn't 5 digits def len_error(): break_line() print "\nOops! Make sure your zip code is 5 digits." return status_error() # Sets status value when there is an error def status_error(): status = 0 return status # Sets status value when user input collected correctly def status_good(): status = 1 return status # This function determines which day of the week we should search over def day_check(): break_line() print "OK! What day of the week do you want to ride a Citi Bike?" print "Please type the first 3 letter of the day you're interested in." status = 0 while status == 0: day = raw_input("Day of the Week:\n") day = clean_input(day) day = day[0:3] if day in short_days: status = 1 day = short_days.index(day) return day else: break_line() print "\nOops! Didn't understand that." print "Please type just the first 3 letters of the weekday you're interested in." break_line() status = 0 # This function determines which month we should search over def month_check(): break_line() print "OK! What month do you want to ride a Citi Bike?" print "Please type the first 3 letter of the month you're interested in." status = 0 while status == 0: month = raw_input("Month:\n") month = clean_input(month) month = month[0:3] if month in short_months: status = 1 month = short_months.index(month) return month else: break_line() print "\nOops! Didn't understand that." print "Please type just the first 3 letters of the month you're interested in." break_line() status = 0 # This function determines which hour we should search over def time_check(): break_line() print "OK! What time do you want to ride a Citi Bike?" print "We will search over a given hour, in military time." print "e.g. 
Type '0' for Midnight - 1AM, '10' for 10AM - 11AM, or '14' for 2PM - 3PM" status = 0 while status == 0: time = raw_input("Time:\n") time = time.strip() try: time = int(time) if time in short_times: status = 1 time = short_times.index(time) return time else: break_line() print "\nOops! Didn't understand that." print "Please type the hour you're interested in, in military time." break_line() status = 0 except: print "\nOops! Didn't understand that." print "Please type the hour you're interested in, in military time." break_line() status = 0 # This function determines whether we should look up bike availability # now or at some other time def when_check(): when_status = 0 while when_status == 0: print "Are you checking how many bikes might be availible right now?" now_input = raw_input("Please type 'yes' or 'no' and hit 'Enter' \n") now_input = clean_input(now_input) if now_input == 'yes' or now_input == 'y': time_info = datetime.datetime.now() weekday = weekdays[datetime.datetime.today().weekday()] month = time_info.strftime("%B") time = hours[time_info.hour] break_line() print ("\nGreat! We will look up probable bike availibility near you on " + weekdays[datetime.datetime.today().weekday()] + "s, in " + time_info.strftime("%B") + ", between " + hours[time_info.hour] + ".\n") break_line() when_status = 1 month = months.index(month)+1 time = hours.index(time) return (weekday,month,time) elif now_input == 'no' or now_input == 'n': weekday = day_check() month = month_check()+1 time = time_check() break_line() print ("\nGreat! We will look up probable bike availibility near you on " + weekdays[weekday] + "s, in " + months[month] + ", between " + hours[time] + ".\n") break_line() when_status = 1 weekday = weekdays[weekday] return (weekday,month,time) else: print "Hmm, didn't quite understand that..." 
when_status = 0 # This function removes whitespace and lowercasese input from user def clean_input(user_input): user_input = user_input.strip() user_input = user_input.lower() return user_input # This function welcomes the user to the program, asks if they would like # to read and introduction, and prints an introduction if they type 'yes' def intro_sequence(): break_line() break_line() print "Welcome to Citi Bike Station Check!" break_line() break_line() time.sleep(sleep_time) print "Is this your first time using Citi Bike Station Check?\n" print "Please type 'yes' if you would like to read an introduction" print "or 'no' to proceed to the program.\n" intro = 1 while intro == 1: intro_input = raw_input("Please type 'yes' or 'no' and hit 'Enter' \n") intro_input = clean_input(intro_input) break_line() if intro_input == 'yes' or intro_input == 'y': time.sleep(sleep_time) print "Welcome!\n" print "This program let's you check how many Citi Bikes are likely" print "to be available at your local Citi Bike stations...\n" time.sleep(sleep_time) print "This way, if there are probably no bikes availible, you don't" print "even have to leave your apartment before you know whether you" print "can bike! ...\n" time.sleep(sleep_time) print "This program is especially useful if you plan to bike with a" print "group of people, you will have an idea if enough bikes will" print "be available! ...\n" time.sleep(sleep_time) print "The output of this program are the locations of your nearby" print "bike stations and how many bikes we expect to be available" print "based on past data. Past weather data has been incorporated" print "into our prediction...\n" time.sleep(sleep_time) print "We hope you find this product useful!" break_line() time.sleep(sleep_time) intro = 0 elif intro_input == 'no' or intro_input == 'n': intro = 0 else: print "Hmm, didn't quite understand that..." print "Is this your first time using Citi Bike Station Check?" 
print "Enter 'yes' if you would like to read an introduction." intro = 1 print "OK! Let's begin!\n"
from django.shortcuts import render,redirect,render_to_response from .models import Petition_info,User import os # Create your views here. def create(request): return render(request,'creator.html',{'UserEmail':'Anonymous'}) def handle_uploaded_file(f,n): destination = open(os.path.dirname(os.path.dirname(__file__))+'/static/images/uploads/P'+str(n)+'.jpg','wb+') for chunk in f.chunks(): destination.write(chunk) return def first_page(request): arry1 = [100,500,20] x=[] ximg=[] petition = Petition_info.objects.all() print petition for i in petition: x.append(str(i.title)) return render_to_response('first_page.html',{'array1': x,'UserEmail':'Anonymous'}) def save_create(request): print "hEAA" if request.method == 'POST': s1=Petition_info() s1.title=request.POST['title'] s1.to_whom=request.POST['to_whom'] s1.what_is=request.POST['what_is'] s1.desc=request.POST['desc'] s1.signers=0 f = open('Petition_num.txt','r') n = f.readline().strip('\n') f.close() f = open('Petition_num.txt','w') lol = int(n) + 1 f.write(str(lol)) f.close() s1.num=n s1.users="" print request.FILES if('img' in request.FILES): print(request.FILES['img'].size) img_size=request.FILES['img'].size/1024 if(img_size<=1024): # for 50 kb s1.img= request.FILES['img'] handle_uploaded_file(s1.img,n) #S1.imgd s1.save() print s1 print s1 return redirect('/petition/'+str()) return redirect('/') def open_petition(request,num1): print "the num is " + num1 petition = Petition_info.objects.get(num=num1) return render(request,'open_petition.html',{'num':num1,'signers':petition.signers,'title':petition.title,'to_whom':petition.to_whom,'what_is':petition.what_is,'desc':petition.desc,'img_src':'/static/images/uploads/P'+num1+'.jpg','UserEmail':'Anonymous'}) def sign_petition(request,num1): print 'hi' if request.method=='POST': petition = Petition_info.objects.get(num=num1) petition.signers=petition.signers+1 print request.POST['userEmail'] user=User.objects.get(email=request.POST['userEmail']) x=petition.users print x 
y=[int(i) for i in x[1:].split(',')] if(user.usernum in y): return render(request,'open_petition.html',{'UserEmail':request.POST['userEmail'],'printer':"signeduser();",'signers':petition.signers-1,'num':num1,'title':petition.title,'to_whom':petition.to_whom,'what_is':petition.what_is,'desc':petition.desc,'img_src':'/static/images/uploads/P'+num1+'.jpg'}) petition.users=petition.users+','+str(user.usernum) petition.save() print 'hello' return redirect('/') def create_user(request): print "hi" if request.method=='GET': return redirect('/') if request.method=='POST': print "hello" num1=request.POST['num2'] petition = Petition_info.objects.get(num=num1) sign_num=petition.signers email1=request.POST['user_email'] password1=request.POST['user_pass'] f = open('User_num.txt','r') n = f.readline().strip('\n') f.close() f = open('User_num.txt','w') lol = int(n) + 1 f.write(str(lol)) f.close() num_results = (User.objects.filter(email = email1)).count() print num_results if(num_results>0): return render(request,'open_petition.html',{'UserEmail':'Anonymous','printer':"alertgive();",'signers':sign_num,'num':num1,'title':petition.title,'to_whom':petition.to_whom,'what_is':petition.what_is,'desc':petition.desc,'img_src':'/static/images/uploads/P'+num1+'.jpg'}) s1=User(usernum=n,email=email1,password=password1) s1.save() return render(request,'open_petition.html',{'UserEmail':email1,'num':num1,'title':petition.title,'to_whom':petition.to_whom,'what_is':petition.what_is,'desc':petition.desc,'img_src':'/static/images/uploads/P'+num1+'.jpg','signers':sign_num}) return redirect('/') def check(request): if request.method=='POST': num1=request.POST['num1'] petition = Petition_info.objects.get(num=num1) email1=request.POST['email'] password1=request.POST['pass'] sign_num=petition.signers user = User.objects.filter(email = email1) print user if(user.count()>0 and user[0].password==password1): return 
render(request,'open_petition.html',{'UserEmail':email1,'signers':sign_num,'num':num1,'title':petition.title,'to_whom':petition.to_whom,'what_is':petition.what_is,'desc':petition.desc,'img_src':'/static/images/uploads/P'+num1+'.jpg'}) else: return render(request,'open_petition.html',{'UserEmail':'Anonymous','printer':"invalidpass();",'signers':sign_num,'num':num1,'title':petition.title,'to_whom':petition.to_whom,'what_is':petition.what_is,'desc':petition.desc,'img_src':'/static/images/uploads/P'+num1+'.jpg'}) return render(request,'open_petition.html',{'UserEmail':'Anonymous','num':num1,'signers':sign_num,'title':petition.title,'to_whom':petition.to_whom,'what_is':petition.what_is,'desc':petition.desc,'img_src':'/static/images/uploads/P'+num1+'.jpg'})
'''
indexer for suche Search Engine
copyright (c) 2014 Suche

Builds the inverted index: for each crawled page, records outgoing
links and per-word Result rows scored by a simple heuristic urlpoint.
'''
from indexer.models import SucheURL,Link,Word,Result
from indexer.htmlparser import HTMLParser
from crawler.models import *

class Indexer:
    # Processes one raw crawl row at a time: call set_raw(), then operate().

    def set_raw(self,raw):
        '''
        sets the raw data row to which the indexer is to operate.

        raw is expected to carry .new_data (HTML), .old_data, .url and
        an .operated flag -- presumably a crawler CrawlData-like row;
        TODO confirm against crawler.models.
        '''
        self.raw = raw

    def operate(self):
        '''
        operate on the data. This will extract links, etc
        First of all, we have to revert back the changes of previous website data
        for eg reduce the number of links, reduce word counts, etc.
        Then, we have to apply the changes due to new data.
        Finally, move the new data to old data and set operated to true

        Returns parser.get_info().
        '''
        parser = HTMLParser(self.raw.new_data, self.raw.url)
        parser.parse()
        #TODO: parser will detect error in the HTML document. If the HTML
        # is ill formed or less than certain length, ignore it.
        #now extract information from the parse and update database
        urls = []
        # we filter out the valid URLs from the list generated by the html parser
        # for example, we want to save only the URLs from certain domains
        # for testing purpose
        filteredurls = []
        for url,text in parser.get_links():
            # Cap URL length at 200 chars and apply domain validation.
            if len(url) < 200:
                if SucheURL.isvalid(url):
                    filteredurls.append((url,text))
        for url,text in filteredurls:
            urls.append(text+"-"+url)
            newurl,created = SucheURL.objects.get_or_create(url = url)
            if created:
                #create the crawl record for the newly generated url
                newdata = CrawlData(url = newurl)
                newdata.save()
        #now go on to create the links
        thisurl = SucheURL.objects.get(url = self.raw.url)
        # actually, we retrieve the old links, and compare them with the new links
        # to leave the older links and delete or add new links only as needed
        # but for now, we empty the table and add the links again
        Link.objects.filter(fromurl = thisurl).delete()
        for url,text in filteredurls:
            desturl = SucheURL.objects.get(url = url)
            # Skip self-links.
            if desturl != thisurl:
                link = Link(fromurl = thisurl, tourl = desturl, text = text)
                link.save()
        #if any new words are found in the document, add them to the words table
        for word in parser.get_word_dict().keys():
            wordrow, created = Word.objects.get_or_create(word = word)
        # now get the rank of the current URL for the words and place it in index if it has higher rank than
        # the current URLs or if the current URL list is incomplete
        for word in parser.get_word_dict().keys():
            wordobj = Word.objects.get(word = word)
            # NOTE(review): wordcount is incremented but wordobj is never
            # save()d in this method, so the increment appears to be lost
            # -- confirm whether a save is missing.
            wordobj.wordcount += 1
            #delete any previous result
            Result.objects.filter(word = wordobj, url = thisurl).delete()
            #create a result for that word
            result = Result(word = wordobj, url = thisurl)
            # get the word rank for this page
            result.wordrank = parser.word_rank(word)
            # count the number of times this word appear in the title of the website
            result.titlewordcount = parser.title_word_count(word)
            #count the number of times this word has been linked with text containing this word
            result.linkswordcount = Link.objects.filter(tourl = thisurl, text__icontains = word).count()
            # we ignore the user rank for now
            #calculate the url point as (each contribution is capped with min(),
            # not max() as an earlier comment claimed):
            # urlpoint = wordrank + min(6, No of words in title) + min(6, 2 * no of words in URL) + min(20, linkswordcount / 10)
            result.urlpoint = result.wordrank + min(6, result.titlewordcount) + min( 6, 2 * thisurl.url.count(word)) + min( 20, result.linkswordcount / 10)
            # if the url doesnot contain any title, reduce the URL point as it can be ajax script or comment section
            if len(parser.get_title()) < 5:
                result.urlpoint -= 6
            #if there are not enough results for the word or if the current
            # URL is better than the previous URLs, save the result
            if Result.objects.filter(word = wordobj).count() < 100:
                # if less result, simple add this result
                result.save()
            else:
                # if the current URL is better than the worst result, delete the worst result and add this result
                worstresult = Result.objects.filter(word = wordobj).order_by('urlpoint')[0]
                if result.urlpoint > worstresult.urlpoint:
                    worstresult.delete()
                    result.save()
        # set the data as operated: archive new_data into old_data
        # NOTE(review): the bare except silently swallows any save error,
        # leaving the row un-marked so it may be re-indexed -- confirm
        # this best-effort behavior is intended.
        try:
            self.raw.operated = True
            self.raw.old_data = self.raw.new_data
            self.raw.new_data = ''
            self.raw.save()
        except:
            pass #do nothing in case of error
        # Refresh the URL row's displayed title/body from the new parse.
        self.raw.url.title = parser.get_title()
        self.raw.url.body = parser.get_content()
        self.raw.url.save()
        return parser.get_info()
import numpy as np


def equation3(theta):
    """Evaluate equation 2.3 from the book at an angle given in degrees.

    Parameters
    ----------
    theta : float or array_like
        Angle in degrees.

    Returns
    -------
    Value of f(theta) = 250*cos(t)*(sin(t) + sqrt(sin(t)^2 + 0.08)) - 200,
    where t is theta expressed in radians.
    """
    t = np.deg2rad(theta)          # work in radians
    s = np.sin(t)
    c = np.cos(t)
    return 250.0 * c * (s + np.sqrt(s * s + 0.08)) - 200.0
# Dependencies import os import sys import numpy as np import tensorflow as tf import time import cv2 """ tf.data.Dataset-- The dataset consists of elements of the same structure. Each element has one or more tf.Tensor objects, which are called components. Each component has tf.DType (type of elements of tensor) and tf.TensorShape (fully/partially defined shape of element) tf.data.Dataset.output_types and tf.data.Dataset.output_shapes allow to inspect these properties, as inferred from the input data Datasets can be nested and the constituents of the dataset can be named """ print('\n\n####################################################################################################') print('DATASETS') print('####################################################################################################') # Simple datasets-- dataset_d_1 = tf.data.Dataset.from_tensor_slices(np.random.random([4, 10])) # Batch of 4 entries print('[INFO] The dataset d_1 has elements with types : ' + str(dataset_d_1.output_types)) print('[INFO] The dataset d_1 has elements with shapes : ' + str(dataset_d_1.output_shapes)) # [INFO] The dataset d_1 has elements with types : <dtype: 'float64'> # [INFO] The dataset d_1 has elements with shapes : (10,) dataset_d_2 = tf.data.Dataset.from_tensor_slices(np.array([1, 2, 3, 4])) print('[INFO] The dataset d_2 has elements with types : ' + str(dataset_d_2.output_types)) print('[INFO] The dataset d_2 has elements with shapes : ' + str(dataset_d_2.output_shapes)) # [INFO] The dataset d_2 has elements with types : <dtype: 'int64'> # [INFO] The dataset d_2 has elements with shapes : () dataset_d_3 = tf.data.Dataset.from_tensor_slices(tf.truncated_normal([10, 15])) # 10 sized batch with 15 dim features! 
print('[INFO] The dataset d_3 has elements with types : ' + str(dataset_d_3.output_types)) print('[INFO] The dataset d_3 has elements with shapes : ' + str(dataset_d_3.output_shapes)) # [INFO] The dataset d_3 has elements with types : <dtype: 'float32'> # [INFO] The dataset d_3 has elements with shapes : (15,) # Simulating real-life datasets : 100 entries with features of size 20 and 100 labels. Include the constituents as a tuple dataset_d_4 = tf.data.Dataset.from_tensor_slices((tf.truncated_normal([100, 20], mean = 0.0, stddev = 0.1), tf.constant(np.array([int(x) for x in range(100)]).astype(np.float32)))) print('[INFO] The dataset d_4 has elements with types : ' + str(dataset_d_4.output_types)) print('[INFO] The dataset d_4 has elements with shapes : ' + str(dataset_d_4.output_shapes)) # [INFO] The dataset d_4 has elements with types : (tf.float32, tf.float32) # [INFO] The dataset d_4 has elements with shapes : (TensorShape([Dimension(20)]), TensorShape([])) # Nested datasets dataset_d_5 = tf.data.Dataset.zip((dataset_d_1, dataset_d_3)) print('[INFO] The dataset d_5 has elements with types : ' + str(dataset_d_5.output_types)) print('[INFO] The dataset d_5 has elements with shapes : ' + str(dataset_d_5.output_shapes)) # Naming the constituents of the dataset. 
In this case, we need to pass the named constituents as key-valued pairs dataset_d_6 = tf.data.Dataset.from_tensor_slices({ 'feats_1' : tf.truncated_normal([100, 20], mean = 0.0, stddev = 0.1) , 'labels_1' : tf.constant(np.array([int(x) for x in range(100)]).astype(np.float32)) }) print('[INFO] The dataset d_6 has elements with types : ' + str(dataset_d_6.output_types)) print('[INFO] The dataset d_6 has elements with shapes : ' + str(dataset_d_6.output_shapes)) # [INFO] The dataset d_6 has elements with types : {'feats_1': tf.float32, 'labels_1': tf.float32} # [INFO] The dataset d_6 has elements with shapes : {'feats_1': TensorShape([Dimension(20)]), 'labels_1': TensorShape([])} dataset_d_7 = tf.data.Dataset.from_tensor_slices({ 'feats_1' : tf.truncated_normal([1000, 20], mean = 0.0, stddev = 0.1) , 'labels_1' : tf.constant(np.array([int(x) for x in range(1000)]).astype(np.float32)) }) print('[INFO] The dataset d_7 has elements with types : ' + str(dataset_d_7.output_types)) print('[INFO] The dataset d_7 has elements with shapes : ' + str(dataset_d_7.output_shapes)) # [INFO] The dataset d_7 has elements with types : {'feats_1': tf.float32, 'labels_1': tf.float32} # [INFO] The dataset d_7 has elements with shapes : {'feats_1': TensorShape([Dimension(20)]), 'labels_1': TensorShape([])} dataset_d_8 = tf.data.Dataset.zip({ 'dataset_constituent_1' : dataset_d_6 , 'dataset_constituent_7' : dataset_d_7 }) print('[INFO] The dataset d_8 has elements with types : ' + str(dataset_d_8.output_types)) print('[INFO] The dataset d_8 has elements with shapes : ' + str(dataset_d_8.output_shapes)) # [INFO] The dataset d_8 has elements with types : {'dataset_constituent_1': {'feats_1': tf.float32, 'labels_1': tf.float32}, 'dataset_constituent_7': {'feats_1': tf.float32, 'labels_1': tf.float32}} # [INFO] The dataset d_8 has elements with shapes : {'dataset_constituent_1': {'feats_1': TensorShape([Dimension(20)]), 'labels_1': TensorShape([])}, 'dataset_constituent_7': {'feats_1': 
TensorShape([Dimension(20)]), 'labels_1': TensorShape([])}} """ tf.data.Dataset.map-- Datasets of any shape can be transformed using the "map" transformations These maps input functions that take as input a function-- Either a well-defined stand-alone function or a lambda function """ print('\n\n####################################################################################################') print('DATASETS AND MAPS') print('####################################################################################################') # Define complex datasets dataset_d_1 = tf.data.Dataset.from_tensor_slices({ 'feats_1' : tf.random_uniform([2, 10]) }) # Define maps map_d_1 = lambda x: x['feats_1']*2 # The map multiplies input's feats_1 by 2 dataset_d_1.map(map_d_1) # As each element of d_1 is a single tf.Tensor, our map must have 1 input # Define unnamed datasets dataset_d_1 = tf.data.Dataset.from_tensor_slices(tf.random_uniform([2, 10])) dataset_d_2 = tf.data.Dataset.from_tensor_slices( ( tf.truncated_normal([2, 15]) , tf.constant(np.random.randint(0, 2, [2, 3]).astype(np.float32)) ) ) dataset_d_3 = tf.data.Dataset.zip((dataset_d_1 , dataset_d_2)) dataset_d_1.map(lambda x : x*2) # Map a function that inputs each entry and multiplies it by 2 dataset_d_2.map(lambda x, y : (x*2, y*3)) # A lambda function can return multiple entries as a tuple! # dataset_d_3.map(lambda x, (y, z) : (x*2, (y*3, z*4))) # THIS IS NOT SUPPORTED! # dataset_d_3.flat_map(lambda x, y : (x*2, y)) # THIS FAILS TOO! Take home : Avoid complicated dataset structures dataset_d_3.map(lambda x, y : (x*2, y)) # This is fine!! """ Iterators over the dataset-- Datsets can be accessed via iterators over the dataset Iterators are of different types-- one-shot, initializable, reinitializable and feedable """ """ One-shot iterators-- These iterators are the simplest type of iterators. They iterate over the entire dataset once and only once. 
They can handle input pipelines that are based on queue, except for parametrization. Thus, to range over all the dataset exactly once, we can use these iterators """ print('\n\n####################################################################################################') print('ONE SHOT ITERATORS') print('####################################################################################################') # Create a dataset using range attribute of dataset dataset_d_1 = tf.data.Dataset.range(4) # Create a one-shot iterator itr_one_shot_d_1 = dataset_d_1.make_one_shot_iterator() # Create an op that gives the next element! Note that in the fashion of tf, we can not get the data right-away. We need to create an op, run in via a session to get the data op_next_element = itr_one_shot_d_1.get_next() # Create a session and run the iterator to get all elements sess = tf.Session() for i in range(4) : op_next_element_ = sess.run(op_next_element) print('[INFO] Iteration number : ' + str(i) + ' dataset entry returned by the iterator : ' + str(op_next_element_)) # [INFO] Iteration number : 0 dataset entry returned by the iterator : 0 # [INFO] Iteration number : 1 dataset entry returned by the iterator : 1 # [INFO] Iteration number : 2 dataset entry returned by the iterator : 2 # [INFO] Iteration number : 3 dataset entry returned by the iterator : 3 # We can also try and make it run for more number of times than the number of entries in the dataset! THE ITERATOR GIVES AN ERROR THEN!! 
# Past-the-end demo for one-shot iterators (kept commented out on purpose):
# asking for more entries than the dataset holds raises OutOfRangeError.
# dataset_d_2 = tf.data.Dataset.from_tensor_slices(np.array([int(x*x) for x in range(5)]))
# itr_one_shot_d_2 = dataset_d_2.make_one_shot_iterator()
# op_next_element_2 = itr_one_shot_d_2.get_next()
# sess = tf.Session()
# for i in range(6):  # There are only 5 entries in the dataset
#     op_next_element_ = sess.run(op_next_element_2)
#     print('[INFO] Iteration number : ' + str(i) + ' dataset entry returned by the iterator : ' + str(op_next_element_))
# The first 5 iterations print 0, 1, 4, 9, 16; the 6th raises:
# # OutOfRangeError (see above for traceback): End of sequence
# # [[Node: IteratorGetNext_1 = IteratorGetNext[output_shapes=[[]], output_types=[DT_INT64], _device="/job:localhost/replica:0/task:0/device:CPU:0"](OneShotIterator_1)]]

"""
Initializable iterators--
Often the size (more generally, the structure) of a dataset depends on the value
fed to a placeholder. Initializable iterators cover exactly this case: the
dataset is built from placeholder values, and the iterator must be explicitly
initialized with those values before use.
"""
print('\n\n####################################################################################################')
print('INITIALIZABLE ITERATORS')
print('####################################################################################################')

# The dataset size comes from a scalar placeholder, so the dataset is only
# fully defined once a concrete value is fed in.
pl_size = tf.placeholder(tf.int64, shape=[])
dataset_d_1 = tf.data.Dataset.range(pl_size)

# A one-shot iterator cannot be built here, because the dataset depends on a placeholder:
# itr_init_d_1 = dataset_d_1.make_one_shot_iterator()
# ValueError: Cannot capture a placeholder (name:Placeholder, type:Placeholder) by value.
itr_init_d_1 = dataset_d_1.make_initializable_iterator()
op_next_element = itr_init_d_1.get_next()
sess = tf.Session()

# The initializer is an attribute (not a method call); running it with the
# placeholder fed in actually materializes the dataset.
op_dataset_initializer = itr_init_d_1.initializer
sess.run(op_dataset_initializer, feed_dict={pl_size: 5})

# With the dataset initialized, its entries can be pulled as usual.
for i in range(5):
    op_next_element_ = sess.run(op_next_element)
    print('[INFO] Iteration number : ' + str(i) + ' dataset entry returned by the iterator : ' + str(op_next_element_))
# Running the iterator beyond the number of entries raises, as before:
# # OutOfRangeError (see above for traceback): End of sequence
# # [[Node: IteratorGetNext_1 = IteratorGetNext[output_shapes=[[]], output_types=[DT_INT64], _device="/job:localhost/replica:0/task:0/device:CPU:0"](Iterator)]]

# The iterator can simply be re-initialized with a different size, which
# re-creates the dataset and lets us read entries again.
sess.run(op_dataset_initializer, feed_dict={pl_size: 10})
for i in range(10):
    op_next_element_ = sess.run(op_next_element)
    print('[INFO] Iteration number : ' + str(i) + ' dataset entry returned by the iterator : ' + str(op_next_element_))
# Prints the entries 0 through 9, one per iteration.

"""
Reinitializable iterators--
Several datasets often share the same structure (output types and shapes). In
that case the iterator can be defined from the common structure rather than
from any one dataset, and then be (re-)initialized against whichever dataset is
needed. The same get_next() op subsequently serves every dataset.
"""
print('\n\n####################################################################################################')
print('REINITIALIZABLE ITERATORS')
print('####################################################################################################')

# Two datasets with identical structure: pairs of int arrays, 10 resp. 5 entries.
dataset_d_1 = tf.data.Dataset.from_tensor_slices((np.array([int(x) for x in range(10)]), np.array([int(x) for x in range(10)])))
dataset_d_2 = tf.data.Dataset.from_tensor_slices((np.array([int(x) for x in range(5)]), np.array([int(x) for x in range(5)])))

# Maps intended to perturb the second component of each entry by a random amount.
# NOTE(review): np.random.randint runs once at graph-construction time, so the
# very same offset is added to every entry -- hence the "DOESN'T WORK" caveats.
# lambda_d_1_1 = lambda x, y : (x, y + tf.random_uniform([], 0, 2, tf.int64))
# lambda_d_2_1 = lambda x, y : (x, y + tf.random_uniform([], -1, 2, tf.int64))
# EVEN THE TF VERSION ABOVE WON'T WORK AS WRITTEN, SINCE THE MAPPED DATASET IS NOT ASSIGNED!!
lambda_d_1_1 = lambda x, y: (x, y + np.random.randint(0, 2))    # DOESN'T WORK!! (see note above)
lambda_d_2_1 = lambda x, y: (x, y + np.random.randint(-1, 2))   # DOESN'T WORK!! (see note above)

# Apply the maps -- note the re-assignment: map() returns a NEW dataset.
dataset_d_1 = dataset_d_1.map(lambda_d_1_1)
dataset_d_2 = dataset_d_2.map(lambda_d_2_1)

# Define the iterator from the (shared) structure of either dataset; d_2 is used here.
itr_reinitializable = tf.data.Iterator.from_structure(dataset_d_2.output_types, dataset_d_2.output_shapes)
op_next_element = itr_reinitializable.get_next()
# The iterator still has to be mapped onto each dataset and initialized before use.
sess = tf.Session()

# Map the shared-structure iterator onto each dataset; each mapping yields an
# initializer op that points the iterator at that dataset.
op_itr_d_1_init = itr_reinitializable.make_initializer(dataset_d_1)
op_itr_d_2_init = itr_reinitializable.make_initializer(dataset_d_2)

# Walk d_1, then d_2, with the SAME get_next() op -- only the initializer that
# was run beforehand decides which dataset the entries come from.
print('##################################################')
sess.run(op_itr_d_1_init)
for i in range(10):
    print('[INFO] Next entry from dataset d_1 : ' + str(sess.run(op_next_element)))
print('##################################################')
sess.run(op_itr_d_2_init)
for i in range(5):
    print('[INFO] Next entry from dataset d_2 : ' + str(sess.run(op_next_element)))
print('##################################################')

# The initialize-then-iterate cycle can be repeated as many times as we wish.
sess.run(op_itr_d_1_init)
for i in range(10):
    print('[INFO] Next entry from dataset d_1 : ' + str(sess.run(op_next_element)))
print('##################################################')
sess.run(op_itr_d_2_init)
for i in range(5):
    print('[INFO] Next entry from dataset d_2 : ' + str(sess.run(op_next_element)))

"""
Feedable iterators--
When several datasets each carry an iterator of their own, the CHOICE of
iterator can be passed through feed_dict, so every sess.run may use a different
one. This has the same power as the reinitializable iterator, but allows
switching between iterators at will: each iterator keeps its own position, and
no re-initialization is forced before accessing entries.
"""
print('\n\n####################################################################################################')
print('FEEDABLE ITERATORS')
print('####################################################################################################')

# Two zipped datasets of identical structure, with the second component
# perturbed by a per-entry random offset (tf.random_uniform runs in-graph).
dataset_chunk_1 = tf.data.Dataset.range(10)
dataset_chunk_2 = tf.data.Dataset.range(5)
dataset_d_1 = tf.data.Dataset.zip((dataset_chunk_1, dataset_chunk_1))
dataset_d_2 = tf.data.Dataset.zip((dataset_chunk_2, dataset_chunk_2))
lambda_d_1_1 = lambda x, y: (x, y + tf.random_uniform([], 0, 2, tf.int64))
lambda_d_2_1 = lambda x, y: (x, y + tf.random_uniform([], -1, 2, tf.int64))
dataset_d_1 = dataset_d_1.map(lambda_d_1_1)
dataset_d_2 = dataset_d_2.map(lambda_d_2_1)

# The handle is a scalar string placeholder that selects the iterator per run.
handle = tf.placeholder(tf.string, shape=[])

# An iterator defined from the handle plus the common structure of the datasets.
itr_from_handle = tf.data.Iterator.from_string_handle(handle, dataset_d_1.output_types, dataset_d_1.output_shapes)
op_next_element = itr_from_handle.get_next()

# Any number of concrete iterators can back the handle; one-shot ones suffice here.
itr_d_1 = dataset_d_1.make_one_shot_iterator()
itr_d_2 = dataset_d_2.make_one_shot_iterator()

# Evaluate each iterator's string handle so it can be fed into `handle`.
sess = tf.Session()
itr_d_1_handle = sess.run(itr_d_1.string_handle())
itr_d_2_handle = sess.run(itr_d_2.string_handle())

# Same get_next() op throughout; the fed handle picks the source iterator.
for i in range(15):
    if i < 10:
        print('[INFO] The next entry is from d_1 : ' + str(sess.run(op_next_element, { handle : itr_d_1_handle })))
    else:
        print('[INFO] The next entry is from d_2 : ' + str(sess.run(op_next_element, { handle : itr_d_2_handle })))

"""
Creating infinite dataset--
repeat() is a special transformation that replays a dataset; called without an
argument it makes the dataset infinite. Demonstrated below by printing entries
against a wall-clock timer.
"""
print('\n\n####################################################################################################')
print('ACCESSING DATASETS INFINITE TIMES')
print('####################################################################################################')

# 10 entries of size 5, replayed 3 times -> 30 entries in total.
dataset_d_1 = tf.data.Dataset.from_tensor_slices(np.random.random([10, 5]))
dataset_d_1 = dataset_d_1.repeat(3)
itr_d_1 = dataset_d_1.make_one_shot_iterator()
op_next_element = itr_d_1.get_next()
for i in range(30):
    print('[INFO] Next element position : ' + str(i + 1) + '\tNext entry : ' + str(sess.run(op_next_element)))

# repeat() without an argument repeats the dataset indefinitely.
dataset_d_1 = tf.data.Dataset.from_tensor_slices(np.random.random([10, 5]))
dataset_d_1 = dataset_d_1.repeat()
itr_d_1 = dataset_d_1.make_one_shot_iterator()
op_next_element = itr_d_1.get_next()
# Keep printing until the timer in the next section expires.
# Pull entries from the infinite dataset until the time budget is exhausted.
time_start = time.time()
is_continue = True
i = 0
while is_continue:
    i += 1
    # BUGFIX: this label used str(i + 1), but `i` is already incremented before
    # printing, so the first reported position was 2. str(i) makes the labels
    # 1-based, consistent with the `str(i + 1)` convention of the preceding
    # for-loop (whose index starts at 0).
    print('[INFO] Next element position : ' + str(i) + '\tNext entry : ' + str(sess.run(op_next_element)))
    time_now = time.time()
    time_till_now = time_now - time_start
    # if time_till_now > 10 :
    if time_till_now > 1:
        is_continue = False
print('[INFO] Time till now : ' + str(time_till_now))

"""
Using next_element in the code and adhering to range--
The entry produced by get_next() can be consumed inside the graph. The iterator
advances only once per session run, so if the op is used at several places in
the graph, all of them see the same value within one run. Once the iterator
runs past the dataset, tf.errors.OutOfRangeError is raised; it can be caught
and handled. For nested datasets, the next element carries the same nesting as
the dataset definition.
"""
print('\n\n####################################################################################################')
print('USING NEXT ELEMENTS')
print('####################################################################################################')

# A placeholder-sized dataset with an initializable iterator.
ph_size = tf.placeholder(tf.int64, [])  # single scalar placeholder -- shape []
dataset_d_1 = tf.data.Dataset.range(ph_size)
itr_d_1_initializable = dataset_d_1.make_initializable_iterator()
op_next_element = itr_d_1_initializable.get_next()
# The same next-element op is consumed three times in one expression:
# entry = (x + 2*x) + 3*x -- all three uses see the SAME x within a run.
data_consumer = tf.add(tf.add(op_next_element, op_next_element*2), tf.multiply(op_next_element, 3))

# Initialize the iterator; the only placeholder is ph_size.
init_itr_and_dataset = itr_d_1_initializable.initializer
sess = tf.Session()
sess.run(init_itr_and_dataset, feed_dict={ ph_size : 5 })

# 10 accesses against 5 entries: the first 5 succeed, the rest raise and are caught.
for i in range(10):
    try:
        op_next_element_, data_consumer_ = sess.run([op_next_element, data_consumer])
        print('[INFO] Next entry : ' + str(op_next_element_) + '\tData Consumer Value : ' + str(data_consumer_))
    except tf.errors.OutOfRangeError:
        print('[ERROR] The dataset is already exhausted.')
        # # Good practice would be to break out on the first OutOfRangeError:
        # break

# Nested datasets: zip a flat dataset with a tuple-valued one.
dataset_d_1 = tf.data.Dataset.from_tensor_slices(tf.random_uniform([10, 5]))
dataset_d_2 = tf.data.Dataset.from_tensor_slices((np.array([x for x in range(10)]), tf.random_uniform([10, 5])))
dataset_d_3 = tf.data.Dataset.zip((dataset_d_1, dataset_d_2))
itr_d_3_initializable = dataset_d_3.make_initializable_iterator()
op_next_element = itr_d_3_initializable.get_next()
# The next element unpacks exactly like the dataset definition.
op_next_element_1, (op_next_element_2, op_next_element_3) = op_next_element
sess = tf.Session()
init_itr_d_3_and_d_3 = itr_d_3_initializable.initializer
sess.run(init_itr_d_3_and_d_3)
for i in range(10):
    if i < 5:
        # First 5 entries printed whole.
        print('[INFO] Iteration : ' + str(i) + ' Entry : ' + str(sess.run(op_next_element)))
    # Running the components through SEPARATE sess.run calls is a trap: every
    # run of anything that depends on get_next() advances the iterator, so
    # sess.run(op_next_element_1) / _2 / _3 would each consume a fresh entry
    # (and eventually raise OutOfRangeError). Hence a single run per entry:
    else:
        an_entry = sess.run(op_next_element)
        print('[INFO] Iteration : ' + str(i) + '\n')
        print('[INFO]\t\tComponent 1 : ' + str(an_entry[0]))
        print('[INFO]\t\tComponent 2 : ' + str(an_entry[1][0]))
        print('[INFO]\t\tComponent 3 : ' + str(an_entry[1][1]))

"""
tf.contrib.data.make_saveable_from_iterator--
Creates a SaveableObject for an iterator's state, i.e. for the entire input
pipeline. To actually save that state, the saveable must be added to the
tf.GraphKeys.SAVEABLE_OBJECTS collection.
"""
print('\n\n####################################################################################################')
print('SAVING ITERATORS')
print('####################################################################################################')

# Kept commented out: make_saveable_from_iterator is NOT AVAILABLE in this TF version.
# path_saver = './'
# ph_size = tf.placeholder(tf.int64, [])
# dataset_d_1 = tf.data.Dataset.range(ph_size)
# itr_d_1_initializable = dataset_d_1.make_initializable_iterator()
# op_next_element = itr_d_1_initializable.get_next()
# # Create a saver for the iterator and register it with the saveables collection
# saver_itr_ = tf.contrib.data.make_saveable_from_iterator(itr_d_1_initializable)
# tf.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, saver_itr_)
# init_itr_d_1 = itr_d_1_initializable.initializer
# sess = tf.Session()
# saver = tf.train.Saver()
# sess.run(init_itr_d_1, feed_dict = { ph_size : 10 })
# for i in range(5):
#     print('[INFO] Iteration : ' + str(i) + '\tEntry : ' + str(sess.run(op_next_element)))
# saver.save(path_saver)  # argument: path of the checkpoint
# # To restore the session, rebuild the graph from ph_size up to (but not
# # including) the tf.add_to_collection line, then:
# saver = tf.train.Saver()
# sess = tf.Session()
# saver.restore(sess, path_saver)

"""
Creating simple dataset from numpy arrays--
When the whole dataset fits into memory at once, a tf dataset can be created
directly from numpy arrays -- demonstrated here with MNIST.
"""
print('\n\n####################################################################################################')
print('DATASETS FROM NUMPY ARRAYS')
print('####################################################################################################')

# Load the standard MNIST dataset shipped with tensorflow.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Training, validation and testing splits as in-memory numpy arrays.
images_train = mnist.train.images
labels_train = mnist.train.labels
images_validation = mnist.validation.images
labels_validation = mnist.validation.labels
images_test = mnist.test.images
labels_test = mnist.test.labels

# One tf dataset per split, built straight from the arrays.
dataset_train = tf.data.Dataset.from_tensor_slices((images_train, labels_train))
dataset_validation = tf.data.Dataset.from_tensor_slices((images_validation, labels_validation))
dataset_test = tf.data.Dataset.from_tensor_slices((images_test, labels_test))

# A single structure-based (reinitializable) iterator serves all three splits.
itr_reinitializable = tf.data.Iterator.from_structure(dataset_test.output_types, dataset_test.output_shapes)
op_next_element = itr_reinitializable.get_next()
init_itr_mapped_on_train = itr_reinitializable.make_initializer(dataset_train)
init_itr_mapped_on_validation = itr_reinitializable.make_initializer(dataset_validation)
init_itr_mapped_on_test = itr_reinitializable.make_initializer(dataset_test)

sess = tf.Session()

def _check_first_entry_matches(split_name, init_op, images, labels):
    """Initialize the iterator on one split and confirm that its first entry
    equals the first (image, label) pair of the underlying numpy arrays.
    Shows that the iterator behaves exactly like direct array indexing."""
    sess.run(init_op)
    for i in range(1):
        batch_from_itr = sess.run(op_next_element)
        batch_from_dataset = (images[i], labels[i])
        batch_from_itr_0 = batch_from_itr[0]
        batch_from_itr_1 = batch_from_itr[1]
        batch_from_dataset_0 = batch_from_dataset[0]
        batch_from_dataset_1 = batch_from_dataset[1]
        if np.all(batch_from_itr_0 == batch_from_dataset_0) and np.all(batch_from_itr_1 == batch_from_dataset_1):
            print('[INFO] The entries from the ' + split_name + ' dataset and the iterator match!!')

_check_first_entry_matches('training', init_itr_mapped_on_train, images_train, labels_train)
_check_first_entry_matches('validation', init_itr_mapped_on_validation, images_validation, labels_validation)
_check_first_entry_matches('testing', init_itr_mapped_on_test, images_test, labels_test)

# Release the big in-memory arrays.
del images_train
del images_validation
del images_test
del labels_train
del labels_validation
del labels_test

"""
tf.data.TFRecord--
TFRecord is a simple record-oriented binary format, used by many TF
applications. Contents of arbitrary files can be stored in it as int64, float
or byte streams. A dataset can be built from a list of tfrecord filenames; a
parser function is then needed to turn the serialized records into usable
values, after which the usual iterators apply.
"""
print('\n\n####################################################################################################')
print('TFRECORDS')
print('####################################################################################################')

# With known filenames, build a dataset over the tfrecord files (note: this
# creates a DATASET reading tfrecords, not a tfrecord itself).
path_files = ['TFRecords/train.tfrecords', 'TFRecords/validation.tfrecords', 'TFRecords/test.tfrecords']
dataset_mnist = tf.data.TFRecordDataset(path_files)

# Peek at the first (still serialized) record.
itr_dataset_mnist = dataset_mnist.make_initializable_iterator()
op_next_element = itr_dataset_mnist.get_next()
init_itr_dataset_mnist = itr_dataset_mnist.initializer
sess = tf.Session()
sess.run(init_itr_dataset_mnist)
for i in range(1):
    print('[INFO] First entry in the TFRecord format of MNIST : \n')
    x_1 = sess.run(op_next_element)
    print(str(x_1))
# This prints the raw binary-encoded record; it must be parsed to be usable.

# Often only the tfrecord DIRECTORY is known up-front, not the individual
# files, so the filenames are fed through a string placeholder instead.
ph_paths_tfrecords = tf.placeholder(tf.string, shape=[None])  # 1-D list of strings
paths_tfrecords = [str('TFRecords/' + str(x)) for x in os.listdir('TFRecords')]
dataset_from_tfrecords = tf.data.TFRecordDataset(ph_paths_tfrecords)
# dataset_from_tfrecords = dataset_from_tfrecords.map(_parser)  # a parser map would go here
# BUGFIX: tf.data transformations return NEW datasets; the original code called
# .repeat() and .batch(32) without re-assigning the result, so neither took
# effect despite the comments claiming they did.
dataset_from_tfrecords = dataset_from_tfrecords.repeat()    # Repeat indefinitely
dataset_from_tfrecords = dataset_from_tfrecords.batch(32)   # Batches of size 32

itr_dataset_from_tfrecords = dataset_from_tfrecords.make_initializable_iterator()
op_next_element = itr_dataset_from_tfrecords.get_next()
init_itr_dataset_from_tfrecords = itr_dataset_from_tfrecords.initializer

# Feed the discovered filenames to build the dataset, then peek at the first entry.
sess = tf.Session()
sess.run(init_itr_dataset_from_tfrecords, feed_dict={ ph_paths_tfrecords : paths_tfrecords })
for i in range(1):
    print('[INFO] First entry in the TFRecord format of MNIST : \n')
    x_2 = sess.run(op_next_element)
    print(str(x_2))

"""
tf.data.TextLineDataset--
A dataset built from one or more .txt files, producing one string-valued
element per line. Since text files typically contain headers and comment
lines, .skip and .filter are mapped on the dataset to keep only the legit
data lines.
"""
print('\n\n####################################################################################################')
print('DATASET FROM .TXT FILES')
print('####################################################################################################')

# The list of text files (only the first one is used in this section).
path_txt_files = ['Data_File_1.txt', 'Data_File_2.txt']

# Dataset over a single file: drop the header line, keep only lines that do
# NOT start with '#' (filter KEEPS the lines for which the predicate is true).
dataset_d_1 = tf.data.TextLineDataset('Data_File_1.txt')
dataset_d_1 = dataset_d_1.skip(1)
dataset_d_1 = dataset_d_1.filter(lambda a_line: tf.not_equal(tf.substr(a_line, 0, 1), '#'))

itr_d_1_initializable = dataset_d_1.make_initializable_iterator()
op_next_element = itr_d_1_initializable.get_next()
init_itr_d_1 = itr_d_1_initializable.initializer

sess = tf.Session()
sess.run(init_itr_d_1)
for i in range(10):
    try:
        print('[INFO] Iteration : ' + str(i) + ' Entry : ' + str(sess.run(op_next_element)))
    except tf.errors.OutOfRangeError:
        print('[ERROR] Dataset is finished!!')
# Only the 7 data lines of the file are printed (b'1,1.34352' ... b'8,8');
# the remaining iterations report '[ERROR] Dataset is finished!!'.
print('##################################################')

# Naive multi-file variant: hand the whole filename list to TextLineDataset.
path_txt_files = ['Data_File_1.txt', 'Data_File_2.txt']
dataset_d_1 = tf.data.TextLineDataset(path_txt_files)
dataset_d_1 = dataset_d_1.skip(1)
dataset_d_1 = dataset_d_1.filter(lambda a_line: tf.not_equal(tf.substr(a_line, 0, 1), '#'))
itr_d_1_initializable = dataset_d_1.make_initializable_iterator()
op_next_element = itr_d_1_initializable.get_next()
init_itr_d_1 = itr_d_1_initializable.initializer
sess = tf.Session()
sess.run(init_itr_d_1)
for i in range(20):
    try:
        print('[INFO] Iteration : ' + str(i) + ' Entry : ' + str(sess.run(op_next_element)))
    except tf.errors.OutOfRangeError:
        print('[ERROR] Dataset is finished!!')
# Caveat, visible in the output: .skip(1) drops only the first line of the
# CONCATENATED stream, so the second file's header leaks through as a data line:
# [INFO] Iteration : 0 Entry : b'1,1.34352'
# ...
# [INFO] Iteration : 6 Entry : b'8,8'
# [INFO] Iteration : 7 Entry : b'idx,val'   <- header of file 2, NOT wanted!
# [INFO] Iteration : 8 Entry : b'1,1.34352'
# ...
# [INFO] Iteration : 13 Entry : b'8,8'
# followed by '[ERROR] Dataset is finished!!' for the remaining iterations.
print('##################################################')
# The per-file skip/filter must instead be applied inside a flat_map -- next section.
path_txt_files = ['Data_File_1.txt', 'Data_File_2.txt']
# Correct multi-file variant: build a dataset OF FILENAMES and flat_map a
# per-file TextLineDataset, so skip(1) and the '#'-filter apply to EACH file.
dataset_d_1 = tf.data.Dataset.from_tensor_slices(path_txt_files)
dataset_d_1 = dataset_d_1.flat_map(lambda a_file: tf.data.TextLineDataset(a_file).skip(1).filter(lambda a_line: tf.not_equal(tf.substr(a_line, 0, 1), '#')))
itr_d_1_initializable = dataset_d_1.make_initializable_iterator()
op_next_element = itr_d_1_initializable.get_next()
init_itr_d_1 = itr_d_1_initializable.initializer
sess = tf.Session()
sess.run(init_itr_d_1)
for i in range(20):
    try:
        print('[INFO] Iteration : ' + str(i) + ' Entry : ' + str(sess.run(op_next_element)))
    except tf.errors.OutOfRangeError:
        print('[ERROR] Dataset is finished!!')
# This gives the desired result: every file's header and comment lines are
# skipped, so exactly the 13 data lines of the two files are printed
# (b'1,1.34352' ... b'8,8', then b'1,1.34352' ... b'8,8'), followed by
# '[ERROR] Dataset is finished!!' for the remaining iterations.

"""
tf.data.Dataset.map()--
map applies a function to every element of a dataset and returns a new
dataset. The function receives a tf.Tensor (one element) and returns a
tf.Tensor that represents one element of the resulting dataset. np operations,
plain python and tf operations may all be used inside the map; tf operations
are the recommended choice.
"""

"""
tf.train.Example--
tfrecords is the recommended storage format for TF datasets; each entry is
stored as a tf.train.Example. A dataset built on tfrecords therefore needs a
map that parses a single serialized example out of the binary stream -- and
possibly processes it further into a usable form.
"""
print('\n\n####################################################################################################')
print('TF EXAMPLES')
print('####################################################################################################')

def _ParseSingleRawExampleFromTFRecords(serialized_data):
    """Parse one serialized tf.train.Example without decoding the bytes.

    inputs--
        serialized_data : Example in binarized tfrecords
    outputs--
        feat_1  : feature 'feat_1', left as a raw byte string
        label_1 : label 'label_1', an int64
    """
    feature_dict = {
        'feat_1'  : tf.FixedLenFeature((), tf.string),
        'label_1' : tf.FixedLenFeature((), tf.int64)
    }
    parsed_ex = tf.parse_single_example(serialized_data, feature_dict)
    feat_1 = parsed_ex['feat_1']
    label_1 = parsed_ex['label_1']
    return feat_1, label_1

# Dataset over the training tfrecords, mapped through the raw parser.
dataset_train = tf.data.TFRecordDataset(['TFRecords/train.tfrecords'])
dataset_train = dataset_train.map(_ParseSingleRawExampleFromTFRecords)

itr_d_train_initializable = dataset_train.make_initializable_iterator()
op_next_element = itr_d_train_initializable.get_next()
init_itr_d_train = itr_d_train_initializable.initializer
sess = tf.Session()
sess.run(init_itr_d_train)
for i in range(1):
    print('[INFO] The first entry in the dataset : ' + str(sess.run(op_next_element)))
# Prints the (still binary) feature of the first MNIST image plus its label, 7.

def _ParseSingleDecodedExampleFromTFRecords(serialized_data):
    """Parse one serialized tf.train.Example and decode the raw image bytes.

    inputs--
        serialized_data : Example in binarized tfrecords
    outputs--
        feat_1  : feature 'feat_1', decoded from raw bytes as uint8 values
        label_1 : label 'label_1', an int64
    """
    feature_dict = {
        'feat_1'  : tf.FixedLenFeature((), tf.string),
        'label_1' : tf.FixedLenFeature((), tf.int64)
    }
    parsed_ex = tf.parse_single_example(serialized_data, feature_dict)
    feat_1 = parsed_ex['feat_1']
    # decode_raw: first argument is the byte feature, second the target dtype.
    feat_1 = tf.decode_raw(feat_1, tf.uint8)
    label_1 = parsed_ex['label_1']
    return feat_1, label_1

# Same pipeline, now through the decoding parser.
dataset_train = tf.data.TFRecordDataset(['TFRecords/train.tfrecords'])
dataset_train = dataset_train.map(_ParseSingleDecodedExampleFromTFRecords)
itr_d_train_initializable = dataset_train.make_initializable_iterator()
op_next_element = itr_d_train_initializable.get_next()
init_itr_d_train = itr_d_train_initializable.initializer
sess = tf.Session()
sess.run(init_itr_d_train)
# NOTE: every sess.run below advances the iterator, so these prints show
# pieces of 5 DIFFERENT entries, not 5 views of the same entry.
for i in range(1):
    print('[INFO] The first entry in the dataset : ' + str(sess.run(op_next_element)))
    print('[INFO] The first entry in the dataset : ' + str(sess.run(op_next_element)[0]))
    print('[INFO] The first entry in the dataset : ' + str(sess.run(op_next_element)[0].shape))
    print('[INFO] The first entry in the dataset : ' + str(sess.run(op_next_element)[1]))
    print('[INFO] The first entry in the dataset : ' + str(sess.run(op_next_element)[1].shape))
# Decoding as uint8 yields a feature of shape (2352,) -- not the size one
# would expect. The exact encoding used when WRITING the records must be known
# (a shortcoming of the API; here it was found by hit-and-trial).

def _ParseSingleDecodedExampleFromTFRecords(serialized_data):
    """Same parser as above, but decoding the raw bytes as int32 -- the
    encoding that actually matches how these tfrecords were written.

    inputs--
        serialized_data : Example in binarized tfrecords
    outputs--
        feat_1  : feature 'feat_1', decoded from raw bytes as int32 values
        label_1 : label 'label_1', an int64
    """
    feature_dict = {
        'feat_1'  : tf.FixedLenFeature((), tf.string),
        'label_1' : tf.FixedLenFeature((), tf.int64)
    }
    parsed_ex = tf.parse_single_example(serialized_data, feature_dict)
    feat_1 = parsed_ex['feat_1']
    feat_1 = tf.decode_raw(feat_1, tf.int32)
    label_1 = parsed_ex['label_1']
    return feat_1, label_1

dataset_train = tf.data.TFRecordDataset(['TFRecords/train.tfrecords'])
dataset_train = dataset_train.map(_ParseSingleDecodedExampleFromTFRecords)
itr_d_train_initializable = dataset_train.make_initializable_iterator()
op_next_element = itr_d_train_initializable.get_next()
init_itr_d_train = itr_d_train_initializable.initializer
sess = tf.Session()
sess.run(init_itr_d_train)
for i in range(1):
    print('[INFO] The first entry in the dataset : ' + str(sess.run(op_next_element)))
    print('[INFO] The first entry in the dataset : ' + str(sess.run(op_next_element)[0]))
    print('[INFO] The first entry in the dataset : ' + str(sess.run(op_next_element)[0].shape))
    print('[INFO] The first entry in the dataset : ' + str(sess.run(op_next_element)[1]))
    print('[INFO] The first entry in the dataset : ' + str(sess.run(op_next_element)[1].shape))
# The same parser pattern works for ANY kind of files -- point the dataset at
# the files and define the desired parsing/processing in the map.
The following example is hypothetical-- file_names = tf.constant(['Images/im_1.jpg', 'Images/im_2.jpg']) labels = tf.constant([32, 1]) dataset_images = tf.data.Dataset.from_tensor_slices((file_names, labels)) def _ParseImagesFromFiles(a_file_name, a_label) : # NOTE THAT AN ENTRY OF THE DATASET IS a_file_name AND a_label, which mimics the entries of the dataset as of now """ inputs-- a_file_name : Example of the name of file a_label : The corresponding label """ """ outputs-- parsed_loaded_image : Loaded and parsed image parsed_label : Parsed label, which will be identical to the input label """ parsed_loaded_image = cv2.imread(a_file_name.decode()) # We need to decode the file name and then read it via cv2 parsed_label = a_label return parsed_loaded_image, parsed_label # Since this parser function uses functions that are not using tf, we need to map it as a tf.py_func # We are mapping an in-place lambda function. It must input a_file_name and a_label. They must be CONVERTED to an image and a label. This output is a tuple of the image and the label. The image and the label will be obtained from a regular python function. The call tf.py_func allows to do that, with first argument as the function name, the second being the input list and the third being the output datatypes dataset_images = dataset_images.map(lambda a_file_name, a_label : tuple(tf.py_func(_ParseImagesFromFiles, [a_file_name, a_label], [tf.uint8, a_label.dtype]))) """ Creating batches of data-- Almost always, we need batches of data from a dataset. 
This is achieved using the .batch() method for the class of datasets """ print('\n\n####################################################################################################') print('BATCHING THE DATASET') print('####################################################################################################') # dataset_d_1 = tf.data.Dataset.from_tensor_slices(np.array([x for x in range(10)]).astype(np.float32)) # dataset_d_2 = tf.data.Dataset.from_tensor_slices(tf.random_uniform(shape = [10, 5])) # dataset_d_3 = tf.data.Dataset.zip((dataset_d_1, dataset_d_2)) # dataset_d_3_batch = dataset_d_3.batch(4) # itr_d_3_one_shot = dataset_d_3_batch.make_initializable_iterator() # op_next_element = itr_d_3_one_shot.get_next() # sess = tf.Session() # sess.run(itr_d_3_one_shot.initializer) # for i in range(10) : # print('[INFO] Iteration : ' + str(i) + ' Batch of size 4 : ' + str(sess.run(op_next_element))) # # # TRICK! The dataset gets exhausted after 2 iterations and half the next! 
# # [INFO] Iteration : 0 Batch of size 4 : (array([0., 1., 2., 3.], dtype=float32), array([[0.8732004 , 0.0504725 , 0.93043816, 0.8808429 , 0.3385867 ], # # [0.43248057, 0.11033738, 0.83129835, 0.4386369 , 0.64893126], # # [0.6333921 , 0.8307407 , 0.95417285, 0.14215422, 0.48749363], # # [0.8133067 , 0.68265915, 0.8404523 , 0.32187164, 0.2798376 ]], # # dtype=float32)) # # [INFO] Iteration : 1 Batch of size 4 : (array([4., 5., 6., 7.], dtype=float32), array([[0.5500783 , 0.68858874, 0.28348708, 0.4319272 , 0.50177824], # # [0.73060036, 0.8014771 , 0.286044 , 0.17267597, 0.09057891], # # [0.3873173 , 0.7133751 , 0.64562154, 0.350726 , 0.61373734], # # [0.7431874 , 0.46514666, 0.4127797 , 0.33395493, 0.58312273]], # # dtype=float32)) # # [INFO] Iteration : 2 Batch of size 4 : (array([8., 9.], dtype=float32), array([[0.04777563, 0.04185534, 0.34166193, 0.6186359 , 0.8059368 ], # # [0.39856946, 0.00511253, 0.92232347, 0.4880129 , 0.24416947]], # # OutOfRangeError (see above for traceback): End of sequence print('##################################################') print('THE CORRECT WAY--') print('##################################################') dataset_d_1 = tf.data.Dataset.from_tensor_slices(np.array([x for x in range(10)]).astype(np.float32)) dataset_d_2 = tf.data.Dataset.from_tensor_slices(tf.random_uniform(shape = [10, 5])) dataset_d_3 = tf.data.Dataset.zip((dataset_d_1, dataset_d_2)) dataset_d_3 = dataset_d_3.repeat() dataset_d_3_batch = dataset_d_3.batch(4) itr_d_3_one_shot = dataset_d_3_batch.make_initializable_iterator() op_next_element = itr_d_3_one_shot.get_next() sess = tf.Session() sess.run(itr_d_3_one_shot.initializer) for i in range(10) : print('[INFO] Iteration : ' + str(i) + ' Batch of size 4 : ' + str(sess.run(op_next_element))) print('##################################################') print('THE INCORRECT WAY--\nThe batching of 10 sized dataset into 4 sized batches creates a 2-sized batch, which is reflected in the values generated from the 
iterator!! Check the output manually!!') print('##################################################') dataset_d_1 = tf.data.Dataset.from_tensor_slices(np.array([x for x in range(10)]).astype(np.float32)) dataset_d_2 = tf.data.Dataset.from_tensor_slices(tf.random_uniform(shape = [10, 5])) dataset_d_3 = tf.data.Dataset.zip((dataset_d_1, dataset_d_2)) dataset_d_3_batch = dataset_d_3.batch(4) dataset_d_3_batch = dataset_d_3_batch.repeat() # Repeat indefinitely!! THIS WAS MISSING EARLIER! itr_d_3_one_shot = dataset_d_3_batch.make_initializable_iterator() op_next_element = itr_d_3_one_shot.get_next() sess = tf.Session() sess.run(itr_d_3_one_shot.initializer) for i in range(10) : print('[INFO] Iteration : ' + str(i) + ' Batch of size 4 : ' + str(sess.run(op_next_element))) """ Creating padded batches of data-- Applications oriented about sequence modelling involve variable sized inputs. This is achieved using the .padded_batch() method of the Dataset class. We need to provide as the first argument the batch size, the second argument as the padded_shapes. 
Setting it None takes care of the variable size that might exist in the data and pads the content appropriately """ print('\n\n####################################################################################################') print('PADDED BATCHES') print('####################################################################################################') # # Create a 2-D dataset with variable shapes # dataset_d_1 = tf.data.Dataset.range(5) # def _GetChunkOfIdentityMatrix(x) : # """ # inputs-- # x : # Input, entry of the dataset d_1 # """ # """ # outputs-- # chunk (implicit) : # The matrix Identity_5(0:x, 0:x) # """ # # return np.eye(5)[0:int(x + 1), 0:int(x + 1)].astype(np.float32) # return tf.fill([tf.cast(x, tf.int32), tf.cast(x, tf.int32)], x) # dataset_d_1 = dataset_d_1.map(lambda x : tf.py_func(_GetChunkOfIdentityMatrix, [x], [tf.int32])) # dataset_d_1 = dataset_d_1.repeat() # # dataset_d_1 = dataset_d_1.batch(2) # THIS WILL GIVE ERROR! # print('[DEBUG] Output shape of the dataset : ' + str(dataset_d_1.output_shapes)) # # dataset_d_1 = dataset_d_1.padded_batch(2, padded_shapes = (None, None), padding_values = 0) # itr_d_1_initializable = dataset_d_1.make_initializable_iterator() # op_next_element = itr_d_1_initializable.get_next() # init_itr_d_1 = itr_d_1_initializable.initializer # sess = tf.Session() # sess.run(init_itr_d_1) # for i in range(3) : # print('[INFO] Iteration : ' + str(i) + ' Batch with padding : ' + str(sess.run(op_next_element))) # THE WHOLE EXAMPLE IS A PROBLEM. Advice : Create single dimensional lata and work!! # Create a 1-D dataset with variable shapes dataset_d_1 = tf.data.Dataset.range(5) dataset_d_1 = dataset_d_1.map(lambda x : tf.fill([tf.cast(x, tf.int32)], x)) dataset_d_1 = dataset_d_1.repeat() # dataset_d_1 = dataset_d_1.batch(2) # THIS WILL GIVE ERROR! 
print('[DEBUG] Output shape of the dataset : ' + str(dataset_d_1.output_shapes)) dataset_d_1 = dataset_d_1.padded_batch(2, padded_shapes = [None], padding_values = tf.constant(7, dtype = tf.int64)) itr_d_1_initializable = dataset_d_1.make_initializable_iterator() op_next_element = itr_d_1_initializable.get_next() init_itr_d_1 = itr_d_1_initializable.initializer sess = tf.Session() sess.run(init_itr_d_1) for i in range(3) : print('[INFO] Iteration : ' + str(i) + ' Batch with padding : ' + str(sess.run(op_next_element))) """ tf.data.Dataset.shuffle(buffer_size)-- Another crucial aspect for creating batches from any dataset is the random shuffling of the data. This can be achieved by the .shuffle method of the Dataset class. The method requires the argument buffer_size, which is the number of elements of the dataset that are loaded into the buffer. It also requires optional seed value as an integer to create the pseudo random entries and the optional reshuffle_each_iteration, which as the name suggests, reshuffles the dataset per iteration and is defaulted to True """ print('\n\n####################################################################################################') print('PADDED BATCHES') print('####################################################################################################') # We just want to see that whether this consumes all the elements of the dataset, even if the buffer size is low dataset_d_1 = tf.data.Dataset.range(10) dataset_d_1 = dataset_d_1.shuffle(buffer_size = 2) dataset_d_1 = dataset_d_1.repeat() itr_d_1_initializable = dataset_d_1.make_initializable_iterator() op_next_element = itr_d_1_initializable.get_next() init_itr_d_1 = itr_d_1_initializable.initializer sess = tf.Session() sess.run(init_itr_d_1) for i in range(20) : print('[INFO] Next entry : ' + str(sess.run(op_next_element))) # # THIS DOES CONSUME ALL THE ENTRIES! 
It only loads a few of them onto the RAM so as to avoid the high memory consumption # [INFO] Next entry : 0 # [INFO] Next entry : 2 # [INFO] Next entry : 1 # [INFO] Next entry : 3 # [INFO] Next entry : 4 # [INFO] Next entry : 6 # [INFO] Next entry : 7 # [INFO] Next entry : 8 # [INFO] Next entry : 9 # [INFO] Next entry : 5 # [INFO] Next entry : 1 # [INFO] Next entry : 0 # [INFO] Next entry : 3 # [INFO] Next entry : 2 # [INFO] Next entry : 4 # [INFO] Next entry : 6 # [INFO] Next entry : 5 # [INFO] Next entry : 8 # [INFO] Next entry : 7 # [INFO] Next entry : 9 """ TO CONCLUCE WITH A DUMMY EXAMPLE-- It is recommended to use the Dataset.make_one_shot_iterator() for creating a new iterator over the entire dataset It is also recommended to use the tf.train.MonitoredTrainingSession() so as to avoid getting the OutOfRangeError when dataset exhausts If that error is hit, the session's .should_stop() method returns True. This can be exploited to stop the training """ print('\n\n####################################################################################################') print('FINAL EXAMPLE--') print('####################################################################################################') # Create a simple dataset dataset_1 = tf.data.Dataset.range(1000) dataset_1 = dataset_1.map(lambda x : tf.cast(x, tf.float32)) dataset_2 = dataset_1.map(lambda x : ((tf.cast(x, tf.float32)*2) + tf.random_uniform([]))) # 2*x + e dataset = tf.data.Dataset.zip((dataset_1, dataset_2)) dataset = dataset.shuffle(buffer_size = 10) dataset = dataset.repeat(10) # 10 epochs! 
dataset = dataset.batch(2) # 128 sized batch itr_dataset = dataset.make_one_shot_iterator() op_next_element = itr_dataset.get_next() # This gives us the next element W = tf.Variable(tf.truncated_normal([])) print(W) x = op_next_element[0] y = op_next_element[1] loss = tf.reduce_mean((y - tf.multiply(x, W))*(y - tf.multiply(x, W))) opt = tf.train.AdamOptimizer(1e-10) op_training_step = opt.minimize(loss) # Create a session sess = tf.train.MonitoredTrainingSession() index = 0 print('[INFO]') while not sess.should_stop() : try : _, loss_, x_, y_ = sess.run([op_training_step, loss, x, y]) sys.stdout.write('\r[INFO] Iteration : ' + str(index) + ' Loss : ' + str(loss_) + ' x : ' + str(x_) + ' y : ' + str(y_)) sys.stdout.flush() index += 1 time.sleep(0.5) except tf.errors.OutOfRangeError : # UNNCESSARY!! print('\n[ERROR] Dataset Exhausted!!') break # Print the value of the variables W_ = sess.run(W) print('[INFO] Learned weight W : ' + str(W_)) # NOTE ONLY THE CODE!!
from rest_framework.generics import ListAPIView, RetrieveAPIView, UpdateAPIView, CreateAPIView, DestroyAPIView
from .serializers import OrderItemSerializer
from items.models import OrderItem
from rest_framework.response import Response
from django.conf import settings
from rest_framework import viewsets


# OrderItem APIView.
# NOTE: the five separate generic views (List/Detail/Create/Update/Delete)
# that used to live below as a commented-out block were removed: they were
# dead code and referenced undefined names (`serializer_class` inside a
# method, `status` without an import), so they could never have run as-is.
class OrderItemViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for OrderItem.

    A single ModelViewSet provides list, retrieve, create, update and
    destroy actions over the full OrderItem queryset.
    """
    serializer_class = OrderItemSerializer
    queryset = OrderItem.objects.all()
import unittest
from flask import abort, url_for
from flask_testing import TestCase
from os import getenv
from application import app, db
from application.models import Tracks, Artists, Genres
from application.routes import delete


class UnitBase(TestCase):
    """Base test case: targets the test database and seeds sample fixtures."""

    def create_app(self):
        # Pass in test configurations: build the DB URI from environment
        # variables so tests run against MYSQL_DB_TEST, never the real DB.
        config_name = "testing"
        app.config.update(
            SQLALCHEMY_DATABASE_URI='mysql+pymysql://' + str(getenv('MYSQL_USER')) + ':'
            + str(getenv('MYSQL_PASSWORD')) + '@' + str(getenv('MYSQL_HOST')) + '/' + str(getenv('MYSQL_DB_TEST')))
        return app

    def setUp(self):
        # BUG FIX: this hook was named ``setup`` (and the teardown
        # ``TearDown``); unittest/Flask-Testing only invoke the exact
        # camelCase names ``setUp``/``tearDown``, so the fixture was
        # silently skipped before. Recreates the schema for every test.
        db.session.commit()
        db.drop_all()
        db.create_all()

        # Creates test artists, genres and tracks used by the assertions.
        artist1 = Artists(name="keeno", default_genre="liquid")
        artist2 = Artists(name="dilated peoples", default_genre="hiphop")
        genre1 = Genres(name="liquid", folder_path="/opt/flask-app/music/liquid")
        genre2 = Genres(name="hiphop", folder_path="/opt/flask-app/music/hiphop")
        track1 = Tracks(title="You Can't Hide, You Can't Run (prod. by Evidence)",
                        filename="test.mp3", album="20/20", artist_id="2", genre_id="2")
        track2 = Tracks(title="Guesswork", filename="test2.mp3",
                        album="All The Shimmering Things", artist_id="1", genre_id="1")
        track3 = Tracks(title="deletetest", filename="musicfileexample1.mp3",
                        album="album_test", artist_id="1", genre_id="1")

        # Saves the fixtures to the database.
        db.session.add(artist1)
        db.session.add(artist2)
        db.session.add(genre1)
        db.session.add(genre2)
        db.session.add(track1)
        db.session.add(track2)
        db.session.add(track3)
        db.session.commit()

    def tearDown(self):
        # BUG FIX: was named ``TearDown`` and therefore never called.
        # Kept as a marker print; the drop/create in setUp resets state.
        print("TESTING")
        # db.session.remove()
        # db.drop_all()


class UnitTest(UnitBase):
    """Smoke tests: every registered page should be reachable (HTTP 200)."""

    def test_sort_with_url(self):
        # is testing page reachable
        response = self.client.get(url_for('sort'))
        self.assertEqual(response.status_code, 200)

    def test_amend_directory_url(self):
        response = self.client.get(url_for('amend_directory'))
        self.assertEqual(response.status_code, 200)

    def test_update_artist_genre_url(self):
        response = self.client.get(url_for('update_artist_genre'))
        self.assertEqual(response.status_code, 200)

    def test_delete_url(self):
        response = self.client.get(url_for('delete'))
        self.assertEqual(response.status_code, 200)

    """
    def test_amend_directory_post(self):
        # create test genre with create functionality
        genretest = Genres(name="testgenre", folder_path = "/opt/flask-app/music/testgenre")
        # save post to database
        db.session.add(genretest)
        db.session.commit()
        self.assertEqual(Genres.query.count(), 3)
    """
    """
    def test_delete_delete(self):
        #tests the delete functionality of the app
        Tracks.query.filter_by(title = "deletetest").delete()
        self.assertEqual(Tracks.query.count(),2)
    """
"""Testing changing editor status, but only with 'approve': 'accept'""" import json import requests def test_changing_editor_status(): """ Mustn't have emails 'editor@test.agh.edu.pl' and 'admin_test3@admin.agh.edu.pl' in database or test will fail. :return: None """ url_user = 'http://127.0.0.1:5000/user' url_user_auth = 'http://127.0.0.1:5000/user/auth' url_user_editor = 'http://127.0.0.1:5000/user/editorRequests' data = {'name': 'proper_name', 'email': 'editor@test.agh.edu.pl', 'password': '12345', 'editorRequest': True} assert requests.post(url_user, json=data).status_code == 200 data = {'name': 'proper_name', 'email': 'editor@test.agh.edu.pl'} assert requests.post(url_user_editor, json=data).status_code == 401 data = {'email': 'editor@test.agh.edu.pl', 'password': '12345'} response = requests.post(url_user_auth, json=data) non_admin_login = json.loads(response.content.decode('utf-8')) data['approval'] = 'accept' assert requests.post( url_user_editor, data=data, headers={'token': non_admin_login['token']} ).status_code == 403 data = {'name': 'admin_test', 'email': 'admin_test3@admin.agh.edu.pl', 'password': 'admin_test', 'editorRequest': False} assert requests.post(url_user, json=data).status_code == 200 data = {'email': 'admin_test3@admin.agh.edu.pl', 'password': 'admin_test'} response = requests.post(url_user_auth, json=data) admin_login = json.loads(response.content.decode('utf-8')) data = {'name': 'proper_name', 'email': 'editor@test.agh.edu.pl', 'approval': 'accept'} response = requests.post( url_user_editor, json=data, headers={'token': admin_login['token']} ) assert response.status_code == 200 response = requests.get( url_user_editor, headers={'token': admin_login['token']} ) data = json.loads(response.content.decode('utf-8')) assert len(data) == 0 data = {'name': 'proper_name', 'email': 'invalid@test.agh.edu.pl', 'approval': 'accept'} response = requests.post( url_user_editor, json=data, headers={'token': admin_login['token']} ) assert 
response.status_code == 409
#!/usr/bin/env python3

import argparse
import re
from typing import List, Dict, Any

from core.kernel import Kernel
from core.command import Command
from core.task import Task
from core.operation import Operation
from core.simple_operation import SimpleOperation

# Top-level CLI; every sub-command inherits the shared options declared on
# ``challenge_parser``.
parser = argparse.ArgumentParser(prog="cb-repair",
                                 description='CGC Benchmark plugin for automatic program repair tools.')

challenge_parser = argparse.ArgumentParser(add_help=False)
challenge_parser.add_argument('-v', '--verbose', help='Verbose output.', action='store_true')
challenge_parser.add_argument('-ns', '--no_status', help='No status output.', action='store_true')
challenge_parser.add_argument('--excl', help='Flag for not skipping excluded challenges.', action='store_true')
challenge_parser.add_argument('-l', '--log_file', type=str, default=None, help='Log file to write the results to.')

subparsers = parser.add_subparsers()
subparsers.required = True


def add_operation(name: str, operation: Operation, description: str):
    """Register an Operation sub-command: a SimpleOperation plus working-dir options."""
    operation_parser = add_simple_operation(name, operation, description)
    operation_parser.add_argument('-wd', '--working_directory', type=str, help='The working directory.',
                                  required=True)
    operation_parser.add_argument('--script', type=str, help='Writes the command in the specified file.',
                                  required=False)
    operation_parser.add_argument('-pf', '--prefix', type=str, default=None,
                                  help='Path prefix for extra compile and test files for the unknown arguments')
    operation_parser.add_argument('-r', '--regex', type=str, default=None,
                                  help='File containing the regular expression to parse unknown arguments into known')
    return operation_parser


def add_simple_operation(name: str, simple_operation: SimpleOperation, description: str):
    """Register a SimpleOperation sub-command (adds the mandatory challenge name)."""
    simple_operation_parser = add_command(name, simple_operation, description)
    simple_operation_parser.add_argument('-cn', '--challenge', type=str, help='The challenge name.', required=True)
    return simple_operation_parser


def add_task(name: str, task: Task, description: str):
    """Register a Task sub-command (operates on an optional list of challenges)."""
    task_parser = add_command(name, task, description)
    task_parser.add_argument('--challenges', type=str, nargs='+', required=False,
                             help='The challenges to be checked.')
    return task_parser


def add_command(name: str, command: Command, description: str):
    """Create a sub-parser wired to *command*; inherits the shared challenge options."""
    kernel_parser = subparsers.add_parser(name=name, help=description, parents=[challenge_parser])
    kernel_parser.set_defaults(command=command)
    kernel_parser.set_defaults(name=name)
    return kernel_parser


def parse_unknown(regex: str, unknown: List[str], **kwargs) -> Dict[str, Any]:
    """Fold *unknown* CLI arguments into *kwargs* using the pattern stored in file *regex*.

    The first line of the file must hold a regex with named groups; every
    unknown argument that matches contributes its group values. Repeated
    keys are accumulated into lists. Returns *kwargs*, possibly updated.
    """
    if not (regex and unknown):
        return kwargs

    with open(regex, "r") as f:
        first_line = f.readline().splitlines()

    # BUG FIX: an empty regex file used to raise IndexError on
    # ``splitlines()[0]``; treat it as "no pattern" instead.
    if not first_line:
        return kwargs
    exp = first_line[0]

    args_matches: Dict[str, Any] = {}
    for arg in unknown:
        match = re.match(exp, arg)
        if not match:
            continue
        for key, value in match.groupdict().items():
            if key in args_matches:
                # Accumulate repeated keys into a list.
                if isinstance(args_matches[key], list):
                    args_matches[key].append(value)
                else:
                    args_matches[key] = [args_matches[key], value]
            else:
                args_matches[key] = value
    kwargs.update(args_matches)
    return kwargs


def run(kernel: Kernel, command: Command, **kwargs):
    """Instantiate *command* with the parsed arguments and execute it."""
    if "regex" in kwargs:
        kwargs = parse_unknown(**kwargs)

    cmd = command(kernel=kernel, **kwargs)
    cmd()


# Importing the task/operation modules registers their sub-commands on
# ``subparsers`` as a side effect — that is why these imports sit at the
# bottom of the file, after the registration helpers are defined.
import tasks.catalog
import tasks.sanity
import tasks.score
import tasks.clean
import tasks.stats
import tasks.init_polls

import operations.info
import operations.make
import operations.compile
import operations.simple.checkout
import operations.simple.genpolls
import operations.simple.manifest
import operations.simple.patch
import operations.test
import operations.test_coverage
# Enumerate the natural numbers below 1000
# (comment translated from Korean: "find the natural numbers under 1000").
n = 1
while n < 1000:
    print(n)
    n += 1

# The same enumeration, written with range() instead of a manual counter.
for n in range(1, 1000):
    print(n)

# Sum of every number below 1000 divisible by 3 or by 5 (Project Euler #1),
# expressed as a single generator expression instead of an accumulator loop.
result = sum(n for n in range(1, 1000) if n % 3 == 0 or n % 5 == 0)
print(result)
# Generated by Django 2.1.3 on 2018-11-16 12:33 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Item', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('purchaser_name', models.CharField(max_length=200)), ('item_description', models.CharField(max_length=400)), ('item_price', models.IntegerField(default=0)), ('purchase_count', models.IntegerField(default=1)), ('merchant_address', models.CharField(max_length=600)), ('merchant_name', models.CharField(max_length=400)), ('pub_date', models.DateTimeField(verbose_name='date published')), ], ), ]
from setuptools import setup, find_packages

# Packaging metadata for the speedrecorder distribution.
setup(
    name='speedrecorder',
    url='https://github.com/rweyant/speedtest-cli',
    description='A function that records your internet speed',
    version='0.1',
    packages=find_packages(),
    # Runtime dependencies: ipgetter resolves the external IP address,
    # requests performs the HTTP calls.
    install_requires=[
        'ipgetter',
        'requests',
    ],
)
# Field names required on every passport; "cid" is optional.
REQUIRED_FIELDS = {"byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"}

# Allowed eye colours for the "ecl" field.
VALID_EYE_COLORS = {"amb", "blu", "brn", "gry", "grn", "hzl", "oth"}


def _parse_passports(lines):
    """Group blank-line-separated "key:value" tokens into one dict per passport."""
    passports = []
    current = {}
    for line in lines:
        if line:
            for field in line.split():
                key, value = field.split(':')
                current[key] = value
        else:
            passports.append(current)
            current = {}
    # BUG FIX: the original dropped the final passport when the input did
    # not end with a blank line.
    if current:
        passports.append(current)
    return passports


def _year_in_range(value, lo, hi):
    """True if *value* is a decimal year within [lo, hi]."""
    return value.isdigit() and lo <= int(value) <= hi


def _field_is_valid(key, value):
    """Validate a single passport field per the part-2 rules."""
    if key == "byr":
        return _year_in_range(value, 1920, 2002)
    if key == "iyr":
        return _year_in_range(value, 2010, 2020)
    if key == "eyr":
        return _year_in_range(value, 2020, 2030)
    if key == "hgt":
        number, unit = value[:-2], value[-2:]
        # BUG FIX: the original called int() unguarded and crashed with
        # ValueError on heights like "190" or "cm" with no digits.
        if not number.isdigit():
            return False
        if unit == "in":
            return 59 <= int(number) <= 76
        if unit == "cm":
            return 150 <= int(number) <= 193
        return False
    if key == "hcl":
        # BUG FIX: the original used the *value* of int(value[1:], 16) as a
        # boolean, so the valid colour "#000000" was rejected; its except
        # branch also spelled ``false`` (a NameError when reached).
        if len(value) != 7 or value[0] != '#':
            return False
        try:
            int(value[1:], 16)
        except ValueError:
            return False
        return True
    if key == "ecl":
        return value in VALID_EYE_COLORS
    if key == "pid":
        return len(value) == 9 and value.isdigit()
    # "cid" (and any unknown field) carries no constraint.
    return True


def main():
    """Solve AoC 2020 day 4: count structurally (p1) and fully (p2) valid passports."""
    with open("input.txt") as input_file:
        lines = [line.strip() for line in input_file]

    passports = _parse_passports(lines)

    # Part 1: all required fields present.
    p1_valid = [p for p in passports if REQUIRED_FIELDS <= p.keys()]
    print("Number of valid passports (p1): {}".format(len(p1_valid)))

    # Part 2: required fields present *and* every value within its rules.
    p2_valid = [p for p in p1_valid
                if all(_field_is_valid(key, value) for key, value in p.items())]
    print("Number of valid passports (p2): {}".format(len(p2_valid)))


if __name__ == "__main__":
    main()
from django.db import models # database for the cities class City(models.Model): city = models.CharField(max_length=100) abb = models.CharField(max_length=5) def __str__(self): return f'{self.city} ({self.abb})' # Database for the hotels, connected with cities class Hotel(models.Model): city = models.ForeignKey(City, on_delete=models.CASCADE) hotel_city_abb = models.CharField(max_length=10, default=0) hotel = models.CharField(max_length=100) def __str__(self): return f'{self.hotel} in {self.city}'
from django.conf import settings
from django.contrib.auth.views import PasswordResetView, PasswordResetDoneView, PasswordResetConfirmView, PasswordResetCompleteView
from django.conf.urls import url  # no longer used below; kept so other modules importing from here are unaffected
from django.conf.urls.static import static
from django.urls import path

from . import views

# All routes use ``path()`` for consistency: the file previously mixed
# ``path()`` with the regex-based ``url()``, which is deprecated since
# Django 2.0 and removed in 4.0. Each pattern below is the literal-string
# equivalent of the old regex (trailing-slash behaviour preserved).
urlpatterns = [
    path('', views.index, name="login_register"),
    # 'register' deliberately has no trailing slash (matches the old '^register$').
    path('register', views.register, name="register"),
    path('login/', views.login, name="login"),
    path('logout/', views.logout, name="logout"),
    path('reset_password/',
         PasswordResetView.as_view(template_name="accounts/password_reset.html"),
         name="password_reset"),
    path('reset_password_sent/',
         PasswordResetDoneView.as_view(template_name="accounts/password_reset_sent.html"),
         name="password_reset_done"),
    path('reset/<uidb64>/<token>/',
         PasswordResetConfirmView.as_view(template_name="accounts/password_reset_form.html"),
         name="password_reset_confirm"),
    path('reset_password_complete/',
         PasswordResetCompleteView.as_view(template_name="accounts/password_reset_done.html"),
         name="password_reset_complete"),
    # No trailing slash in the original 'area' route either.
    path('area', views.area_handle, name="area"),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# -*- coding: utf-8 -*- # Generated by Django 1.11.6 on 2017-11-19 11:09 from __future__ import unicode_literals import django.contrib.gis.db.models.fields from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('map', '0004_auto_20171113_0124'), ] operations = [ migrations.CreateModel( name='Setor', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('nome', models.CharField(max_length=50, verbose_name='nome')), ('geom', django.contrib.gis.db.models.fields.PolygonField(srid=4326)), ], ), ]
# BUG FIX: ``_`` is used in create() below but was never imported (NameError
# on the first create when the sequence returns a falsy value).
from odoo import api, fields, models, _
from datetime import datetime
from odoo.exceptions import ValidationError


class Planning(models.Model):
    """Production planning record that can be approved into an mrp.production order."""

    _name = 'mrp.plan'
    _description = 'MRP Planning'

    # BUG FIX: the compute had no @api.depends, so the kanban colour was
    # never recomputed when the state changed.
    @api.depends('state')
    def change_color_on_kanban(self):
        """Map the plan state to a kanban colour index."""
        for record in self:
            color = 0
            if record.state == 'draft':
                color = 4
            elif record.state == 'approve':
                color = 10
            elif record.state == 'cancel':
                color = 9
            record.color = color

    name = fields.Char('Name', readonly=True)
    # location_dest_id = fields.Many2one('stock.location','Destination Loaction')
    product_id = fields.Many2one('product.product', 'Product', domain="[('bom_ids','!=',False)]")
    # BUG FIX: ``default=datetime.today()`` was evaluated once at module
    # import, so every plan got the server start time; pass the callable.
    plan_date = fields.Datetime('Plan Date', default=fields.Datetime.now)
    production_date = fields.Datetime('Production Date')
    plan_qty = fields.Float('Plan Quantity')
    uom_id = fields.Many2one('uom.uom', 'Product Uom')
    bom_id = fields.Many2one('mrp.bom', 'Bom', domain="[('product_tmpl_id','=',product_id)]")
    user_id = fields.Many2one('res.users', 'Responsible')
    is_subcontract = fields.Boolean('Subcontract', default=False, compute="onchange_subcontract")
    state = fields.Selection([('draft', 'Draft'), ('approve', 'Approve'), ('cancel', 'Cancel')],
                             copy=False, default="draft")
    production_id = fields.Many2one('mrp.production', 'Production', readonly=True)
    color = fields.Integer('Color Index', compute="change_color_on_kanban")
    progress = fields.Integer('Production Progress', default=10, compute="onchange_widget")
    max_progress = fields.Integer(default=100)

    @api.depends('production_id')
    def onchange_widget(self):
        """Reflect the linked production order's state as a 0-100 progress value."""
        # BUG FIX: computed fields must assign on every path; records with
        # no production order (or an unmapped state) previously left
        # ``progress`` unset, which Odoo rejects at flush time.
        state_to_progress = {'confirmed': 25, 'planned': 50, 'progress': 75,
                             'done': 100, 'cancel': 0}
        for rec in self:
            if rec.production_id:
                rec.progress = state_to_progress.get(rec.production_id.state, 0)
            else:
                rec.progress = 0

    @api.model
    def create(self, vals):
        """Assign the next 'mrp.plan' sequence number as the record name."""
        vals['name'] = self.env['ir.sequence'].next_by_code('mrp.plan') or _('New')
        return super(Planning, self).create(vals)

    @api.onchange('product_id')
    def onchange_bom(self):
        """Default the BoM, UoM and quantity from the selected product."""
        # NOTE(review): looking the template up by *name* is fragile (names
        # are not unique); ``self.product_id.product_tmpl_id`` is the direct
        # link — confirm before changing. Debug prints removed.
        if self.product_id:
            product_tmpl = self.env['product.template'].search(
                [('name', '=', self.product_id.name)], limit=1)
            bom = self.env['mrp.bom'].search(
                [('product_tmpl_id', '=', product_tmpl.id)], limit=1)
            self.bom_id = bom
            self.uom_id = self.product_id.uom_id.id
            self.plan_qty = 1

    @api.onchange('production_date')
    def onchange_date(self):
        """Forbid a production date earlier than the plan date."""
        if self.production_date and self.plan_date:
            if self.production_date < self.plan_date:
                raise ValidationError('Production Date should be greater than Plan Date...!!')

    @api.depends('bom_id')
    def onchange_subcontract(self):
        """Mirror the subcontracting flag of the selected BoM."""
        for rec in self:
            # BUG FIX: assign on both branches so the compute always sets a value.
            rec.is_subcontract = rec.bom_id.is_subcontract if rec.bom_id else False

    def cancel(self):
        """Move the plan to the cancelled state."""
        if self.state:
            self.state = 'cancel'

    def approve(self):
        """Create the mrp.production order, send the notification mail and approve."""
        if self.product_id:
            picking_type_id = self.env['stock.picking.type'].search(
                [('code', '=', 'mrp_operation'),
                 ('warehouse_id.partner_id', '=', self.env.user.company_id.id)])
            mrp_order = {
                'product_id': self.product_id.id,
                'planned_date': self.plan_date,
                'product_qty': self.plan_qty,
                'product_uom_id': self.uom_id.id,
                'date_planned_start': self.production_date,
                'bom_id': self.bom_id.id,
                'user_id': self.user_id.id,
                'is_subcontract': self.is_subcontract,
                'partner_id': self.bom_id.partner_id.id,
                'picking_type_id': picking_type_id.id,
                'location_src_id': picking_type_id.default_location_src_id.id,
                'planned_id': self.id or '',
            }
            mrp = self.env['mrp.production'].create(mrp_order)
            self.production_id = mrp
            template = self.env['ir.model.data'].get_object('mrp_plan', 'email_template_mrp_plan')
            self.env['mail.template'].browse(template.id).send_mail(self.id, force_send=True)
            self.state = 'approve'

    def manufacture_order(self):
        """Open the linked production order in the current window."""
        return {
            'type': 'ir.actions.act_window',
            'res_model': 'mrp.production',
            'res_id': self.production_id.id,
            'view_type': 'form',
            'view_mode': 'form',
            'target': 'current',
        }


class Production(models.Model):
    """Extend mrp.production with a back-link to its planning record."""

    _inherit = 'mrp.production'

    planned_date = fields.Char('Planned Date')
    planned_id = fields.Many2one('mrp.plan', 'Production Plan')
fname = raw_input("Enter file name: ") fh = open(fname) #opened files lst = list() for line in fh: #read it by line line = line.rstrip() #white space is reduced for each line words = line.split() #line is split into a list of words #print "###", words for i in words: if i in lst: continue else: lst.append(i) lst.sort() print lst
from flask import Flask

app = Flask(__name__)

# Imported after `app` exists; `app.clientes` is a project package that
# exposes the `main` blueprint.
from app.clientes import main
app.register_blueprint(main)

# Development server only: debug=True enables the reloader/debugger and
# must not be used in production.
app.run(debug=True, port=3000)
import rw import networkx as nx import numpy as np import pickle import sys filename=sys.argv[1] usf_graph, usf_items = rw.read_csv("./snet/USF_animal_subset.snet") usf_graph_nx = nx.from_numpy_matrix(usf_graph) usf_numnodes = len(usf_items) numsubs = 50 numlists = 3 listlength = 35 numsims = 1 #methods=['rw','goni','chan','kenett','fe'] methods=['uinvite'] toydata=rw.Data({ 'numx': numlists, 'trim': listlength }) fitinfo=rw.Fitinfo({ 'startGraph': "goni_valid", 'goni_size': 2, 'goni_threshold': 2, 'followtype': "avg", 'prune_limit': np.inf, 'triangle_limit': np.inf, 'other_limit': np.inf }) #toygraphs=rw.Graphs({ # 'numgraphs': 1, # 'graphtype': "steyvers", # 'numnodes': 280, # 'numlinks': 6, # 'prob_rewire': .3}) # generate data for `numsub` participants, each having `numlists` lists of `listlengths` items seednum=0 # seednum=150 (numsubs*numlists) means start at second sim, etc. for simnum in range(numsims): data = [] # Xs using usf_item indices datab = [] # Xs using ss_item indices (nodes only generated by subject) numnodes = [] items = [] # ss_items startseed = seednum # for recording for sub in range(numsubs): Xs = rw.genX(usf_graph_nx, toydata, seed=seednum)[0] data.append(Xs) # renumber dictionary and item list itemset = set(rw.flatten_list(Xs)) numnodes.append(len(itemset)) ss_items = {} convertX = {} for itemnum, item in enumerate(itemset): ss_items[itemnum] = usf_items[item] convertX[item] = itemnum items.append(ss_items) Xs = [[convertX[i] for i in x] for x in Xs] datab.append(Xs) seednum += numlists listnum=10 for b_start in [1, 2, 3, 0.75, 0.5]: for a_start in [1, 2, 3, 0.75, 0.5]: for zib_p in [.4, .6, .3, .7, .5]: fitinfo.prior_method = "zeroinflatedbetabinomial" fitinfo.zib_p = zib_p fitinfo.prior_a = a_start fitinfo.prior_b = b_start uinvite_graphs, priordict = rw.hierarchicalUinvite(datab[:listnum], items[:listnum], numnodes[:listnum], toydata, fitinfo=fitinfo) uinvite_group_graph = rw.priorToGraph(priordict, usf_items) alldata=dict() 
alldata['uinvite_graphs'] = uinvite_graphs alldata['priordict'] = priordict alldata['uinvite_group_graph'] = uinvite_group_graph alldata['datab'] = datab alldata['items'] = items alldata['numnodes'] = numnodes filename = "zibb_p" + str(int(p*10)) + "_a" + str(a_start).replace('.','') + "_b" + str(b_start).replace('.','') + ".pickle" fh=open(filename,"w") pickle.dump(alldata,fh) fh.close() costlist = [rw.costSDT(uinvite_group_graph, usf_graph), rw.cost(uinvite_group_graph, usf_graph)] costlist = rw.flatten_list(costlist) for i in costlist: print i, ",", print rw.probXhierarchical(datab[:listnum], uinvite_graphs[:listnum], items[:listnum], priordict, toydata)
import time import csv import sys import math import pandas as pd import numpy as np from sklearn.feature_selection import SelectFpr import random import collections from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import AdaBoostClassifier , RandomForestClassifier, GradientBoostingClassifier #from sklearn.cross_validation import train_test_split df_labs_train = pd.read_csv('id_time_labs_train.csv') df_vitals_train = pd.read_csv('id_time_vitals_train.csv') df_age_train = pd.read_csv('id_age_train.csv') df_variables_combined_train = df_labs_train.merge(df_vitals_train , on = ['ID','TIME']) df_variables_combined_train = df_variables_combined_train.drop(['L13', 'L17'], axis=1) # Dropping the columns of L13 and L17 df_variables_combined_train['L20'] = df_variables_combined_train['L3']/df_variables_combined_train['L20'] #L20 now contains ratio of L3/L20 df_labs_val = pd.read_csv('id_time_labs_val.csv') df_vitals_val = pd.read_csv('id_time_vitals_val.csv') df_age_val = pd.read_csv('id_age_val.csv') df_variables_combined_val = df_labs_val.merge(df_vitals_val , on = ['ID','TIME']) df_variables_combined_val = df_variables_combined_val.drop(['L13', 'L17'], axis=1) # Dropping the columns of L13 and L17 df_variables_combined_val['L20'] = df_variables_combined_val['L3']/df_variables_combined_val['L20'] #L20 now contains ratio of L3/L20 #id_time_vitals = sys.argv[1] #id_time_labs = sys.argv[2] #id_age = sys.argv[3] #df_labs_test = pd.read_csv(id_time_labs) #df_vitals_test = pd.read_csv(id_time_vitals) #df_age_test = pd.read_csv(id_age) #df_variables_combined_test = df_labs_test.merge(df_vitals_test , on = ['ID','TIME']) #df_variables_combined_test = df_variables_combined_test.drop(['L13', 'L17'], axis=1) # Dropping the columns of L13 and L17 #df_variables_combined_test['L20'] = df_variables_combined_test['L3']/df_variables_combined_test['L20'] # selected_features = ['L1_min', 'L2_std', 'L3_ratio_of_count', 'L4_last', 'L5_last', 'L6_min', 'L6_last', 
'L7_last', 'L8_last', 'L9_min', 'L10_last', 'L11_last', 'L12_last', 'L14_exp_avg', 'L15_last', 'L15_mean', 'L18_exp_avg', 'L19_ratio_of_count', 'L20_ratio_of_count', 'L21_last', 'L23_max', 'L25_last', 'V1_min', 'V1_mean', 'V1_exp_avg', 'V1_last', 'V2_std', 'V2_ratio_of_count', 'V2_exp_avg', 'V2_last', 'V2_min', 'V3_last', 'V3_min', 'V3_exp_avg', 'V4_mean', 'V4_last', 'V4_std', 'V4_exp_avg', 'V4_min', 'V5_exp_avg', 'V5_last', 'V5_std', 'V5_mean', 'V6_last', 'V6_min', 'V6_exp_avg', 'AGE', 'L15_first_in_icu', 'L22_first_in_icu'] variables = df_variables_combined_train.columns.values.tolist() variables.remove('ID') variables.remove('ICU') variables.remove('TIME') suffixes = ['_last', '_exp_avg', '_mean', '_min', '_first_in_icu', '_std', '_ratio_of_count', '_max'] Lnames = ['L0'] Lnames = Lnames + variables[0:23] Lnames.insert(13, 'L13') Lnames.insert(17, 'L17') Vnames = ['V0'] Vnames = Vnames + variables[-6:] normal_values = { 'L1': 7.40 , 'L2': 40.0 , 'L3':87.5 , 'L4':140.0 , 'L5':4.3 ,'L6':25.0, 'L7':13.5, 'L8':0.89 , 'L9':7.25, 'L10':43.0 , 'L11':275.0 , 'L12':1.1 , 'L14':87.299555 , 'L15':1.35 , 'L16':0.05 , 'L18' : 125.0 ,'L19':85.0 , 'L20':4.5 , 'L21':4.4 , 'L22':95.5 , 'L23':39.546018 , 'L25':1.95,'V1':120.0 , 'V2':70.504731 , 'V3':83.006913 ,'V4':18, 'V5':97.5 , 'V6':98.2 } def get_predicted_labels_and_median_time(): output_df = pd.read_csv('output.csv') output_df_grouped_by_id = output_df.groupby('ID') #this is the dataframe with our prediction labels for patients temp = output_df_grouped_by_id.max() temp = temp.reset_index() temp = temp.drop('TIME', axis = 1) temp.columns = ['ID' , 'LABEL'] predicted_labels_df = temp #the list of predicted labels to return predicted_labels = np.array(predicted_labels_df['LABEL']).tolist() #2.for the first time you predicted death for a patient #for this first select only those rows which have timestamp predictions = 1 temp = output_df[output_df['PREDICTION'] == 1] # group this temp dataframe by ID and drop the TIME stamp 
column , as this dataframe contains the first timestamp where a patient's death was predicted , if it was # then take the first() temp = temp.groupby('ID').first() temp = temp.drop('PREDICTION' , axis = 1) first_prediction_of_death_df = temp.reset_index() #3.for the last timestamp recorded for a patient #for this simply group the output_df dataframe by ID and take last() temp = output_df_grouped_by_id.last() temp = temp.drop('PREDICTION' , axis = 1) last_timestamp_df = temp.reset_index() #now calculate the mean prediction time temp = first_prediction_of_death_df.merge(last_timestamp_df , on = ['ID'] , how = 'inner' , suffixes = ('_first_timestamp_of_one' , '_last_timestamp')) temp['PREDICTION_TIME'] = temp['TIME_last_timestamp'] - temp['TIME_first_timestamp_of_one'] median_prediction_time = temp['PREDICTION_TIME'].median() print 'median_prediction_time ' + str(median_prediction_time) return predicted_labels , median_prediction_time """ given two lists , one with predicted labels , one with actual labels calculate calculate the no. 
of TNs , TPs , FPs , FNs and hence the specificity and sensitivity of predictions """ def get_results(): #the four parameters of evaluation actual_labels_df = pd.read_csv(open('id_label_val.csv')) #filename hardcoded actual = np.array(actual_labels_df['LABEL']).tolist() predicted , median_prediction_time = get_predicted_labels_and_median_time() tn = 0 tp = 0 fn = 0 fp = 0 for i in range(len(predicted)): if (predicted[i] == 1) & (actual[i] == 1): tp = tp + 1 elif (predicted[i] == 1) & (actual[i] == 0): fp = fp + 1 elif (predicted[i] == 0) & (actual[i] == 0): tn = tn + 1 else: fn = fn + 1 """ specificity = tn / (tn + fp) sensitivity = tp / (tp + fn) accuracy = (tp + tn) / (tp + tn + fp + fn) """ specificity = float(tn) / (tn + fp) sensitivity = float(tp)/ (tp + fn) # just noticed , accuracy not required # accuracy = float(tp + tn) / (tp + tn + fp + fn) print 'spec ' + str(specificity) print 'sens ' + str(sensitivity) print 'tp ', tp print 'tn ', tn print 'fp ', fp print 'fn ', fn return specificity , sensitivity , median_prediction_time, fp, fn, tp, tn def evaluate_score(): #get the following values specificity , sensitivity , median_prediction_time, fp , fn , tp , tn = get_results() median_prediction_time = float(median_prediction_time)/3600 if median_prediction_time < 72.0: median_prediction_time_clipped_at_72 = median_prediction_time else: median_prediction_time_clipped_at_72 = 72 if specificity < 0.99: return -1 elif sensitivity == 0.0: return -2 elif median_prediction_time_clipped_at_72 < 5.0 : return -3 else: median_prediction_time_score = float(median_prediction_time_clipped_at_72) / 72 final_score = (0.75*sensitivity) + (0.2*median_prediction_time_score) + (0.05*specificity) print final_score return final_score def f_first_for_series(col): col = col[col.isnull() == False] if len(col) == 0: return None return col.iloc[0] def f_exp_avg_for_series(col, alpha): exp_avg = None col = col[col.isnull() == False] flag = False for element in col: if not flag: exp_avg = 
element flag = True else: exp_avg = (alpha * element) + ( (1-alpha) * exp_avg ) return exp_avg def f_ratio_for_series(col): return float( col.count() )/col.shape[0] def f_last_for_series(col): col = col[ col.isnull() == False ] col = col.values if len(col) == 0: return None return col[-1] def fill_normal_in_missing(df): suffixes = ['_min' , '_max' , '_std' , '_last' , '_mean', '_exp_avg', '_first_in_icu'] # dict of normal values #getting list of the variable names #variables_names = df_variables_combined_train.columns.values[2:31].tolist() for name in variables: for suffix in suffixes: if (name+suffix) in selected_features: if suffix == '_std': df[name+suffix].fillna(0, inplace = True) else: df[name+suffix].fillna(normal_values[name] , inplace = True) return df feature_dict1 = {0: 'L1', 1: 'L2', 2: 'L3', 3: 'L4', 4: 'L5', 5: 'L6', 6: 'L6', 7: 'L7', 8: 'L8', 9: 'L9', 10: 'L10', 11: 'L11', 12: 'L12', 13: 'L14', 14: 'L15', 15: 'L15', 16: 'L18', 17: 'L19', 18: 'L20', 19: 'L21', 20: 'L23', 21: 'L25', 22: 'V1', 23: 'V1', 24: 'V1', 25: 'V1', 26: 'V2', 27: 'V2', 28: 'V2', 29: 'V2', 30: 'V2', 31: 'V3', 32: 'V3', 33: 'V3', 34: 'V4', 35: 'V4', 36: 'V4', 37: 'V4', 38: 'V4', 39: 'V5', 40: 'V5', 41: 'V5', 42: 'V5', 43: 'V6', 44: 'V6', 45: 'V6', 46: 'AGE', 47: 'L15', 48: 'L22'} for i in feature_dict1: feature_dict1[i] = feature_dict1[i].split('_')[0] def fill_na_values(feature_list): normal_values = { 'L1': 7.40 , 'L2': 40.0 , 'L3':87.5 , 'L4':140.0 , 'L5':4.3 ,'L6':25.0, 'L7':13.5, 'L8':0.89 , 'L9':7.25, 'L10':43.0 , 'L11':275.0 , 'L12':1.1 , 'L14':87.299555 , 'L15':1.35 , 'L16':0.05 , 'L18' : 125.0 ,'L19':85.0 , 'L20':4.5 , 'L21':4.4 , 'L22':95.5 , 'L23':39.546018 , 'L25':1.95,'V1':120.0 , 'V2':70.504731 , 'V3':83.006913 ,'V4':18, 'V5':97.5 , 'V6':98.2 } for i in range(len(feature_list)): if pd.isnull( feature_list[i] ): feature_list[i] = normal_values[feature_dict1[i]] return feature_list # def get_patient_feature_values_val(patient_id, df_patient): # #features = ['L1_min', 
'L2_std', 'L3_ratio_of_count', 'L4_last', 'L5_last', 'L6_min', 'L6_last', 'L7_last', 'L8_last', 'L9_min', 'L10_last', 'L11_last', 'L12_last', 'L14_exp_avg', 'L15_last', 'L15_mean', 'L18_exp_avg', 'L19_ratio_of_count', 'L20_ratio_of_count', 'L21_last', 'L23_max', 'L25_last', 'V1_min', 'V1_mean', 'V1_exp_avg', 'V1_last', 'V2_std', 'V2_ratio_of_count', 'V2_exp_avg', 'V2_last', 'V2_min', 'V3_last', 'V3_min', 'V3_exp_avg', 'V4_mean', 'V4_last', 'V4_std', 'V4_exp_avg', 'V4_min', 'V5_exp_avg', 'V5_last', 'V5_std', 'V5_mean', 'V6_last', 'V6_min', 'V6_exp_avg'] # list_to_return = [] # list_to_return.append(df_patient['L1'].min()) # list_to_return.append(df_patient['L2'].std() ) # list_to_return.append(f_ratio_for_series( df_patient['L3'] ) ) # list_to_return.append(f_last_for_series( df_patient['L4'] ) ) # list_to_return.append(f_last_for_series( df_patient['L5'] ) ) # list_to_return.append(df_patient['L6'].min() ) # list_to_return.append(f_last_for_series( df_patient['L6'] ) ) # list_to_return.append(f_last_for_series( df_patient['L7'] ) ) # list_to_return.append(f_last_for_series( df_patient['L8'] ) ) # list_to_return.append(df_patient['L9'].min() ) # list_to_return.append(f_last_for_series( df_patient['L10'] ) ) # list_to_return.append(f_last_for_series( df_patient['L11'] ) ) # list_to_return.append(f_last_for_series( df_patient['L12'] ) ) # list_to_return.append(f_exp_avg_for_series( df_patient['L14'], 0.3) ) # list_to_return.append(f_last_for_series( df_patient['L15'] ) ) # list_to_return.append(df_patient['L15'].mean() ) # list_to_return.append(f_exp_avg_for_series( df_patient['L18'], 0.3 ) ) # list_to_return.append(f_ratio_for_series( df_patient['L19'] ) ) # list_to_return.append(f_ratio_for_series( df_patient['L20'] ) ) # list_to_return.append(f_last_for_series( df_patient['L21'] ) ) # list_to_return.append(df_patient['L23'].max() ) # list_to_return.append(f_last_for_series( df_patient['L25'] ) ) # list_to_return = list_to_return + [ df_patient['V1'].min(), 
df_patient['V1'].mean(), f_exp_avg_for_series( df_patient['V1'], 0.3), f_last_for_series( df_patient['V1'])] # list_to_return = list_to_return + [ df_patient['V2'].std(), f_ratio_for_series( df_patient['V2']), f_exp_avg_for_series( df_patient['V2'], 0.3), f_last_for_series( df_patient['V2']), df_patient['V2'].min() ] # list_to_return = list_to_return + [ f_last_for_series( df_patient['V3']), df_patient['V3'].min(), f_exp_avg_for_series( df_patient['V3'], 0.3)] # list_to_return = list_to_return + [ df_patient['V4'].mean(), f_last_for_series( df_patient['V4']), df_patient['V4'].std(), f_exp_avg_for_series( df_patient['V4'], 0.3), df_patient['V4'].min()] # list_to_return = list_to_return + [ f_exp_avg_for_series( df_patient['V5'], 0.3), f_last_for_series( df_patient['V5']), df_patient['V5'].std(), df_patient['V5'].mean()] # list_to_return = list_to_return + [ f_last_for_series( df_patient['V6']), df_patient['V6'].min(), f_exp_avg_for_series( df_patient['V6'], 0.3)] # age = df_age_val[ df_age_val['ID'] == patient_id ]['AGE'].iloc[0] # list_to_return.append(age) # final_list = fill_na_values(list_to_return) # return final_list #def get_patient_feature_values_test(patient_id, df_patient): # #features = ['L1_min', 'L2_std', 'L3_ratio_of_count', 'L4_last', 'L5_last', 'L6_min', 'L6_last', 'L7_last', 'L8_last', 'L9_min', 'L10_last', 'L11_last', 'L12_last', 'L14_exp_avg', 'L15_last', 'L15_mean', 'L18_exp_avg', 'L19_ratio_of_count', 'L20_ratio_of_count', 'L21_last', 'L23_max', 'L25_last', 'V1_min', 'V1_mean', 'V1_exp_avg', 'V1_last', 'V2_std', 'V2_ratio_of_count', 'V2_exp_avg', 'V2_last', 'V2_min', 'V3_last', 'V3_min', 'V3_exp_avg', 'V4_mean', 'V4_last', 'V4_std', 'V4_exp_avg', 'V4_min', 'V5_exp_avg', 'V5_last', 'V5_std', 'V5_mean', 'V6_last', 'V6_min', 'V6_exp_avg'] # list_to_return = [] # # list_to_return.append(df_patient['L1'].min()) # list_to_return.append(df_patient['L2'].std() ) # list_to_return.append(f_ratio_for_series( df_patient['L3'] ) ) # 
list_to_return.append(f_last_for_series( df_patient['L4'] ) ) # list_to_return.append(f_last_for_series( df_patient['L5'] ) ) # list_to_return.append(df_patient['L6'].min() ) # list_to_return.append(f_last_for_series( df_patient['L6'] ) ) # list_to_return.append(f_last_for_series( df_patient['L7'] ) ) # list_to_return.append(f_last_for_series( df_patient['L8'] ) ) # list_to_return.append(df_patient['L9'].min() ) # list_to_return.append(f_last_for_series( df_patient['L10'] ) ) # list_to_return.append(f_last_for_series( df_patient['L11'] ) ) # list_to_return.append(f_last_for_series( df_patient['L12'] ) ) # list_to_return.append(f_exp_avg_for_series( df_patient['L14'], 0.3) ) # list_to_return.append(f_last_for_series( df_patient['L15'] ) ) # list_to_return.append(df_patient['L15'].mean() ) # list_to_return.append(f_exp_avg_for_series( df_patient['L18'], 0.3 ) ) # list_to_return.append(f_ratio_for_series( df_patient['L19'] ) ) # list_to_return.append(f_ratio_for_series( df_patient['L20'] ) ) # list_to_return.append(f_last_for_series( df_patient['L21'] ) ) # list_to_return.append(df_patient['L23'].max() ) # list_to_return.append(f_last_for_series( df_patient['L25'] ) ) # list_to_return = list_to_return + [ df_patient['V1'].min(), df_patient['V1'].mean(), f_exp_avg_for_series( df_patient['V1'], 0.3), f_last_for_series( df_patient['V1'])] # list_to_return = list_to_return + [ df_patient['V2'].std(), f_ratio_for_series( df_patient['V2']), f_exp_avg_for_series( df_patient['V2'], 0.3), f_last_for_series( df_patient['V2']), df_patient['V2'].min() ] # list_to_return = list_to_return + [ f_last_for_series( df_patient['V3']), df_patient['V3'].min(), f_exp_avg_for_series( df_patient['V3'], 0.3)] # list_to_return = list_to_return + [ df_patient['V4'].mean(), f_last_for_series( df_patient['V4']), df_patient['V4'].std(), f_exp_avg_for_series( df_patient['V4'], 0.3), df_patient['V4'].min()] # list_to_return = list_to_return + [ f_exp_avg_for_series( df_patient['V5'], 0.3), 
f_last_for_series( df_patient['V5']), df_patient['V5'].std(), df_patient['V5'].mean()] # list_to_return = list_to_return + [ f_last_for_series( df_patient['V6']), df_patient['V6'].min(), f_exp_avg_for_series( df_patient['V6'], 0.3)] # age = df_age_test[ df_age_test['ID'] == patient_id ]['AGE'].iloc[0] # list_to_return.append(age) # # final_list = fill_na_values(list_to_return) # # return final_list alpha = 0.3 #def feature_extraction_train(): # """ # will return a pd.Dataframe with 291 features extracted # from time series as columns and 2 columns more i.e. ID and age. # So total 293 columns # """ # df_without_time_train = df_variables_combined_train.drop(['TIME'], axis=1) # df_grouped_by_id_train = df_without_time_train.groupby('ID') # # df_without_time_in_icu_only_train = df_without_time_train[df_without_time_train['ICU'] == 1] # df_grouped_by_id_in_icu_only_train = df_without_time_in_icu_only_train.groupby('ID') # L_15_group2 = df_grouped_by_id_in_icu_only_train['L15'] # L_22_group2 = df_grouped_by_id_in_icu_only_train['L22'] # # L1group = df_grouped_by_id_train['L1'] # L2group = df_grouped_by_id_train['L2'] # L3group = df_grouped_by_id_train['L3'] # L4group = df_grouped_by_id_train['L4'] # L5group = df_grouped_by_id_train['L5'] # L6group = df_grouped_by_id_train['L6'] # L7group = df_grouped_by_id_train['L7'] # L8group = df_grouped_by_id_train['L8'] # L9group = df_grouped_by_id_train['L9'] # L10group = df_grouped_by_id_train['L10'] # L11group = df_grouped_by_id_train['L11'] # L12group = df_grouped_by_id_train['L12'] # L14group = df_grouped_by_id_train['L14'] # L15group = df_grouped_by_id_train['L15'] # #L16group = df_grouped_by_id_train['L16'] # L18group = df_grouped_by_id_train['L18'] # L19group = df_grouped_by_id_train['L19'] # L20group = df_grouped_by_id_train['L20'] # L21group = df_grouped_by_id_train['L21'] # #L22group = df_grouped_by_id_train['L22'] # L23group = df_grouped_by_id_train['L23'] # #L24group = df_grouped_by_id_train['L24'] # L25group = 
df_grouped_by_id_train['L25'] # V1group = df_grouped_by_id_train['V1'] # V2group = df_grouped_by_id_train['V2'] # V3group = df_grouped_by_id_train['V3'] # V4group = df_grouped_by_id_train['V4'] # V5group = df_grouped_by_id_train['V5'] # V6group = df_grouped_by_id_train['V6'] # # # # df_L1 = L1group.agg( {Lnames[1]+suffixes[3] : np.min} ).reset_index() # df_L2 = L2group.agg( {Lnames[2]+suffixes[5] : np.std } ).reset_index() # x = pd.merge( df_L1, df_L2, on = 'ID') # # # df_L3 = L3group.agg( { Lnames[3]+suffixes[6] : lambda x: f_ratio_for_series(x) } ).reset_index() # x = pd.merge( x, df_L3, on = 'ID') # # df_L4 = L4group.agg( { Lnames[4]+suffixes[0] : lambda x: f_last_for_series(x) } ).reset_index() # x = pd.merge( x, df_L4, on = 'ID') # # df_L5 = L5group.agg( { Lnames[5]+suffixes[0] : lambda x: f_last_for_series(x) } ).reset_index() # x = pd.merge( x, df_L5, on = 'ID') # # df_L6 = L6group.agg( { Lnames[6]+suffixes[0] : lambda x: f_last_for_series(x), Lnames[6]+suffixes[3] : np.min } ).reset_index() # x = pd.merge( x, df_L6, on = 'ID') # # df_L7 = L7group.agg( { Lnames[7]+suffixes[0] : lambda x: f_last_for_series(x) } ).reset_index() # x = pd.merge( x, df_L7, on = 'ID') # # df_L8 = L8group.agg( { Lnames[8]+suffixes[0] : lambda x: f_last_for_series(x) } ).reset_index() # x = pd.merge( x, df_L8, on = 'ID') # # df_L9 = L9group.agg( { Lnames[9]+suffixes[3] : np.min } ).reset_index() # x = pd.merge( x, df_L9, on = 'ID') # # df_L10 = L10group.agg( { Lnames[10]+suffixes[0] : lambda x: f_last_for_series(x) } ).reset_index() # x = pd.merge( x, df_L10, on = 'ID') # # df_L11 = L11group.agg( { Lnames[11]+suffixes[0] : lambda x: f_last_for_series(x) } ).reset_index() # x = pd.merge( x, df_L11, on = 'ID') # # df_L12 = L12group.agg( { Lnames[12]+suffixes[0] : lambda x: f_last_for_series(x) } ).reset_index() # x = pd.merge( x, df_L12, on = 'ID') # # df_L14 = L14group.agg( { Lnames[14]+suffixes[1] : lambda x: f_exp_avg_for_series(x, alpha) } ).reset_index() # x = pd.merge( x, 
df_L14, on = 'ID') # # df_L15 = L15group.agg( { Lnames[15]+suffixes[0] : lambda x: f_last_for_series(x), Lnames[15]+suffixes[2] : np.mean } ).reset_index() # x = pd.merge( x, df_L15, on = 'ID') # # df_L18 = L18group.agg( { Lnames[18]+suffixes[1] : lambda x: f_exp_avg_for_series(x, alpha) } ).reset_index() # x = pd.merge( x, df_L18, on = 'ID') # # df_L19 = L19group.agg( { Lnames[19]+suffixes[6] : lambda x: f_ratio_for_series(x) } ).reset_index() # x = pd.merge( x, df_L19, on = 'ID') # # df_L20 = L20group.agg( { Lnames[20]+suffixes[6] : lambda x: f_ratio_for_series(x) } ).reset_index() # x = pd.merge( x, df_L20, on = 'ID') # # df_L21 = L21group.agg( { Lnames[21]+suffixes[0] : lambda x: f_last_for_series(x) } ).reset_index() # x = pd.merge( x, df_L21, on = 'ID') # # df_L23 = L23group.agg( { Lnames[23]+suffixes[7] : np.max } ).reset_index() # x = pd.merge( x, df_L23, on = 'ID') # # df_L25 = L25group.agg( { Lnames[25]+suffixes[0] : lambda x: f_last_for_series(x) } ).reset_index() # x = pd.merge( x, df_L25, on = 'ID') # # df_V1 = V1group.agg( { Vnames[1]+suffixes[0] : lambda x: f_last_for_series(x), Vnames[1]+suffixes[1] : lambda x: f_exp_avg_for_series(x, alpha) ,Vnames[1]+suffixes[2] : np.mean, Vnames[1]+suffixes[3] : np.min} ).reset_index() # x = pd.merge( x, df_V1, on = 'ID') # # df_V2 = V2group.agg( { Vnames[2]+suffixes[0] : lambda x: f_last_for_series(x), Vnames[2]+suffixes[1] : lambda x: f_exp_avg_for_series(x, alpha) ,Vnames[2]+suffixes[3] : np.min, Vnames[2]+suffixes[5] : np.std, Vnames[2]+suffixes[6] : lambda x: f_ratio_for_series(x)} ).reset_index() # x = pd.merge( x, df_V2, on = 'ID') # # df_V3 = V3group.agg( { Vnames[3]+suffixes[0] : lambda x: f_last_for_series(x), Vnames[3]+suffixes[1] : lambda x: f_exp_avg_for_series(x, alpha) ,Vnames[3]+suffixes[3] : np.min} ).reset_index() # x = pd.merge( x, df_V3, on = 'ID') # # df_V4 = V4group.agg( { Vnames[4]+suffixes[0] : lambda x: f_last_for_series(x), Vnames[4]+suffixes[1] : lambda x: f_exp_avg_for_series(x, alpha) 
,Vnames[4]+suffixes[2] : np.mean, Vnames[4]+suffixes[3] : np.min, Vnames[4]+suffixes[5] : np.std} ).reset_index() # x = pd.merge( x, df_V4, on = 'ID') # # df_V5 = V5group.agg( { Vnames[5]+suffixes[0] : lambda x: f_last_for_series(x), Vnames[5]+suffixes[1] : lambda x: f_exp_avg_for_series(x, alpha) ,Vnames[5]+suffixes[2] : np.mean, Vnames[5]+suffixes[5] : np.std} ).reset_index() # x = pd.merge( x, df_V5, on = 'ID') # # df_V6 = V6group.agg( { Vnames[6]+suffixes[0] : lambda x: f_last_for_series(x), Vnames[6]+suffixes[1] : lambda x: f_exp_avg_for_series(x, alpha) ,Vnames[6]+suffixes[3] : np.min} ).reset_index() # x = pd.merge( x, df_V6, on = 'ID') # # # # x = pd.merge( x, df_age_train, on = 'ID') # appending age # # df_L15_2 = L_15_group2.agg( {'L15_first_in_icu' : lambda x: f_first_for_series(x)} ).reset_index() # x = pd.merge( x, df_L15_2, on = 'ID') # # df_L22_2 = L_22_group2.agg( {'L22_first_in_icu' : lambda x: f_first_for_series(x)} ).reset_index() # x = pd.merge( x, df_L22_2, on = 'ID') # # # #finally the labels # df_labels_train = pd.read_csv('id_label_train.csv') # x = pd.merge(x, df_labels_train , on = 'ID') # # #print x.columns.values # #so y is our final dataframe which will be passed to the featue selection algorithms # y = fill_normal_in_missing(x) # #print y[y.isnull() == True] # return y # starting_time = time.time() # """ # training starts here # """ # #df_train = feature_extraction_train() df_train = pd.read_csv('buffer_train.csv') df_X_train = df_train.drop(['ID', 'LABEL', 'Unnamed: 0'], axis=1) #df_X_train = df_train.drop(['ID', 'LABEL'], axis=1) X_train = df_X_train.values #X_train[X_train.columns.values.tolist()] = X_train[X_train.columns.values.tolist()].astype(np.float32) Y_train = df_train[ 'LABEL' ].values # """ # gbc = GradientBoostingClassifier() # gbc.fit(X_train, Y_train) score_evaluation_df = pd.read_csv('id_time_features.csv') X_val = score_evaluation_df.drop(['ID','TIME'],axis = 1) # """ clf = AdaBoostClassifier(base_estimator = 
DecisionTreeClassifier(min_samples_leaf =7) , n_estimators=75) clf.fit(X_train , Y_train) predicted_probabilities = clf.predict_proba(X_val.values) #predicted_labels = clf.predict(X_val).tolist() # predicted_labels = [] # for i in range(len(predicted_probabilities)): # if predicted_probabilities[i][1] > 0.6: # predicted_labels.append(1) # else : # predicted_labels.append(0) # temp = score_evaluation_df[['ID' , 'TIME']] # temp['PREDICTION'] = pd.Series(predicted_labels) # temp['PROBABILITY'] = pd.Series(predicted_probabilities[:,1]) # output_df = temp # with open('output.csv' , 'wb') as file: # wr = csv.writer(file) # wr.writerow(['ID' , 'TIME' , 'PREDICTION']) # wr.writerows(output_df.values) # final_score = evaluate_score() # print final_score #rf = RandomForestClassifier(random_state=0) #f = RandomForestClassifier(random_state=0).fit(X_train, Y_train) #y_prob_train = f.predict_proba(X_train) #y_prob_test = f.predict_proba(X_val) #y_pred_test_rf = f.predict(X_val) #f_t = ThresholdingOptimization().fit(y_prob_train, cost_mat_train, Y_train) #y_pred_test_rf_t = f_t.predict(y_prob_test) #c.fit(X_train, Y_train, cost_mat_train) #y_pred_test_c = c.predict(X_val, cost_mat_val) #y_prob_test_c = c.predict_proba(X_val) #rf.fit(X_train, Y_train) #y_pred_test_rf = rf.predict(X_val) #y_prob_test_rf = rf.predict_proba(X_val) ##y_prob_train_rf = rf.predict_proba(X_train) #predicted_y_c = y_pred_test_c.tolist() #predicted_y_c = y_pred_test_c.tolist() #predicted_y_rf = y_pred_test_rf.tolist() #predicted_y_csrf = y_pred_test_csrf.tolist() """ training ends here """ """ Validation starts here """ output = [] w = csv.writer(open('id_time_features.csv','wb')) w.writerow(['ID','TIME']+selected_features) min_id_val = df_age_val['ID'].min() max_id_val = df_age_val['ID'].max() grouped_by_id_val = df_variables_combined_val.groupby('ID') print 'Reached the most time-taking part\n' for i in range(min_id_val , max_id_val + 1): print 'On patient_ID ' , i current_patient_df_val = 
grouped_by_id_val.get_group(i) counter = 0 for j,row in current_patient_df_val.iterrows(): flag1 = 0 l15_first_in_icu_val = normal_values['L15'] flag2 = 0 l22_first_in_icu_val = normal_values['L22'] if row[-1] == 1: if (not flag1) & ( not math.isnan(row[15]) ): l15_first_in_icu_val = row[15] flag1 = 1 if (not flag2) & ( not math.isnan(row[21]) ): l22_first_in_icu_val = row[21] flag2 = 1 data_so_far_df_val = current_patient_df_val.head(counter + 1) #pass this data to the function which will do stuff with this and return a np.array which is supposed to be the #input feature vector for our classifier model feature_vector_val = get_patient_feature_values_val(i, data_so_far_df_val.drop(['ID','TIME'],axis = 1)) feature_vector_val.append(l15_first_in_icu_val) feature_vector_val.append(l22_first_in_icu_val) feature_vector_val = np.array(feature_vector_val) feature_vector_val = np.array( [feature_vector_val]) w.writerow([i , row[1]]+feature_vector_val.tolist()[0] ) y_predicted_val = clf.predict(feature_vector_val)[0] """ Space left for the classification code of prediciton . 
At the end of this code , we will have a value as 0 or 1 which is the PREDICTION for this patient for this Timestamp Let's have it returned to the variable 'predicted' """ output.append([row[0] , row[1] , y_predicted_val]) counter = counter + 1 print 'Patient i done\n' #now write the output to a csv file wr = csv.writer(open('output1.csv' , 'wb')) """ this is for the header , just for your own evaluation , but DON'T include this for Submission """ wr.writerow(['ID' , 'TIME' , 'PREDICTION']) wr.writerows(output) # final_score = evaluate_score() # print final_score """ Validation ends here """ """ Testing starts here """ #output_test = [] # #min_id_test = df_age_test['ID'].min() #max_id_test = df_age_test['ID'].max() # #grouped_by_id_test = df_variables_combined_test.groupby('ID') # #for i in range(min_id_test , max_id_test): # # current_patient_df_test = grouped_by_id_test.get_group(i) # for j,row in current_patient_df_test.iterrows(): # #lets assume the index of ICU in the dataframe we have is 'index' # #start predicting if the patient goes to ICU # """Here DON'T forget to append L15 , L22 to # the last of the np.array you will be passing to the model, # just do np.append """ # # flag = 0 # if row[0] == 1: # here in row[0], 0 is used because the column number of ICU in current_patient_df is 0 # if not flag: # l15_first_in_icu_test = row[15] # l22_first_in_icu_test = row[21] # flag = 1 # data_so_far_df_test = current_patient_df_test.head(j+1) # #pass this data to the function which will do stuff with this and return a np.array which is supposed to be the # #input feature vector for our classifier model # # feature_vector_test = get_patient_feature_values(i, data_so_far_df_test.drop(['ID','TIME'],axis = 1)) # # feature_vector_test.append(l15_first_in_icu_test) # feature_vector_test.append(l22_first_in_icu_test) # # feature_vector_test = np.array(feature_vector_test) # feature_vector_test = np.array( [feature_vector_test] ) # # # y_predicted_test = 
c.predict(feature_vector_test)[0] # # """ # Space left for the classifierode of prediciton . # At the end of this code , we will have a value as 0 or 1 which is the PREDICTION for this patient for this Timestamp # Let's have it returned to the variable 'predicted' # """ # # output_test.append([row[0] , row[1] , y_predicted_test]) # ##now write the output to a csv file #wr = csv.writer(open('output_test.csv' , 'wb')) #""" this is for the header , just for your own evaluation , but DON'T include this for Submission """ ##wr.writerows(['ID' , 'TIME' , 'PREDICTION']) #wr.writerows(output_test) # # #""" #Testing ends here #"""
#coding=utf-8 __author__ = 'hqx' class AppRunConfig(object): capabilities = { 'platformName':'Android', # 'platformVersion':'5.0.2', 'deviceName':'dq6lburkcqhylb6d', #钉钉 'appPackage':'com.alibaba.android.rimet', 'appActivity':'com.alibaba.android.rimet.biz.LaunchHomeActivity', #'unicodeKeyboard':True, #'resetKeyboard':True, 'noReset':True} #'unicodeKeyboard':True, #'resetKeyboard':False}
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

from odoo import api, fields, models
import json as simplejson


class Site(models.Model):
    """A delivery site.

    Belongs to a site group, owns groups and daily counters, and carries the
    connection details (server address, credentials, ports) used to reach it.
    """
    _name = 'htc.site'
    _inherit = 'mail.thread'

    # BUG FIX: the original spelled the keyword ``requierd`` on several fields;
    # Odoo ignores the unknown kwarg, so those fields were never actually
    # required. Corrected to ``required=True``.
    site_group_id = fields.Many2one("htc.site.group", string="Site Group Code", required=True)
    site_name = fields.Char("Site Name", required=True)
    site_code = fields.Char("Site Code", required=True)
    delivery_method = fields.Selection(
        [("ftp", "FTP"), ('email', 'Email'), ('real_time', 'Real Time'), ('batch', 'Batch')],
        string="Delivery Method", required=True)
    group_ids = fields.One2many("htc.group", "site_id", string="Groups")
    daily_counter_ids = fields.One2many("htc.daily_counter", "site_id", string="Daily Counts")

    # NOTE(review): the constraint only covers site_code, but the error message
    # mentions "Prefix and Site Group Code" — confirm the intended uniqueness key.
    _sql_constraints = [
        ('prefix_code_unique', 'UNIQUE(site_code)',
         "Can't be duplicate value for Prefix and Site Group Code!")
    ]

    server_address = fields.Char("Server Address", required=True)
    user_id = fields.Char("User Name", required=True)
    password = fields.Char("Password", required=True)
    http_port = fields.Char("Http Port")
    https_port = fields.Char("Https Port")
    timezone_name = fields.Char("Timezone Name")
    ip_range = fields.Char("IP Range", required=True)
    interface_code = fields.Char("Interface Code")

    @api.multi
    def name_get(self):
        """Display each site by its site code instead of the default name field."""
        result = []
        for record in self:
            result.append((record.id, record.site_code))
        return result
#!/usr/bin/env python
# NOTE: Python 2 code (print statements, ``long``); runs under Tornado.
import tornado.web
import tornado.httpserver
import tornado.database
from data.conversation import ConversationData
from data.message import MessageData
from data.deal import DealData
from data.user import UserData
import json


class ConvHandler(tornado.web.RequestHandler):
    """Handles deal-scoped conversations: sending messages, listing contacts,
    fetching message history, and marking messages read.

    POST dispatches on the integer argument ``t``:
      0 = send a message, 1 = list contacts, 2 = list messages,
      3 = mark messages as read.
    GET renders the invoking user's conversation/deal list (paginated).
    """

    @property
    def db(self):
        # Shared database connection owned by the Tornado application.
        return self.application.db

    def validInt(self, v):
        """Return ``v`` as int, or None if it does not parse."""
        try:
            return int(v)
        except:
            return None

    def validLong(self, v):
        """Return ``v`` as long, or None if it does not parse (Python 2 only)."""
        try:
            return long(v)
        except:
            return None

    def post(self):
        # Dispatch key; silently ignore requests without a valid integer ``t``.
        type = self.get_argument("t", None)
        if not type:
            return
        try:
            type = int(type)
        except:
            return
        if type==0:
            # --- t=0: store a new message in a conversation ---
            deal_id = self.get_argument("dealid", None)
            uid = self.get_argument("uid", None)  #the one who sends the message
            with_uid = self.get_argument("to", None)  #the one who receives the message
            message = self.get_argument("msg", None)
            if (not deal_id) or (not uid) or (not with_uid) or (not message):
                self.write("{\"err\": \"not enough arguments\"}")
                return
            # Validate the numeric ids; invalid input is logged and dropped.
            if self.validLong(deal_id)==None:
                print "invalid deal_id in ConvHandler.py post"
                return
            if self.validLong(uid)==None:
                print "invalid uid_id in ConvHandler.py post"
                return
            if self.validLong(with_uid)==None:
                print "invalid with_uid in ConvHandler.py post"
                return
            #insert the message into message table and get the message id
            message_data = MessageData(self.db)
            mid = message_data.insert_message(message)
            if mid==None:
                self.write("{\"err\": \"error in saving message\"}")
                return
            #insert the conversation into the conversation table twice
            # (one row per participant; sender's copy is marked read/sent=1,1,
            # recipient's copy 0,0 — confirm flag semantics in ConversationData).
            conversation_data = ConversationData(self.db)
            cid = conversation_data.insert_new_conv(deal_id, uid, with_uid, mid, 1, 1)
            if cid==None:
                self.write("{\"err\": \"error in saving conversation\"}")
                return
            if uid!=with_uid:
                cid = conversation_data.insert_new_conv(deal_id, with_uid, uid, mid, 0, 0)
                if cid==None:
                    self.write("{\"err\": \"error in saving conversation\"}")
                    return
            self.write("{\"success\": \"true\"}")
        elif type==1:
            # --- t=1: list the user's contacts for a deal, with unread counts ---
            deal_id = self.get_argument("dealid", None)
            uid = self.get_argument("uid", None)
            if (not deal_id) or (not uid):
                self.write("{\"err\": \"not enough arguments\"}")
                return
            print deal_id
            if self.validLong(deal_id) == None:
                print "invalid deal_id in ConvHandler.py post"
                return
            if self.validLong(uid) == None:
                print "invalid uid_id in ConvHandler.py post"
                return
            # retrieve all contact ranked by date
            user_data = UserData(self.db)
            conversation_data = ConversationData(self.db)
            contacts = conversation_data.retrieve_contacts(deal_id, uid)
            result = []
            for contact in contacts:
                with_uid = contact["with_uid"]
                user_info = user_data.retrieve(with_uid)
                entry = {}
                entry["username"] = user_info["username"]
                entry["id"] = with_uid
                # retrieve the unread number for each contact
                count = conversation_data.retrieve_unread_message_num(deal_id, uid, with_uid)
                entry["unread"] = count[0]["count"];
                result.append(entry)
            self.write(json.dumps(result))
        elif type==2:
            # --- t=2: full message history between uid and with_uid for a deal ---
            deal_id = self.get_argument("dealid", None)
            uid = self.get_argument("uid", None)
            with_uid = self.get_argument("with_uid", None)
            if (not deal_id) or (not uid) or (not with_uid):
                self.write("{\"err\": \"not enough arguments\"}")
                return
            conversation_data = ConversationData(self.db)
            messages = conversation_data.retrieve_messages(deal_id, uid, with_uid)
            # No conversation yet: respond with an empty body.
            if (not messages) or len(messages)==0:
                return
            message_data = MessageData(self.db)
            result = []
            for message in messages:
                entry = {}
                msg_id = message["msg_id"]
                # Message text lives in a separate table, keyed by msg_id.
                content= message_data.retrieve_message(msg_id)
                entry["message"] = content["message"]
                entry["msg_id"] = msg_id
                entry["created_at"] = str(message["created_at"])
                entry["is_sender"] = message["is_sender"]
                result.append(entry)
            self.write(json.dumps(result))
        elif type==3:
            # --- t=3: mark every message from with_uid as read ---
            deal_id = self.get_argument("dealid", None)
            uid = self.get_argument("uid", None)
            with_uid = self.get_argument("with_uid", None)
            if (not deal_id) or (not uid) or (not with_uid):
                self.write("{\"err\": \"not enough arguments\"}")
                return
            conversation_data = ConversationData(self.db)
            conversation_data.make_messages_read(deal_id, uid, with_uid)

    def get(self):
        """Render the user's deal/conversation list, 4 deals per page (``p``)."""
        uid = self.get_argument("uid", None)
        if not uid:
            return
        limit = 4  # deals per page
        offset = self.get_argument("p", None)
        if offset==None:
            offset=0
        else:
            try:
                offset=int(offset)
            except:
                return;
        conversation_data = ConversationData(self.db)
        entries = conversation_data.retrieve_deals(uid, offset*limit, limit)
        if entries!=None:
            deal_data = DealData(self.db)
            deals = []
            for entry in entries:
                deal_info = deal_data.retrieve_deal_info(entry["dealid"])
                # ``entry`` is rebound here; the original row is no longer needed.
                entry = {}
                entry["simple_desc"] = deal_info["simple_desc"]
                entry["created_at"] = str(deal_info["created_at"])
                entry["id"] = deal_info["id"]
                entry["min_price"] = deal_info["min_price"]
                entry["max_price"] = deal_info["max_price"]
                deals.append(entry)
            self.render("conversation.html", deals=deals, offset=offset, uid=uid)
        else:
            self.render("error.html", type=0, msg="No conversation exists")
"""Author - Rahul Mehta

Train and evaluate an LSTM semantic-role-labelling classifier.

Reads the processed SRL dataset, label-encodes the targets, trains
``SRL_LSTM`` with cross-entropy loss, and writes loss/accuracy curves, a
confusion matrix, classification reports, and the model weights to disk.
"""
import torch
import pandas as pd
import numpy as np
import seaborn as sns
from SRL_NN_Classifier import SRL_LSTM
from sklearn.metrics import classification_report
from SRL_dataset import SRL_dataset
import torch.optim as optim
from torch.utils.data import DataLoader
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import torch.nn as nn
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import argparse


def label_encode(y):
    """Label-encode a target series.

    Returns (encoded array, list of class names, fitted LabelEncoder).
    """
    le = LabelEncoder()
    y = le.fit_transform(y)
    return (y, list(le.classes_), le)


def model_accuracy(predict, y):
    """Fraction of predictions equal to the targets (torch tensors)."""
    true_predict = (predict == y).float()
    acc = true_predict.sum() / len(true_predict)
    return (acc)


def train_nn(model, dataloader, testloader, epochs, optimizer, criterion):
    """Train ``model`` and evaluate it on ``testloader`` after each epoch.

    Returns (train_loss_list, test_loss_list, train_acc_list, test_acc_list,
    all_preds, all_y, epoch_list) where ``all_preds``/``all_y`` are the
    predictions/targets collected during the final epoch's evaluation pass.

    BUG FIX: the original evaluation loop also called
    ``optimizer.zero_grad()``, ``loss.backward()`` and ``optimizer.step()``,
    i.e. it kept training the model on the *test* set. The evaluation pass now
    runs under ``torch.no_grad()`` and performs no parameter updates.
    """
    epoch_list = []
    train_loss_list = []
    test_loss_list = []
    train_acc_list = []
    test_acc_list = []
    for epoch in range(epochs):
        # ---- training pass ----
        total_loss = 0.0
        total_acc = 0.0
        for emb, y in dataloader:
            batch_size = emb.shape[0]
            # Model expects input of shape (seq_len=1, batch, embedding_dim).
            preds = model(emb.view([1, batch_size, 300]))
            loss = criterion(preds, y)
            preds = torch.argmax(preds, dim=1)
            acc = sum(preds == y) / float(batch_size)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
            total_acc += acc.item()
        print("train loss on epoch {epoch} is {loss} and training accuracy {accuracy}".format(
            epoch=epoch, loss=(total_loss / len(dataloader)), accuracy=(total_acc / len(dataloader))))
        train_acc_list.append((total_acc / len(dataloader)))
        train_loss_list.append((total_loss / len(dataloader)))

        # ---- evaluation pass (no gradient tracking, no parameter updates) ----
        test_loss = 0.0
        test_acc = 0.0
        all_preds = np.zeros(0)
        all_y = np.zeros(0)
        with torch.no_grad():
            for emb, y in testloader:
                batch_size = emb.shape[0]
                preds = model(emb.view([1, batch_size, 300]))
                loss = criterion(preds, y)
                preds = torch.argmax(preds, dim=1)
                acc = sum(preds == y) / float(batch_size)
                all_preds = np.append(all_preds, np.array(preds))
                all_y = np.append(all_y, np.array(y))
                test_loss += loss.item()
                test_acc += acc.item()
        print("test loss on epoch {epoch} is {loss} and test accuracy {accuracy}".format(
            epoch=epoch, loss=(test_loss / len(testloader)), accuracy=(test_acc / len(testloader))))
        test_acc_list.append((test_acc / len(testloader)))
        test_loss_list.append((test_loss / len(testloader)))
        epoch_list.append(epoch)
    return (train_loss_list, test_loss_list, train_acc_list, test_acc_list,
            all_preds, all_y, epoch_list)


if __name__ == "__main__":
    # Read dataset and encode the SRL labels as integers.
    df = pd.read_csv("../data/processed/interim.txt")
    df['srl'], classes, le = label_encode(df['srl'])

    # Train/test split (stateless 2/3 - 1/3 split, fixed seed for repeatability).
    X_train, X_test, y_train, y_test = train_test_split(
        df.drop(['srl'], axis=1), df['srl'], test_size=0.33, random_state=123)
    print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
    df_train = pd.concat([X_train, y_train], axis=1)
    df_test = pd.concat([X_test, y_test], axis=1)
    train_dataset = SRL_dataset(df_train)
    test_dataset = SRL_dataset(df_test)

    # Hyperparameters come from the command line.
    parser = argparse.ArgumentParser()
    parser.add_argument('--EMBEDDING_DIM', type=int)
    parser.add_argument('--NUM_HIDDEN_NODES', type=int)
    parser.add_argument('--epochs', type=int)
    parser.add_argument('--batchsize', type=int)
    parser.add_argument('--learning_rate', type=float)
    args = parser.parse_args()

    EMBEDDING_DIM = args.EMBEDDING_DIM
    NUM_HIDDEN_NODES = args.NUM_HIDDEN_NODES
    NUM_OUTPUT_NODES = 1
    # NOTE(review): hard-coded class count; consider ``len(classes)`` so it
    # tracks the dataset — confirm before changing.
    NUM_CLASSES = 23
    epochs = args.epochs
    batchsize = args.batchsize
    learning_rate = args.learning_rate
    print(EMBEDDING_DIM, NUM_HIDDEN_NODES, epochs, batchsize, learning_rate)

    model = SRL_LSTM(embeddings_dim=EMBEDDING_DIM, hidden_dim=NUM_HIDDEN_NODES,
                     output_dim=NUM_OUTPUT_NODES, num_class=NUM_CLASSES,
                     pretrained_embeddings=None)
    print(model)
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    criterion = nn.CrossEntropyLoss()

    # Data loaders (no shuffling, to keep runs reproducible).
    dataloader = DataLoader(dataset=train_dataset, batch_size=batchsize, shuffle=False, num_workers=0)
    testloader = DataLoader(dataset=test_dataset, batch_size=batchsize, shuffle=False, num_workers=0)

    # Train, then report metrics from the final evaluation pass.
    train_loss, test_loss, train_acc, test_acc, preds, Y, epoch_list = train_nn(
        model, dataloader, testloader, epochs, optimizer, criterion)

    conf_matrix = pd.DataFrame(confusion_matrix(Y, preds))
    print(conf_matrix)
    sns.set(font_scale=1.4)  # for label size
    sns_plot = sns.heatmap(conf_matrix, annot=True, annot_kws={"size": 8}).get_figure()  # font size
    class_report = pd.DataFrame(classification_report(Y, preds, output_dict=True)).transpose()
    df_results = pd.DataFrame(list(zip(train_loss, test_loss, train_acc, test_acc)),
                              columns=['Train Loss', 'Test Loss', 'Train Accuracy', 'Test Accuracy'])

    # Accuracy curve.
    plt.figure(figsize=(10, 5))
    plt.title("Training and Validation Accuracy")
    plt.plot(epoch_list, train_acc)
    plt.plot(epoch_list, test_acc)
    plt.xlabel("Epochs")
    plt.ylabel("Accuracy")
    plt.legend(['Train Accuracy', 'Test Accuracy'])
    plt.savefig('../data/results/NN_train_test_accuracy_bilstm_50e.png')

    # Loss curve.
    plt.figure(figsize=(10, 5))
    plt.title("Training and Validation Loss")
    plt.plot(epoch_list, train_loss)
    plt.plot(epoch_list, test_loss)
    plt.xlabel("Epochs")
    plt.ylabel("Loss")
    plt.legend(['Train Loss', 'Test Loss'])
    plt.savefig('../data/results/NN_train_test_loss_bilstm_50e.png')

    class_report.to_csv("../data/results/classification_report_bilstm_50e.csv")
    sns_plot.savefig("../data/results/confusion_matrix_bilstm_50e.png")
    df_results.to_csv("../data/results/classifier_report_bilstm_50e.csv", index=None, sep=',')

    PATH = '../models/srl_hindi_bilstm_50e.pth'
    torch.save(model.state_dict(), PATH)
from django.shortcuts import render
from .models import Article, ArticleClassification, Tag, Mood
from django.core.paginator import Paginator, InvalidPage, EmptyPage, PageNotAnInteger
from django.db.models import Q
from django.views.generic.base import View
# from django.views.decorators.csrf import csrf_exempt

# Create your views here.


def int_page(total, per):
    """Number of pages needed to show ``total`` items at ``per`` per page (ceil division)."""
    if total / per > total // per:
        return total // per + 1
    else:
        return total // per


def index(request):
    """Home page: project showcase, newest articles, pinned article, categories, tags."""
    articles_object = Article.objects.filter(is_object=True).all()  # articles flagged as projects
    if len(articles_object) > 6:
        articles_object = articles_object[:6]  # show at most six projects
    article_news = Article.objects.order_by('-public_time')  # all articles, newest first ("latest" sidebar)
    # Pinned/top article.
    # NOTE(review): ``[0]`` raises IndexError when no article has
    # is_recommend=True — confirm the data always contains one.
    article_top = Article.objects.filter(is_recommend=True).all()[0]
    classes = ArticleClassification.objects.all()  # all categories
    tags = Tag.objects.all()  # all article tags
    kwgs = {
        "article_top": article_top,
        "articles": articles_object,
        "article_news": article_news,
        "classes": classes,
        "tags": tags
    }
    return render(request, "index.html", kwgs)


def about(request):
    """Static about page."""
    return render(request, "about.html")


class _Article(View):
    """Paginated article list; GET renders page 1, POST returns a requested page fragment.

    NOTE(review): these class attributes (querysets, Paginator, page_num) are
    evaluated once at import time, so newly published articles will not appear
    until the process restarts — confirm whether this caching is intentional.
    """
    per_page = 4
    articles = Article.objects.order_by('-public_time')
    classes = ArticleClassification.objects.all()  # all categories
    tags = Tag.objects.all()  # all article tags
    paginator = Paginator(articles, per_page)
    article_news = Article.objects.order_by('-public_time')
    page_num = int_page(len(articles), per_page)

    def get(self, request):
        # First page of the article list plus sidebar data.
        search_articles = self.paginator.page(1)
        kwgs = {
            "page_num": self.page_num,
            "articles": search_articles,
            "article_news": self.articles,
            "classes": self.classes,
            "tags": self.tags
        }
        return render(request, "article.html", kwgs)

    def post(self, request):
        # AJAX pagination: render only the result fragment for page ``page``.
        page = int(request.POST.get("page", 1))
        try:
            search_articles = self.paginator.page(page)
        except(EmptyPage, InvalidPage, PageNotAnInteger) as ex:
            search_articles = self.paginator.page(1)  # fall back to page 1 on bad input
        kwgs = {
            "articles": search_articles,
            "article_news": search_articles,
        }
        return render(request, "request_result.html", kwgs)


def article_detail(request, id):
    """Article detail page; counts one view per session/IP and links prev/next articles."""
    id = int(id)
    article = Article.objects.get(id=id)
    # One view per (client IP, article) pair, deduplicated via the session.
    ip = request.META["REMOTE_ADDR"]
    sessionid = ip + str(id)
    if not request.session.get(sessionid):
        article.view_times += 1
        article.save()
        request.session[sessionid] = True
    # Previous/next navigation assumes contiguous ids; missing neighbours are tolerated.
    try:
        article_pre = Article.objects.get(id=id-1)
    except:
        article_pre = None
    try:
        article_next = Article.objects.get(id=id+1)
    except:
        article_next = None
    article_news = Article.objects.order_by('-public_time')
    kwgs = {
        "article": article,
        "article_news": article_news,
        "article_pre": article_pre,
        "article_next": article_next
    }
    return render(request, "article_detail.html", kwgs)


class _Mood(View):
    """Paginated mood/diary list; GET renders page 1, POST returns a page fragment.

    NOTE(review): like ``_Article``, the queryset and paginator are built at
    import time and will not reflect newly added moods without a restart.
    """
    moods = Mood.objects.order_by('-date')
    per_page = 10
    paginator = Paginator(moods, per_page)

    def get(self, request):
        page_num = int_page(len(self.moods), self.per_page)
        moods = self.paginator.page(1)
        kwgs = {
            "moods": moods,
            "page_num": page_num
        }
        return render(request, "moodList.html", kwgs)

    def post(self, request):
        page = int(request.POST.get("page", 1))
        moods = self.paginator.page(page)
        kwgs = {
            "moods": moods
        }
        return render(request, "Request_mood_list.html", kwgs)


class Search(View):
    """Keyword search over abstract/title/content; GET renders results, POST paginates."""
    per_page = 3

    def get(self, request):
        keywords = request.GET.get("keywords", "")  # search keyword
        articles = Article.objects.filter(Q(abstract__icontains=keywords)  # database query
                                          | Q(title__icontains=keywords)
                                          | Q(content__icontains=keywords))
        paginator = Paginator(articles, self.per_page)  # paginate
        search_articles = paginator.page(1)
        search_article_num = len(articles)  # total matching articles
        page_num = int_page(search_article_num, self.per_page)
        article_news = Article.objects.order_by('-public_time')  # newest-first sidebar
        classes = ArticleClassification.objects.all()  # all categories
        tags = Tag.objects.all()  # all article tags
        if len(articles) == 0:
            # No hits: render the dedicated "no result" page.
            kwgs = {
                "article_news": article_news,
                "classes": classes,
                "tags": tags,
                "keywords": keywords
            }
            return render(request, "Noresult.html", kwgs)
        kwgs = {
            "search_article_num": search_article_num,
            "page_num": page_num,
            "articles": search_articles,
            "article_news": article_news,
            "classes": classes,
            "tags": tags,
            "keywords": keywords
        }
        return render(request, "search.html", kwgs)

    def post(self, request):
        # NOTE(review): keywords are read from request.GET inside a POST
        # handler — confirm the client really sends them in the query string.
        keywords = request.GET.get("keywords", "")  # search keyword
        page = int(request.POST.get("page", 1))  # requested page number
        articles = Article.objects.filter(Q(abstract__icontains=keywords)  # database query
                                          | Q(title__icontains=keywords)
                                          | Q(content__icontains=keywords))
        paginator = Paginator(articles, self.per_page)  # paginate
        search_articles = paginator.page(page)
        kwgs = {
            "articles": search_articles,
            "keywords": keywords
        }
        return render(request, "request_result.html", kwgs)


def Class(request, id):
    """Articles filtered by category id, rendered with the shared article template."""
    articles = Article.objects.filter(article_class=id).all()
    article_news = Article.objects.order_by('-public_time')
    classes = ArticleClassification.objects.all()  # all categories
    tags = Tag.objects.all()  # all article tags
    kwgs = {
        "articles": articles,
        "article_news": article_news,
        "classes": classes,
        "tags": tags
    }
    return render(request, "article.html", kwgs)


def tag(request, id):
    """Articles filtered by tag id, rendered with the shared article template."""
    articles = Article.objects.filter(article_tag=id).all()
    article_news = Article.objects.order_by('-public_time')
    classes = ArticleClassification.objects.all()  # all categories
    tags = Tag.objects.all()  # all article tags
    kwgs = {
        "articles": articles,
        "article_news": article_news,
        "classes": classes,
        "tags": tags
    }
    return render(request, "article.html", kwgs)
from direct.showbase import GarbageReport
from otp.ai.AIBaseGlobal import *
from otp.ai.MagicWordGlobal import *
from otp.avatar import DistributedAvatarAI
from otp.avatar import PlayerBase
from otp.distributed.ClsendTracker import ClsendTracker
from otp.otpbase import OTPGlobals


class DistributedPlayerAI(DistributedAvatarAI.DistributedAvatarAI, PlayerBase.PlayerBase, ClsendTracker):
    """AI-side representation of a player-controlled avatar.

    Tracks district population on enter/exit, relays chat/friends/admin-access
    updates to the client, and validates location changes for player avatars.
    """

    def __init__(self, air):
        DistributedAvatarAI.DistributedAvatarAI.__init__(self, air)
        PlayerBase.PlayerBase.__init__(self)
        ClsendTracker.__init__(self)
        self.friendsList = []   # list of (friendId, friendCode) pairs
        self.DISLname = ''      # account (DISL) name
        self.DISLid = 0         # account (DISL) id
        self.adminAccess = 0    # staff access level (0 = regular player)

    if __dev__:
        # Dev-only: track whether the exit server event was emitted.
        def generate(self):
            self._sentExitServerEvent = False
            DistributedAvatarAI.DistributedAvatarAI.generate(self)

    def announceGenerate(self):
        DistributedAvatarAI.DistributedAvatarAI.announceGenerate(self)
        ClsendTracker.announceGenerate(self)
        self._doPlayerEnter()

    def _announceArrival(self):
        # Tell the client which district it arrived on.
        self.sendUpdate('arrivedOnDistrict', [self.air.districtId])

    def _announceExit(self):
        # District id 0 signals departure.
        self.sendUpdate('arrivedOnDistrict', [0])

    def _sendExitServerEvent(self):
        self.air.writeServerEvent('avatarExit', self.doId, '')
        if __dev__:
            self._sentExitServerEvent = True

    def delete(self):
        if __dev__:
            del self._sentExitServerEvent
        self._doPlayerExit()
        ClsendTracker.destroy(self)
        if __dev__:
            # Dev-only leak detection on avatar teardown.
            GarbageReport.checkForGarbageLeaks()
        DistributedAvatarAI.DistributedAvatarAI.delete(self)

    def isPlayerControlled(self):
        return True

    def setLocation(self, parentId, zoneId):
        DistributedAvatarAI.DistributedAvatarAI.setLocation(self, parentId, zoneId)
        if self.isPlayerControlled():
            # Boot players who move to a location the AI does not recognize.
            if not self.air._isValidPlayerLocation(parentId, zoneId):
                self.notify.info('booting player %s for doing setLocation to (%s, %s)' % (self.doId, parentId, zoneId))
                self.air.writeServerEvent('suspicious', self.doId, 'invalid setLocation: (%s, %s)' % (parentId, zoneId))
                self.requestDelete()

    def _doPlayerEnter(self):
        self.incrementPopulation()
        self._announceArrival()

    def _doPlayerExit(self):
        self._announceExit()
        self.decrementPopulation()

    def incrementPopulation(self):
        self.air.incrementPopulation()

    def decrementPopulation(self):
        # NOTE(review): uses the global ``simbase.air`` while increment uses
        # ``self.air`` — presumably the same object; confirm.
        simbase.air.decrementPopulation()

    # ``b_`` = both (local + distributed), ``d_`` = distributed only — the
    # standard OTP naming convention for setter relays below.
    def b_setChat(self, chatString, chatFlags):
        self.setChat(chatString, chatFlags)
        self.d_setChat(chatString, chatFlags)

    def d_setChat(self, chatString, chatFlags):
        self.sendUpdate('setChat', [chatString, chatFlags])

    def setChat(self, chatString, chatFlags):
        pass

    def d_setMaxHp(self, maxHp):
        DistributedAvatarAI.DistributedAvatarAI.d_setMaxHp(self, maxHp)
        # Log max-HP changes for auditing.
        self.air.writeServerEvent('setMaxHp', self.doId, '%s' % maxHp)

    def d_setSystemMessage(self, aboutId, chatString):
        self.sendUpdate('setSystemMessage', [aboutId, chatString])

    def d_setCommonChatFlags(self, flags):
        self.sendUpdate('setCommonChatFlags', [flags])

    def setCommonChatFlags(self, flags):
        pass

    def d_friendsNotify(self, avId, status):
        self.sendUpdate('friendsNotify', [avId, status])

    def friendsNotify(self, avId, status):
        pass

    def setAccountName(self, accountName):
        self.accountName = accountName

    def getAccountName(self):
        return self.accountName

    def setDISLid(self, id):
        self.DISLid = id

    def getDISLid(self):
        return self.DISLid

    def d_setFriendsList(self, friendsList):
        self.sendUpdate('setFriendsList', [friendsList])

    def setFriendsList(self, friendsList):
        self.friendsList = friendsList
        self.notify.debug('setting friends list to %s' % self.friendsList)

    def getFriendsList(self):
        return self.friendsList

    def setAdminAccess(self, access):
        self.adminAccess = access

    def d_setAdminAccess(self, access):
        self.sendUpdate('setAdminAccess', [access])

    def b_setAdminAccess(self, access):
        self.setAdminAccess(access)
        self.d_setAdminAccess(access)

    def getAdminAccess(self):
        return self.adminAccess

    def extendFriendsList(self, friendId, friendCode):
        """Add (friendId, friendCode), replacing any existing entry for friendId."""
        for i in xrange(len(self.friendsList)):
            friendPair = self.friendsList[i]
            if friendPair[0] == friendId:
                self.friendsList[i] = (friendId, friendCode)
                return
        self.friendsList.append((friendId, friendCode))
@magicWord(category=CATEGORY_ADMINISTRATOR, types=[str])
def system(message):
    """
    Broadcasts a message to the server.
    """
    # TODO: Make this go through the UberDOG, rather than the AI server.
    for doId, do in simbase.air.doId2do.items():
        if isinstance(do, DistributedPlayerAI):
            # Skip doIds whose leading digit matches the district's — presumably
            # filters out district-allocated (non-player) objects; confirm.
            if str(doId)[0] != str(simbase.air.districtId)[0]:
                do.d_setSystemMessage(0, message)


# Shared name -> numeric access level table, previously duplicated inline in
# both accessLevel() and ownLevel().
# NOTE(review): the original dicts listed the key 'c' twice (community manager,
# then creative). Python keeps the last duplicate, so 'c' resolved to the
# creative level; that behavior is preserved with the single entry below.
_ACCESS_NAME_2_ID = {
    'user': CATEGORY_USER.defaultAccess,
    'u': CATEGORY_USER.defaultAccess,
    'communitymanager': CATEGORY_COMMUNITY_MANAGER.defaultAccess,
    'community': CATEGORY_COMMUNITY_MANAGER.defaultAccess,
    'moderator': CATEGORY_MODERATOR.defaultAccess,
    'mod': CATEGORY_MODERATOR.defaultAccess,
    'm': CATEGORY_MODERATOR.defaultAccess,
    'creative': CATEGORY_CREATIVE.defaultAccess,
    'creativity': CATEGORY_CREATIVE.defaultAccess,
    'c': CATEGORY_CREATIVE.defaultAccess,
    'programmer': CATEGORY_PROGRAMMER.defaultAccess,
    'coder': CATEGORY_PROGRAMMER.defaultAccess,
    'p': CATEGORY_PROGRAMMER.defaultAccess,
    'administrator': CATEGORY_ADMINISTRATOR.defaultAccess,
    'admin': CATEGORY_ADMINISTRATOR.defaultAccess,
    'a': CATEGORY_ADMINISTRATOR.defaultAccess,
    'systemadministrator': CATEGORY_SYSTEM_ADMINISTRATOR.defaultAccess,
    'systemadmin': CATEGORY_SYSTEM_ADMINISTRATOR.defaultAccess,
    'sysadministrator': CATEGORY_SYSTEM_ADMINISTRATOR.defaultAccess,
    'sysadmin': CATEGORY_SYSTEM_ADMINISTRATOR.defaultAccess,
    'system': CATEGORY_SYSTEM_ADMINISTRATOR.defaultAccess,
    'sys': CATEGORY_SYSTEM_ADMINISTRATOR.defaultAccess,
    's': CATEGORY_SYSTEM_ADMINISTRATOR.defaultAccess
}


def _resolveAccessLevel(accessLevel):
    """Resolve a name-or-number access level to a validated int, or None if invalid."""
    try:
        accessLevel = int(accessLevel)
    except (ValueError, TypeError):
        if accessLevel not in _ACCESS_NAME_2_ID:
            return None
        accessLevel = _ACCESS_NAME_2_ID[accessLevel]
    # Numeric input must still match one of the known category levels.
    if accessLevel not in _ACCESS_NAME_2_ID.values():
        return None
    return accessLevel


@magicWord(category=CATEGORY_ADMINISTRATOR, types=[str, str, int])
def accessLevel(accessLevel, storage='PERSISTENT', showGM=1):
    """
    Modify the target's access level.

    accessLevel -- a category name (e.g. 'admin') or numeric level.
    storage     -- 'SESSION'/'TEMP'/'TEMPORARY' for session-only, anything
                   else persists to the account database.
    showGM      -- also mirror the level onto the GM badge.
    """
    accessLevel = _resolveAccessLevel(accessLevel)
    if accessLevel is None:
        return 'Invalid access level!'
    target = spellbook.getTarget()
    invoker = spellbook.getInvoker()
    target.b_setAdminAccess(accessLevel)
    if showGM:
        target.b_setGM(accessLevel)
    temporary = storage.upper() in ('SESSION', 'TEMP', 'TEMPORARY')
    if not temporary:
        # Persist the new level on the account object.
        target.air.dbInterface.updateObject(
            target.air.dbId,
            target.getDISLid(),
            target.air.dclassesByName['AccountAI'],
            {'ACCESS_LEVEL': accessLevel})
        target.d_setSystemMessage(0, '{0} set your access level to {1}!'.format(invoker.getName(), accessLevel))
        return "{0}'s access level has been set to {1}.".format(target.getName(), accessLevel)
    else:
        target.d_setSystemMessage(0, '{0} set your access level to {1} temporarily!'.format(invoker.getName(), accessLevel))
        return "{0}'s access level has been set to {1} temporarily.".format(target.getName(), accessLevel)


@magicWord(category=CATEGORY_ADMINISTRATOR, types=[str, str, int])
def ownLevel(accessLevel, storage='PERSISTENT', showGM=1):
    """
    Modify the invoker's own access level (same arguments as accessLevel).
    """
    accessLevel = _resolveAccessLevel(accessLevel)
    if accessLevel is None:
        return 'Invalid access level!'
    target = spellbook.getTarget()
    invoker = spellbook.getInvoker()
    invoker.b_setAdminAccess(accessLevel)
    if showGM:
        invoker.b_setGM(accessLevel)
    temporary = storage.upper() in ('SESSION', 'TEMP', 'TEMPORARY')
    if not temporary:
        invoker.air.dbInterface.updateObject(
            invoker.air.dbId,
            invoker.getDISLid(),
            invoker.air.dclassesByName['AccountAI'],
            {'ACCESS_LEVEL': accessLevel})
        # NOTE(review): the change is applied to the invoker, yet the return
        # message names the target — preserved from the original; confirm intent.
        invoker.d_setSystemMessage(0, '{0} set your access level to {1}!'.format(invoker.getName(), accessLevel))
        return "{0}'s access level has been set to {1}.".format(target.getName(), accessLevel)
    else:
        invoker.d_setSystemMessage(0, '{0} set your access level to {1} temporarily!'.format(invoker.getName(), accessLevel))
        return "{0}'s access level has been set to {1} temporarily.".format(target.getName(), accessLevel)


@magicWord(category=CATEGORY_ADMINISTRATOR, types=[int, str, int])
def setGuild(guildNum, storage='PERSISTENT', showGuildBadge=1):
    """
    Admin command to set a user's guild, particularly a clan leader to their
    new guild; leaders can later invite their own members and other leaders
    by themselves. Guild IDs occupy 1-99 (staff levels start at 100).
    """
    target = spellbook.getTarget()
    invoker = spellbook.getInvoker()
    if guildNum <= 99 and guildNum >= 1:
        target.b_setGM(guildNum)
        target.d_setSystemMessage(0, 'Guild ID successfully set to {0} by STAFF MEMBER {1}.'.format(guildNum, invoker.getName()))
        invoker.d_setSystemMessage(0, 'Guild ID successfully set as {0} to user {1}.'.format(guildNum, target.getName()))
    else:
        return "Invalid guild number! 1-99, numeric characters."
@magicWord(category=CATEGORY_ADMINISTRATOR, types=[int, str, int])
def myGuild(guildNum, storage='PERSISTENT', showGuildBadge=1):
    """
    Allow an admin to set their own guild; this only affects the person who
    runs the command in the chat. Guild IDs occupy 1-99.
    """
    target = spellbook.getTarget()
    invoker = spellbook.getInvoker()
    # BUG FIX: the upper bound was 99999, contradicting the error message below,
    # the parallel setGuild command, and the 1-99 guild-ID convention (staff
    # access values start at 100). Tightened to 99.
    if guildNum <= 99 and guildNum >= 1:
        invoker.b_setGM(guildNum)
        invoker.d_setSystemMessage(0, 'Guild ID successfully set to {0} by yourself, STAFF MEMBER {1}.'.format(guildNum, invoker.getName()))
    else:
        return "Invalid guild number! 1-99, numeric characters."


@magicWord(category=CATEGORY_ADMINISTRATOR)
def invite():
    """
    Add the targeted player to the invoking guild leader's guild and notify
    both parties.
    """
    target = spellbook.getTarget()    # new person being invited
    invoker = spellbook.getInvoker()  # clan leader using the command
    # The leader's guild id is stored in their admin-access slot (1-99 range;
    # staff permissions start at 100, so the ranges never overlap).
    guildId = invoker.getAdminAccess()
    # Add the target to the guild — this drives the guild badge display.
    target.b_setGM(guildId)
    invoker.d_setSystemMessage(0, 'You have added user {0} to your guild. They have been notified.'.format(target.getName()))
    target.d_setSystemMessage(0, 'ATTENTION, {0}! Guild Leader {1} has added you to their guild! (Guild ID: {2}) Congratulations!'.format(target.getName(), invoker.getName(), guildId))
    target.d_setSystemMessage(0, 'You may leave this guild at any time using ~leaveguild. Have fun!')
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.views import generic
from django.utils import timezone

from .models import CandidateRegistrationModel
from .forms import CandidateRegistrationForm


class IndexView(generic.ListView):
    """Landing page list view.

    NOTE(review): no ``model``/``queryset``/``get_queryset`` is defined here;
    ListView raises ImproperlyConfigured without one — confirm it is supplied
    elsewhere (e.g. in URLconf ``as_view`` kwargs).
    """
    template_name = 'uplyft/index.html'


def register(request):
    """Render the candidate registration form and handle its submission.

    POST with valid data redirects to '/uplyft/'; otherwise the (bound or
    fresh) form is re-rendered.
    """
    if request.method == 'POST':
        form = CandidateRegistrationForm(request.POST)
        if form.is_valid():
            # process the data in form.cleaned_data as required
            # NOTE(review): cleaned_data is currently discarded — nothing is
            # saved before the redirect; confirm persistence happens elsewhere.
            return HttpResponseRedirect('/uplyft/')
    else:
        form = CandidateRegistrationForm()
    return render(request, 'uplyft/register.html', {'form': form})
from django.db import models
from django.contrib.auth.models import User
from tours.models import Tour

# Create your models here.


class Review(models.Model):
    """A user's rating and short text review of a tour."""

    # Deleting the tour or the user cascades to their reviews.
    tour = models.ForeignKey(Tour, on_delete=models.CASCADE, related_name="reviews")
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name="reviews")
    review = models.CharField(max_length=200)
    # NOTE(review): no validators — presumably a 1-5 scale; confirm in the form layer.
    rating = models.IntegerField()

    def __str__(self):
        return f"{self.id} {self.tour} {self.rating}"
# coding: utf-8 """ CMS Performance API Use these endpoints to get a time series view of your website's performance. # noqa: E501 The version of the OpenAPI document: v3 Generated by: https://openapi-generator.tech """ try: from inspect import getfullargspec except ImportError: from inspect import getargspec as getfullargspec import pprint import re # noqa: F401 import six from hubspot.cms.performance.configuration import Configuration class PublicPerformanceResponse(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = {"data": "list[PerformanceView]", "domain": "str", "path": "str", "start_interval": "int", "end_interval": "int", "interval": "str", "period": "str"} attribute_map = {"data": "data", "domain": "domain", "path": "path", "start_interval": "startInterval", "end_interval": "endInterval", "interval": "interval", "period": "period"} def __init__(self, data=None, domain=None, path=None, start_interval=None, end_interval=None, interval=None, period=None, local_vars_configuration=None): # noqa: E501 """PublicPerformanceResponse - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration.get_default_copy() self.local_vars_configuration = local_vars_configuration self._data = None self._domain = None self._path = None self._start_interval = None self._end_interval = None self._interval = None self._period = None self.discriminator = None self.data = data if domain is not None: self.domain = domain if path is not None: self.path = path self.start_interval = start_interval self.end_interval = end_interval self.interval = interval if period is not None: self.period = period @property def data(self): """Gets the 
data of this PublicPerformanceResponse. # noqa: E501 :return: The data of this PublicPerformanceResponse. # noqa: E501 :rtype: list[PerformanceView] """ return self._data @data.setter def data(self, data): """Sets the data of this PublicPerformanceResponse. :param data: The data of this PublicPerformanceResponse. # noqa: E501 :type data: list[PerformanceView] """ if self.local_vars_configuration.client_side_validation and data is None: # noqa: E501 raise ValueError("Invalid value for `data`, must not be `None`") # noqa: E501 self._data = data @property def domain(self): """Gets the domain of this PublicPerformanceResponse. # noqa: E501 :return: The domain of this PublicPerformanceResponse. # noqa: E501 :rtype: str """ return self._domain @domain.setter def domain(self, domain): """Sets the domain of this PublicPerformanceResponse. :param domain: The domain of this PublicPerformanceResponse. # noqa: E501 :type domain: str """ self._domain = domain @property def path(self): """Gets the path of this PublicPerformanceResponse. # noqa: E501 :return: The path of this PublicPerformanceResponse. # noqa: E501 :rtype: str """ return self._path @path.setter def path(self, path): """Sets the path of this PublicPerformanceResponse. :param path: The path of this PublicPerformanceResponse. # noqa: E501 :type path: str """ self._path = path @property def start_interval(self): """Gets the start_interval of this PublicPerformanceResponse. # noqa: E501 :return: The start_interval of this PublicPerformanceResponse. # noqa: E501 :rtype: int """ return self._start_interval @start_interval.setter def start_interval(self, start_interval): """Sets the start_interval of this PublicPerformanceResponse. :param start_interval: The start_interval of this PublicPerformanceResponse. 
# noqa: E501 :type start_interval: int """ if self.local_vars_configuration.client_side_validation and start_interval is None: # noqa: E501 raise ValueError("Invalid value for `start_interval`, must not be `None`") # noqa: E501 self._start_interval = start_interval @property def end_interval(self): """Gets the end_interval of this PublicPerformanceResponse. # noqa: E501 :return: The end_interval of this PublicPerformanceResponse. # noqa: E501 :rtype: int """ return self._end_interval @end_interval.setter def end_interval(self, end_interval): """Sets the end_interval of this PublicPerformanceResponse. :param end_interval: The end_interval of this PublicPerformanceResponse. # noqa: E501 :type end_interval: int """ if self.local_vars_configuration.client_side_validation and end_interval is None: # noqa: E501 raise ValueError("Invalid value for `end_interval`, must not be `None`") # noqa: E501 self._end_interval = end_interval @property def interval(self): """Gets the interval of this PublicPerformanceResponse. # noqa: E501 :return: The interval of this PublicPerformanceResponse. # noqa: E501 :rtype: str """ return self._interval @interval.setter def interval(self, interval): """Sets the interval of this PublicPerformanceResponse. :param interval: The interval of this PublicPerformanceResponse. 
# noqa: E501 :type interval: str """ if self.local_vars_configuration.client_side_validation and interval is None: # noqa: E501 raise ValueError("Invalid value for `interval`, must not be `None`") # noqa: E501 allowed_values = ["ONE_MINUTE", "FIVE_MINUTES", "TEN_MINUTES", "FIFTEEN_MINUTES", "THIRTY_MINUTES", "ONE_HOUR", "FOUR_HOURS", "TWELVE_HOURS", "ONE_DAY", "ONE_WEEK"] # noqa: E501 if self.local_vars_configuration.client_side_validation and interval not in allowed_values: # noqa: E501 raise ValueError("Invalid value for `interval` ({0}), must be one of {1}".format(interval, allowed_values)) # noqa: E501 self._interval = interval @property def period(self): """Gets the period of this PublicPerformanceResponse. # noqa: E501 :return: The period of this PublicPerformanceResponse. # noqa: E501 :rtype: str """ return self._period @period.setter def period(self, period): """Sets the period of this PublicPerformanceResponse. :param period: The period of this PublicPerformanceResponse. # noqa: E501 :type period: str """ allowed_values = ["ONE_MINUTE", "FIVE_MINUTES", "TEN_MINUTES", "FIFTEEN_MINUTES", "THIRTY_MINUTES", "ONE_HOUR", "FOUR_HOURS", "TWELVE_HOURS", "ONE_DAY", "ONE_WEEK"] # noqa: E501 if self.local_vars_configuration.client_side_validation and period not in allowed_values: # noqa: E501 raise ValueError("Invalid value for `period` ({0}), must be one of {1}".format(period, allowed_values)) # noqa: E501 self._period = period def to_dict(self, serialize=False): """Returns the model properties as a dict""" result = {} def convert(x): if hasattr(x, "to_dict"): args = getfullargspec(x.to_dict).args if len(args) == 1: return x.to_dict() else: return x.to_dict(serialize) else: return x for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) attr = self.attribute_map.get(attr, attr) if serialize else attr if isinstance(value, list): result[attr] = list(map(lambda x: convert(x), value)) elif isinstance(value, dict): result[attr] = dict(map(lambda 
item: (item[0], convert(item[1])), value.items())) else: result[attr] = convert(value) return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, PublicPerformanceResponse): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, PublicPerformanceResponse): return True return self.to_dict() != other.to_dict()
import psycopg2 import re from backend.pg import PGBackend ''' @author: anant bhardwaj @date: Oct 3, 2013 DataHub DB wrapper for backends (only postgres implemented) ''' class Connection: def __init__(self, user, password, db_name=None): self.backend = PGBackend(user, password, db_name=db_name) def execute_sql(self, query, params=None): return self.backend.execute_sql(query, params) def list_databases(self): return self.backend.list_databases() def list_tables(self): return self.backend.list_tables() def close(self): self.backend.clos() def test(): con = Connection(user='postgres', password='postgres') print con.list_databases() try: print con.execute_sql(''' drop database test ''') print con.list_databases() except: pass print con.execute_sql(''' create database test ''') print con.list_databases() con = Connection(user='postgres', password='postgres', db_name='test') print con.list_tables() print con.execute_sql( ''' create table person (id integer, name varchar(20)) ''') con = Connection(user='postgres', password='postgres', db_name='test') print con.list_tables() print con.execute_sql(''' select * from person ''') print con.execute_sql(''' insert into person values (1, 'anant') ''') con = Connection(user='postgres', password='postgres', db_name='test') print con.execute_sql(''' select * from person ''') if __name__ == '__main__': test()
from django.db import models
from django.shortcuts import render
from random import randrange
import math

# Create your models here.


class Ingatlan(models.Model):
    """A real-estate listing (Hungarian field names kept as-is).

    All numeric-looking fields are stored as CharField, so no arithmetic
    is done on them at the database level.
    """

    tipus = models.CharField(max_length=30)        # property type
    méret = models.CharField(max_length=30)        # size
    cím = models.CharField(max_length=50)          # address
    ár = models.CharField(max_length=30)           # price (free text)
    árm2 = models.CharField(max_length=30)         # price per m2
    szoba = models.CharField(max_length=30)        # rooms
    erkély = models.CharField(max_length=30)       # balcony
    elértípus = models.CharField(max_length=30)    # contact type
    elérhetőség = models.CharField(max_length=50)  # contact detail
    kép = models.ImageField(upload_to='pics')      # photo

    class Meta:
        verbose_name = ("ingatlan")
        verbose_name_plural = ("ingatlanok")

    def __str__(self):
        return self.cím

    def feladas(post):
        """Create a listing from a raw POST mapping.

        Returns True on success, False when any required field is missing.
        NOTE(review): despite living on the model, this takes the POST dict
        as its only argument (no self) — effectively a module-level helper.
        """
        print("POST request érkezet!!! :)")
        # Compose the display price from the millions (ar1) / thousands (ar2)
        # parts; either may be blank or "0".
        teljesár = 0
        if (post['ar1'] == "0" or post['ar1'] == ""):
            teljesár = f"{post['ar2']} Ezer"
        if (post['ar2'] == "0" or post['ar2'] == ""):
            teljesár = f"{post['ar1']} Millió "
        if (post['ar1'] != "0" and post['ar2'] != "0" and post['ar1'] != "" and post['ar2'] != ""):
            teljesár = f"{post['ar1']} Millió {post['ar2']} Ezer"
        # Reject the submission when any required field is empty or the
        # price is entirely zero/blank.
        if (post['tipus'] == "" or post['meret'] == "" or post['iszam'] == "" or post['telep'] == "" or post['hazszam'] == "" or post['szoba'] == "" or post['erkely'] == "" or post['eleradat'] == "" or post['kep'] == "" or (post['ar1'] == "" and post['ar2'] == "") or (post['ar1'] == "0" and post['ar2'] == "0")):
            print("Sikertelen kísérlet")
            return False
        # árm2 is stored as 0 — price-per-m2 is not computed here.
        Ingatlan.objects.create(tipus=post['tipus'], méret=post['meret'],
                                cím=f"{post['iszam']} {post['telep']}, {post['utca']} {post['hazszam']}", ár=teljesár, árm2=0,
                                szoba=post['szoba'], erkély=post['erkely'], elértípus=post['elerhetoseg'], elérhetőség=post['eleradat'],
                                kép=f"pics/{post['kep']}")
        return True
# Exercise 3.5 (exponent operator **) from "Python and Data Science"
# (Saengneung, 2020), p. 74.

# Compound interest: value of principal `a` at yearly rate `r` after `n` years.
a = 1000
r = 0.05
n = 10
print(a * (1 + r) ** n)

# Hypotenuse of a right triangle via Pythagoras; ** 0.5 is the square root.
bottom = float(input('직각삼각형의 밑변의 길이를 입력하시오: '))
height = float(input('직각삼각형의 높이를 입력하시오: '))
hypotenuse = (bottom ** 2 + height ** 2) ** 0.5
print('빗변은', hypotenuse, '입니다')
import os

# Characters allowed in a "latin-only" file name (letters, dot, space).
alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ. '


def func1():
    """Return the names of files in the current directory whose name uses
    only `alphabet` characters and contains at most one dot."""
    number = 0
    arr1 = []
    for i in os.listdir('.'):
        if os.path.isfile(i):
            check1 = all(ch in alphabet for ch in i)
            check2 = i.count('.')
            if check1 and check2 <= 1:
                number += 1
                arr1.append(i)
    print('Найдено файлов, название которых состоит \
только из латинских символов: ' + str(number))
    return arr1


def func2(arr):
    """Print and return the unique base names (extension stripped) of *arr*,
    preserving first-seen order."""
    arr2 = []
    for i in arr:
        # BUG FIX: the original used i[0:i.find('.')], which chops the last
        # character off names without a dot (find returns -1).
        base = os.path.splitext(i)[0]
        if base not in arr2:
            arr2.append(base)
    for k in arr2:
        print(k)
    return arr2


func2(func1())
# Dumps a GBA cartridge ROM streamed over serial by an Arduino DUE
# (Python 2 script; requires pyserial).
import time
import serial
import struct

COM = "COM10"

print "GBA Dumper + Arduino DUE"
st = time.clock()

# buffer clear: open/close once to flush anything stale on the port
ser = serial.Serial(port=COM, baudrate=115200)
time.sleep(0.1)
ser.close()

# restart the connection for the actual transfer
ser = serial.Serial(port=COM, baudrate=115200)

# First 4 bytes are the ROM size, big-endian.
s = 0
for i in xrange(4):
    s <<= 8
    s = s + ord(ser.read(1))
print "ROM SIZE: " + hex(s)

# Read the ROM two bytes per iteration; progress every 0x2000 bytes.
d = []
for i in xrange(s/2):
    d.append(ord(ser.read(1)))
    d.append(ord(ser.read(1)))
    if(i % 0x1000 == 0 and i != 0):
        print "received " + hex(i*2)
ser.close()

# Write the captured bytes out as the ROM image.
f = open("test.gba", "wb+")
for x in d:
    f.write(struct.pack("B", x))
f.close()
print "done"

ed = time.clock()
print "total " + str(int(ed - st) / 60) + " min"
import pandas as pd
from sqlalchemy import create_engine

# NOTE(review): database credentials are hardcoded in source — move to
# environment/config and rotate the password.
strConnection = 'postgresql://weather_user:159753@159.65.233.116:5432/weather_tool'
engine = create_engine(strConnection, pool_pre_ping=True)

# Cities that have no matching row in the airports table yet.
query_CitiesFind = 'SELECT iata_code FROM cities c WHERE NOT EXISTS (SELECT * FROM airports a2 WHERE a2.iata_code = c.iata_code)'
dfCities = pd.read_sql(query_CitiesFind, engine)
lstCities = dfCities['iata_code'].to_list()

if len(lstCities) > 0 :
    # Load the reference airport CSV and keep only medium/large airports
    # with scheduled service.
    airports=pd.read_csv('/home/gleo/weather_tool/data')
    airports=airports[(airports["scheduled_service"]=='yes') & (airports["type"].str.contains("medium") | airports["type"].str.contains("large"))]
    # Drop columns the weather tool does not use.
    airports.drop(["id", "ident", "iso_region","continent","type","scheduled_service","local_code","home_link","wikipedia_link","keywords"],axis=1,inplace=True)
    # Restrict to the missing cities and append them to the airports table.
    airports=airports[airports['iata_code'].isin(lstCities)].reset_index().drop(labels='index', axis=1)
    airports.to_sql('airports', engine, if_exists='append')
from typing import List, Dict, Set, Tuple

# Sudoku board helpers: per-row / per-column / per-square "seen" sets used by
# the backtracking solver defined below.
#
# `sets` everywhere is [row_sets, column_sets, square_sets], each a list of
# nine Set[int].

NUMBERS = {1, 2, 3, 4, 5, 6, 7, 8, 9}


def get_region(roc: int) -> int:
    """Map a row-or-column index (0-8) to its 3-wide band index (0-2)."""
    # Simplified from the original sum-of-booleans arithmetic.
    return roc // 3


def get_square(row: int, col: int) -> int:
    """Index (0-8) of the 3x3 square containing (row, col)."""
    return 3 * get_region(row) + get_region(col)


def get_row_square_and_column(row: int, col: int, sets: List[List[Set[int]]]) -> Tuple[Set[int], Set[int], Set[int]]:
    """Return the (row, column, square) seen-sets for position (row, col)."""
    # Debug try/except around the row lookup removed — an IndexError here is
    # a programming error and should propagate unchanged.
    row_set = sets[0][row]
    col_set = sets[1][col]
    sqr_set = sets[2][get_square(row, col)]
    return (row_set, col_set, sqr_set)


def build_seen_sets(
        sets: List[List[Set[int]]],
        matrix: List[List[str]],
        occupied_dict: Dict[Tuple[int, int], bool]
) -> None:
    """Populate the seen-sets from the given clues and mark occupied cells."""
    for row in range(9):
        for col in range(9):
            number = matrix[row][col]
            if number != ".":
                (row_set, col_set, sqr_set) = get_row_square_and_column(
                    row, col, sets
                )
                value = int(number)
                row_set.add(value)
                col_set.add(value)
                sqr_set.add(value)
                occupied_dict[(row, col)] = True


def get_available_numbers(row: int, col: int, sets: List[List[Set[int]]]) -> Set[int]:
    """Digits that can legally be placed at (row, col)."""
    (row_set, col_set, sqr_set) = get_row_square_and_column(row, col, sets)
    return NUMBERS - (row_set | col_set | sqr_set)
def add_num_to_sets(
        num: str,
        row: int,
        col: int,
        sets: List[List[Set[int]]]
) -> None:
    """Record *num* as placed at (row, col) in all three seen-sets."""
    (
        row_set,
        col_set,
        sqr_set
    ) = get_row_square_and_column(
        row, col, sets
    )
    row_set.add(int(num))
    col_set.add(int(num))
    sqr_set.add(int(num))


def remove_num_from_sets(num: str, row: int, col: int, sets: List[List[Set[int]]]) -> None:
    """Undo add_num_to_sets when backtracking out of a trial placement."""
    (
        row_set,
        col_set,
        sqr_set
    ) = get_row_square_and_column(
        row, col, sets
    )
    row_set.remove(int(num))
    col_set.remove(int(num))
    sqr_set.remove(int(num))


def traverse(
        row: int,
        column: int,
        sets: List[List[Set[int]]],
        matrix: List[List[str]],
        occupied_dict: Dict[Tuple[int, int], bool]
) -> bool:
    """Backtracking fill of `matrix`, scanning left-to-right, top-to-bottom.

    Returns True when a complete solution exists from this cell onward; the
    matrix is written only on the success path (as recursion unwinds).
    """
    # Coordinates of the next cell in scan order (wrap at end of row).
    next_row = row
    next_col = column + 1
    if column == 8:
        next_row = row + 1
        next_col = 0
    # Pre-filled clue: nothing to choose here, just recurse past it.
    if occupied_dict.get((row, column)) is not None:
        if row == 8 and column == 8:
            return True
        return traverse(next_row, next_col, sets, matrix, occupied_dict)
    numbers = get_available_numbers(row, column, sets)
    for number in numbers:
        # Tentatively place `number`, recurse, and undo on failure.
        add_num_to_sets(str(number), row, column, sets)
        if row == 8 and column == 8:
            matrix[row][column] = str(number)
            return True
        if traverse(next_row, next_col, sets, matrix, occupied_dict):
            matrix[row][column] = str(number)
            return True
        remove_num_from_sets(str(number), row, column, sets)
    # Every candidate failed: signal the caller to backtrack.
    return False


class Solution:
    def solveSudoku(self, board: List[List[str]]) -> None:
        """
        Do not return anything, modify board in-place instead.
        """
        columns_sets_list: List[Set[int]] = [set() for _ in range(9)]
        rows_sets_list: List[Set[int]] = [set() for _ in range(9)]
        squares_sets_list: List[Set[int]] = [set() for _ in range(9)]
        sets = [rows_sets_list, columns_sets_list, squares_sets_list]
        occupied_spaces: Dict[Tuple[int, int], bool] = dict()
        build_seen_sets(sets, board, occupied_spaces)
        traverse(0, 0, sets, board, occupied_spaces)


# Ad-hoc driver: solve a nearly-empty board and print the result.
solution = Solution()
board = [
    [".", ".", ".", ".", ".", ".", ".", ".", "."],
    [".", ".", ".", ".", ".", ".", ".", ".", "."],
    [".", ".", ".", ".", ".", ".", ".", ".", "."],
    [".", ".", ".", ".", ".", ".", ".", ".", "."],
    [".", ".", ".", ".", ".", ".", ".", ".", "."],
    [".", ".", ".", ".", ".", ".", ".", ".", "."],
    [".", ".", ".", ".", ".", ".", ".", ".", "."],
    [".", ".", ".", ".", ".", ".", ".", ".", "."],
    [".", ".", ".", ".", ".", ".", "8", ".", "6"]
]
solution.solveSudoku(board)
for l in board:
    print(l)
#!/usr/bin/python # vim: set fileencoding=\xe2 import re from urlparse import urlparse from pprint import pprint print ''' _____ _ _____ _ / ____| | |/ ____| | | | (___ __ _ __| | | ___ __| | ___ \___ \ / _` |/ _` | | / _ \ / _` |/ _ \ ____) | (_| | (_| | |___| (_) | (_| | __/ |_____/ \__,_|\__,_|\_____\___/ \__,_|\___| ------------- Zone-h Cleaner V.1 ------------ --------- Powered By SadCode Official ------- ''' urls = raw_input('Text File To Clean ? : ') with open(urls, 'r') as urls: for line in urls: url = line.rstrip() and line.split('\t') bond = url[7].replace('...', '') benz = url[7].split('/') clean = benz[0] if 'http://' not in clean: url = 'http://'+clean with open('hd.txt', 'a') as myfile: myfile.write('http://'+clean) myfile.write('\n')
from relogic.components.component import Component
from relogic.structures.structure import Structure

from typing import List


class SRLComponent(Component):
  """Semantic-role-labeling component.

  Expands every structure into one instance per predicate, runs the
  predictor over the expansions, and routes each label sequence back to
  the structure that produced it.
  """

  def execute(self, inputs: List[Structure]):
    """Append one SRL label list per predicate to each input's srl_labels."""
    expanded = []
    owner_indices = []  # index into `inputs` for each expanded instance
    for source_idx, structure in enumerate(inputs):
      for pred_index, pred_text in structure.predicates:
        expanded.append(
          structure.__class__(
            tokens=structure.tokens,
            predicate_text=pred_text,
            predicate_index=pred_index))
        owner_indices.append(source_idx)

    # Predictions arrive batched; walk them in order, matching the
    # expansion order recorded above.
    cursor = 0
    for batch in self._predictor.predict(expanded):
      _, batch_labels, _ = batch
      for labels in batch_labels:
        inputs[owner_indices[cursor]].srl_labels.append(list(labels))
        cursor += 1
# Assembler to machine code for Amoeba
import sys

# 4-bit opcodes keyed by mnemonic.
commands = {
    'LDA':'0000',
    'STA':'0001',
    'LDAN':'0010',
    'ADD':'0011',
    'SUB':'0100',
    'MLT':'0101',
    'DIV':'0110',
    'JF':'0111',
    'JB':'1000',
    'JFE':'1001',
    'JBE':'1010',
    'CLO':'1011',
    'PAN':'1100',
    'PAC':'1101',
    'NOP':'1110',
    'END':'1111'
}

# Source file comes from argv[1], defaulting to program.txt.
try:
    fn = sys.argv[1]
except IndexError:
    fn = 'program.txt'

with open(fn, 'r') as f:
    out = []
    for line in f:
        # BUG FIX: split(' ') kept the trailing newline inside the operand
        # (doubling newlines on output) and crashed with a KeyError on
        # operand-less mnemonics like "NOP"; split() handles both.
        parts = line.split()
        if not parts:
            continue  # skip blank lines
        word = commands[parts[0]]
        if len(parts) > 1:
            word += parts[1]
        out.append(word)

# The assembled output overwrites the source file, one word per line.
with open(fn, 'w') as f:
    for line in out:
        f.write(line)
        f.write('\n')
# Copyright 2019-2021 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0

import json
from bson import ObjectId
import datetime


class JSONEncoder(json.JSONEncoder):
    """JSON encoder that also understands Mongo ObjectId and datetime.

    ObjectIds are emitted as their hex string; datetimes as
    'YYYY-MM-DD HH:MM:SS'.
    """

    def default(self, o):
        if isinstance(o, datetime.datetime):
            return o.strftime('%Y-%m-%d %H:%M:%S')
        if isinstance(o, ObjectId):
            return str(o)
        # Anything else falls through to the base class (raises TypeError).
        return super(JSONEncoder, self).default(o)
# Minimal paho-mqtt subscriber: connects to a public broker, subscribes to
# one topic, and prints every payload it receives.
import paho.mqtt.client as paho

broker="broker.hivemq.com"
#broker="iot.eclipse.org"

#define callback invoked for every message on a subscribed topic
def on_message(client, userdata, message):
    print("received message =",message.payload)

client= paho.Client()
######Bind function to callback
client.on_message=on_message
#####
print("connecting to broker ",broker)
client.connect(broker)#connect
print("subscribing ")
client.subscribe("house/bulb1")#subscribe
print("looping forever ")
# Blocks here, dispatching callbacks until interrupted.
client.loop_forever() #stop loop
def is_armstrong_number(number):
    """Return True if *number* is an Armstrong (narcissistic) number.

    An Armstrong number equals the sum of its digits each raised to the
    power of the digit count, e.g. 153 = 1^3 + 5^3 + 3^3.

    Non-integers and negative values are rejected (returns False): a
    fractional part can never satisfy the property, and the minus sign has
    no defined digit semantics here. Zero is accepted (0 = 0^1).

    Parameters;
        number: Should be a postive integer.
    """
    if int(number) != number:
        # Fractional values can't be Armstrong numbers.
        return False
    if number < 0:
        # Executive decision: negatives are not allowed.
        return False
    if number == 0:
        # Special-cased: 0^1 == 0.
        return True

    # Extract digits with modulus math rather than string manipulation.
    digits = []
    remaining = number
    while remaining:
        remaining, digit = divmod(remaining, 10)
        digits.append(digit)

    power = len(digits)
    return sum(d ** power for d in digits) == number
# -*- coding: utf-8 -*- import sys import tweepy import webbrowser import pickle import os from time import time, strftime, localtime CONSUMER_KEY = 'zXhZvsaUSLbnk1E2KjcNw' CONSUMER_SECRET = 'dqinsFtcx7aO7TKJYzTr3s8JpgWjwCMjFMnVwm41plg' ACCESS_TOKEN = '9175042-LgX3UMYB9qSauD7DvEjp0Nsle3vttognI0Fy0KHZ4M' ACCESS_TOKEN_SECRET = 'iAMQCAwdZF2RuTI8jEaV6BxPDcLlQaiY2J7dFYj4wg' auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET) auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET) api = tweepy.API(auth) def create_directories(directory): print "Creating directories" sub_dirs = ["raw_data", "ctr_data", "feature_data", "feature_files", "support_data"] if not os.path.exists(directory): print "\t" + directory os.makedirs(directory) for d in sub_dirs: temp_dir = directory + "/" + d if not os.path.exists(temp_dir): print "\t" + temp_dir os.makedirs(temp_dir) def get_tweets(user_id): tweets = [] print "Getting tweets for user", user_id i = 0 for status in tweepy.Cursor(api.user_timeline, id=user_id).items(): i += 1 if i % 500 == 0: print i tweets.append(status) fname = user_id + "/raw_data/tweets.pkl" print "Dumping :", fname pickle.dump(tweets, open(fname, "w")) if __name__ == "__main__": if len(sys.argv) != 2: print "Enter a Username" else: user_id = sys.argv[1] print "Starting processing for ", user_id create_directories(user_id) get_tweets(user_id)
import re


def count_animals(sentence):
    """Sum every standalone numeric token in *sentence* (whitespace-split)."""
    total = 0
    for token in sentence.split():
        if token.isdigit():
            total += int(token)
    return total


def count_animalsB(sentence):
    """Sum every digit run in *sentence*, including digits glued to words."""
    return sum(int(match) for match in re.findall(r'\d+', sentence))

# timing: the split/isdigit version benchmarked ~0.036s vs ~0.065s for the
# regex variant in the original measurements.
from games.cards.Class_card import Card
from random import *


class DeckOfCards:
    """A 52-card deck of Card(value, suit), shuffled on construction."""

    def __init__(self):
        self.list1 = []
        self.deck()
        self.shuffle()

    def deck(self):
        """Populate list1 with every (value 2-14, suit 1-4) combination."""
        for suit in range(1, 5):
            for value in range(2, 15):
                self.list1.append(Card(value, suit))

    def shuffle(self):
        """Shuffle the remaining cards in place."""
        shuffle(self.list1)

    def deal_one(self):
        """Remove and return one randomly chosen card."""
        card = choice(self.list1)
        self.list1.remove(card)
        return card

    def show(self):
        print(self.list1)
#Build List of Cities from Property list
#Call scrapy
import subprocess
import sqlite3
import json
import re


def gen_city_urls():
    """Return city-data.com URLs for every distinct (city, state) pair found
    in the properties database, preserving first-seen order."""
    state_map = {
        "OR":"Oregon", "TX":"Texas", "AZ":"Arizona", "CA":"California",
        "IL":"Illinois", "OK":"Oklahoma", "NJ":"New-Jersey", "AR":"Arkansas",
        "AL":"Alabama", "WA":"Washington", "CO":"Colorado", "FL":"Florida",
        "MS":"Mississippi", "NM":"New-Mexico", "ID":"Idaho", "TN":"Tennessee",
        "PR":"Puerto-Rico", "LA":"Louisiana", "MO":"Missouri", "NV":"Nevada",
        "SC":"South-Carolina", "MN":"Minnesota", "AS":"American-Samoa",
        "NC":"North-Carolina", "AK":"Alaska", "WV":"West-Virginia",
        "GA":"Georgia",
    }

    print("Loading database...")
    #Setup connection to database
    conn = sqlite3.connect('storage/properties.db')
    conn.row_factory = sqlite3.Row
    cursor = conn.cursor()
    cursor.execute('SELECT * FROM HouseItem')
    rows = cursor.fetchall()
    conn.close()  # release the DB handle as soon as the rows are in memory
    #http://stackoverflow.com/questions/3286525/return-sql-table-as-json-in-python
    properties = [dict(ix) for ix in rows]

    print("Analyzing cities...")
    cities = []
    seen = set()  # O(1) dedupe; `cities` keeps insertion order
    for p in properties:
        if p['state'] is None:
            continue
        if p['state'] not in state_map:
            print("No State Code Mapping: " + p['state'])
            continue
        city = p['city'].replace(" ", "-") + "-" + state_map[p['state']]
        if city not in seen:
            seen.add(city)
            cities.append(city)

    urls = ["http://www.city-data.com/city/" + name + ".html" for name in cities]
    print("Num Cities Processed: " + str(len(cities)))
    return urls


#subprocess.call('rm process/cities.json', shell=True)
#subprocess.call('scrapy crawl city -o process/cities.json', shell=True)
# Generated by Django 3.0 on 2019-12-17 07:24 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('image', '0001_initial'), ] operations = [ migrations.RemoveField( model_name='image', name='pub_date', ), ]
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.shortcuts import render from django.http import * from django.template import loader, RequestContext from models import * from django.core import exceptions from ttsx_app.forms import add_serverform # Create your views here. def index(request): ServerList1 = ServerList.objects.all()[:3] return render(request, 'index.html', locals()) def base(request): return render(request, 'base.html') # def logins(request): # if request.method == POST: # # return render(request, 'login.html', locals()) def add_server(request): add_servers = add_serverform(request.POST) if add_servers.is_valid(): add_servers.save() # return render(request, 'register.html', locals()) return HttpResponse("添加成功") else: add_servers = add_serverform() return render(request, 'register.html', locals())
import os
import numpy as np

from skimage.filters import threshold_local
from scipy.ndimage.measurements import center_of_mass

from lsst.ts.wep.cwfs.Image import Image


class AdapThresImage(Image):
    """Donut image with adaptive-threshold centroiding and synthetic
    multi-donut generation for testing."""

    def generateMultiDonut(self, spaceCoef, magRatio, theta):
        """ Gemerate multiple donut images. Only one neightboring star will be generated for test, which is the baseline of LSST.

        Arguments:
            spaceCoef {[float]} -- Spacing coefficient to decide the distance between donuts.
            magRatio {[float]} -- Magnitude ratio of new donut compared with the original one.
            theta {[float]} -- Theta angle of generated neighboring star.

        Returns:
            [float] -- Image of donuts.
            [float] -- Neighboring donut (x, y) position.
        """

        # Check the inputs
        if (spaceCoef < 0):
            print("spaceCoef should be greater than zero.")
            return -1
        elif (magRatio < 0 or magRatio > 1):
            print("magRatio should be postive and less than 1.")
            return -1

        # Get the center and radius of self-donut
        selfX, selfY, selfR, imgBinary = self.getCenterAndR_ef(checkEntropy = True)

        # Get the position of new donut based on spaceCoef and theta
        # (polar offset of spaceCoef*radius from the main donut).
        newX = selfX + spaceCoef*selfR*np.cos(theta)
        newY = selfY + spaceCoef*selfR*np.sin(theta)

        # Calculate the frame size and shift the center of donuts
        lengthX = max(selfX, newX) - min(selfX, newX) + 5*selfR
        lengthY = max(selfY, newY) - min(selfY, newY) + 5*selfR
        length = int(max(lengthX, lengthY))

        # Enforce the length to be even for the symmetry
        if (length%2 == 1):
            length += 1

        # Shift so the midpoint between the two donuts lands at frame center.
        shiftX = length/2.0 - (selfX + newX)/2
        shiftY = length/2.0 - (selfY + newY)/2

        # Get the new coordinate
        selfX += shiftX
        selfY += shiftY

        newX += shiftX
        newY += shiftY

        # Generate image of multiple donuts
        imageMain = np.zeros([length, length])
        imageNeighbor = np.zeros([length, length])

        m, n = self.image.shape
        # Get the shifted main donut image
        imageMain[int(selfY-m/2):int(selfY+m/2), int(selfX-n/2):int(selfX+n/2)] += self.image
        # Get the shifted neighboring donut image, scaled by magRatio
        imageNeighbor[int(newY-m/2):int(newY+m/2), int(newX-n/2):int(newX+n/2)] += magRatio*self.image

        # Get the synthesized multi-donut image
        image = imageMain + imageNeighbor

        return image, imageMain, imageNeighbor, newX, newY

    def getCenterAndR_adap(self, blockSize=33):
        """ Calculate the weighting center and radius of circle based on the adapative threshold.

        Arguments:
            blockSize {[int]} -- Block size for adaptive threshold. This value should be odd.

        Returns:
            [float] -- Values of weighting center (realcx, realcy) and radius (realR).
        """

        # Adaptive threshold: iterate until the block size converges on the
        # nearest odd integer to the measured donut radius (max 10 passes).
        delta = 1
        times = 0
        while (delta > 1e-2) and (times < 10):
            img = self.image.copy()
            imgBinary = (img > threshold_local(img, blockSize)).astype(float)

            # Calculate the weighting radius
            realR = np.sqrt(np.sum(imgBinary) / np.pi)

            # Calculte the nearest odd number of radius for the blockSize
            if (int(realR)%2 == 0):
                oddRearR = int(realR+1)
            else:
                oddRearR = int(realR)

            # Critera check of while loop
            delta = abs(blockSize - oddRearR)
            times += 1

            # New value of blockSize
            blockSize = oddRearR

        # Calculate the center of mass
        realcy, realcx = center_of_mass(imgBinary)

        # The values of (realcx, realcy, realR) will be (nan, nan, 0.0) for the invalid image.
        if (not np.isfinite([realcx, realcy]).any()):
            print("Can not fit donut to circle.")

        return realcx, realcy, realR, imgBinary


if __name__ == "__main__":
    pass
# Problem statement:
# The Fibonacci sequence is defined as
#   f0 = 0, f1 = 1, fn = fn-1 + fn-2.
# Given n, print the n-th Fibonacci number fn.
# (Could equally be solved with a while loop.)

n = int(input())
a = 0
b = 1
# BUG FIX: the original ran range(n - 1) and printed b, which yields 1 for
# n = 0 instead of f0 = 0. Running the step exactly n times and printing `a`
# is correct for every n >= 0.
for i in range(n):
    a, b = b, a + b
print(a)
import pandas as pd
from ipps import IppsData
from DRGtoMDC import DRGtoMDC


def main():
    """Load the IPPS provider data, tag it with MDC codes, and print a few
    exploratory pandas summaries (grouping, filtering, unique values).
    """
    # IppsData knows how to read the input data and add the fields we
    # need; load() initializes the object and .data exposes a DataFrame.
    ipps = IppsData()
    ipps.load()
    ipps_data = ipps.data

    # Attach the MDC code column to the IPPS rows.
    mapper = DRGtoMDC()
    mapper.add_mdc_codes_to_ipps(ipps_data)

    # Uncomment to inspect the merged data in Excel:
    #ipps_data.to_csv("IPPS_Provider_Data_Merged.csv")

    # Column projection: keep only the columns we explore below.
    subset = ipps_data[["MDC", "Region", "Average_Total_Payments"]]
    print(type(subset))
    print("Number of MDC Code and Regions:\n{}\n".format(len(subset)))

    # Aggregation: total payments per (MDC, Region) pair.
    summary = subset.groupby(["MDC", "Region"])["Average_Total_Payments"].sum()
    print("Group by MDC Code and Regions:\n{}\n".format(summary.head(8)))

    # Boolean-mask filtering (a "search" within the data).
    mask = ((subset["MDC"] == 23)
            & (subset["Region"] == "WEST")
            & (subset["Average_Total_Payments"] > 8000))
    hits = subset[mask]
    print("Search Results:\n{}\n".format(hits))

    # The underlying numpy array of the filtered rows.
    raw_values = hits.values
    print(type(raw_values))
    print("Search Results:\n{}\n".format(raw_values))

    # Unique values for a column, with and without nulls.
    print("Unique Regions (inc NaN):\n{}\n".format(subset.Region.unique()))
    print("Unique Regions V1:\n{}\n".format(subset.Region[pd.isnull(subset.Region) == False].unique()))
    # or
    print("Unique Regions V2:\n{}\n".format(subset.Region[subset.Region.notnull()].unique()))
    # Substituting a placeholder for Null/NaN does not modify the source data.
    print("Unique Regions (nan is set to UNK):\n{}\n".format(subset.Region.fillna("UNK").unique()))

    print("done")


if __name__ == "__main__":
    main()
"""Image filters: edge stroking, contour detection, and convolution filters."""
import cv2
import numpy as np
from matplotlib import pyplot as plt
# from . import utils


def strokeEdges(src, dst, blurKsize=7, edgeKsize=5):
    """Edge detection with a darkened-edge overlay effect.

    :param src: source image (BGR)
    :param dst: destination image (BGR, same shape as src)
    :param blurKsize: kernel size of the median blur (odd); values < 3
                      skip the blur step entirely
    :param edgeKsize: kernel size of the Laplacian edge filter (odd)
    """
    if blurKsize >= 3:
        bluredSrc = cv2.medianBlur(src, blurKsize)
        graySrc = cv2.cvtColor(bluredSrc, cv2.COLOR_BGR2GRAY)
    else:
        graySrc = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    cv2.Laplacian(graySrc, cv2.CV_8U, graySrc, ksize=edgeKsize)
    # Invert and normalize so strong edges get weight near 0 (darkened).
    normalizedInverseAlpha = (1.0/255)*(255-graySrc)
    channels = cv2.split(src)
    for channel in channels:
        channel[:] = channel*normalizedInverseAlpha
    cv2.merge(channels, dst)


def contourDetection(src, threshold=50, epsilon=0.005, **kwargs):
    """Contour detection; draws the result directly onto ``src``.

    :param src: source image (BGR); modified in place
    :param threshold: binarization threshold
    :param epsilon: tolerance between the approximating polygon and the
                    original contour (fraction of the arc length)
    :param kwargs: drawing method, e.g. style="contours"; one of
                   "contours", "boundingRect", "minAreaRect",
                   "minEnclosingCircle", "approxPolyDP", "convexHull".
                   NOTE(review): a missing ``style`` kwarg raises KeyError.
    """
    ret, thresh = cv2.threshold(cv2.cvtColor(src.copy(), cv2.COLOR_BGR2GRAY),
                                threshold, 255, cv2.THRESH_BINARY)
    # NOTE(review): two-value unpack requires OpenCV 2.x/4.x; OpenCV 3.x
    # returned three values here — confirm the deployed cv2 version.
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if kwargs["style"] == "contours":
        cv2.drawContours(src, contours, -1, (0, 0, 255), 1)
    else:
        for c in contours:
            # Axis-aligned bounding rectangle of the target contour.
            if kwargs["style"] == "boundingRect":
                x, y, w, h = cv2.boundingRect(c)
                cv2.rectangle(src, (x, y), (x + w, y + h), (0, 0, 255), 1)
            # Minimum-area (rotated) rectangle enclosing the target.
            elif kwargs["style"] == "minAreaRect":
                rect = cv2.minAreaRect(c)
                box = cv2.boxPoints(rect)
                box = np.int0(box)
                cv2.drawContours(src, [box], 0, (255, 0, 0), 1)
            # Minimum enclosing circle around the target.
            elif kwargs["style"] == "minEnclosingCircle":
                (x, y), radius = cv2.minEnclosingCircle(c)
                center = (int(x), int(y))
                radius = int(radius)
                src = cv2.circle(src, center, radius, (0, 255, 0), 1)
            # Approximating polygon around the target.
            elif kwargs["style"] == "approxPolyDP":
                diff = epsilon * cv2.arcLength(c, True)
                approx = cv2.approxPolyDP(c, diff, True)
                cv2.drawContours(src, [approx], -1, (0, 0, 255), 1)
            # Convex hull around the target.
            elif kwargs["style"] == "convexHull":
                hull = cv2.convexHull(c)
                cv2.drawContours(src, [hull], -1, (0, 0, 255), 1)


class VConvolutionFilter(object):
    """General-purpose convolution filter with a user-supplied kernel."""

    def __init__(self, kernel):
        self._kernel = kernel  # convolution kernel

    def apply(self, src, dst):
        """Apply the filter to a BGR or gray source image, writing dst."""
        cv2.filter2D(src, -1, self._kernel, dst)


class SharpenFilter(VConvolutionFilter):
    """A 1-pixel-radius sharpening filter (weights sum to 1)."""

    def __init__(self):
        kernel = np.array([[-1, -1, -1],
                           [-1, 9, -1],
                           [-1, -1, -1]])
        VConvolutionFilter.__init__(self, kernel)


class BlurFilter(VConvolutionFilter):
    """A 2-pixel-radius blur (mean) filter."""

    def __init__(self):
        # BUG FIX: the original kernel had -0.04 at row 1, col 0.  A mean
        # blur kernel must have uniform positive weights summing to 1
        # (25 * 0.04 == 1.0); a negative entry adds an emboss-like artifact.
        kernel = np.array([[0.04, 0.04, 0.04, 0.04, 0.04],
                           [0.04, 0.04, 0.04, 0.04, 0.04],
                           [0.04, 0.04, 0.04, 0.04, 0.04],
                           [0.04, 0.04, 0.04, 0.04, 0.04],
                           [0.04, 0.04, 0.04, 0.04, 0.04]])
        VConvolutionFilter.__init__(self, kernel)


class EmbossFilter(VConvolutionFilter):
    """A 1-pixel-radius emboss filter."""

    def __init__(self):
        kernel = np.array([[-2, -1, 0],
                           [-1, 1, 1],
                           [0, 1, 2]])
        VConvolutionFilter.__init__(self, kernel)


if __name__ == "__main__":
    img = cv2.imread("../data/test/3.jpg")
    contourDetection(img, threshold=50, style="contours")
    # cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    cv2.namedWindow("1", cv2.WINDOW_AUTOSIZE)
    cv2.imshow("1", img)
    cv2.waitKey()
# -*- coding: utf-8 -*- """ Created on Sat Dec 5 03:26:55 2020 @author: polin """ import requests import json code = "82e622ccc27b4ad0af0918182329a742" region = 'westeurope' def extract(obj, arr, code): if isinstance(obj, dict): for k, v in obj.items(): if isinstance(v, (dict, list)): extract(v, arr, code) elif k == code: arr.append(v) elif isinstance(obj, list): for item in obj: extract(item, arr, code) return arr def translate(string, lg): json = [{'Text': string}] headers = { 'Ocp-Apim-Subscription-code': code, 'Ocp-Apim-Subscription-Region': region, 'Content-Type': 'application/json' } url = "https://api.cognitive.microsofttranslator.com/translate?api-version=3.0&to=" + lg request = requests.post(url, headers=headers, json=json) values = extract(request.json(), [], 'text') return ''.join(values) lg = input('Your language: ') file = open("text.txt", "r", encoding="utf-8") output = open("translate_text.txt", "a+") for line in file.readlines(): string = translate(line, lg) output.write(string) print('done')
#!/usr/bin/env python3
# Batch chest-X-ray triage script: builds a file list for one day's image
# export, runs a DenseNet-121 classifier (3 classes) over every image,
# appends the predicted label to each list line, then copies the images
# into per-label folders and resizes the label-1 set.
import argparse
import os,sys
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.utils.data
from torch.utils.data import DataLoader
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
from dataset_pred import MyDataset
import torchvision
from torchvision import datasets , models , transforms
from torchvision.models import densenet121
from torch.nn import functional as F
from model import densenet121_cls
import shutil
import cv2

# Restrict CUDA to the second physical GPU.
os.environ['CUDA_VISIBLE_DEVICES']='1'

def arg_parse():
    """Parse command-line options; the positional `date` selects which
    day's image folder (CHEST-DX-<date>) is processed."""
    parser = argparse.ArgumentParser(description='Torch')
    parser.add_argument('date', default='20190226', type=str)
    parser.add_argument('-j', '--workers', default=1, type=int, metavar='N',
                        help='number of data loading workers (default: 4)')
    parser.add_argument('-b', '--batch-size', default=32, type=int, metavar='N',
                        help='batch size')
    parser.add_argument('--resume', default='', type=str, metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    parser.add_argument('--crop_size', dest='crop_size',default=224, type=int, #448
                        help='crop size')
    parser.add_argument('--scale_size', dest = 'scale_size',default=448, type=int,
                        help='the size of the rescale image')
    args = parser.parse_args()
    return args

def main():
    """Run the full pipeline: list files, load model, predict, write the
    labeled list, and sort images into per-label folders."""
    args = arg_parse()

    # Build the "path, " list file for this date.
    make_label_list(args)
    print("==> make_label_list OK...")

    # Create dataloader over the listed images.
    print("==> Creating dataloader...")
    test_list = '/media/renyz/data8g/4Rogen/label_name_'+args.date+'.txt'
    data_dir = ''
    test_loader = get_test_set(data_dir, test_list, args)

    # Build the network: torchvision DenseNet-121 backbone with a custom
    # 3-class classifier head.
    print("==> Loading the network ...")
    model = densenet121(pretrained=False)
    model.classifier = densenet121_cls(num_classes = 3)

    # Load trained weights (note: hard-coded, overriding --resume).
    args.resume = '/home/renyz/CheXpert/model/densenet121-0215.pkl'
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint)
            print("=> loaded checkpoint '{}'" .format(args.resume))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
            exit()
    model.cuda()
    cudnn.benchmark = True

    # Run prediction over the whole loader.
    prediction = predict(test_loader, model, 0, args)
    print('finish predict')

    # Write "path, <label>" lines; prediction order matches the list file
    # because the loader uses shuffle=False.
    if not os.path.isdir('/media/renyz/data8g/4Rogen/tmp/'+args.date+'/'):
        os.makedirs('/media/renyz/data8g/4Rogen/tmp/'+args.date+'/')
    total_label_file = '/media/renyz/data8g/4Rogen/tmp/'+args.date+'/label_'+args.date+'.txt'
    total_label_file_obj = open(total_label_file,'w')
    count = 0
    with open(test_list, 'r') as f:
        for l in f.readlines():
            total_label_file_obj.write(l.replace('\n','')+str(int(float(prediction[count])))+'\n')
            count = count + 1
    total_label_file_obj.close()
    print('finish make label!')

    # Copy images into filter.0/1/2 folders according to their label.
    move(args)

def predict(loader, model, epoch, args):
    """Return the argmax class index per image as a 1-D numpy array,
    in loader order."""
    print('begin predict!')
    model.eval()
    start_test = True
    for i, input in enumerate(loader):
        if (i%20) == 0:
            print('batch processing:',i)
        # NOTE(review): Variable(volatile=True) is the pre-0.4 PyTorch
        # inference idiom — this script presumably targets torch 0.3.x;
        # on newer versions this keyword no longer exists.  Confirm the
        # pinned torch version before touching this line.
        input_var = torch.autograd.Variable(input, volatile=True).cuda()
        # compute output and accumulate logits across batches
        ori_out = model(input_var)
        if start_test:
            total_output_ori = ori_out.data.float()
            start_test = False
        else:
            total_output_ori = torch.cat((total_output_ori, ori_out.data.float()) , 0)
        #break
    # Argmax over the class dimension gives the predicted label.
    _,predict_ori = torch.max(total_output_ori,1)
    #np.savetxt('/media/renyz/data8g/4Rogen/tmp/label_pred_'+args.date+'.txt', torch.squeeze(predict_ori).float().cpu().numpy())
    #print('ok')
    return torch.squeeze(predict_ori).float().cpu().numpy()

def get_test_set(data_dir,test_list,args):
    """Build the evaluation DataLoader: resize -> center-crop ->
    tensor -> ImageNet normalization, no shuffling."""
    # Normalization constants for ImageNet-pretrained models.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    crop_size = args.crop_size
    scale_size = args.scale_size
    # Center crop after a square resize.
    test_data_transform = transforms.Compose([
        transforms.Resize((scale_size,scale_size)),
        transforms.CenterCrop(crop_size),
        transforms.ToTensor(),
        normalize,
    ])
    test_set = MyDataset(data_dir, test_list, test_data_transform)
    test_loader = DataLoader(dataset=test_set, num_workers=args.workers,batch_size=args.batch_size, shuffle=False)
    return test_loader

def make_label_list(args):
    """Write one 'path, ' line per image found under the day's folder;
    the label is appended to each line later by main()."""
    img_path = '/media/renyz/data8g/4Rogen/CHEST-DX-'+args.date+'/'
    floder_list = os.listdir(img_path)
    label_path = '/media/renyz/data8g/4Rogen/label_name_'+args.date+'.txt'
    fobj_label = open(label_path, 'w')
    for flodername in floder_list:
        for img_name in os.listdir(img_path+flodername):
            #print(img_path+flodername+'/'+img_name)
            fobj_label.write(img_path+flodername+'/'+img_name+', \n')
    fobj_label.close()
    return 0

def resize(img_path, dest_path):
    """Shrink every image in img_path to 20% per side into dest_path."""
    #img_path = '/media/renyz/data8g/4Rogen/tmp/20190223/filter.1/'
    floder_list = os.listdir(img_path)
    #dest_path = '/media/renyz/data8g/4Rogen/tmp/20190223/filter.1(resized)/'
    if not os.path.isdir(dest_path):
        os.makedirs(dest_path)
    for img_name in floder_list:
        print('resize image name:',img_name)
        img_obj = cv2.imread(img_path+img_name)
        h , w = img_obj.shape[:2]
        res = cv2.resize(img_obj,(int(w*0.2),int(h*0.2)))
        cv2.imwrite(dest_path+img_name,res)
    return 0

def move(args):
    """Copy each image into filter.<label>/ according to the label file
    written by main(), then resize the label-1 set for review."""
    img_path = '/media/renyz/data8g/4Rogen/CHEST-DX-'+args.date+'/'
    floder_list = os.listdir(img_path)
    dest_path = '/media/renyz/data8g/4Rogen/tmp/'+args.date+'/'
    label_path = dest_path + 'label_'+args.date+'.txt'
    fobj_label = open(label_path, 'r')
    if not os.path.isdir(dest_path+'filter.0/'):
        os.makedirs(dest_path+'filter.0/')
    if not os.path.isdir(dest_path+'filter.1/'):
        os.makedirs(dest_path+'filter.1/')
    if not os.path.isdir(dest_path+'filter.2/'):
        os.makedirs(dest_path+'filter.2/')
    for line in fobj_label.readlines():
        # Lines look like "<path>, <label>"; split out both parts.
        img_name = line.split('/')[-1].split(',')[0]
        label_num = line.split(' ')[1].replace('\n','')
        print('move imgname:',img_name)
        if float(label_num) == 0:
            shutil.copyfile(line.split(',')[0],dest_path+'filter.0/'+img_name)
        elif float(label_num) == 1:
            shutil.copyfile(line.split(',')[0],dest_path+'filter.1/'+img_name)
        elif float(label_num) == 2:
            shutil.copyfile(line.split(',')[0],dest_path+'filter.2/'+img_name)
    fobj_label.close()
    print('finish move!')
    resize(dest_path+'filter.1/', dest_path+'filter.1(resized)/')
    print('finish resize!')

if __name__=="__main__":
    main()
# This file is MACHINE GENERATED! Do not edit. # Generated by: tensorflow/python/tools/api/generator/create_python_api.py script. """Public API for tf.random namespace. """ from __future__ import print_function as _print_function from tensorflow._api.v1.compat.v2.random import experimental from tensorflow.python import categorical from tensorflow.python import random_gamma as gamma from tensorflow.python import random_normal as normal from tensorflow.python import random_poisson_v2 as poisson from tensorflow.python import random_shuffle as shuffle from tensorflow.python import random_uniform as uniform from tensorflow.python import stateless_categorical from tensorflow.python import stateless_random_normal as stateless_normal from tensorflow.python import stateless_random_uniform as stateless_uniform from tensorflow.python import stateless_truncated_normal from tensorflow.python import truncated_normal from tensorflow.python.framework.random_seed import set_seed from tensorflow.python.ops.candidate_sampling_ops import all_candidate_sampler from tensorflow.python.ops.candidate_sampling_ops import fixed_unigram_candidate_sampler from tensorflow.python.ops.candidate_sampling_ops import learned_unigram_candidate_sampler from tensorflow.python.ops.candidate_sampling_ops import log_uniform_candidate_sampler from tensorflow.python.ops.candidate_sampling_ops import uniform_candidate_sampler del _print_function
from datetime import datetime as dt, date, timedelta
import os
import mysql.connector
import constant
import Decrypt
from twilio.rest import Client

'''
Just sends a text message to the user
'''
#Fake data, purpose of this is just showing the work I done for this project
account_sid = Decrypt.TW_SID
auth_token = Decrypt.TW_TOKEN
Office_PN = "+15555555555"
client = Client(account_sid, auth_token)

# Initializing the reminder threshold dates relative to today.
Today = dt.now().date()
Oneday = Today + timedelta(days=1)
Twoday = Today + timedelta(days=2)
Threeday = Today + timedelta(days=3)
Nextweek = Today + timedelta(days=7)
Twoweeks = Today + timedelta(days=14)
OfficePH = ""

# Date format used in the PATIENTS table.
dateformat = "%m-%d-%Y"

# Connect to the database holding the patient/appointment rows.
mydb = mysql.connector.connect(
    host="127.0.0.1",
    user="root",
    password="test123",
    database="test"
)

mycursor = mydb.cursor()
mycursor.execute("SELECT * FROM PATIENTS")
myresult = mycursor.fetchall()
ListMax = mycursor.rowcount

# Column-wise storage of the fetched rows.
# BUG FIX: the lists were named `date` and `time`, shadowing the
# `date` import from datetime (and the conventional `time` module name).
Fname = []
Lname = []
appt_dates = []
appt_times = []
location = []
PhoneNumber = []

# Copy each patient row into the parallel lists.
for patients in myresult:
    Fname.append(patients[constant.FNAME])
    Lname.append(patients[constant.LNAME])
    appt_dates.append(dt.strptime(patients[constant.APPOINTMENT_DATE], '%m-%d-%Y').date())
    appt_times.append(patients[constant.APPOINTMENT_TIME])
    location.append(patients[constant.LOCATION])
    PhoneNumber.append(patients[constant.PATIENTPN])


def _appointment_message(i, intro, office_ph):
    """Build the SMS body for patient *i*; *intro* is the sentence that
    precedes the appointment date (confirmation vs. friendly reminder)."""
    return (Fname[i] + " " + Lname[i] + ",\n" + intro
            + appt_dates[i].strftime('%m/%d/%Y') + " at " + appt_times[i]
            + " at the " + location[i]
            + ". If you have any questions about your upcoming visit, please call us at "
            + office_ph + " and please allot 1 hour for the appointment.")


# Send one message per patient whose appointment hits a reminder window.
for i in range(ListMax):
    # BUG FIX: the original condition was
    #   PhoneNumber[i] is not None or PhoneNumber != 'Null'
    # which is always True (the second clause compares the whole *list*
    # to 'Null', and `or` made any one clause sufficient).  Skip rows
    # with no usable phone number instead.
    if PhoneNumber[i] is None or PhoneNumber[i] == 'Null':
        continue

    # Office callback number depends on the appointment location.
    if location[i] == "Portsmouth":
        OfficePH = "(757)485-2222"
    elif location[i] == "Buffalo":
        OfficePH = "(716)893-2211"

    # 1 week out: confirmation message.  3/2/1 days out: the reminder
    # bodies were identical in the original, so they share one branch.
    if appt_dates[i] == Nextweek:
        body = _appointment_message(
            i,
            "We have confirmed your appointment on our schedule! Your appointment on ",
            OfficePH)
    elif appt_dates[i] in (Threeday, Twoday, Oneday):
        body = _appointment_message(
            i,
            "Friendly Reminder! Your appointment is on ",
            OfficePH)
    else:
        continue

    client.api.account.messages.create(
        to=PhoneNumber[i],
        from_=Office_PN,
        body=body)
# Copyright [2018] [Wang Yinghao] # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import subprocess import os import logging import threading from sys import platform from tqdm import tqdm from const import RAWEXT, MAX_CONCURRENT_RENDER_THREAD if platform == "linux" or platform == "linux2": raise NotImplementedError("dng converter is not available in Linux") elif platform == "darwin": adobeDNGPathCMD = "/Applications/Adobe\ DNG\ Converter.app/Contents/MacOS/Adobe\ DNG\ Converter -c -e %s" elif platform == "win32": adobeDNGPathCMD = "\"C:\\Program Files\\Adobe\\Adobe DNG Converter\\Adobe DNG Converter.exe\" -c -e %s" logger = logging.getLogger(__name__) def render_single(s, file): # logger.debug(adobeDNGPathCMD % file) tqdm.write(adobeDNGPathCMD % file) subprocess.call(adobeDNGPathCMD % file, shell=True) s.release() def raw2dng(d): s = threading.Semaphore(MAX_CONCURRENT_RENDER_THREAD) threads = [] for dir, _, files in os.walk(d): logger.info("Currently processing %s folder, %d file(s) to process." % (d, len(files))) for f in tqdm(files): s.acquire() EXT = f.split('.')[-1].upper() if EXT in RAWEXT: t = threading.Thread(target=render_single, name="Rdr_"+f, args=(s, os.path.join(dir, f))) threads.append(t) t.start() for x in threads: x.join()
#!/usr/bin/env python
#
# Paulo Sherring 2020
# Portions Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

'''
TODO:
* Correct pointer positioning:
void VIDEOIF::VIDEOIF_Layer::vSetPictureData(const * VIDEOIN_tstPicData pstPicdata)
                                             ~~~~~~^This should be here: ^
* Correct implementation calls:
oVIDEOIF_Layer->vSetSendNextDecodedFrameMessage(bool boEnableSendNext);
                                                ~~~~~~^ This need to be dropped. Only call with variable name
* Generate ctor and dtor on implementation file.
'''

import argparse
import fnmatch
import os
import sys
import tempfile

from mycpp import __version__
from mycpp import ast
from mycpp import tokenize
from mycpp import utils

# File extensions treated as C++ headers / sources by match_file().
_HEADER_EXTENSIONS = ('.h', '.hh', '.hpp', '.h++', '.hxx')
_CPP_EXTENSIONS = ('.c', '.cc', '.cpp', '.c++', '.cxx')


def match_file(filename, exclude_patterns):
    """Return True if *filename* is a C++ file or a directory that should
    be traversed, and is not hidden or excluded by *exclude_patterns*."""
    base_name = os.path.basename(filename)
    if base_name.startswith('.'):
        return False
    for pattern in exclude_patterns:
        if fnmatch.fnmatch(base_name, pattern):
            return False
    # BUG FIX: the original called find_warnings.is_header_file /
    # find_warnings.is_cpp_file, but no `find_warnings` module is imported
    # anywhere in this file, so any non-excluded file raised NameError.
    # The extension checks are inlined here instead.
    ext = os.path.splitext(base_name)[1].lower()
    if ext in _HEADER_EXTENSIONS:
        return True
    if ext in _CPP_EXTENSIONS:
        return True
    if os.path.isdir(filename):
        return True
    return False


def find_files(filenames, exclude_patterns):
    """Yield matching file names, expanding directories recursively."""
    while filenames:
        name = filenames.pop(0)
        if os.path.isdir(name):
            for root, directories, children in os.walk(name):
                filenames += [os.path.join(root, f) for f in sorted(children)
                              if match_file(os.path.join(root, f), exclude_patterns)]
                # Prune excluded/hidden directories in place so os.walk
                # does not descend into them.
                directories[:] = [d for d in directories
                                  if match_file(os.path.join(root, d), exclude_patterns)]
        else:
            yield name


def generate_mocked_header(entire_ast):
    """Emit a GoogleMock header (MOCK_METHODn declarations) for every class
    in *entire_ast*; writes it to out.h and returns the text."""
    HoutFile = ''
    for classNode in entire_ast:
        if isinstance(classNode, ast.Class):
            # Include guard named after the class.
            HoutFile += '#ifndef MOCK_{}_{}\n#define MOCK_{}_{}\n'.format(classNode.name.upper(), 'H', classNode.name.upper(), 'H')
            for namespacelevel in classNode.namespace:
                HoutFile += 'namespace %s\n{\n' % (namespacelevel)
            # NOTE(review): 'class %sMock:\n{\n' emits `class FooMock:` with
            # nothing after the colon — invalid C++; presumably a base-class
            # list was intended here.  Confirm the expected output format.
            HoutFile += 'class %sMock:\n{\n' % (classNode.name)
            if classNode.body is not None:
                for funcNode in classNode.body:
                    if (isinstance(funcNode, ast.Function)):
                        # Ctors/Dtors (no return type) are not mocked.
                        if funcNode.return_type is not None:
                            HoutFile += "\tMOCK_METHOD%d(%s, %s%s);\n" % (len(funcNode.parameters), funcNode.name, funcNode.get_return_type(), funcNode.get_argument_signature_types())
                HoutFile += '} // endOfMockClass\n'
                for namespacelevel in reversed(classNode.namespace):
                    HoutFile += '} // namespace%s\n' % (namespacelevel)
                HoutFile += '#endif //MOCK_{}_{}\n\n'.format(classNode.name.upper(), 'H')
            else:
                # Forward declaration only?  Let it be.
                pass
        else:
            pass
    with open('out.h', 'w') as file:
        file.write(HoutFile)
    return HoutFile


def generate_mocked_impl(entire_ast):
    """Emit the matching implementation file that forwards every method of
    the real class to a shared NiceMock instance; writes out.cpp and
    returns the text."""
    IoutFile = ''
    for classNode in entire_ast:
        if isinstance(classNode, ast.Class):
            IoutFile += '#include "mock_%s.h"\n#include "%s.h"\n' % (classNode.name.lower(), classNode.name.lower())
            IoutFile += '\nusing ::testing::NiceMock;\n'
            # Global shared mock instance the forwarders delegate to.
            IoutFile += '\nstd::shared_ptr<NiceMock<'
            for namespacelevel in classNode.namespace:
                IoutFile += namespacelevel + '::'
            IoutFile += '%sMock>> o%s;\n\n' % (classNode.name, classNode.name)
            # for namespacelevel in classNode.namespace:
            #     IoutFile += 'namespace %s\n{\n' % (namespacelevel)
            IoutFile += '\n'
            if classNode.body is not None:
                for funcNode in classNode.body:
                    if (isinstance(funcNode, ast.Function)):
                        # Fully-qualified signature line of the forwarder.
                        ret = ''
                        ret += funcNode.get_return_type()
                        for namespacelevel in funcNode.namespace:
                            ret += namespacelevel + '::'
                        ret += classNode.name
                        ret += '::' + funcNode.name + funcNode.get_argument_signature_types_names()
                        ret += '\n'
                        IoutFile += ret
                        ret = ''
                        if funcNode.return_type is None:
                            # Ctor/Dtor: empty body, nothing to forward.
                            ret += '{\n\n}\n\n'
                        else:
                            if funcNode.return_type.name == 'void':
                                ret += '{\n\to%s->%s%s;\n}\n\n' % (classNode.name, funcNode.name, funcNode.get_argument_signature_names())
                            else:
                                ret += '{\n\treturn o%s->%s%s;\n}\n\n' % (classNode.name, funcNode.name, funcNode.get_argument_signature_names())
                        IoutFile += ret
                IoutFile += '} // endOfMockClass\n'
                IoutFile += '\n'
            # for namespacelevel in reversed(classNode.namespace):
            #     IoutFile += '} // namespace%s\n' % (namespacelevel)
        else:
            pass
    with open('out.cpp', 'w') as file:
        file.write(IoutFile)
    return IoutFile


def mockFile(inputData):
    """Generate mock header and implementation for *inputData*, which is
    either a path to a C++ header or the header text itself.

    Returns (header_text, impl_text); ('', '') on parse errors, or None
    when the file cannot be read.
    """
    bHasTempFile = False
    if os.path.isfile(inputData):
        targetFile = inputData
        bHasTempFile = False
    else:
        # Treat inputData as raw source: park it in a temp file so the
        # parser can read it from disk.
        temporaryFile = tempfile.NamedTemporaryFile("w+", delete=False)
        temporaryFile.writelines(inputData)
        targetFile = temporaryFile.name
        temporaryFile.close()
        bHasTempFile = True
    try:
        source = utils.read_file(targetFile)
        if source is None:
            # NOTE(review): this returns None while the error paths below
            # return ('', '') — callers must handle both shapes.
            return None
        builder = ast.builder_from_source(source, targetFile, [], [], False)
        entire_ast = list([_f for _f in builder.generate() if _f])
        header = generate_mocked_header(entire_ast)
        impl = generate_mocked_impl(entire_ast)
    except tokenize.TokenError as exception:
        return '', ''
    except (ast.ParseError, UnicodeDecodeError) as exception:
        return '', ''
    if bHasTempFile == True:
        os.remove(targetFile)
    return header, impl


if __name__ == "__main__":
    try:
        # NOTE(review): sys.exit() with the (header, impl) tuple prints the
        # tuple and exits non-zero — presumably unintended; confirm.
        sys.exit(mockFile('in.h'))
    except KeyboardInterrupt:
        sys.exit(1)
import datetime from django.conf import settings from django.db import models from django.contrib.auth.models import AbstractBaseUser, UserManager, PermissionsMixin class User(AbstractBaseUser, PermissionsMixin): email = models.EmailField(unique=True) username = models.CharField(max_length=100, blank=True, null=True) date_joined = models.DateTimeField(auto_now=True) USERNAME_FIELD = "email" REQUIRED_FIELDS = ['username'] objects = UserManager() def __str__(self): return self.email class VM(models.Model): user = models.OneToOneField(User, on_delete='CASCADE') base_url = models.CharField(max_length=255, unique=True) port = models.IntegerField(unique=True) last_modified = models.DateTimeField() class Meta: unique_together = ('user', 'base_url', 'port') def __str__(self): return '%s: %s' % (self.user.username, self.base_url) def is_active(self): if (self.last_modified - datetime.datetime.now()).seconds < settings.TIMEOUT: return True
from django.conf.urls import url, include from rest_framework import routers from core.views import ProviderViewSet, UpdateViewSet from rest_framework_swagger.views import get_swagger_view router = routers.DefaultRouter() router.register(r'provider', ProviderViewSet) router.register(r'update', UpdateViewSet) schema_view = get_swagger_view(title="API Docs") urlpatterns = [ url(r'^docs/', schema_view), ] urlpatterns += router.urls
from selenium import webdriver from selenium.webdriver.chrome.options import Options from selenium.webdriver.common.keys import Keys from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.common.by import By import time,sys,pickle from selenium.webdriver.common.action_chains import ActionChains btc = "3MUT2imvKa2V4BZ6cBGAfZFeQ5PS9kopdk" chrome_options = Options() #chrome_options.add_argument("--headless --window-size=1920,1080") chrome_options.binary_location = r"/app/.apt/usr/bin/google-chrome" driver = webdriver.Chrome(chrome_options=chrome_options) #driver = webdriver.Chrome(executable_path="chromedriver.exe", chrome_options=chrome_options) while True: print(">> MADE BY KRISH!!") print(">> Opening Website") driver.get("http://claimfreebtc.win/") print(">> Closing Ad") try: driver.find_element_by_xpath('//*[@id="sticky_bar_logo"]/div[2]/button').click() except: print(">> Ad Close Failed : Maybe No AD!") print(">> Input BTC address : " + btc) address = driver.find_element_by_name('addy') address.send_keys(btc) print(">> Solving Captcha") source = driver.find_element_by_xpath('//*[@id="captchmeslider"]') dest = driver.find_element_by_xpath('//*[@id="captchmerefreshimg"]') ActionChains(driver).drag_and_drop(source, dest).perform() time.sleep(3) print(">> Claiming") claim = driver.find_element_by_xpath('/html/body/center/table/tbody/tr/td[2]/div/form/input[2]') ActionChains(driver).move_to_element(driver.find_element_by_xpath('/html/body/center/iframe[2]')).perform() claim.click() print(">> Claimed! Waiting 5 minutes") time.sleep(305)
""" Luke """ import os.path as osp import glob import tqdm import numpy as np import mmcv from mmcap.apis.inference import init_caption from mmcap.apis.extract import extract_encoder_feat from mmcap.datasets.tokenizers.tokenization_kobert import KoBertTokenizer def extract_feat(model, tokenizer, src_dir:str, dst_dir:str): img_pathes = glob.glob('{}*.jpg'.format(src_dir)) for img_path in tqdm.tqdm(img_pathes, total=len(img_pathes)): result = extract_encoder_feat(model, tokenizer, img_path) img_id = img_path.split('/')[-1].split('.')[0] feat_dst_path = osp.join(dst_dir, '{}_feat.pkl'.format(img_id)) mask_dst_path = osp.join(dst_dir, '{}_mask.pkl'.format(img_id)) pos_dst_path = osp.join(dst_dir, '{}_pos.pkl'.format(img_id)) # dump result with open(feat_dst_path, 'wb') as f: mmcv.dump(result[0], f, file_format='pkl') with open(mask_dst_path, 'wb') as f: mmcv.dump(result[1], f, file_format='pkl') with open(pos_dst_path, 'wb') as f: mmcv.dump(result[2], f, file_format='pkl') def main(): dir_info = [{'mode': 'train', 'src': 'data/coco/train2017/', 'dst': 'data/coco/features_train2017/'}, {'mode': 'val', 'src': 'data/coco/val2017/', 'dst': 'data/coco/features_val2017/'}] config = 'configs/image_captioning/cbnet_transformer_test_nondist.py' model = init_caption(config) tokenizer = KoBertTokenizer.from_pretrained('monologg/kobert') for dir_dic in dir_info: print('\nProcessing {}...'.format(dir_dic['mode'])) src_dir = dir_dic['src'] dst_dir = dir_dic['dst'] extract_feat(model, tokenizer, src_dir, dst_dir) if __name__ == "__main__": main()
from train import train_model from data_loader import load from examples.NIPS.MNIST.mnist import MNIST_Net, neural_predicate import torch from network import Network from model import Model from optimizer import Optimizer train_queries = load('train.txt') test_queries = load('test.txt')[:100] def test(model): acc = model.accuracy(test_queries, test=True) print('Accuracy: ', acc) return [('accuracy', acc)] with open('multi_digit.pl') as f: problog_string = f.read() network = MNIST_Net() net = Network(network, 'mnist_net', neural_predicate) net.optimizer = torch.optim.Adam(network.parameters(), lr=0.001) model = Model(problog_string, [net], caching=False) optimizer = Optimizer(model, 2) test(model) train_model(model, train_queries, 1, optimizer, test_iter=1000, test=test, snapshot_iter=10000)
from security.backends.app import SecurityBackend class SecurityTestingBackend(SecurityBackend): name = 'security.backends.testing' label = 'security_backends_testing' backend_name = 'testing' reader = 'security.backends.testing.reader.TestingBackendReader'
from django.contrib import admin from .models import Profile, Classroom, Reservation # Register your models here. admin.site.register(Profile) admin.site.register(Classroom) admin.site.register(Reservation)
from rest_framework.views import APIView from api.models import Profile from rest_framework.exceptions import PermissionDenied class HaveProfileMixin: def initial(self, request, *args, **kwargs): super().initial(request, *args, **kwargs) try: request.user.profile except Profile.DoesNotExist: raise PermissionDenied("You don't have a profile") class IsEmployeeMixin: def initial(self, request, *args, **kwargs): super().initial(request, *args, **kwargs) if request.user.profile.employee_id is None: raise PermissionDenied("You don't seem to be a talent") self.employee = self.request.user.profile.employee self.user = self.request.user class IsEmployerMixin: def initial(self, request, *args, **kwargs): super().initial(request, *args, **kwargs) if request.user.profile.employer_id is None: raise PermissionDenied("You don't seem to be an employer") self.employer = self.request.user.profile.employer self.user = self.request.user class WithProfileView(HaveProfileMixin, APIView): pass class EmployeeView(IsEmployeeMixin, WithProfileView): pass class EmployerView(IsEmployerMixin, WithProfileView): """ View Super class that validates that a User is a valid Employer """ pass
"""Regex tutorial script: demonstrates character classes, anchors and
quantifiers with the `re` module.

Fixes over the original:
- all patterns use raw strings (``"\\d"`` etc. are invalid escape sequences
  and emit SyntaxWarning on Python 3.12+);
- the variable ``str`` no longer shadows the builtin (renamed ``text``);
- the explanatory no-op triple-quoted strings are now real comments.
All printed output is unchanged.
"""
import re

print("----------匹配单个字符与数字----------")
# .                matches any character except a newline
# [0123456789]     a character class: matches any single listed character
# [tracy]          matches any one of 't','r','a','c','y'
# [a-z] / [A-Z]    any lowercase / uppercase letter
# [0-9]            any digit (same as [0123456789])
# [0-9a-zA-Z]      any digit or letter; [0-9a-zA-Z_] also allows underscore
# [^tracy]         ^ inside brackets negates the set
# [^0-9]           any non-digit character
# \d / \D          digit / non-digit       (same as [0-9] / [^0-9])
# \w / \W          word char / non-word    (same as [0-9a-zA-Z_] and its negation)
# \s / \S          whitespace / non-whitespace
print(re.findall(r"\d", "sUnck ! is 6 a go3od man2"))

print("----------锚字符(边界字符)----------")
# ^ / $    match at start / end of a line
# \A / \Z  match only at the very start / end of the whole string,
#          even under re.M (unlike ^ and $)
# \b / \B  word boundary / non-boundary ('er\b' matches "never", not "nerve")
# print(re.search(r"^tracy", "tracy is a good man"))
print(re.findall(r"^tracy", "tracy is a good man\ntracy is a nice man", re.M))
print(re.findall(r"\Atracy", "tracy is a good man\ntracy is a nice man", re.M))
print(re.findall(r"man$", "tracy is a good man\ntracy is a nice man", re.M))
print(re.findall(r"man\Z", "tracy is a good man\ntracy is a nice man", re.M))
print(re.search(r"er\b", "never "))
print(re.search(r"er\b", "nerve"))
print(re.search(r"er\B", "never"))
print(re.search(r"er\B", "nerve"))

print("----------匹配多个字符----------")
# (xyz)    matches the literal group xyz as a unit
# x?       0 or 1 of x          x*      0 or more (greedy)
# x+       1 or more            x{n}    exactly n
# x{n,}    at least n           x{n,m}  between n and m (n <= m)
# x|y      alternation: x or y
print(re.findall(r"(tracy)", "tracyis a good man,tracy is a nice man"))
print(re.findall(r"a?", "aaa"))          # non-greedy-looking: matches 0 or 1 'a' at each position
print(re.findall(r"a*", "aaaabbba"))     # greedy: as many 'a's as possible
print(re.findall(r".*", "aaaabbba"))     # matches everything
print(re.findall(r"a+", "aaaabbba"))     # greedy: one or more 'a'
print(re.findall(r"a{3}", "aaaabbba"))   # ['aaa']
print(re.findall(r"a{3}", "aa"))         # fewer than three -> no match
print(re.findall(r"a{3}", "aaaabbbaaaa"))    # takes runs of exactly three
print(re.findall(r"a{3,}", "aaaabbbaaaa"))   # at least three
print(re.findall(r"a{2,3}", "aaaabbbaaaa"))  # at least 2, at most 3
print(re.findall(r"((t|T)racy)", "tracy--Tracy"))

print("----------------特殊----------------")
# *? +? x?   lazy quantifiers: match as little as possible (defeats greediness)
# (?:x)      like (xyz) but does not create a capture group
# Task: extract "tracy ... man" fragments.
text = "tracy is a good man! tracy is a nice man!tracy is a handsome man"
print(re.findall(r"^tracy.*?man$", text))
print(re.findall(r"tracy.*?man", text))
# C-style comments: /* part */  /* part1 */
print(re.findall(r"//*.*?/*/", "/* part1*/ /* part1 */"))
# The pattern escapes '*' with '/', uses .*? for a lazy body, and closes with /*/
##future feature
#%%
import requests
import json
import os


def get_halts():
    """Fetch current NYSE trade halts and return only the ones not seen before.

    Queries the public NYSE trade-halts API, maintains a local JSON cache of
    every halt seen so far, and returns the halts that were not already in
    the cache (new halts or resumptions).

    Returns
    -------
    list
        Halt records (dicts as returned by the API) that are new since the
        previous call.
    """
    res = requests.get('https://www.nyse.com/api/trade-halts/current?offset=0&max=50').json()
    halts = res['results']['tradeHalts']
    cache_path = '/tmp/json/halts.json'
    try:
        with open(cache_path, 'r') as f:
            halts_json = json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        # First run or corrupted cache: start with an empty history.
        # (The original bare `except:` also swallowed KeyboardInterrupt etc.)
        halts_json = []
    new_halts = []
    for halt in halts:
        if halt not in halts_json:
            halts_json.append(halt)
            new_halts.append(halt)
    # Ensure the cache directory exists before writing — the original open()
    # raised FileNotFoundError on a machine where /tmp/json was absent.
    os.makedirs(os.path.dirname(cache_path), exist_ok=True)
    with open(cache_path, 'w') as f:
        json.dump(halts_json, f)
    return new_halts


get_halts()
import geomstats.backend as gs
from geomstats.geometry.euclidean import Euclidean
from geomstats.geometry.hypersphere import Hypersphere
from geomstats.geometry.special_orthogonal import SpecialOrthogonal
from tests.data_generation import TestData


class ConnectionTestData(TestData):
    """Smoke-test data provider for affine-connection / metric tests.

    Each ``*_test_data`` method builds a list of keyword-argument dicts and
    wraps it with ``TestData.generate_tests`` for the parametrized test
    runner.
    """

    def metric_matrix_test_data(self):
        # The Euclidean metric matrix is the identity at every point.
        smoke_data = [
            dict(
                space=Euclidean(dim=4),
                point=gs.array([0.0, 1.0, 0.0, 0.0]),
                expected=gs.eye(4),
            )
        ]
        return self.generate_tests(smoke_data)

    def parallel_transport_test_data(self):
        # Random-sample test: dimension and sample count only.
        smoke_data = [dict(dim=2, n_samples=2)]
        return self.generate_tests(smoke_data)

    def parallel_transport_trajectory_test_data(self):
        smoke_data = [dict(dim=2, n_samples=2)]
        return self.generate_tests(smoke_data)

    def exp_connection_metric_test_data(self):
        # Exp via the connection vs. via the metric, batched and single-point.
        # Base points appear to be (theta, phi)-style angle coordinates.
        smoke_data = [
            dict(
                dim=2,
                tangent_vec=gs.array([[0.25, 0.5], [0.30, 0.2]]),
                base_point=gs.array([[gs.pi / 2, 0], [gs.pi / 6, gs.pi / 4]]),
            ),
            dict(
                dim=2,
                tangent_vec=gs.array([0.25, 0.5]),
                base_point=gs.array([gs.pi / 2, 0]),
            ),
        ]
        return self.generate_tests(smoke_data)

    def log_connection_metric_test_data(self):
        # Log via the connection vs. via the metric; looser tolerance because
        # the connection log is computed numerically.
        smoke_data = [
            dict(
                dim=2,
                point=gs.array([1.0, gs.pi / 2]),
                base_point=gs.array([gs.pi / 3, gs.pi / 4]),
                atol=1e-4,
            ),
            dict(
                dim=2,
                point=gs.array([[1.0, gs.pi / 2], [gs.pi / 6, gs.pi / 3]]),
                base_point=gs.array([[gs.pi / 3, gs.pi / 4], [gs.pi / 2, gs.pi / 4]]),
                atol=1e-4,
            ),
        ]
        return self.generate_tests(smoke_data)

    def geodesic_with_exp_connection_test_data(self):
        # `expected` is the shape of the sampled geodesic:
        # (n_times, dim) for a single initial condition,
        # (n_ic, n_times, dim) when point or tangent_vec is batched.
        smoke_data = [
            dict(
                dim=2,
                point=gs.array([1.0, gs.pi / 2]),
                tangent_vec=gs.array([gs.pi / 3, gs.pi / 4]),
                n_times=10,
                n_steps=10,
                expected=(10, 2),
                atol=1e-6,
            ),
            dict(
                dim=2,
                point=gs.array([1.0, gs.pi / 2]),
                tangent_vec=gs.array([[gs.pi / 3, gs.pi / 4], [gs.pi / 2, -gs.pi / 4]]),
                n_times=10,
                n_steps=100,
                expected=(2, 10, 2),
                atol=1e-6,
            ),
            dict(
                dim=2,
                point=gs.array([[1.0, gs.pi / 2], [gs.pi / 6, gs.pi / 3]]),
                tangent_vec=gs.array([[gs.pi / 3, gs.pi / 4], [gs.pi / 2, gs.pi / 4]]),
                n_times=10,
                n_steps=100,
                expected=(2, 10, 2),
                atol=1e-6,
            ),
        ]
        return self.generate_tests(smoke_data)

    def geodesic_with_log_connection_test_data(self):
        # Boundary-value variant: geodesic defined by start and end point.
        smoke_data = [
            dict(
                dim=2,
                point=gs.array([1.0, gs.pi / 2]),
                end_point=gs.array([gs.pi / 3, gs.pi / 4]),
                n_times=10,
                n_steps=10,
                expected=(10, 2),
                atol=1e-6,
            ),
            dict(
                dim=2,
                point=gs.array([[1.0, gs.pi / 2], [gs.pi / 6, gs.pi / 3]]),
                end_point=gs.array([[gs.pi / 3, gs.pi / 4], [gs.pi / 2, gs.pi / 4]]),
                n_times=10,
                n_steps=100,
                expected=(2, 10, 2),
                atol=1e-6,
            ),
        ]
        return self.generate_tests(smoke_data)

    def geodesic_and_coincides_exp_test_data(self):
        # Checks that sampling a geodesic agrees with repeated exp steps,
        # on the sphere and on SO(4).
        smoke_data = [
            dict(
                space=Hypersphere(2),
                n_geodesic_points=10,
                vector=gs.array([[2.0, 0.0, -1.0]] * 2),
            ),
            dict(
                space=SpecialOrthogonal(n=4),
                n_geodesic_points=10,
                vector=gs.random.rand(2, 4, 4),
            ),
        ]
        return self.generate_tests(smoke_data)

    def geodesic_invalid_initial_conditions_test_data(self):
        smoke_data = [dict(space=SpecialOrthogonal(n=4))]
        return self.generate_tests(smoke_data)

    def geodesic_test_data(self):
        smoke_data = [dict(space=Hypersphere(2))]
        return self.generate_tests(smoke_data)

    def ladder_alpha_test_data(self):
        # Data for Schild's/pole ladder alpha-parameter tests.
        smoke_data = [dict(dim=2, n_samples=2)]
        return self.generate_tests(smoke_data)
import os
import glob
import numpy as np
from random import randrange
from scipy.io import wavfile


class DataLoader:
    """Batch loader for a vocal source-separation pipeline.

    Train/validation samples are pre-computed spectrogram pairs stored as
    ``.npy`` files (``[mixture magnitude, vocal magnitude]``); test samples
    are raw 16 kHz ``.wav`` files.
    """

    def __init__(self, train_spec_dir, val_spec_dir, test_spec_dir,
                 train_batch_size, val_batch_size, test_batch_size,
                 sequence_length, fft_length):
        base_dir = ''
        self.train_paths = glob.glob(os.path.join(train_spec_dir, '*.npy'), recursive=True)
        self.val_paths = glob.glob(os.path.join(val_spec_dir, '*.npy'), recursive=True)
        self.test_paths = glob.glob(os.path.join(test_spec_dir, '**/*.wav'), recursive=True)
        self.test_paths_voc_gt = glob.glob(os.path.join(test_spec_dir, 'labeled/*vocals16.wav'), recursive=True)
        self.test_paths_pred = glob.glob(os.path.join(base_dir, '*mixture16.wav'), recursive=True)
        self.train_batch_size = train_batch_size
        self.val_batch_size = val_batch_size
        self.test_batch_size = test_batch_size
        self.sequence_length = sequence_length
        self.fft_length = fft_length
        # Global normalization statistics produced by the data-prep step.
        # allow_pickle=True keeps this working on NumPy >= 1.16.3 if the
        # pair was saved as an object array.
        self.mean_, self.std_ = np.load('data_prep/mean_std.npy', allow_pickle=True)

    # takes .npy file where first sublist is mixture magnitude, second
    # sublist is vocal magnitude
    def load_sec_mpa(self, path):
        """Load one sample and return (normalized mixture, ratio mask).

        The mask is a Wiener-style soft ratio mask clipped to [0, 1].
        """
        # allow_pickle=True is required when the [matrix, label] pair is
        # stored as an object array (NumPy >= 1.16.3 defaults it to False).
        matrix, label = np.load(path, allow_pickle=True)
        matrix_n = (matrix - self.mean_) / self.std_
        # The original also computed the plain ratio label/(matrix_n+1e-6)
        # into an unused variable; that dead computation has been removed.
        rm = np.sqrt(label ** 2 / (matrix_n ** 2 + label ** 2))
        ratio_mask = np.clip(rm, 0, 1)
        return matrix_n, ratio_mask

    def batch_data_loader(self, batch_size, file_paths, index):
        """Return ([mixtures], [masks]) for batch number *index*."""
        matrixs = []
        labels = []
        for spec in file_paths[index * batch_size:(index + 1) * batch_size]:
            matrix, label = self.load_sec_mpa(spec)
            matrixs.append(matrix)
            labels.append(label)
        return matrixs, labels

    def train_data_loader(self, index):
        return self.batch_data_loader(self.train_batch_size, self.train_paths, index)

    def val_data_loader(self, index):
        return self.batch_data_loader(self.val_batch_size, self.val_paths, index)

    def test_data_loader(self):
        """Return (file names, waveforms) for every test .wav file."""
        # Iterate the paths directly instead of range(len(...)).
        song_names = [path.split('/')[-1] for path in self.test_paths]
        song_wav = [wavfile.read(path)[1] for path in self.test_paths]
        return song_names, song_wav

    def test_data_loader_sdr(self):
        """Return (ground-truth vocals, predicted vocals) waveforms for SDR evaluation."""
        s, vocal_wav_gt = wavfile.read(self.test_paths_voc_gt[0])
        s, vocal_wav_pred = wavfile.read(self.test_paths_pred[0])
        return vocal_wav_gt, vocal_wav_pred
#!/usr/bin/env python
"""Minimal Ansible module that prints platform/distribution facts as JSON."""
import json

from ansible.module_utils.basic import *

# Empty argument spec: this module takes no parameters.
module = AnsibleModule({})

# Fixes over the original: the Python-2-only `print` statement is replaced
# with the print() function, and the JSON is built with json.dumps instead
# of string concatenation so the output stays valid even if a value
# contains quotes or backslashes.
print(json.dumps({
    "platform": get_platform(),
    "distribution": get_distribution(),
    "version": get_distribution_version(),
}))
""" This script is used to extract person's facial feature data from image using dlib face shape model and deep neural network model to extract face feature in 128-D vector """ import dlib import cv2 import os from sklearn.model_selection import train_test_split import numpy as np from sklearn.externals import joblib import time import shutil # Set the input directory of the user's data input_dir = '/home/alvin/Downloads/wiranto/' sub_dirs = os.listdir(input_dir) # Set the output directory of the user's data target_dir = 'target_person/wiranto/' # Enable preview for each train data enable_preview = False if not os.path.exists(target_dir): os.makedirs(target_dir) # DLIB's model path for face pose predictor and deep neural network model predictor_path='dlib_model/shape_predictor_68_face_landmarks.dat' face_rec_model_path='dlib_model/dlib_face_recognition_resnet_model_v1.dat' detector = dlib.get_frontal_face_detector() sp = dlib.shape_predictor(predictor_path) facerec = dlib.face_recognition_model_v1(face_rec_model_path) temp_data=[] for directory in sub_dirs: image_list = os.listdir(input_dir+directory) for image in image_list: image = cv2.imread(input_dir+directory+'/'+image) dets,scores,idx = detector.run(image, 0,0) for i, d in enumerate(dets): if len(idx)==1: shape = sp(image, d) face_descriptor = np.array([facerec.compute_face_descriptor(image, shape)]) if len(temp_data)==0: temp_data=face_descriptor else: temp_data=np.append(temp_data,face_descriptor,axis=0) is_eligible_frame=True cv2.rectangle(image,(d.left(),d.top()),(d.right(),d.bottom()),(255,0,0),2) if is_eligible_frame: scaled_image=cv2.resize(image,(int(image.shape[1]/2),int(image.shape[0]/2))) dets,scores,idx = detector.run(scaled_image, 0,-0.5) for i, d in enumerate(dets): if len(idx)==1: shape = sp(scaled_image, d) face_descriptor = np.array([facerec.compute_face_descriptor(scaled_image, shape)]) temp_data=np.append(temp_data,face_descriptor,axis=0) if enable_preview: cv2.namedWindow('preview', 
flags=cv2.WINDOW_NORMAL) cv2.imshow('preview',image) cv2.waitKey(0) print 'Obtained %i data'%(len(temp_data)) joblib.dump(temp_data,target_dir+'/face_descriptor.pkl')
class TreeNode(object):
    """Plain binary-tree node holding a value and two child links."""

    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class YourSolution(object):
    """Recursive in-order and pre-order binary-tree traversals."""

    def inorderTraversal(self, root):
        """Return node values in in-order (left, node, right).

        Bug fix: returns [] for an empty tree; the original dereferenced
        ``root.val`` unconditionally and raised AttributeError on None.
        """
        array = []
        if root is not None:
            self.inorderTraversalInternal(root, array)
        return array

    def inorderTraversalInternal(self, root, array):
        # Recursive worker: appends into the shared accumulator list.
        if root.left is not None:
            self.inorderTraversalInternal(root.left, array)
        array.append(root.val)
        if root.right is not None:
            self.inorderTraversalInternal(root.right, array)
        return array

    def preorderTraversal(self, root):
        """Return node values in pre-order (node, left, right); [] for None."""
        array = []
        if root is not None:
            self.preorderTraversalInternal(root, array)
        return array

    def preorderTraversalInternal(self, root, array):
        array.append(root.val)
        if root.left is not None:
            self.preorderTraversalInternal(root.left, array)
        if root.right is not None:
            self.preorderTraversalInternal(root.right, array)
        return array


if __name__ == '__main__':
    YS = YourSolution()
    tree = TreeNode(1)
    tree.left = TreeNode(2)
    tree.left.left = TreeNode(3)
    tree.left.left.left = TreeNode(4)
    tree.right = TreeNode(5)
    tree.right.left = TreeNode(6)
    r = YS.preorderTraversal(tree)
    print("Pre-Order Traversal")
    for i in r:
        print(i)
    print("In-Order Traversal")
    r = YS.inorderTraversal(tree)
    for i in r:
        print(i)
# Modulo registro
#
# Max Sebastian Herrera Salazar
#
# Octubre 2017
#
# Version 1
#
import random

# Two parallel series of random samples with the same (random) length.
data = []
data2 = []
length = random.randrange(1, 1000)
# The original `while i <= length` produced length+1 entries; keep that.
for _ in range(length + 1):
    data.append(random.randrange(150, 1000))
    data2.append(random.randrange(1, 10000))

# print(data)
# Bug fixes: 'w' instead of 'r+' — 'r+' raises FileNotFoundError when the
# file does not exist and does not truncate, leaving stale bytes from a
# longer previous run. The with-block guarantees the file is closed.
with open("registro.txt", 'w') as archivo:
    for value, value2 in zip(data, data2):
        archivo.write(str(value) + ',' + str(value2))
        archivo.write('\n')
import requests

# Fetch the DataCamp teaching-documentation page over plain HTTP and dump
# the raw response body to stdout.
url = 'http://datacamp.com/teach/documentation'
r = requests.get(url)
# r.text is the body decoded with the encoding requests infers from the
# response headers.
text = r.text
print(text)
""" Transformations of coordinate systems. """ import numpy as np from scipy.spatial.transform import Rotation as R from wana import analysis def transform_to_reference_system(sensor): """ Tranform the data in the given sensor object to a reference system. Use the angular velocities and time intervals to calculate the angles and then rotate at each time step to get the accelerations in the reference system. Parameters ---------- sensor: wana.sensor.Sensor Sensor to be transformed. """ construct_rotations(sensor) transform_accelerations(sensor) def construct_rotations(sensor): """ Construct rotations for every time step to transform to the inertial system. Parameters ---------- sensor: wana.sensor.Sensor Object holding the gyroscope data. """ N = len(sensor.data["delta_angle_x"]) rotations = [R.identity()] for n in range(1, N): # get angles in radians from the previous interval phi_x = sensor.data["delta_angle_x"][n-1]*np.pi/180 phi_y = sensor.data["delta_angle_y"][n-1]*np.pi/180 phi_z = sensor.data["delta_angle_z"][n-1]*np.pi/180 # calculate the SORA rotation vector v = np.array([phi_x, phi_y, phi_z]) r_step = R.from_rotvec(-v) r_prev = rotations[-1] # print("r_step", r_step.as_matrix()) # print("r_prev", r_prev.as_matrix()) r = r_step*r_prev rotations.append(r) sensor.data["rotation_to_iss"] = rotations def transform_accelerations(sensor): """ Rotate acceleration vectors to the iss system. iss = initial sensor system Parameters ---------- sensor: wana.sensor.Sensor Object holding the sensor data. 
""" N = len(sensor.data["delta_angle_x"]) sensor.data["iss_ax"] = np.ones(N) sensor.data["iss_ay"] = np.ones(N) sensor.data["iss_az"] = np.ones(N) sensor.units["iss_ax"] = "m/s2" sensor.units["iss_ay"] = "m/s2" sensor.units["iss_az"] = "m/s2" for n in range(0, N): r = sensor.data["rotation_to_iss"][n] vec = np.array([ sensor.data["ax"][n], sensor.data["ay"][n], sensor.data["az"][n] ]) vec_rot = r.apply(vec) sensor.data["iss_ax"][n] = vec_rot[0] sensor.data["iss_ay"][n] = vec_rot[1] sensor.data["iss_az"][n] = vec_rot[2] def calc_lab_ez(sensor): """ Calculate the vertical unit vector of the lab frame. Parameters ---------- sensor: wana.sensor.Sensor Object holding the sensor data. """ g_vec = np.array([ sensor.data["iss_gx"][0], sensor.data["iss_gy"][0], sensor.data["iss_gz"][0] ]) e_z = g_vec / np.linalg.norm(g_vec) varname = "lab_ez" sensor.data[varname] = e_z def calc_lab_ehor(sensor): """ Calculate the horizontal unit vectors of the lab frame. Parameters ---------- sensor: wana.sensor.Sensor Object holding the sensor data. """ e_z = sensor.data["lab_ez"] g_vec = np.array([ sensor.data["iss_gx"][0], sensor.data["iss_gy"][0], sensor.data["iss_gz"][0] ]) g = np.linalg.norm(g_vec) e_z = g_vec / g e_x = np.array([1, 0, 0]) - np.dot([1, 0, 0], e_z)*e_z # e_x = np.cross([1, 0, 0], e_z) e_x /= np.linalg.norm(e_x) sensor.data["lab_ex"] = e_x e_y = np.cross(e_z, e_x) e_y /= np.linalg.norm(e_y) sensor.data["lab_ey"] = e_y def calc_trafo_iss_to_lab(sensor): """ Calculate the transformation matrix from iss to lab frame. Parameters ---------- sensor: wana.sensor.Sensor Object holding the sensor data. """ e_x = sensor.data["lab_ex"] e_y = sensor.data["lab_ey"] e_z = sensor.data["lab_ez"] matrix = np.array([ e_x, e_y, e_z ]) sensor.data["trafo_iss_to_lab"] = matrix def iss_to_lab(sensor, varpattern, unit=None): """ Transform the iss accelerations with gravity removed to lab frame. Variable names must be given with a {} to be replaced by the axis name. E.g. 
for accelerations with gravity removed: varpattern = "a{}_gr" Parameters ---------- sensor: wana.sensor.Sensor Object holding the sensor data. varpattern: str Variable pattern to transform. unit: str Physical unit of the variable. """ iss_x = sensor.data["iss_" + varpattern.format("x")] iss_y = sensor.data["iss_" + varpattern.format("y")] iss_z = sensor.data["iss_" + varpattern.format("z")] iss_vec = np.array([iss_x, iss_y, iss_z]) projection_matrix = sensor.data["trafo_iss_to_lab"] lab_vec = np.dot(projection_matrix, iss_vec) sensor.data["lab_" + varpattern.format("x")] = lab_vec[0] sensor.data["lab_" + varpattern.format("y")] = lab_vec[1] sensor.data["lab_" + varpattern.format("z")] = lab_vec[2] if unit is not None: sensor.units["lab_" + varpattern.format("x")] = unit sensor.units["lab_" + varpattern.format("y")] = unit sensor.units["lab_" + varpattern.format("z")] = unit analysis.calculate_norm(sensor, "lab_"+varpattern, unit=unit)
# -*- coding: utf-8 -*- """ Created on Fri Aug 7 11:25:38 2020 @author: PENG Feng @email: im.pengf@outlook.com """ from utils import date_str_p1 import os if not os.path.isdir("../input"): os.makedirs("../input") if not os.path.isdir("../output"): os.makedirs("../output") TYPE_E = "{}: improper types or lengths of parameters." VALUE_E = "{}: improper range of parameters." DATA_DIR = "../input/stockprice.csv" BUFF_SP_DIR = "../input/stockprice.pickle" BUFF_ALPHA_DIR = "../input/alpha_series.pickle" FIG_DIR = "../output/portfolio_backtest" PORTFOLIO_MONEY_DIR = "../output/portfolio_money.csv" PORTFOLIO_BACKTEST_DIR = "../output/portfolio_backtest.csv" LOG_DIR = "../output/output.log" BACKTEST_DATE_BEG = "2018-1-1" BACKTEST_N_MONTH = 12 BACKTEST_MONTHS = [] for i in range(BACKTEST_N_MONTH + 1): BACKTEST_MONTHS.append(BACKTEST_DATE_BEG) BACKTEST_DATE_BEG = date_str_p1(BACKTEST_DATE_BEG) N_MONEY_ADJ_ITER_A = 0.4 N_MONEY_ADJ_ITER_B = 1 N_ANNUAL_TRADING_DAY = 250 RISK_FREE_ANNUAL_RETURN_RATIO = 0.015
import tensorflow as tf
import numpy as np

from segmentation import segmentation_model, divide_image, merge_image

if __name__ == '__main__':
    # TensorFlow 1.x graph-mode inference: build the segmentation graph for
    # 128x128 RGB crops and apply a sigmoid to get per-pixel mask values.
    x = tf.placeholder(dtype=tf.float32, shape=[None, 128, 128, 3])
    output = segmentation_model(x, False)  # False presumably disables training mode — confirm in segmentation.py
    out_mask = tf.nn.sigmoid(output)

    saver = tf.train.Saver()
    sess = tf.Session()
    sess.run(tf.initializers.global_variables())
    # Restore trained weights from a specific checkpoint step.
    saver.restore(sess, './segmentation-crop-128.ckpt-44')

    images = list()
    for i in range(4):
        fname = './cut{}.jpg'.format(i + 1)  # test images
        # Decode and scale the JPEG to [0, 1] floats using TF ops, then pull
        # the concrete array out of the session.
        img = tf.read_file(fname)
        img = tf.image.decode_jpeg(img, channels=3)
        img = tf.cast(img, tf.float32)
        img /= 255.0
        image = sess.run(img)
        print(image.shape)
        # Split the full image into 128x128 crops, run the model on the
        # whole stack, then stitch the predicted masks back together.
        cropped = divide_image(image)
        print(cropped.shape)
        result = sess.run(out_mask, feed_dict={x: np.stack(cropped, axis=0)})
        print(result.shape)
        np.save('cut{}.npy'.format(i + 1), merge_image(result, image.shape))
# Countdown in twos from 20 to 2; a for-loop's else-branch runs when the
# loop finishes without hitting a break.
for var1 in range(20, 0, -2):
    print(var1)
else:
    print("Enough with loops")

cars = ["VW", "BMW", "Ford", "Mazda", "Tata"]
# Print the cars up to and including "Ford", then stop.
for mycars in cars:
    print(mycars)
    if mycars == "Ford":
        break

cars = ["VW", "BMW", "Ford", "Mazda", "Tata"]
# Print every car except "Ford" (guard instead of continue).
for mycars in cars:
    if mycars != "Ford":
        print(mycars)

# Strings are iterable: one character per line.
for class2 in "lifechoices":
    print(class2)

cars1 = ["VW", "BMW", "Ford", "Mazda", "Tata"]
bikes = ["Suzuki", "Kawasaki", "Honda", "Ducatti", "Vuka"]
# Nested loops pair every car with every bike.
for mycars1 in cars1:
    for mybikes in bikes:
        print(mycars1, mybikes)

# Growing star triangle (1 to 5 stars per line)...
for i in range(1, 6):
    print("* " * i)
# ...then a shrinking one (6 down to 1).
for j in reversed(range(1, 7)):
    print("* " * j)