text
stringlengths
0
1.05M
meta
dict
__author__ = 'arul' import hdf5_getters as GETTERS import sys import numpy as np import happybase # import utils.hbaseConnect as hbase import logging import operator import os from pyspark import SparkContext logging.basicConfig(filename='debug.txt',level=logging.DEBUG) ARTIST_ID_COLUMNID='ArtistId' COLUMN_FAMILY_NAME='cf' YEAR_COLUMNID='year' def getColumnValuesDict(features,h5FileName,artistId,trackId,year): """ Prepare a dictionary that can be assigned as the column values to a row. :param features: :param h5FileName: :param artistId: :param trackId: :return: """ # featuresDict = dict(enumerate(features)) featuresDict = {} #Assert that features size = 1*90 for val in range(0,features.shape[1]): try: featuresDict[COLUMN_FAMILY_NAME+':'+str(val+1)] = str(features[0][val]) except Exception,e: print e if (featuresDict.__len__() == 90): featuresDict[COLUMN_FAMILY_NAME+':'+str(ARTIST_ID_COLUMNID)] = artistId featuresDict[COLUMN_FAMILY_NAME+':'+str(YEAR_COLUMNID)] = str(year) return featuresDict else: logging.debug("Features Dict is not of size 90 for {}",h5FileName) return None def readAndSave(h5File,hBasetableObj): h5 = GETTERS.open_h5_file_read(h5File) timbreFeatures = GETTERS.get_segments_timbre(h5).T ftlen = timbreFeatures.shape[1] ndim = timbreFeatures.shape[0] assert ndim==12,'WRONG FEATURE DIMENSION, transpose issue?' 
finaldim = 90 # too small case if ftlen < 3: #Skip the file as it is not going to be useful return None avg = np.average(timbreFeatures,1) cov = np.cov(timbreFeatures) covflat = [] for k in range(12): covflat.extend( np.diag(cov,k) ) covflat = np.array(covflat) timbreFeatures = np.concatenate([avg,covflat]) features = timbreFeatures.reshape(1,finaldim) artistId = GETTERS.get_artist_id(h5) trackId = GETTERS.get_track_id(h5) year = GETTERS.get_year(h5) h5.close() return saveDataInHbase(h5File,artistId,trackId,year,features,hBasetableObj) def saveDataInHbase(h5FileName,artistId, trackId, year,features,hBasetableObj): """ Get the features and save in hbase db. Need to change the logic to include batch commits. :param artistId: :param trackId: :param features: :return: """ columnsDict = getColumnValuesDict(features,h5FileName,artistId,trackId,year) if(columnsDict is not None and columnsDict.__len__() != 0): hBasetableObj.put(trackId,columnsDict) return True else: logging.debug("Skipping the record from file {}",h5FileName) return False def readParition(filesIterator): """ Create a database connection :param files: :return: """ print("I got initialized") tableName = 'timbre_sample' hbaseCon = createHbaseTable(tableName) hBaseTableObj = hbaseCon.table(tableName) result = [] for file in filesIterator: result.append(readAndSave(file,hBaseTableObj)) hbaseCon.close() return result # files.map(lambda file: readAndSave(file,hBaseTableObj)) COLUMN_FAMILY_NAME='cf' def createHbaseTable(tableName,host='localhost',port=9090): # con = happybase.Connection('ec2-52-91-167-10.compute-1.amazonaws.com',port) con = happybase.Connection('localhost',port) # if(not con.tables().__contains__(tableName)): # con.create_table(tableName,{ # COLUMN_FAMILY_NAME:dict() # }) # print "table Created" # print con.tables() return con if __name__ == '__main__': filePath = sys.argv[1] with open(filePath,'r+') as fileList: fileListContent = fileList.read() listOfFiles = fileListContent.split('\n') #Last 
element is not valid listOfFiles.pop() sc = SparkContext(appName="msd") # print os.path.join(os.getcwd(),"/libraries/happybase.zip") # sc.addPyFile(os.path.join(os.getcwd(),"libraries/happybase.zip")) sc.addPyFile(sys.argv[2]+"happybase.zip") sc.addPyFile(sys.argv[2]+"thrift.zip") sc.addPyFile(sys.argv[2]+"hdf5_getters.py") sc.addPyFile(sys.argv[2]+"tables.egg") # filesRDD = sc.textFile(filePath) filesRDD = sc.parallelize(listOfFiles) resultList = filesRDD.mapPartitions(readParition).collect() finalResult = reduce(operator.and_,resultList,True) print finalResult # try: # for file in listOfFiles[1:]: # readAndSave(file) # except Exception,e: # logging.debug("Save failed for {}",file)
{ "repo_name": "Arulselvanmadhavan/Artist_Recognition_from_Audio_Features", "path": "DataCleanup/parsingTasks/loadData_PySpark.py", "copies": "1", "size": "4686", "license": "apache-2.0", "hash": -4754900468129175000, "line_mean": 31.7692307692, "line_max": 83, "alpha_frac": 0.6538625694, "autogenerated": false, "ratio": 3.325762952448545, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4479625521848545, "avg_score": null, "num_lines": null }
__author__ = 'arul' import hdf5_getters as GETTERS import sys import numpy as np import happybase import utils.hbaseConnect as hbase import logging logging.basicConfig(filename='debug.txt',level=logging.DEBUG) ARTIST_ID_COLUMNID=91 def getColumnValuesDict(features,h5FileName,artistId,trackId): """ Prepare a dictionary that can be assigned as the column values to a row. :param features: :param h5FileName: :param artistId: :param trackId: :return: """ # featuresDict = dict(enumerate(features)) featuresDict = {} #Assert that features size = 1*90 for val in range(0,features.shape[1]): try: featuresDict[hbase.COLUMN_FAMILY_NAME+':'+str(val+1)] = str(features[0][val]) except Exception,e: print e if (featuresDict.__len__() == 90): featuresDict[hbase.COLUMN_FAMILY_NAME+':'+str(ARTIST_ID_COLUMNID)] = artistId return featuresDict else: logging.debug("Features Dict is not of size 90 for {}",h5FileName) return None class hbaseConnection(object): def __init__(self,tableName): self.tableName = tableName self.hbaseConnection = hbase.createHbaseTable(tableName) def saveDataInHbase(self,h5FileName,artistId, trackId, features): """ Get the features and save in hbase db. Need to change the logic to include batch commits. :param artistId: :param trackId: :param features: :return: """ table = self.hbaseConnection.table(self.tableName) columnsDict = getColumnValuesDict(features,h5FileName,artistId,trackId) if(columnsDict is not None and columnsDict.__len__() != 0): table.put(trackId,columnsDict) else: logging.debug("Skipping the record from file {}",h5FileName) def readAndSave(self,h5File): #Get H5 Content h5 = GETTERS.open_h5_file_read(h5File) timbreFeatures = GETTERS.get_segments_timbre(h5).T ftlen = timbreFeatures.shape[1] ndim = timbreFeatures.shape[0] assert ndim==12,'WRONG FEATURE DIMENSION, transpose issue?' 
finaldim = 90 # too small case if ftlen < 3: #Skip the file as it is not going to be useful return None avg = np.average(timbreFeatures,1) cov = np.cov(timbreFeatures) covflat = [] for k in range(12): covflat.extend( np.diag(cov,k) ) covflat = np.array(covflat) timbreFeatures = np.concatenate([avg,covflat]) features = timbreFeatures.reshape(1,finaldim) artistId = GETTERS.get_artist_id(h5) trackId = GETTERS.get_track_id(h5) h5.close() self.saveDataInHbase(h5File,artistId,trackId,features) if __name__ == '__main__': filePath = sys.argv[1] with open(filePath,'r+') as fileList: fileListContent = fileList.read() listOfFiles = fileListContent.split('\n') hbs = hbaseConnection("test") try: for file in listOfFiles[1:]: hbs.readAndSave(file) except Exception,e: logging.debug("Save failed for {}",file)
{ "repo_name": "Arulselvanmadhavan/Artist_Recognition_from_Audio_Features", "path": "MRTasks/parsingTasks/prepareDataSet.py", "copies": "2", "size": "3172", "license": "apache-2.0", "hash": -5706140139459941000, "line_mean": 31.7010309278, "line_max": 89, "alpha_frac": 0.631147541, "autogenerated": false, "ratio": 3.5401785714285716, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5171326112428571, "avg_score": null, "num_lines": null }
__author__ = 'Arunkumar Eli' __email__ = "elrarun@gmail.com" from locators import AmazonEc2Locators from selenium.webdriver.support.ui import Select from selenium.webdriver.common.by import By from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC import time class AmazonEc2(object): NO_HOST_TEXT = "No hosts or containers yet." MAX_WAIT_TIME = 60 STATUS_ERROR = "ERROR" STATUS_ACTIVE = "ACTIVE" STATUS_INACTIVE ="INACTIVE" STATUS_REMOVED = "REMOVED" def __init__(self, driver): self.driver = driver def type_access_key(self, val): element = self.driver.find_element(*AmazonEc2Locators.Ec2Locators.ACCESS_KEY_INPUT) element.clear() element.send_keys(val) def type_secret_key(self,val): element = self.driver.find_element(*AmazonEc2Locators.Ec2Locators.SECRET_KEY_INPUT) element.clear() element.send_keys(val) def click_next_btn(self): element = self.driver.find_element(*AmazonEc2Locators.Ec2Locators.NEXT_BTN) element.click() def get_availability_zone_list(self): element = self.driver.find_element(*AmazonEc2Locators.Ec2Locators.AVAILABILITY_ZONE) return element.text def select_zone(self, zone): element = self.driver.find_element(*AmazonEc2Locators.Ec2Locators.ZONE_SELECT) select = Select(element) select.select_by_visible_text(zone) def click_vpc_radio_btn(self): element = self.driver.find_element(*AmazonEc2Locators.Ec2Locators.VPC_RADIO_BTN) element.click() def click_subnet_radio_btn(self): element = self.driver.find_element(*AmazonEc2Locators.Ec2Locators.SUBNET_RADIO_BTN) element.click() def click_security_grp_std_btn(self): element = self.driver.find_element(*AmazonEc2Locators.Ec2Locators.STD_RADIO_BTN) element.click() def click_security_grp_custom_btn(self): element = self.driver.find_element(*AmazonEc2Locators.Ec2Locators.CUSTOM_RADIO_BTN) element.click() def click_set_instance_option_btn(self): element = self.driver.find_element(*AmazonEc2Locators.Ec2Locators.SET_INSTANCE_OPTION_BTN) element.click() def 
click_slide_bar_3(self): element = self.driver.find_element(*AmazonEc2Locators.Ec2Locators.SLIDE_BAR_CLICK_3) element.click() def type_host_name(self, host_name): element = self.driver.find_element(*AmazonEc2Locators.Ec2Locators.HOST_NAME_INPUT) element.clear() element.send_keys(host_name) def type_host_desc(self, host_desc): element = self.driver.find_element(*AmazonEc2Locators.Ec2Locators.HOST_DESC_INPUT) element.clear() element.send_keys(host_desc) def select_host_instance_type(self, instance_type): element = self.driver.find_element(*AmazonEc2Locators.Ec2Locators.HOST_INSTANCE_TYPE_SELECT) select = Select(element) select.select_by_visible_text(instance_type) def type_host_mem_size(self, host_mem_size): element = self.driver.find_element(*AmazonEc2Locators.Ec2Locators.HOST_MEM_SIZE_INPUT) element.clear() element.send_keys(host_mem_size) def click_create_btn(self): element = self.driver.find_element(*AmazonEc2Locators.Ec2Locators.HOST_CREATE_BTN) element.click() def get_host_status_text(self): xpath_loc = AmazonEc2Locators.Ec2Locators.STATUS_LABEL_PREFIX + "1" + AmazonEc2Locators.Ec2Locators.STATUS_LABEL_SUFFIX print xpath_loc by = (By.XPATH, xpath_loc) element = self.driver.find_element(*by) return element.text def get_host_status_element(self): xpath_loc = AmazonEc2Locators.Ec2Locators.STATUS_LABEL_PREFIX + "1" + AmazonEc2Locators.Ec2Locators.STATUS_LABEL_SUFFIX print xpath_loc by = (By.XPATH, xpath_loc) element = self.driver.find_element(*by) return element def check_creating_host(self, num_host): for x in range(1,num_host): xpath_loc = AmazonEc2Locators.Ec2Locators.CREATING_HOST_PREFIX + str(x) + AmazonEc2Locators.Ec2Locators.CREATING_HOST_SUFFIX print xpath_loc by = (By.XPATH, xpath_loc) element = self.driver.find_element(*by) cur_txt = element.text assert cur_txt, "CREATING" def wait_for_first_host_active(self, value): xpath_loc = AmazonEc2Locators.Ec2Locators.STATUS_LABEL_PREFIX + "1" + AmazonEc2Locators.Ec2Locators.STATUS_LABEL_SUFFIX print xpath_loc 
element = self.driver.find_element(xpath_loc) element = WebDriverWait(self.driver, 10).until(EC.text_to_be_present_in_element_value(element,value)) # def wait_for_other_hosts_active(self, num_host, status): # is_all_active = False # sec = 0 # max_wait_time = 120 # while(is_all_active): # for x in range(1,num_host): # xpath_loc = AmazonEc2Locators.Ec2Locators.STATUS_LABEL_PREFIX + x + AmazonEc2Locators.Ec2Locators.STATUS_LABEL_SUFFIX # print xpath_loc # element = self.driver.find_element(xpath_loc) # cur_txt = element.text # try: # assert cur_txt, status # is_cur_active = True # except Exception as e: # print e.message # is_cur_active = False # # if check_all_active(collection): # is_all_active = True # else: # time.sleep(1) # sec = sec + 1 # # if sec > max_wait_time: # self.fail("time out") # break def is_no_host_text_found(self): try: element = self.driver.find_element(*AmazonEc2Locators.Ec2Locators.NO_HOST_FOUND_TEXT) element.is_displayed() print "No Host Found" return True except Exception as e: print "No Host not Found" e.message return False def click_menu(self): status = self.get_host_status_element() hidden_menu = self.driver.find_element(*AmazonEc2Locators.Ec2Locators.CLICK_MENU) actions = ActionChains(self.driver) actions.move_to_element(status) actions.click(hidden_menu) actions.perform() def click_deactivate_link(self): element = self.driver.find_element(*AmazonEc2Locators.Ec2Locators.DEACTIVATE_LINK) element.click() def click_delete_link(self): element = self.driver.find_element(*AmazonEc2Locators.Ec2Locators.DELETE_LINK) element.click() def click_purge_link(self): element = self.driver.find_element(*AmazonEc2Locators.Ec2Locators.PURGE_LINK) element.click() def click_delete_btn(self): element = self.driver.find_element(*AmazonEc2Locators.Ec2Locators.DELETE_BTN) element.click() def _host_delete(self): print "Inside _host_delete" host_status= self.get_host_status_text() print host_status self.click_menu() time.sleep(2) if host_status == self.STATUS_ERROR: 
self.click_delete_link() time.sleep(2) elif host_status == self.STATUS_ACTIVE: self.click_deactivate_link() time.sleep(2) elif host_status == self.STATUS_INACTIVE: self.click_delete_link() time.sleep(2) elif host_status == self.STATUS_REMOVED: self.click_purge_link() time.sleep(2) if host_status != self.STATUS_REMOVED: self.click_delete_btn() time.sleep(2) def host_delete(self): print "Inside host_delete" sec = 0 while ((self.is_no_host_text_found()) or (sec < self.MAX_WAIT_TIME)): print "Calling _host_delete" self._host_delete() print "_host_delete complete" sec = sec + 1 time.sleep(1)
{ "repo_name": "aruneli/rancher-test", "path": "ui-selenium-tests/pages/AmazonEc2.py", "copies": "1", "size": "8146", "license": "apache-2.0", "hash": 6985117429112604000, "line_mean": 35.6936936937, "line_max": 136, "alpha_frac": 0.628038301, "autogenerated": false, "ratio": 3.485665382969619, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.46137036839696194, "avg_score": null, "num_lines": null }
__author__ = 'Arunkumar Eli'
__email__ = "elrarun@gmail.com"

from locators import AmazonEc2Locators
from selenium.webdriver.support.ui import Select


class AmazonEc2(object):
    """Page object for the Amazon EC2 host-creation form."""

    def __init__(self, driver):
        self.driver = driver

    def _find(self, locator):
        # Shared lookup so each action method stays a one-liner.
        return self.driver.find_element(*locator)

    def _fill(self, locator, text):
        # Clear before typing so a previous value never lingers in the field.
        field = self._find(locator)
        field.clear()
        field.send_keys(text)

    def _choose(self, locator, visible_text):
        # Pick a dropdown option by its visible label.
        Select(self._find(locator)).select_by_visible_text(visible_text)

    def input_access_key(self, val):
        self._fill(AmazonEc2Locators.Ec2Locators.ACCESS_KEY_INPUT, val)

    def input_secret_key(self, val):
        self._fill(AmazonEc2Locators.Ec2Locators.SECRET_KEY_INPUT, val)

    def click_next_btn(self):
        self._find(AmazonEc2Locators.Ec2Locators.NEXT_BTN).click()

    def get_availability_zone_list(self):
        """Return the availability-zone section text."""
        return self._find(AmazonEc2Locators.Ec2Locators.AVAILABILITY_ZONE).text

    def select_zone(self, zone):
        self._choose(AmazonEc2Locators.Ec2Locators.ZONE_SELECT, zone)

    def click_vpc_radio_btn(self):
        self._find(AmazonEc2Locators.Ec2Locators.VPC_RADIO_BTN).click()

    def click_subnet_radio_btn(self):
        self._find(AmazonEc2Locators.Ec2Locators.SUBNET_RADIO_BTN).click()

    def click_security_grp_std_btn(self):
        self._find(AmazonEc2Locators.Ec2Locators.STD_RADIO_BTN).click()

    def click_security_grp_custom_btn(self):
        self._find(AmazonEc2Locators.Ec2Locators.CUSTOM_RADIO_BTN).click()

    def click_set_instance_option_btn(self):
        self._find(AmazonEc2Locators.Ec2Locators.SET_INSTANCE_OPTION_BTN).click()

    def click_slide_bar_3(self):
        self._find(AmazonEc2Locators.Ec2Locators.SLIDE_BAR_CLICK_3).click()

    def input_host_name(self, host_name):
        self._fill(AmazonEc2Locators.Ec2Locators.HOST_NAME_INPUT, host_name)

    def input_host_desc(self, host_desc):
        self._fill(AmazonEc2Locators.Ec2Locators.HOST_DESC_INPUT, host_desc)

    def select_host_instance_type(self, instance_type):
        self._choose(AmazonEc2Locators.Ec2Locators.HOST_INSTANCE_TYPE_SELECT, instance_type)

    def input_host_mem_size(self, host_mem_size):
        self._fill(AmazonEc2Locators.Ec2Locators.HOST_MEM_SIZE_INPUT, host_mem_size)

    def click_create_btn(self):
        self._find(AmazonEc2Locators.Ec2Locators.HOST_CREATE_BTN).click()
{ "repo_name": "aruneli/rancher-test", "path": "ui-selenium-tests/pages/AmazonEc2Page.py", "copies": "1", "size": "3049", "license": "apache-2.0", "hash": -4428168637509758000, "line_mean": 34.0459770115, "line_max": 100, "alpha_frac": 0.6900623155, "autogenerated": false, "ratio": 3.2855603448275863, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4475622660327586, "avg_score": null, "num_lines": null }
__author__ = 'Arunkumar Eli'
__email__ = "elrarun@gmail.com"

from locators import AppsLocators
from selenium.webdriver.support.ui import Select


class AddService(object):
    """Page object for the "Add Service" form."""

    def __init__(self, driver):
        self.driver = driver

    def _find(self, locator):
        # Shared lookup so each action method stays a one-liner.
        return self.driver.find_element(*locator)

    def _fill(self, locator, text):
        # Clear before typing so a previous value never lingers in the field.
        field = self._find(locator)
        field.clear()
        field.send_keys(text)

    def click_add_service(self):
        self._find(AppsLocators.AppsLocators.ADD_SERVICE_BUTTON).click()

    def input_service_name(self, val):
        self._fill(AppsLocators.AppsLocators.SERVICE_NAME_INPUT, val)

    def input_service_description(self, val):
        self._fill(AppsLocators.AppsLocators.SERVICE_DESCRIPTION_INPUT, val)

    def click_slide_bar_3(self):
        self._find(AppsLocators.AppsLocators.SLIDE_BAR_CLICK_3).click()

    def input_image_name(self, val):
        self._fill(AppsLocators.AppsLocators.SERVICE_IMAGE_INPUT, val)

    def click_create_btn(self):
        self._find(AppsLocators.AppsLocators.SERVICE_CREATE_BTN).click()
{ "repo_name": "aruneli/rancher-test", "path": "ui-selenium-tests/pages/AddServicePage.py", "copies": "1", "size": "1251", "license": "apache-2.0", "hash": 4584286946762751000, "line_mean": 27.4318181818, "line_max": 96, "alpha_frac": 0.6802557954, "autogenerated": false, "ratio": 3.4273972602739726, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.46076530556739725, "avg_score": null, "num_lines": null }
__author__ = 'Arunkumar Eli'
__email__ = "elrarun@gmail.com"

from locators import DigitalOceanLocators
from selenium.webdriver.support.ui import Select


class DigitalOcean(object):
    """Page object for the DigitalOcean host-creation form."""

    def __init__(self, driver):
        self.driver = driver

    def _find(self, locator):
        # Shared lookup so each action method stays a one-liner.
        return self.driver.find_element(*locator)

    def _fill(self, locator, text):
        # Clear before typing so a previous value never lingers in the field.
        field = self._find(locator)
        field.clear()
        field.send_keys(text)

    def _choose(self, locator, visible_text):
        # Pick a dropdown option by its visible label.
        Select(self._find(locator)).select_by_visible_text(visible_text)

    def input_access_token(self, val):
        self._fill(DigitalOceanLocators.DigitalOceanLocators.ACCESS_TOKEN_INPUT, val)

    def input_host_name(self, val):
        self._fill(DigitalOceanLocators.DigitalOceanLocators.HOST_NAME_INPUT, val)

    def input_host_desc(self, val):
        self._fill(DigitalOceanLocators.DigitalOceanLocators.HOST_DESC_INPUT, val)

    def select_quantity(self):
        """Return the quantity widget's text."""
        return self._find(DigitalOceanLocators.DigitalOceanLocators.SELECT_QUANTITY).text

    def click_slide_bar(self):
        self._find(DigitalOceanLocators.DigitalOceanLocators.SLIDE_BAR_CLICK_3).click()

    def select_image(self, image):
        self._choose(DigitalOceanLocators.DigitalOceanLocators.IMAGE_SELECT, image)

    def select_host_mem_size(self, mem_size):
        self._choose(DigitalOceanLocators.DigitalOceanLocators.HOST_MEM_SIZE_SELECT, mem_size)

    def select_region(self, region):
        self._choose(DigitalOceanLocators.DigitalOceanLocators.HOST_REGION_SELECT, region)

    def click_create_btn(self):
        self._find(DigitalOceanLocators.DigitalOceanLocators.HOST_CREATE_BTN).click()
{ "repo_name": "aruneli/rancher-test", "path": "ui-selenium-tests/pages/DigitalOceanPage.py", "copies": "1", "size": "1997", "license": "apache-2.0", "hash": -2613048918078816000, "line_mean": 32.8474576271, "line_max": 107, "alpha_frac": 0.7045568353, "autogenerated": false, "ratio": 3.5660714285714286, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4770628263871428, "avg_score": null, "num_lines": null }
__author__ = 'Arunkumar Eli' __email__ = "elrarun@gmail.com" from locators import InfraHostsLocators from locators import InfraPageLocators from selenium.webdriver.common.by import By from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as ec from selenium.webdriver import ActionChains import time class InfraHostsPage(object): NO_HOST_TEXT = "No hosts or containers yet." MAX_WAIT_TIME = 600 STATUS_BOOTSTRAPPING = "BOOTSTRAPPING" STATUS_CREATING = "CREATING" STATUS_ERROR = "ERROR" STATUS_ACTIVE = "ACTIVE" STATUS_INACTIVE = "INACTIVE" STATUS_REMOVED = "REMOVED" def __init__(self, driver): self.driver = driver def click_ec2_img(self): element = self.driver.find_element(*InfraPageLocators.InfraPageLocators.EC2_IMG) element.click() def get_host_status_text(self): xpath_loc = InfraHostsLocators.InfraHostsLocators.STATUS_LABEL_PREFIX + "1" + InfraHostsLocators.InfraHostsLocators.STATUS_LABEL_SUFFIX print xpath_loc by = (By.XPATH, xpath_loc) element = self.driver.find_element(*by) return element.text def get_host_status_element(self): xpath_loc = InfraHostsLocators.InfraHostsLocators.STATUS_LABEL_PREFIX + "1" + InfraHostsLocators.InfraHostsLocators.STATUS_LABEL_SUFFIX print xpath_loc by = (By.XPATH, xpath_loc) element = self.driver.find_element(*by) return element def check_creating_host(self, num_host): for x in range(1, num_host): xpath_loc = InfraHostsLocators.InfraHostsLocators.CREATING_HOST_PREFIX + str( x) + InfraHostsLocators.InfraHostsLocators.CREATING_HOST_SUFFIX print xpath_loc by = (By.XPATH, xpath_loc) element = self.driver.find_element(*by) cur_txt = element.text assert cur_txt, "CREATING" def wait_for_first_host_active(self, value): xpath_loc = InfraHostsLocators.InfraHostsLocators.STATUS_LABEL_PREFIX + "1" + InfraHostsLocators.InfraHostsLocators.STATUS_LABEL_SUFFIX print xpath_loc by = (By.XPATH, xpath_loc) element = self.driver.find_element(*by) element = WebDriverWait(self.driver, 
180).until(ec.text_to_be_present_in_element_value(element, value)) # def wait_for_other_hosts_active(self, num_host, status): # is_all_active = False # sec = 0 # max_wait_time = 120 # while is_all_active: # for x in range(1,num_host): # xpath_loc = InfraHostsLocators.InfraHostsLocators.STATUS_LABEL_PREFIX + x + InfraHostsLocators.InfraHostsLocators.STATUS_LABEL_SUFFIX # print xpath_loc # element = self.driver.find_element(xpath_loc) # cur_txt = element.text # try: # assert cur_txt, status # is_cur_active = True # except Exception as e: # print e.message # is_cur_active = False # # if check_all_active(collection): # is_all_active = True # else: # time.sleep(1) # sec += 1 # # if sec > max_wait_time: # self.fail("time out") # break def is_no_host_text_found(self): try: element = self.driver.find_element(*InfraHostsLocators.InfraHostsLocators.NO_HOST_FOUND_TEXT) element.is_displayed() print "No Host Found" return True except Exception as e: print "No Host not Found" return False def click_menu(self): status = self.get_host_status_element() hidden_menu = self.driver.find_element(*InfraHostsLocators.InfraHostsLocators.CLICK_MENU) actions = ActionChains(self.driver) actions.move_to_element(status) actions.click(hidden_menu) actions.perform() def click_deactivate_link(self): element = self.driver.find_element(*InfraHostsLocators.InfraHostsLocators.DEACTIVATE_LINK) element.click() def click_delete_link(self): element = self.driver.find_element(*InfraHostsLocators.InfraHostsLocators.DELETE_LINK) element.click() def click_purge_link(self): element = self.driver.find_element(*InfraHostsLocators.InfraHostsLocators.PURGE_LINK) element.click() def click_delete_btn(self): element = self.driver.find_element(*InfraHostsLocators.InfraHostsLocators.DELETE_BTN) element.click() def _host_delete(self): print "Inside _host_delete" host_status = self.get_host_status_text() print host_status time.sleep(2) if host_status == self.STATUS_CREATING: self.click_menu() self.click_delete_link() 
time.sleep(2) elif host_status == self.STATUS_BOOTSTRAPPING: self.click_menu() self.click_delete_link() time.sleep(2) elif host_status == self.STATUS_ERROR: self.click_menu() time.sleep(2) self.click_delete_link() time.sleep(2) elif host_status == self.STATUS_ACTIVE: self.click_menu() time.sleep(2) self.click_deactivate_link() time.sleep(2) self.click_menu() self.click_delete_link() time.sleep(2) elif host_status == self.STATUS_INACTIVE: self.click_menu() self.click_delete_link() time.sleep(2) elif host_status == self.STATUS_REMOVED: self.click_menu() self.click_purge_link() time.sleep(2) if host_status != self.STATUS_REMOVED: self.click_delete_btn() time.sleep(2) def host_delete(self): print "Inside host_delete" sec = 0 is_no_host = self.is_no_host_text_found() print is_no_host while is_no_host is not True: print "Calling _host_delete" self._host_delete() time.sleep(2) print "end of host_delete" is_no_host = self.is_no_host_text_found() print "Host status now: %s" % is_no_host
{ "repo_name": "aruneli/rancher-test", "path": "ui-selenium-tests/pages/InfraHostsPage.py", "copies": "1", "size": "6392", "license": "apache-2.0", "hash": 562739080124592830, "line_mean": 35.5257142857, "line_max": 151, "alpha_frac": 0.5980913642, "autogenerated": false, "ratio": 3.6588437321121923, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.47569350963121926, "avg_score": null, "num_lines": null }
__author__ = 'Arunkumar Eli'
__email__ = "elrarun@gmail.com"

from selenium.webdriver.common.by import By


class DigitalOceanLocators(object):
    # Selenium locator tuples (By strategy, selector) for the host-creation
    # wizard, consumed by the page objects via driver.find_element(*locator).
    #
    # NOTE(review): despite the class name, these are the EC2 fields
    # (access/secret key, availability zone, VPC/subnet) — the content looks
    # copied from Ec2Locators; verify against the actual DigitalOcean page.

    # Credential inputs.
    ACCESS_KEY_INPUT = (By.ID, 'accessKey')
    SECRET_KEY_INPUT = (By.ID, 'secretKey')
    NEXT_BTN = (By.CSS_SELECTOR, "button.btn.btn-primary")
    # Zone / network selection.
    AVAILABILITY_ZONE = (By.XPATH, "//section[3]/div/div/span")
    ZONE_SELECT = (By.ID, "selectedZone")
    VPC_RADIO_BTN = (By.XPATH, "//div[3]/div[2]/div/label")
    SUBNET_RADIO_BTN = (By.XPATH, "//div[2]/label")
    # NOTE(review): the leading '///' below is almost certainly a typo for
    # '//' and may not be a valid XPath expression — confirm before use.
    SECURITY_GROUP = (By.XPATH, "///section[5]/div/div/span")
    INSTANCE = (By.XPATH, "//section[7]/div/div/span")
    ACCOUNT_ACCESS = (By.XPATH, "//section/div/div/span")
    # Security-group choice (standard vs custom).
    STD_RADIO_BTN=(By.XPATH,"//section[5]/div[1]/div[2]/div[2]/div[1]/label/input")
    CUSTOM_RADIO_BTN=(By.XPATH,"//section[5]/div[1]/div[2]/div[2]/div[2]/label/input")
    SET_INSTANCE_OPTION_BTN = (By.XPATH, "//div[2]/button")
    # Host-count slider (position 3).
    SLIDE_BAR_CLICK_3 = (By.XPATH, "//div[2]/div[3]/div")
    # Host details form.
    HOST_NAME_INPUT = (By.ID, "prefix")
    HOST_DESC_INPUT = (By.ID, "description")
    HOST_INSTANCE_TYPE_SELECT = (By.ID, "instanceType")
    HOST_MEM_SIZE_INPUT = (By.ID, "rootSize")
    HOST_CREATE_BTN = (By.XPATH, "//div[2]/button")
{ "repo_name": "aruneli/rancher-test", "path": "ui-selenium-tests/locators/PacketLocators.py", "copies": "2", "size": "1212", "license": "apache-2.0", "hash": -3461568642314811400, "line_mean": 45.6153846154, "line_max": 86, "alpha_frac": 0.6278877888, "autogenerated": false, "ratio": 2.5041322314049586, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.41320200202049584, "avg_score": null, "num_lines": null }
__author__ = 'Arunkumar Eli'
__email__ = "elrarun@gmail.com"

from selenium.webdriver.common.by import By


class Ec2Locators(object):
    # Selenium locator tuples (By strategy, selector) for the Amazon EC2
    # host-creation wizard, consumed by the page objects via
    # driver.find_element(*locator).

    # AWS credential inputs.
    ACCESS_KEY_INPUT = (By.ID, 'accessKey')
    SECRET_KEY_INPUT = (By.ID, 'secretKey')
    NEXT_BTN = (By.CSS_SELECTOR, "button.btn.btn-primary")
    # Zone / network selection.
    AVAILABILITY_ZONE = (By.XPATH, "//section[3]/div/div/span")
    ZONE_SELECT = (By.ID, "selectedZone")
    VPC_RADIO_BTN = (By.XPATH, "//div[3]/div[2]/div/label")
    SUBNET_RADIO_BTN = (By.XPATH, "//div[2]/label")
    # NOTE(review): the leading '///' below is almost certainly a typo for
    # '//' and may not be a valid XPath expression — confirm before use.
    SECURITY_GROUP = (By.XPATH, "///section[5]/div/div/span")
    INSTANCE = (By.XPATH, "//section[7]/div/div/span")
    ACCOUNT_ACCESS = (By.XPATH, "//section/div/div/span")
    # Security-group choice (standard vs custom).
    STD_RADIO_BTN=(By.XPATH,"//section[5]/div[1]/div[2]/div[2]/div[1]/label/input")
    CUSTOM_RADIO_BTN=(By.XPATH,"//section[5]/div[1]/div[2]/div[2]/div[2]/label/input")
    SET_INSTANCE_OPTION_BTN = (By.XPATH, "//div[2]/button")
    # Host-count slider (position 3).
    SLIDE_BAR_CLICK_3 = (By.XPATH, "//div[2]/div[3]/div")
    # Host details form.
    HOST_NAME_INPUT = (By.ID, "prefix")
    HOST_DESC_INPUT = (By.ID, "description")
    HOST_INSTANCE_TYPE_SELECT = (By.ID, "instanceType")
    HOST_MEM_SIZE_INPUT = (By.ID, "rootSize")
    HOST_CREATE_BTN = (By.XPATH, "//div[2]/button")
{ "repo_name": "aruneli/rancher-test", "path": "ui-selenium-tests/locators/AmazonEc2Locators.py", "copies": "1", "size": "1203", "license": "apache-2.0", "hash": -4483984712312975400, "line_mean": 45.2692307692, "line_max": 86, "alpha_frac": 0.6251039069, "autogenerated": false, "ratio": 2.4855371900826446, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.3610641096982644, "avg_score": null, "num_lines": null }
__author__ = 'arun' # Echo client program import socket HOST = '192.168.1.243' #'192.168.1.243' # The remote host PORT = 60007 # The same port as used by the server # s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) for x in range(0, 130000): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) print "\n\n" print "s.family: ", s.family print "s.type: ", s.type print "Hello I am akkas from client." print "s.proto: ", s.proto print "s.fileno: ", s.fileno s.connect((HOST, PORT)) s.sendall('Hello, world ') data = s.recv(1024) print 'Received', repr(data) , x s.close() # __author__ = 'arun' # # Echo client program # import socket # # HOST = '0.0.0.0.0.0.0.1' # The remote host # PORT = 50007 # The same port as used by the server # s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) # try: # s.connect((HOST, PORT)) # print "Connected." # s.sendall('Hello, world') # data = s.recv(1024) # s.close() # print 'Received', repr(data) # except Exception: # print "There is an error. " # HOST = "0.0.0.0" # PORT = 50007 # s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # s.connect((HOST, PORT)) # print "Connected." 
# s.sendall('Hello, world') # data = s.recv(1024) # s.close() # print 'Received', repr(data) # # Echo client program # import socket # import sys # # HOST = '0:0:0:0' # The remote host # PORT = 50007 # The same port as used by the server # s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # #s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) # print "\n\n" # print "s.family: ", s.family # print "s.type: ", s.type # print "s.proto: ", s.proto # print "s.fileno: ", s.fileno # s.connect((HOST, PORT)) # # # # s.sendall('Hello, world') # data = s.recv(1024) # s.close() # print 'Received', repr(data) # # HOST = "0:0:0:0:0:0:0:1" # PORT = 50007 # for res in socket.getaddrinfo(HOST, PORT, socket.AF_UNSPEC, socket.SOCK_STREAM): # af, socktype, proto, canonname, sa = res # print "af : ", af # print "socket.AF_INET6 : ", socket.AF_INET6 # print "socktype : ", socktype # print "proto : ", proto # try: # s = socket.socket(af, socktype, proto) # except socket.error as msg: # s = None # continue # try: # s.connect(sa) # except socket.error as msg: # print "msg : ", msg # s.close() # s = None # continue # break # if s is None: # print 'could not open socket' # sys.exit(1) # s.sendall('Hello, world') # data = s.recv(1024) # s.close() # print 'Received', repr(data)
{ "repo_name": "IPVL/Arun-SocketExample", "path": "ipv4_client.py", "copies": "1", "size": "2715", "license": "mit", "hash": 4873359001793046000, "line_mean": 19.4210526316, "line_max": 82, "alpha_frac": 0.5745856354, "autogenerated": false, "ratio": 2.825182101977107, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.3899767737377107, "avg_score": null, "num_lines": null }
__author__ = 'arun' # Echo server program import socket HOST = '192.168.1.243' # Symbolic name meaning all available interfaces PORT = 60007 # Arbitrary non-privileged port s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind((HOST, PORT)) s.listen(0) print "\n\n" print "s.family: ", s.family print "Hello I am arun from server." print "s.type: ", s.type print "s.proto: ", s.proto print "s.fileno: ", s.fileno print "\n" x = 1 while 1: conn, addr = s.accept() print 'Connected by', addr print "conn : ", conn x = x + 1 print "server ", x #while 1: data = conn.recv(1024) if not data: break conn.sendall(data) # Echo server program # import socket # import sys # HOST = '0.0.0.0' # Symbolic name meaning all available interfaces # PORT = 50007 # Arbitrary non-privileged port # s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # s.bind((HOST, PORT)) # s.listen(1) # # print "\n\n" # print "s.family: ", s.family # print "s.type: ", s.type # print "s.proto: ", s.proto # print "s.fileno: ", s.fileno # # print "\n" # conn, addr = s.accept() # print 'Connected by', addr # print "conn : ", conn # # while 1: # data = conn.recv(1024) # if not data: break # conn.sendall(data) # conn.close() # HOST = '' # Symbolic name meaning all available interfaces # PORT = 50007 # Arbitrary non-privileged port # s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # print "s.family: ", s.family # print "s.type: ", s.type # print "s.proto: ", s.proto # print "s.fileno: ", s.fileno # s.bind((HOST, PORT)) # s.listen(1) # HOST = None # Symbolic name meaning all available interfaces # PORT = 50007 # Arbitrary non-privileged port # s = None # for res in socket.getaddrinfo(HOST, PORT, socket.AF_UNSPEC, # socket.SOCK_STREAM, 0, socket.AI_PASSIVE): # af, socktype, proto, canonname, sa = res # print "af : ", af # print "socktype: ", socktype # print "proto : ", proto # print "canonname : ", canonname # # try: # s = socket.socket(af, 
socktype, proto) # except socket.error as msg: # s = None # continue # try: # s.bind(sa) # s.listen(1) # except socket.error as msg: # s.close() # s = None # continue # break # if s is None: # print 'could not open socket' # sys.exit(1) # conn, addr = s.accept() # print 'Connected by', addr # while 1: # data = conn.recv(1024) # if not data: break # conn.send(data) # conn.close()
{ "repo_name": "IPVL/Arun-SocketExample", "path": "ipv4_server.py", "copies": "1", "size": "2709", "license": "mit", "hash": -7034888843410651000, "line_mean": 22.7719298246, "line_max": 87, "alpha_frac": 0.584717608, "autogenerated": false, "ratio": 3.030201342281879, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4114918950281879, "avg_score": null, "num_lines": null }
__author__ = 'arun' # Echo server program import socket import sys HOST = None # Symbolic name meaning all available interfaces PORT = 50007 # Arbitrary non-privileged port s = None #address_family = [addr[0] for addr in socket.getaddrinfo(bind_addr[0], bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM) if addr[0] in (socket.AF_INET, socket.AF_INET6)][0] for res in socket.getaddrinfo(HOST, PORT, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE): af, socktype, proto, canonname, sa = res try: print "Server " print "af : ", af s = socket.socket(socket.AF_INET6, socktype, proto) except socket.error as msg: s = None continue try: s.bind(sa) s.listen(1) except socket.error as msg: s.close() s = None continue break if s is None: print 'could not open socket' sys.exit(1) conn, addr = s.accept() print 'Connected by', addr while 1: data = conn.recv(1024) if not data: break conn.send(data) conn.close()
{ "repo_name": "IPVL/Arun-SocketExample", "path": "ipv6_server.py", "copies": "1", "size": "1097", "license": "mit", "hash": 5198471251142665000, "line_mean": 28.6756756757, "line_max": 175, "alpha_frac": 0.6061987238, "autogenerated": false, "ratio": 3.428125, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9515651395345063, "avg_score": 0.00373446569098743, "num_lines": 37 }
""" First-order integrator using waveform relaxation. TODO: Implement the waveform relaxation. class RelaxationIntegrator --------- A subclass of the SciPy ODE class. """ from scipy.integrate import _ode from scipy.integrate import ode def runner(_, f, y0, t0, t1, rtol, atol, solout, nsteps, verbosity, f_params): r = ode(f).set_integrator('dopri5', rtol=rtol, atol=atol, nsteps=nsteps, verbosity=verbosity) r.set_solout(solout) r.set_f_params(*f_params) r.set_initial_value(y0, t0) r.integrate(t1) assert r.successful() return r.t, r.y class RelaxationIntegrator(_ode.IntegratorBase): """Integrator capable of handling discontinuous, coupled equations.""" runner = runner # TODO: Switch to WR runner. name = 'relax' supports_solout = True def __init__(self, rtol=1e-6, atol=1e-12, nsteps=500, method=None, verbosity=1, ): self.rtol = rtol self.atol = atol self.nsteps = nsteps self.verbosity = verbosity self.success = True self.set_solout(None) def set_solout(self, solout, complex=False): self._solout = solout def reset(self, n, has_jac): self.call_args = [self.rtol, self.atol, self._solout, self.nsteps, self.verbosity] self.success = True def run(self, f, jac, y0, t0, t1, f_params, jac_params): args = ((f, y0, t0, t1) + tuple(self.call_args) + (f_params,)) try: t, y = self.runner(*args) except ValueError as e: print("Something went wrong with integration!") self.success = False raise return y, t if RelaxationIntegrator.runner is not None: _ode.IntegratorBase.integrator_classes.append(RelaxationIntegrator)
{ "repo_name": "aryamccarthy/gapjunctions", "path": "gapjunctions/ode.py", "copies": "1", "size": "1917", "license": "mit", "hash": 6230435410882319000, "line_mean": 27.6119402985, "line_max": 78, "alpha_frac": 0.5946791862, "autogenerated": false, "ratio": 3.4171122994652405, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.451179148566524, "avg_score": null, "num_lines": null }
__author__ = 'asafvaladarsky' from lxml import etree from os import walk, sep, path from logging import info, warning def main(mainFolderName,schemaFileName, shouldRecursiveSearch): with open(schemaFileName) as schemaFile: schemaText = schemaFile.read() xmlschema = etree.XMLSchema(etree.parse(schemaFileName)) filesList = [] recursiveSearch = True if shouldRecursiveSearch != "0" else False for root, dirs, files in walk(mainFolderName): for file_name in files: if file_name.lower().endswith(".xml"): #with open(file_name) as xmlFile: filesList.append(path.join(root, file_name)) if not recursiveSearch: print "breaking" break for fileName in filesList: doc = etree.parse(fileName) xmlschema.assertValid(doc) if not xmlschema.validate(doc): warning('File %s does not pass validation %s' % (file_name,schemaFileName)) else: info("File %s passed validation: %s" % (file_name,schemaFileName)) if __name__ == "__main__": from sys import argv main(argv[1], argv[2], argv[3])
{ "repo_name": "hasadna/OpenPress", "path": "engine/sitemap-generator/xml_validator.py", "copies": "1", "size": "1157", "license": "mit", "hash": 4078366218172621000, "line_mean": 31.1666666667, "line_max": 87, "alpha_frac": 0.6352636128, "autogenerated": false, "ratio": 3.922033898305085, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0036631554392630348, "num_lines": 36 }
# Kodi service add-on (Python 2: SocketServer module): runs a local Widevine
# HTTP proxy on a free port until Kodi signals shutdown.
import threading
import SocketServer
import socket

from xbmc import Monitor

from resources.lib.kodihelper import KodiHelper
from resources.lib.WidevineHTTPRequestHandler import WidevineHTTPRequestHandler


# helper function to select an unused port on the host machine
def select_unused_port():
    # Bind to port 0 so the OS assigns a free ephemeral port, read it back,
    # then release the socket.  NOTE(review): there is a small race window --
    # another process could grab the port before the TCPServer below binds it.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(('127.0.0.1', 0))
    addr, port = sock.getsockname()
    sock.close()
    return port

helper = KodiHelper()

# pick & store a port for the proxy service
wv_proxy_port = select_unused_port()
helper.set_setting('wv_proxy_port', str(wv_proxy_port))
helper.log('Port {0} selected'.format(str(wv_proxy_port)))

# server defaults
SocketServer.TCPServer.allow_reuse_address = True

# configure the proxy server (loopback only)
wv_proxy = SocketServer.TCPServer(('127.0.0.1', wv_proxy_port), WidevineHTTPRequestHandler)
wv_proxy.server_activate()
wv_proxy.timeout = 1

if __name__ == '__main__':
    monitor = Monitor()

    # start thread for proxy server; daemon so it cannot keep Kodi alive
    proxy_thread = threading.Thread(target=wv_proxy.serve_forever)
    proxy_thread.daemon = True
    proxy_thread.start()

    # kill the services if kodi monitor tells us to (poll every 5 seconds)
    while not monitor.abortRequested():
        if monitor.waitForAbort(5):
            wv_proxy.shutdown()
            break

    # wv-proxy service shutdown sequence
    # NOTE(review): shutdown() is called again here after the loop already
    # called it -- redundant but harmless once serve_forever has exited.
    wv_proxy.server_close()
    wv_proxy.socket.close()
    wv_proxy.shutdown()
    helper.log('wv-proxy stopped')
{ "repo_name": "emilsvennesson/kodi-cmore", "path": "service.py", "copies": "1", "size": "1563", "license": "mit", "hash": 5214167549105159000, "line_mean": 29.0576923077, "line_max": 91, "alpha_frac": 0.7146513116, "autogenerated": false, "ratio": 3.4656319290465634, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.967116923592219, "avg_score": 0.0018228009448745237, "num_lines": 52 }
__author__ = 'Aseem'
# Deliberate hack: __name__ is overwritten so the run_all() guard at the
# bottom fires when this file is imported by the test harness as well as when
# it is executed directly.
__name__ = '0001-0050'
#Most of these are in functions directory
import calendar
import combinatorics
import common
import files
import lcm
import math
import primes
import series
import sys
import utils_ab
from numbers_ab import *
from fractions import Fraction
from itertools import count, islice, permutations, product

RESOURCES = 'Resources'


def prob_001():
    """Sum of all multiples of 3 or 5 below 1000."""
    return series.sum_multiples_upto((3, 5), 1000)


def prob_002():
    """Sum of even Fibonacci terms not exceeding four million."""
    return sum(b for b in series.fibonacci(1, 2, 4000000) if b % 2 == 0)


def prob_003():
    return primes.largest_prime_factor(600851475143)


def prob_004():
    """Largest palindrome that is a product of two 3-digit numbers."""
    largest = 0
    for i in range(100, 1000):
        for j in range(i + 1, 1000):
            num = i * j
            if num > largest and is_palindrome(num):
                largest = num
    return largest


def prob_005():
    return lcm.lcm_of_range(1, 21)


def prob_006():
    return series.sum_numbers(100) ** 2 - series.sum_squares(100)


def prob_007():
    return primes.nth_prime(10001)


def prob_008():
    """Largest product of 5 consecutive digits in the 008.txt number."""
    number_str = ''.join(files.get_lines(RESOURCES, '008.txt'))
    consecutive = 5
    return max(product_digits(number_str[i: i + consecutive]) for i in range(len(number_str) - consecutive))


def prob_009():
    """Product abc of the Pythagorean triple with a + b + c = 1000."""
    for c in range(1, 997):
        c_square = c * c
        for b in range(1, c):
            a = 1000 - b - c
            if a * a + b * b == c_square and b > a > 0:
                return a * b * c


def prob_010():
    return sum(primes.primes_list(2000000))


def prob_012():
    """First triangle number with more than 500 divisors."""
    for num in count(1):
        cur_tri_num = triangle_num(num)
        div = num_of_divisors(cur_tri_num)
        if div > 500:
            return cur_tri_num


def prob_013():
    total = sum(int(line) for line in files.get_lines(RESOURCES, '013.txt'))
    return str(total)[:10]


def prob_014():
    """Start below one million producing the longest Collatz chain."""
    # len_collatz is recursive; raise the limit for deep chains and restore
    # it afterwards.
    recursion_limit = sys.getrecursionlimit()
    sys.setrecursionlimit(recursion_limit * 10)
    longest = 1
    longest_num = 1
    for i in range(1, 1000000):
        current = series.len_collatz(i)
        if current > longest:
            longest = current
            longest_num = i
    sys.setrecursionlimit(recursion_limit)
    return longest_num


def prob_015():
    # Lattice paths in a 20x20 grid = C(40, 20).
    return combinatorics.combinations(40, 20)


def prob_016():
    num = str(2 ** 1000)
    return sum(int(i) for i in num)


def prob_018():
    return common.max_path_sum_tri_file(RESOURCES, "018.txt")


def prob_019():
    """Count Sundays (weekday index 6) falling on the 1st, 1901-2000."""
    ans = 0
    for month in range(1, 13):
        for year in range(1901, 2001):
            if calendar.monthrange(year, month)[0] == 6:
                ans += 1
    return ans


def prob_020():
    return sum(int(i) for i in str(math.factorial(100)))


def prob_021():
    """Sum of amicable numbers below 10000."""
    #TODO Refactor Maybe
    sums = [sum_proper_divisors(i) for i in range(10000)]
    total = 0
    for i in range(10000):
        b = sums[i]
        # Skip pairs whose partner is out of range and perfect numbers (b == i).
        if b >= 10000 or b == i:
            continue
        if sums[b] == i:
            total += i
    return total


def prob_022():
    """Total of alphabetical-rank * position scores for names in 022.txt."""
    names = files.get_line(RESOURCES, '022.txt', split_option=',')
    names.sort()
    scores = 0
    for i in range(len(names)):
        score = 0
        # [1:-1] strips the surrounding double quotes of each name.
        for c in names[i][1:-1]:
            score += ord(c) - ord('A') + 1
        scores += (score * (i + 1))
    return scores


def prob_023():
    """Sum of numbers not expressible as a sum of two abundant numbers."""
    listt = []
    total = 0
    for i in range(1, 28124):
        # listt[i-1] records whether i is abundant.
        listt.append(i < sum_proper_divisors(i))
        for j in range(1, i):
            if listt[j - 1] and listt[i - j - 1]:
                break
        else:
            # for/else: no abundant pair summed to i.
            total += i
    return total


def prob_024():
    """Millionth lexicographic permutation of the digits 0-9."""
    for cur_per in islice(permutations("0123456789"), 999999, 999999 + 1):
        return utils_ab.iterable_to_int(cur_per)


def prob_025():
    """Index of the first Fibonacci term with 1000 digits."""
    for b, i in zip(series.fibonacci(1, 1), count(2)):
        if len(str(b)) > 999:
            return i


def prob_028():
    #To do this I derived the formula for sum of corners in n x n spiral
    #It's easy to find the pattern if you consider the outer ring only in spiral
    def sum_of_outer_ring(n):
        return 4*(n*n) - 6 * (n - 1)
    return 1 + sum(sum_of_outer_ring(i) for i in range(3, 1002, 2))


def prob_029():
    return len({pow(a, b) for a in range(2, 101) for b in range(2, 101)} )


def prob_033():
    """Denominator of the product of the four 'digit-cancelling' fractions."""
    ans = Fraction(1, 1)
    for num, den in product(range(10, 100), repeat=2):
        if num >= den:
            continue
        num_list = [i for i in str(num)]
        den_list = [i for i in str(den)]
        temp_list = [i for i in num_list if i in den_list]
        # Exactly one shared digit, and the trivial '0' cancellations excluded.
        if len(temp_list) != 1 or '0' in temp_list:
            continue
        num_list.remove(temp_list[0])
        den_list.remove(temp_list[0])
        num2 = int(num_list[0])
        den2 = int(den_list[0])
        if not(num2 and den2):
            continue
        if Fraction(num, den) == Fraction(num2, den2):
            ans *= Fraction(num, den)
    return ans.denominator


def prob_035():
    # 4 accounts for the single-digit circular primes 2, 3, 5, 7; the sum
    # counts True values from is_circular_prime over odd candidates.
    return(4 + sum(primes.is_circular_prime(i, len(str(i))) for i in range(11, 1000000, 2)))


def prob_036():
    """Sum of numbers below one million palindromic in base 10 and base 2."""
    return sum(i for i in range(1, 1000000) if is_palindrome(str(i)) and is_palindrome(bin(i)[2:]))


def prob_039():
    """Perimeter <= 1000 with the most right-angle triangle solutions."""
    maximum = 0
    maximum_p = 3
    for param in range(3, 1001):
        cur = 0
        for a in range(1, param-1):
            #This equation can be obtained by solving the two given equations
            b = (param * (param - 2 * a))/(2.0 * (param - a))
            if b != int(b):
                continue
            c = param - a - b
            if c <= 0:
                break
            if a*a + b*b == c*c:
                cur += 1
        else:
            # for/else: record the count once the whole a-range was scanned.
            if cur > maximum:
                maximum = cur
                maximum_p = param
    return maximum_p


def prob_040():
    """Product of digits d1*d10*...*d1000000 of Champernowne's constant."""
    s = ''
    for i in count(1):
        s += str(i)
        if len(s) >= 1000000:
            break
    total = 1
    for i in range(7):
        total *= int(s[10 ** i - 1])
    return total


def prob_041():
    """Largest pandigital (1..n) prime."""
    largest = 0
    for i in range(1, 9):
        for j in permutations(range(1, i + 1)):
            temp = utils_ab.iterable_to_int(j)
            if temp > largest and primes.is_prime(temp):
                largest = temp
    return largest


def prob_042():
    """Count words in 042.txt whose letter-value sum is a triangle number."""
    words = files.get_line(RESOURCES, "042.txt", split_option=',')
    triangles = 0
    for word in words:
        # [1:-1] strips the surrounding double quotes.
        value = sum((ord(c) - ord('A') + 1) for c in word[1:-1])
        if is_triangle_num(value):
            triangles += 1
    return triangles


def prob_043():
    """Sum of 0-9 pandigitals with the sub-string divisibility property."""
    from utils_ab import iterable_to_int as converter
    total = 0
    divisors = [2, 3, 5, 7, 11, 13, 17]
    for cur_per in permutations(range(10)):
        if cur_per[0] == 0:
            continue
        for j in range(7, 0, -1):
            if converter(cur_per[j:j + 3]) % divisors[j - 1] != 0:
                break
        else:
            total += converter(cur_per)
    return total


def prob_045():
    """Next number after 40755 that is triangular, pentagonal and hexagonal."""
    for i in count(286):
        tri_num = triangle_num(i)
        if is_pentagonal_num(tri_num) and is_hexagonal_num(tri_num):
            return tri_num


def prob_046():
    """Smallest odd composite violating Goldbach's other conjecture."""
    #TODO Refactor
    for i in count(9, 2):
        if primes.is_prime(i):
            continue
        for j in count(1):
            temp = i - 2 * j ** 2
            if temp < 0:
                # No prime + twice-a-square decomposition exists.
                return i
            if primes.is_prime(temp):
                break


def prob_047():
    """First of four consecutive integers with four distinct prime factors."""
    nums = 0
    for i in count(1):
        if primes.num_distinct_prime_factors(i) == 4:
            nums += 1
            if nums == 4:
                return i - 3
        else:
            nums = 0


def prob_048():
    return str(sum(i ** i for i in range(1, 1001)))[-10:]


if __name__ == "0001-0050":
    common.run_all(__name__)
{ "repo_name": "anshbansal/general", "path": "Python3/project_euler/0001-0050.py", "copies": "1", "size": "7832", "license": "mit", "hash": -7301582526232317000, "line_mean": 21.4441260745, "line_max": 80, "alpha_frac": 0.5301327886, "autogenerated": false, "ratio": 3.3256900212314227, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4355822809831423, "avg_score": null, "num_lines": null }
__author__ = 'Aseem'
# Project Euler solutions 51-100; shared helpers live in sibling modules.
import combinatorics
import common
import files
import math
import numbers_ab
from itertools import count, product

RESOURCES = 'Resources'


def prob_052():
    """Smallest x such that 2x..6x all contain the same digits as x."""
    def set_of_digits(cur_num):
        return set(str(cur_num))
    for num in count(1):
        if all(set_of_digits(j * num) == set_of_digits(num) for j in range(2, 7)):
            return num


def prob_053():
    # True counts as 1, so this counts C(n, r) values exceeding one million.
    return sum(combinatorics.combinations(n, r) > 1000000 for n in range(1, 101) for r in range(n + 1))


def prob_055():
    """Count Lychrel candidates below 10000 (50-iteration cutoff)."""
    total = 0
    for num in range(1, 10000):
        for _ in range(1, 50):
            num += numbers_ab.rev_num(num)
            if numbers_ab.is_palindrome(num):
                break
        else:
            # for/else: never reached a palindrome within the iteration cap.
            total += 1
    return total


def prob_056():
    """Maximum digit sum of a**b for a, b < 100."""
    sum_t = 0
    for a, b in product(range(1, 100), repeat=2):
        cur = sum(int(i) for i in str(a ** b))
        if cur > sum_t:
            sum_t = cur
    return sum_t


def prob_067():
    return common.max_path_sum_tri_file(RESOURCES, "067.txt")


def prob_081():
    """Minimal path sum through the 80x80 matrix moving only right/down."""
    #TODO Refactor Maybe
    """To understand how this works just make a (4,4) matrix."""
    size_matrix = 80
    mat = files.read_int_from_lines(RESOURCES, "081.txt", split_option=',')
    # Sweep diagonally: on pass i, row i (left half) and column i (top half)
    # are both finalized, each cell absorbing its cheaper predecessor.
    for i in range(1, size_matrix):
        for j in range(i + 1):
            if j == 0:
                mat[i][j] += mat[i - 1][j]
                mat[j][i] += mat[j][i - 1]
            else:
                mat[i][j] += min(mat[i][j - 1], mat[i - 1][j])
                if i != j:
                    mat[j][i] += min(mat[j - 1][i], mat[j][i - 1])
    return mat[size_matrix - 1][size_matrix - 1]


def prob_097():
    # Modular exponentiation keeps only the last ten digits of 2**7830457.
    return (28433 * pow(2, 7830457, 10 ** 10)) % (10 ** 10) + 1


def prob_099():
    """1-based line number in 099.txt with the largest base**exponent."""
    largest = 0
    for i, line in zip(count(1), files.get_lines(RESOURCES, "099.txt", split_option=',')):
        a, b = map(int, line)
        # Compare logarithms instead of the astronomically large powers.
        current = b * math.log(a)
        if current > largest:
            largest = current
            line_largest = i
    return line_largest


if __name__ == "__main__":
    common.run_all(__name__)
{ "repo_name": "anshbansal/general", "path": "Python3/project_euler/0051-0100.py", "copies": "1", "size": "2113", "license": "mit", "hash": 7314602589948776, "line_mean": 22.4888888889, "line_max": 90, "alpha_frac": 0.517274018, "autogenerated": false, "ratio": 3.153731343283582, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.41710053612835823, "avg_score": null, "num_lines": null }
__author__ = 'Aseem'
import files
import sys
import time

RULER = "====="


def _accumulate(row, sums):
    """Fold one triangle row into the running best-path sums.

    sums[i] is the best path total ending at position i of the previous row;
    the return value is the same for `row`.  The first call (sums is None)
    simply seeds the accumulator with the top row.
    """
    if sums is None:
        return row
    # Edge cells have a single parent; interior cells take the better of two.
    return ([row[0] + sums[0]]
            + [row[i] + max(sums[i - 1], sums[i]) for i in range(1, len(row) - 1)]
            + [row[-1] + sums[-1]])


def max_path_sum_triangle(rows):
    """Return the maximum top-to-bottom path sum of a triangle of numbers.

    :param rows: iterable of rows, each one element longer than the last
    :return: the best path total, or None for an empty triangle
    """
    sums = None
    for row in rows:
        sums = _accumulate(row, sums)
    return max(sums) if sums else None


def max_path_sum_tri_file(path, file_name):
    """Read a space-separated triangle file and return its best path sum."""
    return max_path_sum_triangle(files.read_int_from_lines(path, file_name, split_option=' '))


def run_all(module_name):
    """Run every prob_* function in `module_name` and check its answer.

    Answers come from Resources/answers.txt, one '<number>^<answer>' per
    line.  Wrong answers and solutions slower than one second are printed.
    """
    cur_module = __import__(module_name)
    list_functions = [i for i in dir(sys.modules[module_name]) if i.startswith('prob_')]
    list_answers = {i[0]: i[1] for i in files.get_lines('Resources', 'answers.txt', split_option='^')}
    for func_name in list_functions:
        # Fix of a fragile idiom: lstrip("prob_0") strips a *character set*
        # ('p','r','o','b','_','0'), not the literal prefix.  Remove the
        # fixed prefix first, then strip leading zeros only.
        prob_num = func_name[len("prob_"):].lstrip("0")
        timm_t = time.time()
        ans = str(getattr(cur_module, func_name)())
        temp_time = time.time() - timm_t
        if ans != list_answers[prob_num]:
            print("PROBLEM IN " + prob_num + " ANSWER = " + ans)
        elif temp_time > 1.0:
            print(RULER + RULER + func_name + RULER + RULER + str(temp_time))
{ "repo_name": "anshbansal/general", "path": "Python3/project_euler/common.py", "copies": "1", "size": "1275", "license": "mit", "hash": 7647962960652256000, "line_mean": 29.380952381, "line_max": 102, "alpha_frac": 0.5819607843, "autogenerated": false, "ratio": 2.9859484777517564, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.40679092620517565, "avg_score": null, "num_lines": null }
__author__ = 'Aseem'
import itertools
import math

import numbers_ab


def is_prime(num):
    """Checks whether a number is prime or not (trial division by odds)."""
    if num == 2:
        return True
    if num % 2 == 0 or num < 2:
        return False
    temp = int(math.sqrt(num)) + 1
    for i in range(3, temp, 2):
        if num % i == 0:
            return False
    return True


def largest_prime_factor(num):
    """Returns the largest prime factor for num > 0"""
    ans = 0
    sqrt_num = int(math.sqrt(num)) + 1
    # Divide out each small factor completely; whatever remains above 1 is
    # itself prime and larger than any factor found so far.
    for i in itertools.chain([2], range(3, sqrt_num, 2)):
        while not (num % i):
            ans = i
            num //= i
    return ans if num == 1 else num


def nth_prime(num):
    """Returns the Nth prime (1-based)."""
    # Start with a guessed sieve bound and double it until enough primes
    # appear.  NOTE(review): the first call uses the memoized sieve but the
    # retry loop uses the plain one -- presumably intentional, worth checking.
    pos = num
    fun = primes_list_mem(pos)
    while len(fun) < num:
        pos *= 2
        fun = primes_list(pos)
    return fun[num - 1]


def primes_list(num):
    """Returns list of prime numbers up to and including num."""
    if num < 2:
        return []
    # isprime holds the odd candidates 3, 5, ...; composites are zeroed.
    isprime = [num for num in range(3, num + 1, 2)]
    temp2 = int(math.sqrt(num)) + 1
    for i in range(3, temp2, 2):
        if isprime[(i - 3) // 2]:
            j = 3
            temp = j * i
            while temp <= num:
                # Only odd multiples matter: (odd*odd) stays in the array.
                isprime[(temp - 3) // 2] = 0
                j += 2
                temp = j * i
    return [2] + [x for x in isprime if x]


def primes_list_mem(num, isprime=[3]):
    """Returns list of Prime numbers

    Advantage: Half memory usage
        Takes 1/10th time if called in the same run for same or lower number
        Improvement in speed if it has been called before
    Disadvantage: Has storage between function calls
        - Takes up a lot of memory space

    NOTE(review): the mutable default argument is the deliberate cross-call
    memoization mechanism here, not an accident -- do not 'fix' it without
    rethinking the caching.
    """
    if num < 2:
        return []
    if num > 2 * len(isprime) + 2:
        # Index of odd value x within the shared isprime array.
        def mapping(x):
            return (x - 3)//2
        first_new = 2 * len(isprime) + 3
        # Extend the cached sieve with the newly requested odd candidates.
        isprime += [num for num in range(first_new, num + 1, 2)]
        temp2 = int(math.sqrt(num)) + 1
        # Pass 1: strike multiples of already-sieved primes, starting from
        # the first odd multiple that lands in the new region.
        for i in range(3, first_new, 2):
            if isprime[mapping(i)]:
                j = first_new//i
                if j == 1:
                    j = 3
                elif j % 2 == 0:
                    j += 1
                temp = j * i
                while temp <= num:
                    isprime[mapping(temp)] = 0
                    j += 2
                    temp = j * i
        # Pass 2: sieve with primes first discovered in the new region.
        for i in range(first_new, temp2, 2):
            if isprime[mapping(i)]:
                j = 3
                temp = j * i
                while temp <= num:
                    isprime[mapping(temp)] = 0
                    j += 2
                    temp = j * i
    # Slice so a smaller request never returns cached primes beyond num.
    return [2] + [x for x in isprime[:(num - 1)//2] if x]


def prime_factors(num):
    # Yields each distinct prime factor of num once, in increasing order.
    sqrt_num = math.floor(math.sqrt(num)) + 1
    for i in itertools.chain([2], range(3, sqrt_num, 2)):
        if num % i:
            continue
        while num % i == 0:
            num //= i
        yield i
    if num != 1:
        # The leftover cofactor is a prime larger than sqrt(original num).
        yield num


def num_distinct_prime_factors(num):
    """returns the num of distinct prime factors for num > 0"""
    return sum(1 for _ in prime_factors(num))


def product_of_prime_factors(num):
    # Product of the distinct prime factors (the radical of num).
    ans = 1
    for i in prime_factors(num):
        ans *= i
    return ans


def is_circular_prime(num, length):
    # Any even digit makes some rotation even, so reject early; then test
    # every rotation for primality.
    for i in str(num):
        if not (int(i) % 2):
            return False
    for check in range(length):
        if not is_prime(numbers_ab.circular_shift(num, check + 1)):
            return False
    return True
{ "repo_name": "anshbansal/general", "path": "Python3/functions/primes.py", "copies": "1", "size": "3464", "license": "mit", "hash": 2872673660500823600, "line_mean": 23.5744680851, "line_max": 74, "alpha_frac": 0.4884526559, "autogenerated": false, "ratio": 3.6045785639958376, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9590667153701984, "avg_score": 0.00047281323877068556, "num_lines": 141 }
__author__ = 'Aseem'
import math

import series


def rev_num(num):
    """Return num with its decimal digits reversed (sign preserved)."""
    if num < 0:
        return -int(str(-num)[::-1])
    else:
        return int(str(num)[::-1])


def is_palindrome(num):
    """True if a string, or the decimal form of an int, reads the same reversed."""
    if isinstance(num, str):
        return num == num[::-1]
    else:
        return num == rev_num(num)


def get_binary(num):
    """Return num's binary digits re-read as a decimal int (e.g. 5 -> 101)."""
    return int(bin(num)[2:])


def triangle_num(n):
    """n-th triangle number: 1 + 2 + ... + n."""
    return series.sum_numbers(n)


def is_triangle_num(n):
    # n is triangular iff (-1 + sqrt(8n + 1)) / 2 is a whole number.
    temp = (-1 + math.sqrt(8 * n + 1)) / 2
    if temp == int(temp):
        return True
    return False


def pentagonal_num(n):
    """n-th pentagonal number n(3n - 1)/2."""
    return (n * (3 * n - 1)) // 2


def is_pentagonal_num(n):
    # Inverse of the pentagonal formula must be a whole number.
    temp = (1 + math.sqrt(1 + 24 * n)) / 6
    if temp == int(temp):
        return True
    return False


def hexagonal_num(n):
    """n-th hexagonal number n(2n - 1)."""
    return n * (2 * n - 1)


def is_hexagonal_num(n):
    # Inverse of the hexagonal formula must be a whole number.
    temp = (1 + math.sqrt(1 + 8 * n)) / 4
    if temp == int(temp):
        return True
    return False


def is_bouncy_num(num):
    """True if num's digits are neither non-decreasing nor non-increasing."""
    lisst1 = [int(i) for i in str(num)]
    lisst2 = sorted(lisst1)
    if (lisst1 == lisst2) or (lisst1 == lisst2[::-1]):
        return False
    return True


def factorial(num):
    """Iterative factorial; returns 1 for num < 2."""
    ans = 1
    while num > 1:
        ans *= num
        num -= 1
    return ans


def product_digits(str_num):
    """Product of the digits of a numeric string."""
    product = 1
    for i in str_num:
        product *= int(i)
    return product


def circular_shift(num, shift=1):
    """Rotate num's decimal digits right by `shift` positions."""
    num = str(num)
    length = len(num)
    return int(num[length - shift:] + num[:length - shift])


def divisors_of_num(num):
    """Yield every divisor of num (unordered); only 1 for num == 1."""
    if num < 2:
        if num == 1:
            yield 1
        return
    temp = math.sqrt(num)
    counter = temp_i = int(temp)
    if temp == temp_i:
        # Perfect square: yield the root once so it is not double-counted.
        yield temp_i
    else:
        counter += 1
    for i in range(1, counter):
        if num % i == 0:
            yield i
            yield num // i


def num_of_divisors(num):
    return sum(1 for _ in divisors_of_num(num))


def sum_of_divisors(num):
    return sum(divisors_of_num(num))


def sum_proper_divisors(num):
    """Sum of divisors excluding num itself."""
    return sum_of_divisors(num) - num


def zeros_in_fact(num):
    """Number of trailing zeros of num! (Legendre count of factor 5)."""
    if num < 0:
        return 0
    fives = 0
    while num:
        num //= 5
        fives += num
    return fives


def reverse_trunc(num, times=1):
    """Returns a number by truncating its MSD
    By default it truncates one digit"""
    return num % (10 ** (len(str(num)) - times))


def decimal_to_other(num, base):
    """Return num (>= 0) written in `base` as a digit string.

    Bug fix: the old base case `num <= 1` made every single-digit value
    above 1 recurse once and prepend a spurious '0' (e.g. (5, 8) -> "05").
    Stopping as soon as num is a single digit in the target base fixes it.
    """
    if num < base:
        return str(num)
    return str(decimal_to_other(num // base, base)) + str(num % base)
{ "repo_name": "anshbansal/general", "path": "Python3/functions/numbers_ab.py", "copies": "1", "size": "2487", "license": "mit", "hash": -3116977569618130000, "line_mean": 17.1605839416, "line_max": 82, "alpha_frac": 0.545637314, "autogenerated": false, "ratio": 3.0218712029161603, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.906649854637707, "avg_score": 0.00020199410781813386, "num_lines": 137 }
__author__ = 'Aseem'


def prob_031():
    """Project Euler 31: ways to make 200p from standard UK coins.

    Replaces the seven-level nested brute force (author-marked
    '#TODO Needs to be refactored') with the standard coin-partition DP:
    after processing each coin, ways[t] is the number of combinations of
    the coins seen so far that sum to t.  Same result, vastly faster.

    :return: number of distinct coin combinations totalling 200p (73682)
    """
    target = 200
    coins = (1, 2, 5, 10, 20, 50, 100, 200)
    ways = [1] + [0] * target  # ways[0] = 1: the empty combination
    for coin in coins:
        # Ascending totals so each coin may be reused any number of times.
        for total in range(coin, target + 1):
            ways[total] += ways[total - coin]
    return ways[target]

# Preserve the original script behavior: run once and print result + timing.
import time
s = time.time()
print(prob_031())
print(time.time() - s)
{ "repo_name": "anshbansal/general", "path": "Python3/project_euler/001_050/031.py", "copies": "1", "size": "1080", "license": "mit", "hash": 1150659492225715500, "line_mean": 27.4210526316, "line_max": 52, "alpha_frac": 0.3157407407, "autogenerated": false, "ratio": 4.337349397590361, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.515309013829036, "avg_score": null, "num_lines": null }
__author__ = 'ashabou'
# Exploratory-analysis script for the UCI abalone dataset: renders box plots,
# a parallel-coordinates plot and a correlation heatmap into one PDF report.
# NOTE(review): Python 2 syntax (print statements, xrange); np.float is
# removed in modern NumPy -- confirm the target environment.

import argparse
import os

import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import pandas as pd
from sklearn import preprocessing
import numpy as np
from pandas import DataFrame as df

argument_parser = argparse.ArgumentParser()
argument_parser.add_argument("--root-path", dest="root_path", default=".", help="root dataset path")
argument_parser.add_argument("--report-path", dest="report_path", default=".", help="report path fot stats")
args, unknown = argument_parser.parse_known_args()

# Ensure the report's parent directory exists before opening the PDF.
pdf_dir = os.path.dirname(args.report_path)
if not os.path.exists(pdf_dir):
    os.makedirs(pdf_dir)
pdf_pages = PdfPages(args.report_path)

#load data
data_file_path = os.path.join(args.root_path, "abalone.data")
data = pd.read_csv(data_file_path, header=None, prefix="V")
data.columns = ['Sex', 'Length', 'Diameter', 'Height', 'Whole weight', 'Shucked weight', 'Viscera weight', 'Shell weight', 'Rings']#last column=label
print data.head()
print data.tail()
print data.describe()
nrows, ncols = data.shape
print data.shape

#show quantiles of variables (column 0, 'Sex', is categorical and skipped)
plt.figure()
plt.xlabel("Attribute")
plt.ylabel("quantile ranges")
plt.title("box plot")
plt.boxplot(data.iloc[:,1:].values)
pdf_pages.savefig()
plt.close()

#normalize form: zero-mean / unit-variance scaling before plotting
plt.figure()
plt.xlabel("Attribute")
plt.ylabel("quantile ranges - normalized")
plt.title("box plot with normalization")
scaler = preprocessing.StandardScaler(copy=True, with_mean=True, with_std=True)
plt.boxplot(scaler.fit_transform( (data.iloc[:,1:]).values.astype(np.float)))
pdf_pages.savefig()
plt.close()

#parallel plot: non-binary case
plt.figure()
plt.ylabel("Value")
plt.title("parallel plot")
mean_label = data.iloc[:,-1].mean()
std_label = data.iloc[:,-1].std()
# One polyline per row, colored by the standardized label squashed through
# a logistic into the RdYlBu colormap.  O(nrows) plot calls -- slow for
# large datasets.
for i in xrange(nrows):
    pcolor = (data.iloc[i,ncols-1] - mean_label)/std_label
    (data.iloc[i,1:-1]).plot(color = plt.cm.RdYlBu(1/(1+np.exp(-pcolor))),alpha=0.5)
pdf_pages.savefig()
plt.close()

#attributes pearson correlations:
plt.figure()
plt.xlabel("Attribute")
plt.ylabel("Attribute")
plt.title("cross correlation (with target also)")
plt.imshow(data.iloc[:,1:].corr())
plt.colorbar()
pdf_pages.savefig()
plt.close()

pdf_pages.close()
{ "repo_name": "aymen82/kaggler-competitions-scripts", "path": "dev/abalone/script.py", "copies": "1", "size": "2329", "license": "bsd-3-clause", "hash": 8202563143351316000, "line_mean": 27.0722891566, "line_max": 108, "alpha_frac": 0.6878488622, "autogenerated": false, "ratio": 3.2035763411279228, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4391425203327923, "avg_score": null, "num_lines": null }
__author__ = 'ashabou'
# Spark job: trains a k-means "codebook" over previously extracted local
# image features (parquet) and saves the model for the BoW feature step.
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.mllib.clustering import KMeans
import logging

sc = SparkContext(appName="db-creator", master="local[*]")
sqc = SQLContext(sc)

# Quiet Spark's JVM-side log4j chatter before doing anything else.
logger = sc._jvm.org.apache.log4j
logger.LogManager.getLogger("INFO").setLevel(logger.Level.ERROR)
logger.LogManager.getLogger("org").setLevel(logger.Level.ERROR)
logger.LogManager.getLogger("akka").setLevel(logger.Level.ERROR)

import argparse
import os
import random
import time
import numpy as np

random.seed(0)  # (datetime.now())

logging.basicConfig()
# Rebinds `logger` from the JVM log4j handle above to a Python logger.
logger = logging.getLogger('CODEBOOK')
logger.setLevel(logging.DEBUG)

argument_parser = argparse.ArgumentParser(description='Create a codebook')
argument_parser.add_argument('--input-root-dir', dest='input_root_dir', type=str, default=None, help='path to input root dir')
argument_parser.add_argument('--stats', dest='stats', type=int, default=1, help='stats of codebook')
args = argument_parser.parse_args()


def error(model, point):
    # Euclidean distance from `point` to its assigned cluster center.
    # NOTE(review): uses model.centers while the main block reads
    # model.clusterCenters -- confirm both attributes exist on the pinned
    # pyspark KMeansModel version.
    center = model.centers[model.predict(point)]
    return np.sqrt(sum([x ** 2 for x in (point - center)]))


if __name__ == '__main__':
    # check args
    logger.debug("check dirs...")
    if not args.input_root_dir:
        argument_parser.error("missing input root path")
    logger.info("input root dir= %s" % args.input_root_dir)

    # check input db
    input_local_features = os.path.join(args.input_root_dir, "locals", "local", "features.parquet")
    if not os.path.isdir(input_local_features):
        raise Exception("missing db parquet directory")

    # check output dir: back up any previous codebook under a timestamped name
    # logger.debug("Create new codebook dir...")
    output_dir = os.path.join(args.input_root_dir, 'codebooks', 'codebook')
    if os.path.isdir(output_dir):
        new_name = output_dir + time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
        logger.info("backup old output-dir in %s" % new_name)
        os.rename(output_dir, os.path.join(args.input_root_dir, new_name))
    os.makedirs(output_dir)

    # Codebook size (number of k-means clusters).
    k=100
    features = sqc.read.parquet(input_local_features).map(lambda x: x.features).cache()
    print features.first()
    model = KMeans.train(features, k, maxIterations=10, initializationMode="random")

    # Within-set sum of squared errors: a quick quality measure of the fit.
    WSSSE = features.map(lambda point: error(model, point)).reduce(lambda x, y: x + y)
    print("Within Set Sum of Squared Error = " + str(WSSSE))
    print model.clusterCenters[0]
    model.save(sc, os.path.join(output_dir, "codebook.model"))
    sc.stop()
{ "repo_name": "aymen82/SparkImageRecognition", "path": "scripts/create-codebook.py", "copies": "1", "size": "2736", "license": "apache-2.0", "hash": 890162988241444400, "line_mean": 30.8139534884, "line_max": 99, "alpha_frac": 0.6370614035, "autogenerated": false, "ratio": 3.6, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.47370614034999997, "avg_score": null, "num_lines": null }
__author__ = 'ashabou' from pyspark import SparkContext from pyspark.sql import SQLContext, Row, functions from pyspark.mllib.clustering import KMeansModel from pyspark.mllib.linalg import SparseVector import logging sc = SparkContext(appName="db-creator", master="local[*]") sqc = SQLContext(sc) logger = sc._jvm.org.apache.log4j logger.LogManager.getLogger("INFO").setLevel(logger.Level.ERROR) logger.LogManager.getLogger("org").setLevel(logger.Level.ERROR) logger.LogManager.getLogger("akka").setLevel(logger.Level.ERROR) import argparse import os import functools import random import time from scipy.spatial import distance import cv2 import numpy as np random.seed(0) # (datetime.now()) logging.basicConfig() logger = logging.getLogger('FEATURES') logger.setLevel(logging.DEBUG) argument_parser = argparse.ArgumentParser(description='Create a codebook') argument_parser.add_argument('--input-root-dir', dest='input_root_dir', type=str, default=None, help='path to input root dir') argument_parser.add_argument('--stats', dest='stats', type=int, default=1, help='stats of codebook') args = argument_parser.parse_args() def array_to_sparse_vector(ar): dict = {} for i in xrange(len(ar)): if ar[i]!=0: dict.update({i: ar[i]}) return SparseVector(len(ar), dict) def compute_global_feature(row, feature_name, model, pooling): try: img_path = row.filename img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE) if feature_name in ["surf", "SURF"]: extractor = cv2.SURF() elif feature_name in ["sift", "SIFT"]: extractor = cv2.SIFT() else: raise Exception("feature %s not yet supported" % feature_name) kp, descriptors = extractor.detectAndCompute(img, None) model = model.value clusterCenters = model.clusterCenters # bow = np.zeros(len(clusterCenters)) bow = np.ones(len(clusterCenters)) return Row(fileName=img_path, cls=row.cls, features=array_to_sparse_vector(bow.tolist())) for x in descriptors: k = model.predict(x) dist = distance.euclidean(clusterCenters[k], x) if pooling == "max": bow[k] = 
max(bow[k], dist) elif pooling == "sum": bow[k] = bow[k] + dist return Row(fileName=img_path, cls=row.cls, features=array_to_sparse_vector(bow.tolist())) except Exception, e: logging.exception(e) return [] if __name__ == '__main__': # check args logger.debug("check dirs...") if not args.input_root_dir: argument_parser.error("missing input root path") logger.info("input root dir= %s" % args.input_root_dir) # check input codebook input_codebook = os.path.join(args.input_root_dir, "codebooks", "codebook", "codebook.model") if not os.path.isdir(input_codebook): raise Exception("missing codebook model directory") # check input db input_db = os.path.join(args.input_root_dir, "dbs", "db", "out.parquet") if not os.path.isdir(input_db): raise Exception("missing db parquet directory") # check output dir # logger.debug("Create new codebook dir...") output_dir = os.path.join(args.input_root_dir, 'features', 'feature') if os.path.isdir(output_dir): new_name = output_dir + time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime()) logger.info("backup old output-dir in %s" % new_name) os.rename(output_dir, os.path.join(args.input_root_dir, new_name)) os.makedirs(output_dir) model = KMeansModel.load(sc, input_codebook) model = sc.broadcast(model) pooling="max" feature_name = "SIFT" df = sqc.read.parquet(input_db) print df.count() features_bow = df.map(functools.partial(compute_global_feature, feature_name="SURF", model=model, pooling=pooling)) print features_bow.first() print features_bow.count() featuresSchema = sqc.createDataFrame(features_bow) featuresSchema.registerTempTable("features") featuresSchema.write.parquet(os.path.join(output_dir, 'features.parquet')) sc.stop()
{ "repo_name": "aymen82/SparkImageRecognition", "path": "scripts/create-global-features.py", "copies": "1", "size": "4524", "license": "apache-2.0", "hash": 8851838174643259000, "line_mean": 30.2, "line_max": 97, "alpha_frac": 0.6061007958, "autogenerated": false, "ratio": 3.732673267326733, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4838774063126733, "avg_score": null, "num_lines": null }
__author__ = 'ashabou' from pyspark import SparkContext from pyspark.sql import SQLContext, Row from pyspark.ml.classification import LogisticRegression from pyspark.ml.feature import Normalizer, StringIndexer from sklearn.preprocessing import normalize from pyspark.ml import Pipeline from pyspark.ml.evaluation import BinaryClassificationEvaluator from pyspark.mllib.linalg import SparseVector from pyspark.mllib.classification import SVMWithSGD, LogisticRegressionWithSGD,LogisticRegressionWithLBFGS from pyspark.mllib.regression import LabeledPoint from pyspark.mllib.evaluation import BinaryClassificationMetrics import logging sc = SparkContext(appName="db-creator", master="local[*]") sqc = SQLContext(sc) logger = sc._jvm.org.apache.log4j logger.LogManager.getLogger("INFO").setLevel(logger.Level.ERROR) logger.LogManager.getLogger("org").setLevel(logger.Level.ERROR) logger.LogManager.getLogger("akka").setLevel(logger.Level.ERROR) import argparse import os import random import time import numpy as np random.seed(0) # (datetime.now()) logging.basicConfig() logger = logging.getLogger('CODEBOOK') logger.setLevel(logging.DEBUG) argument_parser = argparse.ArgumentParser(description='Create a codebook') argument_parser.add_argument('--input-root-dir', dest='input_root_dir', type=str, default=None, help='path to input root dir') argument_parser.add_argument('--stats', dest='stats', type=int, default=1, help='stats of codebook') args = argument_parser.parse_args() def array_to_sparse_vector(ar): dict = {} for i in xrange(len(ar)): if ar[i]!=0: dict.update({i: ar[i]}) return SparseVector(len(ar), dict) def normalizer(row): ar = row.features.toArray() return Row(cls=str(row.cls), features=array_to_sparse_vector(normalize(np.array(ar).reshape(-1, 1)))) if __name__ == '__main__': # check args logger.debug("check dirs...") if not args.input_root_dir: argument_parser.error("missing input root path") logger.info("input root dir= %s" % args.input_root_dir) # check input features 
input_features = os.path.join(args.input_root_dir, "features", "feature", "features.parquet") if not os.path.isdir(input_features): raise Exception("missing features") # check output dir # logger.debug("Create new training model...") output_dir = os.path.join(args.input_root_dir, 'models', 'model') if os.path.isdir(output_dir): new_name = output_dir + time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime()) logger.info("backup old output-dir in %s" % new_name) os.rename(output_dir, os.path.join(args.input_root_dir, new_name)) os.makedirs(output_dir) features = sqc.read.parquet(input_features) features = features.filter(features['cls']!='None')\ .select(['cls', 'features'])\ .cache() print features features = sqc.createDataFrame(features.map(normalizer)) print features training, valid = features.randomSplit([0.75, 0.25]) labelIndexer = StringIndexer(inputCol="cls", outputCol="label") model = labelIndexer.fit(training) training = model.transform(training).rdd.map(lambda row: LabeledPoint(row.label, row.features)) valid = model.transform(valid).rdd.map(lambda row: LabeledPoint(row.label, row.features)) print training.first() #lr = LogisticRegression() #pipeline = Pipeline(stages=[labelIndexer,lr]) # fit model = LogisticRegressionWithLBFGS.train(training, numClasses=10) #model = pipeline.fit(training) # ecaluate #evaluator = BinaryClassificationEvaluator(metricName="areaUnderROC") # predict on training #predictions = model.predict(training) predictionAndLabels = training.map(lambda lp: (float(model.predict(lp.features)), lp.label)) # Instantiate metrics object metrics = BinaryClassificationMetrics(predictionAndLabels) # Area under precision-recall curve print("Area under PR = %s" % metrics.areaUnderPR) #auroc = evaluator.evaluate(predictions) #print auroc # predict on test #predictions = model.transform(valid) #auroc = evaluator.evaluate(predictions) #print auroc predictionAndLabels = valid.map(lambda lp: (float(model.predict(lp.features)), lp.label)) # Instantiate metrics 
object metrics = BinaryClassificationMetrics(predictionAndLabels) # Area under precision-recall curve print("Area under PR = %s" % metrics.areaUnderPR) sc.stop()
{ "repo_name": "aymen82/SparkImageRecognition", "path": "scripts/train.py", "copies": "1", "size": "4756", "license": "apache-2.0", "hash": -1144050385470290200, "line_mean": 30.9194630872, "line_max": 107, "alpha_frac": 0.6759882254, "autogenerated": false, "ratio": 3.9015586546349468, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0077237090359981815, "num_lines": 149 }
__author__ = 'ashabou' from pyspark import SparkContext from pyspark.sql import SQLContext, Row import logging sc = SparkContext(appName="db-creator", master="local[*]") sqc = SQLContext(sc) logger = sc._jvm.org.apache.log4j logger.LogManager.getLogger("INFO").setLevel(logger.Level.ERROR) logger.LogManager.getLogger("org").setLevel(logger.Level.ERROR) logger.LogManager.getLogger("akka").setLevel(logger.Level.ERROR) import argparse import os import glob import time logging.basicConfig() logger = logging.getLogger('DATASET') logger.setLevel(logging.DEBUG) argument_parser = argparse.ArgumentParser(description='Create a data set frame from directories') argument_parser.add_argument('--input-root-dir', dest='input_root_dir', type=str, default=None, help='path to root db dir') argument_parser.add_argument('--max-size', dest='max_size', type=int, default=None, help='max size of one class data set') argument_parser.add_argument('--stats', dest='stats', type=int, default=1, help='stats of the data set') args = argument_parser.parse_args() if __name__ == '__main__': # check args logger.debug("check dirs...") if not args.input_root_dir: argument_parser.error("missing input directory") logger.info("input-root-dir = %s" % args.input_root_dir) # check output dir # logger.debug("Create new dataset dir...") output_dir = os.path.join(args.input_root_dir, "dbs", "db") if os.path.isdir(output_dir): new_name = output_dir + time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime()) logger.info("backup old output-dir in %s" % new_name) os.rename(output_dir, os.path.join(args.input_root_dir, new_name)) os.makedirs(output_dir) # get class names cls_names = [os.path.basename(cls) for cls in glob.glob(os.path.join(args.input_root_dir, 'images/*')) if os.path.isdir(cls)] logger.info("class names = %s" % cls_names) # get training images paths rdd_cls_names = sc.parallelize(cls_names) rdd_cls_images_names = rdd_cls_names.flatMap( lambda cls: [Row(filename=img_path, cls=cls) for img_path in 
glob.glob(os.path.join(args.input_root_dir, 'images', cls, '*.jpg'))]) rdd_test_images_names = sc.parallelize(['None']).flatMap( lambda cls: [Row(filename=img_path, cls=cls) for img_path in glob.glob(os.path.join(args.input_root_dir, 'test', '*.jpg'))]) df = sqc.createDataFrame(rdd_cls_images_names.union(rdd_test_images_names)) fraction_class_train = 10.0 / df.filter(df.cls == 'c0').count() fraction_test = 100.0 / df.filter(df.cls == 'None').count() print fraction_class_train, fraction_test # select random images df_sampled = df.sampleBy('cls', fractions={'None': fraction_test, 'c0': fraction_class_train, 'c1': fraction_class_train, 'c2': fraction_class_train, 'c3': fraction_class_train, 'c4': fraction_class_train, 'c5': fraction_class_train, 'c6': fraction_class_train, 'c7': fraction_class_train, 'c8': fraction_class_train, 'c9': fraction_class_train}, seed=0) #df_sampled.write.parquet(os.path.join(output_dir, 'out.parquet')) #print df_sampled.count() df.write.parquet(os.path.join(output_dir, 'out.parquet')) print df.count() sc.stop()
{ "repo_name": "aymen82/SparkImageRecognition", "path": "projects/state-farm/create-db.py", "copies": "1", "size": "3889", "license": "apache-2.0", "hash": -3714237832236429300, "line_mean": 35.6886792453, "line_max": 118, "alpha_frac": 0.5690408845, "autogenerated": false, "ratio": 3.732245681381958, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9778326391599681, "avg_score": 0.004592034856455359, "num_lines": 106 }
__author__ = 'ashabou' import argparse import os import matplotlib.pyplot as plt from matplotlib.backends.backend_pdf import PdfPages import numpy as np import re from pyspark import SparkContext from pyspark.sql import SQLContext from pyspark.mllib.feature import Normalizer, StandardScaler from pyspark.mllib.recommendation import ALS, Rating from pyspark.mllib.evaluation import RegressionMetrics, RankingMetrics argument_parser = argparse.ArgumentParser() argument_parser.add_argument("--root-path", dest="root_path", default=".", help="root dataset path") argument_parser.add_argument("--report-path", dest="report_path", default=".", help="report path fot stats") args, unknown = argument_parser.parse_known_args() pdf_dir = os.path.dirname(args.report_path) if not os.path.exists(pdf_dir): os.makedirs(pdf_dir) pdf_pages = PdfPages(args.report_path) sc = SparkContext("local[4]", "ml-100k") #user catalogue ''' user_data_rdd = sc.textFile(os.path.join(args.root_path, "u.user")) print "first raw: ", user_data_rdd.first() print "#users: ", user_data_rdd.count() user_fields_rdd = user_data_rdd.map(lambda line: line.split("|")) print "line split: %s, #fields = %d" %(user_fields_rdd.first(), len(user_fields_rdd.first())) genders_rdd = user_fields_rdd.map(lambda fields: fields[2]).distinct() print "genders = %s, #genders = %d" %(genders_rdd.collect(), genders_rdd.count()) occupations_rdd = user_fields_rdd.map(lambda fields: fields[3]).distinct() print "occupations = %s, #occupations = %d" %(occupations_rdd.collect(), occupations_rdd.count()) zip_codes_rdd = user_fields_rdd.map(lambda fields: fields[4]).distinct() print "#zip_codes = %d" %(zip_codes_rdd.count()) #histogram of ages ages = user_fields_rdd.map(lambda fields: int(fields[1])).collect() plt.figure() plt.hist(ages, bins=20) plt.title("age histogram") pdf_pages.savefig() plt.close() #count occupations count_per_occuptation = user_fields_rdd.map(lambda fields: (fields[3], 1)).reduceByKey(lambda x,y: x+y).collect() 
plt.figure() plt.bar(range(len(count_per_occuptation)), [c[1] for c in count_per_occuptation]) plt.title("ocuupation histogram 1" ) pdf_pages.savefig() plt.close() #or count_per_occuptation = user_fields_rdd.map(lambda fields: fields[3]).countByValue() plt.figure() plt.bar(range(len(count_per_occuptation.keys())), count_per_occuptation.values()) plt.xticks(range(len(count_per_occuptation.keys())), count_per_occuptation.keys(), rotation=30) plt.title("ocuupation histogram 2") fig = plt.gcf() fig.set_size_inches(16,10) pdf_pages.savefig() plt.close() #encoding categorical data def encoder(value, dict): code = [0]*len(dict) for key,val in dict.items(): if val==value: code[key]=1 return code occupations_dict=dict(occupations_rdd.sortBy(lambda x: x).zipWithIndex().map(lambda (x,y): (y,x)).collect()) print occupations_dict occupations = user_fields_rdd.map(lambda fields: fields[3]).map(lambda x: encoder(x,occupations_dict)) print occupations.first() ''' #rating catalog rating_rdd = sc.textFile(os.path.join(args.root_path, "u.data")) print rating_rdd.first() count_ratings = rating_rdd.count() print "#ratings = %d" %count_ratings rating_fields_rdd = rating_rdd.map(lambda row: row.split("\t")).map(lambda fields: [float(x) for x in fields]) print rating_fields_rdd.first() ''' count_fields = len(rating_fields_rdd.first()) print "#fields = %d" %count_fields for id in xrange(count_fields): col_rdd = rating_fields_rdd.map(lambda fields: fields[id]) mmin = col_rdd.reduce(lambda x,y : min(x,y)) mmax = col_rdd.reduce(lambda x,y : max(x,y)) mmean = col_rdd.reduce(lambda x,y : x+y)/count_ratings mmedian = np.median(col_rdd.collect()) print col_rdd.stats() print "field %d: min=%f, max=%f, mean=%f, median=%f" %(id, mmin, mmax, mmean, mmedian) user_ratings_freq = rating_fields_rdd.map(lambda fields: (int(fields[0]), int(fields[2]))).groupByKey().map(lambda (x,y): (x, len(y))) first = user_ratings_freq.first() print "rating freq of user %d = %d" %(first[0], first[1]) firsts = 
user_ratings_freq.take(5) print "rating freq of users %s" %firsts ratings = user_ratings_freq.map(lambda (x,y): y).collect() plt.figure() plt.hist(ratings, bins=200) plt.title("ratings histo") pdf_pages.savefig() plt.close() #features scaling (colum based) scaler = StandardScaler(withMean=False, withStd=True).fit(rating_fields_rdd) scaled_fields_rdd = scaler.transform(rating_fields_rdd) print scaled_fields_rdd.first() #vector normalize fetaures normalizer = Normalizer() normalized_fields_rdd = normalizer.transform(scaled_fields_rdd) print normalized_fields_rdd.first() ''' als_ratings_rdd = rating_fields_rdd.map(lambda fields: fields[0:3]).map(lambda fields: Rating(*fields)) print als_ratings_rdd.first() model = ALS.train(als_ratings_rdd, rank=50, iterations=10, lambda_=0.01) print model.userFeatures().count(), model.productFeatures().count() test_rdd = rating_fields_rdd.map(lambda fields: fields[0:2]) predictions_rdd = model.predictAll(test_rdd).map(lambda rating: ((rating.user, rating.product), rating.rating)) print predictions_rdd.first() ratingsAndPredictions_rdd = rating_fields_rdd.map(lambda fields: ((fields[0], fields[1]), fields[2])).join(predictions_rdd) print ratingsAndPredictions_rdd.first() values_rdd = ratingsAndPredictions_rdd.map(lambda (x,y): y) regression_eval = RegressionMetrics(values_rdd) print "error=%f" %(regression_eval.meanSquaredError) user_ratings_for_map_eval_rdd = ratingsAndPredictions_rdd.map(lambda (x,y): (int(x[0]), y)).groupByKey() print user_ratings_for_map_eval_rdd #movie catalog ''' movie_rdd = sc.textFile(os.path.join(args.root_path, "u.item")) print movie_rdd.first() count_movies = movie_rdd.count() print "#movies = %d" %count_movies movie_fields_rdd = movie_rdd.map(lambda line: line.split("|")) print movie_fields_rdd.first() def get_year(field, value=None): try: return int(field.split("-")[-1]) except Exception, e: if value is None: return 1900 else: return value ages_rdd = movie_fields_rdd.map(lambda field: 
get_year(field[2])).filter(lambda x: x!=1900) ages_hist_dict = ages_rdd.countByValue() plt.figure() plt.bar(range(len(ages_hist_dict.keys())), ages_hist_dict.values()) plt.xticks(range(len(ages_hist_dict.keys())), ages_hist_dict.keys(), rotation=30) plt.title("movies ages histo") fig = plt.gcf() fig.set_size_inches(16,10) pdf_pages.savefig() plt.close() age_median = np.median(ages_rdd.collect()) print age_median ages_sorted_rdd = ages_rdd.sortBy(lambda x: x).zipWithIndex().map(lambda (x,y): (y,x)) count_ages = ages_sorted_rdd.count() rdd_age_median = ages_sorted_rdd.lookup(count_ages/2)[0] print rdd_age_median ages_clean_rdd = movie_fields_rdd.map(lambda field: get_year(field[2])) new_median = np.median(ages_clean_rdd.collect()) print new_median import re def extract_title(raw): grps = re.search("\((\w+)\)", raw) if grps: return raw[:grps.start()].strip() else: return raw titles_rdd = movie_fields_rdd.map(lambda fields: fields[1]).map(lambda raw: extract_title(raw)) print titles_rdd.take(5) titles_tokens_rdd = titles_rdd.map(lambda title: title.split(" ")) terms_rdd = titles_tokens_rdd.flatMap(lambda x: x).distinct() print terms_rdd.take(5), terms_rdd.count() terms_dict = terms_rdd.zipWithIndex().collectAsMap() print terms_dict def BoW(tokens, dict): bow = [0]*len(dict) for t in tokens: bow[dict[t]]=1 return bow terms_dict_bcast = sc.broadcast(terms_dict) bow_rdd = titles_tokens_rdd.map(lambda tokens: BoW(tokens, terms_dict_bcast.value)) code = bow_rdd.first() print len(code), np.where(np.array(code)==1) ''' pdf_pages.close() sc.stop()
{ "repo_name": "aymen82/kaggler-competitions-scripts", "path": "dev/ml-100k/script.py", "copies": "1", "size": "7740", "license": "bsd-3-clause", "hash": 6466805822790444000, "line_mean": 33.5535714286, "line_max": 134, "alpha_frac": 0.7112403101, "autogenerated": false, "ratio": 2.919652961146737, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9093971411530966, "avg_score": 0.007384371943154351, "num_lines": 224 }
__author__ = 'ashabou' import argparse import os import matplotlib.pyplot as plt from matplotlib.backends.backend_pdf import PdfPages import pandas as pd from random import uniform import numpy as np from pandas import DataFrame as df argument_parser = argparse.ArgumentParser() argument_parser.add_argument("--root-path", dest="root_path", default=".", help="root dataset path") argument_parser.add_argument("--report-path", dest="report_path", default=".", help="report path fot stats") args, unknown = argument_parser.parse_known_args() pdf_dir = os.path.dirname(args.report_path) if not os.path.exists(pdf_dir): os.makedirs(pdf_dir) pdf_pages = PdfPages(args.report_path) #load data data_file_path = os.path.join(args.root_path, "sonar.all-data") data = pd.read_csv(data_file_path, header=None, prefix="V") print data.head() print data.tail() print data.describe() print data.shape #parallel plot: show attributes distributions for the two classes nrows, ncols = data.shape plt.figure() plt.xlabel("Attribute index") plt.ylabel("Attribute value") plt.title("parallel plot") for i in xrange(nrows): datarow = data.iloc[i] pcolor = None if datarow[ncols-1]=='R': pcolor = "red" elif datarow[ncols-1]=="M": pcolor="blue" else: pcolor="black" datarow[:-1].plot(color=pcolor) pdf_pages.savefig() plt.close() #cross-plot: show cross attribute correlations id1=1 id2=2 plt.figure() plt.xlabel("Attribute %d" %id1) plt.ylabel("Attribute %d" %id2) plt.title("cross plot %d,%d --> correlation = %f" %(id1, id2, (data.iloc[:,id1]).corr(data.iloc[:,id2]))) plt.scatter(data.iloc[:,id1], data.iloc[:,id2]) pdf_pages.savefig() plt.close() id2=30 plt.figure() plt.xlabel("Attribute %d" %id1) plt.ylabel("Attribute %d" %id2) plt.title("cross plot %d,%d --> correlation = %f" %(id1, id2, (data.iloc[:,id1]).corr(data.iloc[:,id2]))) plt.scatter(data.iloc[:, id1], data.iloc[:, id2]) pdf_pages.savefig() plt.close() #attribut-target plot: shw correlatuons between attribute and target id=20 
plt.scatter(data.iloc[:,id], map(lambda x: 0+uniform(-0.1,0.1) if x=="M" else 1+uniform(-0.1,0.1), data.iloc[:,-1])) pdf_pages.savefig() plt.close() #attributes pearson correlations: plt.figure() plt.xlabel("Attribute") plt.ylabel("Attribute") plt.title("cross correlation") plt.imshow(data.iloc[:,:-1].corr()) plt.colorbar() pdf_pages.savefig() plt.close() pdf_pages.close()
{ "repo_name": "aymen82/kaggler-competitions-scripts", "path": "dev/rocks-vs-mines/script.py", "copies": "1", "size": "2419", "license": "bsd-3-clause", "hash": 4496936931537130500, "line_mean": 25.8777777778, "line_max": 116, "alpha_frac": 0.6998759818, "autogenerated": false, "ratio": 2.92503022974607, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9049159319653302, "avg_score": 0.01514937837855354, "num_lines": 90 }
__author__ = 'Ashar Malik' f = open('dictionary.txt', 'r') dictionary = f.read().split("\n") def ces_shift(str, index): #Caesarian shift str = str.lower() char_list = [] for char in str: if not char.isalpha():#ignore non-letters char_list.append(char) continue ascii = ord(char)+index if(ascii>122): #character wrapping ascii = ascii-25 if(ascii<97): #for negative indices ascii = ascii+25 char_list.append(chr(ascii)) return "".join(char_list) def amount_words(str): words = str.split(" ") count = 0 for word in words: if dictionary.__contains__(word): count+=1 return count def decode_ces_shift(str): #tests all shift possibilities for one with most correctly spelled words best_index_guess = 0 best_guess_amt_words = 0 best_str_guess = "" for i in range(0, 25):#test all shift possibilities conv_str = ces_shift(str, -i) word_count = amount_words(conv_str) if(word_count>best_guess_amt_words):#is the new shift guess better than the other? best_guess_amt_words = word_count best_index_guess = i best_str_guess = conv_str return best_str_guess org_str = "This is a sample string to illustrate an example." shift_by = 20 coded_string = ces_shift(org_str, shift_by) print "Original: '%s'\nEncoded (N=%d): '%s'\nDecoding..." % (org_str, shift_by, coded_string) print decode_ces_shift(coded_string)
{ "repo_name": "asharmalik/Caesar-Cipher-Decoder", "path": "Cipher.py", "copies": "1", "size": "1532", "license": "apache-2.0", "hash": -8551643198744929000, "line_mean": 26.8545454545, "line_max": 99, "alpha_frac": 0.6083550914, "autogenerated": false, "ratio": 3.3744493392070485, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.44828044306070486, "avg_score": null, "num_lines": null }
__author__ = 'Ashar Malik' import csv, time import smtplib, os import imaplib import email from shutil import rmtree from email.MIMEMultipart import MIMEMultipart from email.MIMEBase import MIMEBase from email.MIMEText import MIMEText from email.Utils import COMMASPACE, formatdate from email import Encoders from os import listdir sender_username = "" sender_pass = "" smtp_address = "" smtp_port = 80 #send_mail([to], subject, text, [attachments], [cc]) def send_mail(send_to, subject, text, files=[], cc=[]): assert isinstance(send_to, list) assert isinstance(files, list) assert isinstance(cc, list) msg = MIMEMultipart() msg['From'] = sender_username msg['To'] = COMMASPACE.join(send_to) msg['Date'] = formatdate(localtime=True) msg['Subject'] = subject #new things... msg['CC'] = COMMASPACE.join(cc) send_to = send_to + cc msg.attach( MIMEText(text) ) for f in files: print "Attaching %s..." % os.path.basename(f), part = MIMEBase('application', "octet-stream") part.set_payload( open(f,"rb").read() ) Encoders.encode_base64(part) part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(f)) msg.attach(part) print "Sending...", smtp = smtplib.SMTP('smtpout.secureserver.net', 80) smtp.login(sender_username, sender_pass) smtp.sendmail(sender_username, send_to, msg.as_string()) smtp.close() def read_csv(file_name): csv_info = [] with open(file_name, 'rb') as f: reader = csv.reader(f) for row in reader: csv_info.append(row) return csv_info def log(text): global current_log print "Log: %s" % text text = "[" + time.strftime("%d/%m/%Y") +" "+ time.strftime("%I:%M:%S") + "] " + text current_log = current_log + text +"\n" with open("bin/log.txt", 'a') as f: f.write(text+"\n") def filter_whitespace(name): while name.endswith(" "): name = name[0:-1] if name.__len__() == 0: return name return name def correct_email(email): email = email.replace(" ", "") #get rid of whitespace while email[-1:].isalpha() == False: #get rid of any trailing punctuation 
(non-alphabetic characters) email = email[:-1] return email def run_campaign(campaign_loc): global current_log begin = time.time() email_list = [] email_problems = [] cc = [] current_log = "" if os.path.isfile(campaign_loc + 'to_email.csv') == False: print "No emailing list provided in this email! Please make sure you saved it as 'to_email' as 'CSV (Comma Delimited)' or 'Windows Comma Separated' on Mac." return -1 try: csv_info = read_csv(campaign_loc + 'to_email.csv') except Exception as e: print "Unable to open CSV file. Did this file come from a Mac? Make sure you saved as 'Windows Comma Separated'" print "You can also open the file on this computer and resave it as CSV." return -1 #ask for subject of email if os.path.isfile(campaign_loc + 'template.txt'): with open(campaign_loc + 'template.txt', 'rb') as f: template = f.read() else: print "No template in this email! Make sure you save it as 'template.txt'." return -1 #change %Name to %name template = template.replace("%Name", "%name") subject = raw_input("Subject: ") headers = csv_info.pop(0) #make headers lowercase for i, item in enumerate(headers): headers[i] = str(item.lower()).replace(" ", "") #remove whitespace as well email_index = headers.index("email") replace_list = [] #compile index/phrase list to replace in template for i, to_replace in enumerate(headers): if to_replace != "email": replace_list.append((to_replace, i)) #fill in templates for item in csv_info: email_str = template to_email = item[email_index] if len(to_email) != 0: for replace_token in replace_list: replacement = filter_whitespace(item[replace_token[1]]) #removes any trailing spaces email_str = email_str.replace('%'+replace_token[0], replacement) #template is done being filled in to_email = correct_email(to_email) email_list.append((email_str, to_email)) #.DS_Store #grab attachments if os.path.isdir(campaign_loc+'Attachments') == False: print "No attachments found." 
attachments = [] else: attachments = listdir(campaign_loc+'Attachments') if ".DS_Store" in attachments: attachments.remove(".DS_Store") if template.lower().__contains__("attach") and attachments.__len__() == 0: while True: print "The template mentions attachments. But I did not find any attachments. Continue? y/n" usr_input = raw_input('>> ') if usr_input == "y": break elif usr_input == "n": return -1 cc_term = "a" while True: print "Would you like to add %s CC? (y/n)" % cc_term cc_decision = raw_input(">> ") if cc_decision == "y": cc_person = raw_input("Type an email: ") cc.append(cc_person) cc_term = "another" elif cc_decision == "n": break print "Filled in Template:\n"+email_list[0][0] print "----------------" print "From: %s" % sender_username print "Subject: %s" % subject print "CC: %s" % cc print "Attachments: %s" % attachments while True: usr_input = "" usr_input = raw_input("Continue (y/n): ") if usr_input == "n": return -1 elif usr_input == "y": break if attachments.__len__() > 0: for i, attachment in enumerate(attachments): attachments[i] = campaign_loc+'Attachments/'+attachment #sending emails now emails_sent = [] while True: email_problems = [] total_emails = len(email_list) for i, email in enumerate(email_list): to = email[1] text = email[0] print "(%s/%s) Emailing %s..." % (i+1, total_emails, to), try: send_mail([to], subject, text, attachments, cc) except Exception as e: print "An error occurred." email_problems.append(email) else: print "Done." emails_sent.append(to) if email_problems.__len__() > 0: print "These emails were not sent(%s):" % len(email_problems) for email in email_problems: print email[1] skip_problems = False while True: retry_reply = raw_input("Retry? 
(y/n): ") if retry_reply == "n": skip_problems = True break elif retry_reply == "y": email_list = email_problems break if skip_problems == True: break else: break #done sending emails elapsed_time = time.time()-begin #in seconds emails_sent = len(email_list) #not true print "These people were successfully emailed:\n%s" % '\n'.join(zip(*email_list)[1])+"\n" def loadSettings(): global smtp_address global smtp_port global sender_username global sender_pass if os.path.isfile("settings.txt") == False: to_write = "address=%s\n" % smtp_address to_write = to_write+"port=%s\n" % smtp_port to_write = to_write+"username=%s\n" % sender_username to_write = to_write+"pass=%s" % sender_pass with open('settings.txt', 'w') as f: f.write(to_write) print "Created settings.txt:\n%s" % to_write return with open('settings.txt', 'r') as f: variables = f.readlines() for i, line in enumerate(variables): variables[i] = line.replace("\n", "") print variables[i] variables[i] = variables[i].split("=") if variables[i][0] == 'address': smtp_address = variables[i][1] elif variables[i][0] == 'port': smtp_port = int(variables[i][1]) elif variables[i][0] == 'username': sender_username = variables[i][1] elif variables[i][0] == 'pass': sender_pass = variables[i][1] print "Successfully loaded settings." def list_dir(): print "Available campaigns:" listing = [name for name in os.listdir("Email Campaigns") if os.path.isdir(os.path.join("Email Campaigns", name))] for i, directory in enumerate(listing): print "%s: %s" % (i, directory) print "Type a number or press ENTER to refresh:" return listing def main(): global sender_username global sender_pass #create skeleton if os.path.isdir('bin') == False: os.makedirs('bin') if os.path.isdir('Email Campaigns') == False: os.makedirs('Email Campaigns') #maybe skip if there is only one campaign? 
(ask if they want to use that one y/n options = list_dir() while True: usr_input = raw_input('>> ') print usr_input if usr_input.isdigit(): index = int(usr_input) if index+1 <= len(options): if run_campaign('Email Campaigns/'+options[index]+'/') == -1: print "Campaign could not run. Please type a number or press ENTER to refresh:" else: print "Campaign finished." options = list_dir() else: print "Number is not an option. Type a number or press ENTER to refresh:" elif usr_input == "set_user": sender_username = raw_input("Type username: ") print "Username set to %s." % sender_username sender_pass = raw_input("Type password: ") print "Password set to %s." % sender_pass else: options = list_dir() pass loadSettings() main() #todo: show astericks for password #add multi account options #error detection for names/emails #export unsent emails?
{ "repo_name": "asharmalik/EmailerLite", "path": "emailerlite.py", "copies": "1", "size": "10297", "license": "apache-2.0", "hash": -5956263207089413000, "line_mean": 30.9813664596, "line_max": 164, "alpha_frac": 0.5688064485, "autogenerated": false, "ratio": 3.817945865776789, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9851124747095439, "avg_score": 0.007125513436270037, "num_lines": 322 }
__author__ = 'asherkhb' from os import system from depriciated import config_writers def runDarkmaster(image_dict, darklist_filename, masterdark_filename, norm_filename, bot_xo=None, bot_xf=None, bot_yo=None, bot_yf=None, top_xo=None, top_xf=None, top_yo=None, top_yf=None, width=None, height=None, config=None, medianNorm=False, medianDark=False): # Write dark images to config file. darks = image_dict['DARK'] config_writers.writeListCfg(darks, darklist_filename) # Fill out required parameters options = '--fileListFile=%s --darkFileName=%s --normFileName=%s' % (darklist_filename, masterdark_filename, norm_filename) # Fill out bottom/top normalization coordinates, if present. if bot_xo and bot_xf and bot_yo and bot_yf and top_xo and top_xf and top_yo and top_yf: options += ' --bot_xo=%s --bot_xf=%s --bot_yo=%s --bot_yf=%s' % (str(bot_xo), str(bot_xf), str(bot_yo), str(bot_yf)) options += ' --top_xo=%s --top_xf=%s --top_yo=%s --top_yf=%s' % (str(top_xo), str(top_xf), str(top_yo), str(top_yf)) # Fill out height/width of centered normalization region (overrides normalization coordinates), if present. if width and height: options += ' --width=%s --height=%s' % (str(width), str(height)) # Add median options, if present if medianNorm: options += ' --medianNorm' if medianDark: options += ' --medianDark' # Build & call darkmaster command. cmd = 'darkmaster ' + options system(cmd) #images = {"DARK": [], "SCIENCE": []} #runDarkmaster(images, 'darks.list', 'masterdark.fits', 'norms.dat')
{ "repo_name": "acic2015/findr", "path": "deprecated/run_darkmaster.py", "copies": "1", "size": "1969", "license": "mit", "hash": 4671566102791164000, "line_mean": 47.0487804878, "line_max": 111, "alpha_frac": 0.5388522092, "autogenerated": false, "ratio": 3.7362428842504745, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.47750950934504743, "avg_score": null, "num_lines": null }
__author__ = 'asherkhb'

import os.path
import multiprocessing as mp
import pprint

# Location of darksub and fitscent. If in path can leave, otherwise give path here.
darksub = 'darksub'
fitscent = 'fitscent'

# File of per-image shifts, one "<filename> <x> <y>" line per image.
file_shifts = 'file_shifts.txt'

# Maximum number of parallel processes.
max_processes = 2

# Just some sample testing data.
images = {"DARK": [],
          "SCIENCE": ['V47_20141104053022231159.fits', 'V47_20141104053022514640.fits',
                      'V47_20141104053022798116.fits', 'V47_20141104053023932047.fits',
                      'V47_20141104053024215509.fits', 'V47_20141104053024782464.fits',
                      'V47_20141104053025065946.fits', 'V47_20141104053026766813.fits',
                      'V47_20141104053027050292.fits']}


def prependToFilename(filename, prepending):
    """
    Prepend Text to Filename.
    :param filename: Filename or path/to/filename to be modified.
    :param prepending: String to prepend to filename.
    :return: Modified filename or path/to/filename.
    """
    b = os.path.basename(filename)
    n = prepending + b
    # Replace only the basename, so any leading directory path is preserved.
    return filename.replace(b, n)


def spawnDsubCmd(science_img, dark_img, norm_bot=None, norm_top=None):
    """
    Spawn a darksub command.
    :param science_img: Science image filename or path/to/filename.
    :param dark_img: Master dark filename or path/to/filename.
    :param norm_bot: Multiplicative scaling to apply to the bottom amplifier (optional).
    :param norm_top: Multiplicative scaling to apply to the top amplifier (optional).
    :return: darksub_command, subtracted_filename
    """
    # Output is the input filename with a 'dsub_' basename prefix.
    dsub_out = prependToFilename(science_img, 'dsub_')
    dsub_opts = '--inputFile=%s --darkFile=%s --outputFile=%s' % (science_img, dark_img, dsub_out)
    if norm_bot:
        dsub_opts += ' --norm_bot=%s' % str(norm_bot)
    if norm_top:
        dsub_opts += ' --norm_top=%s' % str(norm_top)
    dsub_cmd = darksub + ' ' + dsub_opts
    return dsub_cmd, dsub_out


def spawnCentCmd(subtracted_img, xshift, yshift):
    """
    Spawn a fitscent command.
    :param subtracted_img: Dark subtracted science image.
    :param xshift: X shift to apply to image.
    :param yshift: Y shift to apply to image.
    :return: fitscent_command, centered_filename
    """
    # Output is the input filename with a 'cent_' basename prefix.
    cent_out = prependToFilename(subtracted_img, 'cent_')
    cent_opts = '--input=%s --x=%s --y=%s --output=%s' % (subtracted_img, str(xshift), str(yshift), cent_out)
    cent_cmd = fitscent + ' ' + cent_opts
    return cent_cmd, cent_out


def loadShifts(shifts_file):
    # Parse the shifts file into {filename: {'x': ..., 'y': ...}};
    # values are kept as strings and formatted into the command later.
    shifts = {}
    with open(shifts_file, 'r') as s:
        for l in s:
            c = l.split()
            shifts[c[0]] = {'x': c[1], 'y': c[2]}
    return shifts


def getNorms(img):
    # TODO
    """
    Stub: amplifier normalizations are not implemented yet.  The empty strings
    returned are falsy, so spawnDsubCmd omits the --norm_* options entirely.
    :param img: Image to obtain normalizations for.
    :return: top, bot
    """
    top = ''
    bot = ''
    return top, bot


def getShifts(img, fileshifts):
    # TODO
    """
    Look up the recentering shift for an image.
    :param img: image to get shift values
    :param fileshifts: dict produced by loadShifts()
    :return: xshift, yshift
    """
    xs = fileshifts[img]['x']
    ys = fileshifts[img]['y']
    return xs, ys


def runProcess(call):
    # Pool worker: echo the shell command, then execute it.
    print call
    os.system(call)
    return 1


def subtractAndCenter(image_dict, masterdark, shifts_file):
    """Dark-subtract then recenter every science image, running the external
    commands in parallel pools of max_processes workers.

    :param image_dict: dict with a 'SCIENCE' key listing image filenames.
    :param masterdark: master dark frame filename.
    :param shifts_file: path to the per-image shifts file (see loadShifts).
    :return: list of final (centered) output filenames.
    """
    # Build list of science images to process.
    sciences = image_dict['SCIENCE']

    # Load shift values from file to memory.
    fileshifts = loadShifts(shifts_file)

    # Define necessary variables.
    scmds = []  # darksub commands
    souts = []  # darksub output filenames
    ccmds = []  # fitscent commands
    couts = []  # fitscent output filenames

    # Build up commands for each science image.
    for img in sciences:
        # Get norm and shift values.
        tnorm, bnorm = getNorms(img)
        xshift, yshift = getShifts(img, fileshifts)

        # Build subtraction task.
        ds_cmd, ds_out = spawnDsubCmd(img, masterdark, norm_bot=bnorm, norm_top=tnorm)
        # subtractions[img] = {'cmd': ds_cmd, 'out': ds_out}
        scmds.append(ds_cmd)
        souts.append(ds_out)

        # Build centering task (consumes the subtraction output file).
        cn_cmd, cn_out = spawnCentCmd(ds_out, xshift=xshift, yshift=yshift)
        # centerings[img] = {'cmd': cn_cmd, 'out': cn_out}
        ccmds.append(cn_cmd)
        couts.append(cn_out)

    # Execute subtraction tasks (parallel).  All subtractions must finish
    # before centering starts, since centering reads the dsub_ outputs.
    # NOTE(review): the pools are never close()d/join()ed; Pool.map blocks
    # until the work is done, but workers are left to be reaped at exit.
    sub_pool = mp.Pool(processes=max_processes)
    sub_pool.map(runProcess, scmds)

    # Execute centering tasks (parallel).
    cent_pool = mp.Pool(processes=max_processes)
    cent_pool.map(runProcess, ccmds)

    # Return list of final filenames.
    return couts


def main():
    cent_dsub_files = subtractAndCenter(images, 'masterdark.fits', file_shifts)
    print cent_dsub_files


if __name__ == '__main__':
    main()
{ "repo_name": "acic2015/findr", "path": "deprecated/run_darksub_fitscent.py", "copies": "1", "size": "4790", "license": "mit", "hash": -2993255557048280000, "line_mean": 29.7115384615, "line_max": 109, "alpha_frac": 0.611691023, "autogenerated": false, "ratio": 3.2607215793056503, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.43724126023056503, "avg_score": null, "num_lines": null }
__author__ = 'Ashish'

from nltk.tokenize import sent_tokenize
import json
import uuid


class TagTogReader(object):
    """Converts a JNLPBA IOB2 corpus file into TagTog ann.json + HTML documents.

    parse() fills self.documents (pmid -> {'abstract', 'text'}) and
    self.json_content (pmid -> TagTog annotation dict); convert_to_html() and
    createJSON() then write one .html and one .ann.json file per document.
    """

    def __init__(self, file_location):
        # file_location: path to the IOB2 input file (one token\ttag per line,
        # documents separated by "###MEDLINE:<pmid>" headers).
        self.file_location = file_location
        self.documents = {}
        self.symbols = [',', '.', '(', ')', ':', ';', '[', ']']
        self.punctuations = ['.', ',']
        self.json_content = {}

    def parse(self):
        """Stream the IOB2 file, rebuilding document text and entity offsets.

        NOTE(review): a document is committed only when the NEXT "###MEDLINE"
        header is seen, so the final document in the file appears never to be
        stored — confirm against the corpus/conversion output.
        NOTE(review): `entity`/`isEntity`/`word` are first assigned inside tag
        branches; a symbol token arriving before any B- tag would reference
        them unbound (NameError) — presumably the corpus ordering avoids this.
        """
        data_file = open(self.file_location)
        tempString = ""
        id = 1
        for line in data_file:
            if str(line).strip().startswith("###MEDLINE"):
                # New document header: flush the previous document (if any).
                if tempString!="":
                    self.documents[pmid.rstrip()]={}
                    sentences = sent_tokenize(tempString.lstrip().rstrip())
                    # First sentence is treated as the title, the rest as body.
                    self.documents[pmid.rstrip()]['abstract']=sentences[0]
                    self.documents[pmid.rstrip()]['text'] = ' '.join(sentences[1:])
                    id = 1
                    # Build the TagTog ann.json skeleton for the finished doc.
                    json_single={}
                    json_single['annotatable']={}
                    json_single['annotatable']['parts']=["s1h1", "s2h1", "s2p1"]
                    json_single['anncomplete']=False
                    json_single['sources']=[]
                    json_single['sources'].append({"name": "MEDLINE", "id": pmid.rstrip(), "url": None})
                    json_single['entities']=entities
                    json_single['relations']=[]
                    json_single['metas']={}
                    self.json_content[pmid.rstrip()]=json_single
                entities = []
                pmid = line.split(":")[1]
                tempString = ""
                is_title = True
            elif str(line)=='\n':
                continue
            else:
                tempWord = str(line).split('\t')
                if tempWord[0] in self.symbols:
                    # Punctuation/bracket token: no space before closers,
                    # a space before openers.
                    if tempWord[0].startswith('(') or tempWord[0].startswith('['):
                        tempString = tempString + ' ' + tempWord[0]
                    else:
                        tempString = tempString + tempWord[0]
                    # First full stop ends the title sentence.
                    if (tempWord[0]=="."
                        and is_title==True):
                        is_title = False;
                    # Symbol inside a continuing entity mention.
                    if (entity and tempWord[1].startswith("I-")):
                        if tempWord[0]=="(":
                            word = word+" "+tempWord[0]
                            word.rstrip()
                        elif word.endswith("("):
                            word = word+tempWord[0];
                        elif tempWord[0]==")":
                            word = word+tempWord[0]
                    # Symbol tagged O closes any open entity mention.
                    if (isEntity and tempWord[1].startswith("O")):
                        entity['offsets'][0]['text'] = word
                        entities.append(entity)
                        isEntity = False
                        entity = {}
                else:
                    if tempWord[1].startswith("B-protein") or tempWord[1].startswith("B-RNA") or tempWord[1].startswith("B-DNA"):
                        # B- tag: open a new entity; class ids follow the
                        # TagTog project's entity type numbering.
                        if tempWord[1].startswith("B-protein"):
                            classId = "e_1"
                        elif tempWord[1].startswith("B-DNA"):
                            classId = "e_2"
                        else:
                            classId = "e_3"
                        entity = {}
                        entity['classId']=classId
                        word = tempWord[0]
                        entity['offsets']=[{'start': len(tempString)}]
                        entity['confidence']={'state': "", 'who': ['user:genia4er'], 'prob': 1.0000}
                        isEntity = True
                        if (is_title):
                            entity['part']='s1h1'
                        else:
                            entity['part']='s2p1'
                    elif tempWord[1].startswith("I-protein") or tempWord[1].startswith("I-RNA") or tempWord[1].startswith("I-DNA"):
                        # I- tag: extend the current entity mention.
                        if word.endswith("("):
                            word = word+tempWord[0]
                        else:
                            word = word+" "+tempWord[0]
                    elif (tempWord[1].startswith("O") and isEntity):
                        # O tag after an entity: close the mention.
                        entity['offsets'][0]['text'] = word
                        entities.append(entity)
                        isEntity = False
                        entity = {}
                    # Append the token to the running document text.
                    if tempString.endswith('(') or tempString.endswith('['):
                        tempString = tempString + tempWord[0]
                    else:
                        tempString = tempString + ' ' + tempWord[0]
        data_file.close()

    def createJSON(self):
        """Write one .ann.json per parsed document, rebasing entity offsets.

        Offsets past the title length belong to the body part ('s2p1') and are
        shifted to be relative to the body's start.
        """
        for key in self.json_content:
            cutoff = len(self.documents[key]['abstract'])
            for i in range(len(self.json_content[key]['entities'])):
                if self.json_content[key]['entities'][i]['offsets'][0]['start']>cutoff:
                    self.json_content[key]['entities'][i]['part']='s2p1'
                    self.json_content[key]['entities'][i]['offsets'][0]['start'] = self.json_content[key]['entities'][i]['offsets'][0]['start'] - cutoff - 1
                else:
                    self.json_content[key]['entities'][i]['part']='s1h1'
            f = open('/home/ashish/mthesis-ashish/resources/corpora/entity_recognition/jnlpba/anndoc/'+str(key)+'.ann.json', 'w')
            f.write(json.dumps(self.json_content[key], sort_keys=True, indent=2, separators=(',', ': ')))
            f.close()

    def writeHTML(self, key):
        """Write the TagTog companion HTML document for one pmid."""
        title = self.documents.get(key)['abstract']
        abstr = self.documents.get(key)['text']
        doctype = "<!DOCTYPE html>\n"
        # Document id is a random hex prefix plus the pmid.
        hashId = str(uuid.uuid4().hex) + ':' + key.rstrip()
        header = '<html id="'+ hashId +'" data-origid="'+key.rstrip() + '" class="anndoc" data-anndoc-version="2.0" lang="" xml:lang="" xmlns="http://www.w3.org/1999/xhtml">\n' \
                 "\t<head>\n" \
                 '\t\t<meta charset="UTF-8"/>\n' \
                 '\t\t<meta name="generator" content="org.rostlab.relna"/>\n' \
                 "\t\t<title>" + hashId + "</title>\n" \
                 "\t</head>\n"
        body = "\t<body>\n" \
               "\t\t<article>\n" \
               '\t\t\t<section data-type="title">\n' \
               '\t\t\t\t<h2 id="s1h1">' + title + "</h2>\n" \
               "\t\t\t</section>\n"
        abstract = '\t\t\t<section data-type="abstract">\n' \
                   '\t\t\t\t<h3 id="s2h1">' \
                   'Abstract' \
                   '</h3>\n' \
                   '\t\t\t\t<div class="content">\n'
        content = '\t\t\t\t\t<p id = "s2p1">'
        content_close = '</p>\n'
        abstract_close = "\t\t\t\t</div>\n\t\t\t</section>\n"
        body_close = "\t\t</article>\n" \
                     "\t</body>\n"
        html_close = "</html>"
        html_file = open('/home/ashish/mthesis-ashish/resources/corpora/entity_recognition/jnlpba/anndoc/'+str(key).rstrip()+'.html', 'w')
        html_file.write(doctype)
        html_file.write(header)
        html_file.write(body)
        html_file.write(abstract)
        html_file.write(content)
        html_file.write(abstr)
        html_file.write(content_close)
        html_file.write(abstract_close)
        html_file.write(body_close)
        html_file.write(html_close)
        return

    def convert_to_html(self):
        # Emit one HTML file per parsed document.
        for pmid in self.documents.keys():
            self.writeHTML(pmid)

    def print_documents(self):
        # Debug helper: dump every parsed document to stdout.
        for pmid in self.documents.keys():
            print pmid, self.documents[pmid]


# Script entry: parse the JNLPBA training corpus and emit HTML + ann.json.
tagtog = TagTogReader('/home/ashish/mthesis-ashish/resources/corpora/entity_recognition/jnlpba/iob/train/Genia4ERtask2.iob2')
tagtog.parse()
tagtog.convert_to_html()
tagtog.createJSON()
{ "repo_name": "ashishbaghudana/mthesis-ashish", "path": "miscellaneous/jnlpba2tagtogconverter/TagTogFormat.py", "copies": "1", "size": "5926", "license": "mit", "hash": -571011975546040700, "line_mean": 33.0574712644, "line_max": 172, "alpha_frac": 0.5968612892, "autogenerated": false, "ratio": 2.8138651471984804, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.8663002415634486, "avg_score": 0.04954480415279889, "num_lines": 174 }
__author__ = 'Ashish'

# MovieLens 100k exploratory analysis: merges users/ratings/movies and
# compares ratings across age groups (Python 2 / old pandas API).

import pandas as pd
import numpy as np
from pylab import *
#import matplotlib.pyplot as plt

pd.set_option('max_columns', 50)

# pass in column names for each CSV
u_cols = ['user_id', 'age', 'sex', 'occupation', 'zip_code']
users = pd.read_csv('Data\ml-100k\u.user', sep='|', names=u_cols)

r_cols = ['user_id', 'movie_id', 'rating', 'unix_timestamp']
ratings = pd.read_csv('Data\ml-100k\u.data', sep='\t', names=r_cols)

# the movies file contains columns indicating the movie's genres
# let's only load the first five columns of the file with usecols
m_cols = ['movie_id', 'title', 'release_date', 'video_release_date', 'imdb_url']
movies = pd.read_csv('Data\ml-100k\u.item', sep='|', names=m_cols, usecols=range(5))

# create one merged DataFrame
movie_ratings = pd.merge(movies, ratings)
lens = pd.merge(movie_ratings, users)

# Bucket users into decade-wide, left-closed age groups [0,10), [10,20), ...
labels = ['0-9', '10-19', '20-29', '30-39', '40-49', '50-59', '60-69', '70-79']
lens['age_group'] = pd.cut(lens.age, range(0, 81, 10), right=False, labels=labels)
#print lens[['age', 'age_group']].drop_duplicates()[:10]
print "===================================================="
# Now we can compare ratings across age groups (count and mean per group).
print lens.groupby('age_group').agg({'rating': [np.size, np.mean]})
print "===================================================="
# The 50 most-rated movies.
# NOTE(review): Series.order() and DataFrame.ix are the old pandas API
# (later replaced by sort_values/loc); this script targets an old stack.
most_50 = lens.groupby('movie_id').size().order(ascending=False)[:50]
print most_50
lens.set_index('movie_id', inplace=True)
# Mean rating per (title, age_group) for those 50 movies.
by_age = lens.ix[most_50.index].groupby(['title', 'age_group'])
by_age.rating.mean().head(15)
print by_age.rating.mean().unstack(1).fillna(0)[10:20]
by_age.rating.mean().unstack(1).fillna(0)[5:10].plot()
show()
{ "repo_name": "Swaraj1998/MyCode", "path": "ML-Workshop/day5/analysis2.py", "copies": "1", "size": "1665", "license": "mit", "hash": -5982947511565101000, "line_mean": 29.8333333333, "line_max": 84, "alpha_frac": 0.627027027, "autogenerated": false, "ratio": 2.826825127334465, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.8846207140902858, "avg_score": 0.02152900268632167, "num_lines": 54 }
__author__ = 'ashish'

# Cleans the free-text columns of the KDD Cup 2014 data files (Python 2).

import pandas as pd
import shutil
import sys
import os
import re
import gc

# Per-file list of free-text columns that need cleaning.
text_features = {'donations.csv': ['donation_message'],
                 'essays.csv': ['title', 'short_description', 'need_statement', 'essay'],
                 'resources.csv': ["vendor_name", "project_resource_type", "item_name", "item_number"]}

in_dir = ".\data\data_files\\"
out_dir = ".\data\input\\"

# Whitelist pattern: any char that is NOT word/dot/percent/space/slash/hyphen
# gets replaced by a space.
pattern = re.compile(r'[^\w.% /-]')


def clean_text(x):
    """
    Cleans the chunk of text by removing '\r\n' and other characters defined by pattern.
    :param x: text to clean
    :return: cleaned text
    """
    # 1) drop literal "\r\n" sequences, 2) blank out non-whitelisted chars,
    # 3) collapse runs of whitespace into a single space.
    return re.sub(r'\s+', ' ', pattern.sub(' ', re.sub(r"\r\\n", "", str(x))).strip())


def clean_kdd_files():
    """
    Cleans the KDD data files and writes them to a new folder.
    1. Original KDD file must be present in .\data\data_files directory
    2. Output files will be written to .\data\input directory
    :raise: any exception is raised
    """
    try:
        data_files = os.listdir(in_dir)
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)

        # Files with text columns get cleaned; the rest are copied verbatim.
        files_to_clean = set(data_files).intersection(text_features.keys())
        files_to_copy_only = set(data_files).difference(text_features.keys())

        print "Files to Clean: ", files_to_clean
        print "Files to Copy: ", files_to_copy_only

        for file_name in files_to_clean:
            in_file = in_dir + file_name
            out_file = out_dir + file_name
            print "\nIn File: %s \t----\t Out File: %s " % (in_file, out_file)
            print "Reading %s ..." % in_file
            df = pd.read_csv(in_file)
            print "Data dimensions: ", df.shape
            cols_to_clean = text_features[file_name]
            print "Applying clean_text to feature(s): %s ..." % ", ".join(cols_to_clean)
            df[cols_to_clean] = df[cols_to_clean].applymap(clean_text)
            print "Saving cleaned file to %s ..." % out_file
            df.to_csv(out_file, sep=',', encoding='utf-8', header=True, index=False)
            print "Done"
            # Free the frame before loading the next (potentially large) file.
            del df
            gc.collect()

        print "\n"
        for file_name in files_to_copy_only:
            in_file = in_dir + file_name
            out_file = out_dir + file_name
            print "Copying %s AS IS, to %s ..." % (in_file, out_file)
            shutil.copy(in_file, out_file)
            print "Done"

        print "\nHave fun!"

    except Exception, e:
        print e.message
        raise e


if __name__ == "__main__":
    try:
        clean_kdd_files()
    except Exception, e:
        sys.exit(-1)
{ "repo_name": "ashishsnaik/KDDCup2014", "path": "kdd_clean_data.py", "copies": "1", "size": "2709", "license": "mit", "hash": 7743644121689597000, "line_mean": 29.8705882353, "line_max": 103, "alpha_frac": 0.5404208195, "autogenerated": false, "ratio": 3.495483870967742, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4535904690467742, "avg_score": null, "num_lines": null }
from __future__ import absolute_import, division, print_function
import os

# gpi, future
import gpi
from bart.gpi.borg import IFilePath, OFilePath, Command

# bart
import bart
base_path = bart.__path__[0] # library base for executables
import bart.python.cfl as cfl


class ExternalNode(gpi.NodeAPI):
    '''Usage: pics [-l1/-l2] [-r lambda] [-t <trajectory>] <kspace> <coil_maps> <output>

    Parallel-imaging compressed-sensing reconstruction.

    -l1/-l2 toggle l1-wavelet or l2 regularization.
    -r lambda regularization parameter
    -c real-value constraint
    -s step iteration stepsize
    -i maxiter number of iterations
    -t trajectory k-space trajectory

    Generalized regularization options (experimental)
    -R <T>:A:B:C <T> is regularization type (single letter),
    A is transform flags, B is joint threshold flags,
    and C is regularization value. Specify any number
    of regularization terms.

    -R Q:C l2-norm in image domain
    -R I:B:C l1-norm in image domain
    -R W:A:B:C l1-wavelet

    -R T:A:B:C total variation

    Example:
    -R T:7:0:.01 3D isotropic total variation with 0.01 regularization.
    '''

    def initUI(self):
        # GPI node setup: widgets for the regularization choice and strength,
        # plus the k-space/coil-map/trajectory in-ports and the image out-port.

        # Widgets
        self.addWidget('ExclusivePushButtons', 'regularization',
                       buttons=('l1-wavelet', 'l2'), val=0)
        self.addWidget('DoubleSpinBox', 'lamb', val=0.01, min=0, max=1, decimals=5)

        # IO Ports
        self.addInPort('kspace', 'NPYarray')
        self.addInPort('coil_maps', 'NPYarray')
        self.addInPort('traj', 'NPYarray', obligation=gpi.OPTIONAL)
        self.addOutPort('output', 'NPYarray')
        return 0

    def compute(self):
        # Run the external `bart pics` command on the port data and publish
        # the reconstructed image on the output port.  Returns 0 on success.
        kspace = self.getData('kspace')
        coil_maps = self.getData('coil_maps')
        trajectory = self.getData('traj')

        reg = self.getVal('regularization')
        lamb = self.getVal('lamb')

        # load up arguments list
        args = [base_path+'/bart pics']

        if reg == 0:
            args += ['-l1']
        else:
            args += ['-l2']
        args += ['-r {}'.format(lamb)]
        # Fixed solver settings: early-stopping with up to 100 iterations.
        args += ['-e -i 100']

        # setup file for getting data from external command
        if trajectory is not None:
            traj = IFilePath(cfl.writecfl, trajectory, asuffix=['.cfl','.hdr'])
            args += ['-t', traj]

        # Stage the input arrays as cfl/hdr file pairs for bart.
        kspace = IFilePath(cfl.writecfl, kspace, asuffix=['.cfl','.hdr'])
        coil_maps = IFilePath(cfl.writecfl, coil_maps, asuffix=['.cfl','.hdr'])
        output = OFilePath(cfl.readcfl, asuffix=['.cfl','.hdr'])
        args += [kspace, coil_maps, output]

        # Command() runs the external process; its repr is printed for logging.
        print(Command(*args))

        self.setData('output', output.data())

        # Release the temporary cfl files.
        if trajectory is not None:
            traj.close()
        kspace.close()
        coil_maps.close()
        output.close()

        return 0
{ "repo_name": "nckz/bart", "path": "gpi/PICS_GPI.py", "copies": "1", "size": "2968", "license": "bsd-3-clause", "hash": -3354590103709165600, "line_mean": 29.9166666667, "line_max": 101, "alpha_frac": 0.6020889488, "autogenerated": false, "ratio": 3.4391657010428736, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9532549139544397, "avg_score": 0.001741102059695087, "num_lines": 96 }
import numpy as np
import gpi


class ExternalNode(gpi.NodeAPI):
    """Transform coordinates from BNI conventions to BART conventions.

    INPUT: in - a numpy arrary of k-space coordinates in the BNI convention
                i.e. (-0.5, 0.5), dimensions: [readouts, pts, xy(z)]

    OUTPUT: out - a numpy array of k-space coordinates in the BART convention
                i.e. (-mtx/2, mtx/2), dimensions: [zyx, pts, readouts]

    WIDGETS: mtx - the resulting matrix size (assumed square/cubic)
    """

    # initialize the UI - add widgets and input/output ports
    def initUI(self):
        # Widgets
        self.addWidget('SpinBox', 'mtx', val=128, min=1, max=1024)
        # self.addWidget('DoubleSpinBox', 'bar', val=10, min=0, max=100)
        # self.addWidget('PushButton', 'baz', toggle=True)
        # self.addWidget('ExclusivePushButtons', 'qux',
        #                buttons=['Antoine', 'Colby', 'Trotter', 'Adair'], val=1)

        # IO Ports
        self.addInPort('in', 'NPYarray', ndim=3)
        self.addOutPort('out', 'NPYarray', dtype=np.complex64, ndim=3)

    # validate the data - runs immediately before compute
    # your last chance to show/hide/edit widgets
    # return 1 if the data is not valid - compute will not run
    # return 0 if the data is valid - compute will run
    def validate(self):
        # NOTE(review): validation is a stub — in_data is fetched but never
        # checked; the TODO below is still outstanding.
        in_data = self.getData('in')

        # TODO: make sure the input data is valid
        # [your code here]

        return 0

    # process the input data, send it to the output port
    # return 1 if the computation failed
    # return 0 if the computation was successful
    def compute(self):
        coords = self.getData('in').copy()
        mtx = self.getVal('mtx')

        # just transpose first to reverse dimensions
        coords = coords.T

        # adjust by the matrix size: (-0.5, 0.5) -> (-mtx/2, mtx/2)
        # TODO: account for "true resolution"
        coords *= mtx

        # reverse the xyz dimension (xyz -> zyx, the BART ordering)
        coords[:,...] = coords[::-1,...]

        # pad the z-dimension with zeros if the trajectory is 2D
        if coords.shape[0] == 2:
            coords = np.pad(coords, ((0,1), (0,0), (0,0)),
                            mode='constant', constant_values=0)

        # if the trajectory is not 3D at this point, something has gone wrong
        if coords.shape[0] != 3:
            self.log.warn("The final dimensions of the input data must be 2 (xy), or 3 (xyz).")
            return 1

        # BART expects complex single-precision data.
        self.setData('out', np.require(coords, dtype=np.complex64))

        return 0
{ "repo_name": "nckz/bart", "path": "gpi/BNI2BART_Traj_GPI.py", "copies": "1", "size": "2665", "license": "bsd-3-clause", "hash": -332524509410624900, "line_mean": 33.6103896104, "line_max": 95, "alpha_frac": 0.5868667917, "autogenerated": false, "ratio": 3.8125894134477827, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4899456205147783, "avg_score": null, "num_lines": null }
__author__ = 'Ash'

import numpy as np
import time
import http.client, urllib.parse
from pprint import pprint

# One ThingSpeak write key per channel being fed.
API_KEY = ["H671BFO41N0TP246", "VJEFXKQ0AE4LD80D", "QPHVEZKYTKYNXOQZ"]
# Sensor count per spine (paired with API_KEY by position).
n_spine = [8, 8, 1]
# Baseline value for each of the eight simulated sensors.
means = [65, 55, 60, 58, 52, 60, 70, 50]
# ThingSpeak field names: field1 .. field8.
fields = ["field%d" % i for i in range(1, 9)]
headers = {"Content-type": "application/x-www-form-urlencoded",
           "Accept": "text/plain"}


def main():
    """Endlessly POST synthetic, noisy sinusoidal sensor readings to
    ThingSpeak, cycling a phase offset so the values drift over time."""
    phases = np.arange(0, 2, 2/8) * np.pi
    while True:
        for offset in phases:
            for count, api_key in zip(n_spine, API_KEY):
                # Cosine around each sensor's baseline plus uniform noise.
                readings = np.cos(phases + offset)*1 + means + np.random.rand(1, 8)[0]
                payload = dict(zip(fields, readings))
                payload["key"] = api_key
                body = urllib.parse.urlencode(payload)
                conn = http.client.HTTPConnection("api.thingspeak.com:80")
                conn.request("POST", "/update", body, headers)
                response = conn.getresponse()
                pprint(payload)
                print(response.status, response.reason)
                conn.close()
                # Spread the updates evenly over a ~16 second window.
                time.sleep(16/len(API_KEY))


if __name__ == "__main__":
    main()
{ "repo_name": "jcuroboclub/White-Roofs", "path": "fakeData.py", "copies": "1", "size": "1152", "license": "mit", "hash": -7371103323866162000, "line_mean": 32.9117647059, "line_max": 79, "alpha_frac": 0.5529513889, "autogenerated": false, "ratio": 3.272727272727273, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9288814926234126, "avg_score": 0.007372747078629433, "num_lines": 34 }
__author__ = 'ash'

from collections import deque
from sys import maxint
import copy


class Edge:
    """A directed link between a pair of topology nodes, accumulating
    per-period traffic and maintaining running avg/max/min statistics."""

    def __init__(self, node_pair):
        # node_pair: (source_node, destination_node) tuple.
        self.node_pair = node_pair

    def init_weights(self):
        """
        Was made because YAML inits only the given fields
        """
        self.bandhist = deque()
        self.histtime = 3600          # keep history for one hour
        self.hist_growed = False
        self.avgbw = 0                # running average bandwidth
        self.maxb = -1                # maximum bandwidth seen so far
        self.minb = maxint            # minimum bandwidth seen so far
        self.count = 0                # number of completed capture periods
        self.cur_bw_id = -1           # id of the capture period in progress
        self.sim_con_total = 0        # traffic accumulated this period
        self.latency = 0

    def calculate_stats(self):
        """Fold the traffic accumulated for the finished period into the
        running average/max/min statistics."""
        if self.count == 0:
            # Very first sample: it is simultaneously avg, max and min.
            self.avgbw = self.sim_con_total
            self.maxb = self.sim_con_total
            self.minb = self.sim_con_total
        else:
            # Incremental mean: (mean * n + x) / (n + 1).
            self.avgbw = (self.avgbw * self.count + self.sim_con_total) / (self.count + 1)
            self.maxb = max(self.maxb, self.sim_con_total)
            self.minb = min(self.minb, self.sim_con_total)
        self.count += 1

    def append_bandwidth(self, edgeinfo):
        """Record one traffic sample, rolling over statistics whenever a new
        capture period begins."""
        if edgeinfo.bw_id == self.cur_bw_id:
            # Same capture period: keep accumulating concurrent traffic.
            self.sim_con_total += edgeinfo.value
        else:
            # New period: close out the finished one, then restart the
            # accumulator from this sample's value.
            self.cur_bw_id = edgeinfo.bw_id
            self.calculate_stats()
            self.sim_con_total = edgeinfo.value

    def __repr__(self):
        return "<Edge: %s : %s" % (self.node_pair, self.sim_con_total)

    @staticmethod
    def edges_list_to_dict(edge_list):
        """Index edges by their (src, dst) node pair; also registers a fresh
        reverse edge per entry so opposite-direction traffic is tracked."""
        mapping = dict()
        for fwd in edge_list:
            src, dst = fwd.node_pair[0], fwd.node_pair[1]
            mapping[(src, dst)] = fwd
            rev = Edge((dst, src))
            rev.init_weights()
            mapping[(dst, src)] = rev
        return mapping


class EdgeInfo:
    """A single traffic sample: `value` observed during capture period
    `bw_id`."""

    def __init__(self, value, bw_id):
        self.value = value
        self.bw_id = bw_id
{ "repo_name": "ashepelev/TopologyWeigher", "path": "source/topology_weigher/TopologyWeigher/Edge.py", "copies": "1", "size": "2943", "license": "apache-2.0", "hash": 2932807077492739000, "line_mean": 34.9024390244, "line_max": 128, "alpha_frac": 0.584777438, "autogenerated": false, "ratio": 3.7682458386683737, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.48530232766683734, "avg_score": null, "num_lines": null }
__author__ = 'ash'

from collections import deque
from sys import maxint


class Edge:
    # Test-framework variant of the topology Edge: accumulates per-period
    # traffic on a node pair and keeps running avg/max/min statistics.

    def __init__(self,node_pair,maxb):
        # node_pair: (source, destination) tuple; maxb: initial max bandwidth.
        self.node_pair = node_pair
        self.maxb = maxb

    def init_weights(self):
        """
        Was made because YAML inits only the given fields
        """
        self.bandhist = deque()
        self.histtime = 3600 # keep history for one hour
        self.hist_growed = False
        self.avgbw = 0 # average bandwidth
        self.maxb = -1 # maximum bandwidth
        self.minb = maxint # minimum bandwidth
        self.count = 0
        self.cur_bw_id = -1 # current bandwidth capture period
        self.sim_con_total = 0 # sum of simultaneous (concurrent) connections on this edge

    def calculate_stats(self):
        """
        Re-calculates the statistics
        """
        if self.count == 0: # if it is the first portion of information
            # First sample is simultaneously the average, max and min.
            self.avgbw = self.sim_con_total
            self.maxb = self.sim_con_total
            self.minb = self.sim_con_total
            self.count += 1
        else: # we already have first portion
            self.avgbw = (self.avgbw * self.count + self.sim_con_total) / (self.count + 1) # standard (mean * n + x(i+1))/(n+1)
            self.count += 1
            if self.sim_con_total > self.maxb:
                self.maxb = self.sim_con_total
            if self.sim_con_total < self.minb:
                self.minb = self.sim_con_total

    def append_bandwidth(self,edgeinfo):
        """
        Checking the current traffic sniffing period and calculating the value on the edge
        """
        if edgeinfo.bw_id != self.cur_bw_id: # if new portion of bandwidth statistics
            self.cur_bw_id = edgeinfo.bw_id
            self.calculate_stats() # in sim_con_total - we have accumulated traffic of the previous step. Use it to calc stat
            self.sim_con_total = edgeinfo.value # the value of first traffic on channel of the new step
        else:
            self.sim_con_total += edgeinfo.value # if the step is the same - accumulating the fraffic on edge

    # For a while we don't need to keep the whole history.
    # Just the main characteristics - avg, max & min, count
    """
    self.bandhist.append(edgeinfo)
    if not self.hist_growed:
        if self.bandhist[len(self.bandhist)-1].time - self.bandhist[0] > self.histtime:
            self.hist_growed = True
            self.bandhist.popleft()
    else:
        self.bandhist.popleft()
    self.calc_weights()
    """

    @staticmethod
    def edges_list_to_dict(edge_list):
        """
        Get dict from the list as it is more comfortable to work with
        """
        # Keyed by the (source, destination) tuple of each edge.
        res = dict()
        for edge in edge_list:
            res[(edge.node_pair[0]),(edge.node_pair[1])] = edge
        return res


class EdgeInfo:
    """
    Class describes the portion of traffic information for the bw_id period
    """
    def __init__(self,value,bw_id):
        self.value = value
        #self.time = time
        self.bw_id = bw_id
{ "repo_name": "ashepelev/TopologyWeigher", "path": "test_framework/Edge.py", "copies": "1", "size": "3075", "license": "apache-2.0", "hash": 451137867327215400, "line_mean": 33.5617977528, "line_max": 128, "alpha_frac": 0.5798373984, "autogenerated": false, "ratio": 3.8198757763975157, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9856648021687715, "avg_score": 0.008613030621960052, "num_lines": 89 }
__author__ = 'ash'

# Nova scheduler weigher that ranks compute hosts by network topology load
# (recent inter-node traffic), using statistics collected in the nova DB.

from oslo.config import cfg

from nova.scheduler import weights
from nova.db import api as db_api
from nova.openstack.common import log as logging

import TopologyWeigher.utils as topoutils
from TopologyWeigher.BandwidthHistory import BandwidthHistory as BandwidthHistory
from TopologyWeigher.Scheduler import Scheduler as Scheduler

# nova.conf options controlling the topology-aware weighing.
topology_weight_opts = [
    cfg.IntOpt('topology_statistics_time',
               default=3600,
               help='In seconds. For how many time should the scheduler'
                    'handle the statistics'),
    cfg.IntOpt('channel_max_bandwidth',
               default=100,
               help='In Mbits. The bandwidth of the channels in topology'),
    cfg.IntOpt('traffic_multiplier',
               default=10,
               help='Multiplies the traffic value to handle with normalizing.'
                    'The more value is - the more scheduler will ignore the disbalance load'
                    'between nodes'),
    cfg.BoolOpt('traffic_enable_topology_statistics',
                default=False,
                help='Collect the traffic and ping statistics for scheduling'),
    cfg.StrOpt('topology_description_path',
               default=None,
               help='Full path to directory with describing of the topology.'
                    'The directory should have nodes.yaml and edges.yaml files.')
]

CONF = cfg.CONF
CONF.register_opts(topology_weight_opts)

LOG = logging.getLogger(__name__)


class TopologyWeighedObject(weights.WeighedHost):
    """
    Implementing new version of WeighedObject
    """
    def __init__(self, obj, weight):
        super(TopologyWeighedObject, self).__init__(obj,weight)

    def set_ip_id(self,ip_addr,id):
        # NOTE(review): only obj.ip is set here; the `id` argument is
        # ignored (see set_id below) — confirm intent.
        self.obj.ip = ip_addr

    def set_id(self,id):
        self.obj.id = id

    @staticmethod
    def to_weight_list(weighed_obj_list,scheduler_dict,node_by_hostname):
        """
        Transform scheduler answer to nova.scheduler standard type
        :param weighed_obj_list: The input for weigher.
        Contains the filtered compute-nodes list
        :param scheduler_dict: The weigher work result
        :param node_by_hostname: The dict in format <node hostname>:<node object>
        :return: a list in order of weighed_obj_list contains the weights of the nodes
        """
        weights = []
        for obj in weighed_obj_list:
            # Look up each host's weight by its topology node id.
            weights.append(scheduler_dict[node_by_hostname[obj.obj.host].id])
        return weights


class TopologyWeigher(weights.BaseHostWeigher):
    """
    Implementing new version of a Weigher
    """
    minval = 0.0
    maxval = 1.0

    def weight_multiplier(self):
        return -1.0 # as more traffic and latency is worser #

    def _weigh_object(self, obj, weight_properties):
        # Unused: weighing is done for all hosts at once in weigh_objects.
        pass

    def weigh_objects(self, weighed_obj_list, weight_properties):
        # Weigh all filtered hosts at once from the flavor's
        # 'topology_priority' setting plus recent traffic statistics.
        # NOTE(review): the early `return` paths below return None rather
        # than a weight list — confirm the caller tolerates that.
        topology_priority = weight_properties['instance_type'].get('topology_priority',None)
        if topology_priority is None:
            return
        context = weight_properties['context']
        enabled_collector = CONF.traffic_enable_topology_statistics
        topology_path = CONF.topology_description_path
        if not enabled_collector:
            LOG.error("TopologyWeigher won't worked: the traffic collector is disabled in nova.conf")
            return
        if topology_path is None:
            if not topoutils.only_check_db(context):
                LOG.error("TopologyPath doesn't specified in nova.conf and no topology description in db")

        # loading scheduler args of priority
        # loaded from the weigher input - weight_properties
        # Transform the priority input string to the Task object
        task = topoutils.task_from_conf_topopriority(topology_priority)

        # The time before now we need the statistics
        topology_statistics_time = CONF.topology_statistics_time

        # Maximum bandwidth of channels in topology
        # TODO specifiying individual bandwidth for each channel
        max_bandw = CONF.channel_max_bandwidth

        # Multiplier for traffic value
        # The bigger it is - the more scheduler will ignore the compute nodes' resources disbalance
        traf_mult = CONF.traffic_multiplier

        # Get from the db the average traffic for last topology_statistics_time seconds
        traffic_info = db_api.traffic_get_avg(context, topology_statistics_time)

        # Get the topology description information
        (self.node_list,self.edge_list) = topoutils.get_nodes_and_edges(context)

        # Creating the object to aggregate the statistics
        bw_hist_traffic = BandwidthHistory(self.node_list,self.edge_list)

        # Iterate through the traffic load information
        # And aggregate information on the edges
        for tr in traffic_info:
            (val,src,dst) = tr
            src = int(src)
            dst = int(dst)
            # val*8 = value from byte - to bits/s
            # *traf_mult - multiplies - regulates how scheduler will ignore the disbalance load on the nodes
            # max_bandw*1024*1024 = to bit/s
            # /(max_bandw*1024*1024) = normalize based on the maximum bandwidth
            val = (val*8*traf_mult)/(max_bandw*1024*1024)
            bw_hist_traffic.append_traffic((src,dst),val,0)

        # Get the distance matrix, based on the traffic
        dist = Scheduler.build_distances(bw_hist_traffic)

        # Get the dict <hostname>:<node object>
        node_list_by_hostname = topoutils.list_to_endpoints_dict(self.node_list)

        # The weigher works and result the weights of the nodes
        weights = Scheduler.schedule(dist,task,self.node_list)

        # Transforming weigher result to the one accepted by nova.scheduler
        return TopologyWeighedObject.to_weight_list(weighed_obj_list,weights,node_list_by_hostname)
{ "repo_name": "ashepelev/TopologyWeigher", "path": "source/topology_weigher/topology.py", "copies": "1", "size": "5922", "license": "apache-2.0", "hash": -4144208654003840500, "line_mean": 41.3, "line_max": 108, "alpha_frac": 0.6501182033, "autogenerated": false, "ratio": 4.23, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5380118203300001, "avg_score": null, "num_lines": null }
__author__ = 'ash'

import networkx as nx
import matplotlib.pyplot as plt


class GraphDrawer:
    """Renders the topology (nodes + edges) as a networkx/matplotlib figure."""

    def __init__(self, node_list, edges_list):
        self.nodes = node_list
        self.edges = edges_list

    def get_edges(self):
        """Return the raw (src, dst) node-id pairs of every Edge object."""
        return [edge.node_pair for edge in self.edges]

    def get_labels(self):
        """Map graph position index -> human-readable node label."""
        labels = {}
        for idx, node in enumerate(self.nodes):
            try:
                labels[idx] = str(node.id) + " " + node.hostname
            except AttributeError:
                # Switches carry no hostname attribute.
                labels[idx] = "Switch id " + str(node.id)
        return labels

    def draw_graph(self, graph, labels=None, graph_layout='shell',
                   node_size=1600, node_color='blue', node_alpha=0.3,
                   node_text_size=12,
                   edge_color='blue', edge_alpha=0.3, edge_tickness=1,
                   edge_text_pos=0.3,
                   text_font='sans-serif', draw_bandwidth='max'):
        """Draw the graph; edge labels show max or avg bandwidth per edge."""
        G = nx.Graph()
        for src, dst in graph:
            G.add_edge(src, dst)

        # Pick the layout function; 'shell' is the fallback. Spring usually
        # works best for these topologies.
        layout_funcs = {
            'spring': nx.spring_layout,
            'spectral': nx.spectral_layout,
            'random': nx.random_layout,
        }
        graph_pos = layout_funcs.get(graph_layout, nx.shell_layout)(G)

        nx.draw_networkx_nodes(G, graph_pos, node_size=node_size,
                               alpha=node_alpha, node_color=node_color)
        nx.draw_networkx_edges(G, graph_pos, width=edge_tickness,
                               alpha=edge_alpha, edge_color=edge_color)
        nx.draw_networkx_labels(G, graph_pos, labels,
                                font_size=node_text_size,
                                font_family=text_font)

        # Choose which bandwidth figure to print on each edge.
        if draw_bandwidth == 'max':
            bw_values = [edge.maxb for edge in self.edges]
        elif draw_bandwidth == 'avg':
            bw_values = [edge.avgbw for edge in self.edges]
        else:
            bw_values = []
            print("Wrong draw_bandwidth type param")
        edge_labels = dict(zip(graph, bw_values))
        nx.draw_networkx_edge_labels(G, graph_pos, edge_labels=edge_labels,
                                     label_pos=edge_text_pos)

        plt.show()
{ "repo_name": "ashepelev/TopologyWeigher", "path": "test_framework/GraphDrawer.py", "copies": "1", "size": "2735", "license": "apache-2.0", "hash": -4982703674246230000, "line_mean": 30.4482758621, "line_max": 77, "alpha_frac": 0.526142596, "autogenerated": false, "ratio": 3.8684582743988685, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4894600870398868, "avg_score": null, "num_lines": null }
__author__ = 'ash'

import Node
import Edge
import YamlDoc
from Scheduler import Task
import socket
import fcntl
from struct import *

from nova import db
from nova.openstack.common import log as logging

LOG = logging.getLogger(__name__)


def get_topology(path=None, nodes_file="nodes.yaml", edges_file="edges.yaml"):
    """
    Gets the information about topology from the .yaml files
    :param path: directory path
    :param nodes_file: Not-strict yaml file with nodes
    :param edges_file: Not-strict yaml file with edges
    :return: pair with node list and edge list, or None when no path is given
    """
    if path is None:
        LOG.error("No topology path specified for TopologyWeigher")
        return
    n_path = path + "/"
    yd = YamlDoc.YamlDoc(n_path + nodes_file, n_path + edges_file)
    return (yd.node_list, yd.edge_list)


def list_to_endpoints_dict(node_list):
    """Transforms node list to dict <hostname>:<node object> (compute nodes only)."""
    node_dict = {}
    for x in node_list:
        if isinstance(x, Node.ComputeNode):
            node_dict[x.hostname] = x
    return node_dict


def get_node_dict_by_id(node_list):
    """Transforms node list to dict <node_id>:<node object>."""
    node_dict = dict()
    for x in node_list:
        # As every node is a child of Node.Endpoint
        if isinstance(x, Node.Endpoint):
            node_dict[x.id] = x
    return node_dict


def get_node_dict(node_list):
    """Transforms node list to dict <node ip addr>:<node id>."""
    node_dict = dict()
    for x in node_list:
        # As Switch doesn't have ip addr
        if not isinstance(x, Node.Switch):
            node_dict[x.ip_addr] = x.id
    return node_dict


def get_router_id_ip(node_list):
    """Gets the router (id, ip) pair; logs an error when no router exists."""
    for x in node_list:
        if isinstance(x, Node.Router):
            # BUGFIX: Router stores its address as 'ip_addr' (see Node.Router);
            # the old access `x.ip` raised AttributeError.  The traffic_monitor
            # copy of this helper already used 'ip_addr'.
            return (x.id, x.ip_addr)
    LOG.error("TopologyWeigher: No router found")


def get_hosts_id(src_ip, dst_ip, node_dict, router_id):
    """
    Transforms packet's src/dst IPs to node's ids.
    An IP that is not a known node is treated as external traffic and mapped
    to the router.
    """
    if src_ip not in node_dict:
        src_id = router_id
    else:
        src_id = node_dict[src_ip]
    if dst_ip not in node_dict:
        dst_id = router_id
    else:
        dst_id = node_dict[dst_ip]
    return (src_id, dst_id)


def get_my_id(node_dict, my_ip):
    """Returns id of this node."""
    return node_dict[my_ip]


def get_ip_address(ifname):
    """Gets the ip address of the given network interface (Linux ioctl)."""
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    return socket.inet_ntoa(fcntl.ioctl(
        s.fileno(),
        0x8915,  # SIOCGIFADDR
        pack('256s', ifname[:15])
    )[20:24])


def task_from_conf_topopriority(topology_priority):
    """
    Constructs a Task from the "node:prio,node:prio,..." string that arrives
    with filter_properties at boot time.
    """
    priors = topology_priority.split(',')
    prior_list = []
    for prior in priors:
        prior_pare = prior.split(':')
        node = int(prior_pare[0])
        priority = int(prior_pare[1])
        prior_list.append((node, priority))
    return Task(prior_list)


def get_nodes_and_edges(context):
    """Gets the description of nodes and edges from the db."""
    nodes = db.node_get(context)
    edges = db.edge_get(context)
    node_list = []
    edges_list = []
    for node in nodes:
        node_list.append(create_node_from_db(node))
    for edge in edges:
        edges_list.append(create_edge_from_db(edge))
    return (node_list, edges_list)


def create_node_from_db(node):
    """Constructs a Node object of the matching subclass from a db row."""
    if node.name == "Switch":
        return Node.Switch(long(node.node_id))
    if node.name == "Router":
        return Node.Router(long(node.node_id), str(node.ip_addr))
    if node.name == "CloudController":
        return Node.CloudController(long(node.node_id), str(node.ip_addr), str(node.hostname))
    if node.name == "ComputeNode":
        return Node.ComputeNode(long(node.node_id), str(node.ip_addr), str(node.hostname))


def create_edge_from_db(edge):
    """Construct Edge object from the db result."""
    return Edge.Edge((int(edge.start), int(edge.end)))


def only_check_db(ctxt):
    """True when the db already holds both a node and an edge description."""
    node_count = db.check_node(ctxt)
    node_count = int(node_count[0])
    edge_count = db.check_edge(ctxt)
    edge_count = int(edge_count[0])
    return not ((node_count == 0) or (edge_count == 0))


def only_load_db(ctxt, topology_description_path):
    """
    Reads the local yaml topology description and stores every node and edge
    into the db.
    NOTE(review): get_topology returns None on a missing path, which makes
    the unpack below raise — same as the original behavior.
    """
    (node_list, edge_list) = get_topology(topology_description_path)
    for node in node_list:
        resources = {}
        resources['node_id'] = node.id
        resources['name'] = get_node_type(node)
        if not isinstance(node, Node.Switch):
            resources['ip_addr'] = node.ip_addr
        if isinstance(node, Node.Endpoint):
            resources['hostname'] = node.hostname
        db.node_add(ctxt, resources)
    for edge in edge_list:
        resources = {}
        resources['start'] = edge.node_pair[0]
        resources['end'] = edge.node_pair[1]
        db.edge_add(ctxt, resources)


def check_and_set_topology(ctxt, topology_description_path):
    """
    After nova-scheduler started:
    if there is no description of topology in db, load the topology from the
    local yaml files into the db.
    (Refactored: the loading loops duplicated only_load_db verbatim; it is
    now called directly.)
    """
    node_count = db.check_node(ctxt)
    node_count = int(node_count[0])
    edge_count = db.check_edge(ctxt)
    edge_count = int(edge_count[0])
    if node_count == 0 or edge_count == 0:
        LOG.debug("TopologyWeigher: There is no topology description in db. Loading from local files...")
        only_load_db(ctxt, topology_description_path)


def get_node_type(node):
    """Map a Node instance back to its db type-name string."""
    if isinstance(node, Node.Switch):
        return "Switch"
    if isinstance(node, Node.Router):
        return "Router"
    if isinstance(node, Node.ComputeNode):
        return "ComputeNode"
    if isinstance(node, Node.CloudController):
        return "CloudController"
{ "repo_name": "ashepelev/TopologyWeigher", "path": "source/topology_weigher/TopologyWeigher/utils.py", "copies": "1", "size": "6556", "license": "apache-2.0", "hash": -7708099360426881000, "line_mean": 29.4930232558, "line_max": 105, "alpha_frac": 0.6191275168, "autogenerated": false, "ratio": 3.470619375330863, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.45897468921308626, "avg_score": null, "num_lines": null }
__author__ = 'ash'

import Node
import YamlDoc
import socket
import fcntl
from struct import *
from time import sleep

from nova.openstack.common import log as logging

LOG = logging.getLogger(__name__)


def get_topology(path=None, nodes_file="nodes.yaml", edges_file="edges.yaml"):
    """
    Gets the information about topology from the .yaml files
    :param path: directory path
    :param nodes_file: Not-strict yaml file with nodes
    :param edges_file: Not-strict yaml file with edges
    :return: pair with node list and edge list, or None when no path is given
    """
    if path is None:
        LOG.error("No topology path specified for TopologyWeigher")
        return
    n_path = path + "/"
    yd = YamlDoc.YamlDoc(n_path + nodes_file, n_path + edges_file)
    return (yd.node_list, yd.edge_list)


def list_to_endpoints_dict(node_list):
    """Transforms node list to dict <hostname>:<node object> (compute nodes only)."""
    node_dict = {}
    for x in node_list:
        if isinstance(x, Node.ComputeNode):
            node_dict[x.hostname] = x
    return node_dict


def get_node_dict_by_id(node_list):
    """Transforms node list to dict <node_id>:<node object>."""
    node_dict = dict()
    for x in node_list:
        if isinstance(x, Node.Endpoint):
            node_dict[x.id] = x
    return node_dict


def get_node_dict(node_list):
    """Transforms node list to dict <node ip addr>:<node id>."""
    node_dict = dict()
    for x in node_list:
        # Switches carry no ip address.
        if not isinstance(x, Node.Switch):
            node_dict[x.ip_addr] = x.id
    return node_dict


def get_router_id_ip(node_list):
    """Gets the router (id, ip) pair; logs an error when no router exists."""
    for x in node_list:
        if isinstance(x, Node.Router):
            return (x.id, x.ip_addr)
    LOG.error("Traffic-Monitor: No router found")


def get_hosts_id(src_ip, dst_ip, node_dict, router_id):
    """
    Transforms packet's src/dst IPs to node's ids.
    An IP that is not a known node is treated as external and mapped to the
    router.
    """
    if src_ip not in node_dict:
        src_id = router_id
    else:
        src_id = node_dict[src_ip]
    if dst_ip not in node_dict:
        dst_id = router_id
    else:
        dst_id = node_dict[dst_ip]
    return (src_id, dst_id)


def get_my_id(node_dict, my_ip):
    """Returns id of this node."""
    return node_dict[my_ip]


def get_ip_address(if_name):
    """Gets the ip address of the given network interface (Linux ioctl)."""
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    return socket.inet_ntoa(fcntl.ioctl(
        s.fileno(),
        0x8915,  # SIOCGIFADDR
        pack('256s', if_name[:15])
    )[20:24])


def cycle_conductor_call(func, args):
    """
    Hack function for conductor calls.
    Tries to call function each second until done.
    """
    while 1:
        try:
            result = func(args)
            return result
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and made this retry loop unkillable.
            # Obviously a remote error - NoSuchMethod - the RPC magic fails
            # to work sometimes, so retry after a short pause.
            sleep(1)


def cycle_conductor_add(func, context, values):
    """
    Hack function for conductor calls.
    Tries to call function each second until done.
    """
    while 1:
        try:
            result = func(context, values)
            return result
        except Exception:
            # BUGFIX: same bare-except fix as cycle_conductor_call above.
            sleep(1)


def get_nodes(context, api):
    """
    Function gets topology description via nova-conductor
    :param context: security context
    :param api: conductor API
    :return: node list, or False when the db holds no topology description
    """
    # Get the count of the nodes in db
    node_count = api.check_node(context)
    node_count = int(node_count[0])
    if node_count == 0:
        # scheduler may not yet have populated the topology tables;
        # wait a little and re-check once.
        sleep(5)
        node_count = api.check_node(context)
        node_count = int(node_count[0])
    # Still empty: the scheduler has not started.
    # Possible case - launching this service standalone.
    if node_count == 0:
        LOG.error("Traffic statistics cannot be launched: there is no info about topology in db. Check that nova-scheduler is launched")
        return False
    nodes = api.node_get(context)
    node_list = []
    for node in nodes:
        node_list.append(create_node_from_db(node))
    return node_list


def create_node_from_db(node):
    """Based on the data from db construct instances of Node classes."""
    if node['name'] == "Switch":
        return Node.Switch(int(node['node_id']))
    if node['name'] == "Router":
        return Node.Router(int(node['node_id']), str(node['ip_addr']))
    if node['name'] == "CloudController":
        return Node.CloudController(int(node['node_id']), str(node['ip_addr']), str(node['hostname']))
    if node['name'] == "ComputeNode":
        return Node.ComputeNode(int(node['node_id']), str(node['ip_addr']), str(node['hostname']))
{ "repo_name": "ashepelev/TopologyWeigher", "path": "source/traffic_monitor/TopologyWeigher/utils.py", "copies": "1", "size": "4966", "license": "apache-2.0", "hash": -3655082678123608600, "line_mean": 30.0375, "line_max": 136, "alpha_frac": 0.625453081, "autogenerated": false, "ratio": 3.547142857142857, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4672595938142857, "avg_score": null, "num_lines": null }
__author__ = 'ash'

import numpy as np
from sets import Set
import sys
#import pulp
import Node


class Task:
    """A placement request: VM dependencies plus storage/public priorities."""
    def __init__(self, vm_dep_list, storage_priority, public_priority):
        self.vm_dep_list = vm_dep_list
        self.storage_priority = storage_priority
        self.public_priority = public_priority

    @staticmethod
    def example_task():
        """Return a small hard-coded Task for manual testing."""
        # list of dependencies. format: (<compute_node_with_vm_id>,<priority>)
        vm_dep_list = [(7,3),(6,4),(13,5),(9,4)]
        storage_priority = 4
        public_priority = 4
        task = Task(vm_dep_list,storage_priority,public_priority)
        return task


class Scheduler:
    """Shortest-route computation over the topology plus a simple placement policy."""

    def __init__(self, node_list, edges_list):
        self.node_list = node_list
        self.edge_list = edges_list
        self.dim = len(node_list)
        # "no direct link" marker; assumes real path costs stay well below it
        self.infinity = 10000
        # "no predecessor yet" marker for Dijkstra
        self.undefined = -1

    def make_adjacency_matrix(self):
        """Return a dim x dim matrix: 1 for a direct link, self.infinity otherwise."""
        matrix = [[self.infinity for x in xrange(self.dim)] for y in xrange(self.dim)]
        for edge in self.edge_list:
            i,j = edge.node_pair
            test = matrix[i][j]  # NOTE(review): unused read, kept as-is
            # links are undirected, so mark both directions
            matrix[i][j] = int(1)
            matrix[j][i] = int(1)
        return matrix

    def min_distance(self,dist,q):
        """
        Finds in dist minimal distance with indexes from the queue q
        """
        min = sys.maxint  # NOTE(review): shadows the builtin `min`
        minind = -1
        for elem in q:
            if (dist[elem] < min):
                min = dist[elem]
                minind = elem
        return minind

    def dijkstra(self,matrix,src):
        """
        Standard Dijkstra algorithm.
        For source finds shortest pathes to every other node.
        Returns (dist, route_list): route_list[t] is the node sequence
        src..t inclusive.
        """
        dist = [self.infinity for x in xrange(self.dim)]
        previous = [self.undefined for x in xrange(self.dim)]
        route_list = [[] for x in xrange(self.dim)]
        dist[src] = 0
        q = Set()
        for i in range(0,self.dim):
            q.add(i)
        while (len(q) > 0):
            if (len(q) == self.dim):
                u = src  # first iteration: start from the source itself
            else:
                u = self.min_distance(dist,q)
            q.remove(u)
            # u's distance is final once popped: rebuild the route src -> u
            # by walking the predecessor chain backwards.
            target = u
            path_node = u
            while previous[path_node] != self.undefined:
                route_list[target].append(path_node)
                path_node = previous[path_node]
            route_list[target].append(src)
            route_list[target].reverse() # as we aggregate it reverse
            # relax all edges leaving u
            for j in range(0,self.dim):
                if j == u:
                    continue
                alt = dist[u] + matrix[u][j]
                if alt < dist[j]:
                    dist[j] = alt
                    previous[j] = u
        return (dist,route_list)

    def calc_routes(self):
        """
        With dijkstra algorithm builds the route matrix in the whole topology
        """
        matrix = self.make_adjacency_matrix()
        route_matrix = []
        for i in range(0,self.dim):
            (dist, route_list) = self.dijkstra(matrix,i)
            route_matrix.append([])
            for j in range(0,self.dim):
                rt = Route(dist[j],route_list[j])
                route_matrix[i].append(rt)
        return route_matrix

    @staticmethod
    def build_distances(bw_hist):
        """
        Takes the information about the weights on edges and builds the
        matrix of distances between nodes: dist[i][j] is the sum of the
        average bandwidth over every edge on the i -> j route.
        """
        # assuming that edge_list has changed after TrafficGen
        route_matrix = bw_hist.route_matrix
        edge_dict = bw_hist.edge_dict
        dim = len(route_matrix)
        dist = [[0 for x in range(0,dim)] for y in range(0,dim)]
        for i in range(0,dim):
            for j in range(0,dim):
                route = route_matrix[i][j].route
                route_sum = 0
                for k in range(0,len(route)-1):
                    (v1,v2) = (route[k],route[k+1])
                    # the edge may be keyed in either direction
                    if edge_dict.has_key((v1,v2)):
                        route_sum += edge_dict[(v1,v2)].avgbw
                    else:
                        route_sum += edge_dict[(v2,v1)].avgbw
                dist[i][j] = route_sum
        return dist

    @staticmethod
    def prepare_priority_list(task,node_list):
        """
        Takes the information about the task
        And constructs the list of pairs : (<node>,<priority>)
        """
        # construct (<storage>,<priority>) list
        st_dep_list = []
        for x in node_list:
            if type(x) is Node.Storage:
                st_dep_list.append((x.id,task.storage_priority))
        # construct public priority list
        pub_dep_list = []
        for x in node_list:
            if type(x) is Node.NetworkNode:
                pub_dep_list.append((x.id,task.public_priority))
        # append storage/public priorities to the vm dep_list
        priorities = []
        priorities.extend(task.vm_dep_list)
        priorities.extend(st_dep_list)
        priorities.extend(pub_dep_list)
        return priorities

    @staticmethod
    def schedule(dist,task,node_list):
        """
        Simple scheduler.
        For every compute node, take the worst (maximum) priority-weighted
        distance to the task's prior nodes; return the id of the compute
        node that minimizes that worst case.
        """
        priorities = Scheduler.prepare_priority_list(task,node_list)
        min_dist = sys.maxint  # NOTE(review): unused, kept as-is
        min_glob = sys.maxint
        min_id = -1
        for node in node_list:
            if not isinstance(node,Node.ComputeNode):
                continue
            max_route = 0
            for prior in priorities:
                traf = dist[node.id][prior[0]]*prior[1]
                if traf > max_route:
                    # We are searching for maximum traffic on route link
                    max_route = traf
            if max_route < min_glob:
                min_glob = max_route
                min_id = node.id
        return min_id

    def print_route(self, route_matrix):
        """Dump every route and its distance to stdout (debug helper)."""
        for i in range(0,self.dim):
            for j in range(0,self.dim):
                sys.stdout.write("From " + str(i) + " to " + str(j) + " dist " + str(route_matrix[i][j].dist) + " Route: ")
                print route_matrix[i][j].route


class Route:
    """A shortest path: total distance plus the node sequence."""
    def __init__(self,dist,route):
        self.dist = dist
        self.route = route
{ "repo_name": "ashepelev/TopologyWeigher", "path": "test_framework/Scheduler.py", "copies": "1", "size": "6401", "license": "apache-2.0", "hash": 3921595605400946000, "line_mean": 32.3385416667, "line_max": 123, "alpha_frac": 0.5267926886, "autogenerated": false, "ratio": 3.80785246876859, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.483464515736859, "avg_score": null, "num_lines": null }
__author__ = 'ash'

import random
import time


class TrafficGen:
    """Synthetic traffic generator used to exercise the bandwidth history."""

    def __init__(self, node_list, bwhist):
        self.node_list = node_list
        # (src, dst) -> bytes accumulated in the current period
        self.traffic = dict()
        self.bw_hist = bwhist
        # seconds between bandwidth dumps
        self.bw_refresh = 2
        # period counter; required for recording simultaneous channel usage
        self.bw_id = 0

    def generator(self):
        """
        Generating traffic - packets with dst, src and len.
        Every bw_refresh seconds dumps the aggregated info to calculate the
        bandwidth.  Runs for 10 seconds (test setting).
        NOTE(review): time.clock() was removed in Python 3.8 — this module
        targets Python 2.
        """
        self.start = time.clock()
        worktime = time.clock()
        while True:
            if time.clock() - worktime > 10:
                # for tests. Get stat for 10 seconds
                break
            rand = random.randint(0, 5000)  # there is a chance for a packet to appear
            if rand == 0:
                capt_time = time.clock()
                if capt_time - self.start > self.bw_refresh:
                    # the refresh period elapsed: flush and start a new one
                    self.process_bandwidth(capt_time)
                    self.bw_id += 1             # next period id
                    self.start = capt_time      # reset the start time
                    self.traffic.clear()        # drop the finished period's counters
                (src, dst) = self.example_load()
                if src == dst:
                    # BUGFIX: the original incremented dst and then computed
                    # `dst % len(...)` but discarded the result, so dst could
                    # end up equal to len(node_list).  Apply the modulo.
                    dst = (dst + 1) % len(self.node_list)
                length = random.randint(500, 1500)
                pk = Packet(src, dst, length)
                if (src, dst) not in self.traffic:
                    self.traffic[(src, dst)] = 0
                # accumulate the length of packets in our history dict
                self.traffic[(src, dst)] += pk.len

    def process_bandwidth(self, capt_time):
        """Convert the accumulated per-flow byte counts into bandwidths."""
        print("Bandwidth refresh " + str(self.bw_refresh) + " seconds")
        for k in self.traffic.keys():
            # simple bandwidth formula; capt_time is the last time record
            # in this period
            bandwidth = self.traffic[k] / (capt_time - self.start)
            (src, dst) = k
            # Hand the sample to the history aggregator.
            self.bw_hist.append((src, dst), bandwidth, self.bw_id)

    def example_load(self):
        """
        Example distribution:
        9 to 3 - heavy
        8 to 10 - heavy
        0 to * - low
        """
        nodelen = len(self.node_list)
        rand = random.randint(0, nodelen * 3)
        if rand > 2 * nodelen:
            (src, dst) = (9, 3)
        elif rand <= 2 * nodelen and rand >= nodelen:
            (src, dst) = (8, 10)
        else:
            (src, dst) = (0, random.randint(0, nodelen - 1))
        return (src, dst)


class Packet:
    """
    Class describes the packet info.
    """
    def __init__(self, src, dst, length):
        self.src = src
        self.dst = dst
        self.len = length
{ "repo_name": "ashepelev/TopologyWeigher", "path": "test_framework/TrafficGen.py", "copies": "1", "size": "2990", "license": "apache-2.0", "hash": 5452045898563247000, "line_mean": 36.8607594937, "line_max": 153, "alpha_frac": 0.5444816054, "autogenerated": false, "ratio": 3.9707835325365206, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.501526513793652, "avg_score": null, "num_lines": null }
__author__ = 'ash'

import Scheduler
import Edge


class BandwidthHistory:
    """Aggregates observed per-route traffic samples onto individual edges."""

    def __init__(self, node_list, edge_list):
        # Precompute all shortest routes once; every traffic sample is
        # mapped through this matrix onto the edges it traverses.
        sched = Scheduler.Scheduler(node_list, edge_list)
        self.route_matrix = sched.calc_routes()
        # Reset every edge's weights to their defaults before accumulating.
        for edge in edge_list:
            edge.init_weights()
        self.edge_dict = Edge.Edge.edges_list_to_dict(edge_list)

    def append(self, pair, value, bw_id):
        """
        Spread one (src, dst) bandwidth sample over every edge (channel)
        on that route.
        """
        src, dst = pair
        hops = self.route_matrix[src][dst].route
        info = Edge.EdgeInfo(value, bw_id)
        # Walk consecutive hop pairs from src to dst; an edge may be keyed
        # in either direction in the dict.
        for v1, v2 in zip(hops, hops[1:]):
            if (v1, v2) in self.edge_dict:
                self.edge_dict[(v1, v2)].append_bandwidth(info)
            else:
                self.edge_dict[(v2, v1)].append_bandwidth(info)
{ "repo_name": "ashepelev/TopologyWeigher", "path": "test_framework/BandwidthHistory.py", "copies": "1", "size": "1066", "license": "apache-2.0", "hash": -2486157133299029000, "line_mean": 27.8108108108, "line_max": 91, "alpha_frac": 0.5900562852, "autogenerated": false, "ratio": 3.472312703583062, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9361917437709865, "avg_score": 0.04009031021463946, "num_lines": 37 }
__author__ = 'ash'


class Node:
    """
    Parent for all topology classes.  A node is characterized only by its id.
    """
    def __init__(self, vid):
        self.id = vid


class Switch(Node):
    """
    Characterizes only by id (inherits Node's __init__ unchanged).
    """
    pass


class Router(Node):
    """A gateway node: an id plus an IPv4 address."""
    def __init__(self, vid, ip_addr):
        Node.__init__(self, vid)
        self.ip_addr = ip_addr

    def check_ip(self, ipa):
        """
        Return True when `ipa` is a syntactically valid dotted-quad IPv4
        address; False on wrong octet count, a non-numeric octet, or an
        octet outside [0, 255].
        """
        octets = ipa.split('.')
        if len(octets) != 4:
            print("IPv4 must have 4 octets")
            return False
        for octet in octets:
            try:
                oc = int(octet)
            except ValueError:
                # BUGFIX: a non-numeric octet used to raise ValueError out of
                # the validator instead of reporting an invalid address.
                print("An octet must be an integer")
                return False
            if oc < 0 or oc > 255:
                print("An octet must be in [0,255]")
                return False
        return True

    def assign_ip(self, ipa):
        """Replace the router's address, but only after validating it."""
        if self.check_ip(ipa):
            self.ip_addr = ipa


class Endpoint(Node):
    """
    Endpoint is the parent class for all service nodes of OpenStack.
    """
    def __init__(self, vid, ip_addr, hostname):
        Node.__init__(self, vid)
        self.ip_addr = ip_addr
        self.hostname = hostname

    def assign_ip(self, ipa):
        self.ip_addr = ipa

    def assign_hostname(self, hn):
        self.hostname = hn


class ComputeNode(Endpoint):
    pass


class Storage(Endpoint):
    pass


class NetworkNode(Endpoint):
    pass


class CloudController(Endpoint):
    pass
{ "repo_name": "ashepelev/TopologyWeigher", "path": "source/topology_weigher/TopologyWeigher/Node.py", "copies": "1", "size": "1787", "license": "apache-2.0", "hash": -180052391738404960, "line_mean": 20.0235294118, "line_max": 67, "alpha_frac": 0.5478455512, "autogenerated": false, "ratio": 3.490234375, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9493832140049, "avg_score": 0.008849557230200011, "num_lines": 85 }
__author__ = 'Ashoo'
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats
import warnings

sns.set(color_codes=True)

# Reading the data where low_memory=False increases the program efficiency
data = pd.read_csv("gapminder.csv", low_memory=False)

# Setting variables that you will be working with to numeric.
# BUGFIX: DataFrame.convert_objects() was deprecated and removed from pandas;
# pd.to_numeric(errors='coerce') is the documented replacement.
data['breastcancerper100th'] = pd.to_numeric(data['breastcancerper100th'], errors='coerce')
data['femaleemployrate'] = pd.to_numeric(data['femaleemployrate'], errors='coerce')
data['alcconsumption'] = pd.to_numeric(data['alcconsumption'], errors='coerce')

# Create a copy of the original dataset as sub5 by using the copy() method
sub5 = data.copy()

# Mean imputation for the missing values.
# BUGFIX: the original called sub5.fillna(<breastcancer mean>, inplace=True)
# on the WHOLE frame, so every NaN in every column received the breast-cancer
# mean and the next two fillna calls were no-ops.  Impute per column instead.
for col in ['breastcancerper100th', 'femaleemployrate', 'alcconsumption']:
    sub5[col] = sub5[col].fillna(sub5[col].mean())

# categorize quantitative variable based on customized splits using the cut function
sub5['alco'] = pd.qcut(sub5.alcconsumption, 6,
                       labels=["0", "1-4", "5-9", "10-14", "15-19", "20-24"])
sub5['brst'] = pd.qcut(sub5.breastcancerper100th, 5,
                       labels=["1-20", "21-40", "41-60", "61-80", "81-90"])

# Converting response variable to categorical
sub5['brst'] = sub5['brst'].astype('category')

# Cross tabulating the response variable with explanatory variable
ct1 = pd.crosstab(sub5['brst'], sub5['alco'])
print("Contigency Table")
print(ct1)
print("\n\n")

# the axis=0 statement tells python to sum all the values in each column in python
colsum = ct1.sum(axis=0)
colpct = ct1 / colsum
print(colpct)

# Chi-Square
print('\n\nChi-square value, p value, expected counts')
cs1 = scipy.stats.chi2_contingency(ct1)
print(cs1)

sub5['brst'] = sub5['brst'].astype('category')
# Coerce the alco bin labels for plotting (non-numeric labels become NaN,
# matching the original convert_objects(convert_numeric=True) behavior).
sub5['alco'] = pd.to_numeric(sub5['alco'], errors='coerce')
sns.factorplot(x='alco', y='brst', data=sub5, kind="bar", ci=None)
plt.xlabel("Alcohol consumption in Liters")
plt.ylabel("Breast Cancer cases per 100th women")

# ====================================================
# POST HOC COMPARISON TEST
# BUGFIX: the recode dicts used bare arithmetic expressions as keys
# (1-20 evaluates to -19), so they collapsed to a single bogus key and the
# map() never matched the string category labels produced by qcut above.
recode2 = {"1-20": 1, "21-40": 2}
sub5['COMP1v2'] = sub5['brst'].map(recode2)
ct2 = pd.crosstab(sub5['brst'], sub5['COMP1v2'])
print("Contigency Table -2\n")
print(ct2)
print("\n\n")

# the axis=0 statement tells python to sum all the values in each column in python
colsum = ct2.sum(axis=0)
colpct = ct2 / colsum
print(colpct)

# Chi-Square
print('\n\nChi-square value, p value, expected counts')
cs2 = scipy.stats.chi2_contingency(ct2)
print(cs2)

#######################################################
recode3 = {"41-60": 3, "61-80": 4}
# BUGFIX + NOTE(review): the original mapped sub5['alco'], whose labels
# ("0".."20-24") can never match these keys; "41-60"/"61-80" are brst bin
# labels, so map brst for the second pairwise comparison.
sub5['COMP1v3'] = sub5['brst'].map(recode3)
ct3 = pd.crosstab(sub5['brst'], sub5['COMP1v3'])
print("Contigency Table - 3\n")
print(ct3)
print("\n\n")

# the axis=0 statement tells python to sum all the values in each column in python
colsum = ct3.sum(axis=0)
colpct = ct3 / colsum
print(colpct)

# Chi-Square
print('\n\nChi-square value, p value, expected counts')
cs3 = scipy.stats.chi2_contingency(ct3)
print(cs3)
{ "repo_name": "duttashi/Data-Analysis-Visualization", "path": "scripts/general/chiSquareTest.py", "copies": "1", "size": "3347", "license": "mit", "hash": 9043964226844690000, "line_mean": 32.8080808081, "line_max": 102, "alpha_frac": 0.7242306543, "autogenerated": false, "ratio": 2.8364406779661016, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.40606713322661014, "avg_score": null, "num_lines": null }
__author__ = 'ashwin'
__email__ = 'gashwin1@umbc.edu'

"""
All Test Code.
"""
from lib.models.classify import NaiveBayes

data_sep = ","
elim_var = ['$continuous$']


def test_naive_bayes(train_file_reader, test_file_reader):
    """
    Fit a Bernoulli Naive Bayes on the reader's training split and return
    its accuracy score on the test split.
    """
    nb = NaiveBayes()
    # Vectorize the training data (this also fits the vectorizer).
    train_ft, train_cl = nb.vectorize_data(train_file_reader.file_feature_data,
                                           train_file_reader.file_class_result)
    model = nb.train_model(train_ft, train_cl)
    # Vectorize the test split with the already-fitted vectorizer.
    test_ft, test_cl = nb.vectorize_data(test_file_reader.file_feature_data,
                                         test_file_reader.file_class_result,
                                         False)
    return nb.get_accuracy_score(test_cl, model.predict(test_ft))


def test_nb_cross_product(train_file_reader, test_file_reader, cross_prod_columns):
    """
    Same as test_naive_bayes, but first replaces the chosen columns with
    their cross-product feature in both the training and the test split.
    """
    nb = NaiveBayes()
    crossed_train = train_file_reader.cross_prod_var(train_file_reader.file_feature_data,
                                                     cross_prod_columns)
    train_ft, train_cl = nb.vectorize_data(crossed_train,
                                           train_file_reader.file_class_result)
    model = nb.train_model(train_ft, train_cl)
    crossed_test = test_file_reader.cross_prod_var(test_file_reader.file_feature_data,
                                                   cross_prod_columns)
    test_ft, test_cl = nb.vectorize_data(crossed_test,
                                         test_file_reader.file_class_result,
                                         False)
    return nb.get_accuracy_score(test_cl, model.predict(test_ft))
{ "repo_name": "codehacken/Kb4ML", "path": "lib/test.py", "copies": "1", "size": "2393", "license": "mit", "hash": -4860071327679453000, "line_mean": 38.2295081967, "line_max": 100, "alpha_frac": 0.5620559967, "autogenerated": false, "ratio": 3.733229329173167, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4795285325873167, "avg_score": null, "num_lines": null }
__author__ = 'Ashwin'
__email__ = 'gashwin1@umbc.edu'

"""
Basic LDA module that is used in the project.
"""
import re
import operator

from gensim import corpora, models


class LDAVisualModel:
    def __init__(self, word_corpus):
        """
        The LDAVisualModel requires list of word lists from the
        document corpus. Each list of words represents a document.
        :param word_corpus: [[<words>],[],[]]
        """
        self.id2word = corpora.Dictionary(word_corpus)
        self.mm = []     # bag-of-words corpus, filled by create_word_corpus()
        self.lda = None  # trained model, set by train_lda()

    def create_word_corpus(self, word_corpus, store_corpus=False, store_loc='dicts/corpus.mm'):
        """
        :param word_corpus: [[<words>],[],[]]
        :param store_corpus: boolean to store the serialized corpus or not.
        :param store_loc: Defines the location where the file is to be stored.
        """
        for text in word_corpus:
            self.mm.append(self.id2word.doc2bow(text))
        if store_corpus:
            corpora.MmCorpus.serialize(store_loc, word_corpus)

    def train_lda(self, num_top=2, update_t=1, chunks=10000, num_pass=1):
        """
        :param num_top: The number of topics for which LDA trains.
        :param update_t: update_every parameter forwarded to gensim.
        :param chunks: chunksize parameter forwarded to gensim.
        :param num_pass: The number of passes that LDA executes on the data.
        """
        self.lda = models.LdaModel(corpus=self.mm, id2word=self.id2word,
                                   num_topics=num_top, update_every=update_t,
                                   chunksize=chunks, passes=num_pass)

    def get_lda_corpus(self, num_of_topics=10, num_of_words=10):
        """
        Get the (probability, word) string pairs for every trained topic by
        parsing gensim's printed topic descriptions.
        """
        topics = []
        if self.lda:
            for topic in self.lda.print_topics(num_of_topics, num_of_words):
                regex = re.findall(r'(0\.[0-9]*)\*([0-9a-z]*)', topic, re.M | re.I)
                topics.append(regex)
        return topics

    def generate_doc_topic(self):
        """Build the document x topic probability matrix."""
        num_topics = self.lda.num_topics
        doc_top = []
        for idx, doc in enumerate(self.lda[self.mm]):
            doc_top.append([0] * num_topics)
            for topic in doc:
                doc_top[idx][topic[0]] = topic[1]
        return doc_top

    def generate_doc_topic_rank(self):
        """For every document, rank the topics by descending probability."""
        num_topics = self.lda.num_topics
        doc_top_rank = []
        for idx, doc in enumerate(self.lda[self.mm]):
            top_prob = [0] * num_topics
            top_rank = [0] * num_topics
            # This constructs the topic probability list.
            for topic in doc:
                top_prob[topic[0]] = topic[1]
            # Construct the ranks.
            prob_rank = sorted(top_prob, reverse=True)
            top_sort = sorted(range(len(top_prob)), key=lambda k: top_prob[k],
                              reverse=True)
            # Create a new list with the ranks.
            for rank, topic in enumerate(top_sort):
                if prob_rank[rank] > 0:
                    top_rank[topic] = rank
                else:
                    # zero-probability topics all share the worst rank
                    top_rank[topic] = num_topics - 1
            doc_top_rank.append(top_rank)
        return doc_top_rank

    @staticmethod
    def gen_doc_top_words(topics, doc_top):
        """
        For every document, aggregate P(word) = sum over topics of
        P(topic|doc) * P(word|topic) and return the words sorted by that
        probability, descending.
        :param topics: per-topic (probability, word) pairs, as returned by
                       get_lda_corpus().
        :param doc_top: document x topic matrix from generate_doc_topic().
        """
        doc_to_word = []
        for doc in doc_top:
            tmp_word_prob = {}
            for idx, top_prob in enumerate(doc):
                if top_prob > 0:
                    for word in topics[idx]:
                        # BUGFIX: the membership test used the (prob, word)
                        # tuple while the dict is keyed by the word string,
                        # so the accumulate branch was dead and later topics
                        # overwrote earlier contributions instead of summing.
                        if word[1] not in tmp_word_prob:
                            tmp_word_prob[word[1]] = float(word[0]) * top_prob
                        else:
                            tmp_word_prob[word[1]] += float(word[0]) * top_prob
            # Sort the words by aggregated probability, highest first.
            sorted_word_prob = sorted(tmp_word_prob.items(),
                                      key=operator.itemgetter(1), reverse=True)
            doc_to_word.append(sorted_word_prob)
        return doc_to_word
{ "repo_name": "codehacken/LDAExplore", "path": "processdata/lda.py", "copies": "1", "size": "4195", "license": "mit", "hash": -1563422881210270200, "line_mean": 33.1056910569, "line_max": 102, "alpha_frac": 0.5463647199, "autogenerated": false, "ratio": 3.7522361359570664, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9796424614280661, "avg_score": 0.0004352483152812379, "num_lines": 123 }
__author__ = 'ashwin'
__email__ = 'gashwin1@umbc.edu'

""""
Implement Standard classifiers.
"""

# Implementing the std. Naive Bayes Algorithm.
# Classification is based on Maximum-Likelihood for selecting the final class.
from sklearn.feature_extraction import DictVectorizer
from sklearn.naive_bayes import BernoulliNB
import numpy as np
from sklearn import metrics


class NaiveBayes:
    """Bernoulli Naive Bayes over dict-shaped rows.

    Feature dicts are one-hot encoded with DictVectorizer; class labels
    (each a single-key dict) are mapped to integer ids via data2vector.
    NOTE: written for Python 2 -- data2vector subscripts dict.keys().
    """

    def __init__(self):
        # DictVectorizer one-hot encodes the categorical feature dicts.
        self.feature_vector = DictVectorizer(sparse=False)
        # Maps each class label value -> its integer id.
        self.class_vector = {}
        self.nb_model = BernoulliNB()
        self.class_var = None

    # This function converts a set of class variable values to integer eq.
    # This is can then be used as the class ID.
    def data2vector(self, class_data, reset=False):
        """Map each row's class value to an integer id.

        :param class_data: list of single-key dicts, e.g. [{"Classify": "yes"}].
        :param reset: drop the previously learned label -> id mapping.
        :return: np.array of integer class ids, one per row.
        NOTE(review): idx restarts at 0 on every call while class_vector
        persists when reset=False -- a second call that introduces new labels
        would reuse already-assigned ids; confirm callers never do that.
        """
        if reset:
            self.class_vector = {}

        # The final vector of integer for the class variables.
        transform_vector = []

        # The conversion is a simple one taking values from 0 to
        # x where x+1 type of values are there for the class variable.
        idx = 0
        for data_point in class_data:
            # data_point has exactly one key; keys()[0] fetches it (Python 2).
            if data_point[data_point.keys()[0]] not in self.class_vector:
                self.class_vector[data_point[data_point.keys()[0]]] = idx
                idx += 1

            transform_vector.append(self.class_vector[data_point[data_point.keys()[0]]])

        return np.array(transform_vector)

    def vectorize_data(self, file_feature_data, file_class_result, if_train=True):
        """Encode features and labels into numeric arrays.

        :param if_train: fit_transform on training data (learns the
            vocabulary); transform-only otherwise so unseen test features
            align with the training columns.
        :return: [feature_matrix, class_id_vector]
        """
        # Vectorize the training data.
        if if_train:
            transformed_feature_data = self.feature_vector.fit_transform(file_feature_data)
        else:
            transformed_feature_data = self.feature_vector.transform(file_feature_data)

        # Vectorize the training data results (that is the class results applied to the same set)
        transformed_class_data = self.data2vector(file_class_result)
        return [transformed_feature_data, transformed_class_data]

    def train_model(self, ft_data, cl_data):
        # Fit the underlying BernoulliNB; returns the fitted estimator.
        return self.nb_model.fit(ft_data, cl_data)

    def predict(self, ft_data):
        # Predict integer class ids for already-vectorized features.
        return self.nb_model.predict(ft_data)

    @staticmethod
    def get_accuracy_score(train_cl_real, test_cl_predict):
        # Fraction of exactly-matching predictions.
        return metrics.accuracy_score(train_cl_real, test_cl_predict)

"""
Sample Code: Using NB
Created.

Use File Reader to read the file and get the data.
file_data is then passed to the Naive Bayes to train the model.
naive_b = NaiveBayes(file_reader.col_var, file_reader.class_var)
naive_b.train_model(file_data)
"""


class DiscreteNaiveBayes:
    """Hand-rolled frequency tables for discrete Naive Bayes.

    factor_freq[var][category][class_category] counts co-occurrences;
    result_freq[class_category] counts class occurrences. Probabilities
    (n(X)/n(S)) are left to a future predict step (see TBD below).
    """

    def __init__(self, var_names, class_var_name):
        """
        :param var_names: mapping variable name -> list of its categories;
            must include class_var_name.
        :param class_var_name: name of the class (target) variable.
        """
        self.factor_freq = {}
        self.result_freq = {}
        self.class_var_name = class_var_name
        self.__reset__(var_names)

    # The __reset__ method is used to reset the trained model vector.
    # This resets all the frequency counts to 0.
    def __reset__(self, var_names):
        # Setup the factor_freq mapping.
        for var in var_names:
            if var != self.class_var_name:
                self.factor_freq[var] = {}
                for var_category in var_names[var]:
                    self.factor_freq[var][var_category] = {}
                    for class_category in var_names[self.class_var_name]:
                        self.factor_freq[var][var_category][class_category] = 0

        # Setup for the frequency mapping for the resultant categories.
        for class_category in var_names[self.class_var_name]:
            self.result_freq[class_category] = 0

    # Create the NB model basing on training data.
    def train_model(self, training_data):
        """Accumulate co-occurrence counts from a list of row dicts.

        Each row maps variable name -> observed category and must contain
        self.class_var_name; unseen categories raise KeyError by design
        (tables were pre-built in __reset__).
        """
        # Create a counter for each combination for a specific class value.
        # Probability is calculated as a n(X1) / n(S).
        for data_point in training_data:
            for data_point_val in data_point:
                if data_point_val != self.class_var_name:
                    self.factor_freq[data_point_val][data_point[data_point_val]][data_point[self.class_var_name]] += 1

            self.result_freq[data_point[self.class_var_name]] += 1

"""
TBD: Using the trained model to calculate prob. for the test data.
"""
{ "repo_name": "codehacken/Kb4ML", "path": "lib/models/classify.py", "copies": "1", "size": "4185", "license": "mit", "hash": -5811608597094135000, "line_mean": 36.3660714286, "line_max": 118, "alpha_frac": 0.6384707288, "autogenerated": false, "ratio": 3.777075812274368, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9900978687755766, "avg_score": 0.002913570663720555, "num_lines": 112 }
__author__ = 'Ashwin' __email__ = 'gashwin1@umbc.edu' """ Perform basic file operations that can be used to feed the corpus into other models such as LDA. The module uses NLTK's english language tokenizer and stop word list to clear the document's and generate a set of tokens. """ ''' Using nltk to clear stopwords from a document. ''' from nltk import word_tokenize, Text from nltk.corpus import stopwords import glob import re import csv regex_clear = "^[a-zA-Z0-9_@]*$" token_list = [] # This is the class to work with CSV files. def read_csv(filename, delimit='"', quote='|'): file_ptr = open(filename, 'r') csv_reader = csv.reader(file_ptr, delimiter=delimit, quotechar=quote) # Use a generator to provide lines. for row in csv_reader: yield row class FileReader: def __init__(self): self.token_list = [] # This is a specific function which reads a single file but treats every line as # document. # E.g. Abstracts from different documents can be treated as a separate document for # each abstract. def read_text_sections(self, filename): tokens = [] file_handle = open(filename, "r") for line in file_handle: try: file_tokens = word_tokenize(line) file_tokenized_text = Text(file_tokens) stop_words = stopwords.words('english') # Clear Stop words in the tokens and special characters. for token in file_tokenized_text: lower_str = token.lower() if lower_str not in stop_words and re.match(regex_clear, lower_str) and len(lower_str) > 2\ and not(lower_str.isdigit()): tokens.append(lower_str) except UnicodeDecodeError: print "Unicode Decode Error: Moving On" if len(tokens) != 0: self.token_list.append(tokens) file_handle.close() def read_file(self, filename): """ This function reads a file and returns a set of tokens back. :param filename: This is name of file to be read. """ tokens = [] file_handle = open(filename, "r") file_text = file_handle.read() # file_text contains the whole file. 
# This is used because the current file contents are not large # although the number of files are large in number. try: file_tokens = word_tokenize(file_text) file_tokenized_text = Text(file_tokens) stop_words = stopwords.words('english') # Clear Stop words in the tokens and special characters. for token in file_tokenized_text: lower_str = token.lower() if lower_str not in stop_words and re.match(regex_clear, lower_str) and len(lower_str) > 2\ and not(lower_str.isdigit()): tokens.append(lower_str) except UnicodeDecodeError: print "Unicode Decode Error: Moving On" file_handle.close() if len(tokens) != 0: self.token_list.append(tokens) def read_dir(self, file_dir_name): """ This function reads a directory of files and returns a list of token lists. :param file_dir_name: This is the name of the directory. """ files = glob.glob(file_dir_name+"/*") for file_name in files: self.read_file(file_name) def get_token_list(self): return self.token_list # This function is to writes to a CSV file. # The file contains the probability of each topic. def write_prob_to_file(doc_to_word, doc_top, num_of_words, num_topics, filename): # Write the headers for the columns to the CSV. col_string = "name,group," for i in range(0, num_topics - 1): col_string += "T" + str(i) + "," col_string += "T" + str(i) + ",ID\n" # Write the document information to the CSV file. for idx, doc in enumerate(doc_top): col_string += "\"" for i in range(0, num_of_words - 1): col_string += str(doc_to_word[idx][i][0]) + ", " col_string += str(doc_to_word[idx][i+1][0]) + "\"," col_string += "D" + str(idx) for topic in doc: col_string += "," + str(topic) col_string += "," + str(idx) + "\n" with open(filename, "w") as file_handle: file_handle.write(col_string) # This function is to writes to a CSV file. # The file contains the probability of each topic. def write_rank_to_file(doc_to_word, doc_top_rank, num_of_words, num_topics, t_file, d_file): # Write the headers for the columns to the CSV. 
col_string = "name,group," for i in range(0, num_topics - 1): col_string += "T" + str(i+1) + "," col_string += "T" + str(i+1) + ",ID\n" # Write the document information to the CSV file. csvreader = read_csv(t_file) for idx, doc in enumerate(doc_top_rank): col_string += "\"" for i in range(0, num_of_words - 1): col_string += str(doc_to_word[idx][i][0]) + ", " col_string += str(doc_to_word[idx][i+1][0]) + "\"," # Construct the Ranking for each topic. # Make all the topics that have prob. 0 as the last rank. #col_string += "D" + str(idx+1) col_string += csvreader.next()[0] for topic in doc: col_string += "," + str(topic+1) col_string += "," + str(idx+1) + "\n" # Final writing to the document. with open(d_file, "w") as file_handle: file_handle.write(col_string)
{ "repo_name": "codehacken/LDAExplore", "path": "processdata/fileops.py", "copies": "1", "size": "5671", "license": "mit", "hash": -8919080193711374000, "line_mean": 31.591954023, "line_max": 111, "alpha_frac": 0.5780285664, "autogenerated": false, "ratio": 3.687256176853056, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4765284743253056, "avg_score": null, "num_lines": null }
__author__ = 'ashwin'
__email__ = 'gashwin1@umbc.edu'

""""
Standard File Operations.
"""


class FileReader:
    """Reads a column-variable definition file plus CSV-style data files,
    with helpers to filter columns and build cross-product variables."""

    def __init__(self, column_var=None, idx2var=None, class_var_name="Classify"):
        """
        :param column_var: mapping variable name -> list of its categories.
        :param idx2var: list mapping column position -> variable name.
        :param class_var_name: name used for the class (target) variable.
        """
        # BUG FIX: the old defaults ({} and []) were mutable and therefore
        # shared between every instance created without arguments.
        self.col_var = {} if column_var is None else column_var
        self.idx2var = [] if idx2var is None else idx2var
        self.class_var = class_var_name

        # Dispatch table to convert words to idx positions.
        # BUG FIX: 'first' was the bare int 0, so class_pos['first']()
        # raised TypeError in read_data; both entries are callables now.
        self.class_pos = {
            'last': lambda: len(self.col_var) - 1,
            'first': lambda: 0
        }

        # Store the file data.
        self.file_feature_data = None
        self.file_class_result = None

    def read_col_var_file(self, filename, var_separator=":", data_separator=","):
        """
        FILE DESIGN: variable name list at the start of the file.
        <var1>:<value1>,<value2>,
        <var 2>:
        <var 3>:
        :param filename: Name of the file which contains the list of variables.
        :return: No return value; fills col_var and idx2var.
        """
        with open(filename, "r") as file_ptr:
            for line in file_ptr:
                var_list = line.split("\n")[0].split(var_separator)
                self.col_var[var_list[0]] = var_list[1].split(data_separator)
                self.idx2var.append(var_list[0])

    def read_data(self, filename, separator=",", var_filter=None,
                  class_result_pos="last"):
        """
        FILE DESIGN: a CSV file containing <val1>,...,<valN> per line.
        :param filename: data file to read.
        :param separator: field separator.
        :param var_filter: optional iterable of category markers; variables
            whose first category matches one are dropped from the features.
        :param class_result_pos: 'first', 'last' or an explicit int index
            of the class column.
        :return: no return value; fills file_feature_data/file_class_result.
        """
        # 1. var_filter contains the variable type which are having a $<name>$.
        # 2. filtered_var_idx is the filtered variable list that contains only
        #    the variables that are to be filtered out.
        if isinstance(class_result_pos, str):
            class_idx = self.class_pos[class_result_pos]()
        elif isinstance(class_result_pos, int):
            class_idx = class_result_pos
        else:
            # Previously class_idx was silently left unbound here.
            raise ValueError("class_result_pos must be 'first', 'last' or an int")

        # BUG FIX: the default var_filter=None used to crash the loop below
        # ("NoneType is not iterable"); None now means "no filtering".
        filtered_var_idx = []
        if var_filter:
            for idx, var in enumerate(self.idx2var):
                for filter_item in var_filter:
                    if self.col_var[var][0] == filter_item:
                        filtered_var_idx.append(idx)

        # read and filter the data.
        file_feature_data = []
        file_class_result = []
        with open(filename, "r") as file_ptr:
            for line in file_ptr:
                col_val_map = {}
                col_val_list = line.split("\n")[0].split(separator)

                # Assign the values to each column.
                # NOTE(review): the last column is always excluded from the
                # feature map -- correct when the class column is last, but
                # looks wrong for class_result_pos='first'; confirm intent.
                for idx in range(0, len(col_val_list) - 1):
                    if idx not in filtered_var_idx:
                        col_val_map[self.idx2var[idx]] = col_val_list[idx]

                file_feature_data.append(col_val_map)
                file_class_result.append({self.class_var: col_val_list[class_idx]})

        self.file_feature_data = file_feature_data
        self.file_class_result = file_class_result

    # cross_col_list contains the list of column names which need to be
    # concatenated so that a cross product can be calculated.
    # The current cross product method works mainly for discrete data.
    def cross_prod_var(self, file_feature_data, cross_col_list):
        """Replace each group of columns in *cross_col_list* by a single
        concatenated '_a_b_'-style column and return the new data set.

        :param file_feature_data: list of row dicts (variable -> value).
        :param cross_col_list: list of column-name lists to combine.
        :return: new list of row dicts; the input rows are not modified.
        """
        # Construct the set of cross product columns.
        # cross_product_col is the name of the new cross product column (AxB).
        cross_product_col_list = []
        cross_prod_col_idx = []
        for col_list in cross_col_list:
            cross_product_col = "_"
            for col in col_list:
                cross_product_col += col + "_"
                cross_prod_col_idx.append(self.idx2var.index(col))
            cross_product_col_list.append(cross_product_col)

        cross_ft_data = []
        for data_point in file_feature_data:
            # Construct the cross product variables.
            new_data_point = {}
            for idx, col_list in enumerate(cross_col_list):
                data_point_cross_prod = "_"
                for col in col_list:
                    data_point_cross_prod += data_point[col] + "_"

                # Add the cross product to the new data point.
                new_data_point[cross_product_col_list[idx]] = data_point_cross_prod

            # Add the other variables.
            for var in data_point:
                if self.idx2var.index(var) not in cross_prod_col_idx:
                    new_data_point[var] = data_point[var]

            # Add the new data point to the data set.
            cross_ft_data.append(new_data_point)

        return cross_ft_data
{ "repo_name": "codehacken/Kb4ML", "path": "lib/stdops/fileops.py", "copies": "1", "size": "4563", "license": "mit", "hash": -4522964104592991700, "line_mean": 35.504, "line_max": 97, "alpha_frac": 0.5559938637, "autogenerated": false, "ratio": 3.796173044925125, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4852166908625125, "avg_score": null, "num_lines": null }
"""Splunk implementation of the DocManager interface. Receives documents from an OplogThread and takes the appropriate actions on Splunk. """ import logging from threading import Timer import bson.json_util from mongo_connector import errors from mongo_connector.constants import (DEFAULT_COMMIT_INTERVAL, DEFAULT_MAX_BULK) from mongo_connector.util import retry_until_ok from mongo_connector.doc_managers import DocManagerBase, exception_wrapper from mongo_connector.doc_managers.formatters import DefaultDocumentFormatter from bson.json_util import dumps import sys, os sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) import splunklib.client as client import splunklib.results as results from time import sleep try: from utils import * except ImportError: raise Exception("Add the SDK repository to your PYTHONPATH to run the examples " "(e.g., export PYTHONPATH=~/splunk-sdk-python.") class DocManager(DocManagerBase): """Splunk implementation of the DocManager interface. Receives documents from an OplogThread and takes the appropriate actions on Splunk. 
""" def __init__(self, url, auto_commit_interval=DEFAULT_COMMIT_INTERVAL, unique_key='_id', chunk_size=DEFAULT_MAX_BULK, meta_index_name="mongodb_meta", meta_type="mongodb_meta", **kwargs): self.host = url[0] self.port = url[1] self.username = "admin" self.password = "changeme" self.auto_commit_interval = auto_commit_interval self.doc_type = 'string' # default type is string, change if needed self.meta_index_name = meta_index_name self.meta_type = meta_type self.unique_key = unique_key self.chunk_size = chunk_size self._formatter = DefaultDocumentFormatter() def getConnection(self): return client.connect(username=self.username, password=self.password) def stop(self): """Stop the auto-commit thread.""" self.auto_commit_interval = None def apply_update(self, doc, update_spec): if "$set" not in update_spec and "$unset" not in update_spec: # Don't try to add ns and _ts fields back in from doc return update_spec return super(DocManager, self).apply_update(doc, update_spec) def search(self, start_ts, end_ts): """Splunk works on static event which are never get changed, hence no need of taking care of conflicts.""" tmp = [] return tmp; def commit(self): """Refresh all Splunk indexes.""" """Splunk does not need any refresh command""" def get_last_doc(self): """Splunk does not modify any document as it is log analysis tool, hence no documents to be return.""" return None def update(self, doc, update_spec): """Send updated doc to Splunk.""" doc = dict(doc.items() + update_spec.items()) index = doc["ns"] doc["_time"] = doc["_id"].generation_time service = self.getConnection() source = index.split(".") index_name = index.replace("_","-").replace(".","_").lower() # Check index presence if index_name not in service.indexes: service.indexes.create(index_name) # Index the source document index = service.indexes[index_name] with index.attached_socket(sourcetype='json', source=source[0], host="abacus") as sock: sock.send(dumps(doc, sort_keys=True)) print "Updation successful" if not 
doc: raise errors.EmptyDocsError( "Cannot upsert an empty sequence of " "documents into Splunk") return def upsert(self, doc): """Insert a document into Splunk.""" index = doc["ns"] doc["_time"] = doc["_id"].generation_time service = self.getConnection() source = index.split(".") index_name = index.replace("_","-").replace(".","_").lower() # Check index presence if index_name not in service.indexes: service.indexes.create(index_name) # Index the source document index = service.indexes[index_name] with index.attached_socket(sourcetype='json', source=source[0], host="abacus") as sock: sock.send(dumps(doc, sort_keys=True)) print "Insertion successful" if not doc: raise errors.EmptyDocsError( "Cannot upsert an empty sequence of " "documents into Splunk") return def bulk_upsert(self, docs): """Insert multiple documents into Splunk.""" for doc in docs: index = doc["ns"] doc["_time"] = doc["_id"].generation_time service = self.getConnection() source = index.split(".") index_name = index.replace("_","-").replace(".","_").lower() # Check index presence if index_name not in service.indexes: service.indexes.create(index_name) # Index the source document index = service.indexes[index_name] with index.attached_socket(sourcetype='json', source=source[0], host="abacus") as sock: sock.send(dumps(doc, sort_keys=True)) if not doc: raise errors.EmptyDocsError( "Cannot upsert an empty sequence of " "documents into Splunk") return def remove(self, doc): """Remove a document from Splunk.""" index = doc["ns"] doc_id = doc["_id"] source = index.split(".") index_name = index.replace("_","-").replace(".","_").lower() service = self.getConnection() jobs = service.jobs kwargs_normalsearch = {"exec_mode": "normal"} job = jobs.create("search index="+str(index_name)+" id.$oid="+str(doc_id)+" | delete", **kwargs_normalsearch) while True: job.refresh() stats = {"isDone": job["isDone"], "doneProgress": float(job["doneProgress"])*100} status = ("\r%(doneProgress)03.1f%%") % stats 
sys.stdout.write(status) sys.stdout.flush() print "hi" if stats["isDone"] == "1": sys.stdout.write("\n\nDone!\n\n") break sleep(1) # Get properties of the job print "Search job properties" print "Search job ID: ", job["sid"] print "Search result: ", job["isDone"] print "Search duration: ", job["runDuration"], "seconds" print "This job expires in: ", job["ttl"], "seconds" print "------------------------------------------\n" print "Search results:\n" print "search index="+str(index_name)+" id.$oid="+str(doc_id)+" | delete" # Index the source document index = service.indexes[index_name] with index.attached_socket(sourcetype='json', source=source[0], host="abacus") as sock: sock.send(dumps(doc, sort_keys=True)) return
{ "repo_name": "asifhj/mongo-connector", "path": "mongo_connector/doc_managers/splunk_doc_manager.py", "copies": "1", "size": "7923", "license": "apache-2.0", "hash": 7844574321847292000, "line_mean": 36.9138755981, "line_max": 117, "alpha_frac": 0.5863940427, "autogenerated": false, "ratio": 4.187632135306554, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5274026178006554, "avg_score": null, "num_lines": null }
__author__ = 'asifj'

import logging
from kafka import KafkaConsumer
from pymongo import MongoClient
import re
import json
import traceback
import sys

# Verbose, timestamped log lines for every component of the consumer.
logging.basicConfig(
    format='%(asctime)s.%(msecs)s:%(name)s:%(thread)d:%(levelname)s:%(process)d:%(message)s',
    level=logging.INFO
)

# Environment endpoints.
# NOTE(review): KAFAK_PORT is a typo'd (KAFKA_PORT) and unused constant --
# the consumer below hard-codes ':9092'; confirm before relying on it.
DB_VM_MONGO_IP = "10.219.48.134"
DB_LOCAL_MONGO_IP = "192.168.56.101"
DB_NAME = "SAPNotesTopic"
DB_PORT = 27017
KAFKA_IP = "172.22.147.242"
KAFAK_PORT = "9020"
KAFKA_TOPIC = "SAPNotesTopic"
url = "http://172.22.147.248:8092/api/"


def drop_database():
    # Drop the whole working database so every run starts from scratch.
    client = MongoClient(DB_LOCAL_MONGO_IP, DB_PORT)
    client.drop_database(DB_NAME)
    client.close()


def upsert_document_debug(coll, doc):
    # Audit copy: insert the raw event into a parallel '<coll>Debug'
    # collection and return the generated id.
    # NOTE(review): presumably insert() mutates doc[coll] by adding '_id'
    # (PyMongo behavior), which is why the main loop re-parses the Kafka
    # message before the real upsert -- confirm before simplifying.
    client = MongoClient(DB_LOCAL_MONGO_IP , DB_PORT)
    db = client[DB_NAME]
    collection = db[coll+"Debug"]
    post_id = collection.insert(doc[coll])
    client.close()
    return post_id


def upsert_document(coll, doc):
    # Keyed upsert: keep one Mongo document per caseId, replaced wholesale
    # each time a new event arrives for that case.
    client = MongoClient(DB_LOCAL_MONGO_IP, DB_PORT)
    db = client[DB_NAME]
    collection = db[coll]
    key = {'caseId': doc[coll]['caseId']}
    print key
    doc = doc[coll]
    post_id = collection.update(key, doc, upsert=True);
    client.close()
    return post_id


print "Cleaning old Mongo docs..."
drop_database()
print "Mongo docs cleaned!"

# To consume messages.
# Starts from the earliest retained offset and never commits, so every
# restart reprocesses the whole topic (consistent with the DB drop above).
consumer = KafkaConsumer(KAFKA_TOPIC, bootstrap_servers=[KAFKA_IP+':9092'], auto_commit_enable=False, auto_offset_reset="smallest")
# group_id='CLIEvent-grp',
#consumer.configure(bootstrap_servers=['172.22.147.242:9092', '172.22.147.232:9092', '172.22.147.243:9092'], auto_commit_enable=False, auto_offset_reset="smallest")

message_no = 1
for message in consumer:
    # message value is raw byte string -- decode if necessary!
    # e.g., for unicode: `message.value.decode('utf-8')`
    #print("%s:%d:%d: key=%s value=%s" % (message.topic, message.partition,
    #                                     message.offset, message.key,
    #                                     message.value))
    topic = message.topic
    partition = message.partition
    offset = message.offset
    key = message.key
    message = message.value
    print "================================================================================================================="
    if not message is None:
        try:
            document = json.loads(message)
            print "Event Type: "+str(document.keys())
            print "Message No: "+str(message_no)
            # Payload shape is {<collection-name>: {...event...}}; the single
            # top-level key selects the target collection.
            collection = document.keys()[0]
            print "Debug Document ID: "+str(upsert_document_debug(collection, document))
            # Re-parse to obtain a pristine copy: the debug insert above
            # mutated the first parsed dict (see upsert_document_debug note).
            document = json.loads(message)
            print upsert_document(collection, document)
        except Exception, err:
            print "CustomException"
            print "Kafka Message: "+str(message)
            print(traceback.format_exc())
    print "================================================================================================================="
    print "\n"
    message_no += 1
{ "repo_name": "asifhj/Python_SOAP_OSSJ_SAP_Fusion_Kafka_Spark_HBase", "path": "KafkaConsumerDEV-SAPNotesTopic.py", "copies": "1", "size": "3074", "license": "apache-2.0", "hash": 7600251682130696000, "line_mean": 33.3333333333, "line_max": 164, "alpha_frac": 0.5718932986, "autogenerated": false, "ratio": 3.5995316159250588, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4671424914525059, "avg_score": null, "num_lines": null }
__author__ = 'asifj' import requests from pymongo import MongoClient from bson import Binary, Code import json import csv import traceback import logging from tabulate import tabulate import datetime logging.basicConfig( format='%(asctime)s.%(msecs)s:%(name)s:%(thread)d:%(levelname)s:%(process)d:%(message)s', level=logging.DEBUG ) class HBase: def __init__(self): self.url = "http://172.22.147.248:8092/api/" pass def get_case_by_case_id(self, document, row): print "API URL: "+self.url+"case-manager/cases/"+str(document['caseId']) r = requests.get(self.url+"case-manager/cases/"+str(document['caseId'])) print "CaseID: "+str(document['caseId']) print "Response: "+str(r.status_code) keys = len(document.keys()) print "Keys: "+str(keys) row.append(r.status_code) status = 0 if r.status_code==200: response = json.loads(r.text) #print json.dumps(response, indent=4) table = [] if not (str(document['betaType']).strip()) == ("" if response['betaType'] is None else str(response['betaType']).strip()): tmp = [str(document['betaType']).strip(), str(response['betaType']).strip()] tmp.append("Incorrect value for 'betaType'!") table.append(tmp) status = 1 if not (str(document['build']).strip()) == ("" if response['outage']['build'] is None else str(response['outage']['build']).strip()): tmp = [str(document['build']).strip(), str(response['outage']['build']).strip()] tmp.append("Incorrect value for 'build'!") table.append(tmp) status = 1 if not (str(document['ccEngineer']).strip()) == ("" if response['outage']['ccEngineer'] is None else str(response['outage']['ccEngineer']).strip()): tmp = [str(document['ccEngineer']).strip(), str(response['outage']['ccEngineer']).strip()] tmp.append("Incorrect value for 'ccEngineer'!") table.append(tmp) status = 1 if not (str(document['caseId']).strip()) == ("" if response['srId'] is None else str(response['srId']).strip()): tmp = [str(document['caseId']).strip(), str(response['srId']).strip()] tmp.append("Incorrect value for 'caseId'!") 
table.append(tmp) status = 1 if not (str(document['contractId']).strip()) == ("" if response['entitlement']['contractId'] is None else str(response['entitlement']['contractId']).strip()): tmp = [str(document['contractId']).strip(), str(response['entitlement']['contractId']).strip()] tmp.append("Incorrect value for 'contractId'!") table.append(tmp) status = 1 if not (str(document['contractStatus']).strip()) == ("" if response['entitlement']['contractStatus'] is None else str(response['entitlement']['contractStatus']).strip()): tmp = [str(document['contractStatus']).strip(), str(response['entitlement']['contractStatus']).strip()] tmp.append("Incorrect value for 'contractStatus'!") table.append(tmp) status = 1 if not (str(document['country']).strip()) == ("" if response['outage']['country'] is None else str(response['outage']['country']).strip()): tmp = [str(document['country']).strip(), str(response['outage']['country']).strip()] tmp.append("Incorrect value for 'country'!") table.append(tmp) status = 1 if not (str(document['courtesyDescription']).strip()) == ("" if response['courtesy'] is None else str(response['courtesy']).strip()): tmp = [str(document['courtesyDescription']).strip(), str(response['courtesy']).strip()] tmp.append("Incorrect value for 'courtesyDescription/courtesy'!") table.append(tmp) status = 1 if not (str(document['courtesykey']).strip()) == ("" if response['courtesyKey'] is None else str(response['courtesyKey']).strip()): tmp = [str(document['courtesykey']).strip(), str(response['courtesyKey']).strip()] tmp.append("Incorrect value for 'courtesykey'!") table.append(tmp) status = 1 if not (str(document['criticalIssue']).strip()) == ("" if response['outage']['criticalIssue'] is None else str(response['outage']['criticalIssue']).strip()): tmp = [str(document['criticalIssue']).strip(), str(response['outage']['criticalIssue']).strip()] tmp.append("Incorrect value for 'criticalIssue'!") table.append(tmp) status = 1 if not 
(str(document['criticalOutage']).strip()) == ("" if response['criticalOutage'] is None else str(response['criticalOutage']).strip()): tmp = [str(document['criticalOutage']).strip(), str(response['criticalOutage']).strip()] tmp.append("Incorrect value for 'criticalOutage'!") table.append(tmp) status = 1 if not (str(document['customerCaseNumber']).strip()) == ("" if response['outage']['custCaseNo'] is None else str(response['outage']['custCaseNo']).strip()): tmp = [str(document['customerCaseNumber']).strip(), str(response['outage']['custCaseNo']).strip()] tmp.append("Incorrect value for 'customerCaseNumber'!") table.append(tmp) status = 1 if not (str(document['cve']).strip()) == ("" if response['cve'] is None else str(response['cve']).strip()): tmp = [str(document['cve']).strip(), str(response['cve']).strip()] tmp.append("Incorrect value for 'cve'!") table.append(tmp) status = 1 if not (str(document['cvss']).strip()) == ("" if response['cvss'] is None else str(response['cvss']).strip()): tmp = [str(document['cvss']).strip(), str(response['cvss']).strip()] tmp.append("Incorrect value for 'cvss'!") table.append(tmp) status = 1 if not (str(document['description']).strip()) == ("" if response['desc'] is None else str(response['desc']).strip()): #tmp = [str(document['description']).strip(), str(response['desc']).strip(), "Incorrect value for 'description'!"] tmp = [str(document['description']).strip()[:10], str(response['desc']).strip()[:10], "Incorrect value for 'description'!"] table.append(tmp) status = 1 endDate = "" try: if response['entitlement']['endDate'] is None: endDate = "00000000" else: endDate = datetime.datetime.fromtimestamp(float(endDate)/1000).strftime('%Y%m%d') except Exception: print "EndDate issue: "+str(Exception.message) print(traceback.format_exc()) endDate = endDate.replace("00:00:00", "") endDate = endDate.replace("-", "") print "endDate: "+str(endDate) if not (str(document['endDate']).strip()) == endDate.strip(): tmp = 
[str(document['endDate']).strip(), endDate, "Incorrect value for 'endDate'!"] table.append(tmp) status = 1 if not (str(document['entitledSerialNumber']).strip()) == ("" if response['entitlement']['entitledSerialNumber'] is None else str(response['entitlement']['entitledSerialNumber']).strip()): tmp = [str(document['entitledSerialNumber']).strip(), str(response['entitlement']['entitledSerialNumber']).strip()] tmp.append("Incorrect value for 'entitledSerialNumber'!") table.append(tmp) status = 1 if not (str(document['entitlementChecked']).strip()) == ("" if response['entitlement']['entitlementChecked'] is None else str(response['entitlement']['entitlementChecked']).strip()): tmp = [str(document['entitlementChecked']).strip(), str(response['entitlement']['entitlementChecked']).strip()] tmp.append("Incorrect value for 'entitlementChecked'!") table.append(tmp) status = 1 if not (str(document['entitlementServiceLevel']).strip()) == ("" if response['entitlement']['entitlementServiceLevel'] is None else str(response['entitlement']['entitlementServiceLevel']).strip()): tmp = [str(document['entitlementServiceLevel']).strip(), str(response['entitlement']['entitlementServiceLevel']).strip()] tmp.append("Incorrect value for 'entitlementServiceLevel'!") table.append(tmp) status = 1 if not (str(document['entitlementSource']).strip()) == ("" if response['entitlement']['entitlementSource'] is None else str(response['entitlement']['entitlementSource']).strip()): tmp = [str(document['entitlementSource']).strip(), str(response['entitlement']['entitlementSource']).strip()] tmp.append("Incorrect value for 'entitlementSource'!") table.append(tmp) status = 1 if not (str(document.get('escalation', '')).strip()) == ("" if response['escalationDesc'] is None else str(response['escalationDesc']).strip()): tmp = [str(document['escalation']).strip(), str(response['escalationDesc']).strip()] tmp.append("Incorrect value for 'escalation'!") table.append(tmp) status = 1 if not 
(str(document['escalationLevelDescription']).strip()) == ("" if response['outage']['escalationLevel'] is None else str(response['outage']['escalationLevel']).strip()): tmp = [str(document['escalationLevelDescription']).strip(), str(response['outage']['escalationLevel']).strip()] tmp.append("Incorrect value for 'escalationLevelDescription'!") table.append(tmp) status = 1 if not (str(document['escalationLevelKey']).strip()) == ("" if response['outage']['escalationLevelkey'] is None else str(response['outage']['escalationLevelkey']).strip()): tmp = [str(document['escalationLevelKey']).strip(), str(response['outage']['escalationLevelkey']).strip()] tmp.append("Incorrect value for 'escalationLevelKey'!") table.append(tmp) status = 1 if not (str(document['escalationkey']).strip()) == ("" if response['escalationKey'] is None else str(response['escalationKey']).strip()): tmp = [str(document['escalationkey']).strip(), str(response['escalationKey']).strip()] tmp.append("Incorrect value for 'escalationkey'!") table.append(tmp) status = 1 if not (str(document['externallyReported']).strip()) == ("" if response['externallyReported'] is None else str(response['externallyReported']).strip()): tmp = [str(document['externallyReported']).strip(), str(response['externallyReported']).strip()] tmp.append("Incorrect value for 'externallyReported'!") table.append(tmp) status = 1 if not (str(document.get('followupMethod', '')).strip()) == ("" if response['outage']['followUpMethod'] is None else str(response['outage']['followUpMethod']).strip()): tmp = [str(document['followupMethod']).strip(), str(response['outage']['followUpMethod']).strip()] tmp.append("Incorrect value for 'followupMethod'!") table.append(tmp) status = 1 if not (str(document['followupMethodKey']).strip()) == ("" if response['outage']['followUpMethodkey'] is None else str(response['outage']['followUpMethodkey']).strip()): tmp = [str(document['followupMethodKey']).strip(), 
str(response['outage']['followUpMethodkey']).strip()] tmp.append("Incorrect value for 'followupMethodKey'!") table.append(tmp) status = 1 if not (str(document['jsaAdvisoryBoard']).strip()) == ("" if response['jsaAdvisoryBoard'] is None else str(response['jsaAdvisoryBoard']).strip()): tmp = [str(document['jsaAdvisoryBoard']).strip(), str(response['jsaAdvisoryBoard']).strip()] tmp.append("Incorrect value for 'jsaAdvisoryBoard'!") table.append(tmp) status = 1 if not (str(document['jtac']).strip()) == ("" if response['jtac'] is None else str(response['jtac']).strip()): tmp = [str(document['jtac']).strip(), str(response['jtac']).strip()] tmp.append("Incorrect value for 'jtac'!") table.append(tmp) status = 1 if not (str(document['knowledgeArticle']).strip()) == ("" if response['outage']['knowledgeArticle'] is None else str(response['outage']['knowledgeArticle']).strip()): tmp = [str(document['knowledgeArticle']).strip(), str(response['outage']['knowledgeArticle']).strip()] tmp.append("Incorrect value for 'knowledgeArticle'!") table.append(tmp) status = 1 ocd = document.get('outageCauseDescription', "") if ocd=="": ocd = document.get('ouatgeCauseDescription', "") if not (str(ocd).strip()) == ("" if response['outage']['outageCause'] is None else str(response['outage']['outageCause']).strip()): tmp = [str(ocd).strip(), str(response['outage']['outageCause']).strip()] tmp.append("Incorrect value for 'outageCauseDescription/outageCause'!") table.append(tmp) status = 1 if not (str(document['outageCauseKey']).strip()) == ("" if response['outage']['outageCausekey'] is None else str(response['outage']['outageCausekey']).strip()): tmp = [str(document['outageCauseKey']).strip(), str(response['outage']['outageCausekey']).strip()] tmp.append("Incorrect value for 'outageCauseKey'!") table.append(tmp) status = 1 if not (str(document['outageDescription']).strip()) == ("" if response['outage']['outage'] is None else str(response['outage']['outage']).strip()): tmp = 
[str(document['outageDescription']).strip(), str(response['outage']['outage']).strip()] tmp.append("Incorrect value for 'outageDescription/outage'!") table.append(tmp) status = 1 if not (str(document['outageImpactKey']).strip()) == ("" if response['outage']['outageImpactKey'] is None else str(response['outage']['outageImpactKey']).strip()): tmp = [str(document['outageImpactKey']).strip(), str(response['outage']['outageImpactKey']).strip()] tmp.append("Incorrect value for 'outageImpactKey'!") table.append(tmp) status = 1 if not (str(document['outageInfoAvailable']).strip()) == ("" if response['outage']['outageInfoAvailable'] is None else str(response['outage']['outageInfoAvailable']).strip()): tmp = [str(document['outageInfoAvailable']).strip(), str(response['outage']['outageInfoAvailable']).strip()] tmp.append("Incorrect value for 'outageInfoAvailable'!") table.append(tmp) status = 1 if not (str(document['outageKey']).strip()) == ("" if response['outage']['outageKey'] is None else str(response['outage']['outageKey']).strip()): tmp = [str(document['outageKey']).strip(), str(response['outage']['outageKey']).strip()] tmp.append("Incorrect value for 'outageKey'!") table.append(tmp) status = 1 if not (str(document['outageTypeDescription']).strip()) == ("" if response['outage']['outageType'] is None else str(response['outage']['outageType']).strip()): tmp = [str(document['outageTypeDescription']).strip(), str(response['outage']['outageType']).strip()] tmp.append("Incorrect value for 'outageTypeDescription/outageType'!") table.append(tmp) status = 1 if not (str(document['outageTypeKey']).strip()) == ("" if response['outage']['outageTypekey'] is None else str(response['outage']['outageTypekey']).strip()): tmp = [str(document['outageTypeKey']).strip(), str(response['outage']['outageTypekey']).strip()] tmp.append("Incorrect value for 'outageTypeKey'!") table.append(tmp) status = 1 if not (str(document['outsourcer']).strip()) == ("" if response['outage']['outsourcer'] is None 
else str(response['outage']['outsourcer']).strip()): tmp = [str(document['outsourcer']).strip(), str(response['outage']['outsourcer']).strip()] tmp.append("Incorrect value for 'outsourcer'!") table.append(tmp) status = 1 if not (str(document['overideOutage']).strip()) == ("" if response['outage']['overideOutage'] is None else str(response['outage']['overideOutage']).strip()): tmp = [str(document['overideOutage']).strip(), str(response['outage']['overideOutage']).strip()] tmp.append("Incorrect value for 'overideOutage'!") table.append(tmp) status = 1 if not (str(document['platform']).strip()) == ("" if response['platform'] is None else str(response['platform']).strip()): tmp = [str(document['platform']).strip(), str(response['platform']).strip()] tmp.append("Incorrect value for 'platform'!") table.append(tmp) status = 1 if not (str(document['previousOwnerSkill']).strip()) == ("" if response['outage']['previousOwnerSkill'] is None else str(response['outage']['previousOwnerSkill']).strip()): tmp = [str(document['previousOwnerSkill']).strip(), str(response['outage']['previousOwnerSkill']).strip()] tmp.append("Incorrect value for 'previousOwnerSkill'!") table.append(tmp) status = 1 if not (str(document['previousTeam']).strip()) == ("" if response['outage']['previousTeam'] is None else str(response['outage']['previousTeam']).strip()): tmp = [str(document['previousTeam']).strip(), str(response['outage']['previousTeam']).strip()] tmp.append("Incorrect value for 'previousTeam'!") table.append(tmp) status = 1 if not (str(document.get('priority', '')).strip()) == ("" if response['priority'] is None else str(response['priority']).strip()): tmp = [str(document['priority']).strip(), str(response['priority']).strip()] tmp.append("Incorrect value for 'priority'!") table.append(tmp) status = 1 if not (str(document['priorityKey']).strip()) == ("" if response['priorityKey'] is None else str(response['priorityKey']).strip()): tmp = [str(document['priorityKey']).strip(), 
str(response['priorityKey']).strip()] tmp.append("Incorrect value for 'priorityKey'!") table.append(tmp) status = 1 if not (str(document['processType']).strip()) == ("" if response['processType'] is None else str(response['processType']).strip()): tmp = [str(document['processType']).strip(), str(response['processType']).strip()] tmp.append("Incorrect value for 'processType'!") table.append(tmp) status = 1 if not (str(document['processTypeDescription']).strip()) == ("" if response['processTypeDesc'] is None else str(response['processTypeDesc']).strip()): tmp = [str(document['processTypeDescription']).strip(), str(response['processTypeDesc']).strip()] tmp.append("Incorrect value for 'processTypeDescription'!") table.append(tmp) status = 1 if not (str(document['productId']).strip()) == ("" if response['productId'] is None else str(response['productId']).strip()): tmp = [str(document['productId']).strip(), str(response['productId']).strip()] tmp.append("Incorrect value for 'productId'!") table.append(tmp) status = 1 if not (str(document['productSeries']).strip()) == ("" if response['productSeries'] is None else str(response['productSeries']).strip()): tmp = [str(document['productSeries']).strip(), str(response['productSeries']).strip()] tmp.append("Incorrect value for 'productSeries'!") table.append(tmp) status = 1 if not (str(document['raFa']).strip()) == ("" if response['outage']['raFa'] is None else str(response['outage']['raFa']).strip()): tmp = [str(document['raFa']).strip(), str(response['outage']['raFa']).strip()] tmp.append("Incorrect value for 'raFa'!") table.append(tmp) status = 1 if not (str(document['reason']).strip()) == ("" if response['reason'] is None else str(response['reason']).strip()): tmp = [str(document['reason']).strip(), str(response['reason']).strip()] tmp.append("Incorrect value for 'reason'!") table.append(tmp) status = 1 if not (str(document['release']).strip()) == ("" if response['release'] is None else str(response['release']).strip()): 
tmp = [str(document['release']).strip(), str(response['release']).strip()] tmp.append("Incorrect value for 'release'!") table.append(tmp) status = 1 if not (str(document['reporterDetails']).strip()) == ("" if response['reporterDetails'] is None else str(response['reporterDetails']).strip()): tmp = [str(document['reporterDetails']).strip(), str(response['reporterDetails']).strip()] tmp.append("Incorrect value for 'reporterDetails'!") table.append(tmp) status = 1 if not (str(document['routerName']).strip()) == ("" if response['outage']['routerName'] is None else str(response['outage']['routerName']).strip()): tmp = [str(document['routerName']).strip(), str(response['outage']['routerName']).strip()] tmp.append("Incorrect value for 'routerName'!") table.append(tmp) status = 1 if not (str(document['secVulnerability']).strip()) == ("" if response['secVulnerability'] is None else str(response['secVulnerability']).strip()): tmp = [str(document['secVulnerability']).strip(), str(response['secVulnerability']).strip()] tmp.append("Incorrect value for 'secVulnerability'!") table.append(tmp) status = 1 if not (str(document['serialNumber']).strip()) == ("" if response['serialNumber'] is None else str(response['serialNumber']).strip()): tmp = [str(document['serialNumber']).strip(), str(response['serialNumber']).strip()] tmp.append("Incorrect value for 'serialNumber'!") table.append(tmp) status = 1 if not (str(document.get('severity', '')).strip()) == ("" if response['severity'] is None else str(response['severity']).strip()): tmp = [str(document['severity']).strip(), str(response['severity']).strip()] tmp.append("Incorrect value for 'severity'!") table.append(tmp) status = 1 if not (str(document.get('severityKey', '')).strip()) == ("" if response['severityKey'] is None else str(response['severityKey']).strip()): tmp = [str(document['severityKey']).strip(), str(response['severityKey']).strip()] tmp.append("Incorrect value for 'severityKey'!") table.append(tmp) status = 1 if not 
(str(document['sirtBundle']).strip()) == ("" if response['sirtBundle'] is None else str(response['sirtBundle']).strip()): tmp = [str(document['sirtBundle']).strip(), str(response['sirtBundle']).strip()] tmp.append("Incorrect value for 'sirtBundle'!") table.append(tmp) status = 1 if not (str(document['sku']).strip()) == ("" if response['entitlement']['sku'] is None else str(response['entitlement']['sku']).strip()): tmp = [str(document['sku']).strip(), str(response['entitlement']['sku']).strip()] tmp.append("Incorrect value for 'sku'!") table.append(tmp) status = 1 if not (str(document['smeContact']).strip()) == ("" if response['smeContact'] is None else str(response['smeContact']).strip()): tmp = [str(document['smeContact']).strip(), str(response['smeContact']).strip()] tmp.append("Incorrect value for 'smeContact'!") table.append(tmp) status = 1 if not (str(document['software']).strip()) == ("" if response['software'] is None else str(response['software']).strip()): tmp = [str(document['software']).strip(), str(response['software']).strip()] tmp.append("Incorrect value for 'software'!") table.append(tmp) status = 1 if not (str(document['specialRelease']).strip()) == ("" if response['specialRelease'] is None else str(response['specialRelease']).strip()): tmp = [str(document['specialRelease']).strip(), str(response['specialRelease']).strip()] tmp.append("Incorrect value for 'specialRelease'!") table.append(tmp) status = 1 if not (str(document['srCategory1']).strip()) == ("" if response['srCat1'] is None else str(response['srCat1']).strip()): tmp = [str(document['srCategory1']).strip(), str(response['srCat1']).strip()] tmp.append("Incorrect value for 'srCategory1'!") table.append(tmp) status = 1 if not (str(document['srCategory2']).strip()) == ("" if response['srCat2'] is None else str(response['srCat2']).strip()): tmp = [str(document['srCategory2']).strip(), str(response['srCat2']).strip()] tmp.append("Incorrect value for 'srCategory2'!") table.append(tmp) status = 1 
if not (str(document['srCategory3']).strip()) == ("" if response['srCat3'] is None else str(response['srCat3']).strip()): tmp = [str(document['srCategory3']).strip(), str(response['srCat3']).strip()] tmp.append("Incorrect value for 'srCategory3'!") table.append(tmp) status = 1 if not (str(document['srCategory4']).strip()) == ("" if response['srCat4'] is None else str(response['srCat4']).strip()): tmp = [str(document['srCategory4']).strip(), str(response['srCat4']).strip()] tmp.append("Incorrect value for 'srCategory4'!") table.append(tmp) status = 1 startDate = "" try: if response['entitlement']['startDate'] is None: startDate = "00000000" else: startDate = datetime.datetime.fromtimestamp(float(startDate)/1000).strftime('%Y%m%d') except Exception: print "StartDate issue: "+str(Exception.message) print(traceback.format_exc()) startDate = startDate.replace("00:00:00", "") startDate = startDate.replace("-", "") print "StartDate: "+str(startDate) if not (str(document['startDate']).strip()) == startDate.strip(): tmp = [str(document['startDate']).strip(), startDate, "Incorrect value for 'startDate'!"] table.append(tmp) status = 1 if not (str(document['status']).strip()) == ("" if response['status'] is None else str(response['status']).strip()): tmp = [str(document['status']).strip(), str(response['status']).strip(), "Incorrect value for 'status'!"] table.append(tmp) status = 1 if not (str(document['statusKey']).strip()) == ("" if response['statusKey'] is None else str(response['statusKey']).strip()): tmp = [str(document['statusKey']).strip(), str(response['statusKey']).strip(), "Incorrect value for 'statusKey'!"] table.append(tmp) status = 1 if not (str(document['technicalCategory2']).strip()) == ("" if response['techCat1'] is None else str(response['techCat1']).strip()): tmp = [str(document['technicalCategory2']).strip(), str(response['techCat1']).strip(), "Incorrect value for 'technicalCategory2'!"] table.append(tmp) status = 1 if not 
(str(document['technicalCategory3']).strip()) == ("" if response['techCat2'] is None else str(response['techCat2']).strip()): tmp = [str(document['technicalCategory3']).strip(), str(response['techCat2']).strip(), "Incorrect value for 'technicalCategory3'!"] table.append(tmp) status = 1 if not (str(document['technicalCategory4']).strip()) == ("" if response['techCat3'] is None else str(response['techCat3']).strip()): tmp = [str(document['technicalCategory4']).strip(), str(response['techCat3']).strip(), "Incorrect value for 'technicalCategory4'!"] table.append(tmp) status = 1 if not (str(document['temperature']).strip()) == ("" if response['outage']['temperature'] is None else str(response['outage']['temperature']).strip()): tmp = [str(document['temperature']).strip(), str(response['outage']['temperature']).strip()] tmp.append("Incorrect value for 'temperature'!") table.append(tmp) status = 1 if not (str(document['theaterDescription']).strip()) == ("" if response['outage']['theater'] is None else str(response['outage']['theater']).strip()): tmp = [str(document['theaterDescription']).strip(), str(response['outage']['theater']).strip()] tmp.append("Incorrect value for 'theaterDescription/theater'!") table.append(tmp) status = 1 if not (str(document['theaterKey']).strip()) == ("" if response['outage']['theaterkey'] is None else str(response['outage']['theaterkey']).strip()): tmp = [str(document['theaterKey']).strip(), str(response['outage']['theaterkey']).strip()] tmp.append("Incorrect value for 'theaterKey'!") table.append(tmp) status = 1 if not (str(document['top5']).strip()) == ("" if response['outage']['top5'] is None else str(response['outage']['top5']).strip()): tmp = [str(document['top5']).strip(), str(response['outage']['top5']).strip()] tmp.append("Incorrect value for 'top5'!") table.append(tmp) status = 1 if not (str(document['totalOutageTime']).strip()) == ("" if response['outage']['totalOutageTime'] is None else 
str(response['outage']['totalOutageTime']).strip()): tmp = [str(document['totalOutageTime']).strip(), str(response['outage']['totalOutageTime']).strip()] tmp.append("Incorrect value for 'totalOutageTime'!") table.append(tmp) status = 1 if not (str(document.get('urgency', '')).strip()) == ("" if response['urgency'] is None else str(response['urgency']).strip()): tmp = [str(document['urgency']).strip(), str(response['urgency']).strip()] tmp.append("Incorrect value for 'urgency'!") table.append(tmp) status = 1 if not (str(document['urgencyKey']).strip()) == ("" if response['urgencyKey'] is None else str(response['urgencyKey']).strip()): tmp = [str(document['urgencyKey']).strip(), str(response['urgencyKey']).strip()] tmp.append("Incorrect value for 'urgencyKey'!") table.append(tmp) status = 1 if not (str(document['version']).strip()) == ("" if response['version'] is None else str(response['version']).strip()): tmp = [str(document['version']).strip(), str(response['version']).strip()] tmp.append("Incorrect value for 'version'!") table.append(tmp) status = 1 if not (str(document['viaDescription']).strip()) == ("" if response['outage']['via'] is None else str(response['outage']['via']).strip()): tmp = [str(document['viaDescription']).strip(), str(response['outage']['via']).strip()] tmp.append("Incorrect value for 'viaDescription/via'!") table.append(tmp) status = 1 if not (str(document['viaKey']).strip()) == ("" if response['outage']['viaKey'] is None else str(response['outage']['viaKey']).strip()): tmp = [str(document['viaKey']).strip(), str(response['outage']['viaKey']).strip()] tmp.append("Incorrect value for 'viaKey'!") table.append(tmp) status = 1 if not (str(document['warrantyEndDate']).strip()) == ("00000000" if response['entitlement']['warrantyEndDate'] is None else str(response['entitlement']['warrantyEndDate']).strip()): tmp = [str(document['warrantyEndDate']).strip(), str(response['entitlement']['warrantyEndDate']).strip()] tmp.append("Incorrect value for 
'warrantyEndDate'!") table.append(tmp) status = 1 if not (str(document['yearRoundSupport']).strip()) == ("" if response['outage']['support24X7'] is None else str(response['outage']['support24X7']).strip()): tmp = [str(document['yearRoundSupport']).strip(), str(response['outage']['support24X7']).strip()] tmp.append("Incorrect value for 'yearRoundSupport/support24X7'!") table.append(tmp) status = 1 if not (str(document['zzQ1']).strip()) == ("" if response['outage']['zzq1'] is None else str(response['outage']['zzq1']).strip()): tmp = [str(document['zzQ1']).strip(), str(response['outage']['zzq1']).strip()] tmp.append("Incorrect value for 'zzQ1'!") table.append(tmp) status = 1 if not (str(document['zzQ2']).strip()) == ("" if response['outage']['zzq2'] is None else str(response['outage']['zzq2']).strip()): tmp = [str(document['zzQ2']).strip(), str(response['outage']['zzq2']).strip()] tmp.append("Incorrect value for 'zzQ2'!") table.append(tmp) status = 1 if not (str(document['zzQ3']).strip()) == ("" if response['outage']['zzq3'] is None else str(response['outage']['zzq3']).strip()): tmp = [str(document['zzQ3']).strip(), str(response['outage']['zzq3']).strip()] tmp.append("Incorrect value for 'zzQ3'!") table.append(tmp) status = 1 if not (str(document['zzQ4']).strip()) == ("" if response['outage']['zzq4'] is None else str(response['outage']['zzq4']).strip()): tmp = [str(document['zzQ4']).strip(), str(response['outage']['zzq4']).strip()] tmp.append("Incorrect value for 'zzQ4'!") table.append(tmp) status = 1 if not (str(document['zzQ5']).strip()) == ("" if response['outage']['zzq5'] is None else str(response['outage']['zzq5']).strip()): tmp = [str(document['zzQ5']).strip(), str(response['outage']['zzq5']).strip()] tmp.append("Incorrect value for 'zzQ5'!") table.append(tmp) status = 1 if not (str(document['zzQ6']).strip()) == ("" if response['outage']['zzq6'] is None else str(response['outage']['zzq6']).strip()): tmp = [str(document['zzQ6']).strip(), 
str(response['outage']['zzq6']).strip()] tmp.append("Incorrect value for 'zzQ6'!") table.append(tmp) status = 1 if not (str(document['zzQ7']).strip()) == ("" if response['outage']['zzq7'] is None else str(response['outage']['zzq7']).strip()): tmp = [str(document['zzQ7']).strip(), str(response['outage']['zzq7']).strip()] tmp.append("Incorrect value for 'zzQ7'!") table.append(tmp) status = 1 if not (str(document['zzQ8']).strip()) == ("" if response['outage']['zzq8'] is None else str(response['outage']['zzq8']).strip()): tmp = [str(document['zzQ8']).strip(), str(response['outage']['zzq8']).strip()] tmp.append("Incorrect value for 'zzQ8'!") table.append(tmp) status = 1 if not (str(document['zzQ9']).strip()) == ("" if response['outage']['zzq9'] is None else str(response['outage']['zzq9']).strip()): tmp = [str(document['zzQ9']).strip(), str(response['outage']['zzq9']).strip()] tmp.append("Incorrect value for 'zzQ9'!") table.append(tmp) status = 1 if not (str(document['zzQ10']).strip()) == ("" if response['outage']['zzq10'] is None else str(response['outage']['zzq10']).strip()): tmp = [str(document['zzQ10']).strip(), str(response['outage']['zzq10']).strip()] tmp.append("Incorrect value for 'zzQ10'!") table.append(tmp) status = 1 row.append("No Match Found") if not (str(document['ccList']).strip()== ("" if response['outage']['ccCustomer'] is None else str(response['outage']['ccCustomer']).strip())): tmp = [str(document['ccList']).strip(), str(response['outage']['ccCustomer']).strip()] tmp.append("Incorrect value for 'ccList/ccCustomer'!") table.append(tmp) status = 1 if not (str(document['employeeEmail']).strip()== ("" if response['empEmailId'] is None else str(response['empEmailId']).strip())): tmp = [str(document['employeeEmail']).strip(), str(response['empEmailId']).strip()] tmp.append("Incorrect value for 'employeeEmail/empEmailId'!") table.append(tmp) status = 1 if not (str(document['employeeId']).strip()== ("" if response['empId'] is None else 
str(response['empId']).strip())): tmp = [str(document['employeeId']).strip(), str(response['empId']).strip()] tmp.append("Incorrect value for 'employeeId/empId'!") table.append(tmp) status = 1 if not (str(document['internalUse']).strip()== ("" if response['outage']['internalUse'] is None else str(response['outage']['internalUse']).strip())): tmp = [str(document['internalUse']).strip(), str(response['outage']['internalUse']).strip()] tmp.append("Incorrect value for 'internalUse/internalUse'!") table.append(tmp) status = 1 if not (str(document['numberOfSystemsAffected']).strip()== ("" if response['outage']['numOfSystemsAffected'] is None else str(response['outage']['numOfSystemsAffected']).strip())): tmp = [str(document['numberOfSystemsAffected']).strip(), str(response['outage']['numOfSystemsAffected']).strip()] tmp.append("Incorrect value for 'numberOfSystemsAffected/numOfSystemsAffected'!") table.append(tmp) status = 1 if not (str(document['numberOfUsersAffected']).strip()== ("" if response['outage']['numOfUsersAffected'] is None else str(response['outage']['numOfUsersAffected']).strip())): tmp = [str(document['numberOfUsersAffected']).strip(), str(response['outage']['numOfUsersAffected']).strip()] tmp.append("Incorrect value for 'numberOfUsersAffected/numOfUsersAffected'!") table.append(tmp) status = 1 if not (str(document['technicalCategory1']).strip()== ("" if response['prodSeriesTech'] is None else str(response['prodSeriesTech']).strip())): tmp = [str(document['technicalCategory1']).strip(), str(response['prodSeriesTech']).strip()] tmp.append("Incorrect value for 'technicalCategory1/prodSeriesTech'!") table.append(tmp) status = 1 if not (str(document['serviceProduct']).strip()== ("" if response['entitlement']['serviceProduct'] is None else str(response['entitlement']['serviceProduct']).strip())): tmp = [str(document['serviceProduct']).strip(), str(response['entitlement']['serviceProduct']).strip()] tmp.append("Incorrect value for 
'serviceProduct/serviceProduct'!") table.append(tmp) status = 1 print "\n\n##############################################" print "\tMatching Dates details...." print "##############################################\n\n" print "Number of srDates in document: "+str(len(document['srReqDate'])) print "Number of Dates in API response: "+str(len(response['dates'])) if document['srReqDate']: if response['dates']: srReqDate = {'d': document['srReqDate']} for srd in srReqDate['d']: if srd['duration'] == "": srd['duration'] = None if srd['timeUnit'] == "": srd['timeUnit'] = None if srd['dateStamp'] != "": d = str(srd['dateStamp']) yy = d[:4] mm = d[4:6] dd = d[6:8] hh = d[8:10] mmm = d[10:12] ss = d[12:] srd['dateStamp'] = str(yy+"-"+mm+"-"+dd+" "+hh+":"+mmm+":"+ss) dates = {'d': response['dates']} for srd in srReqDate['d']: match_level = 0 found = 0 match_location = 0 counter = 0 old_match_level = 0 match_data = "" for d in dates['d']: match_level = 0 match_data = "" if ("" if srd['duration'] is None else srd['duration']) == ("" if d['duration'] is None else d['duration']): match_level += 1 if ("" if srd['dateStamp'] is None else srd['dateStamp']) == ("" if d['dateStamp'] is None else d['dateStamp']): match_level += 1 if ("" if srd['dateType'] is None else srd['dateType']) == ("" if d['dateType'] is None else d['dateType']): match_level += 1 if ("" if srd['timeUnit'] is None else srd['timeUnit']) == ("" if d['timeUnit'] is None else d['timeUnit']): if match_level >= 3: found = 1 match_level += 1 match_data = srd break; if match_level >= old_match_level: match_location = counter old_match_level = match_level counter += 1 if found == 0: #print "************************************************" print "Dates Data Mismatch, max number of values matched is "+str(old_match_level) print "Kafka ==> "+str(json.dumps(srd, sort_keys=True)) print "API ==> "+str(json.dumps(match_data, sort_keys=True)) tmp = ["", "", "Incorrect value for 'srDate'!"] table.append(tmp) status = 1 print 
"************************************************" else: #print "************************************************" print "Dates Data matched, highest level of match is "+str(match_level)+". Data is "+str(json.dumps(srd)) #print "\tKafak ==> "+str(json.dumps(srd, sort_keys=True)) #print "\tAPI ==> "+str(json.dumps(match_data, sort_keys=True)) print "************************************************" else: print "No dates found in API response, but available in Kafka message." print "Kafka Message: "+str(json.dumps(document['srReqDate'])) else: print "No dates found in Kafka message." for pf in document['partnerFunction']: if str(pf['partnerName']).strip() == "": pf['partnerName'] = None print "\n\n##############################################" print "\tMatching Partner Functions details...." print "##############################################\n\n" print "Number of PartnerFunction in document: "+str(len(document['partnerFunction'])) print "Number of PartnerFunction in API response: "+str(len(response['partnerFunctions'])) if document['partnerFunction']: if response['partnerFunctions']: for pf in document['partnerFunction']: match_level = 0 found = 0 match_location = 0 counter = 0 old_match_level = 0 match_data = "" for pf2 in response['partnerFunctions']: match_level = 0 match_data = "" if ("" if pf['partnerFunctionName'] is None else pf['partnerFunctionName']) == ("" if pf2['partnerFunctionName'] is None else pf2['partnerFunctionName']): match_level += 1 if ("" if pf['partnerFunctionKey'] is None else pf['partnerFunctionKey']) == ("" if pf2['partnerFunctionKey'] is None else pf2['partnerFunctionKey']): match_level += 1 if str(("" if pf['partnerId'] is None else pf['partnerId'])) == str(("" if pf2['partnerID'] is None else pf2['partnerID'])): match_level += 1 if ("" if pf['partnerName'] is None else pf['partnerName']) == ("" if pf2['partnerName'] is None else pf2['partnerName']): if match_level >= 3: found = 1 match_level += 1 match_data = pf2 break; if match_level 
>= old_match_level: match_location = counter old_match_level = match_level match_data = pf2 counter += 1 if found == 0: #print "************************************************" print "ParnterFunction Data Mismatch, highest level of match is "+str(old_match_level) print "Kafka ==> "+str(json.dumps(pf, sort_keys=True)) print "API ==> "+str(json.dumps(match_data, sort_keys=True)) tmp = ["", "", "Incorrect value for 'PartnerFunction'!"] table.append(tmp) status = 1 print "************************************************" else: #print "************************************************" print "PartnerFunction Data matched, highest level of match is "+str(match_level)+". Data is "+str(json.dumps(pf)) #print "Kafka ==> "+str(json.dumps(pf, sort_keys=True)) #print "API ==> "+str(json.dumps(match_data, sort_keys=True)) print "************************************************" else: print "No partners found in API response, but available in Kafka message." print "Kafka Message: "+str(json.dumps(document['partnerFunction'])) else: print "No partners found in Kafka message." if status==0: print "Match Found" row.append("Match Found") else: #print "\nCompared JSONs" #print "Kafka: "+str(document) #print "API: "+str(json.dumps(response, sort_keys=True)) print tabulate(table, headers=["Kafka", "API", "Status"], tablefmt="rst") else: print "No Match Found in Hadoop." 
row.append("No Match Found in Hadoop.") return row client = MongoClient('10.219.48.134', 27017) #client = MongoClient('192.168.56.101', 27017) db = client['SAPEvent'] collection = db['srDetails'] api = HBase() document_no = 0 documents = collection.find({ 'caseId': '2015-1118-T-0603'}) #documents = collection.find({}) ofile = open('srDetails.csv', "wb") writer = csv.writer(ofile, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL) row = ["SNo", "CaseID", "KafkaJSON", "APIResponse", "Status"] writer.writerow(row) for document in documents: row = [] document_no += 1 row.append(document_no) row.append(document['caseId']) row.append(str(document).replace("\n", "")) print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" print "Document No: "+str(document_no) try: #print json.dumps(document, indent=4) row = api.get_case_by_case_id(document, row) #print document['endDate'] except Exception: print Exception.message print(traceback.format_exc()) writer.writerow(row) print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" print "\n\n" ofile.close()
{ "repo_name": "asifhj/Python_SOAP_OSSJ_SAP_Fusion_Kafka_Spark_HBase", "path": "srDetails.py", "copies": "1", "size": "52967", "license": "apache-2.0", "hash": -2022940697461366000, "line_mean": 59.805134189, "line_max": 209, "alpha_frac": 0.5164347613, "autogenerated": false, "ratio": 4.458501683501684, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5474936444801684, "avg_score": null, "num_lines": null }
__author__ = 'asifj' import requests from pymongo import MongoClient import json import csv import traceback import logging from tabulate import tabulate from bson.json_util import dumps client = MongoClient('10.219.48.134', 27017) #client = MongoClient('192.168.56.101', 27017) db = client['ImportedEvents_NOV18'] collection = db['srAttachements'] collection_new = db['srAttachements-new1'] document_no = 0 #2506813, documents = collection.find(no_cursor_timeout=True)[2506814:] inserts = 0 updates = 0 for document in documents: for key, value in document.iteritems(): document[key] = str(value).strip() key = {'caseId': document['SRID']} doc = collection_new.find_one({'caseId': document['SRID']}) del document['_id'] if not doc: inserts += 1 doc_to_ins = key doc_to_ins['attachment'] = [] for key, value in document.iteritems(): document[key] = str(value).strip() doc_to_ins['attachment'].append(document) collection_new.insert(doc_to_ins) else: updates += 1 doc['attachment'].append(document) collection_new.update(key, doc, upsert=True); print "Inserts: "+str(inserts) print "Updates: "+str(updates) print "Total: "+str(inserts+updates)
{ "repo_name": "asifhj/Python_SOAP_OSSJ_SAP_Fusion_Kafka_Spark_HBase", "path": "srAttachements-insert.py", "copies": "1", "size": "1313", "license": "apache-2.0", "hash": 6190701168526998000, "line_mean": 30.0243902439, "line_max": 63, "alpha_frac": 0.6405178979, "autogenerated": false, "ratio": 3.3666666666666667, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.45071845645666664, "avg_score": null, "num_lines": null }
__author__ = 'asifj' import requests from pymongo import MongoClient import json import csv import traceback import logging from tabulate import tabulate from bson.json_util import dumps logging.basicConfig( format='%(asctime)s.%(msecs)s:%(name)s:%(thread)d:%(levelname)s:%(process)d:%(message)s', level=logging.DEBUG ) class HBase: def __init__(self): self.url = "http://172.22.147.248:8092/api/" pass def get_case_by_case_id(self, document, row): print "Making request..." print self.url+"case-manager/cases/"+str(document['caseId']) r = requests.get(self.url+"case-manager/cases/"+str(document['caseId'])) print "CaseID: "+str(document['caseId']) print "Response: "+str(r.status_code) keys = len(document.keys()) print "Keys: "+str(keys) row.append(r.status_code) if r.status_code==200: response = json.loads(r.text) table = [] if not (str(document['caseId']).strip() == "" if response['srId'] is None else str(response['srId']).strip()): print "Incorrect value for 'caseId'!" status = 1 print "Document" print "========" print dumps(document, sort_keys=True) print "Response CaseNotes" print "==================" print dumps(response['caseNotes'][4], sort_keys=True) response_notes_len = len(response['caseNotes']) document_notes_len = len(document['note']) print "No of notes in response: "+str(response_notes_len) print "No of notes in document: "+str(document_notes_len) found = 0 for i in range(0, response_notes_len): tmp = [] if document['note']['tdline'] == response['caseNotes'][i]['noteLogMin']: found == 1 changeDate = "" if response['caseNotes'][i]['changeDate'] is None else str(response['caseNotes'][i]['changeDate']).strip() changeTime = "" if response['caseNotes'][i]['changeTime'] is None else str(response['caseNotes'][i]['changeTime']).strip() originatorRole = "" if response['caseNotes'][i]['originatorRole'] is None else str(response['caseNotes'][i]['originatorRole']).strip() responsibleGroup = "" if response['caseNotes'][i]['responsibleGroup'] is None else 
str(response['caseNotes'][i]['responsibleGroup']).strip() countryKey = "" if response['caseNotes'][i]['countryKey'] is None else str(response['caseNotes'][i]['countryKey']).strip() noteType = "" if response['caseNotes'][i]['noteType'] is None else str(response['caseNotes'][i]['noteType']).strip() creationMethod = "" if response['caseNotes'][i]['creationMethod'] is None else str(response['caseNotes'][i]['creationMethod']).strip() originator = "" if response['caseNotes'][i]['originator'] is None else str(response['caseNotes'][i]['originator']).strip() privatePublic = "" if response['caseNotes'][i]['privatePublic'] is None else str(response['caseNotes'][i]['privatePublic']).strip() supervisor = "" if response['caseNotes'][i]['supervisor'] is None else str(response['caseNotes'][i]['supervisor']).strip() theater = "" if response['caseNotes'][i]['theater'] is None else str(response['caseNotes'][i]['theater']).strip() noteName = "" if response['caseNotes'][i]['noteName'] is None else str(response['caseNotes'][i]['noteName']).strip() changeDate = "" if response['caseNotes'][i]['changeDate'] is None else str(response['caseNotes'][i]['changeDate']).strip() if not str(document['udate']).strip() == changeDate: tmp = [i] tmp.append("udate") tmp.append(document['udate']) tmp.append(response['caseNotes'][i]['changeDate']) tmp.append("Failed") table.append(tmp) found = 1 if not str(document['utime']).strip() == changeTime: tmp = [i] tmp.append("utime") tmp.append(document['utime']) tmp.append(response['caseNotes'][i]['changeTime']) tmp.append("Failed") table.append(tmp) found = 1 if not str(document['zoriginatorrole']).strip() == originatorRole: tmp = [i] tmp.append("zoriginatorrole") tmp.append(document['zoriginatorrole']) tmp.append(response['caseNotes'][i]['originatorRole']) tmp.append("Failed") table.append(tmp) found = 1 if not str(document['zrespgroup']).strip() == responsibleGroup: tmp = [i] tmp.append("zrespgroup") tmp.append(document['zrespgroup']) 
tmp.append(response['caseNotes'][i]['responsibleGroup']) tmp.append("Failed") table.append(tmp) found = 1 if not str(document['zcountry']).strip() == countryKey: tmp = [i] tmp.append("zcountry") tmp.append(document['zcountry']) tmp.append(response['caseNotes'][i]['countryKey']) tmp.append("Failed") table.append(tmp) found = 1 if not str(document['tdid']).strip() == noteType: tmp = [i] tmp.append("tdid") tmp.append(document['tdid']) tmp.append(response['caseNotes'][i]['noteType']) tmp.append("Failed") table.append(tmp) found = 1 if not str(document['zmethod']).strip() == creationMethod: tmp = [i] tmp.append("zmethod") tmp.append(document['zmethod']) tmp.append(response['caseNotes'][i]['creationMethod']) tmp.append("Failed") table.append(tmp) found = 1 if not str(document['zorignator']).strip() == originator: tmp = [i] tmp.append("zorignator") tmp.append(document['zorignator']) tmp.append(response['caseNotes'][i]['originator']) tmp.append("Failed") table.append(tmp) found = 1 if not str(document['zpublic']).strip() == privatePublic: tmp = [i] tmp.append("zpublic") tmp.append(document['zpublic']) tmp.append(response['caseNotes'][i]['privatePublic']) tmp.append("Failed") table.append(tmp) found = 1 if not str(document['zsupervisor']).strip() == supervisor: tmp = [i] tmp.append("zsupervisor") tmp.append(document['zsupervisor']) tmp.append(response['caseNotes'][i]['supervisor']) tmp.append("Failed") table.append(tmp) found = 1 if not str(document['ztheater']).strip() == theater: tmp = [i] tmp.append("ztheater") tmp.append(document['ztheater']) tmp.append(response['caseNotes'][i]['theater']) tmp.append("Failed") table.append(tmp) found = 1 if not str(document['tdname']).strip() == noteName: tmp = [i] tmp.append("tdname") tmp.append(document['tdname']) tmp.append(response['caseNotes'][i]['noteName']) tmp.append("Failed") table.append(tmp) found = 1 if found == 1: print "Match Found" row.append("Match Found") print tabulate(table, headers=["NoteNo", "Key", "Kafka", "API", 
"Status"], tablefmt="rst") else: print tabulate(table, headers=["NoteNo", "Key", "Kafka", "API", "Status"], tablefmt="rst") else: print "No Match Found" row.append("No Match Found") return row def get_case_note_desc_by_case_note_id(self, case_id, note_id): pass def get_case_note_log_by_case_note_id(self, case_id, note_id): pass def get_account_by_account_id(self, account_id): pass def get_account_by_account_name(self, account_name): pass def get_user_by_user_id(self, user_id): pass def get_user_by_user_name(self, user_name): pass client = MongoClient('10.219.48.134', 27017) db = client['SAPNotesTopic'] collection = db['caseNotes'] api = HBase() document_no = 0 documents = collection.find({}) #documents = collection.find({'caseId':'2015-0924-T-2500'}) print {'caseId': '2015-0924-T-2500'} ofile = open('caseNotes.csv', "wb") writer = csv.writer(ofile, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL) row = ["SNo", "CaseID", "KafkaJSON", "APIResponse", "Status"] writer.writerow(row) for document in documents: row = [] document_no += 1 row.append(document_no) row.append(document['caseId']) row.append(str(document).replace("\n", "")) print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" print "Document No: "+str(document_no) try: row = api.get_case_by_case_id(document, row) except Exception: print Exception.message print(traceback.format_exc()) writer.writerow(row) print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" print "\n\n" ofile.close()
{ "repo_name": "asifhj/Python_SOAP_OSSJ_SAP_Fusion_Kafka_Spark_HBase", "path": "caseNotes.py", "copies": "1", "size": "10945", "license": "apache-2.0", "hash": -5050272015149065000, "line_mean": 47.0807174888, "line_max": 160, "alpha_frac": 0.4727272727, "autogenerated": false, "ratio": 4.550935550935551, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.003678478170885217, "num_lines": 223 }
__author__ = 'asifj' import requests from pymongo import MongoClient import json import csv import traceback import logging from tabulate import tabulate logging.basicConfig( format='%(asctime)s.%(msecs)s:%(name)s:%(thread)d:%(levelname)s:%(process)d:%(message)s', level=logging.DEBUG ) class HBase: def __init__(self): self.url = "http://172.22.147.248:8092/api/" pass def get_account_details(self, document, row): #http://172.22.147.248:8092/api/user-manager/accounts print "API URL: "+self.url+"user-manager/accounts/"+str(document['header']['partnerId']) r = requests.get(self.url+"user-manager/accounts/"+str(document['header']['partnerId'])) print "partnerId: "+str(document['header']['partnerId']) print "Response: "+str(r.status_code) keys = len(document.keys()) print "Keys: "+str(keys) row.append(r.status_code) status = 0 if r.status_code == 200: response = json.loads(r.text) table = [] response_account_len = len(response) document_account_len = len(document) print "Number of account details in document: "+str(document_account_len) print "Number of account details in API response: "+str(response_account_len) header = document.get("header", "") market = document.get("marketingAttributes", "") address = document.get("address", "") relationship = document.get("relationship", "") if header: print "Verifying header attributes..." 
if not str(document["header"]["rating"]).strip() == ("" if response["rating"] is None else response["rating"]): tmp = [str(document["header"]["rating"]).strip(), str(response["rating"]).strip(), "Incorrect value for 'rating'!"] table.append(tmp) status = 1 if not str(document["header"]["tranBlockReason"]).strip() == ("" if response["tranBlockReason"] is None else response["tranBlockReason"]): tmp = [str(document["header"]["tranBlockReason"]).strip(), str(response["tranBlockReason"]).strip(), "Incorrect value for 'tranBlockReason'!"] table.append(tmp) status = 1 if not str(document["header"]["accountType"]).strip() == ("" if response["accountType"] is None else response["accountType"]): tmp = [str(document["header"]["accountType"]).strip(), str(response["accountType"]).strip(), "Incorrect value for 'accountType'!"] table.append(tmp) status = 1 if not str(document["header"]["customerSince"]).strip() == ("" if response["customerSince"] is None else response["customerSince"]): tmp = [str(document["header"]["customerSince"]).strip(), str(response["customerSince"]).strip(), "Incorrect value for 'customerSince'!"] table.append(tmp) status = 1 if not str(document["header"]["ratingKey"]).strip() == ("" if response["ratingKey"] is None else response["ratingKey"]): tmp = [str(document["header"][""]).strip(), str(response["ratingKey"]).strip(), "Incorrect value for 'ratingKey'!"] table.append(tmp) status = 1 if not str(document["header"]["sapChangeTime"]).strip() == ("" if response["changeTime"] is None else response["changeTime"]): tmp = [str(document["header"]["sapChangeTime"]).strip(), str(response["changeTime"]).strip(), "Incorrect value for 'sapChangeTime'!"] table.append(tmp) status = 1 '''if not str(document["header"][""]).strip() == ("" if response[""] is None else response["sapCreateDate"]): tmp = [str(document["header"][""]).strip(), str(response[""]).strip(), "Incorrect value for 'ccEngineer'!"] table.append(tmp) status = 1''' if not 
str(document["header"]["commonId"]).strip() == ("" if response["commonId"] is None else response["commonId"]): tmp = [str(document["header"]["commonId"]).strip(), str(response["commonId"]).strip(), "Incorrect value for 'commonId'!"] table.append(tmp) status = 1 if not str(document["header"]["sapCreateTime"]).strip() == ("" if response["createTime"] is None else response["createTime"]): tmp = [str(document["header"][""]).strip(), str(response["createTime"]).strip(), "Incorrect value for 'sapCreateTime'!"] table.append(tmp) status = 1 if not str(document["header"]["accountTypeKey"]).strip() == ("" if response["accountTypeKey"] is None else response["accountTypeKey"]): tmp = [str(document["header"]["accountTypeKey"]).strip(), str(response["accountTypeKey"]).strip(), "Incorrect value for 'accountTypeKey'!"] table.append(tmp) status = 1 if not str(document["header"]["archivingFlag"]).strip() == ("" if response["archivingFlag"] is None else response["archivingFlag"]): tmp = [str(document["header"]["archivingFlag"]).strip(), str(response["archivingFlag"]).strip(), "Incorrect value for 'archivingFlag'!"] table.append(tmp) status = 1 if not str(document["header"]["partnerGrouping"]).strip() == ("" if response["partnerGrouping"] is None else response["partnerGrouping"]): tmp = [str(document["header"]["partnerGrouping"]).strip(), str(response["partnerGrouping"]).strip(), "Incorrect value for 'partnerGrouping'!"] table.append(tmp) status = 1 if not str(document["header"]["accountName"]).strip() == ("" if response["accountName"] is None else response["accountName"]): tmp = [str(document["header"]["accountName"]).strip(), str(response["accountName"]).strip(), "Incorrect value for 'accountName'!"] table.append(tmp) status = 1 if not str(document["header"]["accountClassKey"]).strip() == ("" if response["accountClassKey"] is None else response["accountClassKey"]): tmp = [str(document["header"]["accountClassKey"]).strip(), str(response["accountClassKey"]).strip(), "Incorrect value 
for 'accountClassKey'!"] table.append(tmp) status = 1 if not str(document["header"]["statusKey"]).strip() == ("" if response["statusKey"] is None else response["statusKey"]): tmp = [str(document["header"]["statusKey"]).strip(), str(response["statusKey"]).strip(), "Incorrect value for 'statusKey'!"] table.append(tmp) status = 1 if not str(document["header"]["partnerGroupingKey"]).strip() == ("" if response["partnerGroupingKey"] is None else response["partnerGroupingKey"]): tmp = [str(document["header"]["partnerGroupingKey"]).strip(), str(response["partnerGroupingKey"]).strip(), "Incorrect value for 'partnerGroupingKey'!"] table.append(tmp) status = 1 if not str(document["header"]["serviceRenewalDate"]).strip() == ("" if response["serviceRenewalDate"] is None else response["serviceRenewalDate"]): tmp = [str(document["header"]["serviceRenewalDate"]).strip(), str(response["serviceRenewalDate"]).strip(), "Incorrect value for 'serviceRenewalDate'!"] table.append(tmp) status = 1 if not str(document["header"]["status"]).strip() == ("" if response["status"] is None else response["status"]): tmp = [str(document["header"]["status"]).strip(), str(response["status"]).strip(), "Incorrect value for 'status'!"] table.append(tmp) status = 1 if not str(document["header"]["tranBlockReasonKey"]).strip() == ("" if response["tranBlockReasonKey"] is None else response["tranBlockReasonKey"]): tmp = [str(document["header"]["tranBlockReasonKey"]).strip(), str(response["tranBlockReasonKey"]).strip(), "Incorrect value for 'tranBlockReasonKey'!"] table.append(tmp) status = 1 if not str(document["header"]["accountGroup"]).strip() == ("" if response["accountGroup"] is None else response["accountGroup"]): tmp = [str(document["header"]["accountGroup"]).strip(), str(response["accountGroup"]).strip(), "Incorrect value for 'accountGroup'!"] table.append(tmp) status = 1 if not str(document["header"]["sapCreateDate"]).strip() == ("" if response["createDate"] is None else response["createDate"]): tmp = 
[str(document["header"]["sapCreateDate"]).strip(), str(response["createDate"]).strip(), "Incorrect value for 'sapCreateDate'!"] table.append(tmp) status = 1 if not str(document["header"]["dataOriginKey"]).strip() == ("" if response["dataOriginKey"] is None else response["dataOriginKey"]): tmp = [str(document["header"]["dataOriginKey"]).strip(), str(response["dataOriginKey"]).strip(), "Incorrect value for 'dataOriginKey'!"] table.append(tmp) status = 1 if not str(document["header"]["accountClass"]).strip() == ("" if response["accountClass"] is None else response["accountClass"]): tmp = [str(document["header"]["accountClass"]).strip(), str(response["accountClass"]).strip(), "Incorrect value for 'accountClass'!"] table.append(tmp) status = 1 if not str(document["header"]["sapChangeDate"]).strip() == ("" if response["changeDate"] is None else response["changeDate"]): tmp = [str(document["header"]["sapChangeDate"]).strip(), str(response["changeDate"]).strip(), "Incorrect value for 'sapChangeDate/changeDate'!"] table.append(tmp) status = 1 if not str(document["header"]["contactFirstName"]).strip() == ("" if response["firstName"] is None else response["firstName"]): tmp = [str(document["header"]["contactFirstName"]).strip(), str(response["firstName"]).strip(), "Incorrect value for 'contactFirstName'!"] table.append(tmp) status = 1 if not str(document["header"]["accountGroupKey"]).strip() == ("" if response["accountGroupKey"] is None else response["accountGroupKey"]): tmp = [str(document["header"]["accountGroupKey"]).strip(), str(response["accountGroupKey"]).strip(), "Incorrect value for 'accountGroupKey'!"] table.append(tmp) status = 1 if not str(document["header"]["dataOrigin"]).strip() == ("" if response["dataOrigin"] is None else response["dataOrigin"]): tmp = [str(document["header"]["dataOrigin"]).strip(), str(response["dataOrigin"]).strip(), "Incorrect value for 'dataOrigin'!"] table.append(tmp) status = 1 if not str(document["header"]["partnerType"]).strip() == ("" 
if response["partnerType"] is None else response["partnerType"]): tmp = [str(document["header"]["partnerType"]).strip(), str(response["partnerType"]).strip(), "Incorrect value for 'partnerType'!"] table.append(tmp) status = 1 if not str(document["header"]["contactLastNname"]).strip() == ("" if response["lastName"] is None else response["lastName"]): tmp = [str(document["header"]["contactLastNname"]).strip(), str(response["lastName"]).strip(), "Incorrect value for 'contactLastNname'!"] table.append(tmp) status = 1 if not str(document["header"]["partnerTypeKey"]).strip() == ("" if response["partnerKeyType"] is None else response["partnerKeyType"]): tmp = [str(document["header"]["partnerTypeKey"]).strip(), str(response["partnerKeyType"]).strip(), "Incorrect value for 'partnerTypeKey'!"] table.append(tmp) status = 1 else: print "No header attributes found in document!" if address: print "Verifying address attributes..." if not str(document["address"]["website"]).strip() == ("" if response["website"] is None else response["website"]): tmp = [str(document["address"]["website"]).strip(), str(response["website"]).strip(), "Incorrect value for 'website'!"] table.append(tmp) status = 1 if not str(document["address"]["city"]).strip() == ("" if response["city"] is None else response["city"]): tmp = [str(document["address"]["city"]).strip(), str(response["city"]).strip(), "Incorrect value for 'city'!"] table.append(tmp) status = 1 if not str(document["address"]["language"]).strip() == ("" if response["language"] is None else response["language"]): tmp = [str(document["address"]["language"]).strip(), str(response["language"]).strip(), "Incorrect value for 'language'!"] table.append(tmp) status = 1 if not str(document["address"]["extension"]).strip() == ("" if response["extension"] is None else response["extension"]): tmp = [str(document["address"]["extension"]).strip(), str(response["extension"]).strip(), "Incorrect value for 'extension'!"] table.append(tmp) status = 1 if not 
str(document["address"]["street1"]).strip() == ("" if response["street1"] is None else response["street1"]): tmp = [str(document["address"]["street1"]).strip(), str(response["street1"]).strip(), "Incorrect value for 'street1'!"] table.append(tmp) status = 1 if not str(document["address"]["street2"]).strip() == ("" if response["street2"] is None else response["street2"]): tmp = [str(document["address"]["street2"]).strip(), str(response["street2"]).strip(), "Incorrect value for 'street2'!"] table.append(tmp) status = 1 if not str(document["address"]["street3"]).strip() == ("" if response["street3"] is None else response["street3"]): tmp = [str(document["address"]["street3"]).strip(), str(response["street3"]).strip(), "Incorrect value for 'street3'!"] table.append(tmp) status = 1 if not str(document["address"]["street4"]).strip() == ("" if response["street4"] is None else response["street4"]): tmp = [str(document["address"]["street4"]).strip(), str(response["street4"]).strip(), "Incorrect value for 'street4'!"] table.append(tmp) status = 1 if not str(document["address"]["communicationType"]).strip() == ("" if response["communicationType"] is None else response["communicationType"]): tmp = [str(document["address"]["communicationType"]).strip(), str(response["communicationType"]).strip(), "Incorrect value for 'communicationType'!"] table.append(tmp) status = 1 if not str(document["address"]["phone"]).strip() == ("" if response["phone"] is None else response["phone"]): tmp = [str(document["address"]["phone"]).strip(), str(response["phone"]).strip(), "Incorrect value for 'phone'!"] table.append(tmp) status = 1 if not str(document["address"]["email"]).strip() == ("" if response["email"] is None else response["email"]): tmp = [str(document["address"]["email"]).strip(), str(response["email"]).strip(), "Incorrect value for 'email'!"] table.append(tmp) status = 1 if not str(document["address"]["communicationTypeKey"]).strip() == ("" if response["communicationTypeKey"] is None 
else response["communicationTypeKey"]): tmp = [str(document["address"]["communicationTypeKey"]).strip(), str(response["communicationTypeKey"]).strip(), "Incorrect value for 'communicationTypeKey'!"] table.append(tmp) status = 1 if not str(document["address"]["country"]).strip() == ("" if response["country"] is None else response["country"]): tmp = [str(document["address"]["country"]).strip(), str(response["country"]).strip(), "Incorrect value for 'country'!"] table.append(tmp) status = 1 if not str(document["address"]["postalCode"]).strip() == ("" if response["postalCode"] is None else response["postalCode"]): tmp = [str(document["address"]["postalCode"]).strip(), str(response["postalCode"]).strip(), "Incorrect value for 'postalCode'!"] table.append(tmp) status = 1 if not str(document["address"]["houseNumber"]).strip() == ("" if response["houseNumber"] is None else response["houseNumber"]): tmp = [str(document["address"]["houseNumber"]).strip(), str(response["houseNumber"]).strip(), "Incorrect value for 'houseNumber'!"] table.append(tmp) status = 1 if not str(document["address"]["region"]).strip() == ("" if response["region"] is None else response["region"]): tmp = [str(document["address"]["region"]).strip(), str(response["region"]).strip(), "Incorrect value for 'region'!"] table.append(tmp) status = 1 if not str(document["address"]["transportZone"]).strip() == ("" if response["transportZone"] is None else response["transportZone"]): tmp = [str(document["address"]["transportZone"]).strip(), str(response["transportZone"]).strip(), "Incorrect value for 'transportZone'!"] table.append(tmp) status = 1 else: print "No address attributes found in d,ocument!" if market: print "Verifying address attributes..." 
if not str(document["marketingAttributes"]["serviceRequestEntitlement"]).strip() == ("" if response["serviceRequestEntitlement"] is None else response["serviceRequestEntitlement"]): tmp = [str(document["marketingAttributes"]["serviceRequestEntitlement"]).strip(), str(response["serviceRequestEntitlement"]).strip(), "Incorrect value for 'serviceRequestEntitlement'!"] table.append(tmp) status = 1 if not str(document["marketingAttributes"]["srByEmail"]).strip() == ("" if response["srByEmail"] is None else response["srByEmail"]): tmp = [str(document["marketingAttributes"]["srByEmail"]).strip(), str(response["srByEmail"]).strip(), "Incorrect value for 'srByEmail'!"] table.append(tmp) status = 1 if not str(document["marketingAttributes"]["citizenship"]).strip() == ("" if response["citizenship"] is None else response["citizenship"]): tmp = [str(document["marketingAttributes"]["citizenship"]).strip(), str(response["citizenship"]).strip(), "Incorrect value for 'citizenship'!"] table.append(tmp) status = 1 if not str(document["marketingAttributes"]["rmaEntitlement"]).strip() == ("" if response["rmaEntitlement"] is None else response["rmaEntitlement"]): tmp = [str(document["marketingAttributes"]["rmaEntitlement"]).strip(), str(response["rmaEntitlement"]).strip(), "Incorrect value for 'rmaEntitlement'!"] table.append(tmp) status = 1 if not str(document["marketingAttributes"]["srEntitlement"]).strip() == ("" if response["srEntitlement"] is None else response["srEntitlement"]): tmp = [str(document["marketingAttributes"]["srEntitlement"]).strip(), str(response["srEntitlement"]).strip(), "Incorrect value for 'srEntitlement'!"] table.append(tmp) status = 1 if not str(document["marketingAttributes"]["authorizedForRma"]).strip() == ("" if response["autorizedForRMA"] is None else response["autorizedForRMA"]): tmp = [str(document["marketingAttributes"]["authorizedForRma"]).strip(), str(response["autorizedForRMA"]).strip(), "Incorrect value for 'authorizedForRma/autorizedForRMA'!"] 
table.append(tmp) status = 1 if not str(document["marketingAttributes"]["accountTemperature"]).strip() == ("" if response["accountTemperature"] is None else response["accountTemperature"]): tmp = [str(document["marketingAttributes"]["accountTemperature"]).strip(), str(response["accountTemperature"]).strip(), "Incorrect value for 'accountTemperature'!"] table.append(tmp) status = 1 if not str(document["marketingAttributes"]["entitlementValidTill"]).strip() == ("" if response["entitlementValidTill"] is None else response["entitlementValidTill"]): tmp = [str(document["marketingAttributes"]["entitlementValidTill"]).strip(), str(response["entitlementValidTill"]).strip(), "Incorrect value for 'entitlementValidTill'!"] table.append(tmp) status = 1 if not str(document["marketingAttributes"]["temperatureEndDate"]).strip() == ("" if response["temperatureEndDate"] is None else response["temperatureEndDate"]): tmp = [str(document["marketingAttributes"]["temperatureEndDate"]).strip(), str(response["temperatureEndDate"]).strip(), "Incorrect value for 'temperatureEndDate'!"] table.append(tmp) status = 1 if not str(document["marketingAttributes"]["accountServiceLevel"]).strip() == ("" if response["accountServiceLevel"] is None else response["accountServiceLevel"]): tmp = [str(document["marketingAttributes"]["accountServiceLevel"]).strip(), str(response["accountServiceLevel"]).strip(), "Incorrect value for 'accountServiceLevel'!"] table.append(tmp) status = 1 if not str(document["marketingAttributes"]["analysisFlag"]).strip() == ("" if response["analysisFlag"] is None else response["analysisFlag"]): tmp = [str(document["marketingAttributes"]["analysisFlag"]).strip(), str(response["analysisFlag"]).strip(), "Incorrect value for 'analysisFlag'!"] table.append(tmp) status = 1 if not str(document["marketingAttributes"]["courtesyCall"]).strip() == ("" if response["courtesyCall"] is None else response["courtesyCall"]): tmp = [str(document["marketingAttributes"]["courtesyCall"]).strip(), 
str(response["courtesyCall"]).strip(), "Incorrect value for 'courtesyCall'!"] table.append(tmp) status = 1 ''' if not str(document["marketingAttributes"]["abcd"]).strip() == ("" if response["abcd"] is None else response["abcd"]): tmp = [str(document["marketingAttributes"]["abcd"]).strip(), str(response["abcd"]).strip(), "Incorrect value for 'ccEngineer'!"] table.append(tmp) status = 1 if not str(document["marketingAttributes"]["abcd"]).strip() == ("" if response["abcd"] is None else response["abcd"]): tmp = [str(document["marketingAttributes"]["abcd"]).strip(), str(response["abcd"]).strip(), "Incorrect value for 'ccEngineer'!"] table.append(tmp) status = 1 if not str(document["marketingAttributes"]["abcd"]).strip() == ("" if response["abcd"] is None else response["abcd"]): tmp = [str(document["marketingAttributes"]["abcd"]).strip(), str(response["abcd"]).strip(), "Incorrect value for 'ccEngineer'!"] table.append(tmp) status = 1''' else: print "No address attributes found in document!" tmp = [] if status == 0: print "Match Found" row.append("Match Found") else: print tabulate(table, headers=["LinkNo", "Key", "Kafka", "API", "Status"], tablefmt="rst") else: print "No Match Found in Hadoop." 
row.append("No Match Found in Hadoop.") return row #client = MongoClient('10.219.48.134', 27017) client = MongoClient('192.168.56.101', 27017) db = client['SAPEvent'] collection = db['customerMaster'] api = HBase() document_no = 0 documents = collection.find({}) #documents = collection.find({'header.partnerId': 100001407}) ofile = open('customerMaster.csv', "wb") writer = csv.writer(ofile, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL) row = ["SNo", "CaseID", "KafkaJSON", "APIResponse", "Status"] writer.writerow(row) for document in documents: row = [] document_no += 1 row.append(document_no) print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" print "Document No: "+str(document_no) if not document.get("header", ""): print "No header in document..." row.append("NA") row.append(str(document).replace("\n", "")) row.append("No header in document") else: row.append(document['header']['partnerId']) row.append(str(document).replace("\n", "")) try: row = api.get_account_details(document, row) except Exception: print Exception.message print(traceback.format_exc()) writer.writerow(row) print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" print "\n\n" ofile.close()
{ "repo_name": "asifhj/Python_SOAP_OSSJ_SAP_Fusion_Kafka_Spark_HBase", "path": "customerMaster.py", "copies": "1", "size": "29036", "license": "apache-2.0", "hash": -4879155310968164000, "line_mean": 68.3050847458, "line_max": 197, "alpha_frac": 0.5201818432, "autogenerated": false, "ratio": 4.803308519437552, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5823490362637552, "avg_score": null, "num_lines": null }
__author__ = 'asifj' import logging from kafka import KafkaConsumer import json import traceback from bson.json_util import dumps from kafka import SimpleProducer, KafkaClient from utils import Utils logging.basicConfig( format='%(asctime)s.%(msecs)s:%(name)s:%(thread)d:%(levelname)s:%(process)d:%(message)s', level=logging.INFO ) inputs = [] consumer = KafkaConsumer("SAPEvent", bootstrap_servers=['172.22.147.242:9092', '172.22.147.232:9092', '172.22.147.243:9092'], auto_commit_enable=False, auto_offset_reset="smallest") message_no = 1 inputs = consumer.fetch_messages() '''for message in consumer: topic = message.topic partition = message.partition offset = message.offset key = message.key message = message.value print "=================================================================================================================" if message is not None: try: document = json.loads(message) collection = document.keys()[0] if collection == "customerMaster": print "customerMaster" elif collection == "srAttachements": #print dumps(document, sort_keys=True) inputs.append(document) except Exception, err: print "CustomException" print "Kafka Message: "+str(message) print(traceback.format_exc()) print "=================================================================================================================" print "\n" message_no += 1 ''' # To send messages synchronously kafka = KafkaClient('172.22.147.232:9092,172.22.147.242:9092,172.22.147.243:9092') producer = SimpleProducer(kafka) for i in inputs: try: #producer.send_messages(b'SAPEvent', json.dumps(input)) document = json.loads(str(i.value)) type = document.keys()[0] if type == "srDetails": print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" row = [] utils = Utils() row = utils.validate_sr_details( document['srDetails'], row) print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" print 
"\n\n" except Exception: print "Kafka: "+str(document) print Exception.message print(traceback.format_exc())
{ "repo_name": "asifhj/Python_SOAP_OSSJ_SAP_Fusion_Kafka_Spark_HBase", "path": "KafkaCP.py", "copies": "1", "size": "2510", "license": "apache-2.0", "hash": 1918513525047149800, "line_mean": 37.21875, "line_max": 181, "alpha_frac": 0.4928286853, "autogenerated": false, "ratio": 4.312714776632302, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.001373848974177497, "num_lines": 64 }
__author__ = 'asifj' import requests from pymongo import MongoClient import json import csv import traceback import logging from tabulate import tabulate from bson.json_util import dumps client = MongoClient('10.219.48.134', 27017) #client = MongoClient('192.168.56.101', 27017) db = client['ImportedEvents'] collection = db['srDate'] collection_new = db['srDates-new'] document_no = 0 documents = collection.find(no_cursor_timeout=True) inserts = 0 updates = 0 for document in documents: for key, value in document.iteritems(): document[key] = str(value).strip() key = {'caseId': document['SRID']} doc = collection_new.find_one({'caseId': document['SRID']}) del document['_id'] if not doc: inserts += 1 doc_to_ins = key doc_to_ins['dates'] = [] for key, value in document.iteritems(): document[key] = str(value).strip() doc_to_ins['dates'].append(document) collection_new.insert(doc_to_ins) else: updates += 1 doc['dates'].append(document) collection_new.update(key, doc, upsert=True); #print "Inserts: "+str(inserts) #print "Updates: "+str(updates) print "Total: "+str(inserts+updates)
{ "repo_name": "asifhj/Python_SOAP_OSSJ_SAP_Fusion_Kafka_Spark_HBase", "path": "srDates-insert.py", "copies": "1", "size": "1259", "license": "apache-2.0", "hash": 6901324190817669000, "line_mean": 28.7073170732, "line_max": 63, "alpha_frac": 0.6282764098, "autogenerated": false, "ratio": 3.393530997304582, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4521807407104582, "avg_score": null, "num_lines": null }
__author__ = 'Asish Mahapatra: asishkm@gmail.com' import requests import re import os import img2pdf from multiprocessing import Process, Manager import sys # works only when the scribd document is composed solely of images (.jpg) img_folder = './images' DEBUG = True output_folder = '' json_pattern = re.compile(r'https.*scribdassets.*jsonp') img_pattern = re.compile(r'orig.*(http.*scribd.{,45}jpg)') def sort_json(link): if 'jpg' in link: pat = re.compile(r'([0-9]*)-.*jpg') s = link.index('jpg')-16 elif 'jsonp' in link: pat = re.compile(r'([0-9]*)-.*jsonp') s = link.index('jsonp')-16 val = re.search(pat, link[s:]) return int(val.groups()[0]) def write_image(img_file, img_link): f = open(img_file, 'wb') while 1: try: f.write(requests.get(img_link).content) except requests.ConnectionError: print 'break at getting img ', img_link continue break f.close() def get_images(scribd_link): print 'at get_images' scribd_conn = requests.get(scribd_link) json_list = re.findall(json_pattern, scribd_conn.content) img_links = re.findall(img_pattern, scribd_conn.content) json_list.extend(img_links) json_list = sorted(json_list, key = sort_json) #n = str(len(str(len(json_list)))) return json_list def get_img_links(json_list): print 'at get_img_links' img_links = [] for i, link in enumerate(json_list): if i%50 ==0: print i if 'jpg' in link: img_links.append(link) elif 'jsonp' in link: while 1: try: json_conn = requests.get(link) except requests.ConnectionError: print 'breaking at getting json', continue break resp = json_conn.content try: img_link = re.findall(img_pattern, resp)[0] except IndexError: print 'no img link' continue img_links.append(img_link) #n = str(len(str(len(img_links)))) return img_links def download_img(img_links, start_index, end_index, img_folder, pdf_file_name, f): print 'at downloading_img' image_list = set() print len(img_links), 'files' print 'images downloaded to {}'.format(img_folder) i = start_index end_index = min(end_index, len(img_links)) n = 
str(len((str(len(img_links))))) while i < end_index: img_name = '{}{:0{k}d}.jpg'.format(pdf_file_name,i+1, k = n) img_file = os.path.join(img_folder, img_name) image_list.add(img_file) if os.path.exists(img_file): print img_file, 'exists' continue write_image(img_file, img_links[i]) i += 1 print 'Completed img {} \n'.format(i+1) f.extend(image_list) def convert_to_pdf(img_list, output_folder, pdf_file_name): pdf_bytes = img2pdf.convert(img_list, dpi = 100) with open(os.path.join(output_folder, pdf_file_name), 'wb') as f: f.write(pdf_bytes) if __name__ == "__main__": global f, image_list processes = [] a = raw_input('''scribd link, pdf file name, \ img_folder, out_folder separated by spaces \n''') print a.split() scribd, pdf, img_folder, out_folder = a.split() img_links = get_img_links(get_images(scribd)) n = len(img_links) print 'image links scraped', n manager = Manager() f = manager.list() for i in range(5): process = Process(target = download_img, args = [img_links, i*n/4, (i+1)*n/4, img_folder, pdf, f]) print 'process {} started'.format(i) process.start() processes.append(process) for process in processes: process.join() print 'image list secured' image_list = sorted(list(f)) print 'converting' convert_to_pdf(image_list, out_folder, pdf+'.pdf')
{ "repo_name": "kluge-iitk/Scribd_Image_Downloader", "path": "scribd_downloader.py", "copies": "1", "size": "4027", "license": "mit", "hash": -2268019747635987200, "line_mean": 25.3202614379, "line_max": 82, "alpha_frac": 0.5723863919, "autogenerated": false, "ratio": 3.3446843853820596, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9367080137839895, "avg_score": 0.009998127888432892, "num_lines": 153 }
__author__ = 'Asish Mahapatra: asishkm@gmail.com' import requests import re import os import img2pdf from multiprocessing import Process, Manager import time json_pattern = re.compile(r'https.*scribdassets.*jsonp') img_pattern = re.compile(r'orig.*(http.*scribd.{,45}jpg)') patterns = {'jpg': re.compile(r'([0-9]*)-.*jpg'), 'jsonp': re.compile(r'([0-9]*)-.*jsonp')} DEBUG = False def sort_json(link): ''' Gets the page number of the image or the jsonp file. Urls have the forum 'pagenum-randomhash.jpg / jsonp' ''' for pattern in patterns: if pattern in link: s = link.index(pattern)-16 val = re.search(patterns[pattern], link[s:]) return int(val.groups()[0]) def img_jsonp_urls(scribd_link): ''' Gets a sorted list of URLs based on page number with both jsonp or jpg extensions ''' print 'at get_images' scribd_conn = requests.get(scribd_link) json_list = re.findall(json_pattern, scribd_conn.content) img_links = re.findall(img_pattern, scribd_conn.content) json_list.extend(img_links) json_list = sorted(json_list, key = sort_json) return json_list def write_image(img_file, img_link): ''' Downloads the image from img_link onto the img_file ''' f = open(img_file, 'wb') while 1: try: f.write(requests.get(img_link).content) except requests.ConnectionError: print 'break at getting img ', img_link, img_file continue except Exception as e: print e, img_file continue break f.close() def get_img_url(jsonp_link): ''' Gets the image url from a .jsonp link ''' while 1: try: json_conn = requests.get(jsonp_link) except requests.ConnectionError: print 'breaking at getting json', jsonp_link continue break resp = json_conn.content try: img_link = re.findall(img_pattern, resp)[0] except IndexError: print 'no img link' img_link = '' return img_link def convert_to_pdf(img_list, output_folder, pdf_file_name): ''' Converts a list of image files into a pdf at the ouput directory ''' pdf_bytes = img2pdf.convert(img_list, dpi = 100) with open(os.path.join(output_folder, pdf_file_name), 'wb') as f: 
f.write(pdf_bytes) def download_img(urls, start, end, img_dir, pdf_name, shared): ''' Downloads the images from the start index to end index of the list of urls (urls) to the img_dir Also appends the paths of the images to the shared list created my Manager.manager().list() ''' print 'at downloading_img' image_list = set() print len(urls), 'files' print 'images downloaded to {}'.format(img_dir) i = start end = min(end, len(urls)) n = str(len(str(len(urls)))) while i < end: img_name = '{}{:0{k}d}.jpg'.format(pdf_name, i+1, k = n) img_path = os.path.join(img_dir, img_name) image_list.add(img_path) if os.path.exists(img_path): print img_path, 'exists' i += 1 continue if 'jsonp' in urls[i]: img_url = get_img_url(urls[i]) else: img_url = urls[i] write_image(img_path, img_url) i += 1 if DEBUG: print 'Completed img {}'.format(i+1) shared.extend(image_list) if __name__ == '__main__': processes = [] a = raw_input('''scribd link, pdf file name, \ img_folder, out_folder separated by spaces \n''') print a.split() scribd, pdf, img_folder, out_folder = a.split() N = int(raw_input('Enter number of separate processes: ')) s = time.clock() print 'started at', time.ctime() combined_list = img_jsonp_urls(scribd) n = len(combined_list) manager = Manager() img_files = manager.list() k = n/N for i in range(N): start = k*i end = k*(i+1) if i == N-1: end = max(end, n) process = Process(target = download_img, args = [combined_list, start, end, img_folder, pdf, img_files]) print 'process {} started'.format(i) process.start() processes.append(process) for process in processes: process.join() print 'image file names secured' img_files = sorted(list(img_files)) print 'converting' convert_to_pdf(img_files, out_folder, pdf + '.pdf') print 'finished at', time.ctime() print 'time taken: {} seconds'.format(time.clock() - s)
{ "repo_name": "kluge-iitk/Scribd_Image_Downloader", "path": "scribd_downloader1.py", "copies": "1", "size": "4727", "license": "mit", "hash": 3555740545377851400, "line_mean": 24.8306010929, "line_max": 72, "alpha_frac": 0.5722445526, "autogenerated": false, "ratio": 3.511887072808321, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4584131625408321, "avg_score": null, "num_lines": null }
from collections import Counter import jellyfish import scipy.stats from scipy import integrate import numpy as np import datetime import math #Each function takes two inputs and give back a feature score (a distance measure) def levenshtein_similarity(s, t): """ Levenshtein Similarity """ Ns = len(s); Nt = len(t); lev_sim = 1.0 - (jellyfish.levenshtein_distance(s, t)) / float(max(Ns, Nt)) return lev_sim def jaro_winkler_similarity(s, t): """ Jaro-Winkler Similarity """ jw_sim = jellyfish.jaro_winkler(s, t) return jw_sim #Get an aggregate of terms in your text def text_to_vector(text): return Counter(text) def gaussian_overlap(data1,data2): """finds the area overlap between two bell curves. Data can be provided as list of numbers. data1,data2: list of numbers. Returns a float that represents the area of intersection""" mean1=np.mean(data1) mean2=np.mean(data2) std1=np.std(data1) std2=np.std(data1) f = lambda x: min(scipy.stats.norm(mean1, std1).pdf(x),scipy.stats.norm(mean2, std2).pdf(x)) area, error=integrate.quad(f, -np.inf,+np.inf) area = float(area) if math.isnan(area): area=0.0 return area #get cosine similarity between two document vectors def cosine_similarity(vec1, vec2): intersection = set(vec1.keys()) & set(vec2.keys()) numerator = sum([vec1[x] * vec2[x] for x in intersection]) sum1 = sum([vec1[x] ** 2 for x in vec1.keys()]) sum2 = sum([vec2[x] ** 2 for x in vec2.keys()]) denominator = math.sqrt(sum1) * math.sqrt(sum2) if not denominator: return 0.0 else: return float(numerator) / denominator #get cosine similarity between texts def get_cosine_similarity(text1,text2): vec1=text_to_vector(text1) vec2=text_to_vector(text2) dist=cosine_similarity(vec1,vec2) return dist if __name__ == '__main__': # example of guassian intersection data1=[1,2,3,3,2,1] data2=[4,5,6,6,5,4] area,error=gaussian_overlap(data1,data2) print area
{ "repo_name": "YongchaoShang/tika-img-similarity", "path": "features.py", "copies": "2", "size": "2683", "license": "apache-2.0", "hash": -8946976321610435000, "line_mean": 23.1711711712, "line_max": 125, "alpha_frac": 0.6880357808, "autogenerated": false, "ratio": 3.1902497027348393, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.487828548353484, "avg_score": null, "num_lines": null }
import itertools import features as feat import math import re #split a string when we see a transition from one type to another say alpha, numeric, spl chars. def break_natural_boundaries(string): stringbreak=[] if len(string.split(' ')) > 1: stringbreak = string.split(' ') else: spl = '[a-z][\%|\$|\^|\*|\@|\!|\_|\-|\(|\)|\:|\;|\'|\"|\{|\}|\[|\]|]' up = '[A-Z]' low = '[a-z]' num = '\d' matchindex = set() matchindex.update(set(m.start() for m in re.finditer(up + low, string))) matchindex.update(set(m.start() for m in re.finditer(low + up, string))) matchindex.update(set(m.start() for m in re.finditer(num + up, string))) matchindex.update(set(m.start() for m in re.finditer(up + num, string))) matchindex.update(set(m.start() for m in re.finditer(low + num, string))) matchindex.update(set(m.start() for m in re.finditer(num + low, string))) matchindex.update(set(m.start() for m in re.finditer(spl + up, string))) matchindex.update(set(m.start() for m in re.finditer(up + spl, string))) matchindex.update(set(m.start() for m in re.finditer(low + spl, string))) matchindex.update(set(m.start() for m in re.finditer(spl + low, string))) matchindex.update(set(m.start() for m in re.finditer(spl + num, string))) matchindex.update(set(m.start() for m in re.finditer(num + spl, string))) matchindex.add(len(string)-1) matchindex = sorted(matchindex) start = 0 for i in matchindex: end = i stringbreak.append(string[start:end + 1]) start = i+1 return stringbreak def meta_levenshtein(string1,string2,Sim='levenshtein',theta=0.5,strict=-1,idf=dict()): ''' Implements ideas from the paper : Robust Similarity Measures for Named Entities Matching by Erwan et al. Sim = jaro_winkler, levenshtein : can be chosen as the secondary matching function. theta is the secondary similarity threshold: If set higher it will be more difficult for the strings to match. 
strict=-1 for doing all permutations of the substrings strict=1 for no permutations idf=provide a dictionary for {string(word),float(idf od the word)}: More useful when mathings multi word entities (And word importances are very important) like: 'harry potter', 'the wizard harry potter' ''' # set the secondary simlarity function if Sim == 'levenshtein': simf = lambda x, y: feat.levenshtein_similarity(unicode(x, 'utf-8'), unicode(y, 'utf-8')) elif Sim=='jaro_winkler': simf = lambda x, y: feat.jaro_winkler_similarity(unicode(x, 'utf-8'), unicode(y, 'utf-8')) # set the idf and normalization functions if len(idf)>0: idf=lambda x,y :idf[x]*idf[y] norm=lambda x,y,ex,ey: math.sqrt(sum([i**2 for i in ex]))*math.sqrt(sum([i**2 for i in ey])) else: idf=lambda x,y:1 norm=lambda x,y,ex,ey:max(len(x),len(y)) # break the string down into sub-strings ( if it has not spaces already) string1break=break_natural_boundaries(string1) string2break=break_natural_boundaries(string2) # swap the bigger one to string2 if len(string1break)>len(string2break): temp=string1break string1break=string2break string2break=temp # make permutations of srtingbreak perm1 = [] perm2 = [] if strict==-1: perm1=itertools.permutations(string1break) perm2=itertools.permutations(string2break) elif strict==1: perm1.append(string1break) perm2.append(string2break) # Do secondary matching for each permutation and each (Levenshtein) shift (E=edges that qualify/remain after applying the threshold: theta) bestscore=0.0 for st1 in perm1: for st2 in perm2: for k in range (0,len(st2)-len(st1)+1): permscore = 0.0 E1=[] E2=[] for i in range(0,len(st1)): # calculate the secondary similarites for a fixed instance simtemp=simf(st1[i],st2[i+k])*idf(st1[i],st2[i+k]) if simtemp>=theta: E1.append(st1[i]) E2.append(st2[i+k]) permscore+=simtemp if permscore>bestscore: bestscore=permscore bestscore=bestscore/norm(st1,st2,E1,E2) return bestscore if __name__ == '__main__': #usage example print meta_levenshtein('abacus1cat','cat1cus')
{ "repo_name": "harsham05/tika-similarity", "path": "metalevenshtein.py", "copies": "2", "size": "5214", "license": "apache-2.0", "hash": 6021096736705333000, "line_mean": 35.4615384615, "line_max": 159, "alpha_frac": 0.6300345224, "autogenerated": false, "ratio": 3.464451827242525, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5094486349642525, "avg_score": null, "num_lines": null }
import nltk
import string
import os
from stemming.porter2 import stem
import io
import sys
import argparse
import csv
import features as feat


# A class to do stylistic extractions from text.  Constructing it tokenises
# the text and runs every extractor; each extractor appends one signature
# token (e.g. 'punc', 'bigword') to `featspace` per occurrence it finds, so
# featspace ends up as a bag-of-signatures suitable for cosine comparison.
class psykey:
    def __init__(self, text, wordlistfolder):
        self.text = text
        self.tokens = nltk.word_tokenize(text)
        self.sentenses = nltk.sent_tokenize(text)
        self.tags = nltk.pos_tag(self.tokens)
        self.featspace = []
        self.psykfeatspace(self.featspace, wordlistfolder)
        self.bigrams(self.featspace)
        self.number_count(self.featspace)
        self.punc_count(self.featspace)
        self.big_word_count(self.featspace)
        self.words_per_sentence(self.featspace)
        self.sentence_count(self.featspace)
        self.countPOS(self.featspace, 'CC')
        self.countPOS(self.featspace, 'NP')
        self.countPOS(self.featspace, 'NNP')
        self.words(self.featspace)
        self.stem(self.featspace)

    # Counts a specific POS tag, appending one signature per occurrence
    def countPOS(self, featspace, postag):
        tags = self.tags
        count = 0
        for word in tags:
            if word[1] == postag:
                count += 1
                featspace.append(postag)

    # Appends every token; returns the token count
    def words(self, featspace):
        tokens = self.tokens
        featspace.extend(tokens)
        return len(tokens)

    # Appends 'sentcount' once per sentence; returns the sentence count
    def sentence_count(self, featspace):
        sentences = self.sentenses
        count = len(sentences)
        for i in range(0, count):
            featspace.append('sentcount')
        return count

    # Appends 'wordspersentence' once per unit of average sentence length
    def words_per_sentence(self, featspace):
        token_length = len(self.tokens)
        sentences_length = len(self.sentenses)
        count = int(token_length / sentences_length)
        for i in range(0, count):
            featspace.append('wordspersentence')

    # Counts "big" words (longer than 6 chars), one signature each
    def big_word_count(self, featspace):
        count = 0
        tokens = self.tokens
        for word in tokens:
            if len(word) > 6:
                count += 1
                featspace.append('bigword')

    # Counts punctuation tokens (excluding '.'), one signature each
    def punc_count(self, featspace):
        count = 0
        tokens = self.tokens
        punctuations = string.punctuation.replace('.', '')
        for word in tokens:
            if word in punctuations:
                count += 1
                featspace.append('punc')

    # Counts tokens containing at least one digit, one signature each
    def number_count(self, featspace):
        count = 0
        tokens = self.tokens
        for word in tokens:
            flag = 0
            for ch in word:
                if ch in '0123456789':
                    flag = 1
                    break
            if flag == 1:
                featspace.append('numbers')
                count += 1

    # Appends concatenated adjacent-token bigrams
    def bigrams(self, featspace):
        tokens = self.tokens
        for count in range(0, len(tokens) - 1):
            featspace.append(tokens[count] + tokens[count + 1])

    # Matches tokens against each wordlist file in wordlistfolder; every hit
    # appends a signature named after the file (without '.txt').
    def psykfeatspace(self, featspace, wordlistfolder):
        tokens = self.tokens
        for filename in os.listdir(wordlistfolder):
            if '.txt' in filename:
                # BUG FIX: the original opened each wordlist with a bare
                # open() inside a comprehension and never closed it,
                # leaking a file handle per wordlist.
                with open(os.path.join(wordlistfolder, filename), 'r') as wl:
                    names = [line.strip() for line in wl]
            else:
                continue
            for token in tokens:
                if token in names:
                    featspace.append(filename.replace('.txt', ''))

    # Appends the Porter2 stem of every token
    def stem(self, featspace):
        tokens = self.tokens
        for token in tokens:
            featspace.append(stem(token))


def ClaculatePairwise(inputdir, outputcsv, wordlists):
    """Write pairwise cosine similarity (over psykey feature bags) for every
    unordered pair of .txt files in inputdir into outputcsv."""
    files = os.listdir(inputdir)
    calculated = set()
    with open(outputcsv, "wb") as outF:
        a = csv.writer(outF, delimiter=',')
        a.writerow(["file1", "file2", "Similarity_score"])
        for file1 in files:
            for file2 in files:
                if '.txt' in file1 and '.txt' in file2 and file1 + '\t' + file2 not in calculated and file1 != file2:
                    # record both orderings so each pair is scored once
                    calculated.add(file1 + '\t' + file2)
                    calculated.add(file2 + '\t' + file1)
                else:
                    continue
                # BUG FIX: the original opened both files without a
                # with-block; an exception between open and close leaked
                # both handles.
                with open(os.path.join(inputdir, file1), 'rU') as text1, \
                     open(os.path.join(inputdir, file2), 'rU') as text2:
                    raw1 = text1.read()
                    raw2 = text2.read()
                psykey1 = psykey(raw1, wordlists)
                psykey2 = psykey(raw2, wordlists)
                score = feat.get_cosine_similarity(psykey1.featspace, psykey2.featspace)
                a.writerow([file1, file2, score])


if __name__ == '__main__':
    argParser = argparse.ArgumentParser('Cosine Similarity based on stylistic features')
    argParser.add_argument('--inputDir', required=True, help='path to directory for storing the output CSV File, containing pair-wise Cosine similarity on stylistic features')
    argParser.add_argument('--outCSV', required=True, help='path to output file')
    argParser.add_argument('--wordlists', required=True, help='wordlist folder with files containing lists of words (one per line)')
    args = argParser.parse_args()
    if args.inputDir and args.outCSV and args.wordlists:
        ClaculatePairwise(args.inputDir, args.outCSV, args.wordlists)
{ "repo_name": "YongchaoShang/tika-img-similarity", "path": "psykey.py", "copies": "4", "size": "6723", "license": "apache-2.0", "hash": 5051461476537124000, "line_mean": 36.1436464088, "line_max": 174, "alpha_frac": 0.6199613268, "autogenerated": false, "ratio": 3.9155503785672683, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.653551170536727, "avg_score": null, "num_lines": null }
import os
import argparse

import cv2 as cv

from DetectorAPI import DetectorAPI


def blurBoxes(image, boxes):
    """
    Argument:
    image -- the image that will be edited as a matrix
    boxes -- list of boxes that will be blurred, each box must be in the
    format (x_top_left, y_top_left, x_bottom_right, y_bottom_right)

    Returns:
    image -- the blurred image as a matrix
    """
    img_h, img_w = image.shape[:2]
    for box in boxes:
        # unpack each box
        x1, y1, x2, y2 = [d for d in box]

        # Clamp to the frame: detector boxes can extend past the borders,
        # which would otherwise yield empty crops.
        x1, y1 = max(0, x1), max(0, y1)
        x2, y2 = min(img_w, x2), min(img_h, y2)
        if x2 <= x1 or y2 <= y1:
            continue  # degenerate box, nothing to blur

        # crop the image due to the current box
        sub = image[y1:y2, x1:x2]

        # apply a box (mean) blur on the cropped area
        # (NOTE: cv.blur is a normalized box filter, not a Gaussian blur
        # as the original comment claimed)
        blur = cv.blur(sub, (10, 10))

        # paste blurred region back onto the original image
        image[y1:y2, x1:x2] = blur

    return image


def main(args):
    # assign model path and threshold
    model_path = args.model_path
    threshold = args.threshold

    # create detection object
    odapi = DetectorAPI(path_to_ckpt=model_path)

    # open image
    image = cv.imread(args.input_image)

    # real face detection
    boxes, scores, classes, num = odapi.processFrame(image)

    # filter boxes due to threshold
    # boxes are in (x_top_left, y_top_left, x_bottom_right, y_bottom_right) format
    boxes = [boxes[i] for i in range(0, num) if scores[i] > threshold]

    # apply blurring
    image = blurBoxes(image, boxes)

    # if an output path was given save the result, otherwise display it
    if args.output_image:
        cv.imwrite(args.output_image, image)
        print('Image has been saved successfully at', args.output_image, 'path')
    else:
        cv.imshow('blurred', image)
        # when any key has been pressed then close window and stop the program
        cv.waitKey(0)
        cv.destroyAllWindows()


if __name__ == "__main__":
    # creating argument parser
    parser = argparse.ArgumentParser(description='Image blurring parameters')

    # adding arguments
    parser.add_argument('-i', '--input_image', help='Path to your image', type=str, required=True)
    parser.add_argument('-m', '--model_path', default='/opt/blurry-faces/face_model/face.pb', help='Path to .pb model', type=str)
    parser.add_argument('-o', '--output_image', help='Output file path', type=str)
    parser.add_argument('-t', '--threshold', help='Face detection confidence', default=0.7, type=float)
    args = parser.parse_args()
    print(args)

    # if input image path is invalid then stop
    assert os.path.isfile(args.input_image), 'Invalid input file'

    # if output directory is invalid then stop
    if args.output_image:
        assert os.path.isdir(os.path.dirname(
            args.output_image)), 'No such directory'

    main(args)
{ "repo_name": "grycap/scar", "path": "examples/mask-detector-workflow/blurry-faces/src/auto_blur_image.py", "copies": "1", "size": "3111", "license": "apache-2.0", "hash": 4831196582019585000, "line_mean": 28.9134615385, "line_max": 137, "alpha_frac": 0.568948891, "autogenerated": false, "ratio": 3.923076923076923, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4992025814076923, "avg_score": null, "num_lines": null }
import os
import argparse

import cv2 as cv


def blurBoxes(image, boxes):
    """
    Argument:
    image -- the image that will be edited as a matrix
    boxes -- list of boxes that will be blurred, each box must be in the
    format (x_top_left, y_top_left, width, height)

    Returns:
    image -- the blurred image as a matrix
    """
    for box in boxes:
        # unpack each box
        x, y, w, h = [d for d in box]

        # ROBUSTNESS FIX: cv.selectROI returns (0, 0, 0, 0) when the user
        # cancels a selection; blurring a zero-size crop is invalid.
        if w <= 0 or h <= 0:
            continue

        # crop the image due to the current box
        sub = image[y:y + h, x:x + w]

        # apply GaussianBlur on cropped area
        blur = cv.GaussianBlur(sub, (23, 23), 30)

        # paste blurred image on the original image
        image[y:y + h, x:x + w] = blur

    return image


def main(args):
    # open the image
    image = cv.imread(args.input_image)

    # create a copy and do temp operations without affecting the original image
    temp_image = image.copy()

    # an array to store selected regions coordinates
    ROIs = []

    # keep getting ROIs until pressing 'q'
    while True:
        # get ROI cv.selectROI(window_name, image_matrix, selecting_start_point)
        box = cv.selectROI('blur', temp_image, fromCenter=False)

        # add selected box to box list
        ROIs.append(box)

        # draw a rectangle on selected ROI
        cv.rectangle(temp_image, (box[0], box[1]),
                     (box[0] + box[2], box[1] + box[3]), (0, 255, 0), 3)
        print('ROI is saved, press q to stop capturing, press any other key to select other ROI')

        # if 'q' is pressed then break
        key = cv.waitKey(0)
        if key & 0xFF == ord('q'):
            break

    # apply blurring
    image = blurBoxes(image, ROIs)

    # if image will be saved then save it
    if args.output_image:
        cv.imwrite(args.output_image, image)
    cv.imshow('blurred', image)
    cv.waitKey(0)


if __name__ == "__main__":
    # creating argument parser
    parser = argparse.ArgumentParser(description='Image blurring parameters')

    # adding arguments
    parser.add_argument('-i', '--input_image', help='Path to your image', type=str, required=True)
    parser.add_argument('-o', '--output_image', help='Output file path', type=str)
    args = parser.parse_args()

    # if input image path is invalid then stop
    assert os.path.isfile(args.input_image), 'Invalid input file'

    # if output directory is invalid then stop
    if args.output_image:
        assert os.path.isdir(os.path.dirname(
            args.output_image)), 'No such directory'

    main(args)
{ "repo_name": "grycap/scar", "path": "examples/mask-detector-workflow/blurry-faces/src/manual_blur_image.py", "copies": "1", "size": "2676", "license": "apache-2.0", "hash": -5675342359476043000, "line_mean": 28.4065934066, "line_max": 120, "alpha_frac": 0.5904334828, "autogenerated": false, "ratio": 3.706371191135734, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.47968046739357345, "avg_score": null, "num_lines": null }
__author__ = 'as'

from bs4 import BeautifulSoup
import json
import sqlite3
import urllib.request
import urllib.parse
import urllib.error
import urllib
import os
from urllib.request import urlretrieve

# Scrape Bulbapedia's archive search for "*Map.png" images, bucket the hits
# by region, then download every region's maps into a directory of that name.
mapsURL = "http://archives.bulbagarden.net/w/index.php?title=Special:Search&limit=1000&offset=0&profile=images&search=*Map.png"
bulbapediaMapsPage = urllib.request.urlopen(mapsURL)
inputMaps = bulbapediaMapsPage.read()
soup = BeautifulSoup(inputMaps, "html.parser")

# Cache this page as it might be removed in future, if it does not exist
if not os.path.isfile('maps_websitelist.txt'):
    urlretrieve(mapsURL, 'maps_websitelist.txt')

# Search for the container that holds all the map hrefs
mapsList = soup.find(attrs={"class": "mw-search-results"})
unfilteredList = []

# Append all the hrefs to unfilteredList, which is then searched per region
for link in mapsList.find_all('a'):
    mapLink = link.get('href')
    unfilteredList.append(mapLink)

regionList = ["Kanto", "Johto", "Hoenn", "Sinnoh", "Unova", "Kalos"]

# Create dictionary of each region
regionDictionary = {"Kanto": [], "Johto": [], "Hoenn": [], "Sinnoh": [], "Unova": [], "Kalos": []}

# Search through images for those that are in a region, then append to dictionary
for image in unfilteredList:
    for name in regionList:
        if name in image:
            regionDictionary[name].append(image)

for region in regionList:
    try:
        os.mkdir(region)
    except OSError:
        # BUG FIX: narrowed from a bare `except:` — only OS-level failures
        # (typically "directory already exists") should be tolerated here.
        print("Directory " + region + " already exists!")
    for image in regionDictionary[region]:
        bulbapediaLink = "http://bulbapedia.bulbagarden.net"
        rawURL = bulbapediaLink + image
        imageWebsite = urllib.request.urlopen(rawURL)
        imageText = imageWebsite.read()
        soup = BeautifulSoup(imageText, "html.parser")
        # The full-resolution image URL sits inside the fullImageLink anchor
        imageSoup = soup.find(attrs={"class": "fullImageLink"})
        mapImage = imageSoup.a.img.get("src")
        imageName = mapImage.split('/')[-1]
        # BUG FIX: portable path join instead of the hard-coded Windows "\\"
        urllib.request.urlretrieve(mapImage, os.path.join(region, imageName))
{ "repo_name": "foxtrot94/ECE-Pokedex", "path": "Scrapper/scrapper_maps.py", "copies": "1", "size": "2269", "license": "mit", "hash": -4508411977283524000, "line_mean": 29.6621621622, "line_max": 127, "alpha_frac": 0.6831203173, "autogenerated": false, "ratio": 3.4907692307692306, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9650904021821337, "avg_score": 0.00459710524957868, "num_lines": 74 }
__author__ = 'as'

from bs4 import BeautifulSoup
import json
import sqlite3
import urllib.request
import urllib.parse
import urllib.error

# Needed to convert names with accents to normal
from unidecode import unidecode

# Scrape serebii.net's pokedex index and map each pokemon row in the local
# database (including alternate forms that share a name) to its national ID.
conn = sqlite3.connect('..//database/pokedex.sqlite3')
c = conn.cursor()
# Rebuild the mapping table from scratch on every run.
c.execute("delete from " + "pokemon_nationalID")
c.execute("delete from " + " sqlite_sequence where name = 'pokemon_nationalID'")
conn.commit()

# Extract the pokemon names ordered by pokemonUniqueID; the (index + 1) of
# this list is the local pokemon ID.
pokemonlist = c.execute("select name from pokemon ORDER BY pokemonUniqueID")
simplepokemonlist = []
# the cursor yields tuples; flatten to a simple list of names
for row in pokemonlist:
    simplepokemonlist.append(row[0])
print(simplepokemonlist)

# Constant needs to be updated whenever serebii adds more
CURRENT_NUMBER_OF_POKEMON_IN_SEREBII = 721

# Pokemon whose names are substrings of other names; these need an
# exact-match pass instead of a substring match.
EXCEPTION_POKEMON = ["Pidgeot", "Paras", "Porygon", "Kabuto", "Mew", "Klink"]

serebiiPokedex = urllib.request.urlopen("http://www.serebii.net/pokedex-xy/")
inputData = serebiiPokedex.read()
soup = BeautifulSoup(inputData, "html.parser")

# cycle through 1..721, constructing the option value for each nationalID
i = 1
while i <= CURRENT_NUMBER_OF_POKEMON_IN_SEREBII:
    # formats 1 to 001 etc
    nationalID = "%03d" % (i)
    entry = soup.find(value="/pokedex-xy/" + nationalID + ".shtml")
    # ROBUSTNESS FIX: the original called .get_text() unconditionally and
    # crashed with AttributeError if serebii's page lacked an entry.
    if entry is None:
        print("No serebii entry found for national ID " + nationalID)
        i += 1
        continue
    formline = entry.get_text()
    # make sure it gets rid of accents
    formline = unidecode(formline)

    # Split the formline into its number and name (default separator is space).
    # Pokemon may have spaces in their name, so rejoin everything after the number.
    pokemonIDCouple = formline.split()
    pokemonIDCouple[1:] = [' '.join(pokemonIDCouple[1:])]
    # pokemonIDCouple[0] has the number
    # pokemonIDCouple[1] has the name
    print(pokemonIDCouple)

    # Find every local pokemon whose name contains this name (forms share it).
    # TODO: Implement non case sensitive check
    indices = [idx for idx, name in enumerate(simplepokemonlist)
               if pokemonIDCouple[1] in name]

    # Substring matching over-matches for the known exceptions
    # (e.g. "Mew" also hits "Mewtwo"); redo those with exact equality.
    if len(indices) > 1:
        if pokemonIDCouple[1] in EXCEPTION_POKEMON:
            indices = [idx for idx, name in enumerate(simplepokemonlist)
                       if pokemonIDCouple[1] == name]
    print(indices)

    # Each matched index corresponds to pokemonID = index + 1; map it to the
    # scraped nationalID.
    for pokemonID in indices:
        incrementedpokeID = pokemonID + 1
        c.execute("INSERT INTO pokemon_nationalID VALUES (?,?)",
                  (incrementedpokeID, pokemonIDCouple[0]))
        conn.commit()

    i += 1
{ "repo_name": "foxtrot94/ECE-Pokedex", "path": "Scrapper/scrapper_national_id.py", "copies": "1", "size": "3403", "license": "mit", "hash": -8987059250710883000, "line_mean": 31.1037735849, "line_max": 130, "alpha_frac": 0.7178959741, "autogenerated": false, "ratio": 3.3962075848303392, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4614103558930339, "avg_score": null, "num_lines": null }
__author__ = 'as' # from __future__ import print_function from PIL import Image from PIL import ImageFileIO import os import fnmatch # # im = Image.open('Fighter-Front.gif') # transparency = im.info['transparency'] # im.save('test1.png', transparency=transparency) # # im.seek(im.tell()+1) # transparency = im.info['transparency'] # im.save('test2.png', transparency=transparency) # Currently 719 pokemon gifs in assets, along with mega and alternate forms NUMBER_OF_POKEMON = 719 # # im = Image.open('001.gif') # im.show() # print( im.format, im.size, im.mode) # def extractFrames(inGif, outFolder): rawGIF = Image.open('001.gif') # rawGIF.seek() redundant # # rawGIF.save('testy.png','PNG',) rawGIF_array = [] for file in os.listdir('.'): if fnmatch.fnmatch(file, '*.gif'): # print(file) rawGIF_array.append(file) # print(rawGIF_array) for gif in rawGIF_array: RAWGIF = Image.open(gif) savename = gif.split('.') RAWGIF.save(savename[0]+'.png', 'PNG', ) print(RAWGIF) # print # Steps: # 1) Open image # 2) Freeze at first frame # 3) Store frozen frame # 4) Save image in directory # search for gifs, store into list, convert each element in list # FNMATCH # from __future__ import print_function # import os, sys # from PIL import Image # # for infile in sys.argv[1:]: # f, e = os.path.splitext(infile) # outfile = f + ".jpg" # if infile != outfile: # try: # Image.open(infile).save(outfile) # except IOError: # print("cannot convert", infile) # extractFrames('ban_ccccccccccc.gif', 'output') # while i < NUMBER_OF_POKEMON: # # # if condition needed to check whether pokemon has mega/alternate form # im = Image.open('..//sprites/pokemon/gen5/animated/'+i+'.gif')
{ "repo_name": "foxtrot94/ECE-Pokedex", "path": "Scrapper/gifToPng.py", "copies": "1", "size": "1804", "license": "mit", "hash": -3303035394453629400, "line_mean": 23.7260273973, "line_max": 76, "alpha_frac": 0.6435698448, "autogenerated": false, "ratio": 3.0016638935108153, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4145233738310815, "avg_score": null, "num_lines": null }
__author__ = 'as'

import bs4
import json
import sqlite3

# Scrapper populates the following tables from the local pokemon.json dump:
# moves, category, types, types_effectiveness.

# used to place what the current generation is, (currently gen 6)
currentGeneration = 6

with open("pokemon.json") as data_file:
    data = json.load(data_file)

# Wipe the target tables and reset their autoincrement counters so the
# script can be re-run from a clean slate.
conn = sqlite3.connect('..//database/pokedex.sqlite3')
c = conn.cursor();
c.execute("delete from " + "moves")
c.execute("delete from " + "category")
c.execute("delete from " + "types")
c.execute("delete from " + "types_effectiveness")
c.execute("delete from " + " sqlite_sequence where name = 'moves'")
c.execute("delete from " + " sqlite_sequence where name = 'category'")
c.execute("delete from " + " sqlite_sequence where name = 'types'")
c.execute("delete from " + " sqlite_sequence where name = 'types_effectiveness'")

# Move categories: whether a move is physical (e.g. fighting), special
# (e.g. fire), or Non-Damaging (status boost).
categoryList = ["Physical", "Special", "Non-Damaging"]
for category in categoryList:
    c.execute("INSERT INTO category VALUES (?,?)", (None, category))
    conn.commit()

# Goes through the json type dictionary to find each type name and append it
# to a list; the 1-based position in this list is the type's row ID.
typeListInfo = data["types"]
typeList = []
for type in typeListInfo:  # NOTE(review): loop var shadows builtin `type`
    # if 'name'in type:
    typeList.append(type['name'])
for type in typeList:
    c.execute("INSERT INTO types VALUES (?,?)", (None, type))
    conn.commit()

# Type-vs-type effectiveness matrix: each entry maps an attacking type to a
# defending type with an effectiveness multiplier.
for type in typeListInfo:
    fromTypeID = typeList.index(type['name'])+1
    for type_effectivnes in type['atk_effectives']:
        toTypeId = typeList.index(type_effectivnes[0])+1
        effectiveLevel = type_effectivnes[1]
        c.execute("INSERT INTO types_effectiveness VALUES (?,?,?)", (fromTypeID, effectiveLevel, toTypeId))
        conn.commit()

# moves columns: moveID (autoincrement, so None), name, description, power,
# accuracy, pp, affects, genFirstAppeared, typeID, categoryID.
for moves in data["moves"]:
    name = moves["name"]
    # The description shows what the move does, such as increased defence, etc
    description = moves["description"]
    power = moves["power"]
    accuracy = moves["accuracy"]
    # Power points, or the number of times the move can be done
    pp = moves["pp"]
    # TODO: placeholder until the "affects" data is actually scraped.
    affects = "hello"
    # genfamily lists every generation the move appears in, ending at the
    # current one, so [(1 + currentGeneration) - len] gives the generation
    # the move was first introduced.
    temp = len(moves["genfamily"])
    genFirstAppeared = 1 + currentGeneration - temp
    # TODO Affects column in table needs to be added
    typeString = moves["type"]
    typeID = typeList.index(typeString) + 1
    # Determines whether move is physical, special, or other
    categoryString = moves["category"]
    categoryID = categoryList.index(categoryString) + 1
    todo = "TO DO"
    c.execute("INSERT INTO moves VALUES (?,?,?,?,?,?,?,?,?,?)",
              (None, name, description, power, accuracy, pp, affects, genFirstAppeared, typeID, categoryID))
    conn.commit()

conn.commit()
conn.close()
{ "repo_name": "foxtrot94/ECE-Pokedex", "path": "Scrapper/scrapper_moves.py", "copies": "1", "size": "3502", "license": "mit", "hash": 5972386433846345000, "line_mean": 30.2767857143, "line_max": 121, "alpha_frac": 0.663335237, "autogenerated": false, "ratio": 3.7215727948990436, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9866565513269066, "avg_score": 0.003668503725995451, "num_lines": 112 }
__author__ = 'as' import bs4 import json import sqlite3 with open("pokemon.json") as data_file: data = json.load(data_file) conn = sqlite3.connect('../database/pokedex.sqlite3') c = conn.cursor() c.execute("delete from " + "pokemon_abilities") # this file could possibly "inherit" from "scrapper_pokemon"... # for pokemons, i in enumerate(data["pokemon"]): # pokemonID = pokemons["alts"][0] if i < len(pokemons[""]) # The database pokemon ID will be mapped with the database ability ID. # Each pokemon can have up to 3 abilities. The pokemon.json file shows the abilities that each pokemon can have. # What needs to be done is to match the pokemon ID to the ability ID. # How can this be done? By parsing through the the json file each time and when the pokemon's ability is found, # this ability is compared to the abilities table and returns the abilities ID to fill the pokemon_abilities table. # However, there are multiple abilities per pokemon. So will an intermediary table be used??? # variables: pokemonID (int, not null) and abilityID (int, not null) conn.commit() conn.close()
{ "repo_name": "foxtrot94/ECE-Pokedex", "path": "Scrapper/scrapper_pokemonAbilities.py", "copies": "1", "size": "1105", "license": "mit", "hash": -8759316399337368000, "line_mean": 34.6451612903, "line_max": 115, "alpha_frac": 0.7330316742, "autogenerated": false, "ratio": 3.4423676012461057, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4675399275446106, "avg_score": null, "num_lines": null }
__author__ = 'as'

import bs4
import sqlite3
import json
#import pykemon

# Script: rebuilds the per-form `pokemon` stats table (one row per base,
# mega or alternate form) and the pokemon_suffix table from pokemon.json.

# used to place what the current generation is, (currently gen 6)
currentGeneration = 6

with open("pokemon.json") as data_file:
    data = json.load(data_file)

conn = sqlite3.connect('..//database/pokedex.sqlite3')
c = conn.cursor()
# Wipe target tables and reset autoincrement counters so re-runs start clean.
# NOTE(review): the first delete targets pokemon_unique_info while the inserts
# below go into `pokemon` — presumably these name the same underlying data;
# confirm against the schema.
c.execute("delete from " + "pokemon_unique_info")
# c.execute("ALTER TABLE pokemon AUTOINCREMENT = 1")
c.execute("delete from " + " sqlite_sequence where name = 'pokemon'")
c.execute("delete from " + "pokemon_suffix")
c.execute("delete from" + " sqlite_sequence where name = 'pokemon_suffix'")
conn.commit()

for pokemons in data["pokemon"]:
    # Name is shared by every alternate form of the species.
    name = pokemons["name"]
    # One `pokemon` row per form listed under "alts".
    for i, pokemonForm in enumerate(pokemons["alts"]):
        height = pokemonForm["height"]   # meters
        weight = pokemonForm["weight"]   # kilograms
        attack = pokemonForm["atk"]
        defence = pokemonForm["def"]
        healthPoints = pokemonForm["hp"]
        spAttack = pokemonForm["spa"]
        spDefence = pokemonForm["spd"]
        speed = pokemonForm["spe"]
        # Base-stat total of the six stats.
        basestat = healthPoints+attack+defence+spAttack+spDefence+speed
        c.execute("INSERT INTO pokemon VALUES (?,?,?,?,?,?,?,?,?,?,?)",
                  (None, name, height, weight, attack, defence, healthPoints,
                   spAttack, spDefence, speed, basestat))
        conn.commit()
        # Autoincremented unique ID of the row just inserted.
        pokemonID = c.lastrowid
        suffix = pokemonForm["suffix"]
        # BUGFIX: was `if suffix is not ""` — identity comparison against a
        # string literal only "worked" via CPython interning (and raises a
        # SyntaxWarning on 3.8+); compare by value instead.
        if suffix != "":
            print(suffix)
            c.execute("INSERT INTO pokemon_suffix VALUES (?,?)", (pokemonID, suffix))
            conn.commit()

conn.commit()
conn.close()
{ "repo_name": "foxtrot94/ECE-Pokedex", "path": "Scrapper/scrapper_pokemon.py", "copies": "1", "size": "3113", "license": "mit", "hash": -3484669600206293000, "line_mean": 29.2330097087, "line_max": 335, "alpha_frac": 0.5888210729, "autogenerated": false, "ratio": 3.7415865384615383, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9774169694873863, "avg_score": 0.011247583297534973, "num_lines": 103 }
__author__ = 'as'

import sqlite3

# Lookup helpers translating between a pokemon's database-unique ID (one per
# form, including megas) and its national pokedex ID. Every function takes an
# already-open sqlite3 cursor connected to the pokedex database and returns
# the matching rows as a list of tuples.
#
# To use:
#     from Scrapper import function_pokemonID
# passing a cursor connected to ../database/pokedex.sqlite3.
#
# CLEANUP: each function used to assign cursor.execute(...) to an unused
# local and then call fetchall() on the cursor; since Cursor.execute returns
# the cursor itself, the calls are now chained directly.


def returnPokemonUniqueIDFromNationalID(pokemonNationalID, databasecursor):
    """Return all (pokemonUniqueID, pokemonNationalID) rows for a national ID.

    Several forms (megas, alternates) share one national ID, so the result
    may contain multiple tuples; position 0 is the first non-mega instance.

    Example: returnPokemonUniqueIDFromNationalID(9, c) -> [(675, 9), (676, 9)]
    """
    return databasecursor.execute(
        "select * from pokemon_nationalID WHERE pokemonNationalID=?",
        (pokemonNationalID,)).fetchall()


def returnPokemonUniqueIDFromName(pokemonName, databasecursor):
    """Return all pokemon_unique_info rows whose name matches exactly.

    Multiple rows come back when forms share a name (e.g. "Alakazam" and its
    mega); position 0 of each tuple is the unique ID.
    """
    return databasecursor.execute(
        "select * from pokemon_unique_info WHERE name=?",
        (pokemonName,)).fetchall()


def returnPokemonNamefromUniqueID(uniqueID, databasecursor):
    """Return the pokemon_unique_info row(s) for a database-unique ID."""
    return databasecursor.execute(
        "select * from pokemon_unique_info WHERE pokemonUniqueID=?",
        (uniqueID,)).fetchall()


def returnPokemonNationalIDFromUniqueID(uniqueID, databasecursor):
    """Return the (pokemonUniqueID, pokemonNationalID) row for a unique ID.

    Example: returnPokemonNationalIDFromUniqueID(675, c) -> [(675, 9)]
    """
    return databasecursor.execute(
        "select * from pokemon_nationalID WHERE pokemonUniqueID=?",
        (uniqueID,)).fetchall()


def returnCommonInfoFromNationalID(nationalID, databasecursor):
    """Return the pokemon_common_info row(s) for a national pokedex ID."""
    return databasecursor.execute(
        "select * from pokemon_common_info WHERE pokemonNationalID=?",
        (nationalID,)).fetchall()
{ "repo_name": "foxtrot94/ECE-Pokedex", "path": "Scrapper/function_pokemonID.py", "copies": "1", "size": "2417", "license": "mit", "hash": 2642649068800548000, "line_mean": 40.6724137931, "line_max": 138, "alpha_frac": 0.7662391394, "autogenerated": false, "ratio": 3.5130813953488373, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4779320534748837, "avg_score": null, "num_lines": null }
__author__ = 'as'

# This file:
# 1) finds the generation the pokemon first appeared in,
# 2) links the pokemonID to NationalID,
# 3) links type to typeID
# 4) links ability to abilityID

import json
import bs4
import sqlite3
from Scrapper import function_pokemonID

# Open the json file
with open("pokemon.json") as data_file:
    data = json.load(data_file)

# Connect to database, then clear both link tables and reset their
# autoincrement counters before repopulating.
conn = sqlite3.connect('..//database/pokedex.sqlite3')
c = conn.cursor();
c.execute("delete from " + "pokemon_types")
c.execute("delete from " + " sqlite_sequence where name = 'pokemon_types'")
c.execute("delete from " + "pokemon_abilities")
c.execute("delete from " + " sqlite_sequence where name = 'pokemon_abilities'")

# uniqueID replays the autoincremented row IDs: one per form, in the same
# iteration order scrapper_pokemon.py inserted them (IDs are 1-based).
uniqueID = 0
currentGeneration = 6
for pokemons in data["pokemon"]:
    for counter, pokemonForm in enumerate(pokemons["alts"]):
        uniqueID += 1
        # genfamily lists every generation the pokemon appears in, ending at
        # the current one, so [(1 + currentGeneration) - len] gives the
        # generation the pokemon was first introduced.
        temp = len(pokemons["genfamily"])
        generation = 1 + currentGeneration - temp
        # Need the national ID of the pokemon currently being populated;
        # genFirstAppeared lives in the pokemon_common_info table.
        uniqueIDtuple = (uniqueID,)
        pokemon_NationalID_List = c.execute("select * from pokemon_nationalID WHERE pokemonUniqueID=?", uniqueIDtuple)
        pokemon_NationalID = c.fetchall()[0]
        c.execute("UPDATE pokemon_common_info set genFirstAppeared=? WHERE pokemonNationalID = (?)",
                  (generation, pokemon_NationalID[1]))
        # Types that a Pokemon has: resolve each type name to its row ID and
        # link it to this form.
        typesList = pokemonForm["types"]
        for types in typesList:
            typesTuple = (types,)
            pokemon_Types_List = c.execute("select * from types WHERE name=?", typesTuple)
            pokemon_Types = c.fetchone()
            c.execute("INSERT INTO pokemon_types VALUES (?,?)", (uniqueID, pokemon_Types[0]))
            print(pokemon_Types[0])
        # Abilities that a Pokemon has: same name -> row ID resolution.
        abilitiesList = pokemonForm["abilities"]
        for abilities in abilitiesList:
            abilitiesTuple = (abilities,)
            pokemon_Abilities_List = c.execute("select * from abilities WHERE name=?", abilitiesTuple)
            pokemon_Abilities = c.fetchone()
            c.execute("INSERT INTO pokemon_abilities VALUES (?,?)", (uniqueID, pokemon_Abilities[0]))
            print(pokemon_Abilities[1])
        conn.commit()

conn.commit()
conn.close()
{ "repo_name": "foxtrot94/ECE-Pokedex", "path": "Scrapper/scrapper_pokemonTypes.py", "copies": "1", "size": "2685", "license": "mit", "hash": 1070691331694013200, "line_mean": 30.6, "line_max": 137, "alpha_frac": 0.6629422719, "autogenerated": false, "ratio": 3.8302425106990015, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4993184782599001, "avg_score": null, "num_lines": null }
__author__ = 'AssadMahmood'

import os
import requests

from asposecloud import Product
from asposecloud.common import Utils


class Folder:
    """
    Wrapper around the Aspose for Cloud storage API.

    Provides upload/download, listing, existence checks, deletion and disc
    usage queries against the storage used by the Aspose product APIs.
    """

    def __init__(self):
        self.str_uri_folder = Product.product_uri + 'storage/folder/'
        self.str_uri_file = Product.product_uri + 'storage/file/'
        self.str_uri_exist = Product.product_uri + 'storage/exist/'
        self.str_uri_disc = Product.product_uri + 'storage/disc'

    def upload_file(self, local_file, remote_folder='', storage_type='Aspose', storage_name=None):
        """Upload a local file to cloud storage.

        :param local_file: path of the file on the local machine
        :param remote_folder: storage folder to upload into
        :param storage_type: type of storage e.g Aspose, S3
        :param storage_name: name of storage e.g. MyAmazonS3
        :return: True on success, False otherwise
        """
        if not local_file:
            raise ValueError("local_file not specified.")
        prefix = self.str_uri_file + (remote_folder + '/' if remote_folder else '')
        target_uri = Utils.append_storage(prefix + os.path.basename(local_file),
                                          '', storage_type, storage_name)
        reply = Utils.upload_file_binary(local_file, Utils.sign(target_uri)).json()
        return reply['Status'] == 'OK'

    def get_files(self, remote_folder='', storage_type='Aspose', storage_name=None):
        """Return the file listing of the given remote folder.

        :param remote_folder: storage folder to list
        :param storage_type: type of storage e.g Aspose, S3
        :param storage_name: name of storage e.g. MyAmazonS3
        :return: list of file descriptors
        """
        listing_uri = self.str_uri_folder + remote_folder
        if listing_uri.endswith('/'):
            listing_uri = listing_uri[:-1]
        listing_uri = Utils.append_storage(listing_uri, '', storage_type, storage_name)
        reply = requests.get(Utils.sign(listing_uri), headers={
            'content-type': 'application/json',
            'accept': 'application/json',
            'x-aspose-client' : 'PYTHONSDK/v1.0'
        }).json()
        return reply['Files']

    def get_file(self, filename, storage_type='Aspose', storage_name=None):
        """Download a file from remote storage.

        :param filename: remote file name
        :param storage_type: type of storage e.g Aspose, S3
        :param storage_name: name of storage e.g. MyAmazonS3
        :return: streaming response object with the file contents
        """
        if not filename:
            raise ValueError("filename not specified.")
        download_uri = Utils.append_storage(self.str_uri_file + filename,
                                            '', storage_type, storage_name)
        return requests.get(Utils.sign(download_uri), headers={
            'content-type': 'application/json',
            'accept': 'application/json',
            'x-aspose-client' : 'PYTHONSDK/v1.0'
        }, stream=True)

    def file_exists(self, filename, storage_type='Aspose', storage_name=None):
        """Check whether a file already exists on the specified storage.

        :param filename: remote file name
        :param storage_type: type of storage e.g Aspose, S3
        :param storage_name: name of storage e.g. MyAmazonS3
        :return: True or False
        """
        if not filename:
            raise ValueError("filename not specified.")
        probe_uri = Utils.append_storage(self.str_uri_exist + filename,
                                         '', storage_type, storage_name)
        reply = requests.get(Utils.sign(probe_uri), headers={
            'content-type': 'application/json',
            'accept': 'application/json',
            'x-aspose-client' : 'PYTHONSDK/v1.0'
        }).json()
        return reply['FileExist']['IsExist']

    def delete_file(self, filename, storage_type='Aspose', storage_name=None):
        """Delete a file from the specified storage.

        :param filename: remote file name
        :param storage_type: type of storage e.g Aspose, S3
        :param storage_name: name of storage e.g. MyAmazonS3
        :return: True on success, False otherwise
        """
        if not filename:
            raise ValueError("filename not specified.")
        target_uri = Utils.append_storage(self.str_uri_file + filename,
                                          '', storage_type, storage_name)
        reply = requests.delete(Utils.sign(target_uri), headers={
            'content-type': 'application/json',
            'accept': 'application/json',
            'x-aspose-client' : 'PYTHONSDK/v1.0'
        }).json()
        return reply['Code'] == 200

    def create_folder(self, folder_name, storage_type='Aspose', storage_name=None):
        """Create a new folder on the specified storage.

        :param folder_name: name of the folder to create
        :param storage_type: type of storage e.g Aspose, S3
        :param storage_name: name of storage e.g. MyAmazonS3
        :return: True on success, False otherwise
        """
        if not folder_name:
            raise ValueError("folder_name not specified.")
        target_uri = Utils.append_storage(self.str_uri_folder + folder_name,
                                          '', storage_type, storage_name)
        reply = requests.put(Utils.sign(target_uri), '', headers={
            'content-type': 'application/json',
            'accept': 'application/json',
            'x-aspose-client' : 'PYTHONSDK/v1.0'
        }).json()
        return reply['Code'] == 200

    def delete_folder(self, folder_name, storage_type='Aspose', storage_name=None):
        """Delete a folder from the specified storage.

        :param folder_name: name of the folder to delete
        :param storage_type: type of storage e.g Aspose, S3
        :param storage_name: name of storage e.g. MyAmazonS3
        :return: True on success, False otherwise
        """
        if not folder_name:
            raise ValueError("folder_name not specified.")
        target_uri = Utils.append_storage(self.str_uri_folder + folder_name,
                                          '', storage_type, storage_name)
        reply = requests.delete(Utils.sign(target_uri), headers={
            'content-type': 'application/json',
            'accept': 'application/json',
            'x-aspose-client' : 'PYTHONSDK/v1.0'
        }).json()
        return reply['Code'] == 200

    def get_disc_usage(self, storage_type='Aspose', storage_name=None):
        """Get the disc usage details of the specified storage.

        :param storage_type: type of storage e.g Aspose, S3
        :param storage_name: name of storage e.g. MyAmazonS3
        :return: disc usage details
        """
        usage_uri = Utils.append_storage(self.str_uri_disc, '', storage_type, storage_name)
        reply = requests.get(Utils.sign(usage_uri), headers={
            'content-type': 'application/json',
            'accept': 'application/json',
            'x-aspose-client' : 'PYTHONSDK/v1.0'
        }).json()
        return reply['DiscUsage']
{ "repo_name": "asposeforcloud/Aspose_Cloud_SDK_For_Python", "path": "asposecloud/storage.py", "copies": "1", "size": "7339", "license": "mit", "hash": 5504752519129291000, "line_mean": 38.4569892473, "line_max": 114, "alpha_frac": 0.6132988146, "autogenerated": false, "ratio": 3.792764857881137, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9901017979113788, "avg_score": 0.0010091386734698176, "num_lines": 186 }
__author__ = 'AssadMahmood' import requests import hmac import hashlib import re import string import os import json from urlparse import urlparse from asposecloud import AsposeApp from asposecloud import Product class Utils: """ A common collection of utility function to perform various tasks. """ def __init__(self): return @staticmethod def build_uri(path, qry_data=None): """ URI Builder - Accept path and query string to generate a URI. :param path: e.g http://api.aspose.com/v1/testurl :param qry_data: a dictionary which holds query string data e.g {'param1': 'value1', 'param2': 'value2'} :return: returns a uri with query string e.g http://api.aspose.com/v1/testurl?param1=value1&param2=value2 """ qry_str = '' for key, value in qry_data.iteritems(): qry_str += str(key) + '=' + str(value) + '&' if qry_str: uri = path + '?' + qry_str[:-1] else: uri = path return uri @staticmethod def validate_result(result): """ Validates if an API call have any error in the body. :param result: body of the response :return: True or False """ if type(result) == requests.Response: result = result.content elif type(result) == dict: result = json.dumps(result) results = ['Unknown file format', 'Unable to read beyond the end of the stream', 'Index was out of range', 'Cannot read that as a ZipFile', 'Not a Microsoft PowerPoint 2007 presentation', 'Index was outside the bounds of the array', 'An attempt was made to move the position before the beginning of the stream'] for val in results: if re.search(val, result): return val return None @staticmethod def upload_file_binary(local_file, uri): """ Upload a local file to provided url :param local_file: path/to/local/file on your local machine :param uri: target uri to upload file :return: Response object """ with open(local_file, 'rb') as payload: return requests.put(uri, payload, stream=True) @staticmethod def sign(url_to_sign): """ Add Signature to the url for authentication. 
:param url_to_sign: :return: Returns Signed URL """ url_to_sign = url_to_sign.replace(" ", "%20") url = urlparse(url_to_sign) if url.query == "": url_part_to_sign = url.scheme + "://" + url.netloc + url.path + "?appSID=" + AsposeApp.app_sid else: url_part_to_sign = url.scheme + "://" + url.netloc + url.path + "?" + url.query + "&appSID=" + \ AsposeApp.app_sid signature = hmac.new(AsposeApp.app_key, url_part_to_sign, hashlib.sha1).digest().encode('base64')[:-1] signature = re.sub('[=_-]', '', signature) if url.query == "": return url.scheme + "://" + url.netloc + url.path + "?appSID=" + AsposeApp.app_sid + "&signature=" + \ signature else: return url.scheme + "://" + url.netloc + url.path + "?" + url.query + "&appSID=" + AsposeApp.app_sid + \ "&signature=" + signature @staticmethod def append_storage(uri, remote_folder='', storage_type='Aspose', storage_name=''): """ Utility function to help append storage parameters in URI :param uri: :param remote_folder: :param storage_type: :param storage_name: :return: """ tmp_uri = None if remote_folder and not remote_folder.isspace(): tmp_uri = "folder=" + remote_folder + "&" if storage_name and not storage_type == "Aspose": tmp_uri = tmp_uri + "storage=" + storage_name if tmp_uri: if string.find(tmp_uri, '?'): tmp_uri = "&" + tmp_uri else: tmp_uri = "?" 
+ tmp_uri if tmp_uri: if tmp_uri[-1:] == '&': tmp_uri = tmp_uri[:-1] if tmp_uri: return uri + tmp_uri else: return uri @staticmethod def save_file(response_stream, filename): """ Save a response stream as local file :param response_stream: File Stream :param filename: File name along with path to store :return: returns full path of newly created file """ with open(filename, 'wb') as f: for chunk in response_stream.iter_content(): f.write(chunk) return filename @staticmethod def get_filename(filename): """ Extract filename without extension :param filename: :return: filename without extension """ return os.path.splitext(os.path.basename(filename))[0] @staticmethod def download_file(remote_filename, output_filename, remote_folder='', storage_type='Aspose', storage_name=None): """ Download a file from remote storage and save it on local storage. :param remote_filename: filename on remote storage :param output_filename: filename along with path to store on local storage :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: output location of the downloaded file """ if remote_folder: remote_filename = remote_folder + '/' + remote_filename str_uri = Product.product_uri + 'storage/file/' + remote_filename str_uri = Utils.append_storage(str_uri, '', storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) Utils.save_file(response, AsposeApp.output_path + output_filename) return AsposeApp.output_path + output_filename
{ "repo_name": "asposeforcloud/Aspose_Cloud_SDK_For_Python", "path": "asposecloud/common.py", "copies": "1", "size": "6419", "license": "mit", "hash": -2058735620956643800, "line_mean": 31.9179487179, "line_max": 118, "alpha_frac": 0.5704938464, "autogenerated": false, "ratio": 4.101597444089457, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5172091290489457, "avg_score": null, "num_lines": null }
__author__ = 'AssadMahmood' import requests import json import os.path from asposecloud import AsposeApp from asposecloud import Product from asposecloud.common import Utils class Document: """ Wrapper class for Aspose.PDF API Document Resource. The Aspose.PDF API let's you manipulate PDF files. """ def __init__(self, filename): self.filename = filename if not filename: raise ValueError("filename not specified") self.base_uri = Product.product_uri + 'pdf/' + self.filename def get_properties(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/documentProperties' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['DocumentProperties']['List'] if response['DocumentProperties']['List'] else False def delete_properties(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/documentProperties' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.delete(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return True if response['Status'] == 'OK' else False def get_property(self, property_name, remote_folder='', storage_type='Aspose', storage_name=None): """ :param property_name: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/documentProperties/' + property_name str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['DocumentProperty'] if response['Code'] == 200 else False def set_property(self, property_name, property_value, remote_folder='', storage_type='Aspose', storage_name=None): """ :param property_name: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/documentProperties/' + property_name str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) json_data = json.dumps({'Value':property_value}) signed_uri = Utils.sign(str_uri) response = None try: response = requests.put(signed_uri, json_data, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['DocumentProperty'] if response['Code'] == 200 else False def create_empty_pdf(self, stream_out=False, output_filename=None, remote_folder='', storage_type='Aspose', storage_name=None): """ Create a pdf file from any supported format file e.g. html,xml,jpeg,svg,tiff :param stream_out: :param output_filename: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.put(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) if not stream_out: if output_filename is None: output_filename = self.filename output_path = AsposeApp.output_path + Utils.get_filename(output_filename) + '.pdf' Utils.save_file(response, output_path) return output_path else: return response.content def create_pdf(self, template_file, source_format, data_file=None, stream_out=False, output_filename=None, remote_folder='', storage_type='Aspose', storage_name=None): """ Create a pdf file from any supported format file e.g. 
html,xml,jpeg,svg,tiff :param template_file: :param source_format: :param data_file: like xml file for xslt format :param stream_out: :param output_filename: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ if not template_file: raise ValueError("template_file not specified") if not source_format: raise ValueError("source_format not specified") save_format = 'pdf' str_uri = self.base_uri + '?templateFile=' + template_file + '&templateType=' + source_format if(data_file): str_uri += '&dataFile=' + data_file str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.put(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) if not stream_out: if output_filename is None: output_filename = self.filename output_path = AsposeApp.output_path + Utils.get_filename(output_filename) + '.' + save_format Utils.save_file(response, output_path) return output_path else: return response.content def add_stamp(self, post_data, page_number, stream_out=False, output_filename=None, remote_folder='', storage_type='Aspose', storage_name=None): """ Add Signature to a pdf file :param page_number: :param post_data: :param stream_out: :param output_filename: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ if not post_data: raise ValueError("post_data not specified") if not page_number: raise ValueError("page_number not specified") str_uri = self.base_uri + '/pages/' + page_number + '/stamp' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.put(signed_uri, post_data, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) if not stream_out: if output_filename is None: output_filename = self.filename output_path = AsposeApp.output_path + output_filename Utils.save_file(response, output_path) return output_path else: return response.content def add_new_page(self, stream_out=False, output_filename=None, remote_folder='', storage_type='Aspose', storage_name=None): """ Add new page to a pdf file :param stream_out: :param output_filename: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/pages' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.put(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) if not stream_out: if output_filename is None: output_filename = self.filename output_path = AsposeApp.output_path + output_filename Utils.save_file(response, output_path) return output_path else: return response.content def delete_page(self, page_number, stream_out=False, output_filename=None, remote_folder='', storage_type='Aspose', storage_name=None): """ Add new page to a pdf file :param page_number: :param stream_out: :param output_filename: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ if not page_number: raise ValueError("page_number not specified") str_uri = self.base_uri + '/pages/' + str(page_number) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.delete(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) if not stream_out: if output_filename is None: output_filename = self.filename output_path = AsposeApp.output_path + output_filename Utils.save_file(response, output_path) return output_path else: return response.content def move_page(self, page_number, new_location, stream_out=False, output_filename=None, remote_folder='', storage_type='Aspose', storage_name=None): """ Move a page to new location :param page_number: :param new_location: :param stream_out: :param output_filename: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/pages/' + str(page_number) + '/movePage?newIndex=' + str(new_location) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) if not stream_out: if output_filename is None: output_filename = self.filename output_path = AsposeApp.output_path + output_filename Utils.save_file(response, output_path) return output_path else: return response.content def add_signature(self, post_data, page_number=None, stream_out=False, output_filename=None, remote_folder='', storage_type='Aspose', storage_name=None): """ Add Signature to a pdf file :param page_number: :param post_data: :param stream_out: :param output_filename: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ if not post_data: raise ValueError("post_data not specified") str_uri = self.base_uri if page_number: str_uri += '/pages/' + page_number str_uri += '/sign' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, post_data, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) if not stream_out: if output_filename is None: output_filename = self.filename output_path = AsposeApp.output_path + output_filename Utils.save_file(response, output_path) return output_path else: return response.content def get_form_fields_count(self, remote_folder='', storage_type='Aspose', storage_name=None): """ Get Form Feilds Count :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/fields' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return len(response['Fields']['List']) if response['Code'] == 200 else False def get_all_form_fields(self, remote_folder='', storage_type='Aspose', storage_name=None): """ Get All Form Fields :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/fields' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Fields'] if response['Code'] == 200 else False def get_form_field(self, field_name, remote_folder='', storage_type='Aspose', storage_name=None): """ Get a Form Field :param field_name: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ if not field_name: raise ValueError("field_name not specified") str_uri = self.base_uri + '/fields/' + field_name str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Field'] if response['Code'] == 200 else False def update_form_field(self, field_name, field_type, field_value, remote_folder='', storage_type='Aspose', storage_name=None): """ Update Form Field Value :param field_name: :param field_type: :param field_value: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ if not field_name: raise ValueError("field_name not specified") if not field_type: raise ValueError("field_type not specified") if not field_value: raise ValueError("field_value not specified") post_data = {'Name': field_name, 'Type': field_type, 'Values': [field_value]} str_uri = self.base_uri + '/fields' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.put(signed_uri, post_data, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Field'] if response['Code'] == 200 else False def get_page_count(self, remote_folder='', storage_type='Aspose', storage_name=None): """ Get count of pages in pdf file :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/pages' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return len(response['Pages']['List']) if response['Pages']['List'] else 0 def append_documents(self, append_file, start_page=None, end_page=None, remote_folder='', storage_type='Aspose', storage_name=None): """ Append a pdf file to base pdf :param append_file: :param start_page: :param end_page: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/appendDocument' qry = {'appendFile': append_file} if start_page: qry['startPage'] = start_page if end_page: qry['endPage'] = end_page str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if validate_output: return validate_output else: return True def split_pdf(self, start_page=None, end_page=None, save_format=None, remote_folder='', storage_type='Aspose', storage_name=None): """ :param start_page: :param end_page: :param save_format: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/split' qry = {} if start_page: qry['from'] = start_page if end_page: qry['to'] = end_page if save_format: qry['format'] = save_format str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if validate_output: return validate_output else: return response @staticmethod def merge_documents(merged_filename, source_files, remote_folder='', storage_type='Aspose', storage_name=None): """ Merge multiple pdf files :param merged_filename: :param source_files: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ json_data = json.dumps({'List': source_files}) str_uri = Product.product_uri + 'pdf/' + merged_filename + '/merge' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.put(signed_uri, json_data, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return True if response['Status'] == 'OK' else False class TextEditor: """ Wrapper class for Aspose.PDF API for Text Editing features. The Aspose.PDF API let's you manipulate PDF files. 
""" def __init__(self, filename): self.filename = filename if not filename: raise ValueError("filename not specified") self.base_uri = Product.product_uri + 'pdf/' + self.filename def get_fragment_count(self, page_number, remote_folder='', storage_type='Aspose', storage_name=None): """ Count fragments in a pdf file on given page number :param page_number: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/pages/' + str(page_number) + '/fragments' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return len(response['TextItems']['List']) if response['TextItems']['List'] else 0 def get_segment_count(self, page_number, fragment_number, remote_folder='', storage_type='Aspose', storage_name=None): """ Count fragments in a pdf file on given page number :param page_number: :param fragment_number: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/pages/' + str(page_number) + '/fragments/' + str(fragment_number) + '/segments' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return len(response['TextItems']['List']) if response['TextItems']['List'] else 0 def get_text(self, page_number, remote_folder='', storage_type='Aspose', storage_name=None): """ Get text from give page number :param page_number: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/pages/' + str(page_number) + '/textitems' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) output_text = '' for item in response['TextItems']['List']: output_text += item['Text'] return output_text def get_text_items(self, page_number=None, fragment_number=None, remote_folder='', storage_type='Aspose', storage_name=None): """ Get text items from given page number :param page_number: :param fragment_number: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri if(page_number): str_uri += '/pages/' + str(page_number) if(fragment_number): str_uri += '/fragments/' + str(fragment_number) str_uri += '/textitems' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['TextItems']['List'] if response['TextItems']['List'] else False def get_text_format(self, page_number=None, fragment_number=None, segment_number=None, remote_folder='', storage_type='Aspose', storage_name=None): """ Get text format from given page number :param page_number: :param fragment_number: :param segment_number: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri if(page_number): str_uri += '/pages/' + str(page_number) if(fragment_number): str_uri += '/fragments/' + str(fragment_number) if(segment_number): str_uri += '/segments/' + str(segment_number) str_uri += '/textformat' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['TextFormat'] if response['TextFormat'] else False def replace_text(self, old_text, new_text, page_number=None, is_reg=False, remote_folder='', storage_type='Aspose', storage_name=None): """ Replace text in pdf file :param old_text: :param new_text: :param page_number: :param is_reg: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri if(page_number): str_uri += '/pages/' + str(page_number) str_uri += '/replaceText' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) json_data = json.dumps({'OldValue': old_text, 'NewValue': new_text, 'Regex': is_reg}) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, json_data, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: return True else: return validate_output def replace_multiple_text(self, post_data, page_number=None, remote_folder='', storage_type='Aspose', storage_name=None): """ Replace text in pdf file :param post_data: :param page_number: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri if(page_number): str_uri += '/pages/' + str(page_number) str_uri += '/replaceTextList' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) json_data = json.dumps(post_data) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, json_data, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: return True else: return validate_output def replace_image_using_file(self, page_number, image_index, image_file, remote_folder='', storage_type='Aspose', storage_name=None): """ Replace image in pdf file :param page_number: :param image_index: :param image_file: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/pages/' + str(page_number) + '/image/' + str(image_index) str_uri += '?imageFile=' + image_file str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: return response else: return validate_output class Extractor: """ Wrapper class for Aspose.PDF API Extraction features. The Aspose.PDF API let's you manipulate PDF files. 
""" def __init__(self, filename): self.filename = filename if not filename: raise ValueError("filename not specified") self.base_uri = Product.product_uri + 'pdf/' + self.filename def get_image(self, page_number, image_index, save_format, width=None, height=None, remote_folder='', storage_type='Aspose', storage_name=None): """ Get image from given page :param page_number: :param image_index: :param save_format: :param width: :param height: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ if not save_format: raise ValueError("save_format not specified") if not page_number: raise ValueError("page_number not specified") str_uri = self.base_uri + '/pages/' + str(page_number) + '/images/' + str(image_index) qry = {'format': save_format} if width and height: qry['width'] = width qry['height'] = height str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: output_path = AsposeApp.output_path + Utils.get_filename(self.filename) + '_' + str(page_number) \ + '_' + str(image_index) + '.' + save_format Utils.save_file(response, output_path) return output_path else: return validate_output def get_image_count(self, page_number, remote_folder='', storage_type='Aspose', storage_name=None): """ Count images on a given page :param page_number: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ if not page_number: raise ValueError("page_number not specified") str_uri = self.base_uri + '/pages/' + str(page_number) + '/images' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return len(response['Images']['List']) if response['Images']['List'] else 0 def get_annotations(self, page_number, remote_folder='', storage_type='Aspose', storage_name=None): """ Count annotations in a pdf file on given page number :param page_number: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/pages/' + str(page_number) + '/annotations' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Annotations']['List'] if response['Annotations']['List'] else 0 def get_annotations_count(self, page_number, remote_folder='', storage_type='Aspose', storage_name=None): """ Count annotations in a pdf file on given page number :param page_number: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/pages/' + str(page_number) + '/annotations' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return len(response['Annotations']['List']) if response['Annotations']['List'] else 0 def get_annotation(self, page_number, annotation_index, remote_folder='', storage_type='Aspose', storage_name=None): """ Count annotations in a pdf file on given page number :param page_number: :param annotation_index: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/pages/' + str(page_number) + '/annotation/' + str(annotation_index) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Annotation'] if response['Code'] == 200 else 0 def get_bookmarks(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/bookmarks' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Bookmarks']['List'] if response['Bookmarks']['List'] else 0 def get_bookmarks_count(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/bookmarks' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return len(response['Bookmarks']['List']) if response['Bookmarks']['List'] else 0 def get_bookmark(self, bookmark_index, remote_folder='', storage_type='Aspose', storage_name=None): """ :param bookmark_index: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/bookmarks/' + str(bookmark_index) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Bookmark'] if response['Code'] == 200 else 0 def get_links(self, page_number, remote_folder='', storage_type='Aspose', storage_name=None): """ :param page_number: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/pages/' + str(page_number) + '/links' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Links']['List'] if response['Links']['List'] else 0 def get_links_count(self, page_number, remote_folder='', storage_type='Aspose', storage_name=None): """ :param page_number: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/pages/' + str(page_number) + '/links' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return len(response['Links']['List']) if response['Links']['List'] else 0 def get_link(self, page_number, link_index, remote_folder='', storage_type='Aspose', storage_name=None): """ :param page_number: :param link_index: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/pages/' + str(page_number) + '/link/' + str(link_index) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Link'] if response['Code'] == 200 else 0 def get_attachments(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/attachments' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Attachments']['List'] if response['Attachments']['List'] else 0 def get_attachments_count(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/attachments' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return len(response['Attachments']['List']) if response['Attachments']['List'] else 0 def get_attachment(self, attachment_index, remote_folder='', storage_type='Aspose', storage_name=None): """ :param attachment_index: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/attachment/' + str(attachment_index) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Attachment'] if response['Code'] == 200 else 0 def download_attachment(self, attachment_index, output_filename, remote_folder='', storage_type='Aspose', storage_name=None): """ :param attachment_index: :param output_filename: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/attachments/' + str(attachment_index) + '/download' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: output_path = AsposeApp.output_path + output_filename Utils.save_file(response, output_path) return output_path else: return validate_output class Converter: """ Wrapper class for Aspose.PDF API. The Aspose.PDF API let's you manipulate PDF files. 
""" def __init__(self, filename): self.filename = filename if not filename: raise ValueError("filename not specified") self.base_uri = Product.product_uri + 'pdf/' + self.filename def convert_to_image(self, page_number, save_format, width=None, height=None, stream_out=False, output_filename=None, remote_folder='', storage_type='Aspose', storage_name=None): """ Convert a page to image :param page_number: :param save_format: :param width: :param height: :param stream_out: :param output_filename: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ if not save_format: raise ValueError("save_format not specified") if not page_number: raise ValueError("page_number not specified") str_uri = self.base_uri + '/pages/' + str(page_number) qry = {'format': save_format} if(width): qry['width'] = width if(height): qry['height'] = height str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: if not stream_out: if output_filename is None: output_filename = self.filename output_path = AsposeApp.output_path + Utils.get_filename(output_filename) + '_' + str(page_number) + '.' 
+ \ save_format Utils.save_file(response, output_path) return output_path else: return response.content else: return validate_output @staticmethod def convert_by_url(url, save_format, stream_out=False, output_filename=None, remote_folder='', storage_type='Aspose', storage_name=None): """ Convert a pdf file to any supported format :param save_format: :param stream_out: :param output_filename: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ if not url: raise ValueError("url not specified") if not save_format: raise ValueError("save_format not specified") str_uri = Product.product_uri + 'pdf/convert?url=' + url + '&format=' + save_format str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.put(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) if not stream_out: if output_filename is None: output_filename = os.path.basename(url) save_format = 'zip' if save_format == 'html' else save_format output_path = AsposeApp.output_path + Utils.get_filename(output_filename) + '.' + save_format Utils.save_file(response, output_path) return output_path else: return response.content def convert(self, save_format, stream_out=False, output_filename=None, remote_folder='', storage_type='Aspose', storage_name=None): """ Convert a pdf file to any supported format :param save_format: :param stream_out: :param output_filename: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ if not save_format: raise ValueError("save_format not specified") str_uri = self.base_uri + '?format=' + save_format str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) if not stream_out: if output_filename is None: output_filename = self.filename save_format = 'zip' if save_format == 'html' else save_format output_path = AsposeApp.output_path + Utils.get_filename(output_filename) + '.' + save_format Utils.save_file(response, output_path) return output_path else: return response.content @staticmethod def convert_local_file(input_file, save_format, stream_out=False, output_filename=None): """ Convert a local pdf file to any supported format :param input_file: :param save_format: :param stream_out: :param output_filename: :return: """ if not input_file: raise ValueError("input_file not specified") if not save_format: raise ValueError("save_format not specified") str_uri = Product.product_uri + 'pdf/convert?format=' + save_format signed_uri = Utils.sign(str_uri) response = None try: response = Utils.upload_file_binary(input_file, signed_uri) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) if not stream_out: if output_filename is None: output_filename = input_file save_format = 'zip' if save_format == 'html' else save_format output_path = AsposeApp.output_path + Utils.get_filename(output_filename) + '.' + save_format Utils.save_file(response, output_path) return output_path else: return response.content
{ "repo_name": "asposeforcloud/Aspose_Cloud_SDK_For_Python", "path": "asposecloud/pdf/__init__.py", "copies": "1", "size": "60440", "license": "mit", "hash": -279800195546893120, "line_mean": 35.8536585366, "line_max": 129, "alpha_frac": 0.5770350761, "autogenerated": false, "ratio": 4.084887807515544, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5161922883615544, "avg_score": null, "num_lines": null }
__author__ = 'AssadMahmood' import requests import json from asposecloud import AsposeApp from asposecloud import Product from asposecloud.common import Utils # ======================================================================== # DOCUMENT CLASS # ======================================================================== class Document: def __init__(self, filename): self.filename = filename if not filename: raise ValueError("filename not specified") self.base_uri = Product.product_uri + 'words/' + self.filename def save_as(self, options_xml, remote_folder='', storage_type='Aspose', storage_name=None): """ :param options_xml: :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = self.base_uri + '/saveAs' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, options_xml, headers={ 'content-type': 'application/xml', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: return Utils.download_file(response['SaveResult']['DestDocument']['Href'], response['SaveResult']['DestDocument']['Href'], remote_folder, storage_type, storage_name) else: return validate_output def split_document(self, from_page=None, to_page=None, save_format='pdf', remote_folder='', storage_type='Aspose', storage_name=None): """ :param from_page: :param to_page: :param save_format: :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = self.base_uri + '/split' qry = {'format': save_format} if from_page: qry['from'] = from_page if to_page: qry['to'] = to_page str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = 
Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['SplitResult'] def get_page_setup(self, section_id, remote_folder='', storage_type='Aspose', storage_name=None): """ :param section_id: :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = self.base_uri + '/sections/' + str(section_id) + '/pageSetup' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['PageSetup'] def update_page_setup(self, section_id, options_xml, remote_folder='', storage_type='Aspose', storage_name=None): """ :param section_id: :param options_xml :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = self.base_uri + '/sections/' + str(section_id) + '/pageSetup' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, options_xml, headers={ 'content-type': 'application/xml', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['PageSetup'] def append_document(self, doc_list, remote_folder='', storage_type='Aspose', storage_name=None): """ :param doc_list: :param remote_folder: storage path to operate 
:param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/appendDocument' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) json_data = json.dumps(doc_list) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, json_data, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: return Utils.download_file(self.filename, self.filename, remote_folder, storage_type, storage_name) else: return validate_output def get_properties(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/documentProperties' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['DocumentProperties']['List'] def remove_header_footer(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/headersFooters' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.delete(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return True if response['Code'] == 200 else False def get_bookmarks_count(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/bookmarks' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return len(response['Bookmarks']['BookmarkList']) def get_hyperlinks(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/hyperlinks' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Hyperlinks']['HyperlinkList'] def get_hyperlink(self, hyperlink_index, remote_folder='', storage_type='Aspose', storage_name=None): """ :param hyperlink_index: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ if not hyperlink_index: raise ValueError("hyperlink_index not specified") str_uri = self.base_uri + '/hyperlinks/' + str(hyperlink_index) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Hyperlink'] def get_hyperlinks_count(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/hyperlinks' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return len(response['Hyperlinks']['HyperlinkList']) def get_bookmarks(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/bookmarks' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Bookmarks']['BookmarkList'] def get_bookmark(self, bookmark_name, remote_folder='', storage_type='Aspose', storage_name=None): """ :param bookmark_name: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ if not bookmark_name: raise ValueError("bookmark_name not specified") str_uri = self.base_uri + '/bookmarks/' + bookmark_name str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Bookmark'] def update_bookmark(self, bookmark_name, bookmark_text, remote_folder='', storage_type='Aspose', storage_name=None): """ :param bookmark_name: :param bookmark_text: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ if not bookmark_name: raise ValueError("bookmark_name not specified") if not bookmark_text: raise ValueError("bookmark_text not specified") post_data = {'Text': bookmark_text} str_uri = self.base_uri + '/bookmarks/' + bookmark_name str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, post_data, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return True if response['Code'] == 200 else False def get_document_info(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = Utils.append_storage(self.base_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Document'] def get_property(self, property_name, remote_folder='', storage_type='Aspose', storage_name=None): """ :param property_name: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/documentProperties/' + property_name str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['DocumentProperty'] def set_property(self, property_name, property_value, remote_folder='', storage_type='Aspose', storage_name=None): """ :param property_name: :param property_value: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/documentProperties/' + property_name str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) json_data = json.dumps({'Value': property_value}) signed_uri = Utils.sign(str_uri) response = None try: response = requests.put(signed_uri, json_data, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['DocumentProperty'] def delete_property(self, property_name, remote_folder='', storage_type='Aspose', storage_name=None): """ :param property_name: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/documentProperties/' + property_name str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.delete(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return True if response['Code'] == 200 else False def accept_tracking_changes(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/revisions/acceptAll' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def reject_tracking_changes(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/revisions/rejectAll' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def get_document_protection(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = self.base_uri + '/protection' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def protect_document(self, options, remote_folder='', storage_type='Aspose', storage_name=None): """ :param options: :param remote_folder: 
:param storage_type: :param storage_name: :return: """ str_uri = self.base_uri + '/protection' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) post_data = json.dumps(options) response = None try: response = requests.put(signed_uri, post_data, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def unprotect_document(self, options, remote_folder='', storage_type='Aspose', storage_name=None): """ :param options: :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = self.base_uri + '/protection' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) post_data = json.dumps(options) response = None try: response = requests.delete(signed_uri, data=post_data, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def update_document_protection(self, options, remote_folder='', storage_type='Aspose', storage_name=None): """ :param options: :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = self.base_uri + '/protection' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) post_data = json.dumps(options) response = None try: response = requests.post(signed_uri, post_data, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response # ======================================================================== # EXTRACTOR CLASS # 
# ========================================================================

# Identifies this SDK to the Aspose service on every request.
_ASPOSE_CLIENT_HEADER = 'PYTHONSDK/v1.0'


def _sdk_headers(content_type='application/json'):
    """
    Build the default header set sent with every SDK request.

    :param content_type: value for the ``content-type`` header
    :return: dict of HTTP headers
    """
    return {
        'content-type': content_type,
        'accept': 'application/json',
        'x-aspose-client': _ASPOSE_CLIENT_HEADER,
    }


def _send_request(method, str_uri, data=None, content_type='application/json', stream=False):
    """
    Sign *str_uri* and issue a single HTTP request against the Aspose API.

    :param method: HTTP verb, e.g. ``'GET'``, ``'POST'``, ``'DELETE'``
    :param str_uri: unsigned request URI (storage options already appended)
    :param data: optional request body (string or bytes)
    :param content_type: value for the ``content-type`` header
    :param stream: passed through to ``requests`` for binary downloads
    :return: the ``requests`` response object (status already checked)

    NOTE(review): on an HTTP error this prints the error and calls
    ``exit(1)`` — that matches the historical behaviour of every method in
    this module, but a library should raise instead of killing the process.
    Kept for backward compatibility.
    """
    signed_uri = Utils.sign(str_uri)
    response = None
    try:
        response = requests.request(method, signed_uri, data=data,
                                    headers=_sdk_headers(content_type), stream=stream)
        response.raise_for_status()
    except requests.HTTPError as e:
        print(e)
        print(response.content)
        exit(1)
    return response


class Extractor:
    """Read-only access to the parts of a Word document stored in the cloud."""

    def __init__(self, filename):
        """
        :param filename: remote document name; must be non-empty
        :raises ValueError: if *filename* is falsy
        """
        self.filename = filename
        if not filename:
            raise ValueError("filename not specified")
        self.base_uri = Product.product_uri + 'words/' + self.filename

    def _get_json_key(self, resource, key, remote_folder, storage_type, storage_name):
        """GET ``base_uri + resource`` and return ``response[key]`` from the JSON body."""
        str_uri = Utils.append_storage(self.base_uri + resource,
                                       remote_folder, storage_type, storage_name)
        return _send_request('GET', str_uri).json()[key]

    def _download_part(self, resource, suffix, remote_folder, storage_type, storage_name):
        """
        GET a binary sub-resource and save it under the app output path.

        :param resource: URI suffix under ``base_uri``
        :param suffix: filename suffix (index + extension) for the saved file
        :return: result of ``Utils.save_file`` on success, otherwise the
                 validation error reported by ``Utils.validate_result``
        """
        str_uri = Utils.append_storage(self.base_uri + resource,
                                       remote_folder, storage_type, storage_name)
        response = _send_request('GET', str_uri)
        validate_output = Utils.validate_result(response)
        if not validate_output:
            output_path = AsposeApp.output_path + Utils.get_filename(self.filename) + suffix
            return Utils.save_file(response, output_path)
        else:
            return validate_output

    def get_stats(self, remote_folder='', storage_type='Aspose', storage_name=None):
        """
        Read word/page statistics for the document.

        :param remote_folder: storage path to operate
        :param storage_type: type of storage e.g Aspose, S3
        :param storage_name: name of storage e.g. MyAmazonS3
        :return: statistics dict, or ``False`` when the API code is not 200
        """
        str_uri = Utils.append_storage(self.base_uri + '/statistics',
                                       remote_folder, storage_type, storage_name)
        response = _send_request('GET', str_uri).json()
        return response['StatData'] if response['Code'] == 200 else False

    def get_sections(self, remote_folder='', storage_type='Aspose', storage_name=None):
        """Return the document's sections. Storage params as for :meth:`get_stats`."""
        return self._get_json_key('/sections', 'Sections',
                                  remote_folder, storage_type, storage_name)

    def get_section(self, section_id, remote_folder='', storage_type='Aspose', storage_name=None):
        """Return one section by index. Storage params as for :meth:`get_stats`."""
        return self._get_json_key('/sections/' + str(section_id), 'Section',
                                  remote_folder, storage_type, storage_name)

    def get_paragraphs(self, remote_folder='', storage_type='Aspose', storage_name=None):
        """Return all paragraphs. Storage params as for :meth:`get_stats`."""
        return self._get_json_key('/paragraphs', 'Paragraphs',
                                  remote_folder, storage_type, storage_name)

    def get_paragraph(self, para_id, remote_folder='', storage_type='Aspose', storage_name=None):
        """Return one paragraph by index. Storage params as for :meth:`get_stats`."""
        return self._get_json_key('/paragraphs/' + str(para_id), 'Paragraph',
                                  remote_folder, storage_type, storage_name)

    def get_paragraph_run(self, para_id, run_index,
                          remote_folder='', storage_type='Aspose', storage_name=None):
        """Return one run of a paragraph. Storage params as for :meth:`get_stats`."""
        return self._get_json_key('/paragraphs/' + str(para_id) + '/runs/' + str(run_index),
                                  'Run', remote_folder, storage_type, storage_name)

    def get_paragraph_run_font(self, para_id, run_index,
                               remote_folder='', storage_type='Aspose', storage_name=None):
        """Return the font of a paragraph run. Storage params as for :meth:`get_stats`."""
        return self._get_json_key('/paragraphs/' + str(para_id) + '/runs/' + str(run_index) + '/font',
                                  'Font', remote_folder, storage_type, storage_name)

    def update_paragraph_run_font(self, para_id, run_index, font_data,
                                  remote_folder='', storage_type='Aspose', storage_name=None):
        """
        Update the font of a paragraph run.

        :param para_id: paragraph index
        :param run_index: run index within the paragraph
        :param font_data: dict of font properties, serialized as JSON
        :param remote_folder: storage path to operate
        :param storage_type: type of storage e.g Aspose, S3
        :param storage_name: name of storage e.g. MyAmazonS3
        :return: the updated ``Font`` resource
        """
        str_uri = Utils.append_storage(
            self.base_uri + '/paragraphs/' + str(para_id) + '/runs/' + str(run_index) + '/font',
            remote_folder, storage_type, storage_name)
        response = _send_request('POST', str_uri, data=json.dumps(font_data)).json()
        return response['Font']

    def get_text(self, remote_folder='', storage_type='Aspose', storage_name=None):
        """
        Return the full document text as one string.

        Storage params as for :meth:`get_stats`.
        """
        str_uri = Utils.append_storage(self.base_uri + '/textItems',
                                       remote_folder, storage_type, storage_name)
        response = _send_request('GET', str_uri).json()
        return ''.join(item['Text'] for item in response['TextItems']['List'])

    def get_drawing_object_list(self, remote_folder='', storage_type='Aspose', storage_name=None):
        """Return the list of drawing objects. Storage params as for :meth:`get_stats`."""
        str_uri = Utils.append_storage(self.base_uri + '/drawingObjects',
                                       remote_folder, storage_type, storage_name)
        return _send_request('GET', str_uri).json()['DrawingObjects']['List']

    def get_ole_data(self, ole_index, ole_format,
                     remote_folder='', storage_type='Aspose', storage_name=None):
        """
        Download the OLE payload of a drawing object and save it locally.

        :param ole_index: drawing-object index
        :param ole_format: file extension for the saved payload
        :return: saved-file result, or a validation error
        """
        return self._download_part('/drawingObjects/' + str(ole_index) + '/oleData',
                                   '_' + str(ole_index) + '.' + ole_format,
                                   remote_folder, storage_type, storage_name)

    def get_image_data(self, image_index, image_format,
                       remote_folder='', storage_type='Aspose', storage_name=None):
        """
        Download the image payload of a drawing object and save it locally.

        :param image_index: drawing-object index
        :param image_format: file extension for the saved payload
        :return: saved-file result, or a validation error
        """
        return self._download_part('/drawingObjects/' + str(image_index) + '/imagedata',
                                   '_' + str(image_index) + '.' + image_format,
                                   remote_folder, storage_type, storage_name)

    def convert_drawing_object(self, object_index, render_format,
                               remote_folder='', storage_type='Aspose', storage_name=None):
        """
        Render a drawing object to *render_format* and save it locally.

        :param object_index: drawing-object index
        :param render_format: target format / file extension
        :return: saved-file result, or a validation error
        """
        return self._download_part('/drawingObjects/' + str(object_index) + '?format=' + render_format,
                                   '_' + str(object_index) + '.' + render_format,
                                   remote_folder, storage_type, storage_name)

    @staticmethod
    def get_drawing_object(object_uri, output_path,
                           remote_folder='', storage_type='Aspose', storage_name=None):
        """
        Download a drawing object given its full resource URI.

        Chooses image data, OLE data, or a JPEG render depending on which
        links the object's metadata reports.

        :param object_uri: resource URI relative to the ``words/`` root
        :param output_path: directory prefix for the saved file
        :param remote_folder: storage path to operate
        :param storage_type: type of storage e.g Aspose, S3
        :param storage_name: name of storage e.g. MyAmazonS3
        :return: saved-file result, or a validation error
        """
        # NOTE(review): taking only the last character limits this to
        # single-digit object indexes -- kept to preserve behaviour.
        object_index = object_uri[-1:]
        str_uri = Utils.append_storage(Product.product_uri + 'words/' + object_uri,
                                       remote_folder, storage_type, storage_name)
        object_info = _send_request('GET', str_uri).json()['DrawingObject']
        if object_info['ImageDataLink'] is not None:
            str_uri = Product.product_uri + 'words/' + object_uri + '/imageData'
            output_path = output_path + 'DrawingObject_' + str(object_index) + '.jpg'
        elif object_info['OleDataLink'] is not None:
            str_uri = Product.product_uri + 'words/' + object_uri + '/oleData'
            output_path = output_path + 'DrawingObject_' + str(object_index) + '.xlsx'
        else:
            # No embedded payload -- fall back to a JPEG render of the shape.
            str_uri = Product.product_uri + 'words/' + object_uri + '?format=jpg'
            output_path = output_path + 'DrawingObject_' + str(object_index) + '.jpg'
        str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name)
        response = _send_request('GET', str_uri)
        validate_output = Utils.validate_result(response)
        if not validate_output:
            return Utils.save_file(response, output_path)
        else:
            return validate_output


# ========================================================================
# MAIL MERGE CLASS
# ========================================================================

class MailMerge:
    """Execute mail-merge operations on a remote Word document."""

    def __init__(self, filename):
        """
        :param filename: remote document name; must be non-empty
        :raises ValueError: if *filename* is falsy
        """
        self.filename = filename
        if not filename:
            raise ValueError("filename not specified")
        self.base_uri = Product.product_uri + 'words/' + self.filename

    def _execute(self, resource, str_xml, with_regions, remote_folder, storage_type, storage_name):
        """
        POST *str_xml* to ``base_uri + resource`` and download the result.

        :raises ValueError: if *str_xml* is falsy
        """
        if not str_xml:
            raise ValueError("str_xml not specified")
        str_uri = self.base_uri + resource
        if with_regions:
            str_uri += '?withRegions=true'
        str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name)
        response = _send_request('POST', str_uri, data=str_xml,
                                 content_type='application/xml').json()
        validate_output = Utils.validate_result(response)
        if not validate_output:
            # The service writes the merged document under a new name;
            # fetch it back to the local output path.
            return Utils.download_file(response['Document']['FileName'], self.filename,
                                       remote_folder, storage_type, storage_name)
        else:
            return validate_output

    def execute(self, str_xml, with_regions=False,
                remote_folder='', storage_type='Aspose', storage_name=None):
        """
        Execute a mail merge with the given XML data.

        :param str_xml: merge data as an XML string
        :param with_regions: merge with regions when ``True``
        :param remote_folder: storage path to operate
        :param storage_type: type of storage e.g Aspose, S3
        :param storage_name: name of storage e.g. MyAmazonS3
        :return: downloaded-file result, or a validation error
        """
        return self._execute('/executeMailMerge', str_xml, with_regions,
                             remote_folder, storage_type, storage_name)

    def execute_template(self, str_xml, with_regions=False,
                         remote_folder='', storage_type='Aspose', storage_name=None):
        """
        Populate the document as a template with the given XML data.

        Parameters and return as for :meth:`execute`.
        """
        return self._execute('/executeTemplate', str_xml, with_regions,
                             remote_folder, storage_type, storage_name)

    def get_field_names(self, remote_folder='', storage_type='Aspose', storage_name=None):
        """
        List the mail-merge field names defined in the document.

        :return: list of field names, or ``False`` when the API code is not 200
        """
        str_uri = Utils.append_storage(self.base_uri + '/mailMergeFieldNames',
                                       remote_folder, storage_type, storage_name)
        response = _send_request('GET', str_uri).json()
        return response['FieldNames']['Names'] if response['Code'] == 200 else False


# ========================================================================
# BUILDER CLASS
# ========================================================================

class Builder:
    """In-place modifications (watermarks, text replacement) of a remote document."""

    def __init__(self, filename):
        """
        :param filename: remote document name; must be non-empty
        :raises ValueError: if *filename* is falsy
        """
        self.filename = filename
        if not filename:
            raise ValueError("filename not specified")
        self.base_uri = Product.product_uri + 'words/' + self.filename

    def _modify_and_download(self, method, str_uri, body,
                             remote_folder, storage_type, storage_name):
        """Issue a modifying request, then download the updated document."""
        str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name)
        response = _send_request(method, str_uri, data=body).json()
        validate_output = Utils.validate_result(response)
        if not validate_output:
            return Utils.download_file(self.filename, self.filename,
                                       remote_folder, storage_type, storage_name)
        else:
            return validate_output

    def insert_watermark_image(self, image_file, angle,
                               remote_folder='', storage_type='Aspose', storage_name=None):
        """
        Insert an image watermark into the document.

        :param image_file: remote image file to use as the watermark
        :param angle: rotation angle of the watermark
        :param remote_folder: storage path to operate
        :param storage_type: type of storage e.g Aspose, S3
        :param storage_name: name of storage e.g. MyAmazonS3
        :return: downloaded-file result, or a validation error
        """
        # BUG FIX: this previously posted the image parameters to
        # '/insertWatermarkText' (copy-paste from insert_watermark_text);
        # the image endpoint is '/insertWatermarkImage'.
        str_uri = Utils.build_uri(self.base_uri + '/insertWatermarkImage',
                                  {'imageFile': image_file, 'rotationAngle': angle})
        return self._modify_and_download('POST', str_uri, None,
                                         remote_folder, storage_type, storage_name)

    def insert_watermark_text(self, text, angle,
                              remote_folder='', storage_type='Aspose', storage_name=None):
        """
        Insert a text watermark into the document.

        :param text: watermark text
        :param angle: rotation angle of the watermark
        :return: downloaded-file result, or a validation error
        """
        json_data = json.dumps({'Text': text, 'RotationAngle': angle})
        return self._modify_and_download('POST', self.base_uri + '/insertWatermarkText',
                                         json_data, remote_folder, storage_type, storage_name)

    def remove_watermark(self, remote_folder='', storage_type='Aspose', storage_name=None):
        """
        Remove the document's watermark.

        :return: downloaded-file result, or a validation error
        """
        return self._modify_and_download('DELETE', self.base_uri + '/watermark',
                                         None, remote_folder, storage_type, storage_name)

    def replace_text(self, old_text, new_text, match_case=False, match_whole_word=False,
                     remote_folder='', storage_type='Aspose', storage_name=None):
        """
        Replace occurrences of *old_text* with *new_text* in the document.

        :param old_text: text to search for
        :param new_text: replacement text
        :param match_case: case-sensitive match when ``True``
        :param match_whole_word: whole-word match when ``True``
        :param remote_folder: storage path to operate
        :param storage_type: type of storage e.g Aspose, S3
        :param storage_name: name of storage e.g. MyAmazonS3
        :return: ``True`` on success, otherwise a validation error
        """
        str_uri = Utils.append_storage(self.base_uri + '/replaceText',
                                       remote_folder, storage_type, storage_name)
        json_data = json.dumps({'OldValue': old_text, 'NewValue': new_text,
                                'IsMatchCase': match_case,
                                'IsMatchWholeWord': match_whole_word})
        response = _send_request('POST', str_uri, data=json_data).json()
        validate_output = Utils.validate_result(response)
        if not validate_output:
            return True
        else:
            return validate_output


# ========================================================================
# CONVERTER CLASS
# ========================================================================

class Converter:
    """Convert Word documents between formats via the Aspose service."""

    def __init__(self, filename):
        """
        :param filename: remote document name; must be non-empty
        :raises ValueError: if *filename* is falsy
        """
        self.filename = filename
        if not filename:
            raise ValueError("filename not specified")
        self.base_uri = Product.product_uri + 'words/' + self.filename

    @staticmethod
    def _deliver(response, stream_out, output_filename, save_format):
        """
        Save a converted payload to disk, or hand back the raw bytes.

        :param response: the streamed ``requests`` response
        :param stream_out: return ``response.content`` instead of saving
        :param output_filename: base name for the saved file
        :param save_format: target format (also the file extension)
        :return: output path, raw bytes, or a validation error
        """
        validate_output = Utils.validate_result(response)
        if not validate_output:
            if not stream_out:
                # html conversions come back as a zip of page + resources
                save_format = 'zip' if save_format == 'html' else save_format
                output_path = (AsposeApp.output_path +
                               Utils.get_filename(output_filename) + '.' + save_format)
                Utils.save_file(response, output_path)
                return output_path
            else:
                return response.content
        else:
            return validate_output

    def convert(self, save_format, stream_out=False, output_filename=None,
                remote_folder='', storage_type='Aspose', storage_name=None):
        """
        Convert the remote document to *save_format*.

        :param save_format: target format, e.g. ``'pdf'``
        :param stream_out: return raw bytes instead of saving to disk
        :param output_filename: local name override; defaults to the source name
        :param remote_folder: storage path to operate
        :param storage_type: type of storage e.g Aspose, S3
        :param storage_name: name of storage e.g. MyAmazonS3
        :return: output path, raw bytes, or a validation error
        :raises ValueError: if *save_format* is falsy
        """
        if not save_format:
            raise ValueError("save_format not specified")
        str_uri = Utils.append_storage(self.base_uri + '?format=' + save_format,
                                       remote_folder, storage_type, storage_name)
        response = _send_request('GET', str_uri, stream=True)
        if output_filename is None:
            output_filename = self.filename
        return Converter._deliver(response, stream_out, output_filename, save_format)

    @staticmethod
    def convert_local_file(input_file, save_format, stream_out=False, output_filename=None):
        """
        Upload a local file and convert it to *save_format*.

        :param input_file: path of the local file to upload
        :param save_format: target format, e.g. ``'pdf'``
        :param stream_out: return raw bytes instead of saving to disk
        :param output_filename: local name override; defaults to *input_file*
        :return: output path, raw bytes, or a validation error
        :raises ValueError: if *input_file* or *save_format* is falsy
        """
        if not input_file:
            raise ValueError("input_file not specified")
        if not save_format:
            raise ValueError("save_format not specified")
        signed_uri = Utils.sign(Product.product_uri + 'words/convert?format=' + save_format)
        response = None
        try:
            # FIX: the original also opened input_file here in a `with`
            # block and never used the handle -- Utils.upload_file_binary
            # reads the file itself, so the extra open is dropped.
            response = Utils.upload_file_binary(input_file, signed_uri)
            response.raise_for_status()
        except requests.HTTPError as e:
            print(e)
            print(response.content)
            exit(1)
        if output_filename is None:
            output_filename = input_file
        return Converter._deliver(response, stream_out, output_filename, save_format)

    @staticmethod
    def convert_webpage(json_data, stream_out=False, output_filename=None,
                        remote_folder='', storage_type='Aspose', storage_name=None):
        """
        Load a web page into a new Word document on the service.

        :param json_data: load options, serialized to JSON for the request body
        :param stream_out: accepted but unused (kept for interface compatibility)
        :param output_filename: accepted but unused (kept for interface compatibility)
        :param remote_folder: storage path to operate
        :param storage_type: type of storage e.g Aspose, S3
        :param storage_name: name of storage e.g. MyAmazonS3
        :return: the parsed JSON response from the service
        """
        str_uri = Utils.append_storage(Product.product_uri + 'words/loadWebDocument',
                                       remote_folder, storage_type, storage_name)
        return _send_request('POST', str_uri, data=json.dumps(json_data), stream=True).json()
{ "repo_name": "asposeforcloud/Aspose_Cloud_SDK_For_Python", "path": "asposecloud/words/__init__.py", "copies": "1", "size": "56910", "license": "mit", "hash": -6134324245572024000, "line_mean": 35.2484076433, "line_max": 131, "alpha_frac": 0.5674398173, "autogenerated": false, "ratio": 4.08161801620885, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.001336130147117088, "num_lines": 1570 }
__author__ = 'AssadMahmood' import requests import json from asposecloud import AsposeApp from asposecloud import Product from asposecloud.common import Utils # ======================================================================== # EXTRACTOR CLASS # ======================================================================== class Document: def __init__(self, filename): self.filename = filename if not filename: raise ValueError("filename not specified") self.base_uri = Product.product_uri + 'slides/' + self.filename def create_empty_presentation(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.put(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def merge_presentations(self, presentation_list, remote_folder='', storage_type='Aspose', storage_name=None): """ :param presentation_list: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/merge' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) json_data = json.dumps(presentation_list) response = None try: response = requests.put(signed_uri, json_data, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Document'] if response['Code'] == 200 else False def split_presentation(self, from_slide, to_slide, destination=None, save_format=None, remote_folder='', storage_type='Aspose', storage_name=None): """ :param from_slide: :param to_slide: :param destination: :param save_format: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/split' qry_str = {'from': from_slide, 'to': to_slide} if(destination): qry_str['destFolder'] = destination if(save_format): qry_str['format'] = save_format str_uri = Utils.build_uri(str_uri,qry_str) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def add_slide(self, position, remote_folder='', storage_type='Aspose', storage_name=None): """ :param position: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/slides?position=' + str(position) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def clone_slide(self, slide_no, position, remote_folder='', storage_type='Aspose', storage_name=None): """ :param slide_no: :param position: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/slides?position=' + str(position) + '&SlideToClone=' + str(slide_no) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def change_slide_position(self, old_position, new_position, remote_folder='', storage_type='Aspose', storage_name=None): """ :param old_position: :param new_position: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/slides?OldPosition=' + str(old_position) + '&NewPosition=' + str(new_position) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def delete_all_slides(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/slides' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def get_background(self, slide_no, remote_folder='', storage_type='Aspose', storage_name=None): """ :param slide_no: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/slides/' + str(slide_no) +'/background' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def get_properties(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/documentProperties' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['DocumentProperties']['List'] def get_property(self, property_name, remote_folder='', storage_type='Aspose', storage_name=None): """ :param property_name: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/documentProperties/' + property_name str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['DocumentProperty'] def set_property(self, property_name, property_value, remote_folder='', storage_type='Aspose', storage_name=None): """ :param property_name: :param property_value: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ json_data = json.dumps({'Value': property_value}) str_uri = self.base_uri + '/documentProperties/' + property_name str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.put(signed_uri, json_data, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['DocumentProperty'] def delete_property(self, property_name, remote_folder='', storage_type='Aspose', storage_name=None): """ :param property_name: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/documentProperties/' + property_name str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.delete(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return True if response['Code'] == 200 else False def delete_all_properties(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/documentProperties' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.delete(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return True if response['Code'] == 200 else False def add_custom_property(self, properties_list, remote_folder='', storage_type='Aspose', storage_name=None): """ :param properties_list: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ json_data = json.dumps(properties_list) str_uri = self.base_uri + '/documentProperties' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, json_data, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def get_background(self, slide_number, remote_folder='', storage_type='Aspose', storage_name=None): """ :param slide_number: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ if not slide_number: raise ValueError("slide_number not specified") str_uri = self.base_uri + '/slides/' + str(slide_number) + '/background' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Background'] def delete_background(self, slide_number, remote_folder='', storage_type='Aspose', storage_name=None): """ :param slide_number: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ if not slide_number: raise ValueError("slide_number not specified") str_uri = self.base_uri + '/slides/' + str(slide_number) + '/background' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.delete(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return True if response['Code'] == 200 else False def delete_slide(self, slide_number, remote_folder='', storage_type='Aspose', storage_name=None): """ :param slide_number: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ if not slide_number: raise ValueError("slide_number not specified") str_uri = self.base_uri + '/slides/' + str(slide_number) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.delete(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return True if response['Code'] == 200 else False def get_slide_count(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
        MyAmazonS3
        :return:
        """
        str_uri = self.base_uri + '/slides'
        str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name)
        signed_uri = Utils.sign(str_uri)
        response = None
        try:
            response = requests.get(signed_uri, headers={
                'content-type': 'application/json', 'accept': 'application/json',
                'x-aspose-client' : 'PYTHONSDK/v1.0'
            })
            response.raise_for_status()
            response = response.json()
        except requests.HTTPError as e:
            print e
            print response.content
            exit(1)
        # An empty slide list reports a count of 0 rather than failing.
        return len(response['Slides']['SlideList']) if response['Slides']['SlideList'] else 0

    def replace_text(self, slide_number, old_text, new_text, remote_folder='', storage_type='Aspose', storage_name=None):
        """
        Replace text on a single slide by POSTing to the slide's /replaceText resource.

        :param slide_number: slide to operate on (interpolated into the resource URI)
        :param old_text: text to search for
        :param new_text: replacement text
        :param remote_folder: storage path to operate
        :param storage_type: type of storage e.g Aspose, S3
        :param storage_name: name of storage e.g. MyAmazonS3
        :return: True on success, otherwise the validation failure details
        """
        str_uri = self.base_uri + '/slides/' + str(slide_number) + '/replaceText'
        str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name)
        json_data = json.dumps({'OldValue': old_text, 'NewValue': new_text})
        signed_uri = Utils.sign(str_uri)
        response = None
        try:
            response = requests.post(signed_uri, json_data, headers={
                'content-type': 'application/json', 'accept': 'application/json'
            })
            response.raise_for_status()
            response = response.json()
        except requests.HTTPError as e:
            print e
            print response.content
            exit(1)
        # Utils.validate_result returns a falsy value on success; anything else is
        # a description of the problem, which is handed back to the caller.
        validate_output = Utils.validate_result(response)
        if not validate_output:
            return True
        else:
            return validate_output

    def replace_all_text(self, old_text, new_text, remote_folder='', storage_type='Aspose', storage_name=None):
        """
        :param old_text:
        :param new_text:
        :param remote_folder: storage path to operate
        :param storage_type: type of storage e.g Aspose, S3
        :param storage_name: name of storage e.g.
        MyAmazonS3
        :return:
        """
        str_uri = self.base_uri + '/replaceText'
        str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name)
        json_data = json.dumps({'OldValue': old_text, 'NewValue': new_text})
        signed_uri = Utils.sign(str_uri)
        response = None
        try:
            response = requests.post(signed_uri, json_data, headers={
                'content-type': 'application/json', 'accept': 'application/json'
            })
            response.raise_for_status()
            response = response.json()
        except requests.HTTPError as e:
            print e
            print response.content
            exit(1)
        # Falsy validation result means success; otherwise pass the details back.
        validate_output = Utils.validate_result(response)
        if not validate_output:
            return True
        else:
            return validate_output

    def get_text_items(self, slide_number, remote_folder='', storage_type='Aspose', storage_name=None):
        """
        Fetch the text items of a single slide.

        :param slide_number: slide to read text items from
        :param remote_folder: storage path to operate
        :param storage_type: type of storage e.g Aspose, S3
        :param storage_name: name of storage e.g. MyAmazonS3
        :return: the slide's list of text items, or False when the slide has none
        """
        str_uri = self.base_uri + '/slides/' + str(slide_number) + '/textItems'
        str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name)
        signed_uri = Utils.sign(str_uri)
        response = None
        try:
            response = requests.get(signed_uri, headers={
                'content-type': 'application/json', 'accept': 'application/json'
            })
            response.raise_for_status()
            response = response.json()
        except requests.HTTPError as e:
            print e
            print response.content
            exit(1)
        return response['TextItems']['Items'] if response['TextItems']['Items'] else False

    def get_all_text_items(self, remote_folder='', storage_type='Aspose', storage_name=None):
        """
        :param remote_folder: storage path to operate
        :param storage_type: type of storage e.g Aspose, S3
        :param storage_name: name of storage e.g.
MyAmazonS3 :return: """ str_uri = self.base_uri + '/textItems' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['TextItems']['Items'] if response['TextItems']['Items'] else False # ======================================================================== # EXTRACTOR CLASS # ======================================================================== class Extractor: def __init__(self, filename): self.filename = filename if not filename: raise ValueError("filename not specified") self.base_uri = Product.product_uri + 'slides/' + self.filename def get_comments(self, slide_no, remote_folder='', storage_type='Aspose', storage_name=None): """ :param slide_no: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/slides/' + str(slide_no) +'/comments' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def get_aspect_ratio(self, slide_no, remote_folder='', storage_type='Aspose', storage_name=None): """ :param slide_no: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/slides/' + str(slide_no) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Slide']['Width'] / response['Slide']['Height'] if response['Code'] == 200 else False def get_image_count(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/images' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return len(response['Images']['List']) if response['Images']['List'] else 0 def get_slide_image_count(self, slide_number, remote_folder='', storage_type='Aspose', storage_name=None): """ :param slide_number: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ if not slide_number: raise ValueError("slide_number not specified") str_uri = self.base_uri + '/slides/' + str(slide_number) + '/images' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return len(response['Images']['List']) if response['Images']['List'] else 0 def get_shapes(self, slide_number, remote_folder='', storage_type='Aspose', storage_name=None): """ :param slide_number: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ if not slide_number: raise ValueError("slide_number not specified") str_uri = self.base_uri + '/slides/' + str(slide_number) + '/shapes' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['ShapeList']['ShapesLinks'] def get_shape(self, slide_number, shape_index, remote_folder='', storage_type='Aspose', storage_name=None): """ :param slide_number: :param shape_index: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ if not slide_number: raise ValueError("slide_number not specified") if not shape_index: raise ValueError("shape_index not specified") str_uri = self.base_uri + '/slides/' + str(slide_number) + '/shapes/' + str(shape_index) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Shape'] def get_color_scheme(self, slide_number, remote_folder='', storage_type='Aspose', storage_name=None): """ :param slide_number: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ if not slide_number: raise ValueError("slide_number not specified") str_uri = self.base_uri + '/slides/' + str(slide_number) + '/theme/colorScheme' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['ColorScheme'] def get_font_scheme(self, slide_number, remote_folder='', storage_type='Aspose', storage_name=None): """ :param slide_number: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ if not slide_number: raise ValueError("slide_number not specified") str_uri = self.base_uri + '/slides/' + str(slide_number) + '/theme/fontScheme' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['FontScheme'] def get_format_scheme(self, slide_number, remote_folder='', storage_type='Aspose', storage_name=None): """ :param slide_number: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ if not slide_number: raise ValueError("slide_number not specified") str_uri = self.base_uri + '/slides/' + str(slide_number) + '/theme/formatScheme' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['FormatScheme'] def get_placeholder(self, slide_number, placeholder_index, remote_folder='', storage_type='Aspose', storage_name=None): """ :param slide_number: :param placeholder_index: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ if not slide_number: raise ValueError("slide_number not specified") if not placeholder_index: raise ValueError("placeholder_index not specified") str_uri = self.base_uri + '/slides/' + str(slide_number) + '/placeholders/' + str(placeholder_index) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Placeholder'] def get_placeholder_count(self, slide_number, remote_folder='', storage_type='Aspose', storage_name=None): """ :param slide_number: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ if not slide_number: raise ValueError("slide_number not specified") str_uri = self.base_uri + '/slides/' + str(slide_number) + '/placeholders' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return len(response['Placeholders']['PlaceholderLinks']) if response['Placeholders']['PlaceholderLinks'] else 0 # ======================================================================== # CONVERTER CLASS # ======================================================================== class Converter: def __init__(self, filename): self.filename = filename if not filename: raise ValueError("filename not specified") self.base_uri = 
Product.product_uri + 'slides/' + self.filename def convert(self, save_format, additional_params=None, slide_number=None, stream_out=False, output_filename=None, remote_folder='', storage_type='Aspose', storage_name=None): """ :param save_format: :param additional_params: :param slide_number: :param stream_out: :param output_filename: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ if not save_format: raise ValueError("save_format not specified") str_uri = self.base_uri + '/slides' if(slide_number): str_uri += '/' + str(slide_number) qry_str = {'format': save_format} if(additional_params): qry_str.update(additional_params) str_uri = Utils.build_uri(str_uri, qry_str) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: if not stream_out: if output_filename is None: output_filename = self.filename save_format = 'zip' if save_format == 'html' else save_format if(slide_number): output_path = AsposeApp.output_path + Utils.get_filename(output_filename) + '_' + str(slide_number) + '.' + \ save_format else: output_path = AsposeApp.output_path + Utils.get_filename(output_filename) + '.' 
+ save_format Utils.save_file(response, output_path) return output_path else: return response.content else: return validate_output @staticmethod def convert_local_file(input_file, save_format, stream_out=False, output_filename=None): """ Convert a local pdf file to any supported format :param input_file: :param save_format: :param stream_out: :param output_filename: :return: """ if not input_file: raise ValueError("input_file not specified") if not save_format: raise ValueError("save_format not specified") str_uri = Product.product_uri + 'slides/convert?format=' + save_format signed_uri = Utils.sign(str_uri) response = None try: response = Utils.upload_file_binary(input_file, signed_uri) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) if not stream_out: if output_filename is None: output_filename = input_file save_format = 'zip' if save_format == 'html' else save_format output_path = AsposeApp.output_path + Utils.get_filename(output_filename) + '.' + save_format Utils.save_file(response, output_path) return output_path else: return response.content def convert_to_image(self, slide_number, save_format, width=None, height=None, stream_out=False, output_filename=None, remote_folder='', storage_type='Aspose', storage_name=None): """ :param slide_number: :param save_format: :param width: :param height: :param stream_out: :param output_filename: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ if not save_format: raise ValueError("save_format not specified") if not slide_number: raise ValueError("slide_number not specified") str_uri = self.base_uri + '/slides/' + str(slide_number) qry = {'format': save_format} if width and height: qry['width'] = width qry['height'] = height str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: if not stream_out: if output_filename is None: output_filename = self.filename output_path = AsposeApp.output_path + Utils.get_filename(output_filename) + '_' + str(slide_number) + '.' + \ save_format Utils.save_file(response, output_path) return output_path else: return response.content else: return validate_output
{ "repo_name": "asposeforcloud/Aspose_Cloud_SDK_For_Python", "path": "asposecloud/slides/__init__.py", "copies": "1", "size": "42867", "license": "mit", "hash": -400411937884812740, "line_mean": 35.7957081545, "line_max": 151, "alpha_frac": 0.5720017729, "autogenerated": false, "ratio": 4.089192025183631, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5161193798083631, "avg_score": null, "num_lines": null }
__author__ = 'AssadMahmood' import requests import json from asposecloud import AsposeApp from asposecloud import Product from asposecloud.common import Utils # ======================================================================== # WORKSHEET CLASS # ======================================================================== class Worksheet: def __init__(self, filename, worksheet_name): self.filename = filename self.worksheet_name = worksheet_name if not filename: raise ValueError("filename not specified") self.base_uri = Product.product_uri + 'cells/' + self.filename + '/worksheets/' + self.worksheet_name def get_pictures_count(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/pictures' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return len(response['Pictures']['PictureList']) def get_ole_objects_count(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/oleobjects' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return len(response['OleObjects']['OleObjectList']) def get_charts_count(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/charts' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return len(response['Charts']['ChartList']) def get_comments_count(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/comments' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return len(response['Comments']['CommentList']) def get_hyperlinks_count(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/comments' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return len(response['Hyperlinks']['HyperlinkList']) def get_mergedcells_count(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/mergedCells' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['MergedCells']['Count'] def get_validations_count(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/validations' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Validations']['Count'] def get_cell_style(self, cell_name, remote_folder='', storage_type='Aspose', storage_name=None): """ :param cell_name: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/cells/' + cell_name + '/style' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Style'] def get_cell(self, cell_name, remote_folder='', storage_type='Aspose', storage_name=None): """ :param cell_name: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/cells/' + cell_name str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Cell'] def get_first_cell(self, offset, count, remote_folder='', storage_type='Aspose', storage_name=None): """ :param offset: :param count: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/cells/firstcell' qry = {'offset': offset, 'count': count} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Cell'] def get_last_cell(self, offset, count, remote_folder='', storage_type='Aspose', storage_name=None): """ :param offset: :param count: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/cells/endcell' qry = {'offset': offset, 'count': count} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Cell'] def get_autoshape(self, index, remote_folder='', storage_type='Aspose', storage_name=None): """ :param index: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/autoshapes/' + str(index) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['AutoShapes'] def get_chart(self, index, remote_folder='', storage_type='Aspose', storage_name=None): """ :param index: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/charts/' + str(index) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Chart'] def get_hyperlink(self, index, remote_folder='', storage_type='Aspose', storage_name=None): """ :param index: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/hyperlinks/' + str(index) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Hyperlink'] def get_ole_object(self, index, remote_folder='', storage_type='Aspose', storage_name=None): """ :param index: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/oleobjects/' + str(index) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['OleObject'] def get_picture(self, index, remote_folder='', storage_type='Aspose', storage_name=None): """ :param index: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/pictures/' + str(index) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Picture'] def get_validation(self, index, remote_folder='', storage_type='Aspose', storage_name=None): """ :param index: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/validations/' + str(index) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Validation'] def get_merged_cells(self, index, remote_folder='', storage_type='Aspose', storage_name=None): """ :param index: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/mergedCells/' + str(index) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['MergedCell'] def get_comment(self, cell_name, remote_folder='', storage_type='Aspose', storage_name=None): """ :param cell_name: :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = self.base_uri + '/comments/' + cell_name str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Comment'] def get_autoshapes_count(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/autoshapes' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return len(response['AutoShapes']['AutoShapeList']) def get_cells_count(self, offset, count, remote_folder='', storage_type='Aspose', storage_name=None): """ :param offset: :param count: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/cells' qry = {'offset': offset, 'count': count} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Cells']['CellCount'] def get_max_column(self, offset, count, remote_folder='', storage_type='Aspose', storage_name=None): """ :param offset: :param count: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/cells' qry = {'offset': offset, 'count': count} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Cells']['MaxColumn'] def get_max_data_column(self, offset, count, remote_folder='', storage_type='Aspose', storage_name=None): """ :param offset: :param count: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/cells/maxdatacolumn' qry = {'offset': offset, 'count': count} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def get_min_column(self, offset, count, remote_folder='', storage_type='Aspose', storage_name=None): """ :param offset: :param count: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/cells/mincolumn' qry = {'offset': offset, 'count': count} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def get_min_data_column(self, offset, count, remote_folder='', storage_type='Aspose', storage_name=None): """ :param offset: :param count: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/cells/mindatacolumn' qry = {'offset': offset, 'count': count} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def get_max_row(self, offset, count, remote_folder='', storage_type='Aspose', storage_name=None): """ :param offset: :param count: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/cells' qry = {'offset': offset, 'count': count} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Cells']['MaxRow'] def get_max_data_row(self, offset, count, remote_folder='', storage_type='Aspose', storage_name=None): """ :param offset: :param count: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/cells/maxdatarow' qry = {'offset': offset, 'count': count} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def get_min_row(self, offset, count, remote_folder='', storage_type='Aspose', storage_name=None): """ :param offset: :param count: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/cells/minrow' qry = {'offset': offset, 'count': count} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def get_min_data_row(self, offset, count, remote_folder='', storage_type='Aspose', storage_name=None): """ :param offset: :param count: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/cells/mindatarow' qry = {'offset': offset, 'count': count} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def get_cells_list(self, offset, count, remote_folder='', storage_type='Aspose', storage_name=None): """ :param offset: :param count: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/cells' qry = {'offset': offset, 'count': count} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Cells']['CellList'] def get_row_list(self, offset, count, remote_folder='', storage_type='Aspose', storage_name=None): """ :param offset: :param count: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/cells/rows' qry = {'offset': offset, 'count': count} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Rows']['RowsList'] def get_columns_list(self, offset, count, remote_folder='', storage_type='Aspose', storage_name=None): """ :param offset: :param count: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/cells/columns' qry = {'offset': offset, 'count': count} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Columns']['ColumnsList'] def get_column(self, col_index, remote_folder='', storage_type='Aspose', storage_name=None): """ :param col_index: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/cells/columns/' + col_index str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Column'] def add_ole_object(self, ole_file, image_file, upper_left_row, upper_left_col, height, width, remote_folder='', storage_type='Aspose', storage_name=None): """ :param ole_file: :param image_file: :param upper_left_row: :param upper_left_col: :param height: :param width: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/oleobjects' qry = {'oleFile': ole_file, 'imageFile': image_file, 'upperLeftRow': upper_left_row, 'upperLeftColumn': upper_left_col, 'height': height, 'width': width} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.put(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json', }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return True if response['Code'] == 200 else False def update_ole_object(self, index, xml_str, remote_folder='', storage_type='Aspose', storage_name=None): """ :param index: :param xml_str: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/oleobjects/' + str(index) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, xml_str, headers={ 'content-type': 'application/json', 'accept': 'application/json', }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return True if response['Code'] == 200 else False def delete_ole_object(self, index, remote_folder='', storage_type='Aspose', storage_name=None): """ :param index: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/oleobjects/' + str(index) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.delete(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return True if response['Code'] == 200 else False def delete_all_ole_objects(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/oleobjects' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.delete(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return True if response['Code'] == 200 else False def update_properties(self, worksheet_name, gridlines=False, pagebreak=False, ruler=False, remote_folder='', storage_type='Aspose', storage_name=None): """ :param worksheet_name: :param gridlines: :param pagebreak: :param ruler: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ json_data = json.dumps({'Name': worksheet_name, 'IsGridlinesVisible': gridlines, 'IsPageBreakPreview': pagebreak, 'IsRulerVisible': ruler}) str_uri = self.base_uri str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, data=json_data, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: return Utils.download_file(self.filename, self.filename, remote_folder, storage_type, storage_name) else: return validate_output def set_cell_value(self, cell_name, value_type, value, remote_folder='', storage_type='Aspose', storage_name=None): """ :param cell_name: :param value_type: :param value: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/cells/' + cell_name qry = {'value': value, 'type': value_type} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return True if response['Code'] == 200 else False def set_formula(self, cell_name, formula, remote_folder='', storage_type='Aspose', storage_name=None): """ :param cell_name: :param formula: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/cells/' + cell_name qry = {'formula': formula} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return True if response['Code'] == 200 else False def calculate_formula(self, formula, remote_folder='', storage_type='Aspose', storage_name=None): """ :param formula: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/formulaResult' qry = {'formula': formula} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Value'] def hide_worksheet(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/visible?isVisible=false' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.put(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def unhide_worksheet(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/visible?isVisible=true' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.put(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def move_worksheet(self, dest_worksheet_name, position, remote_folder='', storage_type='Aspose', storage_name=None): """ :param dest_worksheet_name: :param position: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ if not dest_worksheet_name: raise ValueError("dest_worksheet_name not provided.") if not position: raise ValueError("position not provided.") options = {} options['DestinationWorsheet'] = dest_worksheet_name options['Position'] = position post_data = json.dumps(options) str_uri = self.base_uri + '/position' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, post_data, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def sort_data(self, sort_order, sort_area, remote_folder='', storage_type='Aspose', storage_name=None): """ :param sort_order: :param sort_area: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ if not sort_order: raise ValueError("sort_order not provided.") if not sort_area: raise ValueError("sort_area not provided.") post_data = json.dumps(sort_order) str_uri = self.base_uri + '/sort?cellArea=' + sort_area str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, post_data, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def copy_worksheet(self, new_worksheet_name, remote_folder='', storage_type='Aspose', storage_name=None): """ :param new_worksheet_name: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ if not new_worksheet_name: raise ValueError("new_worksheet_name not provided.") str_uri = Product.product_uri + 'cells/' + self.filename + '/workbook/worksheets/' + new_worksheet_name \ + '/copy?sourcesheet=' + self.worksheet_name str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def rename_worksheet(self, new_worksheet_name, remote_folder='', storage_type='Aspose', storage_name=None): """ :param new_worksheet_name: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ if not new_worksheet_name: raise ValueError("new_worksheet_name not provided.") str_uri = Product.product_uri + 'cells/' + self.filename + '/workbook/worksheets/' + self.worksheet_name + '/Rename?newname=' + new_worksheet_name str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def update_properties(self, properties, remote_folder='', storage_type='Aspose', storage_name=None): """ :param properties: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ if not properties: raise ValueError("properties not provided.") str_uri = Product.product_uri + 'cells/' + self.filename + '/workbook/worksheets/' + self.worksheet_name str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, properties, headers={ 'content-type': 'application/xml', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def set_background_image(self, image_filename, remote_folder='', storage_type='Aspose', storage_name=None): """ :param image_filename: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/Background?imageFile=' + image_filename str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.put(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def delete_background_image(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/Background' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.delete(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def freeze_panes(self, row, col, freezed_rows, freezed_cols, remote_folder='', storage_type='Aspose', storage_name=None): """ :param row: :param col: :param freezed_rows: :param freezed_cols: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/FreezePanes' qry = {'row': row,'column': col, 'freezedRows': freezed_rows, 'freezedColumns': freezed_cols} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.put(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def unfreeze_panes(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/FreezePanes' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.delete(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response # ======================================================================== # WORKBOOK CLASS # ======================================================================== class Workbook: def __init__(self, filename): self.filename = filename if not filename: raise ValueError("filename not specified") self.base_uri = Product.product_uri + 'cells/' + self.filename def add_worksheet(self, worksheet_name, remote_folder='', storage_type='Aspose', storage_name=None): """ :param worksheet_name: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/worksheets/' + worksheet_name str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.put(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return True if response['Code'] == 200 else False def remove_worksheet(self, worksheet_name, remote_folder='', storage_type='Aspose', storage_name=None): """ :param worksheet_name: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/worksheets/' + worksheet_name str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.delete(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return True if response['Code'] == 200 else False def clear_modify_password(self, password, remote_folder='', storage_type='Aspose', storage_name=None): """ :param password: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/writeProtection' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) json_data = json.dumps({'Password': password}) signed_uri = Utils.sign(str_uri) response = None try: response = requests.delete(signed_uri, data=json_data, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return True if response['Code'] == 200 else False def set_modify_password(self, password, remote_folder='', storage_type='Aspose', storage_name=None): """ :param password: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/writeProtection' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) json_data = json.dumps({'Password': password}) signed_uri = Utils.sign(str_uri) response = None try: response = requests.put(signed_uri, json_data, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return True if response['Code'] == 200 else False def unprotect_workbook(self, password, remote_folder='', storage_type='Aspose', storage_name=None): """ :param password: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/protection' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) json_data = json.dumps({'Password': password}) signed_uri = Utils.sign(str_uri) response = None try: response = requests.delete(signed_uri, data=json_data, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return True if response['Code'] == 200 else False def protect_workbook(self, protection_type, password, remote_folder='', storage_type='Aspose', storage_name=None): """ :param protection_type: :param password: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/protection' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) json_data = json.dumps({'ProtectionType': protection_type, 'Password': password}) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, json_data, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return True if response['Code'] == 200 else False def encrypt_workbook(self, encryption_type, password, key_length, remote_folder='', storage_type='Aspose', storage_name=None): """ :param encryption_type: :param password: :param key_length: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/encryption' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) json_data = json.dumps({'EncryptionType': encryption_type, 'KeyLength': key_length, 'Password': password}) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, json_data, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return True if response['Code'] == 200 else False def decrypt_workbook(self, encryption_type, password, key_length, remote_folder='', storage_type='Aspose', storage_name=None): """ :param encryption_type: :param password: :param key_length: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/encryption' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) json_data = json.dumps({'EncryptionType': encryption_type, 'KeyLength': key_length, 'Password': password}) signed_uri = Utils.sign(str_uri) response = None try: response = requests.delete(signed_uri, data=json_data, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return True if response['Code'] == 200 else False def get_default_style(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/defaultStyle' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Style'] def get_name_count(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/names' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return len(response['Names']) def get_worksheets_count(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/worksheets' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return len(response['Worksheets']['WorksheetList']) @staticmethod def process_smart_marker(workbook_name, data_filename, remote_folder='', storage_type='Aspose', storage_name=None): """ :param workbook_name: :param data_filename: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = Product.product_uri + 'cells/' + workbook_name qry = {'xmlFile': data_filename} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.put(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response @staticmethod def create_from_smart_template(workbook_name, template_filename, data_filename, remote_folder='', storage_type='Aspose', storage_name=None): """ :param workbook_name: :param template_filename: :param data_filename: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = Product.product_uri + 'cells/' + workbook_name qry = {'templateFile': template_filename, 'dataFile': data_filename} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.put(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response @staticmethod def create_workbook_from_template(workbook_name, template_filename, remote_folder='', storage_type='Aspose', storage_name=None): """ :param workbook_name: :param template_filename: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = Product.product_uri + 'cells/' + workbook_name + '?templateFile=' + template_filename str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.put(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response @staticmethod def create_empty_workbook(workbook_name, remote_folder='', storage_type='Aspose', storage_name=None): """ :param workbook_name: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = Product.product_uri + 'cells/' + workbook_name str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.put(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def split_workbook(self, save_format, remote_folder='', storage_type='Aspose', storage_name=None): """ :param save_format: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/split?format=' + save_format str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['Result']['Documents'] def get_properties(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/documentProperties' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['DocumentProperties']['DocumentPropertyList'] def get_property(self, property_name, remote_folder='', storage_type='Aspose', storage_name=None): """ :param property_name: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/documentProperties/' + property_name str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['DocumentProperty'] def set_property(self, property_name, property_value, remote_folder='', storage_type='Aspose', storage_name=None): """ :param property_name: :param property_value: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/documentProperties/' + property_name str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) json_data = json.dumps({'Value': property_value}) signed_uri = Utils.sign(str_uri) response = None try: response = requests.put(signed_uri, json_data, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response['DocumentProperty'] def delete_property(self, property_name, remote_folder='', storage_type='Aspose', storage_name=None): """ :param property_name: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/documentProperties/' + property_name str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.delete(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return True if response['Code'] == 200 else False def save_as(self, options, output_filename, stream_out=False, remote_folder='', storage_type='Aspose', storage_name=None): """ :param options: :param stream_out: :param output_filename: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ if not options: raise ValueError("options not specified") if not output_filename: raise ValueError("output_filename not specified") str_uri = self.base_uri + '/saveAs?newfilename=' + output_filename str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, options, headers={ 'content-type': 'application/xml', 'accept': 'application/json' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: if not stream_out: if output_filename is None: output_filename = self.filename output_path = AsposeApp.output_path + output_filename Utils.save_file(response, output_path) return output_path else: return response.content else: return validate_output def merge(self, merge_with, remote_folder='', storage_type='Aspose', storage_name=None): """ :param merge_with: :param remote_folder: storage path to operate :param storage_type: type of storage 
e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/merge?mergeWith=' + merge_with str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response # ======================================================================== # TEXT EDITOR CLASS # ======================================================================== class ChartEditor: def __init__(self, filename): self.filename = filename if not filename: raise ValueError("filename not specified") self.base_uri = Product.product_uri + 'cells/' + self.filename def get_border(self, worksheet_name, chart_index, remote_folder='', storage_type='Aspose', storage_name=None): """ :param worksheet_name: :param chart_index: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/worksheets/' + worksheet_name + \ '/charts/' + str(chart_index) + '/chartArea/border' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: return response['Line'] else: return validate_output def get_fill_format(self, worksheet_name, chart_index, remote_folder='', storage_type='Aspose', storage_name=None): """ :param worksheet_name: :param chart_index: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/worksheets/' + worksheet_name + \ '/charts/' + str(chart_index) + '/chartArea/fillFormat' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: return response['FillFormat'] else: return validate_output def get_chart_area(self, worksheet_name, chart_index, remote_folder='', storage_type='Aspose', storage_name=None): """ :param worksheet_name: :param chart_index: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/worksheets/' + worksheet_name + '/charts/' + str(chart_index) + '/chartArea' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: return response['ChartArea'] else: return validate_output def add_chart(self, worksheet_name, chart_type, upper_left_row, upper_left_column, lower_right_row, lower_right_column, remote_folder='', storage_type='Aspose', storage_name=None): """ :param worksheet_name: :param chart_type: :param upper_left_row: :param upper_left_column: :param lower_right_row: :param lower_right_column: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/worksheets/' + worksheet_name + '/charts' qry = {'chartType': chart_type, 'upperLeftRow': upper_left_row, 'upperLeftColumn': upper_left_column, 'lowerRightRow': lower_right_row, 'lowerRightColumn': lower_right_column} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.put(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: return Utils.download_file(self.filename, self.filename, remote_folder, storage_type, storage_name) else: return validate_output def delete_chart(self, worksheet_name, chart_index, remote_folder='', storage_type='Aspose', storage_name=None): """ :param worksheet_name: :param chart_index: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/worksheets/' + worksheet_name + '/charts/' + str(chart_index) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.delete(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: return Utils.download_file(self.filename, self.filename, remote_folder, storage_type, storage_name) else: return validate_output def delete_charts(self, worksheet_name, remote_folder='', storage_type='Aspose', storage_name=None): """ :param worksheet_name: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/worksheets/' + worksheet_name + '/charts' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.delete(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: return Utils.download_file(self.filename, self.filename, remote_folder, storage_type, storage_name) else: return validate_output def show_chart_legend(self, worksheet_name, chart_index, remote_folder='', storage_type='Aspose', storage_name=None): """ :param worksheet_name: :param chart_index: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/worksheets/' + worksheet_name + '/charts/' + str(chart_index) + '/legend' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.put(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: return Utils.download_file(self.filename, self.filename, remote_folder, storage_type, storage_name) else: return validate_output def hide_chart_legend(self, worksheet_name, chart_index, remote_folder='', storage_type='Aspose', storage_name=None): """ :param worksheet_name: :param chart_index: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/worksheets/' + worksheet_name + '/charts/' + str(chart_index) + '/legend' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.delete(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: return Utils.download_file(self.filename, self.filename, remote_folder, storage_type, storage_name) else: return validate_output # ======================================================================== # TEXT EDITOR CLASS # ======================================================================== class TextEditor: def __init__(self, filename): self.filename = filename if not filename: raise ValueError("filename not specified") self.base_uri = Product.product_uri + 'cells/' + self.filename def get_text_items(self, worksheet_name=None, remote_folder='', storage_type='Aspose', storage_name=None): """ :param worksheet_name: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ if worksheet_name is None: str_uri = self.base_uri + '/textItems' else: str_uri = self.base_uri + '/worksheets/' + worksheet_name + '/textItems' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: return response['TextItems']['TextItemList'] else: return validate_output def find_text(self, text, worksheet_name=None, remote_folder='', storage_type='Aspose', storage_name=None): """ :param text: :param worksheet_name: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ if worksheet_name is None: str_uri = self.base_uri + '/findText' else: str_uri = self.base_uri + '/worksheets/' + worksheet_name + '/findText' qry = {'text': text} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: return response['TextItems']['TextItemList'] else: return validate_output def replace_text(self, old_text, new_text, worksheet_name=None, remote_folder='', storage_type='Aspose', storage_name=None): """ :param old_text: :param new_text: :param worksheet_name: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ if worksheet_name is None: str_uri = self.base_uri + '/replaceText' else: str_uri = self.base_uri + '/worksheets/' + worksheet_name + '/replaceText' qry = {'OldValue': old_text, 'NewValue': new_text} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: return True else: return validate_output # ======================================================================== # EXTRACTOR CLASS # ======================================================================== class Extractor: def __init__(self, filename): self.filename = filename if not filename: raise ValueError("filename not specified") self.base_uri = Product.product_uri + 'cells/' + self.filename def get_autoshape(self, shape_index, worksheet_name, save_format, password=None, remote_folder='', storage_type='Aspose', storage_name=None): """ :param shape_index: :param worksheet_name: :param save_format: :param password: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/worksheets/' + worksheet_name + '/autoshapes/' + str(shape_index) qry = {'format': save_format} if not password is None: qry['password'] = password str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: save_format = 'zip' if save_format == 'html' else save_format output_path = AsposeApp.output_path + Utils.get_filename(self.filename) + '_shape_' + str(shape_index)\ + '.' + save_format Utils.save_file(response, output_path) return output_path else: return validate_output def get_chart(self, chart_index, worksheet_name, save_format, password=None, remote_folder='', storage_type='Aspose', storage_name=None): """ :param chart_index: :param worksheet_name: :param save_format: :param password: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/worksheets/' + worksheet_name + '/charts/' + str(chart_index) qry = {'format': save_format} if not password is None: qry['password'] = password str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: save_format = 'zip' if save_format == 'html' else save_format output_path = AsposeApp.output_path + Utils.get_filename(self.filename) + '_chart_' + str(chart_index)\ + '.' + save_format Utils.save_file(response, output_path) return output_path else: return validate_output def get_oleobject(self, ole_index, worksheet_name, save_format, password=None, remote_folder='', storage_type='Aspose', storage_name=None): """ :param ole_index: :param worksheet_name: :param save_format: :param password: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/worksheets/' + worksheet_name + '/oleobjects/' + str(ole_index) qry = {'format': save_format} if not password is None: qry['password'] = password str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: save_format = 'zip' if save_format == 'html' else save_format output_path = AsposeApp.output_path + Utils.get_filename(self.filename) + '_ole_' + str(ole_index)\ + '.' + save_format Utils.save_file(response, output_path) return output_path else: return validate_output def get_picture(self, picture_index, worksheet_name, save_format, password=None, remote_folder='', storage_type='Aspose', storage_name=None): """ :param picture_index: :param worksheet_name: :param save_format: :param password: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/worksheets/' + worksheet_name + '/pictures/' + str(picture_index) qry = {'format': save_format} if not password is None: qry['password'] = password str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: save_format = 'zip' if save_format == 'html' else save_format output_path = AsposeApp.output_path + Utils.get_filename(self.filename) + '_pic_' + str(picture_index)\ + '.' + save_format Utils.save_file(response, output_path) return output_path else: return validate_output # ======================================================================== # CONVERTER CLASS # ======================================================================== class Converter: def __init__(self, filename): self.filename = filename if not filename: raise ValueError("filename not specified") self.base_uri = Product.product_uri + 'cells/' + self.filename def autoshape_to_image(self, shape_index, worksheet_name, save_format, password=None, stream_out=False, output_filename=None, remote_folder='', storage_type='Aspose', storage_name=None): """ :param shape_index: :param worksheet_name: :param save_format: :param password: :param stream_out: :param output_filename: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/worksheets/' + worksheet_name + '/autoshapes/' + str(shape_index) qry = {'format': save_format} if not password is None: qry['password'] = password str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: if not stream_out: if output_filename is None: output_filename = self.filename save_format = 'zip' if save_format == 'html' else save_format output_path = AsposeApp.output_path + Utils.get_filename(output_filename) + '_shape_' + str(shape_index)\ + '.' + save_format Utils.save_file(response, output_path) return output_path else: return response.content else: return validate_output def chart_to_image(self, chart_index, worksheet_name, save_format, password=None, stream_out=False, output_filename=None, remote_folder='', storage_type='Aspose', storage_name=None): """ :param chart_index: :param worksheet_name: :param save_format: :param password: :param stream_out: :param output_filename: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/worksheets/' + worksheet_name + '/charts/' + str(chart_index) qry = {'format': save_format} if not password is None: qry['password'] = password str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: if not stream_out: if output_filename is None: output_filename = self.filename save_format = 'zip' if save_format == 'html' else save_format output_path = AsposeApp.output_path + Utils.get_filename(output_filename) + '_chart_' + str(chart_index)\ + '.' + save_format Utils.save_file(response, output_path) return output_path else: return response.content else: return validate_output def oleobject_to_image(self, ole_index, worksheet_name, save_format, password=None, stream_out=False, output_filename=None, remote_folder='', storage_type='Aspose', storage_name=None): """ :param ole_index: :param worksheet_name: :param save_format: :param password: :param stream_out: :param output_filename: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/worksheets/' + worksheet_name + '/oleobjects/' + str(ole_index) qry = {'format': save_format} if not password is None: qry['password'] = password str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: if not stream_out: if output_filename is None: output_filename = self.filename save_format = 'zip' if save_format == 'html' else save_format output_path = AsposeApp.output_path + Utils.get_filename(output_filename) + '_ole_' + str(ole_index)\ + '.' + save_format Utils.save_file(response, output_path) return output_path else: return response.content else: return validate_output def picture_to_image(self, picture_index, worksheet_name, save_format, password=None, stream_out=False, output_filename=None, remote_folder='', storage_type='Aspose', storage_name=None): """ :param picture_index: :param worksheet_name: :param save_format: :param password: :param stream_out: :param output_filename: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/worksheets/' + worksheet_name + '/pictures/' + str(picture_index) qry = {'format': save_format} if not password is None: qry['password'] = password str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: if not stream_out: if output_filename is None: output_filename = self.filename save_format = 'zip' if save_format == 'html' else save_format output_path = AsposeApp.output_path + Utils.get_filename(output_filename) + '_pic_' + str(picture_index)\ + '.' + save_format Utils.save_file(response, output_path) return output_path else: return response.content else: return validate_output def convert_to_image(self, worksheet_name, save_format, password=None, stream_out=False, output_filename=None, remote_folder='', storage_type='Aspose', storage_name=None): """ :param worksheet_name: :param save_format: :param password: :param stream_out: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ if not save_format: raise ValueError("save_format not specified") str_uri = self.base_uri + '/worksheets/' + worksheet_name qry = {'format': save_format} if not password is None: qry['password'] = password str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: if not stream_out: if output_filename is None: output_filename = self.filename save_format = 'zip' if save_format == 'html' else save_format output_path = AsposeApp.output_path + Utils.get_filename(output_filename) + '.' + save_format Utils.save_file(response, output_path) return output_path else: return response.content else: return validate_output def convert(self, save_format, stream_out=False, output_filename=None, remote_folder='', storage_type='Aspose', storage_name=None): """ :param save_format: :param stream_out: :param output_filename: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ if not save_format: raise ValueError("save_format not specified") str_uri = self.base_uri + '?format=' + save_format str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: if not stream_out: if output_filename is None: output_filename = self.filename save_format = 'zip' if save_format == 'html' else save_format output_path = AsposeApp.output_path + Utils.get_filename(output_filename) + '.' + save_format Utils.save_file(response, output_path) return output_path else: return response.content else: return validate_output @staticmethod def convert_local_file(input_file, save_format, stream_out=False, output_filename=None): """ :param input_file: :param save_format: :param stream_out: :param output_filename: :return: """ if not input_file: raise ValueError("input_file not specified") if not save_format: raise ValueError("save_format not specified") str_uri = Product.product_uri + 'cells/convert?format=' + save_format signed_uri = Utils.sign(str_uri) response = None try: response = Utils.upload_file_binary(input_file, signed_uri) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: if not stream_out: if output_filename is None: output_filename = input_file save_format = 'zip' if save_format == 'html' else save_format output_path = AsposeApp.output_path + Utils.get_filename(output_filename) + '.' 
+ save_format Utils.save_file(response, output_path) return output_path else: return response.content else: return validate_output
{ "repo_name": "asposeforcloud/Aspose_Cloud_SDK_For_Python", "path": "asposecloud/cells/__init__.py", "copies": "1", "size": "116739", "license": "mit", "hash": -3800899888527085000, "line_mean": 36.3445297505, "line_max": 155, "alpha_frac": 0.5711715879, "autogenerated": false, "ratio": 4.05273390036452, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.512390548826452, "avg_score": null, "num_lines": null }
__author__ = 'AssadMahmood' import requests import os from asposecloud import Product from asposecloud.common import Utils class Builder: """ Wrapper class for Aspose.Barcode for Cloud API. The Aspose.Barcode for Cloud let's you generate Barcodes. """ def __init__(self): self.base_uri = Product.product_uri + 'barcode' def generate(self, code_text, symbology='QR', image_format='png', x_res=None, y_res=None, x_dim=None, y_dim=None, extra_params=None, remote_folder='', storage_type='Aspose', storage_name=None): """ Generate Barcode :param code_text: :param symbology: :param image_format: :param x_res: :param y_res: :param x_dim: :param y_dim: :param extra_params: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ if not code_text: raise ValueError("code_text not specified.") if symbology == '': raise ValueError("symbology can not be empty.") if image_format == '': raise ValueError("image_format can not be empty.") str_uri = self.base_uri + '/generate' qry_str = {'text': code_text, 'type': symbology, 'format': image_format} if x_res: qry_str['resolutionX'] = x_res if y_res: qry_str['resolutionY'] = y_res if x_dim: qry_str['dimensionX'] = x_dim if y_dim: qry_str['dimensionsY'] = y_dim if extra_params: qry_str.update(extra_params) str_uri = Utils.build_uri(str_uri, qry_str) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }, stream=True) return response class Reader: """ Wrapper class for Aspose.Barcode for Cloud API. The Aspose.Barcode for Cloud let's you read Barcodes. 
""" def __init__(self, filename): self.filename = filename if not filename: raise ValueError("filename not specified") self.base_uri = Product.product_uri + 'barcode' def read(self, symbology=None, remote_folder='', storage_type='Aspose', storage_name=None): """ Read a Barcode :param symbology: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/' + self.filename + '/recognize' if symbology: str_uri += '?type=' + symbology str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }).json() return response['Barcodes'] if response['Code'] == 200 else False def read_by_algorithm(self, symbology=None, algorithm=None, remote_folder='', storage_type='Aspose', storage_name=None): """ Read a Barcode :param symbology: :param algorithm: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/' + self.filename + '/recognize' if symbology: str_uri += '?type=' + symbology if algorithm: str_uri = Utils.build_uri(str_uri, {'BinarizationHints': algorithm}) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }).json() return response['Barcodes'] if response['Code'] == 200 else False def read_region(self, x, y, w, h, symbology=None, remote_folder='', storage_type='Aspose', storage_name=None): """ Read a Barcode :param x: :param y: :param w: :param h: :param symbology: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/' + self.filename + '/recognize?rectX=' + str(x) + '&rectY=' + str(y) str_uri += '&rectWidth=' + str(w) + '&rectHeight=' + str(h) if symbology: str_uri += '&type=' + symbology str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }).json() return response['Barcodes'] if response['Code'] == 200 else False def read_with_checksum(self, checksum_validation, symbology=None, remote_folder='', storage_type='Aspose', storage_name=None): """ Read a Barcode :param checksum_validation: :param symbology: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/' + self.filename + '/recognize?checksumValidation=' + checksum_validation if symbology: str_uri += '&type=' + symbology str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }).json() return response['Barcodes'] if response['Code'] == 200 else False def read_barcode_count(self, barcodes_count, symbology=None, remote_folder='', storage_type='Aspose', storage_name=None): """ Read a Barcode :param barcodes_count: :param symbology: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/' + self.filename + '/recognize?barcodesCount=' + barcodes_count if symbology: str_uri += '&type=' + symbology str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }).json() return response['Barcodes'] if response['Code'] == 200 else False def read_from_url(self, url, symbology=None, remote_folder='', storage_type='Aspose', storage_name=None): """ Read a Barcode :param url: :param symbology: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ if not url: raise ValueError("url not specified") str_uri = self.base_uri + '/' + self.filename + '/recognize?url=' + url if symbology: str_uri += '&type=' + symbology str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }).json() return response['Barcodes'] if response['Code'] == 200 else False @staticmethod def read_from_local_image(local_image, symbology): """ Read barcode from local image :param local_image: :param symbology: :return: Text of barcode """ if not local_image: raise ValueError("local_image not specified") filename = os.path.basename(local_image) str_uri = Product.product_uri + 'storage/file/' + filename signed_uri = Utils.sign(str_uri) Utils.upload_file_binary(local_image, signed_uri) str_uri = Product.product_uri + 'barcode/' + filename + '/recognize' if symbology: str_uri += '?type=' + symbology signed_uri = Utils.sign(str_uri) response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }).json() return response['Barcodes'] if response['Code'] == 200 else False
{ "repo_name": "asposeforcloud/Aspose_Cloud_SDK_For_Python", "path": "asposecloud/barcode/__init__.py", "copies": "1", "size": "9386", "license": "mit", "hash": -4105530374753272300, "line_mean": 35.5214007782, "line_max": 130, "alpha_frac": 0.5904538675, "autogenerated": false, "ratio": 3.688015717092338, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.47784695845923375, "avg_score": null, "num_lines": null }
__author__ = 'AssadMahmood' import requests from asposecloud import Product from asposecloud.common import Utils class Extractor: """ Wrapper class for Aspose.OCR for Cloud API. The Aspose.OCR for Cloud let's you extract text from image. """ def __init__(self, filename): self.filename = filename if not filename: raise ValueError("filename not specified") self.base_uri = Product.product_uri + 'ocr/' + self.filename + '/recognize' def extract(self, remote_folder='', storage_type='Aspose', storage_name=None, **kwargs): """ Extract text from image :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :param kwargs: :return: Text """ language = kwargs.get('language') use_default_dictionaries = kwargs.get('use_default_dictionaries') x = kwargs.get('x') y = kwargs.get('y') height = kwargs.get('height') width = kwargs.get('width') str_uri = self.base_uri + '?' if language: str_uri += '&language=' + language if use_default_dictionaries: str_uri += '&useDefaultDictionaries=true' if x: str_uri += 'rectX=' + x if y: str_uri += 'rectY=' + y if height: str_uri += 'rectHeight=' + height if width: str_uri += 'rectWidth=' + width str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }).json() return response['Text'] if response['Code'] == 200 else False @staticmethod def extract_from_local_file(local_file): """ Extract text from image using a local file. 
:param local_file: :return: Text """ str_uri = Product.product_uri + 'ocr/recognize' signed_uri = Utils.sign(str_uri) with open(local_file, 'rb') as payload: response = requests.post(signed_uri, payload, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }).json() return response['Text'] if response['Code'] == 200 else False @staticmethod def extract_from_url(url): """ Extract text from image using url :param url: :return: Text """ str_uri = Product.product_uri + 'ocr/recognize?url=' + url signed_uri = Utils.sign(str_uri) response = requests.post(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }).json() return response['Text'] if response['Code'] == 200 else False
{ "repo_name": "asposeforcloud/Aspose_Cloud_SDK_For_Python", "path": "asposecloud/ocr/__init__.py", "copies": "1", "size": "3084", "license": "mit", "hash": -2229402053814823000, "line_mean": 30.793814433, "line_max": 118, "alpha_frac": 0.5771725032, "autogenerated": false, "ratio": 3.8453865336658355, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9866619047564261, "avg_score": 0.011187997860314885, "num_lines": 97 }
class TestAsposeCells(unittest.TestCase):
    """Integration tests for the Aspose.Cells cloud wrappers."""

    def setUp(self):
        # Load API credentials and endpoints from the local setup file.
        with open('setup.json') as json_file:
            data = json.load(json_file)
        asposecloud.AsposeApp.app_key = str(data['app_key'])
        asposecloud.AsposeApp.app_sid = str(data['app_sid'])
        asposecloud.AsposeApp.output_path = str(data['output_location'])
        asposecloud.Product.product_uri = str(data['product_uri'])

    def _upload(self, path):
        # Push a fixture file to cloud storage, failing the test on error.
        self.assertEqual(True, Folder().upload_file(path))

    def _sheet(self):
        # Upload the shared fixture workbook and return its first worksheet.
        self._upload('./data/split_workbook.xlsx')
        return Worksheet('split_workbook.xlsx', 'Sheet1')

    def test_convert_storage_file(self):
        self._upload('./data/test_convert_cell.xlsx')
        Converter('test_convert_cell.xlsx').convert('tiff')
        self.assertTrue(os.path.exists('./output/test_convert_cell.tiff'))

    def test_split_workbook(self):
        self._upload('./data/split_workbook.xlsx')
        response = Workbook('split_workbook.xlsx').split_workbook('xlsx')
        self.assertEqual(list, type(response))

    def test_get_first_cell(self):
        self.assertEqual(dict, type(self._sheet().get_first_cell(0, 0)))

    def test_get_last_cell(self):
        self.assertEqual(dict, type(self._sheet().get_last_cell(0, 0)))

    def test_get_max_row(self):
        self.assertEqual(int, type(self._sheet().get_max_row(0, 0)))

    def test_get_max_data_row(self):
        self.assertEqual(int, type(self._sheet().get_max_data_row(0, 0)))

    def test_get_max_column(self):
        self.assertEqual(int, type(self._sheet().get_max_column(0, 0)))

    def test_get_max_data_column(self):
        self.assertEqual(int, type(self._sheet().get_max_data_column(0, 0)))

    def test_get_min_row(self):
        self.assertEqual(int, type(self._sheet().get_min_row(0, 0)))

    def test_get_min_data_row(self):
        self.assertEqual(int, type(self._sheet().get_min_data_row(0, 0)))

    def test_get_min_column(self):
        self.assertEqual(int, type(self._sheet().get_min_column(0, 0)))

    def test_get_min_data_column(self):
        self.assertEqual(int, type(self._sheet().get_min_data_column(0, 0)))

    def test_save_as(self):
        self._upload('./data/split_workbook.xlsx')
        options = '<PdfSaveOptions>' \
                  '<desiredPPI>300</desiredPPI>' \
                  '<jpegQuality>70</jpegQuality>' \
                  '<OnePagePerSheet>true</OnePagePerSheet>' \
                  '<SaveFormat>Pdf</SaveFormat>' \
                  '</PdfSaveOptions>'
        saved_path = Workbook('split_workbook.xlsx').save_as(options, 'save_wb.pdf')
        self.assertEqual(True, os.path.exists(saved_path))

    def test_merge(self):
        self._upload('./data/split_workbook.xlsx')
        self._upload('./data/test_convert_cell.xlsx')
        result = Workbook('split_workbook.xlsx').merge('test_convert_cell.xlsx')
        self.assertEqual(dict, type(result))


if __name__ == '__main__':
    unittest.main()
{ "repo_name": "asposeforcloud/Aspose_Cloud_SDK_For_Python", "path": "test/test_aspose_cells.py", "copies": "1", "size": "5608", "license": "mit", "hash": -5273983225701664000, "line_mean": 32.9878787879, "line_max": 74, "alpha_frac": 0.6223252496, "autogenerated": false, "ratio": 3.674967234600262, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9776168263080695, "avg_score": 0.004224844223913459, "num_lines": 165 }
__author__ = 'AssadMahmood'

import json
import os.path
import unittest

import asposecloud
from asposecloud.common import Utils
from asposecloud.storage import Folder


class TestAsposeStorage(unittest.TestCase):
    """Integration tests for the Aspose Cloud storage API (Folder list/upload/download).

    Requires a local ``setup.json`` with API credentials and the
    ``./data`` fixture files; the tests talk to the live service.
    """

    def setUp(self):
        # Load credentials/endpoints from setup.json before every test.
        with open('setup.json') as json_file:
            data = json.load(json_file)
            asposecloud.AsposeApp.app_key = str(data['app_key'])
            asposecloud.AsposeApp.app_sid = str(data['app_sid'])
            asposecloud.AsposeApp.output_path = str(data['output_location'])
            asposecloud.Product.product_uri = str(data['product_uri'])

    def test_get_files(self):
        fld = Folder()
        files = fld.get_files()
        # assertIsInstance reports the actual type on failure, unlike a bare assert.
        self.assertIsInstance(files, list)

    def test_upload_file(self):
        fld = Folder()
        response = fld.upload_file('./data/test_uploadfile.docx')
        self.assertEqual(response, True)

    def test_get_file(self):
        fld = Folder()
        response = fld.get_file('test_uploadfile.docx')
        # Hoist the duplicated output-path expression into one local.
        target = asposecloud.AsposeApp.output_path + 'test_uploadfile.docx'
        Utils.save_file(response, target)
        # assertTrue is the idiomatic form of assertEqual(True, ...).
        self.assertTrue(os.path.exists(target))


if __name__ == '__main__':
    unittest.main()
{ "repo_name": "asposeforcloud/Aspose_Cloud_SDK_For_Python", "path": "test/test_aspose_storage.py", "copies": "1", "size": "1231", "license": "mit", "hash": 5404796034206046000, "line_mean": 30.5641025641, "line_max": 106, "alpha_frac": 0.6580016247, "autogenerated": false, "ratio": 3.4289693593314765, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.45869709840314765, "avg_score": null, "num_lines": null }
"""Aspose Cloud "Tasks" (project management) API wrappers.

Thin HTTP clients for project assignments, resources, calendars, tasks,
task links, outline codes, extended attributes and file conversion.

Every API method accepts the same trailing storage parameters:

:param remote_folder: storage path to operate on
:param storage_type: type of storage, e.g. 'Aspose' or 'S3'
:param storage_name: name of the storage, e.g. 'MyAmazonS3'
"""

__author__ = 'assadmahmood'

import json

import requests

from asposecloud import AsposeApp
from asposecloud import Product
from asposecloud.common import Utils

# Headers sent with every API call.  NOTE: add_calendar/add_link historically
# omitted 'x-aspose-client'; the header is now sent consistently everywhere.
_DEFAULT_HEADERS = {
    'content-type': 'application/json',
    'accept': 'application/json',
    'x-aspose-client': 'PYTHONSDK/v1.0',
}


def _request_json(method, str_uri, payload=None):
    """Sign *str_uri*, perform the HTTP request and return the decoded JSON dict.

    :param method: HTTP verb ('GET', 'POST' or 'DELETE')
    :param str_uri: unsigned request URI
    :param payload: optional request body (pre-serialized string) or None
    :return: parsed JSON response (dict)

    On an HTTP error the exception and the response body are printed and the
    process exits with status 1 -- the SDK's historical error behaviour.
    """
    signed_uri = Utils.sign(str_uri)
    response = None
    try:
        response = requests.request(method, signed_uri, data=payload,
                                    headers=_DEFAULT_HEADERS)
        response.raise_for_status()
        return response.json()
    except requests.HTTPError as e:
        # print() call form works on both Python 2 and Python 3.
        print(e)
        print(response.content)
        exit(1)


class Assignments:
    """Client for the resource-assignment endpoints of a project file."""

    def __init__(self, filename):
        """:param filename: project file on cloud storage, e.g. 'project.mpp'"""
        self.filename = filename
        if not filename:
            raise ValueError("filename not specified")
        self.base_uri = Product.product_uri + 'tasks/' + self.filename

    def get_assignments(self, remote_folder='', storage_type='Aspose', storage_name=None):
        """Return all assignment items of the project."""
        str_uri = Utils.append_storage(self.base_uri + '/assignments',
                                       remote_folder, storage_type, storage_name)
        return _request_json('GET', str_uri)['Assignments']['AssignmentsItem']

    def get_assignment(self, assignment_id, remote_folder='', storage_type='Aspose', storage_name=None):
        """Return a single assignment by its uid."""
        str_uri = Utils.append_storage(self.base_uri + '/assignments/' + assignment_id,
                                       remote_folder, storage_type, storage_name)
        return _request_json('GET', str_uri)['Assignment']

    def delete_assignment(self, assignment_id, remote_folder='', storage_type='Aspose', storage_name=None):
        """Delete an assignment by its uid; returns the raw API response."""
        str_uri = Utils.append_storage(self.base_uri + '/assignments/' + assignment_id,
                                       remote_folder, storage_type, storage_name)
        return _request_json('DELETE', str_uri)

    def add_assignment(self, task_id, resource_id, units,
                       remote_folder='', storage_type='Aspose', storage_name=None):
        """Assign resource *resource_id* to task *task_id* with *units*; return the new item."""
        str_uri = Utils.append_storage(self.base_uri + '/assignments',
                                       remote_folder, storage_type, storage_name)
        qry = {'taskUid': task_id, 'resourceUid': resource_id, 'units': units}
        str_uri = Utils.build_uri(str_uri, qry)
        return _request_json('POST', str_uri)['AssignmentItem']


class Resources:
    """Client for the resource endpoints of a project file."""

    def __init__(self, filename):
        """:param filename: project file on cloud storage, e.g. 'project.mpp'"""
        self.filename = filename
        if not filename:
            raise ValueError("filename not specified")
        self.base_uri = Product.product_uri + 'tasks/' + self.filename

    def get_resources(self, remote_folder='', storage_type='Aspose', storage_name=None):
        """Return all resource items of the project."""
        str_uri = Utils.append_storage(self.base_uri + '/resources',
                                       remote_folder, storage_type, storage_name)
        return _request_json('GET', str_uri)['Resources']['ResourceItem']

    def get_resource(self, resource_id, remote_folder='', storage_type='Aspose', storage_name=None):
        """Return a single resource by its uid."""
        str_uri = Utils.append_storage(self.base_uri + '/resources/' + resource_id,
                                       remote_folder, storage_type, storage_name)
        return _request_json('GET', str_uri)['Resource']

    def delete_resource(self, resource_id, remote_folder='', storage_type='Aspose', storage_name=None):
        """Delete a resource by its uid; returns the raw API response."""
        str_uri = Utils.append_storage(self.base_uri + '/resources/' + resource_id,
                                       remote_folder, storage_type, storage_name)
        return _request_json('DELETE', str_uri)

    def add_resource(self, resource_name, after_resource_id,
                     remote_folder='', storage_type='Aspose', storage_name=None):
        """Add a resource named *resource_name* after *after_resource_id*; return the new item."""
        str_uri = Utils.append_storage(self.base_uri + '/resources',
                                       remote_folder, storage_type, storage_name)
        qry = {'resourceName': resource_name, 'afterResourceId': after_resource_id}
        str_uri = Utils.build_uri(str_uri, qry)
        return _request_json('POST', str_uri)['ResourceItem']


class Calendar:
    """Client for the calendar endpoints of a project file."""

    def __init__(self, filename):
        """:param filename: project file on cloud storage, e.g. 'project.mpp'"""
        self.filename = filename
        if not filename:
            raise ValueError("filename not specified")
        self.base_uri = Product.product_uri + 'tasks/' + self.filename

    def get_calendars(self, remote_folder='', storage_type='Aspose', storage_name=None):
        """Return all calendars of the project."""
        str_uri = Utils.append_storage(self.base_uri + '/calendars',
                                       remote_folder, storage_type, storage_name)
        return _request_json('GET', str_uri)['Calendars']['List']

    def get_calendar(self, calendar_id, remote_folder='', storage_type='Aspose', storage_name=None):
        """Return a single calendar by its uid."""
        str_uri = Utils.append_storage(self.base_uri + '/calendars/' + calendar_id,
                                       remote_folder, storage_type, storage_name)
        return _request_json('GET', str_uri)['Calendar']

    def add_calendar(self, calendar_data, remote_folder='', storage_type='Aspose', storage_name=None):
        """Add a calendar described by dict *calendar_data*; returns the raw API response."""
        str_uri = Utils.append_storage(self.base_uri + '/calendars',
                                       remote_folder, storage_type, storage_name)
        # The calendar definition travels in the POST body as JSON.
        return _request_json('POST', str_uri, json.dumps(calendar_data))

    def delete_calendar(self, calendar_id, remote_folder='', storage_type='Aspose', storage_name=None):
        """Delete a calendar by its uid; returns the raw API response."""
        str_uri = Utils.append_storage(self.base_uri + '/calendars/' + calendar_id,
                                       remote_folder, storage_type, storage_name)
        return _request_json('DELETE', str_uri)


class Document:
    """Client for document-level endpoints: properties, tasks, links,
    outline codes and extended attributes of a project file."""

    def __init__(self, filename):
        """:param filename: project file on cloud storage, e.g. 'project.mpp'"""
        self.filename = filename
        if not filename:
            raise ValueError("filename not specified")
        self.base_uri = Product.product_uri + 'tasks/' + self.filename

    def get_properties(self, remote_folder='', storage_type='Aspose', storage_name=None):
        """Return the document property list."""
        str_uri = Utils.append_storage(self.base_uri + '/documentProperties',
                                       remote_folder, storage_type, storage_name)
        return _request_json('GET', str_uri)['Properties']['List']

    def get_tasks(self, remote_folder='', storage_type='Aspose', storage_name=None):
        """Return all task items of the project."""
        str_uri = Utils.append_storage(self.base_uri + '/tasks',
                                       remote_folder, storage_type, storage_name)
        return _request_json('GET', str_uri)['Tasks']['TaskItem']

    def get_task(self, task_id, remote_folder='', storage_type='Aspose', storage_name=None):
        """Return a single task by its uid."""
        str_uri = Utils.append_storage(self.base_uri + '/tasks/' + task_id,
                                       remote_folder, storage_type, storage_name)
        return _request_json('GET', str_uri)['Task']

    def add_task(self, task_name, before_task_id,
                 remote_folder='', storage_type='Aspose', storage_name=None):
        """Add a task named *task_name* before *before_task_id*; return the new item."""
        str_uri = Utils.append_storage(self.base_uri + '/tasks',
                                       remote_folder, storage_type, storage_name)
        qry = {'taskName': task_name, 'beforeTaskId': before_task_id}
        str_uri = Utils.build_uri(str_uri, qry)
        return _request_json('POST', str_uri)['TaskItem']

    def delete_task(self, task_id, remote_folder='', storage_type='Aspose', storage_name=None):
        """Delete a task by its uid; returns the raw API response."""
        str_uri = Utils.append_storage(self.base_uri + '/tasks/' + task_id,
                                       remote_folder, storage_type, storage_name)
        return _request_json('DELETE', str_uri)

    def get_links(self, remote_folder='', storage_type='Aspose', storage_name=None):
        """Return all task links of the project."""
        str_uri = Utils.append_storage(self.base_uri + '/taskLinks',
                                       remote_folder, storage_type, storage_name)
        return _request_json('GET', str_uri)['TaskLinks']

    def add_link(self, link_data, remote_folder='', storage_type='Aspose', storage_name=None):
        """Add a task link described by dict *link_data*; returns the raw API response."""
        str_uri = Utils.append_storage(self.base_uri + '/taskLinks',
                                       remote_folder, storage_type, storage_name)
        # The link definition travels in the POST body as JSON.
        return _request_json('POST', str_uri, json.dumps(link_data))

    def delete_link(self, link_index, remote_folder='', storage_type='Aspose', storage_name=None):
        """Delete a task link by its index; returns the raw API response."""
        str_uri = Utils.append_storage(self.base_uri + '/taskLinks/' + link_index,
                                       remote_folder, storage_type, storage_name)
        return _request_json('DELETE', str_uri)

    def get_outline_codes(self, remote_folder='', storage_type='Aspose', storage_name=None):
        """Return all outline codes of the project."""
        str_uri = Utils.append_storage(self.base_uri + '/outlineCodes',
                                       remote_folder, storage_type, storage_name)
        return _request_json('GET', str_uri)['OutlineCodes']

    def get_outline_code(self, outline_code_id, remote_folder='', storage_type='Aspose', storage_name=None):
        """Return a single outline code by its id."""
        str_uri = Utils.append_storage(self.base_uri + '/outlineCodes/' + outline_code_id,
                                       remote_folder, storage_type, storage_name)
        return _request_json('GET', str_uri)['OutlineCode']

    def delete_outline_code(self, outline_code_id, remote_folder='', storage_type='Aspose', storage_name=None):
        """Delete an outline code by its id; returns the raw API response."""
        str_uri = Utils.append_storage(self.base_uri + '/outlineCodes/' + outline_code_id,
                                       remote_folder, storage_type, storage_name)
        return _request_json('DELETE', str_uri)

    def get_extended_attributes(self, remote_folder='', storage_type='Aspose', storage_name=None):
        """Return all extended attributes of the project."""
        str_uri = Utils.append_storage(self.base_uri + '/extendedAttributes',
                                       remote_folder, storage_type, storage_name)
        return _request_json('GET', str_uri)['ExtendedAttributes']

    def get_extended_attribute(self, attribute_id, remote_folder='', storage_type='Aspose', storage_name=None):
        """Return a single extended attribute by its id."""
        str_uri = Utils.append_storage(self.base_uri + '/extendedAttributes/' + attribute_id,
                                       remote_folder, storage_type, storage_name)
        return _request_json('GET', str_uri)['ExtendedAttribute']

    def delete_extended_attribute(self, attribute_id, remote_folder='', storage_type='Aspose', storage_name=None):
        """Delete an extended attribute by its id; returns the raw API response."""
        str_uri = Utils.append_storage(self.base_uri + '/extendedAttributes/' + attribute_id,
                                       remote_folder, storage_type, storage_name)
        return _request_json('DELETE', str_uri)


# ========================================================================
# CONVERTER CLASS
# ========================================================================


class Converter:
    """Converts a project file on cloud storage to another format."""

    def __init__(self, filename):
        """:param filename: project file on cloud storage, e.g. 'project.mpp'"""
        self.filename = filename
        if not filename:
            raise ValueError("filename not specified")
        self.base_uri = Product.product_uri + 'tasks/' + self.filename

    def convert(self, save_format, stream_out=False,
                remote_folder='', storage_type='Aspose', storage_name=None):
        """Convert the project file to *save_format*.

        :param save_format: target format extension, e.g. 'pdf'
        :param stream_out: when True return the raw bytes instead of saving
        :return: saved file path, raw content (stream_out=True), or the
                 validation failure result from Utils.validate_result
        :raises ValueError: if save_format is empty
        """
        if not save_format:
            raise ValueError("save_format not specified")
        str_uri = self.base_uri + '?format=' + save_format
        str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name)
        signed_uri = Utils.sign(str_uri)
        # Streamed download; handled inline because the response is not JSON.
        response = None
        try:
            response = requests.get(signed_uri, headers=_DEFAULT_HEADERS, stream=True)
            response.raise_for_status()
        except requests.HTTPError as e:
            print(e)
            print(response.content)
            exit(1)

        validate_output = Utils.validate_result(response)
        if validate_output:
            return validate_output
        if stream_out:
            return response.content
        output_path = AsposeApp.output_path + Utils.get_filename(self.filename) + '.' + save_format
        Utils.save_file(response, output_path)
        return output_path
{ "repo_name": "asposeforcloud/Aspose_Cloud_SDK_For_Python", "path": "asposecloud/tasks/__init__.py", "copies": "1", "size": "29291", "license": "mit", "hash": 3293058250371579400, "line_mean": 34.7643467643, "line_max": 118, "alpha_frac": 0.5766276331, "autogenerated": false, "ratio": 4.004237867395762, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5080865500495763, "avg_score": null, "num_lines": null }
__author__ = 'assadmahmood' import requests from asposecloud import AsposeApp from asposecloud import Product from asposecloud.common import Utils # ======================================================================== # DOCUMENT CLASS # ======================================================================== class Document: def __init__(self, filename): self.filename = filename if not filename: raise ValueError("filename not specified") self.base_uri = Product.product_uri + 'imaging/' + self.filename def update_tiff_properties_local(self, input_file_path, bit_depth, compression, resolution_unit, new_width, new_height, horizontal_resolution, vertical_resolution, output_path, remote_folder='', storage_type='Aspose', storage_name=None): """ :param input_file_path: :param bit_depth: :param compression: :param resolution_unit: :param new_width: :param new_height: :param horizontal_resolution: :param vertical_resolution: :param output_path: :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = Product.product_uri + 'imaging/tiff' qry = {'compression': compression, 'resolutionUnit': resolution_unit, 'newWidth' : new_width, 'newHeight' : new_height, 'horizontalResolution' : horizontal_resolution, 'verticalResolution' : vertical_resolution, 'bitDepth' : bit_depth, 'outputPath': output_path} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: with open(input_file_path, 'rb') as payload: response = requests.post(signed_uri, data=payload, headers={ 'content-type': 'application/json', 'accept': 'application/json' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: output_path = AsposeApp.output_path + Utils.get_filename(self.filename) + '_updated.tiff' Utils.save_file(response, 
output_path) return output_path else: return validate_output def update_tiff_properties(self, bit_depth, compression, resolution_unit, new_width, new_height, horizontal_resolution, vertical_resolution, output_path, remote_folder='', storage_type='Aspose', storage_name=None): """ :param bit_depth: :param compression: :param resolution_unit: :param new_width: :param new_height: :param horizontal_resolution: :param vertical_resolution: :param output_path: :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = Product.product_uri + 'storage/file/' + self.filename str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) file_content = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }, stream=True) str_uri = Product.product_uri + 'imaging/tiff' qry = {'compression': compression, 'resolutionUnit': resolution_unit, 'newWidth' : new_width, 'newHeight' : new_height, 'horizontalResolution' : horizontal_resolution, 'verticalResolution' : vertical_resolution, 'bitDepth' : bit_depth, 'outputPath': output_path} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, file_content, headers={ 'content-type': 'application/json', 'accept': 'application/json' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: output_path = AsposeApp.output_path + Utils.get_filename(self.filename) + '_updated.tiff' Utils.save_file(response, output_path) return output_path else: return validate_output def update_psd_properties_local(self, input_file_path, channels_count, compression_method, remote_folder='', storage_type='Aspose', storage_name=None): """ :param 
input_file_path: :param channels_count: :param compression_method: :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = Product.product_uri + 'imaging/psd' qry = {'channelsCount': channels_count, 'compressionMethod': compression_method} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: with open(input_file_path, 'rb') as payload: response = requests.post(signed_uri, data=payload, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: output_path = AsposeApp.output_path + Utils.get_filename(self.filename) + '_updated.psd' Utils.save_file(response, output_path) return output_path else: return validate_output def update_psd_properties(self, channels_count, compression_method, output_path, remote_folder='', storage_type='Aspose', storage_name=None): """ :param channels_count: :param compression_method: :param output_path: :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = self.base_uri + '/psd' qry = {'channelsCount': channels_count, 'compressionMethod': compression_method, 'outputPath': output_path} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: output_path = 
AsposeApp.output_path + Utils.get_filename(self.filename) + '_updated.psd' Utils.save_file(response, output_path) return output_path else: return validate_output def update_jpg_properties_local(self, input_file_path, quality, compression_type, remote_folder='', storage_type='Aspose', storage_name=None): """ :param input_file_path: :param quality: :param compression_type: :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = Product.product_uri + 'imaging/jpg' qry = {'quality': quality, 'compressionType': compression_type} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: with open(input_file_path, 'rb') as payload: response = requests.post(signed_uri, data=payload, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: output_path = AsposeApp.output_path + Utils.get_filename(self.filename) + '_updated.jpg' Utils.save_file(response, output_path) return output_path else: return validate_output def update_jpg_properties(self, quality, compression_type, output_path, remote_folder='', storage_type='Aspose', storage_name=None): """ :param quality: :param compression_type: :param output_path: :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = self.base_uri + '/jpg' qry = {'quality': quality, 'compressionType': compression_type, 'outputPath': output_path} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 
'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: output_path = AsposeApp.output_path + Utils.get_filename(self.filename) + '_updated.jpg' Utils.save_file(response, output_path) return output_path else: return validate_output def update_gif_properties_local(self, input_file_path, bg_color_index, px_aspect_ratio, interlaced, remote_folder='', storage_type='Aspose', storage_name=None): """ :param input_file_path: :param bg_color_index: :param px_aspect_ratio: :param interlaced: :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = Product.product_uri + 'imaging/gif' qry = {'backgroundColorIndex': bg_color_index, 'pixelAspectRatio': px_aspect_ratio, 'interlaced': interlaced} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: with open(input_file_path, 'rb') as payload: response = requests.post(signed_uri, data=payload, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: output_path = AsposeApp.output_path + Utils.get_filename(self.filename) + '_updated.gif' Utils.save_file(response, output_path) return output_path else: return validate_output def update_gif_properties(self, bg_color_index, px_aspect_ratio, interlaced, output_path, remote_folder='', storage_type='Aspose', storage_name=None): """ :param bg_color_index: :param px_aspect_ratio: :param interlaced: :param output_path: :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = self.base_uri + '/gif' qry = 
{'backgroundColorIndex': bg_color_index, 'pixelAspectRatio': px_aspect_ratio, 'interlaced': interlaced,'outputPath': output_path} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: output_path = AsposeApp.output_path + Utils.get_filename(self.filename) + '_updated.gif' Utils.save_file(response, output_path) return output_path else: return validate_output def update_bmp_properties_local(self, input_file_path, bits_per_px, h_resolution, v_resolution, remote_folder='', storage_type='Aspose', storage_name=None): """ :param bits_per_px: :param h_resolution: :param v_resolution: :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = Product.product_uri + 'imaging/bmp' qry = {'bitsPerPixel': bits_per_px, 'horizontalResolution': h_resolution, 'verticalResolution': v_resolution} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: with open(input_file_path, 'rb') as payload: response = requests.post(signed_uri, data=payload, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: output_path = AsposeApp.output_path + Utils.get_filename(self.filename) + '_updated.bmp' Utils.save_file(response, output_path) return output_path 
else: return validate_output def update_bmp_properties(self, bits_per_px, h_resolution, v_resolution, output_path, remote_folder='', storage_type='Aspose', storage_name=None): """ :param bits_per_px: :param h_resolution: :param v_resolution: :param output_path: :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = self.base_uri + '/bmp' qry = {'bitsPerPixel': bits_per_px, 'horizontalResolution': h_resolution, 'verticalResolution': v_resolution,'outputPath': output_path} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: output_path = AsposeApp.output_path + Utils.get_filename(self.filename) + '_updated.bmp' Utils.save_file(response, output_path) return output_path else: return validate_output def get_properties(self, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/properties' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def get_tiff_frame_properties(self, frame_id, remote_folder='', storage_type='Aspose', storage_name=None): """ :param frame_id: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = self.base_uri + '/frames/' + str(frame_id) + '/properties' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response def extract_frame(self, frame_id, remote_folder='', storage_type='Aspose', storage_name=None): """ :param frame_id: :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. 
MyAmazonS3 :return: """ str_uri = self.base_uri + '/frames/' + str(frame_id) + '?saveOtherFrames=false' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: output_path = AsposeApp.output_path + Utils.get_filename(self.filename) + '_frame_' + str(frame_id) + '.tiff' Utils.save_file(response, output_path) return output_path else: return validate_output return response def append_tiff(self, append_file, remote_folder='', storage_type='Aspose', storage_name=None): """ :param remote_folder: storage path to operate :param storage_type: type of storage e.g Aspose, S3 :param storage_name: name of storage e.g. MyAmazonS3 :return: """ str_uri = Product.product_uri + 'imaging/tiff/' + self.filename + '/appendTiff' qry = {'appendFile': append_file} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.post(signed_uri, None, headers={ 'content-type': 'application/json', 'accept': 'application/json' }) response.raise_for_status() response = response.json() except requests.HTTPError as e: print e print response.content exit(1) return response # ======================================================================== # IMAGE CLASS # ======================================================================== class Image: def __init__(self, filename): self.filename = filename if not filename: raise ValueError("filename not specified") self.base_uri = Product.product_uri + 'imaging/' + self.filename def rotate_image(self, method, output_path, save_format, 
remote_folder='', storage_type='Aspose', storage_name=None): """ :param method: :param output_path: :param save_format: :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = Product.product_uri + 'imaging/' + self.filename + '/rotateflip' qry = {'method': method, 'outputPath': output_path, 'format': save_format} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: save_format = 'zip' if save_format == 'html' else save_format output_path = AsposeApp.output_path + Utils.get_filename(self.filename) + '.' + save_format Utils.save_file(response, output_path) return output_path else: return validate_output def crop_image(self, x, y, width, height, output_path, save_format, remote_folder='', storage_type='Aspose', storage_name=None): """ :param x: :param y: :param width: :param height: :param output_path: :param save_format: :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = Product.product_uri + 'imaging/' + self.filename + '/crop' qry = {'x': x, 'y': y, 'width': width, 'height': height, 'outputPath': output_path, 'format': save_format} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = 
Utils.validate_result(response) if not validate_output: save_format = 'zip' if save_format == 'html' else save_format output_path = AsposeApp.output_path + Utils.get_filename(self.filename) + '_croped.' + save_format Utils.save_file(response, output_path) return output_path else: return validate_output def update_tiff_frame(self, frame_id, params, remote_folder='', storage_type='Aspose', storage_name=None): """ :param frame_id: :param params: :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = Product.product_uri + 'imaging/' + self.filename + '/frames/' + str(frame_id) str_uri = Utils.build_uri(str_uri, params) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: save_format = 'tiff' output_path = AsposeApp.output_path + Utils.get_filename(self.filename) + '_updated_frame_' + \ str(frame_id) + '.' 
+ save_format Utils.save_file(response, output_path) return output_path else: return validate_output def rotate_tiff_frame(self, frame_id, rotate_method, output_path, remote_folder='', storage_type='Aspose', storage_name=None): """ :param frame_id: :param rotate_method: :param new_height: :param output_path: :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = Product.product_uri + 'imaging/' + self.filename + '/frames/' + str(frame_id) qry = {'saveOtherFrames': True, 'rotateFlipMethod': rotate_method, 'outputPath': output_path} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: save_format = 'tiff' output_path = AsposeApp.output_path + Utils.get_filename(self.filename) + '_rotated_frame_' + \ str(frame_id) + '.' 
+ save_format Utils.save_file(response, output_path) return output_path else: return validate_output def resize_tiff_frame(self, frame_id, new_width, new_height, output_path, remote_folder='', storage_type='Aspose', storage_name=None): """ :param frame_id: :param new_width: :param new_height: :param output_path: :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = Product.product_uri + 'imaging/' + self.filename + '/frames/' + str(frame_id) qry = {'saveOtherFrames': True, 'newWidth': new_width, 'newHeight': new_height, 'outputPath': output_path} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: save_format = 'tiff' output_path = AsposeApp.output_path + Utils.get_filename(self.filename) + '_resized_frame_' + \ str(frame_id) + '.' 
+ save_format Utils.save_file(response, output_path) return output_path else: return validate_output def crop_tiff_frame(self, frame_id, x, y, new_width, new_height, output_path, remote_folder='', storage_type='Aspose', storage_name=None): """ :param frame_id: :param x: :param y: :param new_width: :param new_height: :param output_path: :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = Product.product_uri + 'imaging/' + self.filename + '/frames/' + str(frame_id) qry = {'saveOtherFrames': True, 'x':x, 'y':y, 'newWidth': new_width, 'newHeight': new_height, 'outputPath': output_path} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: save_format = 'tiff' output_path = AsposeApp.output_path + Utils.get_filename(self.filename) + '_croped_frame_' + \ str(frame_id) + '.' 
+ save_format Utils.save_file(response, output_path) return output_path else: return validate_output def resize_image(self, input_file_path, new_width, new_height, output_filename, save_format, remote_folder='', storage_type='Aspose', storage_name=None): """ :param input_file_path: :param new_width: :param new_height: :param output_filename: :param save_format: :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = Product.product_uri + 'imaging/resize' qry = {'newWidth': new_width, 'newHeight': new_height, 'format': save_format} str_uri = Utils.build_uri(str_uri, qry) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: with open(input_file_path, 'rb') as payload: response = requests.post(signed_uri, data=payload, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: save_format = 'zip' if save_format == 'html' else save_format output_path = AsposeApp.output_path + Utils.get_filename(input_file_path) + '.' 
+ save_format Utils.save_file(response, output_path) return output_path else: return validate_output def update_image(self, params, save_format, remote_folder='', storage_type='Aspose', storage_name=None): """ :param params: :param save_format: :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = self.base_uri + '/updateImage' params['format'] = save_format str_uri = Utils.build_uri(str_uri, params) str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: save_format = 'zip' if save_format == 'html' else save_format output_path = AsposeApp.output_path + Utils.get_filename(self.filename) + '_updated.' 
+ save_format Utils.save_file(response, output_path) return output_path else: return validate_output # ======================================================================== # CONVERTER CLASS # ======================================================================== class Converter: def __init__(self, filename): self.filename = filename if not filename: raise ValueError("filename not specified") self.base_uri = Product.product_uri + 'imaging/' + self.filename def convert_local_file(self, input_file_path, save_format, stream_out=False, output_filename=None, remote_folder='', storage_type='Aspose', storage_name=None): """ :param input_file_path: :param save_format: :param stream_out: :param output_filename: :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = Product.product_uri + 'imaging/' + self.filename + '/saveAs?format=' + save_format str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: with open(input_file_path, 'rb') as payload: response = requests.get(signed_uri, data=payload, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: if not stream_out: if output_filename is None: output_filename = input_file_path save_format = 'zip' if save_format == 'html' else save_format output_path = AsposeApp.output_path + Utils.get_filename(output_filename) + '.' 
+ save_format Utils.save_file(response, output_path) return output_path else: return response.content else: return validate_output def convert_tiff_to_fax(self, stream_out=False, output_filename=None, remote_folder='', storage_type='Aspose', storage_name=None): """ :param stream_out: :param output_filename: :param remote_folder: :param storage_type: :param storage_name: :return: """ str_uri = Product.product_uri + 'imaging/tiff/' + self.filename + '/toFax' str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name) signed_uri = Utils.sign(str_uri) response = None try: response = requests.get(signed_uri, headers={ 'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0' }, stream=True) response.raise_for_status() except requests.HTTPError as e: print e print response.content exit(1) validate_output = Utils.validate_result(response) if not validate_output: if not stream_out: if output_filename is None: output_filename = self.filename output_path = AsposeApp.output_path + output_filename Utils.save_file(response, output_path) return output_path else: return response.content else: return validate_output
{ "repo_name": "asposeforcloud/Aspose_Cloud_SDK_For_Python", "path": "asposecloud/imaging/__init__.py", "copies": "1", "size": "37583", "license": "mit", "hash": 8700172759326975000, "line_mean": 34.5562913907, "line_max": 134, "alpha_frac": 0.5634196312, "autogenerated": false, "ratio": 4.088219297291418, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0019422310611208228, "num_lines": 1057 }
__author__ = 'astar' from sentiment_classifier import SentimentClassifier from codecs import open import time from flask import Flask, render_template, request app = Flask(__name__) print "Preparing classifier for sentiment analysis demo" start_time = time.time() classifier = SentimentClassifier() print "Classifier is ready" print time.time() - start_time, "seconds" @app.route("/sentiment-demo", methods=["POST", "GET"]) def index_page(text="", prediction_message=""): if request.method == "POST": text = request.form["text"] logfile = open("ydf_demo_logs.txt", "a", "utf-8") print text print >> logfile, "<response>" print >> logfile, text prediction_message = classifier.get_prediction_message([text]) print prediction_message print >> logfile, prediction_message print >> logfile, "</response>" logfile.close() return render_template('sentiment_demo.html', text=text, prediction_message=prediction_message) if __name__ == "__main__": app.run(host='0.0.0.0', port=8080, debug=False)
{ "repo_name": "astarostin/MachineLearningSpecializationCoursera", "path": "course6/week4/demo.py", "copies": "1", "size": "1044", "license": "apache-2.0", "hash": -5941586927611551000, "line_mean": 31.625, "line_max": 99, "alpha_frac": 0.6925287356, "autogenerated": false, "ratio": 3.5033557046979866, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.46958844402979866, "avg_score": null, "num_lines": null }
__author__ = 'astyler'
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import NearestNeighbors
import math
import numpy as np

# Shared defaults as immutable tuples: the original used mutable list
# defaults on EnsemblePredictor.__init__, the classic shared-default-argument
# pitfall (one list object shared by every call).
_DEFAULT_FEATURES = ('Latitude', 'Longitude', 'HeadingCosF', 'HeadingSinF',
                     'SpeedFilt', 'Acceleration', 'Power', 'TotalEnergyUsed')
_DEFAULT_FEATURE_WEIGHTS = (1, 1, 0.25, 0.25, 0.1, 0, 0, 0.2)


class TripPredictor(object):
    """1-nearest-neighbour lookup over the standardized, weighted features of one trip."""

    def __init__(self, trip, features, feature_weights):
        self.features = list(features)
        self.feature_weights = feature_weights
        self.scaler = StandardScaler()
        # Standardize each feature, then scale by its importance weight so
        # the Euclidean metric of the NN tree reflects the weighting.
        self.data = self.scaler.fit_transform(trip[self.features].values)
        self.data *= feature_weights
        self.tree = NearestNeighbors(n_neighbors=1).fit(self.data)

    def predict(self, query_point):
        """Return (distance, index) of the trip sample nearest to query_point."""
        query_point = self.feature_weights * self.scaler.transform(query_point[self.features].values)
        distances, indices = self.tree.kneighbors(X=query_point, n_neighbors=1)
        return distances[0, 0], indices[0, 0]


class EnsemblePredictor(object):
    """Weighted ensemble of per-trip nearest-neighbour predictors."""

    def __init__(self, trip_list, features=_DEFAULT_FEATURES,
                 feature_weights=_DEFAULT_FEATURE_WEIGHTS):
        """
        :param trip_list: iterable of trip DataFrames, one predictor built per trip
        :param features: column names used as the feature vector
        :param feature_weights: per-feature importance weights (same length)
        """
        self.ensemble = []
        self.ensemble_weights = []
        # Copy to lists so callers' sequences are never aliased or mutated.
        self.features = list(features)
        self.feature_weights = list(feature_weights)
        for trip in trip_list:
            self.add_trip(trip)

    def add_trip(self, trip):
        """Register one more trip predictor with an initial ensemble weight of 1."""
        self.ensemble_weights = np.append(self.ensemble_weights, 1.0)
        self.ensemble.append(TripPredictor(trip=trip, features=self.features,
                                           feature_weights=self.feature_weights))

    def predict(self, query_point, sigma=1e-4, update_ensemble_weighting=False):
        """Query every member predictor.

        :param query_point: single-row DataFrame/Series with the feature columns
        :param sigma: width of the Gaussian kernel applied to NN distances
        :param update_ensemble_weighting: if True, fold the kernel scores into
            the persistent ensemble weights (normalized to unit norm)
        :return: list of (ensemble_weight, kernel_score, predicted_index) per member
        """
        predictions = []
        for weight, predictor in zip(self.ensemble_weights, self.ensemble):
            distance, prediction = predictor.predict(query_point)
            predictions.append((self.norm(distance, sigma), prediction))

        if update_ensemble_weighting:
            self.ensemble_weights = np.multiply(self.ensemble_weights, [p[0] for p in predictions])
            self.ensemble_weights /= np.linalg.norm(self.ensemble_weights)

        return [(w, p[0], p[1]) for w, p in zip(self.ensemble_weights, predictions)]

    def norm(self, dist, sigma):
        """Gaussian kernel score for a neighbour at distance `dist`."""
        return math.exp(-dist ** 2 / (2 * sigma))
{ "repo_name": "astyler/hybridpy", "path": "hybridpy/learning/ensemblepredictor.py", "copies": "1", "size": "2387", "license": "mit", "hash": -3697026867521184300, "line_mean": 37.5, "line_max": 139, "alpha_frac": 0.6476749057, "autogenerated": false, "ratio": 3.807017543859649, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4954692449559649, "avg_score": null, "num_lines": null }
__author__ = 'astyler'
import math
from hybridpy.models.batteries import IdealBattery


class Vehicle(object):
    """Abstract vehicle: subclasses model instantaneous power demand."""

    # Parameter renamed from the original typo 'accleration' so keyword
    # callers can use the same spelling as Car.get_power.
    def get_power(self, speed_init, acceleration, elevation, gradient, duration):
        """Base vehicle consumes no power."""
        return 0


class Car(Vehicle):
    """Longitudinal road-load model of a car (drag + rolling + grade + inertia)."""

    def __init__(self, mass=1200, cross_area=1.988, drag_coefficient=0.31, rolling_resistance=0.015):
        self.mass = mass                            # kg
        self.cross_area = cross_area                # frontal area, m^2
        self.drag_coefficient = drag_coefficient    # dimensionless Cd
        self.rolling_resistance = rolling_resistance  # rolling coefficient

    def get_power(self, speed_init, acceleration, elevation, gradient, duration, outside_temperature=23):
        """Return the motor power demand in watts for one motion sample.

        :param speed_init: speed at the start of the interval (m/s)
        :param acceleration: longitudinal acceleration (m/s^2)
        :param elevation: altitude used for air-density correction (m)
        :param gradient: road grade (rise/run)
        :param duration: interval length (unused in this steady formula)
        :param outside_temperature: NOTE(review): fed straight into the
            ideal-gas law as an absolute temperature, but the default 23
            looks like degrees Celsius (~296 K) -- confirm before changing.
        """
        a_gravity = 9.81
        offset = 240             # constant accessory load, W
        ineff = 0.95             # drivetrain efficiency
        regen_efficiency = 0.84  # fraction of braking power recovered

        # Barometric formula for pressure at altitude, then air density rho.
        pressure = 101325 * math.pow((1 - ((0.0065 * elevation) / 288.15)),
                                     ((a_gravity * 0.0289) / (8.314 * 0.0065)))
        rho = (pressure * 0.0289) / (8.314 * outside_temperature)
        air_resistance_coefficient = 0.5 * rho * self.cross_area * self.drag_coefficient

        # check behavior near 0
        theta = math.atan(gradient)
        f_sin = self.mass * a_gravity * math.sin(theta)  # grade force
        f_cos = self.mass * a_gravity * math.cos(theta)  # normal force

        # Rolling resistance dependent on the normal force
        rolling_resistance = self.rolling_resistance * f_cos
        air_resistance = air_resistance_coefficient * speed_init * speed_init
        f_net = self.mass * acceleration
        # f_resistance is the sum of all opposing forces
        f_resistance = air_resistance + rolling_resistance + f_sin
        f_motor = f_net + f_resistance

        if f_motor > 0:
            # Tractive effort: battery/engine must overcome drivetrain losses.
            power = f_motor * speed_init / ineff
        else:
            # Braking: only a fraction of the power comes back through regen.
            power = f_motor * speed_init * ineff * regen_efficiency

        return power + offset

    # TODO: add in a real power -> fuel mapping
    def compute_fuel_rate(self, power_out_W, soc_init=0):
        """Placeholder fuel-rate model, linear in output power."""
        return power_out_W / 10000.0


class ElectricCar(Car):
    """Car whose 'fuel' consumption is the battery current draw."""

    def __init__(self, battery=None, **kwargs):
        """
        :param battery: battery model; defaults to a fresh IdealBattery.
            (The original default `battery=IdealBattery()` was evaluated once
            at class-definition time, so every ElectricCar silently shared
            ONE battery object.)
        """
        super(ElectricCar, self).__init__(**kwargs)
        self.battery = battery if battery is not None else IdealBattery()

    def compute_fuel_rate(self, power_out_W, soc_init):
        """Return the battery current (A) needed to deliver power_out_W for 1 s."""
        duration_s = 1.0
        delta_soc, current = self.battery.compute_delta_soc_and_current(soc_init, power_out_W, duration_s)
        return current
{ "repo_name": "astyler/hybridpy", "path": "hybridpy/models/vehicles.py", "copies": "1", "size": "2261", "license": "mit", "hash": -8714274310161937000, "line_mean": 34.8888888889, "line_max": 118, "alpha_frac": 0.62804069, "autogenerated": false, "ratio": 3.3746268656716416, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9415188560409338, "avg_score": 0.017495799052460823, "num_lines": 63 }
__author__ = 'astyler'
import math


class IdealBattery(object):
    """Lossless battery: a constant-voltage bucket of watt-hours."""

    def __init__(self, max_energy_wh=10000.0, voltage=90, name='ideal'):
        self.max_energy_wh = max_energy_wh
        self.voltage = voltage
        self.name = name

    def compute_voltage(self, soc_init):
        """Terminal voltage is constant, independent of state of charge."""
        return self.voltage

    # Not used in the SOC calculation itself; convenience for current
    # normalization.
    def compute_current(self, soc_init, power_out_W):
        return power_out_W / self.compute_voltage(soc_init)

    def compute_delta_soc_and_current(self, soc_init, power_out_W, duration_s):
        """Return (delta_soc, current_A) for drawing power_out_W over duration_s.

        Power in watts, duration in seconds, SOC in [0, 1].
        """
        amps = self.compute_current(soc_init, power_out_W)
        delta_soc = (- power_out_W * duration_s / 3600.0) / self.max_energy_wh
        return delta_soc, amps


class QuadraticBattery(object):
    """Battery with linear open-circuit voltage in SOC and ohmic resistance."""

    def __init__(self, u_min=3.25, u_max=3.9, amphours=5, cells=60, resistance=2.1 / 1000, name='quadratic'):
        # Per-cell quantities are scaled up to the full series pack.
        self.u_max = cells * u_max
        self.u_min = cells * u_min
        self.q_max_h = amphours
        self.resistance = cells * resistance
        self.name = name

    def compute_voltage(self, soc):
        """Open-circuit pack voltage, linear between u_min (soc=0) and u_max (soc=1)."""
        return self.u_min + (self.u_max - self.u_min) * soc

    def compute_current(self, soc_init, power_out_W):
        """Solve P = (V - I*R) * I for the delivered current I (quadratic root)."""
        voltage = self.compute_voltage(soc_init)
        try:
            discriminant = voltage * voltage - 4 * power_out_W * self.resistance
            current_out_A = (voltage - math.sqrt(discriminant)) / (2 * self.resistance)
        except ValueError:
            # Negative discriminant: the requested power exceeds what the pack
            # can deliver through its resistance; fall back to the ideal
            # (zero-resistance) approximation.
            current_out_A = power_out_W / voltage
        return current_out_A

    def compute_delta_soc_and_current(self, soc_init, power_out_W, duration_s):
        """Return (delta_soc, current_A) using coulomb counting over duration_s."""
        amps = self.compute_current(soc_init, power_out_W)
        return (-amps * duration_s / 3600) / self.q_max_h, amps
{ "repo_name": "astyler/hybridpy", "path": "hybridpy/models/batteries.py", "copies": "1", "size": "1876", "license": "mit", "hash": -1565829117979475700, "line_mean": 38.0833333333, "line_max": 126, "alpha_frac": 0.6364605544, "autogenerated": false, "ratio": 3.1742808798646363, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4310741434264636, "avg_score": null, "num_lines": null }
__author__ = 'astyler'
import pandas as pd
import numpy as np
import math
from scipy.signal import butter, filtfilt


def load(fname):
    """Load a trip CSV and derive smoothed speed/acceleration/heading/gradient columns.

    :param fname: path to a trip CSV with at least PeriodMS, Speed, Elevation,
        Latitude and Longitude columns
    :return: the DataFrame augmented with ElapsedSeconds, SpeedFilt,
        Acceleration, ElevationFilt, HeadingRaw, HeadingCosF, HeadingSinF
        and GradientRaw
    """
    # Imported here so the pure-geometry helper below stays usable without
    # the mapping package installed.
    import osmapping

    trip = pd.read_csv(fname)

    elapsed = np.cumsum(trip.PeriodMS / 1000.0)
    elapsed -= elapsed[0]
    trip['ElapsedSeconds'] = elapsed

    # smooth speed (2nd-order Butterworth, zero-phase)
    b, a = butter(2, 0.5)
    trip['SpeedFilt'] = filtfilt(b, a, trip.Speed)

    # Bracket assignment reliably creates/overwrites the COLUMN; the original
    # `trip.Acceleration = ...` attribute assignment silently creates a plain
    # instance attribute (not a column) whenever 'Acceleration' is absent from
    # the CSV, and `trip.Acceleration[0] = 0` was chained assignment.
    trip['Acceleration'] = trip['SpeedFilt'].diff()
    trip.loc[0, 'Acceleration'] = 0

    # smooth noisy elevation measurements
    b, a = butter(4, 0.05)
    trip['ElevationFilt'] = filtfilt(b, a, trip.Elevation)

    locations = trip[['Latitude', 'Longitude']].values

    # add heading between consecutive fixes; the final sample repeats the
    # previous heading because it has no successor.
    headings = [compute_heading(lat1=here[0], lat2=there[0], lon1=here[1], lon2=there[1])
                for here, there in zip(locations[0:-1], locations[1:])]
    headings.append(headings[-1])
    trip['HeadingRaw'] = headings

    # Hold the last reliable heading while nearly stationary (GPS heading is
    # dominated by noise below ~1 m/s).
    filtered_headings = [headings[0]]
    for heading, speed in zip(headings[1:], trip['SpeedFilt'].values[1:]):
        if speed < 1:
            filtered_headings.append(filtered_headings[-1])
        else:
            filtered_headings.append(heading)

    # Filter cos/sin components instead of the raw angle to avoid wrap-around
    # discontinuities at +/-pi.
    b, a = butter(2, 0.2)
    trip['HeadingCosF'] = filtfilt(b, a, np.cos(filtered_headings))
    trip['HeadingSinF'] = filtfilt(b, a, np.sin(filtered_headings))

    # add gradient: elevation rise over planar run; +1.0 m guards against
    # division by zero for coincident fixes.
    planar_distances = [osmapping.haversine(here, there) + 1.0
                        for here, there in zip(locations[0:-1], locations[1:])]
    trip['GradientRaw'] = trip.Elevation.diff() / ([1.0] + planar_distances)
    trip.loc[0, 'GradientRaw'] = trip.loc[1, 'GradientRaw']

    return trip


def compute_heading(lat1, lat2, lon1, lon2):
    """Heading from point 1 to point 2, in radians; inputs in degrees.

    NOTE(review): the atan2 arguments are swapped relative to the standard
    compass-bearing formula, which makes this the angle measured from EAST
    (math convention: east=0, north=pi/2) rather than from north -- the
    downstream cos/sin features are symmetric to this choice, so it appears
    intentional; confirm before "fixing" it.
    """
    lat1, lat2, lon1, lon2 = map(math.radians, [lat1, lat2, lon1, lon2])
    return math.atan2(math.cos(lat1) * math.sin(lat2) - math.sin(lat1) * math.cos(lat2) * math.cos(lon2 - lon1),
                      math.sin(lon2 - lon1) * math.cos(lat2))
{ "repo_name": "astyler/hybridpy", "path": "hybridpy/dataset/triploader.py", "copies": "1", "size": "1887", "license": "mit", "hash": 6802950418851111000, "line_mean": 33.3272727273, "line_max": 145, "alpha_frac": 0.6412294648, "autogenerated": false, "ratio": 3, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9126677745365599, "avg_score": 0.002910343886880032, "num_lines": 55 }
__author__ = 'astyler'


# NOTE(review): this module references carmodel, np, interp1d,
# compute_terminal_cost and __get_model_outputs, none of which are imported
# or defined in this file -- presumably supplied elsewhere in the package;
# verify before calling.
def compute_value_function(trip, init_soc, num_soc_states=1000, electricity_to_fuel_price_ratio=0.25, sell_to_buy_ratio=0.8, gamma=1.0):
    """
    Computes the value function for a trip
    :param trip: Trip dataframe with columns [TimeIndex (s), Gradient, Speed (m/s)]
    :param init_soc: Initial battery state of charge [0-1]
    :param num_soc_states: Resolution of SOC grid
    :param electricity_to_fuel_price_ratio: ratio of cost between electricity and fuel
    :param sell_to_buy_ratio: ratio of cost recovery at terminal state from selling excess SOC to grid
    :param gamma: single state decay rate for bellman equation
    :return: Value Function matrix [T x SOC], Q Matrix [T, SOC, Actions]
    """
    actions = carmodel.get_action_list()
    # Uniform SOC grid over [0, 1].
    states = np.linspace(0, 1, num=num_soc_states)
    T = len(trip)
    q_function = np.empty(shape=(T, len(states), len(actions)))
    value_function = np.empty(shape=(T, len(states)))

    # Set terminal costs, reduce gain for selling electricity buy sell/buy ratio.
    value_function[T - 1] = [compute_terminal_cost(init_soc, soc, electricity_to_fuel_price_ratio, sell_to_buy_ratio)
                             for soc in states]

    # define default cost function: fuel cost over the interval, with a large
    # (1e6) penalty for infeasible (negative-fuel) actions.
    cost_function = lambda l_fuel, l_time: l_fuel * l_time if l_fuel >= 0 else 1e6

    # backprop djikstras to compute value function (backward induction over time)
    for t in reversed(range(T - 1)):
        def nextvalue(x_query):
            # Interpolate the next-step value function over the SOC grid;
            # out-of-range SOC is penalized below (under-charge) or clamped
            # to the full-battery value (over-charge).
            fit = interp1d(states, value_function[t + 1], bounds_error=False)
            v_max_batt = value_function[t + 1][-1]
            return [1e6 if xq < 0 else v_max_batt if xq > 1 else yq
                    for (xq, yq) in zip(x_query, fit(x_query))]

        time_period = trip.TimeIndex.iloc[t + 1] - trip.TimeIndex.iloc[t]
        # One (power, fuel) pair per candidate action at this time step.
        powers, fuels = zip(*[__get_model_outputs(a, t, time_period, trip) for a in actions])
        for (idx, x) in enumerate(states):
            # SOC reached from state x under each action's power draw.
            next_values = nextvalue([carmodel.new_soc(x, power, time_period) for power in powers])
            # Bellman backup: stage cost + discounted next-state value.
            q_function[t][idx] = [cost_function(fuel, time_period) + gamma * nval
                                  for nval, fuel in zip(next_values, fuels)]
        # Value is the minimum Q over actions at each SOC state.
        value_function[t] = [min(qt_state) for qt_state in q_function[t]]

    # return J and Q
    return value_function, q_function
{ "repo_name": "astyler/hybridpy", "path": "hybridpy/__init__.py", "copies": "1", "size": "2338", "license": "mit", "hash": -5909510506393004000, "line_mean": 45.78, "line_max": 117, "alpha_frac": 0.6364414029, "autogenerated": false, "ratio": 3.189631650750341, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.43260730536503406, "avg_score": null, "num_lines": null }
__author__ = 'astyler'
import numpy as np
from scipy.interpolate import interp1d
from hybridpy.models import vehicles, batteries


def compute(trip, controls, soc_states=50, gamma=1.0,
            cost_function=lambda fuel_rate, power, duration: fuel_rate * duration, vehicle=vehicles.Car(),
            battery=batteries.QuadraticBattery()):
    """
    Computes the value function and q function for a given trip and optimization parameters.

    Pass 1 precomputes the power demand and duration of every timestep from the vehicle
    model; pass 2 runs backward induction over time on a discretized SOC grid.

    :param trip: Trip dataframe containing speed, acceleration, elevation, and gradient features
    :param controls: discrete list of allowed controls for the engine power output
    :param soc_states: scalar number of state of charge states (resolution)
    :param gamma: discount factor in bellman equation
    :param cost_function: cost function for input arguments: fuel_rate, power, duration
    :param vehicle: vehicle model to generate power outputs from speed, acceleration, gradient
    :param battery: battery model to compute SOC change for given power loads
    :return: value_function, q_function, power list, duration list
    """
    socs = np.linspace(0, 1, num=soc_states)
    time_states = len(trip)
    q_function = np.zeros(shape=(time_states, soc_states, len(controls)))
    value_function = np.zeros(shape=(time_states, soc_states))

    # Pass 1: per-step power demand and duration, derived from consecutive trip rows.
    powers = []
    durations = []
    for t in xrange(0, time_states - 1):
        state = trip.iloc[t]
        duration = trip.ElapsedSeconds.iloc[t + 1] - state.ElapsedSeconds
        power = vehicle.get_power(speed_init=state.SpeedFilt, acceleration=state.Acceleration,
                                  elevation=state.ElevationFilt, gradient=state.GradientRaw, duration=duration)
        powers.append(power)
        durations.append(duration)

    # value function terminal state value is 0 for all charges (value_function was
    # zero-initialized); consider adding in price of electricity to fill battery

    # backprop djikstras to compute value function
    for t in xrange(time_states - 2, -1, -1):
        # Interpolant over the next time slice; rebuilt once per timestep.
        next_value_slice = interp1d(socs, value_function[t + 1])
        power_demand = powers[t]
        duration = durations[t]

        def cost_to_go(soc):
            # NOTE(review): the comment below says "return inf" but the code returns
            # nan; with np.nanmin below, nan entries are simply excluded from the
            # minimum (only an all-nan row would propagate nan) — confirm intended.
            if soc < 0:
                return np.nan  # can't pull energy when battery empty, return inf ctg
            elif soc > 1:
                return value_function[t + 1][-1]  # can't charge above max, return value at max
            else:
                return next_value_slice(soc)  # return cost to go of next slice

        for (i, soc) in enumerate(socs):
            # control is power supplied from the ICE, battery makes up the difference
            costs_to_go = [cost_to_go(soc + battery.compute_delta_soc_and_current(soc, power_demand - control, duration)[0]) for control in controls]
            # Bellman update: immediate fuel cost plus discounted cost-to-go, one entry per control.
            q_function[t][i] = [
                cost_function(vehicle.compute_fuel_rate(control, soc), power_demand - control, duration) + (gamma * ctg)
                for ctg, control in zip(costs_to_go, controls)]

        # Value at (t, soc) is the best q over controls, ignoring infeasible (nan) entries.
        value_function[t] = [np.nanmin(q) for q in q_function[t]]

    return value_function, q_function, powers, durations
{ "repo_name": "astyler/hybridpy", "path": "hybridpy/learning/dynamicprogramming.py", "copies": "1", "size": "3108", "license": "mit", "hash": 3741719083884593700, "line_mean": 46.8307692308, "line_max": 128, "alpha_frac": 0.6592664093, "autogenerated": false, "ratio": 3.776427703523694, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.49356941128236936, "avg_score": null, "num_lines": null }
___author__ = 'Asus' from IClassifier import IClassifier from Utils.utilities import load_stf from glove import Glove from scipy.spatial.distance import cosine from scipy.spatial.distance import euclidean import numpy as np class GloveClassifier(IClassifier): def __init__(self): self.GloveInstace = None self.Centroids = None def answerQuestion(self,wordAskedFor,question,possibilities): qV = self.GloveInstance.word_vectors[self.GloveInstance.dictionary[wordAskedFor]] pVs = [] cqV = self.Centroids[wordAskedFor] cpVs = [] maxSim = 1000 correct = -1 comment = '' for p in possibilities: pVs.append(self.GloveInstance.word_vectors[self.GloveInstance.dictionary[p]]) cpVs.append(self.Centroids[p]) for i,pV in enumerate(pVs): a = cosine(qV,pV) - np.power(cosine(pV,cpVs[i]),0.09) #a = 1/euclidean(qV,pV) nPtokens = question.split(' ') tokens = [] for token in nPtokens: tokens.append(token.strip().strip('.')) wAi = -1 for j,token in enumerate(tokens): if token == wordAskedFor: wAi = j for j in range(9): try: m1 = self.GloveInstance.word_vectors[self.GloveInstance.dictionary[tokens[wAi-j-1]]] a += (1/np.power(j+1,2.0))*(cosine(pV,m1) - np.power(cosine(pV,cpVs[i]),0.09)) except: nothing = 0 try: d1 = self.GloveInstance.word_vectors[self.GloveInstance.dictionary[tokens[wAi+j+1]]] a += (1/np.power(j+1,2.0))*(cosine(pV,d1) - np.power(cosine(pV,cpVs[i]),0.09)) except: nothing = 0 comment += '\n\t\t\tsim(' + wordAskedFor + ',' + possibilities[i] + ')=' +str(a) if a<maxSim: maxSim = a correct = i return (possibilities[correct],comment)
{ "repo_name": "dudenzz/word_embedding", "path": "SimilarityClassification/Classifiers/GloveCenteredESLExtendedClassifier.py", "copies": "1", "size": "1662", "license": "mit", "hash": -4429539864581486000, "line_mean": 32.9183673469, "line_max": 89, "alpha_frac": 0.6750902527, "autogenerated": false, "ratio": 2.642289348171701, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.38173796008717015, "avg_score": null, "num_lines": null }
__author__ = 'Asus' import argparse import gzip import math import numpy import re import sys import time import io from glove import Glove from copy import deepcopy import numpy as np isNumber = re.compile(r'\d+.*') def norm_word(word): if isNumber.search(word.lower()): return '---num---' elif re.sub(r'\W+', '', word) == '': return '---punc---' else: return word.lower() def calculate_size(filename): start = time.clock() max = 0 with io.open(filename, 'r', encoding='utf-8') as savefile: for i, line in enumerate(savefile): if(max%100000==0): print(max,'stamp',time.clock() - start) max = max+1 return max def load_stf(filename, no_d): start = time.clock() dct = {} size = calculate_size(filename); vectors = np.ndarray(shape=(size,no_d), dtype=float) iter = 0 with io.open(filename, 'r', encoding='utf-8') as savefile: for i, line in enumerate(savefile): tokens = line.strip().split(' ') if(iter%10000 == 0): print(iter, size, 'stamp',time.clock() - start) word = tokens[0] try: vectors[iter] = tokens[1:] except: print tokens if len(tokens)==300: word = '' vectors[iter] = tokens else: print 'oops something went terribly wrong with this vector' dct[word] = i iter = iter+1 # Infer word vectors dimensions. no_vectors = len(dct) print('stampnv',time.clock() - start) # Set up the model instance. 
instance = Glove() print('stampinst',time.clock() - start) instance.no_components = size print('stampnoc',time.clock() - start) instance.word_vectors = vectors print('stampwv',time.clock() - start) instance.word_biases = np.zeros(no_vectors) print('stampwb',time.clock() - start) instance.add_dictionary(dct) print('stampdict',time.clock() - start) return instance ''' Read all the word vectors and normalize them ''' def read_word_vecs(filename): wordVectors = {} if filename.endswith('.gz'): fileObject = gzip.open(filename, 'r') else: fileObject = open(filename, 'r') for line in fileObject: line = line.strip().lower() word = line.split()[0] wordVectors[word] = numpy.zeros(len(line.split())-1, dtype=float) for index, vecVal in enumerate(line.split()[1:]): wordVectors[word][index] = float(vecVal) ''' normalize weight vector ''' wordVectors[word] /= math.sqrt((wordVectors[word]**2).sum() + 1e-6) sys.stderr.write("Vectors read from: "+filename+" \n") return wordVectors ''' Write word vectors to file ''' def print_word_vecs(wordVectors, outFileName): sys.stderr.write('\nWriting down the vectors in '+outFileName+'\n') outFile = open(outFileName, 'w') for word, values in wordVectors.iteritems(): outFile.write(word+' ') for val in wordVectors[word]: outFile.write('%.4f' %(val)+' ') outFile.write('\n') outFile.close() ''' Read the PPDB word relations as a dictionary ''' def read_lexicon(filename, wordVecs): lexicon = {} for line in open(filename, 'r'): words = line.lower().strip().split() lexicon[norm_word(words[0])] = [norm_word(word) for word in words[1:]] return lexicon '''''' def retrofit_new(glove_vsm,lexicon,numIters): newWordVecs = deepcopy(glove_vsm) wvWords = set(glove_vsm.dictionary.keys()) loopVocab = wvWords.intersection(set(lexicon.keys())) for it in range(numIters): for word in loopVocab: wordNeighbours = set(lexicon[word]).intersection(wvWords) numN = len(wordNeighbours) if numN == 0: continue newVec = numN * glove_vsm.word_vectors[glove_vsm.dictionary[word]] 
for ppWord in wordNeighbours: newVec += newWordVecs.word_vectors[newWordVecs.dictionary[ppWord]] newWordVecs.word_vectors[newWordVecs.dictionary[word]] = newVec/(2*numN) return newWordVecs ''' Retrofit word vectors to a lexicon ''' def retrofit(wordVecs, lexicon, numIters): newWordVecs = deepcopy(wordVecs) wvVocab = set(newWordVecs.keys()) loopVocab = wvVocab.intersection(set(lexicon.keys())) for it in range(numIters): # loop through every node also in ontology (else just use data estimate) for word in loopVocab: wordNeighbours = set(lexicon[word]).intersection(wvVocab) numNeighbours = len(wordNeighbours) #no neighbours, pass - use data estimate if numNeighbours == 0: continue print word # the weight of the data estimate if the number of neighbours newVec = numNeighbours * wordVecs[word] # loop over neighbours and add to new vector (currently with weight 1) for ppWord in wordNeighbours: newVec += newWordVecs[ppWord] newWordVecs[word] = newVec/(2*numNeighbours) return newWordVecs if __name__=='__main__': parser = argparse.ArgumentParser() parser.add_argument("-i", "--input", type=str, default=None, help="Input word vecs") parser.add_argument("-l", "--lexicon", type=str, default=None, help="Lexicon file name") parser.add_argument("-o", "--output", type=str, help="Output word vecs") parser.add_argument("-n", "--numiter", type=int, default=10, help="Num iterations") parser.add_argument("-d", "--dims", type=int, default=10, help="Num dimensions") args = parser.parse_args() wordVecs = load_stf(args.input,args.dims) lexicon = read_lexicon(args.lexicon, wordVecs) numIter = int(args.numiter) outFileName = args.output ''' Enrich the word vectors using ppdb and print the enriched vectors ''' #print_word_vecs(retrofit(wordVecs, lexicon, numIter), outFileName) a = retrofit_new(wordVecs,lexicon,numIter) a.save(outFileName)
{ "repo_name": "dudenzz/word_embedding", "path": "CentroidsGeneration/retrofitNew_gloveInstance.py", "copies": "2", "size": "5863", "license": "mit", "hash": 6047411354934428000, "line_mean": 35.1913580247, "line_max": 90, "alpha_frac": 0.6373870032, "autogenerated": false, "ratio": 3.4047619047619047, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.992452927202554, "avg_score": 0.02352392718727277, "num_lines": 162 }
__author__ = 'Asus' import sys import getopt from QuestionHandling.QuestionBase import QuestionBase from Classifiers.GloveClassifier import GloveClassifier from Utils.utilities import load_stf from Utils.retrofitNew_gloveInstance import retrofit_new from Utils.retrofitNew_gloveInstance import read_lexicon import numpy as np help_message = ''' $ python questionAnswering.py -v <vectorsFile> -q <questionsFile> -d <dimensions> [-o outputFile] [-h] -v or --vectors to specify path to the word vectors input file in Glove text format -q or --questions to specify path to the questions input file in "Question...[questionWord]...Question.|answer1|answer2|answer3|answer4":correctAnswer" format -o or --output to optionally set path to output word sense vectors file (<vectorsFile>.results is used by default) -h or --help (this message is displayed) -r or --retro 1 to run the retrofit postprocessing on the vector space model 0 to skip ''' class Usage(Exception): def __init__(self, msg): self.msg = msg def readCommandLineInput(argv): try: try: #specify the possible option switches opts, _ = getopt.getopt(argv[1:], "hv:q:o:d:t:r:l:", ["help", "vectors=","questions=", "output=", "dimensions=", "type=","retro=","lexicon="]) except getopt.error, msg: raise Usage(msg) vectorsFile = None questionsFile = None outputFile = None type = "Turney" dims = 0 setOutput = False retro = 1 lexicon = None for option, value in opts: if option in ("-h", "--help"): raise Usage(help_message) elif option in ("-v", "--vectors"): vectorsFile = value elif option in ("-q", "--ontology"): questionsFile = value elif option in ("-o", "--output"): outputFile = value setOutput = True elif option in ("-d", "--dimensions"): dims = value elif option in ("-t", "--type"): type = value elif option in ("-r","--retro"): retro = value elif option in ("-l","--lexicon"): lexicon = value if (vectorsFile==None) or (questionsFile==None) or (dims==None): raise Usage(help_message) else: if not setOutput: outputFile = vectorsFile 
+ '.results' return (vectorsFile, questionsFile, dims, outputFile, type,retro, lexicon) except Usage, err: print str(err.msg) return 2 if __name__ == "__main__": commandParse = readCommandLineInput(sys.argv) #commandParse = ('glove.6B.50d.txt','toefl.qst',50,'util.results','TOEFL') if commandParse==2: sys.exit(2) qb = QuestionBase(commandParse[1]) print(commandParse) instance = load_stf(commandParse[0],int(commandParse[2])) lexicon = read_lexicon(commandParse[6], instance) print "starting retrofit procedure" instance_r = retrofit_new(instance, lexicon, 10) print "retrofit done" classifier = GloveClassifier() classifier.GloveInstance = instance_r #classifier.Centroids = np.load('//mnt/raid0/kuba/vsm/models/centroids_dir/ppdb_centroids').item() oFile = open(commandParse[3],'w+') qb.classify(classifier,oFile) oFile.close()
{ "repo_name": "dudenzz/word_embedding", "path": "SimilarityRegression/answerQuestions.py", "copies": "1", "size": "3309", "license": "mit", "hash": -8218101775180354000, "line_mean": 36.1797752809, "line_max": 158, "alpha_frac": 0.6285886975, "autogenerated": false, "ratio": 3.688963210702341, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9692204173503822, "avg_score": 0.025069546939703903, "num_lines": 89 }
__author__ = 'ASUS' class ContactHelper: def __init__(self, app): self.app = app def create(self, contact): wd = self.app.wd self.init_contact_creation() self.fill_contacts_form(contact) self.submit_contact_creation() def init_contact_creation(self): wd = self.app.wd wd.find_element_by_css_selector("div#nav a[href='edit.php']").click() def change_field_value(self, field_name, text): wd = self.app.wd if text is not None: wd.find_element_by_name(field_name).click() wd.find_element_by_name(field_name).clear() wd.find_element_by_name(field_name).send_keys(text) def fill_contacts_form(self, contact): wd = self.app.wd self.change_field_value("firstname", contact.firstname) self.change_field_value("middlename", contact.middlename) self.change_field_value("lastname", contact.lastname) self.change_field_value("nickname", contact.nickname) self.change_field_value("title", contact.title) self.change_field_value("company", contact.company) self.change_field_value("address", contact.address) self.change_field_value("home", contact.home) self.change_field_value("mobile", contact.mobile) self.change_field_value("work", contact.work) self.change_field_value("fax", contact.fax) def submit_contact_creation(self): wd = self.app.wd wd.find_element_by_css_selector("input[value='Enter']").click() def select_first_contact(self): wd = self.app.wd wd.find_element_by_name("selected[]").click() def delete_first_contact(self): wd = self.app.wd self.select_first_contact() # submit deletion wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click() # alert accept alert = wd.switch_to.alert alert.accept() def modify_first_contact(self, new_contact_data): wd = self.app.wd self.select_first_contact() # open modification form wd.find_element_by_xpath(".//*[@id='maintable']/tbody/tr[2]/td[8]/a/img").click() # fill contact form self.fill_contacts_form(new_contact_data) # submit modification wd.find_element_by_xpath(".//*[@id='content']/form[1]/input[1]").click()
{ "repo_name": "alen4ik/python_training", "path": "fixture/contact.py", "copies": "1", "size": "2381", "license": "apache-2.0", "hash": -918093524253070700, "line_mean": 36.21875, "line_max": 89, "alpha_frac": 0.6152876942, "autogenerated": false, "ratio": 3.5326409495548963, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9640478794871544, "avg_score": 0.001489969776670358, "num_lines": 64 }