text
stringlengths
256
65.5k
When I write to a file, using python open(filename, 'w+'), I get multiple lines of NULL written to the file in addition to the new text. Python 2.7.3 from sys import argv script, filename, random = argv my_file = open(filename, 'w+') added_line = raw_input("Type what you want to add: ") my_file.write(added_line) print my_file.read() my_file.close() I am teaching myself and practicing opening and writing to files (obviously, I guess). I can get the program to run and prompt me for the new text. I also tried open(filename, 'a'). What am I missing? Thank you.
I am trying to save an instance of a model but I get Invalid EmbeddedDocumentField item (1) where 1 is item's id (I think). Model is defined as class Graph(Document): user = StringField(max_length=50, required=True) title = StringField(max_length=500) description = StringField(max_length=1000) # field in error graph_nodes = ListField(EmbeddedDocumentField(Graph_Node)) procedure = ListField(DictField()) created_on = DateTimeField(default=datetime.datetime.now) def __unicode__(self): return self.title meta = { 'ordering': ['-created_on'], 'allow_inheritance': False, } class Graph_Node(EmbeddedDocument): id = StringField(required=True, max_length=30) button = StringField(max_length=40, required=True) parameter = StringField(max_length=50, required=False) values = ListField(StringField(max_length=30, required=False)) wires_out = ListField(StringField(max_length=30, required=False)) wires_in = ListField(StringField(max_length=30, required=False)) def __unicode__(self): return self.id meta = { 'allow_inheritance': False } def fill(self, args): for k in args.keys(): if k in self.__dict__['_data'].keys(): self.__dict__['_data'][k] = args[k] Call for save() method is the following: Graph(user=request.user.username, title=title, description=description, procedure=procedura, graph_nodes=gnlist).save() And exception is thrown on save, not on Graph creation. gnlist is (from pydev debugger) list: [<Graph_Node: start>, <Graph_Node: 1>, <Graph_Node: end>] and gnlist[1] seems to be well formed (I'll post more details if needed) I've tried to trace the query on mongodb but it doesn't seem to be profiled, however output follows (in case help me to purge it from irrelevant logs): { "ts" : ISODate("2012-10-26T18:25:07.492Z"), "op" : "query", "ns" : "analysis.system.indexes", "query" : { "expireAfterSeconds" : { "$exists" : true } }, "ntoreturn" : 0, "ntoskip" : 0, "nscanned" : 0, "keyUpdates" : 0, "numYield" : 0, "lockStats" : { "timeLockedMicros" : { "r" : NumberLong(248), "w" : NumberLong(0) }, "timeAcquiringMicros" : { "r" : NumberLong(2), "w" : NumberLong(4) } }, "nreturned" : 0, "responseLength" : 20, "millis" : 0, "client" : "0.0.0.0", "user" : "" } { "ts" : ISODate("2012-10-26T18:26:07.505Z"), "op" : "query", "ns" : "analysis.system.indexes", "query" : { "expireAfterSeconds" : { "$exists" : true } }, "ntoreturn" : 0, "ntoskip" : 0, "nscanned" : 0, "keyUpdates" : 0, "numYield" : 0, "lockStats" : { "timeLockedMicros" : { "r" : NumberLong(680), "w" : NumberLong(0) }, "timeAcquiringMicros" : { "r" : NumberLong(4), "w" : NumberLong(7) } }, "nreturned" : 0, "responseLength" : 20, "millis" : 0, "client" : "0.0.0.0", "user" : "" } { "ts" : ISODate("2012-10-26T18:26:25.803Z"), "op" : "query", "ns" : "analysis.system.profile", "query" : { }, "ntoreturn" : 0, "ntoskip" : 0, "nscanned" : 2, "keyUpdates" : 0, "numYield" : 0, "lockStats" : { "timeLockedMicros" : { "r" : NumberLong(62), "w" : NumberLong(0) }, "timeAcquiringMicros" : { "r" : NumberLong(7), "w" : NumberLong(3) } }, "nreturned" : 2, "responseLength" : 758, "millis" : 0, "client" : "127.0.0.1", "user" : "" } { "ts" : ISODate("2012-10-26T18:27:07.506Z"), "op" : "query", "ns" : "analysis.system.indexes", "query" : { "expireAfterSeconds" : { "$exists" : true } }, "ntoreturn" : 0, "ntoskip" : 0, "nscanned" : 0, "keyUpdates" : 0, "numYield" : 0, "lockStats" : { "timeLockedMicros" : { "r" : NumberLong(698), "w" : NumberLong(0) }, "timeAcquiringMicros" : { "r" : NumberLong(6), "w" : NumberLong(7) } }, "nreturned" : 0, "responseLength" : 20, "millis" : 0, "client" : "0.0.0.0", "user" : "" } { "ts" : ISODate("2012-10-26T18:27:18.817Z"), "op" : "query", "ns" : "analysis.system.profile", "query" : { }, "ntoreturn" : 0, "ntoskip" : 0, "nscanned" : 4, "keyUpdates" : 0, "numYield" : 0, "lockStats" : { "timeLockedMicros" : { "r" : NumberLong(121), "w" : NumberLong(0) }, "timeAcquiringMicros" : { "r" : NumberLong(11), "w" : NumberLong(8) } }, "nreturned" : 4, "responseLength" : 1463, "millis" : 0, "client" : "127.0.0.1", "user" : "" } { "ts" : ISODate("2012-10-26T18:28:02.570Z"), "op" : "query", "ns" : "analysis.system.profile", "query" : { }, "ntoreturn" : 0, "ntoskip" : 0, "nscanned" : 5, "keyUpdates" : 0, "numYield" : 0, "lockStats" : { "timeLockedMicros" : { "r" : NumberLong(112), "w" : NumberLong(0) }, "timeAcquiringMicros" : { "r" : NumberLong(13), "w" : NumberLong(7) } }, "nreturned" : 5, "responseLength" : 1799, "millis" : 0, "client" : "127.0.0.1", "user" : "" } { "ts" : ISODate("2012-10-26T18:28:07.507Z"), "op" : "query", "ns" : "analysis.system.indexes", "query" : { "expireAfterSeconds" : { "$exists" : true } }, "ntoreturn" : 0, "ntoskip" : 0, "nscanned" : 0, "keyUpdates" : 0, "numYield" : 0, "lockStats" : { "timeLockedMicros" : { "r" : NumberLong(680), "w" : NumberLong(0) }, "timeAcquiringMicros" : { "r" : NumberLong(5), "w" : NumberLong(7) } }, "nreturned" : 0, "responseLength" : 20, "millis" : 0, "client" : "0.0.0.0", "user" : "" } { "ts" : ISODate("2012-10-26T18:28:11.608Z"), "op" : "query", "ns" : "analysis.system.profile", "query" : { }, "ntoreturn" : 0, "ntoskip" : 0, "nscanned" : 7, "keyUpdates" : 0, "numYield" : 0, "lockStats" : { "timeLockedMicros" : { "r" : NumberLong(117), "w" : NumberLong(0) }, "timeAcquiringMicros" : { "r" : NumberLong(10), "w" : NumberLong(7) } }, "nreturned" : 7, "responseLength" : 2504, "millis" : 0, "client" : "127.0.0.1", "user" : "" } { "ts" : ISODate("2012-10-26T18:28:51.589Z"), "op" : "command", "ns" : "analysis.$cmd", "command" : { "profile" : 0 }, "ntoreturn" : 1, "keyUpdates" : 0, "numYield" : 0, "lockStats" : { "timeLockedMicros" : { "r" : NumberLong(0), "w" : NumberLong(24) }, "timeAcquiringMicros" : { "r" : NumberLong(0), "w" : NumberLong(11) } }, "responseLength" : 58, "millis" : 0, "client" : "127.0.0.1", "user" : "" } Thanks
JavaScript stevenhu — 2012-09-21T16:02:46-04:00 — #1 I am trying to delete a row in a database via external JS, but don't think the syntax is right. What I have right now is my best guess as to what it should be. The database rows show an ID, filename, and title (context: a bookmarked or favorite page), and when a query is made, it shows two buttons side by side for each row; the left button shows the title and when tapped goes to the page, while the right button is used to remove the row. This code successfully generates the buttons (via innerHtml): function querySuccess(tx, results) { var len = results.rows.length; console.log("BOOKMARKS table: " + len + " rows found."); for (var i=0; i<len; i++) { <font color='"#FF0000"'>rowId = results.rows.item(i).id;</font> document.getElementById("output").innerHTML +="<div class='row'>" + "<a href='" + results.rows.item(i).filename + "'><input type='button' class='buttonBM' value='" + results.rows.item(i).title + "'></a></div>" + "<div class='buttonRemove'><input type='button' class='buttonEdit' value='Remove' onclick='removeRow('" + <font color='"#FF0000"'>rowId</font> + "')'></a></div>"; } } What I need to do is take the rowId and use it to delete the correct row in the DB, and I do it like this: function removeRow() { db = window.openDatabase("Database", "1.0", "Bookmarks", 200000); <font color='"#FF0000"'>var rowId;</font> db.transaction(function(tx) { tx.executeSql("delete from BOOKMARKS where id='" + <font color='"#FF0000"'>rowId</font> + "'"); }, errorCB, successCB()); alert('Bookmark removed.'); } However, it fails to delete the row. Google Tools gives the error as "Uncaught SyntaxError: Unexpected token }" which is on the external HTML page (which could be any page in the app containing the Add Bookmark button). The error points to the <HTML> line, so it could be anything. Below is the above code in context: // Add row via button on external page function insertRow() { db = window.openDatabase("Database", "1.0", "Bookmarks", 200000); db.transaction(function(tx) { var id, filename = window.location.pathname, title = document.title; tx.executeSql("insert into BOOKMARKS(id, filename, title) values(?,?,?)", [id, filename, title]); }, errorCB, successCB()); alert('Bookmark added.'); } // Display DB results (onLoad) function queryDB(tx) { tx.executeSql("SELECT * FROM BOOKMARKS", [], querySuccess, errorCB); } function querySuccess(tx, results) { var len = results.rows.length; console.log("BOOKMARKS table: " + len + " rows found."); for (var i=0; i<len; i++) { rowId = results.rows.item(i).id; document.getElementById("output").innerHTML +="<div class='row'>" + "<a href='" + results.rows.item(i).filename + "'><input type='button' class='buttonBM' value='" + results.rows.item(i).title + "'></a></div>" + "<div class='buttonRemove'><input type='button' class='buttonEdit' value='Remove' onclick='removeRow('" + rowId + "')'></a></div>"; } } // Remove row via button on this page function removeRow() { db = window.openDatabase("Database", "1.0", "Bookmarks", 200000); var rowId; db.transaction(function(tx) { tx.executeSql("delete from BOOKMARKS where id='" + rowId + "'"); }, errorCB, successCB()); alert('Bookmark removed.'); } // Transaction error callback (corrected per Sitepoint.com post) function errorCB(tx, err) { alert('Error processing SQL: ' + err); } Of course, the Google error could be pointing to an entirely different part of the two pages! paul_wilkins — 2012-09-21T17:05:07-04:00 — #2 I think that the problem is in some other part of the code. There aren't any unexpected }'s in that code. Not even after using jslint to help tidy it up. The only other issue there is that successCB() isn't correct, it should be just a reference to a function instead, like with the errorCB function. The successCB one is optional though, so you can remove the function and the reference to it if you don't want to use it. // Transaction error callback (corrected per Sitepoint.com post) function errorCB(tx, err) { alert('Error processing SQL: ' + err); } function successCB() { } // Add row via button on external page function insertRow() { var db = window.openDatabase("Database", "1.0", "Bookmarks", 200000); db.transaction(function (tx) { var id, filename = window.location.pathname, title = document.title; tx.executeSql("insert into BOOKMARKS(id, filename, title) values(?,?,?)", [id, filename, title]); }, errorCB, successCB); alert('Bookmark added.'); } function querySuccess(tx, results) { var len = results.rows.length, i, rowId; console.log("BOOKMARKS table: " + len + " rows found."); for (i = 0; i < len; i += 1) { rowId = results.rows.item(i).id; document.getElementById("output").innerHTML += "<div class='row'>" + "<a href='" + results.rows.item(i).filename + "'><input type='button' class='buttonBM' value='" + results.rows.item(i).title + "'></a></div>" + "<div class='buttonRemove'><input type='button' class='buttonEdit' value='Remove' onclick='removeRow('" + rowId + "')'></a></div>"; } } // Display DB results (onLoad) function queryDB(tx) { tx.executeSql("SELECT * FROM BOOKMARKS", [], querySuccess, errorCB); } // Remove row via button on this page function removeRow() { var db = window.openDatabase("Database", "1.0", "Bookmarks", 200000), rowId; db.transaction(function (tx) { tx.executeSql("delete from BOOKMARKS where id='" + rowId + "'"); }, errorCB, successCB); alert('Bookmark removed.'); } stevenhu — 2012-09-21T17:13:09-04:00 — #3 Thanks, Paul. Here is the HTML the error code points to. The external JS' innerHtml is written to the span=output part: <!DOCTYPE html> <html> ==> Uncaught SyntaxError: Unexpected token } <== <head> <title>Bookmarks 3 html</title> <script type="text/javascript" charset="utf-8" src="cordova-1.7.0.js"></script> <script type="text/javascript" charset="utf-8" src="original4.js"></script> <script> </script> <style type="text/css"> body {margin:0; padding:0; background-color: #aaa;} div#wrapper{width:100%;padding:.5em;} div.row{font-family:Helvetica, Arial, sans-serif; color: #333; margin:0;padding:0; } /* input */ input[type="button"] { background-color:#fff;font-family:Helvetica, Arial, sans-serif; color: #333; text-align:center;margin:0;padding:1em;float:left; -webkit-border-radius: 5px; -moz-border-radius: 5px; border-radius: 5px; -webkit-box-shadow: 5px 5px 4px 0px #888; /* Safari 3-4, iOS 4.0.2 &#8211; 4.2, Android 2.3+ */ box-shadow: 5px 5px 4px 0px #888; /* Opera 10.5, IE9, Firefox 4+, Chrome 6+, iOS 5 */ border:1px #666 solid; } input[type="button"]:hover {background-color:#ddd;} .buttonBM { width:70%; } .buttonEdit { width:28%; } .addBM {width:100%; float:left;clear:both;margin:1em;} </style> </head> <body onload="onDeviceReady()"> <div id="wrapper"><span id="output"></span></div> <br> <a href="original3.html"><p style="padding: .5em;line-height:4em;">Back</p></a> </body> </html> stevenhu — 2012-09-21T17:37:11-04:00 — #4 Your version of the code places my functions in a different order. Can you explain why? paul_wilkins — 2012-09-21T18:17:28-04:00 — #5 Because it's not good to use a function before it has been defined. When using a function statement you can mostly get away with it because functions within the same scope are defined first, after which scripting it processed. But if you ever use function expressions then the problem becomes quite obvious. someFunc('Did this run?'); // nope var someFunc = function (str) { alert(str); } So declare what you need (the function in this case) before making use of it. That helps to prevent all sorts of problems. var someFunc = function (str) { alert(str); } someFunc('Did this run?'); // yes Because the ordering does affect function statements, and function statements are used interchangably with function expressions, it's best if the one ordering rule is applied across both of them. This helps to simplify things, and allows the code to more easily be improved upon at later stages as well. paul_wilkins — 2012-09-21T18:31:27-04:00 — #6 Remove everything you can until you are left with the core code that still causes the problem. How much of it can you remove while still retaining the same problem? stevenhu — 2012-09-21T18:50:16-04:00 — #7 I did the following one at a time, cutting, saving, testing, then replacing to remove something else: I removed the entire CSS style section. Still errors. I removed the Back link, still errors. Removed <HTML>. Still errors. Removed <!DOCTYPE html>. Still errors. Removed empty <script>tags. Still errors. That leaves the innerHtml span id = output line. Can't take that out, or there are no buttons to press! I suppose, in conclusion, the error is happening in the innerHtml output? paul_wilkins — 2012-09-21T19:36:54-04:00 — #8 Currently we're not focused on having a working system. Right now we're focused on figuring out where that error with the brace is coming from. Keep on chopping things out. stevenhu — 2012-09-24T11:24:14-04:00 — #9 If I remove the innerHtml line, then there is no content on the page, so I get the error, Uncaught TypeError: Cannot read property 'innerHTML' of null The error only occurs after I click on one of the Remove buttons, which is rendered inside the innerHtml. I separated the css and js content into their own files. HTML: <!DOCTYPE html> <html> <head> <title>Bookmarks 3 html</title> <script type="text/javascript" charset="utf-8" src="cordova-1.7.0.js"></script> <script type="text/javascript" charset="utf-8" src="original4.js"></script> <link rel="stylesheet" type="text/css" media="screen" href="bookmarks3.css"> <style type="text/css"> </style> </head> <body onload="onDeviceReady()"> <div id="wrapper"><span id="output"></span></div> <a href="original3.html"><p style="padding: .5em;">Back</p></a> </body> </html> CSS: body {margin:0; padding:0; background-color: #aaa;} div#wrapper{width:100%;padding:.5em;} div.row{font-family:Helvetica, Arial, sans-serif; color: #333; margin:0;padding:0; } div.bookmark {margin:.5em;} p{text-align:left;} p.title {font-weight:bold;font-size:1em;text-align:center; color:#000} a {color:#666} /* input */ input[type="button"] { background-color:#fff;font-family:Helvetica, Arial, sans-serif; color: #333; text-align:left;margin:0;padding:1em;float:left; -webkit-border-radius: 5px; -moz-border-radius: 5px; border-radius: 5px; -webkit-box-shadow: 5px 5px 4px 0px #888; /* Safari 3-4, iOS 4.0.2 – 4.2, Android 2.3+ */ box-shadow: 5px 5px 4px 0px #888; /* Opera 10.5, IE9, Firefox 4+, Chrome 6+, iOS 5 */ border:1px #666 solid; } .buttonBM { width:67%; } .buttonEdit { width:33%; } .addBM {width:100%; float:left;clear:both;margin:1em;} JS: /* Helpful info: base: http://docs.phonegap.com/en/2.0.0/cordova_storage_storage.md.html#Storage modifications: http://tv.adobe.com/watch/adc-presents/phonegap-storage-api/eng/ */ // Wait for Cordova to load document.addEventListener("deviceready", onDeviceReady, false); var db; function onDeviceReady() { db = window.openDatabase("Database", "1.0", "Bookmarks", 200000); db.transaction(populateDB, errorCB, successCB); } // Transaction error callback (corrected per Sitepoint.com post) function errorCB(tx, err) { alert('Error processing SQL: ' + err); } // Transaction success callback function successCB() { var db = window.openDatabase("Database", "1.0", "Bookmarks", 200000); db.transaction(queryDB, errorCB); } // Display the rows on this page (onLoad) function querySuccess(tx, results) { var len = results.rows.length; console.log("BOOKMARKS table: " + len + " rows found."); for (var i=0; i<len; i++) { rowId = results.rows.item(i).id; document.getElementById("output").innerHTML +="<div class='row'>" + "<a href='" + results.rows.item(i).filename + "'><input type='button' class='buttonBM' value='" + results.rows.item(i).title + "'></a></div>" + "<div class='buttonRemove'><input type='button' class='buttonEdit' value='Remove' onclick='removeRow('" + rowId + "')'></a></div>"; } document.getElementById("output").innerHTML +="<div class='row'><p style='line-height:2em;'>You have " + len + " bookmarks.</p></div>"; } // Query the DB function queryDB(tx) { tx.executeSql("SELECT * FROM BOOKMARKS", [], querySuccess, errorCB); } // Remove row via button on this page function removeRow() { db = window.openDatabase("Database", "1.0", "Bookmarks", 200000); var rowId; db.transaction(function(tx) { tx.executeSql("delete from BOOKMARKS where id='" + rowId + "'"); }, errorCB, successCB()); alert('Bookmark removed.'); } // Add row via button on external page function insertRow() { db = window.openDatabase("Database", "1.0", "Bookmarks", 200000); db.transaction(function(tx) { var id, filename = window.location.pathname, title = document.title; tx.executeSql("insert into BOOKMARKS(id, filename, title) values(?,?,?)", [id, filename, title]); }, errorCB, successCB()); alert('Bookmark added.'); } // Create the database function populateDB(tx) { tx.executeSql('CREATE TABLE IF NOT EXISTS BOOKMARKS (id INTEGER NOT NULL, filename TEXT NOT NULL, title TEXT NOT NULL)'); } // Cordova is ready function onDeviceReady() { var db = window.openDatabase("Database", "1.0", "Bookmarks", 200000); db.transaction(populateDB, errorCB, successCB); } stevenhu — 2012-09-24T13:11:25-04:00 — #10 In Google Tools, I clicked on the Elements tab and examined the innerHtml content as output to the screen. I think I found the problem. The innerHtml line has (copying and pasting): "<div class='buttonRemove'><input type='button' class='buttonEdit' value='Remove' onclick='removeRow('" + rowId + "')'></a></div>"; The Elements tab has (copying and pasting): <input type="button" class="buttonEdit" value="Remove" onclick="removeRow(" undefined')'=""> The rowId is not rendering properly. Also, the copy/paste from google Tools Elements tab shows more than what I see on the Elements tab on screen. On screen, I don't see undefined')'=""> but undefined')'> It looks like we need to figure out why the variable is not being populated. On a separate HTML page, which contains the button to execute the code to enter a new row, the code used is: <div class="bookmark"><input type="button" class="addBM" value="Add Bookmark" onclick="insertRow('100', window.location.pathname, document.title)"></div> Is the syntax wrong? The pathname and title are all appearing correctly. Would the following function understand the order of the three variables, and understand the first one is id?: function insertRow() { db = window.openDatabase("Database", "1.0", "Bookmarks", 200000); db.transaction(function(tx) { var id, filename = window.location.pathname, title = document.title; tx.executeSql("insert into BOOKMARKS(id, filename, title) values(?,?,?)", [id, filename, title]); }, errorCB, successCB()); alert('Bookmark added.'); } stevenhu — 2012-09-24T13:47:43-04:00 — #11 You know, since I am using an ID number to reference the row to delete, but it isn't showing up, BUT the title is showing up, I might as well remove the ID completely and substitute with the title, which IS showing up. So far, that isn't working either, for the output is generated as <input type="button" class="buttonEdit" value="Remove" onclick="removeRow(" original="" 3="" html')'> even though the JS line in innerHtml is: "<div class='buttonRemove'><input type='button' class='buttonEdit' value='Remove' onclick='removeRow('"+results.rows.item(i).title+"')'></div>"; The quotes are still not right, and the generated output adds a space before the row title and quotes within the title. (I also tried it with the filename, but it gave a long pathname rather than just the filename. I don't see a window.location code that retrieves only the filename.) stevenhu — 2012-09-24T15:57:54-04:00 — #12 You know, since I am using an ID number to reference the row to delete, but it isn't showing up, BUT the title is showing up, I might as well remove the ID completely and substitute with the title, which IS showing up. So far, that isn't working either, for the output is generated as <input type="button" class="buttonEdit" value="Remove" onclick="removeRow(" original="" 3="" html')'> even though the JS line in innerHtml is: "<div class='buttonRemove'><input type='button' class='buttonEdit' value='Remove' onclick='removeRow('"+results.rows.item(i).title+"')'></div>"; stevenhu — 2012-09-24T18:34:11-04:00 — #13 Now I've got the removeRow to show the title without the ="" within the words, and the database is showing correct entries. But I am still getting the "unexpected token }" error. paul_wilkins — 2012-09-24T19:21:23-04:00 — #14 From the code that you had in post #9 I'm seeing that it should just be the quotes that are causing the issue. This line seems to be the problem: "<div class='buttonRemove'><input type='button' class='buttonEdit' value='Remove' onclick='removeRow('" + rowId + "')'><font color='"red"'></a></font></div>"; By the way - the </a> from the javascript code part shouldn't be there either. It results in this HTML code: <div class="buttonRemove"><input type="button" class="buttonEdit" value="Remove" onclick="removeRow(<font color='"red"'>"</font> row1<font color='"red"'>')'=""</font>></div> See how the quotes are all messed up? What you need there instead is: onclick="removeRow('row1')" Notice though that even when we start the javascript string with single quotes, that by the time we get through the HTML double quotes and once more to the single quotes required for the removeRow part, that we're going to end up closing the string instead. So, for those innermost single quotes we'll need to escape them as \' instead. '<div class="buttonRemove"><input type="button" class="buttonEdit" value="Remove" onclick="removeRow(\\'' + rowId + '\\')"></div>'; That seems to work now. stevenhu — 2012-09-25T17:08:17-04:00 — #15 Wow, this is very good! Clicking on Remove now fires the removeRow function. Unfortunately, the screen is refreshing and inserting all the previous rows under the table, so the table is showing up multiplied by the number of times one clicks on Remove. We need to terminate the script so this doesn't happen. However, I'm not sure that exit(); is the approved way to do this. Thanks again, Paul! stevenhu — 2012-09-25T17:20:06-04:00 — #16 Actually, I click on Remove, and see the "Bookmark removed" alert, then the row is NOT REMOVED but repeated below the "You have X Bookmarks" message. If I go back a page, then return to the bookmarks page, the bookmarks I removed are gone. For some reason, when I comment out the successCB as you suggest, the bookmarks don't appear on the page (the area is just blank, showing the background). Uncommenting the lines causes them to appear again. Here is the current JS code: /* Helpful info: base: http://docs.phonegap.com/en/2.0.0/cordova_storage_storage.md.html#Storage modifications: http://tv.adobe.com/watch/adc-presents/phonegap-storage-api/eng/ */ // Wait for Cordova to load document.addEventListener("deviceready", onDeviceReady, false); var db; function onDeviceReady() { db = window.openDatabase("Database", "1.0", "BOOKMARKS3", 200000); db.transaction(populateDB, errorCB, successCB); } // Create the database function populateDB(tx) { tx.executeSql('CREATE TABLE IF NOT EXISTS BOOKMARKS3 (id INTEGER NOT NULL, filename TEXT NOT NULL, title TEXT NOT NULL)'); } // 1. Transaction error callback (corrected per Sitepoint.com post) function errorCB(tx, err) { alert('Error processing SQL: ' + err); } // 2. Transaction success callback function successCB() { var db = window.openDatabase("Database", "1.0", "BOOKMARKS3", 200000); db.transaction(queryDB, errorCB); } // 3. Add row via button on external page function insertRow() { db = window.openDatabase("Database", "1.0", "BOOKMARKS3", 200000); db.transaction(function(tx) { var id, filename = window.location.pathname, title = document.title; tx.executeSql("insert into BOOKMARKS3(id, filename, title) values(?,?,?)", [id, filename, title]); }, errorCB, successCB()); alert('Bookmark added.'); } // 4. Display the rows on this page (onLoad) function querySuccess(tx, results) { var len = results.rows.length; console.log("BOOKMARKS3 table: " + len + " rows found."); for (var i=0; i<len; i++) { rowId = results.rows.item(i).id; document.getElementById("output").innerHTML +="<div class='row'>" + "<a href='" + results.rows.item(i).filename + "'><input type='button' class='buttonBM' value='" + results.rows.item(i).title + "'></a></div>" + '<div class="buttonRemove"><input type="button" class="buttonEdit" value="Remove" onclick="removeRow(\\'' + rowId + '\\')"></div>'; } document.getElementById("output").innerHTML +="<div class='row'><p style='line-height:2em;'>You have " + len + " BOOKMARKS3.</p></div>"; } // 5. Query the DB function queryDB(tx) { tx.executeSql("SELECT * FROM BOOKMARKS3", [], querySuccess, errorCB); } // 6. Remove row via button on this page function removeRow() { db = window.openDatabase("Database", "1.0", "BOOKMARKS3", 200000); var rowId; db.transaction(function(tx) { tx.executeSql("delete from BOOKMARKS3 where id='" + rowId + "'"); }, errorCB, successCB()); alert('Bookmark removed.'); } exit; // Cordova is ready function onDeviceReady() { var db = window.openDatabase("Database", "1.0", "BOOKMARKS3", 200000); db.transaction(populateDB, errorCB, successCB); } stevenhu — 2012-09-25T17:49:02-04:00 — #17 I solved the problem by replacing the successCB() with a doNothing() function to the removeRow(). Now it does not add a new row, and refreshes the page to show the new status: function removeRow() { db = window.openDatabase("Database", "1.0", "BOOKMARKS3", 200000); var rowId; db.transaction(function(tx) { tx.executeSql("delete from BOOKMARKS3 where id='" + rowId + "'"); }, errorCB, doNothing()); alert('Bookmark removed.'); } function doNothing() { window.location.href=window.location.href; } Is this syntax OK, or is this unconventional? stevenhu — 2012-09-25T18:13:10-04:00 — #18 You know, when I press the button to add that page's title and filename to the database, it works because of this in the page's head: <script type="text/javascript" charset="utf-8" src="original4.js"></script> However, that means ALL the JS functions on that page try to execute whenever I first open the page AND after I click on the Add Bookmark button. Obviously, I'm going to turn off the debugging alerts when I'm done, but is there a more "correct" way of doing this? The bookmarking code will appear on over 200 pages in a book app I'm working on.
For fun (and to learn...), I'm trying to write a program that takes 3 inputs, a, b and c, and returns the solution to the quadratic formula. Right now, I'm getting an error saying StringVar instance has no attribute 'trunc'I initially had my entry variables set up as IntVar and got the same type of error, with IntVar instance not having the trunc attribute. import Tkinter from math import * def quadprogram(a,b,c): x1=((-1*int(b)+(sqrt((int(b)**2)-(4*int(a)*int(c))))/2*int(a))) x2=((-1*int(b)-(sqrt((int(b)**2)-(4*int(a)*int(c))))/2*int(a))) my_answer="(x+"+str(-1*x1)+")(x+"+str(-1*x2)+")" xinputs= "x1= "+ str(x1) + " and x2= "+str(x2) answers= my_answer+", "+ xinputs return answers class quadratic(Tkinter.Tk): def __init__(self,parent): Tkinter.Tk.__init__(self,parent) self.parent=parent self.initialize() def initialize(self): self.grid() self.entryVariableA = Tkinter.StringVar() self.entry= Tkinter.Entry(self, textvariable= self.entryVariableA) self.entry.grid(column=0,row=2,sticky="W") self.entry.bind("<Return>",self.OnPressEnter) self.entryVariableA.set(u"a") self.entryVariableB = Tkinter.StringVar() self.entry= Tkinter.Entry(self, textvariable= self.entryVariableB) self.entry.grid(column=0,row=3,sticky="W") self.entry.bind("<Return>",self.OnPressEnter) self.entryVariableB.set(u"b") self.entryVariableC = Tkinter.StringVar() self.entry= Tkinter.Entry(self, textvariable= self.entryVariableC) self.entry.grid(column=0,row=4,sticky="W") self.entry.bind("<Return>",self.OnPressEnter) self.entryVariableC.set(u"c") button = Tkinter.Button(self, text= u"Solve!", command=self.OnButtonClick) button.grid(column=1,row=5) self.labelVariable= Tkinter.StringVar() self.Eq_labelVariable=Tkinter.StringVar() self.Ans_labelVariable=Tkinter.StringVar() label= Tkinter.Label(self, textvariable=self.labelVariable,anchor= 'w', fg='black',bg='blue') label.grid(column=0,row=0, columnspan=2, sticky='EW') self.labelVariable.set(u"Enter Equation Here...") Eq_label=Tkinter.Label(self, textvariable=self.Eq_labelVariable,anchor='w', fg='white', bg='blue') Eq_label.grid(column=0, row=1, columnspan=2, sticky='EW') self.Eq_labelVariable.set(u"For A(x^2)+B(x)+C") Ans_label=Tkinter.Label(self, textvariable=self.Ans_labelVariable,anchor='w', fg='black', bg='green') Ans_label.grid(column=0, row=1, columnspan=2, sticky='EW') self.Ans_labelVariable.set(u"Answer will show here") self.grid_columnconfigure(0,weight=1) self.resizable(True, False) self.update() self.geometry(self.geometry()) self.entry.focus_set() self.entry.selection_range(0,Tkinter.END) def OnButtonClick(self): self.Ans_labelVariable.set(quadprogram(self.entryVariableA,self.entryVariableB,self.entryVariableC)+"(this program works?!)") self.entry.focus_set() self.entry.selection_range(0, Tkinter.END) def OnPressEnter(self,event): self.labelVariable.set(self.entryVariableA.get()) if __name__=='__main__': app=quadratic(None) app.title("Quadratic Solver") app.geometry("300x300") app.mainloop() Anyone have any thoughts on avoiding that? Thanks for the help...
This program has been disqualified. Author Rekrul Submission date 2011-07-18 09:00:05.001192 Rating 7687 Matches played 1336 Win rate 73.5 import random SIZE = 5 WEIGHT_FACTOR = 6. class HistoryNode(object): def __init__(self, parent=None): if parent is not None: self.depth = parent.depth + 1 else: self.depth = 0 self.children = {'RR': None, 'RS': None, 'RP': None, 'SR': None, 'SS': None, 'SP': None, 'PR': None, 'PS': None, 'PP': None} self.distribution = {'RR': 0, 'RS': 0, 'RP': 0, 'SR': 0, 'SS': 0, 'SP': 0, 'PR': 0, 'PS': 0, 'PP': 0} def new_move(self, input): last_move = input[0:2] if len(input) > 2: if self.children[last_move] is None: self.children[last_move] = HistoryNode(self) self.children[last_move].new_move(input[2:]) else: self.distribution[last_move] = self.distribution[last_move] * 0.975 + 1 def predict(self, input): if len(input) > 0: last_move = input[0:2] if self.children[last_move] is not None: return self.children[last_move].predict(input[2:]) else: return None else: return self.distribution class HistoryTree(object): def __init__(self): self.root = HistoryNode() self.input = '' def new_move(self, move): self.input += move if len(self.input) > SIZE * 2: self.input = self.input[-SIZE * 2:] for i in xrange(2, len(self.input) + 1, 2): self.root.new_move(self.input[-i:]) def predict(self): results = {'R':0, 'S':0, 'P':0} for i in xrange(2, len(self.input) + 1, 2): res = self.root.predict(self.input[-i:]) if res is not None: for key in res: results[key[1]] += res[key] * (WEIGHT_FACTOR ** i) d = results e = d.keys() e.sort(cmp=lambda a, b: cmp(d[a], d[b])) return e[-1] def predict_i(self, i): results = {'R':0, 'S':0, 'P':0} for depth in xrange(i, -1, -1): res = self.root.predict(self.input[-depth * 2:]) if res is not None: break if res is None: return random.choice(["R", "P", "S"]) else: for key in res: results[key[1]] += res[key] d = results e = d.keys() e.sort(cmp=lambda a, b: cmp(d[a], d[b])) return e[-1] if input == '': history_tree_me = HistoryTree() history_tree_him = HistoryTree() output = random.choice(["R", "P", "S"]) meta_predictor = [output] * (6 * SIZE + 6 + 6 + 1) metascore = [0] * (6 * SIZE + 6 + 6 + 1) win = ['RS', 'SP', 'PR'] lost = ['SR', 'PS', 'RP'] prediction_me_i = [output] * SIZE prediction_him_i = [output] * SIZE possible_moves = ['R', 'P', 'S'] else: history_tree_me.new_move(output + input) history_tree_him.new_move(input + output) for idx in xrange(len(metascore)): if meta_predictor[idx] + input in win: metascore[idx] = metascore[idx] * 0.9 + 1 elif meta_predictor[idx] + input in lost: metascore[idx] = metascore[idx] * 0.9 - 1 else: metascore[idx] = metascore[idx] * 0.9 - 0.34 prediction_me = history_tree_me.predict() prediction_him = history_tree_him.predict() for j in xrange(SIZE): prediction_me_i[j] = history_tree_me.predict_i(j) prediction_him_i[j] = history_tree_him.predict_i(j) for i in xrange(3): move_me = possible_moves[(possible_moves.index(prediction_me) + i) % 3] move_him = possible_moves[(possible_moves.index(prediction_him) + i) % 3] meta_predictor[i] = move_me meta_predictor[i + 3] = move_him for j in xrange(SIZE): move_me = possible_moves[(possible_moves.index(prediction_me_i[j]) + i) % 3] move_him = possible_moves[(possible_moves.index(prediction_him_i[j]) + i) % 3] meta_predictor[i + (j + 1) * 6] = move_me meta_predictor[i + 3 + (j + 1) * 6] = move_him tmp = [] for j in range(6): for i in range(0, len(meta_predictor) - 8, 6): tmp.append(meta_predictor[i + j]) occ = [tmp.count('R'), tmp.count('P'), tmp.count('S')] meta_predictor[-7 + j] = possible_moves[occ.index(max(occ))] meta_predictor[-1] = random.choice(possible_moves) best_predictor = metascore.index(max(metascore)) output = meta_predictor[best_predictor]
The last six months have involved a lot more writing of code than the previous couple of years. I’ve been tweeting little things I learn on a daily basis and thought I’d look back on this week. format() A reocurring problem with report writing is getting numbers formatted properly for the occassion. I discovered ‘format’ in Python this week: print "{0:.2f}%".format(float(1)/3 * 100) That prints out a float to 2 decimal places. I looked around and Dive Into Python has similar syntax, but without the format() function. So, the equivalent would be: print "blah %.2f" % (float(1) / 3 * 100) So, why use one over the other? A user on StackOverflow suggested that compatibility with 2.5 might drive a person to use ‘%’ over ‘format()’, but otherwise, the poster suggested that format() is the cleaner looking and more flexible choice. set search_path = bixie I’m working on a new schema for a project. We’re rolling out a prototype quickly, so we’re going to house it in our existing production database for now. To keep things easy to clean up, Laura suggested that we put things into a separate schema. For managing our database models, I’ve switched to using SQLAlchemy, and also alembic for migrations. This made it super easy to specify that I wanted all the Bixie related tables in their own schema: class BixieCrash(DeclarativeBase): __table_args__ = {'schema': 'bixie'} __tablename__ = 'crashes' And that was it. Then, to avoid having to add ‘bixie.’ to all the table paths in test queries, I put this command into the tests: cursor.execute(""" SET search_path TO bixie """) I imagine there are some other ways to handle this. We’re not really using the ORM for anything other than schema loading, so I’ll probably add that to our connection initialization code for the new app. Then developers can write their queries as without any concerns about being in the correct schema. And I’ll glow just a little bit about deploying alembic on stage! set colorcolumn=80 I’ve been trying to write prettier Python. Today’s micro-effort was figuring out how display a vertical line to tell me when I exceed the 80 character width. The proper command to add to .vimrc is: :set colorcolumn=80 Which looks something like:
How can I fix my Software center? I can't update Ubuntu or anything without getting an error that items cannot be installed until the package catalog is repaired. Below is the errors I get. Please help I'm a noob Ubuntu. installArchives() failed: dpkg: error processing libqt4-xmlpatterns:i386 (--configure): libqt4-xmlpatterns:i386 4:4.8.1-0ubuntu4.1 cannot be configured because libqt4-xmlpatterns:amd64 is in a different version (4:4.8.1-0ubuntu4.2) dpkg: error processing libqt4-xmlpatterns (--configure): libqt4-xmlpatterns:amd64 4:4.8.1-0ubuntu4.2 cannot be configured because libqt4-xmlpatterns:i386 is in a different version (4:4.8.1-0ubuntu4.1) No apport report written because MaxReports is reached already dpkg: dependency problems prevent configuration of libqt4-declarative:i386: libqt4-declarative:i386 depends on libqt4-xmlpatterns (= 4:4.8.1-0ubuntu4.2); however: Version of libqt4-xmlpatterns:i386 on system is 4:4.8.1-0ubuntu4.1. dpkg: error processing libqt4-declarative:i386 (--configure): dependency problems - leaving unconfigured dpkg: dependency problems prevent configuration of libqt4-declarative: libqt4-declarative depends on libqt4-xmlpatterns (= 4:4.8.1-0ubuntu4.2); however: Package libqt4-xmlpatterns is not configured yet. dpkg: error processing libqt4-declarative (--configure): dependency problems - leaving unconfigured No apport report written because MaxReports is reached already dpkg: dependency problems prevent configuration of libqtgui4:i386: libqtgui4:i386 depends on libqt4-declarative (= 4:4.8.1-0ubuntu4.2); however: Package libqt4-declarative:i386 is not configured yet. dpkg: error processing libqtgui4:i386 (--configure): dependency problems - leaving unconfigured dpkg: dependency problems prevent configuration of libqtgui4: libqtgui4 depends on libqt4-declarative (= 4:4.8.1-0ubuntu4.2); however: Package libqt4-declarative is not configured yet. dpkg: error processing libqtgui4 (--configure): dependency problems - leaving unconfigured No apport report written because MaxReports is reached already dpkg: dependency problems prevent configuration of libqt4-designer: libqt4-designer depends on libqtgui4 (= 4:4.8.1-0ubuntu4.2); however: Package libqtgui4 is not configured yet. dpkg: error processing libqt4-designer (--configure): dependency problems - leaving unconfigured No apport report written because MaxReports is reached already dpkg: dependency problems prevent configuration of libqt4-designer:i386: libqt4-designer:i386 depends on libqtgui4 (= 4:4.8.1-0ubuntu4.2); however: Package libqtgui4:i386 is not configured yet. dpkg: error processing libqt4-designer:i386 (--configure): dependency problems - leaving unconfigured dpkg: dependency problems prevent configuration of libqt4-help: libqt4-help depends on libqtgui4 (= 4:4.8.1-0ubuntu4.2); however: Package libqtgui4 is not configured yet. dpkg: error processing libqt4-help (--configure): dependency problems - leaving unconfigured No apport report written because MaxReports is reached already dpkg: dependency problems prevent configuration of libqt4-opengl: libqt4-opengl depends on libqtgui4 (= 4:4.8.1-0ubuntu4.2); however: Package libqtgui4 is not configured yet. dpkg: error processing libqt4-opengl (--configure): dependency problems - leaving unconfigured No apport report written because MaxReports is reached already dpkg: dependency problems prevent configuration of libqt4-opengl:i386: libqt4-opengl:i386 depends on libqtgui4 (= 4:4.8.1-0ubuntu4.2); however: Package libqtgui4:i386 is not configured yet. dpkg: error processing libqt4-opengl:i386 (--configure): dependency problems - leaving unconfigured dpkg: dependency problems prevent configuration of libqt4-qt3support:i386: libqt4-qt3support:i386 depends on libqt4-designer (= 4:4.8.1-0ubuntu4.2); however: Package libqt4-designer:i386 is not configured yet. libqt4-qt3support:i386 depends on libqtgui4 (= 4:4.8.1-0ubuntu4.2); however: Package libqtgui4:i386 is not configured yet. dpkg: error processing libqt4-qt3support:i386 (--configure): dependency problems - leaving unconfigured dpkg: dependency problems prevent configuration of libqt4-scripttools: libqt4-scripttools depends on libqtgui4 (= 4:4.8.1-0ubuntu4.2); however: Package libqtgui4 is not configured yet. dpkg: error processing libqt4-scripttools (--configure): dependency problems - leaving unconfigured No apport report written because MaxReports is reached already dpkg: dependency problems prevent configuration of libqt4-scripttools:i386: libqt4-scripttools:i386 depends on libqtgui4 (= 4:4.8.1-0ubuntu4.2); however: Package libqtgui4:i386 is not configured yet. dpkg: error processing libqt4-scripttools:i386 (--configure): dependency problems - leaving unconfigured dpkg: dependency problems prevent configuration of libqt4-svg: libqt4-svg depends on libqtgui4 (= 4:4.8.1-0ubuntu4.2); however: Package libqtgui4 is not configured yet. dpkg: error processing libqt4-svg (--configure): dependency problems - leaving unconfigured No apport report written because MaxReports is reached already dpkg: dependency problems prevent configuration of libqt4-svg:i386: libqt4-svg:i386 depends on libqtgui4 (= 4:4.8.1-0ubuntu4.2); however: Package libqtgui4:i386 is not configured yet. dpkg: error processing libqt4-svg:i386 (--configure): dependency problems - leaving unconfigured Errors were encountered while processing: libqt4-xmlpatterns:i386 libqt4-xmlpatterns libqt4-declarative:i386 libqt4-declarative libqtgui4:i386 libqtgui4 libqt4-designer libqt4-designer:i386 libqt4-help libqt4-opengl libqt4-opengl:i386 libqt4-qt3support:i386 libqt4-scripttools libqt4-scripttools:i386 libqt4-svg libqt4-svg:i386 Error in function: SystemError: E:Sub-process /usr/bin/dpkg returned an error code (1) dpkg: dependency problems prevent configuration of libqt4-declarative:i386: libqt4-declarative:i386 depends on libqt4-xmlpatterns (= 4:4.8.1-0ubuntu4.2); however: Version of libqt4-xmlpatterns:i386 on system is 4:4.8.1-0ubuntu4.1. dpkg: error processing libqt4-declarative:i386 (--configure): dependency problems - leaving unconfigured dpkg: error processing libqt4-xmlpatterns (--configure): libqt4-xmlpatterns:amd64 4:4.8.1-0ubuntu4.2 cannot be configured because libqt4-xmlpatterns:i386 is in a different version (4:4.8.1-0ubuntu4.1) dpkg: error processing libqt4-xmlpatterns:i386 (--configure): libqt4-xmlpatterns:i386 4:4.8.1-0ubuntu4.1 cannot be configured because libqt4-xmlpatterns:amd64 is in a different version (4:4.8.1-0ubuntu4.2) dpkg: dependency problems prevent configuration of libqtgui4:i386: libqtgui4:i386 depends on libqt4-declarative (= 4:4.8.1-0ubuntu4.2); however: Package libqt4-declarative:i386 is not configured yet. dpkg: error processing libqtgui4:i386 (--configure): dependency problems - leaving unconfigured dpkg: dependency problems prevent configuration of libqt4-declarative: libqt4-declarative depends on libqt4-xmlpatterns (= 4:4.8.1-0ubuntu4.2); however: Package libqt4-xmlpatterns is not configured yet. dpkg: error processing libqt4-declarative (--configure): dependency problems - leaving unconfigured dpkg: dependency problems prevent configuration of libqt4-svg:i386: libqt4-svg:i386 depends on libqtgui4 (= 4:4.8.1-0ubuntu4.2); however: Package libqtgui4:i386 is not configured yet. dpkg: error processing libqt4-svg:i386 (--configure): dependency problems - leaving unconfigured dpkg: dependency problems prevent configuration of libqt4-opengl:i386: libqt4-opengl:i386 depends on libqtgui4 (= 4:4.8.1-0ubuntu4.2); however: Package libqtgui4:i386 is not configured yet. dpkg: error processing libqt4-opengl:i386 (--configure): dependency problems - leaving unconfigured dpkg: dependency problems prevent configuration of libqt4-designer:i386: libqt4-designer:i386 depends on libqtgui4 (= 4:4.8.1-0ubuntu4.2); however: Package libqtgui4:i386 is not configured yet. dpkg: error processing libqt4-designer:i386 (--configure): dependency problems - leaving unconfigured dpkg: dependency problems prevent configuration of libqt4-scripttools:i386: libqt4-scripttools:i386 depends on libqtgui4 (= 4:4.8.1-0ubuntu4.2); however: Package libqtgui4:i386 is not configured yet. dpkg: error processing libqt4-scripttools:i386 (--configure): dependency problems - leaving unconfigured dpkg: dependency problems prevent configuration of libqt4-qt3support:i386: libqt4-qt3support:i386 depends on libqt4-designer (= 4:4.8.1-0ubuntu4.2); however: Package libqt4-designer:i386 is not configured yet. libqt4-qt3support:i386 depends on libqtgui4 (= 4:4.8.1-0ubuntu4.2); however: Package libqtgui4:i386 is not configured yet. dpkg: error processing libqt4-qt3support:i386 (--configure): dependency problems - leaving unconfigured dpkg: dependency problems prevent configuration of libqtgui4: libqtgui4 depends on libqt4-declarative (= 4:4.8.1-0ubuntu4.2); however: Package libqt4-declarative is not configured yet. dpkg: error processing libqtgui4 (--configure): dependency problems - leaving unconfigured dpkg: dependency problems prevent configuration of libqt4-svg: libqt4-svg depends on libqtgui4 (= 4:4.8.1-0ubuntu4.2); however: Package libqtgui4 is not configured yet. dpkg: error processing libqt4-svg (--configure): dependency problems - leaving unconfigured dpkg: dependency problems prevent configuration of libqt4-opengl: libqt4-opengl depends on libqtgui4 (= 4:4.8.1-0ubuntu4.2); however: Package libqtgui4 is not configured yet. dpkg: error processing libqt4-opengl (--configure): dependency problems - leaving unconfigured dpkg: dependency problems prevent configuration of libqt4-designer: libqt4-designer depends on libqtgui4 (= 4:4.8.1-0ubuntu4.2); however: Package libqtgui4 is not configured yet. dpkg: error processing libqt4-designer (--configure): dependency problems - leaving unconfigured dpkg: dependency problems prevent configuration of libqt4-help: libqt4-help depends on libqtgui4 (= 4:4.8.1-0ubuntu4.2); however: Package libqtgui4 is not configured yet. dpkg: error processing libqt4-help (--configure): dependency problems - leaving unconfigured dpkg: dependency problems prevent configuration of libqt4-scripttools: libqt4-scripttools depends on libqtgui4 (= 4:4.8.1-0ubuntu4.2); however: Package libqtgui4 is not configured yet. dpkg: error processing libqt4-scripttools (--configure): dependency problems - leaving unconfigured
I'm currently following this tutorial to install scipy on Ubuntu 12.04 (I can't use apt-get install because I need a recent version) : http://www.scipy.org/Installing_SciPy/Linux However I get errors when I do the following commands : python setup.py build sudo python setup.py install --prefix=/usr/local # installs to /usr/local python setup.py build michael@michael-laptop-ubuntu:~/Downloads/scipy-0.11.0rc1$ python setup.py buildRunning from scipy source directory. blas_opt_info: blas_mkl_info: libraries mkl,vml,guide not found in /usr/local/lib libraries mkl,vml,guide not found in /usr/lib libraries mkl,vml,guide not found in /usr/lib/i386-linux-gnu NOT AVAILABLE atlas_blas_threads_info: Setting PTATLAS=ATLAS libraries ptf77blas,ptcblas,atlas not found in /usr/local/lib libraries ptf77blas,ptcblas,atlas not found in /usr/lib/sse2 libraries ptf77blas,ptcblas,atlas not found in /usr/lib libraries ptf77blas,ptcblas,atlas not found in /usr/lib/i386-linux-gnu/sse2 libraries ptf77blas,ptcblas,atlas not found in /usr/lib/i386-linux-gnu NOT AVAILABLE atlas_blas_info: libraries f77blas,cblas,atlas not found in /usr/local/lib libraries f77blas,cblas,atlas not found in /usr/lib/sse2 libraries f77blas,cblas,atlas not found in /usr/lib libraries f77blas,cblas,atlas not found in /usr/lib/i386-linux-gnu/sse2 libraries f77blas,cblas,atlas not found in /usr/lib/i386-linux-gnu NOT AVAILABLE /usr/lib/python2.7/dist-packages/numpy/distutils/system_info.py:1423: UserWarning: Atlas (http://math-atlas.sourceforge.net/) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [atlas]) or by setting the ATLAS environment variable. warnings.warn(AtlasNotFoundError.__doc__) blas_info: libraries blas not found in /usr/local/lib libraries blas not found in /usr/lib libraries blas not found in /usr/lib/i386-linux-gnu NOT AVAILABLE /usr/lib/python2.7/dist-packages/numpy/distutils/system_info.py:1432: UserWarning: Blas (http://www.netlib.org/blas/) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [blas]) or by setting the BLAS environment variable. warnings.warn(BlasNotFoundError.__doc__) blas_src_info: NOT AVAILABLE /usr/lib/python2.7/dist-packages/numpy/distutils/system_info.py:1435: UserWarning: Blas (http://www.netlib.org/blas/) sources not found. Directories to search for the sources can be specified in the numpy/distutils/site.cfg file (section [blas_src]) or by setting the BLAS_SRC environment variable. warnings.warn(BlasSrcNotFoundError.__doc__) Traceback (most recent call last): File "setup.py", line 208, in <module> setup_package() File "setup.py", line 199, in setup_package configuration=configuration ) File "/usr/lib/python2.7/dist-packages/numpy/distutils/core.py", line 152, in setup config = configuration() File "setup.py", line 136, in configuration config.add_subpackage('scipy') File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 1002, in add_subpackage caller_level = 2) File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 971, in get_subpackage caller_level = caller_level + 1) File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 908, in _get_configuration_from_setup_py config = setup_module.configuration(*args) File "scipy/setup.py", line 8, in configuration config.add_subpackage('integrate') File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 1002, in add_subpackage caller_level = 2) File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 971, in get_subpackage caller_level = caller_level + 1) File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 908, in _get_configuration_from_setup_py config = setup_module.configuration(*args) File "scipy/integrate/setup.py", line 10, in configuration blas_opt = get_info('blas_opt',notfound_action=2) File "/usr/lib/python2.7/dist-packages/numpy/distutils/system_info.py", line 320, in get_info return cl().get_info(notfound_action) File "/usr/lib/python2.7/dist-packages/numpy/distutils/system_info.py", line 471, in get_info raise self.notfounderror(self.notfounderror.__doc__) numpy.distutils.system_info.BlasNotFoundError: Blas (http://www.netlib.org/blas/) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [blas]) or by setting the BLAS environment variable. Error in sys.excepthook: Traceback (most recent call last): File "/usr/lib/python2.7/dist-packages/apport_python_hook.py", line 64, in apport_excepthook from apport.fileutils import likely_packaged, get_recent_crashes File "/usr/lib/python2.7/dist-packages/apport/__init__.py", line 1, in <module> from apport.report import Report File "/usr/lib/python2.7/dist-packages/apport/report.py", line 18, in <module> import problem_report File "/usr/lib/python2.7/dist-packages/problem_report.py", line 14, in <module> import zlib, base64, time, sys, gzip, struct, os File "/usr/lib/python2.7/gzip.py", line 10, in <module> import io File "/home/michael/Downloads/scipy-0.11.0rc1/scipy/io/__init__.py", line 83, in <module> from matlab import loadmat, savemat, byteordercodes File "/home/michael/Downloads/scipy-0.11.0rc1/scipy/io/matlab/__init__.py", line 11, in <module> from mio import loadmat, savemat File "/home/michael/Downloads/scipy-0.11.0rc1/scipy/io/matlab/mio.py", line 15, in <module> from mio4 import MatFile4Reader, MatFile4Writer File "/home/michael/Downloads/scipy-0.11.0rc1/scipy/io/matlab/mio4.py", line 9, in <module> import scipy.sparse File "/home/michael/Downloads/scipy-0.11.0rc1/scipy/sparse/__init__.py", line 180, in <module> from csr import * File "/home/michael/Downloads/scipy-0.11.0rc1/scipy/sparse/csr.py", line 12, in <module> from sparsetools import csr_tocsc, csr_tobsr, csr_count_blocks, \ File "/home/michael/Downloads/scipy-0.11.0rc1/scipy/sparse/sparsetools/__init__.py", line 4, in <module> from csr import * File "/home/michael/Downloads/scipy-0.11.0rc1/scipy/sparse/sparsetools/csr.py", line 25, in <module> _csr = swig_import_helper() File "/home/michael/Downloads/scipy-0.11.0rc1/scipy/sparse/sparsetools/csr.py", line 17, in swig_import_helper import _csr ImportError: No module named _csr Original exception was: Traceback (most recent call last): File "setup.py", line 208, in <module> setup_package() File "setup.py", line 199, in setup_package configuration=configuration ) File "/usr/lib/python2.7/dist-packages/numpy/distutils/core.py", line 152, in setup config = configuration() File "setup.py", line 136, in configuration config.add_subpackage('scipy') File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 1002, in add_subpackage caller_level = 2) File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 971, in get_subpackage caller_level = caller_level + 1) File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 908, in _get_configuration_from_setup_py config = setup_module.configuration(*args) File "scipy/setup.py", line 8, in configuration config.add_subpackage('integrate') File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 1002, in add_subpackage caller_level = 2) File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 971, in get_subpackage caller_level = caller_level + 1) File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 908, in _get_configuration_from_setup_py config = setup_module.configuration(*args) File "scipy/integrate/setup.py", line 10, in configuration blas_opt = get_info('blas_opt',notfound_action=2) File "/usr/lib/python2.7/dist-packages/numpy/distutils/system_info.py", line 320, in get_info return cl().get_info(notfound_action) File "/usr/lib/python2.7/dist-packages/numpy/distutils/system_info.py", line 471, in get_info raise self.notfounderror(self.notfounderror.__doc__) numpy.distutils.system_info.BlasNotFoundError: Blas (http://www.netlib.org/blas/) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [blas]) or by setting the BLAS environment variable. sudo python setup.py install --prefix=/usr/local # installs to /usr/local michael@michael-laptop-ubuntu:~/Downloads/scipy-0.11.0rc1$ sudo python setup.py install --prefix=/usr/local [sudo] password for michael: Running from scipy source directory. blas_opt_info: blas_mkl_info: libraries mkl,vml,guide not found in /usr/local/lib libraries mkl,vml,guide not found in /usr/lib libraries mkl,vml,guide not found in /usr/lib/i386-linux-gnu NOT AVAILABLE atlas_blas_threads_info: Setting PTATLAS=ATLAS libraries ptf77blas,ptcblas,atlas not found in /usr/local/lib libraries ptf77blas,ptcblas,atlas not found in /usr/lib/sse2 libraries ptf77blas,ptcblas,atlas not found in /usr/lib libraries ptf77blas,ptcblas,atlas not found in /usr/lib/i386-linux-gnu/sse2 libraries ptf77blas,ptcblas,atlas not found in /usr/lib/i386-linux-gnu NOT AVAILABLE atlas_blas_info: libraries f77blas,cblas,atlas not found in /usr/local/lib libraries f77blas,cblas,atlas not found in /usr/lib/sse2 libraries f77blas,cblas,atlas not found in /usr/lib libraries f77blas,cblas,atlas not found in /usr/lib/i386-linux-gnu/sse2 libraries f77blas,cblas,atlas not found in /usr/lib/i386-linux-gnu NOT AVAILABLE /usr/lib/python2.7/dist-packages/numpy/distutils/system_info.py:1423: UserWarning: Atlas (http://math-atlas.sourceforge.net/) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [atlas]) or by setting the ATLAS environment variable. warnings.warn(AtlasNotFoundError.__doc__) blas_info: libraries blas not found in /usr/local/lib libraries blas not found in /usr/lib libraries blas not found in /usr/lib/i386-linux-gnu NOT AVAILABLE /usr/lib/python2.7/dist-packages/numpy/distutils/system_info.py:1432: UserWarning: Blas (http://www.netlib.org/blas/) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [blas]) or by setting the BLAS environment variable. warnings.warn(BlasNotFoundError.__doc__) blas_src_info: NOT AVAILABLE /usr/lib/python2.7/dist-packages/numpy/distutils/system_info.py:1435: UserWarning: Blas (http://www.netlib.org/blas/) sources not found. Directories to search for the sources can be specified in the numpy/distutils/site.cfg file (section [blas_src]) or by setting the BLAS_SRC environment variable. warnings.warn(BlasSrcNotFoundError.__doc__) Traceback (most recent call last): File "setup.py", line 208, in <module> setup_package() File "setup.py", line 199, in setup_package configuration=configuration ) File "/usr/lib/python2.7/dist-packages/numpy/distutils/core.py", line 152, in setup config = configuration() File "setup.py", line 136, in configuration config.add_subpackage('scipy') File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 1002, in add_subpackage caller_level = 2) File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 971, in get_subpackage caller_level = caller_level + 1) File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 908, in _get_configuration_from_setup_py config = setup_module.configuration(*args) File "scipy/setup.py", line 8, in configuration config.add_subpackage('integrate') File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 1002, in add_subpackage caller_level = 2) File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 971, in get_subpackage caller_level = caller_level + 1) File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 908, in _get_configuration_from_setup_py config = setup_module.configuration(*args) File "scipy/integrate/setup.py", line 10, in configuration blas_opt = get_info('blas_opt',notfound_action=2) File "/usr/lib/python2.7/dist-packages/numpy/distutils/system_info.py", line 320, in get_info return cl().get_info(notfound_action) File "/usr/lib/python2.7/dist-packages/numpy/distutils/system_info.py", line 471, in get_info raise self.notfounderror(self.notfounderror.__doc__) numpy.distutils.system_info.BlasNotFoundError: Blas (http://www.netlib.org/blas/) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [blas]) or by setting the BLAS environment variable. Error in sys.excepthook: Traceback (most recent call last): File "/usr/lib/python2.7/dist-packages/apport_python_hook.py", line 64, in apport_excepthook from apport.fileutils import likely_packaged, get_recent_crashes File "/usr/lib/python2.7/dist-packages/apport/__init__.py", line 1, in <module> from apport.report import Report File "/usr/lib/python2.7/dist-packages/apport/report.py", line 18, in <module> import problem_report File "/usr/lib/python2.7/dist-packages/problem_report.py", line 14, in <module> import zlib, base64, time, sys, gzip, struct, os File "/usr/lib/python2.7/gzip.py", line 10, in <module> import io File "/home/michael/Downloads/scipy-0.11.0rc1/scipy/io/__init__.py", line 83, in <module> from matlab import loadmat, savemat, byteordercodes File "/home/michael/Downloads/scipy-0.11.0rc1/scipy/io/matlab/__init__.py", line 11, in <module> from mio import loadmat, savemat File "/home/michael/Downloads/scipy-0.11.0rc1/scipy/io/matlab/mio.py", line 15, in <module> from mio4 import MatFile4Reader, MatFile4Writer File "/home/michael/Downloads/scipy-0.11.0rc1/scipy/io/matlab/mio4.py", line 9, in <module> import scipy.sparse File "/home/michael/Downloads/scipy-0.11.0rc1/scipy/sparse/__init__.py", line 180, in <module> from csr import * File "/home/michael/Downloads/scipy-0.11.0rc1/scipy/sparse/csr.py", line 12, in <module> from sparsetools import csr_tocsc, csr_tobsr, csr_count_blocks, \ File "/home/michael/Downloads/scipy-0.11.0rc1/scipy/sparse/sparsetools/__init__.py", line 4, in <module> from csr import * File "/home/michael/Downloads/scipy-0.11.0rc1/scipy/sparse/sparsetools/csr.py", line 25, in <module> _csr = swig_import_helper() File "/home/michael/Downloads/scipy-0.11.0rc1/scipy/sparse/sparsetools/csr.py", line 17, in swig_import_helper import _csr ImportError: No module named _csr Original exception was: Traceback (most recent call last): File "setup.py", line 208, in <module> setup_package() File "setup.py", line 199, in setup_package configuration=configuration ) File "/usr/lib/python2.7/dist-packages/numpy/distutils/core.py", line 152, in setup config = configuration() File "setup.py", line 136, in configuration config.add_subpackage('scipy') File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 1002, in add_subpackage caller_level = 2) File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 971, in get_subpackage caller_level = caller_level + 1) File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 908, in _get_configuration_from_setup_py config = setup_module.configuration(*args) File "scipy/setup.py", line 8, in configuration config.add_subpackage('integrate') File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 1002, in add_subpackage caller_level = 2) File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 971, in get_subpackage caller_level = caller_level + 1) File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 908, in _get_configuration_from_setup_py config = setup_module.configuration(*args) File "scipy/integrate/setup.py", line 10, in configuration blas_opt = get_info('blas_opt',notfound_action=2) File "/usr/lib/python2.7/dist-packages/numpy/distutils/system_info.py", line 320, in get_info return cl().get_info(notfound_action) File "/usr/lib/python2.7/dist-packages/numpy/distutils/system_info.py", line 471, in get_info raise self.notfounderror(self.notfounderror.__doc__) numpy.distutils.system_info.BlasNotFoundError: Blas (http://www.netlib.org/blas/) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [blas]) or by setting the BLAS environment variable. Moreover, I don't know how to test if this module was installed correctly. Thank
I am attempting to save a response from a text input to a single cell in a sqlite3 database table without writing over the rest of the information stored in that line of the table. I need to do this using the python form I have started below. This form below successfully saves the label responses into their own table. In addition to this, the model for the Applicant table that I want to save the individual cell in is below also. forms.py class LabelResponseForm(forms.Form): def __init__(self, reviewQuestion, applicant, reviewer, edit_existing=False, *args, **kwdargs): self.reviewQuestion = reviewQuestion self.applicant_id = applicant self.reviewer_id = reviewer.eid self.reviewer_name = reviewer.name self.response = None #initial = None super(LabelResponseForm, self).__init__(*args, **kwdargs) response = self.fields['response'] response.label = reviewQuestion response.required = False def save (self, commit=True): if not self.cleaned_data['response']: return labels = ReviewerLabels() applicant = Applicants() # applicant.app_label = self.cleaned_data['response'] labels.rl_response_to_question_id = self.reviewQuestion labels.rl_applicant_id = self.applicant_id labels.rl_reviewer_eid = self.reviewer_id labels.rl_reviewer_name = self.reviewer_name labels.rl_label_text = self.cleaned_data['response'] #embedded sqlite code conn = sqlite3.connect('C:\\engr\\edh\\gradapp\\data\\test.db') cur = conn.cursor() cur.execute("UPDATE admissions_applicants SET app_label = ? WHERE id = ?", (self.cleaned_data['response'], self.applicant_id )) transaction.commit_unless_managed() if commit: labels.save() # applicant.save() return labels class LabelTextAreaResponse(LabelResponseForm): response = forms.CharField(widget=forms.Textarea(attrs={'rows': '2', 'cols': '30'}))` Below is the model for the database table I need to save the input into. I need this specific input saved in the app_label column and it also needs to correspond to the app_eid in that row. models.py class Applicants(models.Model): app_eid = models.CharField(max_length=8) # other model fields app_gpa = models.FloatField() app_labels = models.CharField(max_length=50) reviewer_tag = models.CharField(max_length=50) degree_obj = models.CharField(max_length=50) app_univ = models.CharField(max_length=100) class_rank = models.CharField(max_length=15) num_reviews = models.SmallIntegerField() pending_reviews = models.SmallIntegerField() avg_score = models.DecimalField(max_digits=2, decimal_places=1) Is there a way to do this? If so, I'd be repeating the same data redirection for the app_gpa, reviewer_tag, degree_obj, app_univ, class_rank, num_reviews, pending_reviews, and avg_score aswell. Any response would be helpful at this point. Currently, I'm trying to embed sqlite code to update the cells in the applicants table. Exception Type: OperationalErrorException Value: no such table: admissions_applicants I'm getting this error saying that this table doesn't exist. I'm confident the database table does exist. Is my syntax wrong for writing these sqlite queries like this in python?
I have a scipy array, e.g. a = array([[0, 0, 1], [1, 1, 1], [1, 1, 1], [1, 0, 1]]) I want to count the number of occurrences of each unique element in the array. For example, for the above array a, I want to get out that there is 1 occurrence of [0, 0, 1], 2 occurrences of [1, 1, 1] and 1 occurrence of [1, 0, 1]. One way I thought of doing it is: from collections import defaultdict d = defaultdict(int) for elt in a: d[elt] += 1 is there a better/more efficient way? thanks.
You are importing all names from the requests module into your local namespace, which means you do not need to prefix them anymore with the module name: >>> from requests import * >>> get <function get at 0x107820b18> If you were to import the module with an import requests statement instead, you added the module itself to your namespace and you do have to use the full name: >>> import requests >>> requests.get <function get at 0x102e46b18> Note that the above examples is what I got from my tests in the interpreter. If you get different results, you are importing the wrong module; check if you have an extra requests.py file in your python package: >>> import requests >>> print requests.__file__ /private/tmp/requeststest/lib/python2.7/site-packages/requests/__init__.pyc You can also test for the name listing provided by the requests module: >>> print dir(requests) ['ConnectionError', 'HTTPError', 'Request', 'RequestException', 'Response', 'Session', 'Timeout', 'TooManyRedirects', 'URLRequired', '__author__', '__build__', '__builtins__', '__copyright__', '__doc__', '__file__', '__license__', '__name__', '__package__', '__path__', '__title__', '__version__', '_oauth', 'api', 'auth', 'certs', 'codes', 'compat', 'cookies', 'defaults', 'delete', 'exceptions', 'get', 'head', 'hooks', 'models', 'options', 'packages', 'patch', 'post', 'put', 'request', 'safe_mode', 'session', 'sessions', 'status_codes', 'structures', 'utils']
I am working on creating an ArcGIS tool from a Python script I am writing. I am wondering if it is possible to have a checkbox parameter. Basically what I want to do is have a parameter where the user selects a feature class, then from the feature class the user will choose the field for the upper most layer in their model, then I want the user to be able to choose what layers they want the script to run on with a checkbox structure derived from the upper most layer field. Is this possible with python, and ArcGIS? To see how to get a checkbox onto the dialog of a Python script tool try using some test code like this: inputString = arcpy.GetParameterAsText(0) inputBoolean = arcpy.GetParameterAsText(1) arcpy.AddMessage("String set to " + inputString) arcpy.AddMessage("Boolean set to " + str(inputBoolean)) Then when you add this script as a tool you will need two Parameters, the first of Data Type String and the second of Data Type Boolean. A sample code for a script tool which will have a single check box. If a check box will be checked by a user, the tool will verify existance of a specified data file. import arcpy input_fc = r'C:\GIS\Temp\data_shp.shp' #getting the input parameter - will become a tool parameter in ArcGIS of Boolean type ischecked = arcpy.GetParameterAsText(0) #Important to convert the check box value to a string first. #Should be 'true' with the small case for 't', #not the 'True' as shown in the Python window in ArcGIS if str(ischecked) == 'true': arcpy.AddMessage("The check box was checked") result = arcpy.Exists(input_fc) #to return 'True' or 'False' depending on whether the data file exists #since it is a Boolean, important to convert it to a string arcpy.AddMessage(str(result)) else: #in this case, the check box value is 'false', user did not check the box arcpy.AddMessage("The check box was not checked") Remember to add a tool parameter of Boolean data type when creating a new script tool in ArcGIS Desktop application. This parameter will be automatically shown as a check box when user runs the tool.
Let's say we want the 8th unrestricted partition or p(8). We generate some generalized pentagonal numbers first. We use: with n = -4, -3, -2, -1, 0, 1, 2, 3, 4 why we go from -4 to 4 will be clearer when you do some these yourself. Anyway, this generates the sequence you only keep numbers whose absolute value is < 8 and do not equal 0. So the sequence becomes: Now the absolute value of each of those numbers is subtracted from 8 to get: p(8) = p(8-1) ± p(8-2) ± p(8-5) ± p(8-7) that becomes p(8) = p(7) ± p(6) ± p(3) ± p(1) To get the correct signs instead of ± , look at the sequence. If the number that is subtracted from 8 is positive then the ± becomes + if it is negative then it becomes negative. In mathematics, you don't understand things. You just get used to them.I have the result, but I do not yet know how to get it.All physicists, and a good many quite respectable mathematicians are contemptuous about proof. Offline Wikipedia provides this program: pentagonal = lambda n : n*(3*n-1)/2 def generalised_pentagonal(n): # 0, 1, -1, 2, -2 if n < 0: return 0 if n%2 == 0: return pentagonal(n/2+1) else: return pentagonal(-(n/2+1)) pt = [1] for n in range (1, 1000+1): r = 0 f = -1 i = 0 while 1: k = generalised_pentagonal(i) if k > n: break if i%2==0: f = -f r += f*pt[n - k] i += 1 pt.append(r) print pt I just replaced the condition of the loop with what I require I got the answer within a few seconds 'And fun? If maths is fun, then getting a tooth extraction is fun. A viral infection is fun. Rabies shots are fun.' 'God exists because Mathematics is consistent, and the devil exists because we cannot prove it' 'Humanity is still kept intact. It remains within.' -Alokananda Offline Hi; Did you understand what I did in post #26? In mathematics, you don't understand things. You just get used to them.I have the result, but I do not yet know how to get it.All physicists, and a good many quite respectable mathematicians are contemptuous about proof. Offline Yes! 1. To calculate a partition, we have to calculate(some of) the previous partitions. 2. We need to know that p(1) = 1 I can understand how we use it, but not the principle behind it Last edited by Agnishom (2012-09-15 17:17:43) 'And fun? If maths is fun, then getting a tooth extraction is fun. A viral infection is fun. Rabies shots are fun.' 'God exists because Mathematics is consistent, and the devil exists because we cannot prove it' 'Humanity is still kept intact. It remains within.' -Alokananda Offline Hi Agnishom; Have you seen my signature? That is from a top mathematician. If he has trouble understanding some things what can be expected of us? Fortunately, if you understand the mechanics of a math technique or algorithm you can then use it. I gave you the how it works stuff, the why it works is much tougher and I do not fully understand it. Incidentally, I only showed this rather inefficient technique as a means to solve that specific problem. In mathematics, you don't understand things. You just get used to them.I have the result, but I do not yet know how to get it.All physicists, and a good many quite respectable mathematicians are contemptuous about proof. Offline Grand Idea! 'And fun? If maths is fun, then getting a tooth extraction is fun. A viral infection is fun. Rabies shots are fun.' 'God exists because Mathematics is consistent, and the devil exists because we cannot prove it' 'Humanity is still kept intact. It remains within.' -Alokananda Offline By the Way, Once upon a time you had a signature about wastage of time and money. Would you please explain that? 'And fun? If maths is fun, then getting a tooth extraction is fun. A viral infection is fun. Rabies shots are fun.' 'God exists because Mathematics is consistent, and the devil exists because we cannot prove it' 'Humanity is still kept intact. It remains within.' -Alokananda Offline Hi Agnishom; I do not remember that one, can you give me a hint? In mathematics, you don't understand things. You just get used to them.I have the result, but I do not yet know how to get it.All physicists, and a good many quite respectable mathematicians are contemptuous about proof. Offline Something like this: How often do I see money and time being wasted.............. Then You told something about calculations.... 'And fun? If maths is fun, then getting a tooth extraction is fun. A viral infection is fun. Rabies shots are fun.' 'God exists because Mathematics is consistent, and the devil exists because we cannot prove it' 'Humanity is still kept intact. It remains within.' -Alokananda Offline Hi Agnishom; That is very good that you remember that much. I wished I could remember the entire quote and who said it. If I do I will explain. In mathematics, you don't understand things. You just get used to them.I have the result, but I do not yet know how to get it.All physicists, and a good many quite respectable mathematicians are contemptuous about proof. Offline At that time I never understood what they were as I did not have the concept of Signatures. They seemed very confusing to me Big discoveries are protected by public incredulity. What is the meaning of public incredulity? 'And fun? If maths is fun, then getting a tooth extraction is fun. A viral infection is fun. Rabies shots are fun.' 'God exists because Mathematics is consistent, and the devil exists because we cannot prove it' 'Humanity is still kept intact. It remains within.' -Alokananda Offline It means, that big discoveries are looked at with disbelief by the general public. “Here lies the reader who will never open this book. He is forever dead. “Taking a new step, uttering a new word, is what people fear most.” ― Fyodor Dostoyevsky, Crime and Punishment Offline Hi all; That is close. In mathematics, you don't understand things. You just get used to them.I have the result, but I do not yet know how to get it.All physicists, and a good many quite respectable mathematicians are contemptuous about proof. Offline How close? “Here lies the reader who will never open this book. He is forever dead. “Taking a new step, uttering a new word, is what people fear most.” ― Fyodor Dostoyevsky, Crime and Punishment Offline Pretty close... In mathematics, you don't understand things. You just get used to them.I have the result, but I do not yet know how to get it.All physicists, and a good many quite respectable mathematicians are contemptuous about proof. Offline Where do the La***ta files come in? “Here lies the reader who will never open this book. He is forever dead. “Taking a new step, uttering a new word, is what people fear most.” ― Fyodor Dostoyevsky, Crime and Punishment Offline There is a difference between how Marshall McLuhan and I interpret that quote. I did not know him personally or attend the parties he did or hang in his social set. Therefore I do not know what secrets he was exposed to. In mathematics, you don't understand things. You just get used to them.I have the result, but I do not yet know how to get it.All physicists, and a good many quite respectable mathematicians are contemptuous about proof. Offline
I am runninig test's with Python Unittest. I am running tests but I want to do negative testing and I would like to test if a function throw's an exception, it passes but if no exception is thrown the test fail's. The script I have is: try: result = self.client.service.GetStreamUri(self.stream, self.token) self.assertFalse except suds.WebFault, e: self.assertTrue else: self.assertTrue This alway's passes as True even when the function work's perfectly. I have also tried various other way's including: try: result = self.client.service.GetStreamUri(self.stream, self.token) self.assertFalse except suds.WebFault, e: self.assertTrue except Exception, e: self.assertTrue Does anyone have any suggestions? Thanks I have tried assertRaises with no luck. try: result = self.client.service.GetStreamUri(self.stream, self.token) self.assertRaises(WebFault) except suds.WebFault, e: self.assertFalse except Exception, e: self.assertTrue It still passes. For some reason it does not try and do the assertRaises statement. I have also tried: (The function should fail but the test should pass) try: result = self.client.service.GetStreamUri(self.stream, self.token) except suds.WebFault, e: self.assertFalse except Exception, e: self.assertTrue else: self.assertFalse For some reason even when the function passes it will not raise an error. It always no matter what goes to Exception. Unless there is an else statement then it goes to that. Found a way that work's but it seem's a very poor way of doing things: Can anyone suggest a cleaner way of doing this? try: result = self.client.service.GetStreamUri(self.stream, self.token) except suds.WebFault, e: self.assertFalse except Exception, e: pass try: result==result except: result=None if result==None: assert True else: assert False
What's the most elegant way to check if the directory a file is going to be written to exists, and if not, create the directory? Here is what I tried: filename = "/my/directory/filename.txt" dir = os.path.dirname(filename) try: os.stat(dir) except: os.mkdir(dir) f = file(filename) Somehow, I missed os.path.exists (thanks kanja, Blair, and Douglas). This is what I have now: def ensure_dir(f): d = os.path.dirname(f) if not os.path.exists(d): os.makedirs(d) Is there a flag for "open", that makes this happen automatically?
I'm having trouble overriding a ModelForm save method. This is the error I'm receiving: Exception Type: TypeError Exception Value: save() got an unexpected keyword argument 'commit' My intentions are to have a form submit many values for 3 fields, to then create an object for each combination of those fields, and to save each of those objects. Helpful nudge in the right direction would be ace. File models.py class CallResultType(models.Model): id = models.AutoField(db_column='icontact_result_code_type_id', primary_key=True) callResult = models.ForeignKey('CallResult', db_column='icontact_result_code_id') campaign = models.ForeignKey('Campaign', db_column='icampaign_id') callType = models.ForeignKey('CallType', db_column='icall_type_id') agent = models.BooleanField(db_column='bagent', default=True) teamLeader = models.BooleanField(db_column='bTeamLeader', default=True) active = models.BooleanField(db_column='bactive', default=True) File forms.py from django.forms import ModelForm, ModelMultipleChoiceField from callresults.models import * class CallResultTypeForm(ModelForm): callResult = ModelMultipleChoiceField(queryset=CallResult.objects.all()) campaign = ModelMultipleChoiceField(queryset=Campaign.objects.all()) callType = ModelMultipleChoiceField(queryset=CallType.objects.all()) def save(self, force_insert=False, force_update=False): for cr in self.callResult: for c in self.campain: for ct in self.callType: m = CallResultType(self) # this line is probably wrong m.callResult = cr m.campaign = c m.calltype = ct m.save() class Meta: model = CallResultType File admin.py class CallResultTypeAdmin(admin.ModelAdmin): form = CallResultTypeForm
My app has a GtkFileChooserButton that you can use to open a chooser widget and pick a single file .. and then perform operations on that file. This works. I've added drag & drop functionality to the button as well. It works, but it's buggy. In short, the first dnd to the FileChooserButton triggers the file-set signal and appears to have worked, -- the name changes from "( None )" to the file's name and if you then click on the button, it pops up a chooser widget showing the proper file selected; HOWEVER, as you can see from the debug output below, get_filename() does not return the filename! Not the first time, anyway. The second time we dnd a file onto the button (whether it's a different file or the same file again), all goes well and get_filename() returns the file's name. Why? Here's the debug output from my program when I drag three files on to the button, one at a time: [on_file-set] FileChooserButton.get_filename() output: None [on_file-set] FileChooserButton.get_filename() output: '/home/ryran/Desktop/priv.txt' [on_file-set] FileChooserButton.get_filename() output: '/home/ryran/Desktop/kinamppost' PS: When I did this, the 1st and 2nd dnd were actually the same file. If you want to see the full app in action, it's at http://github.com/ryran/pyrite, and I'd love to post code, but there's not really anything to post!! I'm not doing drag_dest_set() because FileChooserButton already supports dnd. So ALL I'm doing is defining a cb for the FileChooserButton's file-set signal. So uhh.. here's that: def action_chooserbtn_file_set(self, widget): print "[on_file-set] FileChooserButton.get_filename() output:\n{!r}\n".format(widget.get_filename()) For the record I also tried doing all this in concert with defining drag_dest_set, but came up with the same results. What else to say? I'm starting to think this is a bug.
Imagine to have this code: class Foo: def __init__(self, active): self.active = active def doAction(self): if not self.active: return # do something f=Foo(false) f.doAction() # does nothing This is a nice code; I actually have (not in Python) a global active variable called "dosomething" and a routine called "something," where the first thing happening inside the "something" routine is if not dosomething return. An alternative implementation would call to a routine that always performs an action, and the flag is checked at invocation time, as in the following code: class Foo: def doAction(self): # do something doaction = False f=Foo() if doaction: f.doAction() What is your opinion on this? I personally find that the first solution violates the least surprise principle: The caller is invoking an action which is never performed in response to a status which has been set somewhere else, but from the code it looks like the action is performed. Would you consider it a total pattern, a total antipattern, or just an option with no strong opinion for or against it?
I am writing a distributed data-store in Python for a very specific kind of data,and I wanted to show how you can build a simple distributed system in Python. For this post we will build a distributed log,This system allows you to store logs from many servers into one big log which is distributed between many machines. We start by giving a overview of the components of the system: This system have two major components,The master node and the storage nodes. Storage nodes are simple dumb nodes that store and retrieve data when needed. The master node is responsible for accepting writes from servers and sending those writes to the storage node in a way that the data get distributed to all the nodes. When a server sends his log entries to the master,the following happens: master gets the K log entries and select N(the replication factor) nodes that will store the entries master send a copy of the K entries to the N nodes and waits for them to write it to their local store(be it memory or disk) master then register in his local datastructure that those N nodes have those K entries. Also the master is responsible for client requests,when a client wants to read a portion of the log the following happens: he sends a request to the master with a time range that he wants to read. the master decided which nodes have the information and ask for each node the entries. the master combine all the information and send it back to the client. This design(single master,multiple slaves) have pros and cons. Pros: simple design,the master coordinates both read and writes. complex operations can be implemented in the master,like garbage collecting. the master have a consist view of the cluster and can make elaborated decisions on data placement. Cons: SPOF(single point of failure) - in case the master is down then all logs are inaccessible,this problem can be solved using a shadow master(like it is done In Google File System) or by using a multi-master/no-master design(out of the scope of this post). the cluster size is limited by the master capacity,for example there is a limit on the amount of RAM(or disk) a master have to keep track of which node have each log entry. the amount of read/write load is limited by the master as all writes and reads must go to the master first. from optparse import OptionParser from wsgiref import simple_server from wsgi_jsonrpc import json_tools,WSGIJSONRPCApplication class StorageNode(object):     def __init__(self):         self.entries = []         self.dt_to_entries = {}     def add_entries(self,data,dt):         self.entries.append(data)         self.dt_to_entries[dt] = len(self.entries) - 1     def get_entries(self,dts):         return [(self.entries[self.dt_to_entries[dt]],dt) for dt in dts] if __name__ == "__main__":      parser = OptionParser()   parser.add_option("-m", "--master-port", dest="master_port", help="master port",type="int")   parser.add_option("-p", "--port", dest="port",help="storage port", type="int")   (options, args) = parser.parse_args()   node = StorageNode()   print "joining master..."   master = json_tools.ServerProxy( "http://localhost:%d" % options.master_port)     try:     master.join(options.port)     print "joining completed,serving..."     server = simple_server.make_server('localhost', options.port, WSGIJSONRPCApplication(instance=node))     server.serve_forever()   except Exception,e:     print "not working",e The storage node accepts two arguments,master port and port,the storage node sends a join request to the master when it start with his port so the master can communicate with the storage node. The storage node have two functions,add_entires which is used to write new log entries to the local store(in this case a list) and get_entries which is used to retrieve entries by timestamps(dts parameter). Now for the master node code: from optparse import OptionParser from random import sample from collections import defaultdict from wsgiref import simple_server from wsgi_jsonrpc import json_tools,WSGIJSONRPCApplication def get_proxy(port):     return json_tools.ServerProxy("http://localhost:%d" %port) class MasterNode(object):     def __init__(self,options):         self.options = options         self.servers = set()         self.dt_to_servers = {}         self.dts = []     def join(self,port):         self.servers.add(port)     def add_entries(self,data,dt):         print data,dt         servers = sample(self.servers,options.replication_factor)         for server in servers:             get_proxy(server).add_interval(data,dt)         self.dt_to_servers[dt] = servers         self.dts.append(dt)     def get_range(self,start_dt,end_dt):         #should use binary search!         servers_dts = defaultdict(list)         for dt in self.dts:           if dt >= start_dt and dt <= end_dt:             servers_dts[sample(self.dt_to_servers[dt],1)[0]].append(dt)         dts_data = {}         for server,dts in servers_dts.iteritems():           response = get_proxy(server).get_intervals(dts)           for data,dt in response['result']:             dts_data[dt] = data         return dts_data if __name__ == "__main__":      parser = OptionParser()     parser.add_option("-p", "--port", dest="port",help="port",type="int")   parser.add_option("-r", "--replication-factor", dest="replication_factor",help="the replication factor for entries", type="int",default=3)   (options, args) = parser.parse_args()   print "running..."   server = simple_server.make_server('localhost', options.port, WSGIJSONRPCApplication(instance=MasterNode(options)))   server.serve_forever() The master have a join method,this is the method that is called by the storage nodes when joining the cluster,the master register the ports of the storage nodes in the servers set. The other two method are: get_range - return all the entires between start_dt and end_dt add_entries - add the entries that happened in dt(timestamp) to the distributed store. Both of them do it in a distributed matter,adding a entry get copied to N(the replication factor) different storage node. Getting entries by a datetime range is done by getting the entries from the nodes that contain the data. This implementation have some limitations: it assumes that all nodes are in the same machine on different ports(this is done on purpose since this is a demonstration),it can be easily fixed by storing full addresses instead of ports. it doesn`t detect dead nodes,it will send requests to dead nodes this can be fixed by adding a heartbeat system to check each node state. new nodes that join the cluster start getting data written to them but they should get data from other nodes first in order to balance the load in the cluster(this is called bootstrapping). All in all I think this is a good start for anyone who is looking to build a distributed store.
I have problem with separating tables with relationships in different files. I want the tables below to be in three separate files and to import TableA in third party page, but I can not manage the load order. In most of the time I'm receiving the following error. sqlalchemy.exc. InvalidRequestError: When initializing mapper Mapper|TableA|tablea, expression 'TableB' failed to locate a name ("name 'TableB' is not defined"). If this is a classname, consider adding this relationship() to the class after both dependent classes have been defined. class TableA(Base): __tablename__ = "tablea" id = Column(Integer, primary_key=True) name = Column(String) tableB = relationship("TableB", secondary = TableC.__table__) class TableB(Base): __tablename__ = "tableb" id = Column(Integer, primary_key=True) name = Column(String) class TableC(Base): __tablename__ = "tableab" tableAId = Column("table_a_id", Integer, ForeignKey("TableA.id"), primary_key=True) tableBId = Column("table_b_id", Integer, ForeignKey("TableB.id"), primary_key=True)
I am running CherryPy as a webserver on a remote Linux machine. End users access a website over the internet which the CherryPy instance serves. So far, so good. Now, I want to have a dev version of the site, running on the same machine but on a different port, so that I can develop and test without disturbing the prod website. I need to do development on the same machine because it contains a lot of data which would not be practical to move to a different machine. The code for the dev instance is in a different directory, of course, so as I edit the dev code it does not trigger a reload for the prod instance or affect the prod instance in any way. However, if I configure my dev instance to run on a different port than the prod instance and try to start up the dev instance, I get: Traceback (most recent call last): File "/usr/lib/python2.7/dist-packages/cherrypy/process/wspbus.py", line 197, in publish output.append(listener(*args, **kwargs)) File "/usr/lib/python2.7/dist-packages/cherrypy/_cpserver.py", line 151, in start ServerAdapter.start(self) File "/usr/lib/python2.7/dist-packages/cherrypy/process/servers.py", line 167, in start wait_for_free_port(*self.bind_addr) File "/usr/lib/python2.7/dist-packages/cherrypy/process/servers.py", line 410, in wait_for_free_port raise IOError("Port %r not free on %r" % (port, host)) IOError: Port 8080 not free on '127.0.0.1' Neither the dev nor prod instance are running on port 8080. The prod instance is on SSL (443) and the dev instance I can put on an arbitrary port (let's suppose it's on 98765). I haven't specified 8080 anywhere in any config file. So I didn't expect there to be a port conflict, However, it appears that for one instance, CherryPy spawns 2 processes- I guess there is some sort of parent-child relationship- and one of the processes is somehow listening on localhost:8080. Because this happens for both instances I try to launch, the second one is unable to launch because there ends up being this conflict on 8080. I have tried several things, such as turning off autoreloading and timeout monitoring, in the hopes that CherryPy would not launch the process associated with 8080, but no luck so far. Thanks in advance for any help!
Confirmed. Thanks for your feedback. Here's patch to fix it: diff -r 4cf524236552 libs/ldaplib/user.py --- libs/ldaplib/user.py Tue May 03 10:07:17 2011 +0800 +++ libs/ldaplib/user.py Wed May 04 00:22:32 2011 +0800 @@ -833,10 +833,8 @@ if self.transport == '': # Remove attr. mod_attrs += [(ldap.MOD_REPLACE, 'mtaTransport', None)] - elif self.transport != self.defaultTransport: + else: mod_attrs += [(ldap.MOD_REPLACE, 'mtaTransport', self.transport)] - else: - pass # Enabled services. self.enabledService = [ Steps to apply this patch: - Save above code as file "transport.patch". - Upload to your server which has iRedAdmin-Pro-LDAP-1.5.0 running. We assume it's /tmp/transport.patch. - Change current working directory to root directory of iRedAdmin-Pro-LDAP-1.5.0. e.g. # cd /var/www/iRedAdmin-Pro-LDAP-1.5.0/ - Verify patch, make sure it can be applied without error: # patch -p0 --dry-run < /tmp/transport.patch patching file libs/ldaplib/user.py If you got same output message as above, it's safe to apply it with below command. If not, please ***STOP*** here and do ***NOT*** try below command. - Patch it: # patch -p0 < /tmp/transport.patch patching file libs/ldaplib/user.py
Test-Driven Development in Python by Jason Diamond 12/02/2004 Introduction Python's unittest Module Motivation Sample Input Getting Started Baby Steps Refactoring Conclusion Introduction Test-driven development is not about testing. Test-driven development is about development (and design), specifically improving the quality and design of code. The resulting unit tests are just an extremely useful by-product. That's all I'm going to tell you about test-driven development. The rest of this article will show you how it works. Come work on a project with me; we'll build a very simple tool together. I'll make mistakes, fix them, and change designs in response to what the tests tell me. Along the way, we'll throw in a few refactorings, design patterns, and object-oriented design principles. To make this project fun, we'll do it in Python. Python is an excellent language for test-driven development because it (usually) does exactly what you want it to without getting in your way. The standard library even comes with everything you need in order to start developing TDD-style. I assume that you're familiar with Python but not necessarily familiar with test-driven development or Python's unittest module. You need to know only a little in order to start testing. Python's unittest Module Since version 2.1, Python's standard library has included a unittest module, based on JUnit (by Kent Beck and Erich Gamma), the de facto standard unit test framework for Java developers. Formerly known as PyUnit, it also runs on Python versions prior to 2.1 with a separate download. Let's jump right in. Here's a "unit" and its tests--all in one file: import unittest # Here's our "unit". def IsOdd(n): return n % 2 == 1 # Here's our "unit tests". class IsOddTests(unittest.TestCase): def testOne(self): self.failUnless(IsOdd(1)) def testTwo(self): self.failIf(IsOdd(2)) def main(): unittest.main() if __name__ == '__main__': main() Lights Throughout this article, I'll use a traffic light to show the state of the tests. Green indicates that the tests pass, and red warns that they fail. A shining yellow light indicates a problem that prevents us from completing a test. TDD practitioners often talk about receiving a "green light" or "green bar" from the graphical test runner that comes with JUnit. Methods whose names start with the string test with one argument (self) in classes derived from unittest.TestCase are test cases. In the above example, testOne and testTwo are test cases. Grouping related test cases together, test fixtures are classes that derive from unittest.TestCase. In the above example, IsOddTests is a test fixture. This is true even though IsOddTests derives from a class called TestCase, not TestFixture. Trust me on this. Test fixtures can contain setUp and tearDown methods, which the test runner will call before and after every test case, respectively. Having a setUp method is the real justification for fixtures, because it allows us to extract common setup code from multiple test cases into the one setUp method. In Python we typically don't need a tearDown method, because we can usually rely on Python's garbage collection facilities to clean up our objects for us. When testing against a database, however, tearDown could be useful for closing connections, deleting tables, and so on. Looking back at our example, the main function defined in the unittest module makes it possible to execute the tests in the same manner as executing any other script. This function examines sys.argv, making it possible to supply command-line arguments to customize the test output or to run only specific fixtures or cases (use --help to see the arguments). The default behavior is to run all test cases in all test fixtures found in the file containing the call to unittest.main. Executing the test script above should produce output that resembles: .. ---------------------------------------------------------------------- Ran 2 tests in 0.000s OK If the second test had failed, the output would have looked something like this: .F ====================================================================== FAIL: testTwo (__main__.IsOddTests) ---------------------------------------------------------------------- Traceback (most recent call last): File "C:\jason\projects\tdd-py\test.py", line 14, in testTwo self.failIf(IsOdd(2)) File "C:\Python23\lib\unittest.py", line 274, in failIf if expr: raise self.failureException, msg AssertionError ---------------------------------------------------------------------- Ran 2 tests in 0.000s FAILED (failures=1) Typically, we wouldn't have the tests and the unit being tested in the same file, but it doesn't hurt to start out that way and then extract the code or the tests later. Motivation Guess what I have trouble remembering to do: 0 0 * * * [ `date +\%m` -ne `date -d +4days +\%m` ] \ && mail -s 'Pay the rent!' me-and-my-wife@example.org < /dev/null Related Reading That little puzzle is a line out of my crontab that emails me a reminder to pay the rent on the last four days of each month. Pathetic? Probably. It works, though. I haven't been late paying rent since I started using it. As clever as I thought I was for coming up with this, it wasn't practical for everything--especially for events that occur only once. Also, there's no way I could teach my wife enough bash scripting techniques in order to add a reminder to our calendar. Most people use a good old-fashioned wall calendar for this type of thing. That's not techno-geeky enough for me. I could use Outlook or Evolution or some productivity application, but that would open up a whole new can of worms. We don't use just one computer. We both use multiple computers and operating systems at home and at work. How could we easily synchronize all of those machines? It was after realizing that our email is available to us no matter where we were that I hit upon the motivation for my project. The email reminding me to pay the rent was with me no matter what machine I'm on because I always check my email via IMAP, so my email is accessible from everywhere. Why not email the upcoming events in my calendar to me just like my reminder to pay the rent? Brilliant, I thought. I know just the tools that can do this, too: the BSD calendar application and the new kid on the block, pal. My wife and I have a private wiki that we use for keeping track of notes. It's great. Despite the fact that my wife's an accountant and not a geek, she has no trouble using it. I figured we could use the wiki to edit our calendar file. I would write a little cron job to fetch the calendar file--probably using wget--from the wiki and pipe that into whatever tool best fit our needs. Unfortunately, after looking at both calendar and pal, I discovered that neither was what I was looking for. The calendar file format requires a <tab> character between dates and descriptions. Since I wanted to use our personal wiki to edit the calendar file, inserting <tab> characters would be an issue (upon hitting <tab>, focus jumps out of the text area to the next form control). calendar also doesn't support any of the fancy output options that pal does. The pal format was much too geeky for even me to want to use, and it didn't support the one really important use case I had so far: setting a reminder for the last day of the month. Sample Input My wife and I sat down and came up with something both of us would want to use. Here are some examples: 30 Nov 2004: Dinner with the Darghams. April 10: Happy Anniversary! Wednesday: Piano lesson. Last Thursday: Goody night at book study. Yum. -1: Pay the rent! Unlike the calendar format, a colon separates dates from descriptions. How Pythonic. Like the calendar format, omitted certain fields are wildcards. The April 10 event happens every year. The Last Thursday event happens on the final Thursday of every month of every year. The -1 event happens on the last day of every month of every year too. I took this idea from Python's array subscript syntax, where foo[-1] selects the last element in the foo array. I thought it was a little geeky, but my wife understood it right away. My goal is to write a small application that can run from cron to read a file in this format and email my wife and me the events we have scheduled for the next seven days. That shouldn't be too hard, should it? From this point on, I'm writing this article in real time, having contrived nothing. I didn't write the code first and then write the article--I'm writing the article as I write the code. Yes, I expect to make mistakes. In fact, I'm counting on it. Making mistakes is the best way to learn. Getting Started Being test infected means that I must write this tool by writing all of my unit tests before writing the code I expect the tests to exercise. The first thing I do when starting a new project is to create an empty fixture that fails: import unittest class FooTests(unittest.TestCase): def testFoo(self): self.failUnless(False) def main(): unittest.main() if __name__ == '__main__': main() I do this out of habit, just to make sure I have everything typed in correctly and to test that the test runner can find the fixture. Notice the class named FooTests and its testFoo method. At this point I have no idea what I'm going to test first. I just want to make sure that I have everything ready once things get going. Let's start out easy and test the first example from above with the full day, month, and year specified for the event. In order to create this test, I need know what to test. Am I testing a class? A function? This is where we put on our designer hats for a brief moment and try to use our experience and intuition to come up with some piece to the puzzle that will help us reach our goal. It's OK if we make a mistake here; the tests will reveal that right away, before we invest too much in this design. We certainly don't want to draft any documents filled with diagrams. Save those for later, after we have a clue about what will actually work. For this project, I should probably create objects that can say whether they "match" a given date. These objects will act as a "pattern" for dates. (I'm using regular expressions as a metaphor here.) Eventually, I'll have to write a parser that will read in a file and create these pattern objects, but I'll do that later. These pattern objects are probably an easier place to start. There might be multiple types of patterns--but I won't think about that now, because I could be wrong. Instead, I'll start coding so I can let it tell me what it wants to become: def testMatches(self): p = DatePattern(2004, 9, 28) d = datetime.date(2004, 9, 28) self.failUnless(p.matches(d)) Notice that I changed the name of the method from testFoo to something more appropriate, because I now have an idea about what to test. I've also invented a class name, DatePattern, and a method name, matches. (The datetime module is part of Python 2.3 and up--I had to import it at the top of my file in order to use it.) This test, of course, fails miserably--the DatePattern class doesn't even exist yet! But I at least know now the name of the class I need to implement. I also know the name and signature of one of its methods and the signature for its __init__ method. Here's what I can do with this knowledge: class DatePattern: def __init__(self, year, month, day): pass def matches(self, date): return True Now the test passes! It's time to move on to the next test. You probably think I'm joking, don't you? I'm not. Baby Steps Test-driven development is best when you move in the smallest possible increments. You should only be writing code that makes the current failing test case(s) pass. Once the tests pass, you're done writing code. Stop! The above code is worthless, right? It basically says that every pattern matches every date. How can I justify spending the time to come up with a "real" implementation? By adding another test: def testMatchesFalse(self): p = DatePattern(2004, 9, 28) d = datetime.date(2004, 9, 29) self.failIf(p.matches(d)) We now have one passing test and one failing test. I could change the matches method to return False in order to make this new test case pass, but that would break the old one! I now have no choice but to implement DatePattern correctly so that both tests can pass. Here's what I came up with: class DatePattern: def __init__(self, year, month, day): self.date = datetime.date(year, month, day) def matches(self, date): return self.date == date Both tests now pass. Woo-hoo! I'm not happy with the DatePattern class, though. So far, it's nothing more than a simple wrapper around Python's date class. Why am I not just using date instances for my "patterns"? It might turn out that the DatePattern class is unnecessary, but I'm not going to make that decision on my own. Instead, I'm going to write another test--one that I think will confirm the necessity of the DatePattern class: def testMatchesYearAsWildCard(self): p = DatePattern(0, 4, 10) d = datetime.date(2005, 4, 10) self.failUnless(p.matches(d)) Voilà! This test fails! Why am I so happy about a failing test? My reasoning is simple: this proves that the current implementation of DatePattern is insufficient. It can't be just a simple wrapper around date and therefore can't be just a date. While typing this test, I had to make a decision about how to represent wildcards. What occurred to me first was to use 0. After all, there's no year 0 (contrary to popular belief), month 0, or day 0. This may not have been the best choice, but I'm going to roll with it for now. It's time to make the new test pass (while making sure not to break the old ones): class DatePattern: def __init__(self, year, month, day): self.year = year self.month = month self.day = day def matches(self, date): return ((self.year and self.year == date.year or True) and self.month == date.month and self.day == date.day) To be honest, I'm already starting to feel like I'll need to do some refactoring as I add more wildcard functionality to the class, but I want to write a few more tests first. Let's add a test where the month is a wildcard: def testMatchesYearAndMonthAsWildCards(self): p = DatePattern(0, 0, 1) d = datetime.date(2004, 10, 1) self.failUnless(p.matches(d)) Fixing matches so that the test passes results in this: def matches(self, date): return ((self.year and self.year == date.year or True) and (self.month and self.month == date.month or True) and self.day == date.day) This method is getting uglier every time we touch it--I'm now positive that it will be my first refactoring victim. I now have a test for using wildcards for both years and months. Will I need one for days? A pattern containing nothing but wildcards would match every day. When would that be useful? At this point I can't think of a reason to support wildcard days, so I won't bother writing a test for it. Because of that, I also won't bother implementing any code to support it in the DatePattern class. Remember, code gets written only when there's a failing test that needs the new code in order to pass. This prevents us from writing code that should not exist in our application, which should help keep it from becoming unnecessarily complex. Let's move on. We need to support events that occur on a specified day of every week: def testMatchesWeekday(self): p = DatePattern( Uh, what now? At this point, I realized that the DatePattern class might not be what I want to use for this test. Its __init__ method doesn't accept a weekday. Should I use a different class, or modify the existing one? I decided to modify the existing one for now, as that will require the least amount of work. If this turns out to be a bad idea, I can always refactor later. def testMatchesWeekday(self): p = DatePattern(0, 0, 0, 2) # 2 is Wednesday d = datetime.date(2004, 9, 29) self.failUnless(p.matches(d)) This doesn't pass because DatePattern.__init__ doesn't accept five arguments (counting self). I modified __init__ to look like this: def __init__(self, year, month, day, weekday=0): self.year = year self.month = month self.day = day self.weekday = weekday I gave weekday a default value so that I wouldn't need to update the other test cases. Everything compiles and runs, but the new test case doesn't pass. The astute reader has probably already realized that I'm now passing in 0 for the day argument. There's the wildcard I didn't think I would need--now I need it! Here's my new matches method: def matches(self, date): return ((self.year and self.year == date.year or True) and (self.month and self.month == date.month or True) and (self.day and self.day == date.day or True) and (self.weekday and self.weekday == date.weekday() or True)) Now all of the components of a pattern allow for wildcards. How very interesting. With this new method, testMatchesWeekday passes but testMatchesFalse now fails! What gives? Refactoring I honestly can't tell why testMatchesFalse fails by looking at the code. This is going to call for some simple debugging. Unfortunately, I tried to cram all of the logic for the matches method into one expression (spanning four lines!), so there's no place for me to insert any print statements to help me see which part is failing. It's finally time to do that refactoring I've been wanting to do. The refactoring I want to apply is the Compose Method from Joshua Kerievsky's excellent book, Refactoring to Patterns. By extracting smaller methods from the current matches method, I can not only make matches clearer but also make it possible to debug whichever part is currently causing me grief. This is the result: def matches(self, date): return (self.yearMatches(date) and self.monthMatches(date) and self.dayMatches(date) and self.weekdayMatches(date)) def yearMatches(self, date): if not self.year: return True return self.year == date.year def monthMatches(self, date): if not self.month: return True return self.month == date.month def dayMatches(self, date): if not self.day: return True return self.day == date.day def weekdayMatches(self, date): if not self.weekday: return True return self.weekday == date.weekday() Code Pickiness I recently read a weblog post by Ian Bicking about what he considers to be code smells in Python code. The matches method is now much clearer, don't you agree? It might seem like a ridiculous thing to do, but writing intention-revealing code is much more important than being clever. I was trying to be too clever before and it caused a bug--one that I wouldn't have come across if I had done this from the beginning. After applying this refactoring and rerunning the tests, I expected to see the testMatchesFalse test still failing, but it's now passing. Somewhere in my original logic I made an error, and I have no idea where it was--I'll leave finding it as an exercise for the reader. In the meantime, not only do I have simpler code now but it also actually works the way I expect it to. Take that! Would I have noticed this bug without tests? I have no doubt that I would, but how long would it have been before I realized that this was a problem? With my unit tests, I noticed it immediately, so I knew exactly what to fix. Wildcards essentially work for all of the components I'm testing so far. This is good, but I think the next test will cause trouble. It starts out innocently enough: def testMatchesLastWeekday(self): p = DatePattern(0, 0, 0, 3 Er, I'm stuck again. In case it's not obvious (and it's not--why didn't Python's datetime module define constants for weekdays?), the 3 represents Thursday. How do I indicate that I only want to match the last Thursday in a month? Do I need to add yet another argument to DatePattern.__init__? This is where that sneaking suspicion in the back of my head is finally starting to warrant some closer attention. I might be trying to cram too much functionality into one class. Conclusion I haven't written much code yet, but that's a good thing, since it seems that the code I have written might not have been sufficient for what I want to do with it. Without the tests, I might not have discovered what a mess I was writing until it was too late. At this point, I haven't invested too much time into the DatePattern class, so I won't feel bad about throwing it away if that's what I'll need to do. I have some ideas about how to restructure the code so that it's as simple and yet as functional as I want it to be, but we're going to have to save those for Part 2 of this article, which will be published shortly. Jason Diamond is a consultant specializing in C++, C#, and XML, and is located in sunny Southern California. Return to the Python DevCenter.
The first thing to do after a successful completion of the file dialog is ask the dialog what the selected pathname was, and then use this to modify the frame's title and to open a BookSet file. Take a look at the next line. It reenables the BookSet menu since there is now a file open. It's really two statements in one and is equivalent to these two lines: menu = self.GetMenuBar() menu.EnableTop(1, true) Since it makes sense to actually let the user see something when they ask to open a file, you should create and show one of the views in the last bits of the OnMenuOpen handler above. We'll take a look at that next. wxListCtrl The Journal view consists of a wxListCtrl with a single-line summary for each transaction. It's placed inside a wxMDIChildFrame and since it's the only thing in the frame, don't worry about setting or maintaining the size, the frame does it automatically. (Unfortunately, since some platforms send the first resize event at different times, sometimes the window shows up without its child sized properly.) Here's a simple workaround: class JournalView(wxMDIChildFrame): def _ _init_ _(self, parent, bookset, editID): wxMDIChildFrame._ _init_ _(self, parent, -1, "") self.bookset = bookset self.parent = parent   tID = wxNewId() self.lc = wxListCtrl(self, tID, wxDefaultPosition, wxDefaultSize, wxLC_REPORT) ## Forces a resize event to get around a minor bug... self.SetSize(self.GetSize())   self.lc.InsertColumn(0, "Date") self.lc.InsertColumn(1, "Comment") self.lc.InsertColumn(2, "Amount")   self.currentItem = 0 EVT_LIST_ITEM_SELECTED(self, tID, self.OnItemSelected) EVT_LEFT_DCLICK(self.lc, self.OnDoubleClick)   menu = parent.MakeMenu(true) self.SetMenuBar(menu) EVT_MENU(self, editID, self.OnEdit) EVT_CLOSE(self, self.OnCloseWindow)   self.UpdateView() Figure 20-11 shows the application is progressing nicely and starting to look like a serious Windows application. The wxListCtrl has many personalities, but they should all be familiar to you. Underneath its wxPython wrappers, it's the same control used in Windows Explorer in the right side panel. All the same options are available: large icons, small icons, list mode, and the report mode used here. You define the columns with their headers and then set some events for the list control. You want to be able to edit the transactions when they are double-clicked, so why are both event handlers needed? The list control sends an event when an item is selected, but it doesn't keep track of double-clicks. The base wxWindow class, on the other hand, reports double-clicks, but it knows nothing about the list control. So by catching both events you can easily implement the functionality you need. Here is the code for the event handlers: def OnItemSelected(self, event): self.currentItem = event.m_itemIndex   def OnDoubleClick(self, event): self.OnEdit() After creating and setting up the list control, you create a menubar for this frame. Here you call the menu-making method in the parent, asking it to add the Edit Transaction menu item. The last thing the _ _init_ _ method does is call a method to fill the list control from the BookSet. We've split this into a separate method so it can be called independently whenever the BookSet data changes. Here's the UpdateView method: def UpdateView(self): self.lc.DeleteAllItems() for x in range(len(self.bookset)): trans = self.bookset[x] self.lc.InsertStringItem(x, trans.getDateString()) self.lc.SetStringItem(x, 1, trans.comment) self.lc.SetStringItem(x, 2, str(trans.magnitude()))   self.lc.SetColumnWidth(0, wxLIST_AUTOSIZE) self.lc.SetColumnWidth(1, wxLIST_AUTOSIZE) self.lc.SetColumnWidth(2, wxLIST_AUTOSIZE)   self.SetTitle("Journal view - %d transactions" % len(self.bookset)) Putting data in a list control is fairly easy; just insert each item. For the report mode, you insert an item for the first column and then set values for the remaining columns. For each column in the example, just fetch some data from the transaction and send it to the list control. If you were using icons or combination of icons and text, there are different methods to handle that. Now that there's data in the list control, you should resize the columns. You can either specify actual pixel widths or have the list auto-size the columns based on the widths of the data. The last thing the JournalView class needs to do is to enable the editing of the transactions. We saw previously that when an item is double-clicked, a method named OnEdit is invoked. Here it is: def OnEdit(self, *event): if self.currentItem: trans = self.bookset[self.currentItem] dlg = EditTransDlg(self, trans, self.bookset.getAccountList()) if dlg.ShowModal() == wxID_OK: trans = dlg.GetTrans() self.bookset.edit(self.currentItem, trans) self.parent.UpdateViews() dlg.Destroy() This looks like what we did with the file dialog in the main frame, and indeed you will find yourself using this pattern quite often when using dialogs. The one item to notice here is the call to UpdateViews() in the parent window. This is how to manage keeping all the views of the BookSet up to date. Whenever a transaction is updated, this method is called and then loops through all open views, telling the views to update themselves with their UpdateView() method. Constraints There is a class called wxLayoutConstraintsthat allows the specification of a window's position and size in relationship to its siblings and its parent. EachwxLayoutContraintsobject is composed of eightwxIndividualLayoutConstraintobjects, which define different sorts of relationships, such as which window is above this window, what is the relative width of this window, etc. You usually have to specify four of the eight individual constraints in order for the window to be fully constrained. For example, this button will be positioned in the center of its parent and will always be 50% of the parent's width: b = wxButton(self.panelA, 100, ' Panel A `) lc = wxLayoutConstraints() lc.centreX.SameAs (self.panelA, wxCentreX) lc.centreY.SameAs (self.panelA, wxCentreY) lc.height.AsIs () lc.width.PercentOf (self.panelA, wxWidth, 50) b.SetConstraints(lc); Layout algorithm The class named wxLayoutAlgorithmimplements layout of subwindows in MDI or SDI frames. It sends awxCalculateLayoutEventto children of the frame, asking them for information about their size. Because the event system is used this technique can be applied to any window, even those that aren't necessarily aware of the layout classes. However, you may wish to usewxSashLayoutWindowfor your subwindows since this class provides handlers for the required events and accessors to specify the desired size of the window. The sash behavior in the base class can be used, optionally, to make the windows user-resizable.wxLayoutAlgorithmis typically used in IDE style of applications, where there are several resizable windows in addition to the MDI client window or other primary editing window. Resizable windows might include toolbars, a project window, and a window for displaying error and warning messages. Sizers In an effort to simplify the programming of simple layouts, a family of wxSizerclasses has been added to thewxPythonlibrary. These are classes that are implemented in pure Python instead of wrapping C++ code fromwxWindows. They are somewhat reminiscent of the layout managers from Java in that you select the type of sizer you want and then add windows or other sizers to it, and they all follow the same rules for layout. For example, this code fragment creates five buttons that are laid out horizontally in a box, and the last button is allowed to stretch to fill the remaining space allocated to the box: box = wxBoxSizer(wxHORIZONTAL) box.Add(wxButton(win, 1010, "one"), 0) box.Add(wxButton(win, 1010, "two"), 0) box.Add(wxButton(win, 1010, "three"), 0) box.Add(wxButton(win, 1010, "four"), 0) box.Add(wxButton(win, 1010, "five"), 1) Resources The wxWindowslibrary has a simple dialog editor available that can assist with the layout of controls on a dialog and generates a portable cross-platform resource file. This file can be loaded into a program at runtime and transformed on the fly into a window with the specified controls on it. The only downfall with this approach is that you don't have the opportunity to subclass the windows that are generated, but if you can do everything you need with existing control types and event handlers, it should work out great. Eventually, there will be awxPython-specific application builder tool that will generate either a resource type of file or actual Python source code for you. Brute force Finally, there is the brute-force mechanism of specifying the exact position of every component programmatically. Sometimes the layout needs of a window don't fit with any of the sizers or don't warrant the complexity of the constraints or the layout algorithm. For these situations, you can fall back on doing it "by hand," but you probably don't want to attempt it for anything much more complex than the Edit Transaction dialog. wxDialog and friends The next step is to build a dialog to edit a transaction. As you've seen, the transaction object is composed of a date, a comment, and a variable number of transaction lines each of which has an account name and an amount. We know that all the lines should add up to zero and that the date should be a valid date. In addition to editing the date and comment, you need to be able to add, edit, and delete lines. Figure 20-12 shows one possible layout for this dialog and the one used for this example. Since there's quite a bit going on here, let's go through the initialization of this class step by step. Here's the first bit: class EditTransDlg(wxDialog): def _ _init_ _(self, parent, trans, accountList): wxDialog._ _init_ _(self, parent, -1, "") self.item = -1 if trans: self.trans = copy.deepcopy(trans) self.SetTitle("Edit Transaction") else: self.trans = Transaction() self.trans.setDateString(dates.ddmmmyyyy(self.trans.date)) self.SetTitle("Add Transaction") This is fairly simple stuff. Just invoke the parent class's _ _init_ _ method, do some initialization, and determine if you're editing an existing transaction or creating a new one. If editing an existing transaction, use the Python copy module to make a copy of the object. You do this because you will be editing the transaction in-place and don't want to have any partially edited transactions stuck in the BookSet. If the dialog is being used to add a new transaction, create one, and then fix its date by truncating the time from it. The default date in the transaction includes the current time, but this dialog is equipped to deal only with the date portion.
#2201 Le 22/02/2013, à 08:17 jpdipsy Re : [Conky] Alternative à weather.com (2) Bonjour, je n'apporterai pas d'aide sur les scripts, mais juste pour dire que chez moi l'intégration avec XplanetFX fonctionne parfaitement. Il y a juste un délai de quelques secondes pendant lequel la météo disparaît juste après sa mise à jour : message "mise à jour partielle effectuée en x secondes", effacement de l'écran météo peu après et 4~5 s plus tard apparition des données actualisées. Je suis en train de préparer une maj importante de start-recmeteo et meteo qui résoudra le prob. Il faudra alors faire tourner recmeteo en "standalone" composition="" dans le fichier de cfg et xplanet se chargera du reste comme maintenant pour avoir qu'un seul rafraichissement du fond ATTENTION ne le faites pas maintenant. Surveillez ma signature top départ à v 0.999 (bin c'est toujours ps fini) Hors ligne #2202 Le 22/02/2013, à 10:05 jpdipsy Re : [Conky] Alternative à weather.com (2) @Didier Pour toi interessant ou pas ? ### Vérification répertoire if not path.exists(repsauv): makedirs(repsauv) ### Sauvegarde pid pid = str(getpid()) f = open(repsauv+'/recmeteo.pid', 'w') f.write(pid) f.close() Hors ligne #2203 Le 22/02/2013, à 10:53 ragamatrix Re : [Conky] Alternative à weather.com (2) Bon jpdipsy va encore se moquer... Comment arrette-ton les scripts recmeteo ? j'ai essayé ça ┌─( climatix ) - ( 3.2.0-38-generic ) - ( ~/Accuweather ) └─> start-recmeteo.sh stop Mais c'est pas bon Même pas honte Hors ligne #2204 Le 22/02/2013, à 11:01 jpdipsy Re : [Conky] Alternative à weather.com (2) Bon jpdipsy va encore se moquer... Comment arrette-ton les scripts recmeteo ? j'ai essayé ça<metadata lang=Shell prob=0.06 /> ┌─( climatix ) - ( 3.2.0-38-generic ) - ( ~/Accuweather ) └─> start-recmeteo.sh stop Mais c'est pas bon Même pas honte kill -9 $(ps x|grep "[r]ecmeteo.py" |cut -d ? -f1) pour le moment Hors ligne #2205 Le 22/02/2013, à 11:45 ragamatrix Re : [Conky] Alternative à weather.com (2) ragamatrix a écrit : Bon jpdipsy va encore se moquer... Comment arrette-ton les scripts recmeteo ? j'ai essayé ça<metadata lang=Shell prob=0.06 /> ┌─( climatix ) - ( 3.2.0-38-generic ) - ( ~/Accuweather ) └─> start-recmeteo.sh stop Mais c'est pas bon Même pas honte kill -9 $(ps x|grep "[r]ecmeteo.py" |cut -d ? -f1) pour le moment Merki ! Fouillouillou bin oui j'aurais pas trouver ça tout seul ! (PA P1) A pluche Hors ligne #2206 Le 22/02/2013, à 11:45 carpediem Re : [Conky] Alternative à weather.com (2) @ carpediem, simplifie ce chemin Chemin de travail = /tmp/MeteoLua2 en Chemin de travail = /tmp sa devrais régler ton soucis a ce que je vois dans ton retour terminal. J'ai fait la modif mais ça marche toujours pas je cherche mais je ne comprend pas, apparemment les chemins sont correcte Merci de votre aide et de votre patience carpediem Dernière modification par carpediem (Le 22/02/2013, à 11:45) "Carpe diem quam minimum credula postero" (Cueille le jour présent, en te fiant le moins possible au lendemain.) HORACE Hors ligne #2207 Le 22/02/2013, à 12:19 Phyllinux Re : [Conky] Alternative à weather.com (2) @ carpediem : As tu regardé si tu avais le paquet qui semait la pagaille chez moi : python3-notify2 Cela a débloqué une situation un peu identique à la tienne The ship is sinking normally... Hors ligne #2208 Le 22/02/2013, à 12:22 carpediem Re : [Conky] Alternative à weather.com (2) @ carpediem : As tu regardé si tu avais le paquet qui semait la pagaille chez moi : python3-notify2 Cela a débloqué une situation un peu identique à la tienne En effet il est bien installé "Carpe diem quam minimum credula postero" (Cueille le jour présent, en te fiant le moins possible au lendemain.) HORACE Hors ligne #2209 Le 22/02/2013, à 12:22 Didier-T Re : [Conky] Alternative à weather.com (2) @Didier Pour toi interessant ou pas ? ### Vérification répertoire if not path.exists(repsauv): makedirs(repsauv) ### Sauvegarde pid pid = str(getpid()) f = open(repsauv+'/recmeteo.pid', 'w') f.write(pid) f.close() en fait j'avais déjà regardé sa, mais je n'ai pas encore trouvé de façon d'exploité l'information avec conky, ou meteo.lua, le soucis viens du fait que pour fermer ces processus on les kill. pour ton projet en bash, sa doit pouvoir se faire (a condition d'avoir un script d'extinction et qu'il soit utilisé). Hors ligne #2210 Le 22/02/2013, à 12:23 #2211 Le 22/02/2013, à 12:25 carpediem Re : [Conky] Alternative à weather.com (2) Voici le retour terminal carpediem ~$ conky -c /home/claude/Scripts/MeteoLua2/conkyrc-meteo Conky: forked to background, pid is 21610 carpediem ~$ Conky: desktop window (1e0017b) is subwindow of root window (62) Conky: window type - override Conky: drawing to created window (0x4e00001) Conky: drawing to double buffer Fontconfig warning: "/etc/fonts/conf.d/50-user.conf", line 9: reading configurations from ~/.fonts.conf is deprecated. version = v1.02 web = http://www.accuweather.com/fr/fr/stiring-wendel/135054/weather-forecast/135054 Pévision Nb jours = 10 Pévision Matin = oui Pévision Après Midi = oui Pévision Soirée = oui Pévision Nuit = oui Prévision sur 8 heures = oui nbFoisHuit= 1 Délais = 10 Chemin de travail = /tmp Palier = 20 Chemin de sauvegarde = /home/claude/Scripts/MeteoLua2/sauvegardes Chemin script = /home/claude/Scripts/MeteoLua2/Scripts Notification = oui date: /home/claude/Scripts/MeteoLua2/sauvegardes/CC: Aucun fichier ou dossier de ce type Conky: llua_do_call: function conky_Meteo_horsligne execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:368: /home/claude/Scripts/MeteoLua2/sauvegardes/CC: No such file or directory date: /home/claude/Scripts/MeteoLua2/sauvegardes/CC: Aucun fichier ou dossier de ce type Conky: llua_getstring: function conky_Meteo_Ville didn't return a string, result discarded Conky: llua_do_call: function conky_Meteo_Jour_QPluie execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:894: attempt to index field '?' (a nil value) Conky: llua_getstring: function conky_Meteo_Humidite didn't return a string, result discarded Conky: llua_do_call: function conky_Meteo_HLeverSoleil execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:1009: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_MLeverSoleil execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:1013: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_HCoucherSoleil execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:1021: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_MCoucherSoleil execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:1025: attempt to index field '?' (a nil value) Conky: llua_getstring: function conky_Meteo_CondMeteo didn't return a string, result discarded Conky: llua_do_call: function conky_Meteo_IconeM execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:728: attempt to index field '?' (a nil value) Conky: llua_getstring: function conky_Meteo_VentForce didn't return a string, result discarded Conky: llua_getstring: function conky_Meteo_VentDirP didn't return a string, result discarded Conky: llua_do_call: function conky_Meteo_Jour_IconeM execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:906: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Jour_IconeM execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:906: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Jour_IconeM execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:906: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Jour_IconeM execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:906: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Jour_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:854: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Nuit_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:941: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Jour_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:854: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Nuit_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:941: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Jour_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:854: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Nuit_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:941: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Jour_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:854: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Nuit_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:941: attempt to index field '?' (a nil value) date: /home/claude/Scripts/MeteoLua2/sauvegardes/CC: Aucun fichier ou dossier de ce type /usr/bin/python3: can't find '__main__' module in '/home/claude/Scripts/MeteoLua2/Scripts' lunaison Ok date: /home/claude/Scripts/MeteoLua2/sauvegardes/CC: Aucun fichier ou dossier de ce type date: /home/claude/Scripts/MeteoLua2/sauvegardes/CC: Aucun fichier ou dossier de ce type Conky: llua_getstring: function conky_Meteo_Ville didn't return a string, result discarded Conky: llua_do_call: function conky_Meteo_Jour_QPluie execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:894: attempt to index field '?' (a nil value) Conky: llua_getstring: function conky_Meteo_Humidite didn't return a string, result discarded Conky: llua_do_call: function conky_Meteo_HLeverSoleil execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:1009: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_MLeverSoleil execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:1013: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_HCoucherSoleil execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:1021: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_MCoucherSoleil execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:1025: attempt to index field '?' (a nil value) Conky: llua_getstring: function conky_Meteo_CondMeteo didn't return a string, result discarded Conky: llua_do_call: function conky_Meteo_IconeM execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:728: attempt to index field '?' (a nil value) Conky: llua_getstring: function conky_Meteo_VentForce didn't return a string, result discarded Conky: llua_getstring: function conky_Meteo_VentDirP didn't return a string, result discarded Conky: llua_do_call: function conky_Meteo_Jour_IconeM execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:906: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Jour_IconeM execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:906: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Jour_IconeM execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:906: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Jour_IconeM execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:906: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Jour_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:854: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Nuit_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:941: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Jour_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:854: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Nuit_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:941: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Jour_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:854: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Nuit_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:941: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Jour_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:854: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Nuit_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:941: attempt to index field '?' (a nil value) date: /home/claude/Scripts/MeteoLua2/sauvegardes/CC: Aucun fichier ou dossier de ce type date: /home/claude/Scripts/MeteoLua2/sauvegardes/CC: Aucun fichier ou dossier de ce type Conky: llua_getstring: function conky_Meteo_Ville didn't return a string, result discarded Conky: llua_do_call: function conky_Meteo_Jour_QPluie execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:894: attempt to index field '?' (a nil value) Conky: llua_getstring: function conky_Meteo_Humidite didn't return a string, result discarded Conky: llua_do_call: function conky_Meteo_HLeverSoleil execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:1009: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_MLeverSoleil execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:1013: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_HCoucherSoleil execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:1021: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_MCoucherSoleil execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:1025: attempt to index field '?' (a nil value) Conky: llua_getstring: function conky_Meteo_CondMeteo didn't return a string, result discarded Conky: llua_do_call: function conky_Meteo_IconeM execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:728: attempt to index field '?' (a nil value) Conky: llua_getstring: function conky_Meteo_VentForce didn't return a string, result discarded Conky: llua_getstring: function conky_Meteo_VentDirP didn't return a string, result discarded Conky: llua_do_call: function conky_Meteo_Jour_IconeM execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:906: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Jour_IconeM execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:906: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Jour_IconeM execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:906: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Jour_IconeM execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:906: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Jour_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:854: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Nuit_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:941: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Jour_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:854: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Nuit_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:941: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Jour_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:854: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Nuit_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:941: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Jour_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:854: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Nuit_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:941: attempt to index field '?' (a nil value) date: /home/claude/Scripts/MeteoLua2/sauvegardes/CC: Aucun fichier ou dossier de ce type date: /home/claude/Scripts/MeteoLua2/sauvegardes/CC: Aucun fichier ou dossier de ce type Conky: llua_getstring: function conky_Meteo_Ville didn't return a string, result discarded Conky: llua_do_call: function conky_Meteo_Jour_QPluie execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:894: attempt to index field '?' (a nil value) Conky: llua_getstring: function conky_Meteo_Humidite didn't return a string, result discarded Conky: llua_do_call: function conky_Meteo_HLeverSoleil execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:1009: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_MLeverSoleil execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:1013: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_HCoucherSoleil execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:1021: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_MCoucherSoleil execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:1025: attempt to index field '?' (a nil value) Conky: llua_getstring: function conky_Meteo_CondMeteo didn't return a string, result discarded Conky: llua_do_call: function conky_Meteo_IconeM execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:728: attempt to index field '?' (a nil value) Conky: llua_getstring: function conky_Meteo_VentForce didn't return a string, result discarded Conky: llua_getstring: function conky_Meteo_VentDirP didn't return a string, result discarded Conky: llua_do_call: function conky_Meteo_Jour_IconeM execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:906: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Jour_IconeM execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:906: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Jour_IconeM execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:906: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Jour_IconeM execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:906: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Jour_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:854: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Nuit_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:941: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Jour_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:854: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Nuit_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:941: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Jour_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:854: attempt to index field '?' (a nil value) Conky: llua_do_call: function conky_Meteo_Nuit_Temp execution failed: /home/claude/Scripts/MeteoLua2/Scripts/meteo.lua:941: attempt t "Carpe diem quam minimum credula postero" (Cueille le jour présent, en te fiant le moins possible au lendemain.) HORACE Hors ligne #2212 Le 22/02/2013, à 12:44 Didier-T Re : [Conky] Alternative à weather.com (2) @ carpediem, essaye ceci, et donne le retour, merci. python3 ~/Scripts/MeteoLua2/Scripts/recmeteo.py repsauv=~/Scripts/MeteoLua2/sauvegardes adressWeb=http://www.accuweather.com/fr/fr/stiring-wendel/135054/weather-forecast/135054 nbJour=10 matin=oui apresmidi=oui soiree=oui nuit=oui huitHeures=oui nbFoisHuit=1 interval=10 notify=oui Hors ligne #2213 Le 22/02/2013, à 12:54 jpdipsy Re : [Conky] Alternative à weather.com (2) jpdipsy a écrit : @Didier Pour toi interessant ou pas ? ### Vérification répertoire if not path.exists(repsauv): makedirs(repsauv) ### Sauvegarde pid pid = str(getpid()) f = open(repsauv+'/recmeteo.pid', 'w') f.write(pid) f.close() en fait j'avais déjà regardé sa, mais je n'ai pas encore trouvé de façon d'exploité l'information avec conky, ou meteo.lua, le soucis viens du fait que pour fermer ces processus on les kill. pour ton projet en bash, sa doit pouvoir se faire (a condition d'avoir un script d'extinction et qu'il soit utilisé). vi kill -9 $(cat $repsauv/recmeteo.pid) edit : naturellement suivi de rm $repsauv/recmeteo.pid redit : et dans le pire des cas dans recmeteo tu peux mettre en place une verification qui lit le num du pid sauvegardé et tué l'ancien Dernière modification par jpdipsy (Le 22/02/2013, à 13:06) Hors ligne #2214 Le 22/02/2013, à 12:57 carpediem Re : [Conky] Alternative à weather.com (2) @ carpediem, essaye ceci, et donne le retour, merci. python3 ~/Scripts/MeteoLua2/Scripts/recmeteo.py repsauv=~/Scripts/MeteoLua2/sauvegardes adressWeb=http://www.accuweather.com/fr/fr/stiring-wendel/135054/weather-forecast/135054 nbJour=10 matin=oui apresmidi=oui soiree=oui nuit=oui huitHeures=oui nbFoisHuit=1 interval=10 notify=oui carpediem ~$ python3 ~/Scripts/MeteoLua2/Scripts/recmeteo.py repsauv=~/Scripts/MeteoLua2/sauvegardes adressWeb=http://www.accuweather.com/fr/fr/stiring-wendel/135054/weather-forecast/135054 Traceback (most recent call last): File "/usr/lib/python3/dist-packages/dbus/bus.py", line 175, in activate_name_owner return self.get_name_owner(bus_name) File "/usr/lib/python3/dist-packages/dbus/bus.py", line 361, in get_name_owner 's', (bus_name,), **keywords) File "/usr/lib/python3/dist-packages/dbus/connection.py", line 651, in call_blocking message, timeout) dbus.exceptions.DBusException: org.freedesktop.DBus.Error.NameHasNoOwner: Could not get owner of name 'org.freedesktop.Notifications': no such name During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/claude/Scripts/MeteoLua2/Scripts/recmeteo.py", line 317, in <module> notify2.init('Recmeteo.py') File "/usr/lib/python3/dist-packages/notify2.py", line 96, in init '/org/freedesktop/Notifications') File "/usr/lib/python3/dist-packages/dbus/bus.py", line 241, in get_object follow_name_owner_changes=follow_name_owner_changes) File "/usr/lib/python3/dist-packages/dbus/proxies.py", line 248, in __init__ self._named_service = conn.activate_name_owner(bus_name) File "/usr/lib/python3/dist-packages/dbus/bus.py", line 180, in activate_name_owner self.start_service_by_name(bus_name) File "/usr/lib/python3/dist-packages/dbus/bus.py", line 278, in start_service_by_name 'su', (bus_name, flags))) File "/usr/lib/python3/dist-packages/dbus/connection.py", line 651, in call_blocking message, timeout) dbus.exceptions.DBusException: org.freedesktop.DBus.Error.ServiceUnknown: The name org.freedesktop.Notifications was not provided by any .service files carpediem ~$ nbJour=10 matin=oui apresmidi=oui soiree=oui nuit=oui huitHeures=oui nbFoisHuit=1 interval=10 notify=oui "Carpe diem quam minimum credula postero" (Cueille le jour présent, en te fiant le moins possible au lendemain.) HORACE Hors ligne #2215 Le 22/02/2013, à 13:03 #2216 Le 22/02/2013, à 13:26 jpdipsy Re : [Conky] Alternative à weather.com (2) @Carpediem essai le meme lancement de recmeteo mais sans notify python3 ~/Scripts/MeteoLua2/Scripts/recmeteo.py repsauv=~/Scripts/MeteoLua2/sauvegardes adressWeb=http://www.accuweather.com/fr/fr/stiring-wendel/135054/weather-forecast/135054 nbJour=10 matin=oui apresmidi=oui soiree=oui nuit=oui huitHeures=oui nbFoisHuit=1 interval=10 et fait voir le retour Hors ligne #2217 Le 22/02/2013, à 14:20 carpediem Re : [Conky] Alternative à weather.com (2) @Didier-T python3-dbus est intallé @Carpediem essai le meme lancement de recmeteo mais sans notify python3 ~/Scripts/MeteoLua2/Scripts/recmeteo.py repsauv=~/Scripts/MeteoLua2/sauvegardes adressWeb=http://www.accuweather.com/fr/fr/stiring-wendel/135054/weather-forecast/135054 nbJour=10 matin=oui apresmidi=oui soiree=oui nuit=oui huitHeures=oui nbFoisHuit=1 interval=10 et fait voir le retour carpediem ~$ python3 ~/Scripts/MeteoLua2/Scripts/recmeteo.py repsauv=~/Scripts/MeteoLua2/sauvegardes adressWeb=http://www.accuweather.com/fr/fr/stiring-wendel/135054/weather-forecast/135054 Traceback (most recent call last): File "/usr/lib/python3/dist-packages/dbus/bus.py", line 175, in activate_name_owner return self.get_name_owner(bus_name) File "/usr/lib/python3/dist-packages/dbus/bus.py", line 361, in get_name_owner 's', (bus_name,), **keywords) File "/usr/lib/python3/dist-packages/dbus/connection.py", line 651, in call_blocking message, timeout) dbus.exceptions.DBusException: org.freedesktop.DBus.Error.NameHasNoOwner: Could not get owner of name 'org.freedesktop.Notifications': no such name During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/claude/Scripts/MeteoLua2/Scripts/recmeteo.py", line 317, in <module> notify2.init('Recmeteo.py') File "/usr/lib/python3/dist-packages/notify2.py", line 96, in init '/org/freedesktop/Notifications') File "/usr/lib/python3/dist-packages/dbus/bus.py", line 241, in get_object follow_name_owner_changes=follow_name_owner_changes) File "/usr/lib/python3/dist-packages/dbus/proxies.py", line 248, in __init__ self._named_service = conn.activate_name_owner(bus_name) File "/usr/lib/python3/dist-packages/dbus/bus.py", line 180, in activate_name_owner self.start_service_by_name(bus_name) File "/usr/lib/python3/dist-packages/dbus/bus.py", line 278, in start_service_by_name 'su', (bus_name, flags))) File "/usr/lib/python3/dist-packages/dbus/connection.py", line 651, in call_blocking message, timeout) dbus.exceptions.DBusException: org.freedesktop.DBus.Error.ServiceUnknown: The name org.freedesktop.Notifications was not provided by any .service files carpediem ~$ nbJour=10 matin=oui apresmidi=oui soiree=oui nuit=oui huitHeures=oui nbFoisHuit=1 interval=10 carpediem ~$ Dernière modification par carpediem (Le 22/02/2013, à 14:21) "Carpe diem quam minimum credula postero" (Cueille le jour présent, en te fiant le moins possible au lendemain.) HORACE Hors ligne #2218 Le 22/02/2013, à 14:28 Didier-T Re : [Conky] Alternative à weather.com (2) carpediem, il s'agit d'une seul ligne de commande, je vois que tu la coupe en deux. par contre on vas passer notify=non, comme la suggéré jpdipsy. python3 ~/Scripts/MeteoLua2/Scripts/recmeteo.py repsauv=~/Scripts/MeteoLua2/sauvegardes adressWeb=http://www.accuweather.com/fr/fr/stiring-wendel/135054/weather-forecast/135054 nbJour=10 matin=oui apresmidi=oui soiree=oui nuit=oui huitHeures=oui nbFoisHuit=1 interval=10 notify=non Hors ligne #2219 Le 22/02/2013, à 14:40 Phyllinux Re : [Conky] Alternative à weather.com (2) @ jpdipsy : Hier, le script lancé par XPlanet ne se rafraîchissait pas, mais aujourd'hui, alors que je n'ai rien modifié, il ne se lance plus. Pas d'affichage de la météo en surcouche dans le fond d'écran de XPlanet. EDIT : Le script start-recmeteo.sh lancé 'a la mano' fonctionne cependant, mais pas celui lié à XPlanet. Dernière modification par Phyllinux (Le 22/02/2013, à 14:42) The ship is sinking normally... Hors ligne #2220 Le 22/02/2013, à 15:06 jpdipsy Re : [Conky] Alternative à weather.com (2) @ jpdipsy : Hier, le script lancé par XPlanet ne se rafraîchissait pas, mais aujourd'hui, alors que je n'ai rien modifié, il ne se lance plus. Pas d'affichage de la météo en surcouche dans le fond d'écran de XPlanet. EDIT : Le script start-recmeteo.sh lancé 'a la mano' fonctionne cependant, mais pas celui lié à XPlanet. Je sais j'ai mis le boxon dans les scripts moi ma bécane à flambée fais gaffe Je blague bien évidement je vous met les scripts en fin d'AM je pense Hors ligne #2221 Le 22/02/2013, à 16:21 carpediem Re : [Conky] Alternative à weather.com (2) carpediem, il s'agit d'une seul ligne de commande, je vois que tu la coupe en deux. par contre on vas passer notify=non, comme la suggéré jpdipsy.<metadata lang=INI prob=0.51 /> python3 ~/Scripts/MeteoLua2/Scripts/recmeteo.py repsauv=~/Scripts/MeteoLua2/sauvegardes adressWeb=http://www.accuweather.com/fr/fr/stiring-wendel/135054/weather-forecast/135054 nbJour=10 matin=oui apresmidi=oui soiree=oui nuit=oui huitHeures=oui nbFoisHuit=1 interval=10 notify=non @ jpdipsy, @Didier-T un grand MERCI mes conkys fonctionnent. Encore une petit souci pour le graphique de la pression et de la température qui ne fonctionne toujours pas. encore merci pour ce magnifique travaille Cordialement carpediem "Carpe diem quam minimum credula postero" (Cueille le jour présent, en te fiant le moins possible au lendemain.) HORACE Hors ligne #2222 Le 22/02/2013, à 16:33 Phyllinux Re : [Conky] Alternative à weather.com (2) @ jpdipsy : Je suis en train de travailler sur mon module à afficher. Sur ton baromètre, je vois affiché (en bas) H.R. et un pourcentage. Je pense qu'il s'agit du taux d'humidité ? Or, dans ton module, je ne vois jamais comment est appelée cette valeur. Or, en ce qui me concerne, je fais afficher l'icône de la condition du moment, et du coup, je voudrais faire disparaître cet affichage H.R. : XX%. Où dois je supprimer cette partie de code. Toujours sur le baromètre, comment changer la couleur de la flèche, qui est en bleu, et que je voudrais plutôt en noir. Car je ne vois nulle part dans ton script de partie sur la création du baromètre. Merci The ship is sinking normally... Hors ligne #2223 Le 22/02/2013, à 16:52 ragamatrix Re : [Conky] Alternative à weather.com (2) @ jpdipsy : Je suis en train de travailler sur mon module à afficher. Sur ton baromètre, je vois affiché (en bas) H.R. et un pourcentage. Je pense qu'il s'agit du taux d'humidité ? Or, dans ton module, je ne vois jamais comment est appelée cette valeur. Or, en ce qui me concerne, je fais afficher l'icône de la condition du moment, et du coup, je voudrais faire disparaître cet affichage H.R. : XX%. Où dois je supprimer cette partie de code. Toujours sur le baromètre, comment changer la couleur de la flèche, qui est en bleu, et que je voudrais plutôt en noir. Car je ne vois nulle part dans ton script de partie sur la création du baromètre. Merci Salut En fait je crois qu'avec ce barometre les aiguilles changent de couleur en fonction de la tendance. Pour HR humidité relative je crois que c'est dans le script meteo vers ligne:382 #ajouter la tendance convert $Barometre/base.png -background transparent -font $fonte -pointsize 45 -gravity South \ -stroke white -strokewidth 2 -annotate +0+110 "H.R: $HR %" \ -stroke none -fill blue -annotate +0+110 "H.R: $HR %" \ $reptemp/Barometre.png composite -compose Over -gravity Center $reptemp/aiguille_rot.png $reptemp/Barometre.png $reptemp/Barometre.png mogrify -resize $taille! $reptemp/Barometre.png composite -blend 0x$opacite null: $reptemp/Barometre.png -matte $reptemp/Barometre.png Hors ligne #2224 Le 22/02/2013, à 17:02 Phyllinux Re : [Conky] Alternative à weather.com (2) OK, merci pour ces infos. En fait je ne cherchais que dans le script macomposition.sh, d'où le fait que je trouvais pas. Pour la couleur de la flèche, je confirme car, de bleu, elle vient de passer à rouge toute seule ! The ship is sinking normally... Hors ligne #2225 Le 22/02/2013, à 17:15 Didier-T Re : [Conky] Alternative à weather.com (2) Didier-T a écrit : carpediem, il s'agit d'une seul ligne de commande, je vois que tu la coupe en deux. par contre on vas passer notify=non, comme la suggéré jpdipsy.<metadata lang=INI prob=0.51 /> python3 ~/Scripts/MeteoLua2/Scripts/recmeteo.py repsauv=~/Scripts/MeteoLua2/sauvegardes adressWeb=http://www.accuweather.com/fr/fr/stiring-wendel/135054/weather-forecast/135054 nbJour=10 matin=oui apresmidi=oui soiree=oui nuit=oui huitHeures=oui nbFoisHuit=1 interval=10 notify=non @ jpdipsy, @Didier-Tun grandMERCImes conkys fonctionnent. Encore une petit souci pour le graphique de la pression et de la température qui ne fonctionne toujours pas. encore merci pour ce magnifique travaille Cordialement carpediem il faudrait que tu me re-fournisse tes scripts, la position a changé pour les données (la facon de les récupérer aussi) Je me demande si je vais pas virer la notification, trop de soucis, pour un intérêt limité (enfin a mon gout) Hors ligne
I want to upload and get the result from this website. http://cello.life.nctu.edu.tw/ I tried from poster.encode import multipart_encode from poster.streaminghttp import register_openers import urllib2 register_openers() params = ({"file": open("xaa", "r"), "seqtype": "prot", "species": "eu"}) datagen, headers = multipart_encode(params) request = urllib2.Request("http://cello.life.nctu.edu.tw/cgi/main.cgi", datagen, headers) print urllib2.urlopen(request).read() but I got an error Traceback (most recent call last): File "client.py", line 9, in <module> a = opener.open("http://cello.life.nctu.edu.tw/cgi/main.cgi", params) File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib2.py", line 400, in open response = meth(req, response) File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib2.py", line 513, in http_response 'http', request, response, code, msg, hdrs) File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib2.py", line 438, in error return self._call_chain(*args) File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib2.py", line 372, in _call_chain result = func(*args) File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib2.py", line 521, in http_error_default raise HTTPError(req.get_full_url(), code, msg, hdrs, fp) urllib2.HTTPError: HTTP Error 500: Internal Server Error I have no idea what is wrong here. Could someone point me out. ? Thanks. btw. The xaa file looks like this http://pastebin.com/7VK3vvwC
lanzrg Scraper un site simple (sans AJAX) en python + download ? Bonjour, Je souhaite scraper le site de sublime text 3 afin de récupérer l'url de la dernière version stable. Pour ensuite la télécharger. Tout ceci en Python évidemment. Ce que j'ai et qui à l'air de fonctionner (n'hésitez pas à me dire que c'est à chier ). import lxml.etree import lxml.html import re # HTML site = "http://www.sublimetext.com/3" html = lxml.html.parse(site) # Ce que je voudrais avoir : # Sublime Text 3 is currently in beta. The latest build is 3047. # Helas j'ai une erreur. elem = html.cssselect('.post > p:nth-child(2)') print elem # Ce que je voudrais avoir : # 3047 # Je n'y suis pas encore. # Reconstruire l'url pour le download. # Et enfin telecharger le deb ou exe # Je ne sais pas trop comment y parvenir. # Particulierement sur la partie recuperation du filename. J'ai une erreur lors de mon dernier print. A quoi est-ce du ? AttributeError: 'lxml.etree._ElementTree' object has no attribute 'cssselect' Merci d'avance Dernière modification par lanzrg (Le 20/10/2013, à 14:16) Hors ligne k3c Re : Scraper un site simple (sans AJAX) en python + download ? On peut voir ton code ? Tu veux aller à http://www.sublimetext.com/3 et ensuite tu veux le build 3033 ou 3047 ou ... ? Acer Aspire One 150 8,9 " Norhtec avec une Clé Usb bootable http://www.norhtec.com/products/mcsr/index.html Toshiba Satellite L750 Hors ligne grim7reaper Re : Scraper un site simple (sans AJAX) en python + download ? Salut, Ton problème c’est que tu oublies de faire un getroot sur ta page HTML parsée pour avoir l’élément root sur lequel appliquer ton sélecteur CSS. Voilà un rapide script d’exemple (attention, aucune gestion d’erreur et je pars sur le principe que tu veux télécharger la version Ubuntu 64-bit) : #!/usr/bin/env python3 from re import search from urllib.request import urlretrieve from lxml.html import parse # Website address. URL = 'http://www.sublimetext.com/3' # For Ubuntu 64-bit DL_PATTERN = 'http://c758482.r82.cf2.rackcdn.com/sublime_text_3_build_XXX_x64.tar.bz2' # Retrieve the page. page = parse(URL).getroot() # Extract the build information. build_info = page.cssselect('.post > p:nth-child(2)')[0].text # Extract latest build number. matches = search(r'(\d+)\.$', build_info) version = matches.group(1) # Build the download URL. dl_url = DL_PATTERN.replace('XXX', version) # Download the file. urlretrieve(dl_url, 'SublimeText3.tar.bz2') Hors ligne
I am Trying to get Contacts out of Outlook using Python.The code is : import win32com.client import pywintypes o = win32com.client.Dispatch("Outlook.Application") ns = o.GetNamespace("MAPI") profile = ns.Folders.Item("Outlook") contacts = profile.Folders.Item("Contacts") but its giving error like this: Traceback (most recent call last): File "my_pro.py", line 7, in <module> profile = ns.Folders.Item("Outlook") File "C:\DOCUME~1\Manoj\LOCALS~1\Temp\gen_py\2.7\00062FFF-0000-0000-C000-00000 0000046x0x9x3\_Folders.py", line 70, in Item ret = self._oleobj_.InvokeTypes(81, LCID, 1, (9, 0), ((12, 1),),Index pywintypes.com_error: (-2147352567, 'Exception occurred.', (4096, u'Microsoft Of fice Outlook', u'The operation failed. An object could not be found.', None, 0, -2147221233), None) I cant understand why its throwing error as I Do have a profile named Outlook Thanks
I have a list of terms in a file that I want to read, modify each term and output the new terms to a new file. The new terms should look like this: take the first two characters of the original term put them in quotes, add a '=>' then the original term in quotes and a comma. This is the code I'm using: def newFile(newItem): original = line first = line[0:2] newItem = first+'=>'+original+',' return newItem input = open('/Users/george/Desktop/input.txt', 'r') output = open('/Users/george/Desktop/output.txt', 'w') collector = '' for line in input: if len(line) != 0: collector = newFile(input) output.write(''.join(collector)) if len(line) == 0: input.close() output.close() For example: If the terms in the input.txt file are these: term 1 term 2 term 3 term 4 The output is this: te=>term 1,te=>term 2,te=>term 3,te=>term 4 , How can I add '' to the first two letters and to the term? And why the second, third and forth terms have ,te not te like it should?
I'll bet it's case sensitive, like PocketSphinx or something. I'd search for it using python interactive shell's help() func.. matthew@speedy:~/openstack/nova$ python Python 2.7.3 (default, Sep 26 2012, 21:51:14) [GCC 4.7.2] on linux2 Type "help", "copyright", "credits" or "license" for more information. >>> help() Welcome to Python 2.7! This is the online help utility. If this is your first time using Python, you should definitely check out the tutorial on the Internet at http://docs.python.org/2.7/tutorial/. Enter the name of any module, keyword, or topic to get help on writing Python programs and using Python modules. To quit this help utility and return to the interpreter, just type "quit". To get a list of available modules, keywords, or topics, type "modules", "keywords", or "topics". Each module also comes with a one-line summary of what it does; to list the modules whose summaries contain a given word such as "spam", type "modules spam". help> modules phinx Here is a list of matching modules. Enter any module name to get more help. matplotlib.sphinxext matplotlib.sphinxext.ipython_console_highlighting - reST directive for syntax-highlighting ipython interactive sessions. matplotlib.sphinxext.mathmpl matplotlib.sphinxext.only_directives matplotlib.sphinxext.plot_directive - A directive for including a matplotlib plot in a Sphinx document. So it's just a simple import for me: matthew@speedy:~/Downloads/pocketsphinx-0.8$ sudo apt-get install python-pocketsphinx ... >>> import pocketsphinx >>> dir(pocketsphinx) ['Decoder', 'LatLink', 'LatLinkIterator', 'LatNode', 'LatNodeIterator', 'Lattice', '__builtins__', '__doc__', '__file__', '__name__', '__package__'] It was hard to install from source (I gave up), but easy to install the ubuntu lib. This site looks like it could be useful for installing it from source: http://www.cs.columbia.edu/~ecooper/CS4706/ps-mac.html
#2676 Le 15/02/2013, à 19:14 mulder29 Re : TVDownloader: télécharger les médias du net ! Et je reçois python: can't open file alors que j'ai installé Python 2.7.3, hier. Hors ligne #2677 Le 15/02/2013, à 19:26 k3c Re : TVDownloader: télécharger les médias du net ! si tu tapes which python ça affiche quoi ? Dernière modification par k3c (Le 15/02/2013, à 19:28) Hors ligne #2678 Le 15/02/2013, à 20:20 mulder29 Re : TVDownloader: télécharger les médias du net ! ça affiche : /usr/bin/python Hors ligne #2679 Le 15/02/2013, à 22:38 JessicaNichenin Re : TVDownloader: télécharger les médias du net ! j'utilise la version 0.6 du script et cela ne marche plus python get.py http://videos.tf1.fr/unforgettable/episode-16-saison-01-la-fille-de-l-air-7817948.html ['rtmpdump', '-r', 'rtmpte://wske.wat.tv/ondemand/mp4:vod/H264-384x288/31/87/9433187.h264', '-c', '1935', '-m', '10', '-w', 'ebb7a6fbdc9021db95e2bd537d73fabb9717508f085bea50bde75f7a8e27698c', '-x', '343642', '-o', 'episode-16-saison-01-la-fille-de-l-air-7817948.mp4', ' --resume'] Erreur : le sous-process s'est terminé avec (le code d'erreur est 1) Erreur : le sous-process s'est terminé avec (le code d'erreur est 1) Erreur : le sous-process s'est terminé avec (le code d'erreur est 1) Erreur : le sous-process s'est terminé avec (le code d'erreur est 1) Merci pour ce merveilleux script Hors ligne #2680 Le 16/02/2013, à 00:17 k3c Re : TVDownloader: télécharger les médias du net ! Cette version est peu testée, mais télécharge les 3 épisodes de Unforgettable # -*- coding:utf-8 -*- # TF1 TMC NT1 HD1 version 0.7 par k3c, correction de 11gjm, modif pour TF1 unforgettable import subprocess, optparse, re, sys, shlex import socket from urllib2 import urlopen import time, md5, random, urllib2 import bs4 as BeautifulSoup listeUserAgents = [ 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_5; fr-fr) AppleWebKit/525.18 (KHTML, like Gecko) Version/3.1.2 Safari/525.20.1', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.186 Safari/535.1', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/525.13 (KHTML, like Gecko) Chrome/0.2.149.27 Safari/525.', 'Mozilla/5.0 (X11; U; Linux x86_64; en-us) AppleWebKit/528.5+ (KHTML, like Gecko, Safari/528.5+) midori', 'Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.107 Safari/535.1', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-us) AppleWebKit/312.1 (KHTML, like Gecko) Safari/312', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.12 Safari/535.11', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.8 (KHTML, like Gecko) Chrome/17.0.940.0 Safari/535.8' ] def get_wat(id): def base36encode(number): if not isinstance(number, (int, long)): raise TypeError('number must be an integer') if number < 0: raise ValueError('number must be positive') alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' base36 = '' while number: number, i = divmod(number, 36) base36 = alphabet[i] + base36 return base36 or alphabet[0] ts = base36encode(int(time.time())) timesec = hex(int(ts, 36))[2:] while(len(timesec)<8): timesec = "0"+timesec token = md5.new("9b673b13fa4682ed14c3cfa5af5310274b514c4133e9b3a81e6e3aba00912564/web/"+str(id)+""+timesec).hexdigest() id_url1 = "http://www.wat.tv/get/web/"+str(id)+"?token="+token+"/"+timesec+"&getURL=1&country=FR" return id_url1 def main(): gg@gg-SATELLITE-L755:~$ cat !$ cat hd3.py # -*- coding:utf-8 -*- # TF1 TMC NT1 HD1 version 0.7 par k3c, correction de 11gjm, modif pour TF1 unforgettable import subprocess, optparse, re, sys, shlex import socket from urllib2 import urlopen import time, md5, random, urllib2 import bs4 as BeautifulSoup listeUserAgents = [ 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_5; fr-fr) AppleWebKit/525.18 (KHTML, like Gecko) Version/3.1.2 Safari/525.20.1', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.186 Safari/535.1', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/525.13 (KHTML, like Gecko) Chrome/0.2.149.27 Safari/525.', 'Mozilla/5.0 (X11; U; Linux x86_64; en-us) AppleWebKit/528.5+ (KHTML, like Gecko, Safari/528.5+) midori', 'Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.107 Safari/535.1', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-us) AppleWebKit/312.1 (KHTML, like Gecko) Safari/312', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.12 Safari/535.11', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.8 (KHTML, like Gecko) Chrome/17.0.940.0 Safari/535.8' ] def get_wat(id): def base36encode(number): if not isinstance(number, (int, long)): raise TypeError('number must be an integer') if number < 0: raise ValueError('number must be positive') alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' base36 = '' while number: number, i = divmod(number, 36) base36 = alphabet[i] + base36 return base36 or alphabet[0] ts = base36encode(int(time.time())) timesec = hex(int(ts, 36))[2:] while(len(timesec)<8): timesec = "0"+timesec token = md5.new("9b673b13fa4682ed14c3cfa5af5310274b514c4133e9b3a81e6e3aba00912564/web/"+str(id)+""+timesec).hexdigest() id_url1 = "http://www.wat.tv/get/web/"+str(id)+"?token="+token+"/"+timesec+"&getURL=1&country=FR" return id_url1 def main(): # timeout en secondes socket.setdefaulttimeout(90) usage = "usage: python tmc_tf1.py [options] <url de l'emission>" parser = optparse.OptionParser( usage = usage ) parser.add_option( "--nocolor", action = 'store_true', default = False, help = 'desactive la couleur dans le terminal' ) parser.add_option( "-v", "--verbose", action = "store_true", default = False, help = 'affiche les informations de debugage' ) ( options, args ) = parser.parse_args() if( len( args ) > 2 or args[ 0 ] == "" ): parser.print_help() parser.exit( 1 ) debut_id = '' html = urlopen(sys.argv[1]).read() nom = sys.argv[1].split('/')[-1:][0] no = nom.split('.')[-2:][0] soup = BeautifulSoup.BeautifulSoup(html) if 'tmc.tv' in str(soup) or 'tf1.fr' in str(soup): debut_id = str(soup.find('div', attrs={'class' : 'unique' })) if 'nt1.tv' in str(soup) or 'hd1.tv' in str(soup): debut_id = str(soup.find('section', attrs={'class' : 'player-unique' })) id = [x.strip() for x in re.findall("mediaId :([^,]*)", debut_id)][0] id_url1 = get_wat(id) opener = urllib2.build_opener() opener.addheaders = [('User-agent', random.choice(listeUserAgents))] data = opener.open(id_url1).read() opener.close() if data[0:4] == 'http': ua = random.choice(listeUserAgents) arguments = 'curl "%s" -L -g -A "%s" -o "%s.mp4"' % (data, ua, no) print arguments process = subprocess.Popen(arguments, stdout=subprocess.PIPE, shell=True).communicate()[0] if data[0:4] == 'rtmp': host = re.search('rtmpte://(.*)/ondemand', data).group(1) host = host.replace('rtmpte', 'rtmpe') data0 = re.search('rtmpte://(.*)h264', data).group(0) cmds = 'rtmpdump -r "%s" -c 443 -m 10 -w b23434cbed89c9eaf520373c4c6f26e1f7326896dee4b1719e8d9acda0c19e99 -x 343427 -o "%s.mp4" " --resume"' % (data0, str(no)) f = open(str(no), 'w') f.write(cmds) f.close() arguments = shlex.split( cmds ) print arguments cpt = 0 while True: p = subprocess.Popen( arguments, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if p.returncode != 0: print "Erreur : le sous-process s\'est terminé avec (le code d\'erreur est " + str(p.returncode) + ")" # status = False if cpt > 5: break cpt += 1 time.sleep(3) else: # status = True break if __name__ == "__main__": Hors ligne #2681 Le 16/02/2013, à 01:15 angeline Re : TVDownloader: télécharger les médias du net ! Bonsoir à tous. Ce soir, je découvre que "www.francetvinfo.fr" a encore changé de protocole, et plus possible télécharger le journal de France2 le 20 heures. Je suis en Amérique du sud. Est-ce vrai aussi pour vous ? Merci. ıɔǝɔ ǝɯɯoɔ xnǝıɯ ʇsǝ,ɔ nʇunqnʞ Hors ligne #2682 Le 16/02/2013, à 08:52 gl38 Re : TVDownloader: télécharger les médias du net ! Le journal de 20h de France 2 du 15/2 se télécharge normalement chez moi (en France). Cordialement, Guy Hors ligne #2683 Le 16/02/2013, à 10:09 k3c Re : TVDownloader: télécharger les médias du net ! @ angeline avec pluzzdl 0.9.4 je viens de télécharger le journal de 20 h gg@gg-SATELLITE-L755:~$ pluzzdl -v http://pluzz.francetv.fr/videos/jt20h.html [DEBUG ] main.py pluzzdl 0.9.4 avec Python 2.7.3 (i686) [DEBUG ] main.py OS : Linux #58-Ubuntu SMP Thu Jan 24 15:51:02 UTC 2013 [DEBUG ] Navigateur.py GET http://pluzz.francetv.fr/videos/jt20h.html [DEBUG ] PluzzDL.py ID de l'émission : 77106215 [DEBUG ] Navigateur.py GET http://www.pluzz.fr/appftv/webservices/video/getInfosOeuvre.php?mode=zeri&id-diffusion=77106215 [DEBUG ] PluzzDL.py Lien MMS : mms://a988.v101995.c10199.e.vm.akamaistream.net/7/988/10199/3f97c7e6/ftvigrp.download.akamai.com/10199/cappuccino/production/publication/Autre/Autre/2013/S07/J5/738525_jt20h_20130215.wmv [DEBUG ] PluzzDL.py Lien RTMP : None [DEBUG ] PluzzDL.py URL manifest : http://ftvodhdsecz-f.akamaihd.net/z/streaming-adaptatif/2013/S07/J5/77106215-20130215-,398,632,934,k.mp4.csmil/manifest.f4m [DEBUG ] PluzzDL.py URL m3u8 : http://medias2.francetv.fr/catchup-mobile/france-dom-tom/non-token/non-drm/m3u8/2013/S07/J5/77106215-20130215.m3u8 [DEBUG ] PluzzDL.py Utilisation de DRM : non [DEBUG ] PluzzDL.py Chaine : France 2 [DEBUG ] Historique.py Historique chargé [DEBUG ] Navigateur.py GET http://medias2.francetv.fr/catchup-mobile/france-dom-tom/non-token/non-drm/m3u8/2013/S07/J5/77106215-20130215.m3u8 [DEBUG ] Navigateur.py GET http://medias2.francetv.fr/catchup-mobile/france-dom-tom/non-token/non-drm/m3u8/2013/S07/J5/77106215-20130215-840k.m3u8 [DEBUG ] PluzzDL.py Nombre de fragments : 242 [INFO ] PluzzDL.py Début du téléchargement des fragments et ça se finit par [DEBUG ] Navigateur.py GET http://medias2.francetv.fr/catchup-mobile/france-dom-tom/non-token/non-drm/m3u8/2013/S07/J5/77106215-20130215-840k/77106215-20130215-240.ts [DEBUG ] Navigateur.py GET http://medias2.francetv.fr/catchup-mobile/france-dom-tom/non-token/non-drm/m3u8/2013/S07/J5/77106215-20130215-840k/77106215-20130215-241.ts [INFO ] PluzzDL.py Fin du téléchargement [INFO ] PluzzDL.py Création du fichier MKV (vidéo finale) ; veuillez attendre quelques instants [INFO ] PluzzDL.py Fin ! [DEBUG ] Historique.py Historique sauvé gg@gg-SATELLITE-L755:~$ Hors ligne #2684 Le 16/02/2013, à 11:29 mulder29 Re : TVDownloader: télécharger les médias du net ! Ah mince, et toujours pas de solution pour moi ? :S Hors ligne #2685 Le 16/02/2013, à 12:16 thom83 Re : TVDownloader: télécharger les médias du net ! Bonjour, @ k3c J'ai testé la version 0.7 ci-dessus (la partie inférieure) avec succès après avoir ajouté le «main()» qui va bien. Est-il possible d'améliorer cette version en favorisant les variantes HD des émissions plutôt qu'en SD, quitte à ce que cela prenne plus de place sur le disque ? Cela ferait une différence sur un écran un peu grand... Dernière modification par thom83 (Le 16/02/2013, à 12:17) Hors ligne #2686 Le 16/02/2013, à 12:33 ynad Re : TVDownloader: télécharger les médias du net ! @ mulder29 voila comment j'ai fait: j'ai créer un répertoire D8 dans ce répertoire j'ai copier coller le script #2646 page 106 de k3C dans un éditeur de texte et sauvegarder en d8.py dans le même répertoire j'ai ouvert un terminal et lancer la commande: python d8.py http://www.d8.tv/d8-series/pid5313-d8-h.html si la vidéo est toujours en ligne elle se charge dans ce répertoire ce script est valable pour d8, k3c en a fait d'autres pour tf1 etc. il y a pluzdl pour le site de pluzz. espérant t'aider un peu... Hors ligne #2687 Le 16/02/2013, à 13:39 k3c Re : TVDownloader: télécharger les médias du net ! @ thom83 je prux sortir un hack rapidement, quitte à faire propre plus tard Hors ligne #2688 Le 16/02/2013, à 14:05 mulder29 Re : TVDownloader: télécharger les médias du net ! On me met encore m = re.search('\d{6}$',sys.argv[1]) bash: Erreur de syntaxe près du symbole inattendu « ( » ~$ if m is None: > try: > id = s.findAll('div',attrs={"class":u"block-common block-player-programme"})[0]('canal:player')[0]['videoid'] bash: Erreur de syntaxe près du symbole inattendu « ( » ~$ except: Commande 'except:' non trouvée, vouliez-vous dire : La commande 'except' du paquet 'qmail' (universe) except: : commande introuvable ~$ Display all 2596 possibilities? (y or n) ~$ t 'imposiible de trouver l\'id de la video' > Sans doute là que ça coince. J'ai tapé la ligne de commande dans le Terminal, rien ne se passe. (en revanche, ma flèche du curseur est devenue une croix) Hors ligne #2689 Le 16/02/2013, à 14:10 angeline Re : TVDownloader: télécharger les médias du net ! @K3c, Merci, j'ai le même début, mais pas la même fin cb@cb-desktop:~$ pluzzdl -v http://pluzz.francetv.fr/videos/jt20h.html [DEBUG ] main.py pluzzdl 0.9.4 avec Python 2.7.3 (i686) [DEBUG ] main.py OS : Linux #58-Ubuntu SMP Thu Jan 24 15:51:02 UTC 2013 [DEBUG ] Navigateur.py GET http://pluzz.francetv.fr/videos/jt20h.html [DEBUG ] PluzzDL.py ID de l'émission : 77106215 [DEBUG ] Navigateur.py GET http://www.pluzz.fr/appftv/webservices/video/getInfosOeuvre.php?mode=zeri&id-diffusion=77106215 [DEBUG ] PluzzDL.py Lien MMS : mms://a988.v101995.c10199.e.vm.akamaistream.net/7/988/10199/3f97c7e6/ftvigrp.download.akamai.com/10199/cappuccino/production/publication/Autre/Autre/2013/S07/J5/738525_jt20h_20130215.wmv [DEBUG ] PluzzDL.py Lien RTMP : None [DEBUG ] PluzzDL.py URL manifest : http://ftvodhdsecz-f.akamaihd.net/z/streaming-adaptatif/2013/S07/J5/77106215-20130215-,398,632,934,k.mp4.csmil/manifest.f4m [DEBUG ] PluzzDL.py URL m3u8 : http://medias2.francetv.fr/catchup-mobile/france-dom-tom/non-token/non-drm/m3u8/2013/S07/J5/77106215-20130215.m3u8 [DEBUG ] PluzzDL.py Utilisation de DRM : non [DEBUG ] PluzzDL.py Chaine : France 2 [DEBUG ] Historique.py Historique chargé [DEBUG ] Navigateur.py GET http://medias2.francetv.fr/catchup-mobile/france-dom-tom/non-token/non-drm/m3u8/2013/S07/J5/77106215-20130215.m3u8 [DEBUG ] Navigateur.py Forbidden Traceback (most recent call last): File "/usr/share/pluzzdl/main.py", line 91, in <module> progressFnct = progressFnct ) File "/usr/share/pluzzdl/PluzzDL.py", line 119, in __init__ downloader.telecharger() File "/usr/share/pluzzdl/PluzzDL.py", line 259, in telecharger self.m3u8 = self.navigateur.getFichier( self.m3u8URL ) File "/usr/share/pluzzdl/Navigateur.py", line 58, in getFichier page = self.urlOpener.open( requete, timeout = self.timeOut ) File "/usr/lib/python2.7/urllib2.py", line 406, in open response = meth(req, response) File "/usr/lib/python2.7/urllib2.py", line 519, in http_response 'http', request, response, code, msg, hdrs) File "/usr/lib/python2.7/urllib2.py", line 444, in error return self._call_chain(*args) File "/usr/lib/python2.7/urllib2.py", line 378, in _call_chain result = func(*args) File "/usr/lib/python2.7/urllib2.py", line 527, in http_error_default raise HTTPError(req.get_full_url(), code, msg, hdrs, fp) urllib2.HTTPError: HTTP Error 403: Forbidden [DEBUG ] Historique.py Historique sauvé Je sèche. PS.: cela va si mal que cela en France... que les infos ne doivent pas sortir de l'hexagone ? Edit: Oubliez la ligne au dessus... Avec Opera, cela se charge, mais très lentement, (en pointillés) du à mon internet très lent, et ce n'est pas regardable! D'où la nécessité de télécharger. Dernière modification par angeline (Le 16/02/2013, à 14:25) ıɔǝɔ ǝɯɯoɔ xnǝıɯ ʇsǝ,ɔ nʇunqnʞ Hors ligne #2690 Le 16/02/2013, à 15:05 k3c Re : TVDownloader: télécharger les médias du net ! @ mulder29 tu exécutes du bash, pas du python @ angeline n'étant pas en France, il te faudrait un proxy France,non ? Hors ligne #2691 Le 16/02/2013, à 15:12 mulder29 Re : TVDownloader: télécharger les médias du net ! ok et comment je fais pour passer en mode "python" ? Hors ligne #2692 Le 16/02/2013, à 15:13 angeline Re : TVDownloader: télécharger les médias du net ! @K3c Par Opera ou Firefox, je ne passe pas par un proxy actuellement. Avec Opera, j'ai droit à un bonus de 50 secondes de pub... qui fonctionne très bien ! Par Firefox, c'est bloqué par AdBlock, mais le journal dans tous les cas arrive trop lentement. Le pb me parait autre part. ıɔǝɔ ǝɯɯoɔ xnǝıɯ ʇsǝ,ɔ nʇunqnʞ Hors ligne #2693 Le 16/02/2013, à 15:53 angeline Re : TVDownloader: télécharger les médias du net ! @Kc2 Si je récupère [DEBUG ] PluzzDL.py Lien MMS : mms://a988.v101995.c10199.e.vm.akamaistream.net/7/988/10199/3f97c7e6/ftvigrp.download.akamai.com/10199/cappuccino/production/publication/Autre/Autre/2013/S07/J5/738525_jt20h_20130215.wmv que je change le "mms" en "rtsp" le journal est téléchargeable par mon downloader préféré ! Mais pas par "pluzzdl" ıɔǝɔ ǝɯɯoɔ xnǝıɯ ʇsǝ,ɔ nʇunqnʞ Hors ligne #2694 Le 16/02/2013, à 23:22 angeline Re : TVDownloader: télécharger les médias du net ! C'était trop beau... cela n'a pas duré. [DEBUG ] PluzzDL.py Lien MMS : None [DEBUG ] PluzzDL.py Lien RTMP : None Donc, comment passer par un proxy S.V.P. ıɔǝɔ ǝɯɯoɔ xnǝıɯ ʇsǝ,ɔ nʇunqnʞ Hors ligne #2695 Le 16/02/2013, à 23:28 mulder29 Re : TVDownloader: télécharger les médias du net ! Ok, j'ai installé idle, j'ai lancé une python shell et j'ai ouvert mon dossier leafpad d8.py en prenant soin de copier/coller le texte J'ai ensuite copié dans le terminal le lien python d8.py http://www.d8.tv/d8-series/pid5313-d8-h.html et... il n'a l'air de rien se passer en fait. Hors ligne #2696 Le 17/02/2013, à 00:11 k3c Re : TVDownloader: télécharger les médias du net ! @ angeline $ pluzzdl --help usage: pluzzdl [options] urlEmission Télécharge les émissions de Pluzz positional arguments: urlEmission URL de l'émission Pluzz a charger optional arguments: -h, --help show this help message and exit -b, --progressbar affiche la progression du téléchargement -p PROXY, --proxy PROXY utilise un proxy HTTP au format suivant http://URL:PORT -s, --sock si un proxy est fourni avec l'option -p, un proxy SOCKS5 est utilisé au format suivant ADRESSE:PORT -v, --verbose affiche les informations de debugage -t, --soustitres récupère le fichier de sous-titres de la vidéo (si disponible) --nocolor désactive la couleur dans le terminal --version show program's version number and exit donc pluzzdl -p http://1.2.3.4:80 ... en supposant que 1.2.3.4:80 est un proxy français valide @ mulder 29 quand je lance ta commande, ça m'affiche python d8.py http://www.d8.tv/d8-series/pid5313-d8-h.html rtmpdump -r "rtmp://vod-fms.canalplus.fr/ondemand/videos/1302/1047160_20_1500k.mp4" -o "H.mp4" en fait cette procédure lance juste la commande ci-dessus Hors ligne #2697 Le 17/02/2013, à 00:51 angeline Re : TVDownloader: télécharger les médias du net ! @K3c Merci pour la réponse. Cependant, je doute ! J'ai essayé 6/7 proxy... pas un ne veut coopérer. Par un navigateur: Firefox, Opéra, ou Chromium, l'émission passe. Sans proxy. Par pluzzdll, toujours la même interdiction cb@cb-desktop:~$ pluzzdl -v http://pluzz.francetv.fr/videos/jt20h.html [DEBUG ] main.py pluzzdl 0.9.4 avec Python 2.7.3 (i686) [DEBUG ] main.py OS : Linux #58-Ubuntu SMP Thu Jan 24 15:51:02 UTC 2013 [DEBUG ] Navigateur.py GET http://pluzz.francetv.fr/videos/jt20h.html [DEBUG ] PluzzDL.py ID de l'émission : 77496935 [DEBUG ] Navigateur.py GET http://www.pluzz.fr/appftv/webservices/video/getInfosOeuvre.php?mode=zeri&id-diffusion=77496935 [DEBUG ] PluzzDL.py Lien MMS : mms://a988.v101995.c10199.e.vm.akamaistream.net/7/988/10199/3f97c7e6/ftvigrp.download.akamai.com/10199/cappuccino/production/publication/Autre/Autre/2013/S07/J6/740386_jt20h_20130216.wmv [DEBUG ] PluzzDL.py Lien RTMP : None [DEBUG ] PluzzDL.py URL manifest : http://ftvodhdsecz-f.akamaihd.net/z/streaming-adaptatif/2013/S07/J6/77496935-20130216-,398,632,934,k.mp4.csmil/manifest.f4m [DEBUG ] PluzzDL.py URL m3u8 : http://medias2.francetv.fr/catchup-mobile/france-dom-tom/non-token/non-drm/m3u8/2013/S07/J6/77496935-20130216.m3u8 [DEBUG ] PluzzDL.py Utilisation de DRM : non [DEBUG ] PluzzDL.py Chaine : France 2 [DEBUG ] Historique.py Historique chargé [DEBUG ] Navigateur.py GET http://medias2.francetv.fr/catchup-mobile/france-dom-tom/non-token/non-drm/m3u8/2013/S07/J6/77496935-20130216.m3u8 [DEBUG ] Navigateur.py Forbidden Traceback (most recent call last): File "/usr/share/pluzzdl/main.py", line 91, in <module> progressFnct = progressFnct ) File "/usr/share/pluzzdl/PluzzDL.py", line 119, in __init__ downloader.telecharger() File "/usr/share/pluzzdl/PluzzDL.py", line 259, in telecharger self.m3u8 = self.navigateur.getFichier( self.m3u8URL ) File "/usr/share/pluzzdl/Navigateur.py", line 58, in getFichier page = self.urlOpener.open( requete, timeout = self.timeOut ) File "/usr/lib/python2.7/urllib2.py", line 406, in open response = meth(req, response) File "/usr/lib/python2.7/urllib2.py", line 519, in http_response 'http', request, response, code, msg, hdrs) File "/usr/lib/python2.7/urllib2.py", line 444, in error return self._call_chain(*args) File "/usr/lib/python2.7/urllib2.py", line 378, in _call_chain result = func(*args) File "/usr/lib/python2.7/urllib2.py", line 527, in http_error_default raise HTTPError(req.get_full_url(), code, msg, hdrs, fp) urllib2.HTTPError: HTTP Error 403: Forbidden [DEBUG ] Historique.py Historique sauvé Par chance, France télévision s'est réveillé, et maintenant une URL en mms apparait. Il est 18h ici, je télécharge. Avec proxy j'obtiens: pluzzdl -v -p http://176.31.247.227:8085 http://pluzz.francetv.fr/videos/jt20h.html [DEBUG ] main.py pluzzdl 0.9.4 avec Python 2.7.3 (i686) [DEBUG ] main.py OS : Linux #58-Ubuntu SMP Thu Jan 24 15:51:02 UTC 2013 [DEBUG ] Navigateur.py GET http://pluzz.francetv.fr/videos/jt20h.html [CRITICAL] PluzzDL.py Impossible de récupérer l'ID de l'émission Mais je pense que peu de proxy laissent passer les infos de vidéo, du moins dans les gratuits. Je reste à l'écoute. Dernière modification par angeline (Le 17/02/2013, à 00:53) ıɔǝɔ ǝɯɯoɔ xnǝıɯ ʇsǝ,ɔ nʇunqnʞ Hors ligne #2698 Le 17/02/2013, à 02:17 mulder29 Re : TVDownloader: télécharger les médias du net ! @ mulder 29 quand je lance ta commande, ça m'affiche python d8.py http://www.d8.tv/d8-series/pid5313-d8-h.html rtmpdump -r "rtmp://vod-fms.canalplus.fr/ondemand/videos/1302/1047160_20_1500k.mp4" -o "H.mp4" en fait cette procédure lance juste la commande ci-dessus Euh, moi, ça reste sur la ligne sur python... et aucune autre réaction o_O Ok, donc il y a vraiment un bug dans mon système et python est inutilisable chez moi. Hors ligne #2699 Le 17/02/2013, à 02:20 k3c Re : TVDownloader: télécharger les médias du net ! si tu tapes dans un terminal python ça t'affiche quoi ? Hors ligne #2700 Le 17/02/2013, à 10:59 mulder29 Re : TVDownloader: télécharger les médias du net ! Ça m'affiche : Python 2.7.3 (default, Aug 1 2012, 05:16:07) [GCC 4.6.3] on linux2 Type "help", "copyright", "credits" or "license" for more information. >>> par rapport à la commande fait précédemment, tu en déduis ? Hors ligne
When i run my python code through terminal m getting this error : def GPlag(text,encode=False): import urllib, urllib2, json if encode == True: text = text.encode('utf-8') query = urllib.quote_plus(text) base_url = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&q=' url = base_url + '%22' + query + '%22' request = urllib2.Request(url,None) response = urllib2.urlopen(request) result = json.load(response) output = [] if result['responseData']['results'] != []: firstMatch = result['responseData']['results'][0] output.append(firstMatch['title']) output.append(firstMatch['visibleUrl']) output.append(firstMatch['content']) return output def callGPlag(n): for s in n: outcome = GPlag(s,encode=True) It gives; TypeError: 'NoneType' object has no attribute '__getitem__' in this if result['responseData']['results'] line. Here is the traceback: Traceback (most recent call last): ... in <module> GPlagFile(sys.argv[1]) ... in GPlagFile outcome = GPlag(s,encode=True) ... in GPlag if 'results' in result['responseData']: TypeError: argument of type 'NoneType' is not iterable
Using REST with Ajax by Nic Ferrier 02/23/2006 This article shows how to use Ajax techniques to make web apps with REST APIs. Everyone's talking about REST these days. Lots of people are still struggling with it, and there's good reason for that--REST is actually quite difficult to fit into the browser-based HTML Web, for two reasons: Current HTML forms support only GETandPOST, notPUTorDELETE HTML forms always involve a page change As far as I'm concerned, this is fair enough; REST is primarily a web services platform, an alternative to CORBA, SOAP, and XMLRPC, not a user interface. Still, at least some of the time, it's nice to use a REST API as the foundation for a web app. On the other hand, Ajax can be really complicated, and it always seems to involve lots of different bits of code. That is the point of this article. REST's clean definition of an application's architecture solves at least some of the problems with Ajax; and using Ajax to a REST web app leaves the REST API uncomplicated and pure. REST API The best way for me to explain this is to refer to a real REST API. I recently built a multiuser ATOM/RSS aggregator. Each user can have a list of feeds (URLs, basically) and manipulate those list items. I built a simple REST API for managing the lists. The feed aggregator needs to: add a feed to the user's list list the current feeds remove a feed from the user's list Clearly I could model the list of a user's feeds as a resource: /feed ... and each feed as a subresource of that: /feed/106 That is the great thing about REST: it's easy to work out the details of a pure REST implementation: A POSTto/feedwill add a feed to the user's list. ThePOSTwill have to include a parameter specifying the feed URL. A GETto/feedwill list the feeds. A nice HTML display of the feeds might be appropriate. A DELETEto/feed/67will remove the feed with that ID. This is all very simple, and simplicity is of course why REST is so powerful. This code is pretty easy to do with just about any web server and language combination. I used mod_python. Here's the main handler and GET method implementation: import os import re import urllib from mod_python import apache from mod_python import util def handler(http): if http.method == "GET": return get(http) elif http.method == "POST": return post(http) elif http.method == "DELETE": return delete(http) return apache.HTTP_NOT_ACCEPTABLE def get(http): """Display a list of feeds.""" username = http.user form = util.FieldStorage(http) def feeds(): try: dbcon = PgSQL.connect(http.get_options()["DBURL-" + http.hostname]) curs = dbcon.cursor() curs.execute("select id, url from get_feeds('%s')" % (username)) result = curs.fetchone() while result != None: yield result result = curs.fetchone() dbcon.close() except: pass return http.content_type = "text/html" xmlout(feeds, http) return apache.OK def xmlout(generator, out): """Use 'generator' to get the data and send it to 'out'""" print >>out, "<?xml version="1.0"?>" print >>out, "<?xsl-stylesheet href='/display-feed.xslt' version="1.0"?>" print >>out, "<feeds>" for id,url in generator(): print >>out, "<feed><id>%s</id><url>%s</url></feed>" % (id, re.sub("&", "&amp;", url)) print >>out, "</feeds>" The GET returns an HTML page of the user's feeds. Notice that there is an XML representation in the code, so I could add content negotiation to the mix to support both XML and HTML output.
I am trying to do HTTP basic authentication with bottle.py using the following decorator I have written: def check_auth(username, password): if username == 'admin' and password == 'pass': return True else: return False def authenticate(msg_string = "Authenticate."): response.content_type = "application/json" message = {'message': msg_string} resp = jsonpickle.encode(message) response.status = "401 - Unauthorized" response.headers['WWW-Authenticate'] = 'Basic realm="PyBit"' return resp def requires_auth(f): def decorated(*args, **kwargs): print request.auth auth = request.auth if not auth: return authenticate() elif not check_auth(auth[0],auth[1]): response.status = "401 - Unauthorized" return authenticate("HTTP Authentication Failed.") else: return f(*args, **kwargs) return decorated It works in the builtin wsgiref server, but not when I run my app under Apache using mod_wsgi. The "auth" object is always "None" in that case. Is apache pinching it?
I would like to access my Google Affiliate Network product feed via the Google search API for shopping. I would like to do this from a backend Python library i'm developing. Has anyone done something like this? I have the following: A Google account Enabled Search API for Shopping in the Google API Console and got an API key (for server apps) and a client ID + client secret (for installed applications). A GAN account and got the pid. Several advertiser who approved me so i have products available in my product feed. OAuth2 Python Code: from apiclient.discovery import build from oauth2client.client import OAuth2WebServerFlow from oauth2client.tools import run from oauth2client.django_orm import Storage from models import CredentialsModel storage = Storage(CredentialsModel, 'name', 'GAN Reporting', 'credentials') credentials = storage.get() if credentials is None or credentials.invalid == True: flow = OAuth2WebServerFlow( client_id=MyClientID, client_secret=MyClientSecret, scope='https://www.googleapis.com/auth/shoppingapi', user_agent='cleverblocks/1.0', access_type='offline') credentials = run(flow, storage) http = httplib2.Http() credentials.authorize(http) client = build('shopping', 'v1', http=http, developerKey=MyAPIKey) resource = client.products() request = resource.list(source='gan:MyGANPid', country='US') return request.execute() Running this i get back the following error (HttpError 412): no advertisers are registered for the given publisher The user I am using to authenticate is listed on the GAN->settings->users section. I've been hacking at this from all directions to the point where I'm now starting to think this API is broken. Has anyone managed to access GAN product feed via the Search API for Shopping? Any help is appreciated.
I have a hierarchical two combo-box. The first combo-box displays a list of customerNames, i.e. different companies from a MySQL db. Each customer has branches in different cities. Then, when a customer name is chosen from combo-box1 option list, e.g. {Aldi, Meyer, Carrefour, WalMart}, for that particular customer, a list of cities/branches is automatically displayed in the combo-box2. Something like that, e.g.: combo1: chosen_customer [Aldi] --> cities:{NY, Boston, Berlin, Tokyo, London} then.. combo2: options {NY, Boston, Berlin, Tokyo, London} The problem comes when we chose again another customer, that eventually has a smaller number of branches - e.g. combo1: chosen_customer [Meyer] --> {LA, San Francisco}, then.. we got combo2: options {LA, San Francisco, Berlin, Tokyo, London} intead of combo2: options {LA, San Francisco} Here is the function that runs the combo2, which is called every time a customerName is chosen from the list combo1: def loadComboCity(self,customerName): """query results cityList into self.mydb.matrix""" queryName="citylist_thisCustomer" self.mysqlAPI(queryName,customerName) id=0 for row in self.mydb.matrix: cityname=self.mydb.matrix[id][0] self.addcomboCity(id,cityname) id=id+1 del self.mydb.matrix[:] and the function that adds each name of the list city that belongs to that customer: def addcomboCity(self,id,cityname): self.comboCity.addItem(QtCore.QString()) self.comboCity.setItemText(id, QtGui.QApplication.translate("MainWindow", cityname, None, QtGui.QApplication.UnicodeUTF8)) We tried to use del to clean the previous content of the list, but it still gets the same behavior. This is a Qt or a Python related problem? Or there is some bit that we are missing here? All comments and suggestions are highly appreciated.
LowKeys = dict(La = 'z', Lb = 'x', Lc = 'c', Ld = 'v', Le = 'b', Lf = 'n', Lg = 'm') MidKeys = dict(Ma = 'q', Mb = 'w', Mc = 'e', Md = 'r', Me = 't', Mf = 'y', Mg = 'u') HighKeys = dict(Ha = 'i', Hb = 'o', Hc = 'p', Hd = '[', He = ']') SharpLowKeys = dict(SLa = 's', SLc = 'f', SLd = 'g', SLf = 'j', SLg = 'k') FlatLowKeys = dict(FLa = 'a', FLb = 's', FLd = 'f', FLe = 'g', FLg = 'j') SharpMidKeys = dict(SMa = '2', SMc = '4', SMd = '5', SMf = '7', SMg = '8') FlatMidKeys = dict(FMa = '1', FMb = '2', FMd = '4', FMe = '5', FMg = '7') SharpHighKeys = dict(SHa = '9', SHc = '-', SHd = '=') FlatHighKeys = dict(FHa = '8', FHb = '9', FHd = '-', FHe = '=') notes = raw_input('Notes: ') notes = notes.split() I want to replace all the items that appear in both notes and any of the dicts with the dict value. Ex: notes = La, Ha, Lb notes = z, i, x Is there a way to do this? Or a better way than what I'm trying?
Ok, so I need to build several barcharts that have in between the first bar and the others a yellow line. var y = d3.scale.ordinal() .rangeRoundBands([0, height], .2); ... svg.selectAll(".bar") .data(data) .enter().append("rect") .attr("class", function(d) { return "bar " + d.label; }) .attr("id", function(d){return d.label}) .attr("x", function(d) { return x(Math.min(0, d.value)); }) .attr("y", function(d) { return y(d.label); }) .attr("width", function(d) { return Math.abs(x(d.value) - x(0)); }) .attr("height", y.rangeBand()); and this is where I'm trying to build the yellow line: svg.append("line") .attr("class", "yellow") .attr("x1", 0 ) .attr("x2", width ) .attr("y1", y.rangeBand() + 14) .attr("y2", y.rangeBand()+14); So, this way the line is more or less in the position I need, but if the number of bar changes (and it happens) and consequently the height of the bar change, it obviously go out of position and end up behind the bar. Any idea on how to find the value?
Total Python newb here. I have a images directory and I need to return the names and urls of those files to a django template that I can loop through for links. I know it will be the server path, but I can modify it via JS. I've tried os.walk, but I keep getting empty results. If your images are in one directory import os root="/my" Path=os.path.join(root,"path","images") os.chdir(Path) for files in os.listdir("."): if files[-3:].lower() in ["gif","png","jpg","bmp"] : print "image file: ",files If it's a single directory,
The goal is just to retrieve a specific file without downloading the entire contents, using the HTTP range method as described: http://www.codeproject.com/KB/cs/remotezip.aspx You can solve this a bit more generally with less code. Essentially, create enough of a file-like object for ZipFile to use. So you wind up with Using the same idea, you could also create a caching wrapper for HttpFile to avoid repeated downloads. And here's the code: (note the lack of error-handling) #!/usr/bin/python import urllib2 class HttpFile(object): def __init__(self, url): self.url = url self.offset = 0 self._size = -1 def size(self): if self._size < 0: f = urllib2.urlopen(self.url) self._size = int(f.headers["Content-length"]) return self._size def read(self, count=-1): req = urllib2.Request(self.url) if count < 0: end = self.size() - 1 else: end = self.offset + count - 1 req.headers['Range'] = "bytes=%s-%s" % (self.offset, end) f = urllib2.urlopen(req) data = f.read() # FIXME: should check that we got the range expected, etc. chunk = len(data) if count >= 0: assert chunk == count self.offset += chunk return data def seek(self, offset, whence=0): if whence == 0: self.offset = offset elif whence == 1: self.offset += offset elif whence == 2: self.offset = self.size() + offset else: raise Exception("Invalid whence") def tell(self): return self.offset Since there was no such library I have written a small module myself, most code and logic is is from zipfile with the seek/reads translated to HTTP range requests. Feel free to review and suggest improvements: The code: """ Read remote ZIP files using HTTP range requests """ import struct import urllib2 import zlib import cStringIO from zipfile import ZipInfo, ZipExtFile, ZipInfo from os.path import join, basename # The code is mostly adatpted from the zipfile module # NOTE: ZIP64 is not supported # The "end of central directory" structure, magic number, size, and indices # (section V.I in the format document) structEndArchive = "<4s4H2LH" stringEndArchive = "PK\005\006" sizeEndCentDir = struct.calcsize(structEndArchive) _ECD_SIGNATURE = 0 _ECD_DISK_NUMBER = 1 _ECD_DISK_START = 2 _ECD_ENTRIES_THIS_DISK = 3 _ECD_ENTRIES_TOTAL = 4 _ECD_SIZE = 5 _ECD_OFFSET = 6 _ECD_COMMENT_SIZE = 7 # These last two indices are not part of the structure as defined in the # spec, but they are used internally by this module as a convenience _ECD_COMMENT = 8 _ECD_LOCATION = 9 # The "central directory" structure, magic number, size, and indices # of entries in the structure (section V.F in the format document) structCentralDir = "<4s4B4HL2L5H2L" stringCentralDir = "PK\001\002" sizeCentralDir = struct.calcsize(structCentralDir) # indexes of entries in the central directory structure _CD_SIGNATURE = 0 _CD_CREATE_VERSION = 1 _CD_CREATE_SYSTEM = 2 _CD_EXTRACT_VERSION = 3 _CD_EXTRACT_SYSTEM = 4 _CD_FLAG_BITS = 5 _CD_COMPRESS_TYPE = 6 _CD_TIME = 7 _CD_DATE = 8 _CD_CRC = 9 _CD_COMPRESSED_SIZE = 10 _CD_UNCOMPRESSED_SIZE = 11 _CD_FILENAME_LENGTH = 12 _CD_EXTRA_FIELD_LENGTH = 13 _CD_COMMENT_LENGTH = 14 _CD_DISK_NUMBER_START = 15 _CD_INTERNAL_FILE_ATTRIBUTES = 16 _CD_EXTERNAL_FILE_ATTRIBUTES = 17 _CD_LOCAL_HEADER_OFFSET = 18 # The "local file header" structure, magic number, size, and indices # (section V.A in the format document) structFileHeader = "<4s2B4HL2L2H" stringFileHeader = "PK\003\004" sizeFileHeader = struct.calcsize(structFileHeader) _FH_SIGNATURE = 0 _FH_EXTRACT_VERSION = 1 _FH_EXTRACT_SYSTEM = 2 _FH_GENERAL_PURPOSE_FLAG_BITS = 3 _FH_COMPRESSION_METHOD = 4 _FH_LAST_MOD_TIME = 5 _FH_LAST_MOD_DATE = 6 _FH_CRC = 7 _FH_COMPRESSED_SIZE = 8 _FH_UNCOMPRESSED_SIZE = 9 _FH_FILENAME_LENGTH = 10 _FH_EXTRA_FIELD_LENGTH = 11 def _http_get_partial_data(url, start_range, end_range=None): req = urllib2.Request(url) range_header = "bytes=%s" % start_range if end_range is not None: range_header += "-%s" % end_range req.headers['Range'] = range_header f = urllib2.urlopen(req) return f def _EndRecData(url): """Return data from the "End of Central Directory" record, or None. The data is a list of the nine items in the ZIP "End of central dir" record followed by a tenth item, the file seek offset of this record.""" ECD = _http_get_partial_data(url, -sizeEndCentDir) content_range = ECD.headers.get('Content-Range') filesize = int(content_range.split('/')[1]) if content_range and '/' in content_range else 0 data = ECD.read() ECD.close() if data[0:4] == stringEndArchive and data[-2:] == "\000\000": # the signature is correct and there's no comment, unpack structure endrec = struct.unpack(structEndArchive, data) endrec = list(endrec) # Append a blank comment and record start offset endrec.append("") endrec.append(filesize - sizeEndCentDir) return endrec # Either this is not a ZIP file, or it is a ZIP file with an archive # comment. Search the end of the file for the "end of central directory" # record signature. The comment is the last item in the ZIP file and may be # up to 64K long. It is assumed that the "end of central directory" magic # number does not appear in the comment. # Search by retrieving chunks of 256, 1k and 64k try_ranges = (1 << 8, 1 << 10, 1 << 16) for check_range in try_ranges: ECD = _http_get_partial_data(url, -(check_range + sizeEndCentDir)) data = ECD.read() content_range = ECD.headers.get('Content-Range') ECD.close() download_start = content_range.split('-')[0] start = data.rfind(stringEndArchive) if start >= 0: # found the magic number; attempt to unpack and interpret recData = data[start:start+sizeEndCentDir] endrec = list(struct.unpack(structEndArchive, recData)) commentSize = endrec[_ECD_COMMENT_SIZE] #as claimed by the zip file comment = data[start+sizeEndCentDir:start+sizeEndCentDir+commentSize] endrec.append(comment) endrec.append(download_start + start) return endrec raise IOError class HTTPZipFile: def __init__(self, url): self.url = url self.NameToInfo = {} # Find file info given name self.filelist = [] # List of ZipInfo instances for archive self.pwd = None self.comment = '' self.debug = 0 self._RealGetContents() def _RealGetContents(self): """Read in the table of contents for the ZIP file.""" try: endrec = _EndRecData(self.url) except IOError: raise BadZipfile("File is not a zip file") if not endrec: raise BadZipfile, "File is not a zip file" if self.debug > 1: print endrec size_cd = endrec[_ECD_SIZE] # bytes in central directory offset_cd = endrec[_ECD_OFFSET] # offset of central directory self.comment = endrec[_ECD_COMMENT] # archive comment # "concat" is zero, unless zip was concatenated to another file concat = endrec[_ECD_LOCATION] - size_cd - offset_cd #if endrec[_ECD_SIGNATURE] == stringEndArchive64: # # If Zip64 extension structures are present, account for them # concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator) if self.debug > 2: inferred = concat + offset_cd print "given, inferred, offset", offset_cd, inferred, concat # self.start_dir: Position of start of central directory self.start_dir = offset_cd + concat ECD = _http_get_partial_data(self.url, self.start_dir, self.start_dir+size_cd-1) data = ECD.read() ECD.close() fp = cStringIO.StringIO(data) total = 0 while total < size_cd: centdir = fp.read(sizeCentralDir) if centdir[0:4] != stringCentralDir: raise BadZipfile, "Bad magic number for central directory" centdir = struct.unpack(structCentralDir, centdir) if self.debug > 2: print centdir filename = fp.read(centdir[_CD_FILENAME_LENGTH]) # Create ZipInfo instance to store file information x = ZipInfo(filename) x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH]) x.comment = fp.read(centdir[_CD_COMMENT_LENGTH]) x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET] (x.create_version, x.create_system, x.extract_version, x.reserved, x.flag_bits, x.compress_type, t, d, x.CRC, x.compress_size, x.file_size) = centdir[1:12] x.volume, x.internal_attr, x.external_attr = centdir[15:18] # Convert date/time code to (year, month, day, hour, min, sec) x._raw_time = t x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F, t>>11, (t>>5)&0x3F, (t&0x1F) * 2 ) x._decodeExtra() x.header_offset = x.header_offset + concat x.filename = x._decodeFilename() self.filelist.append(x) self.NameToInfo[x.filename] = x # update total bytes read from central directory total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH] + centdir[_CD_EXTRA_FIELD_LENGTH] + centdir[_CD_COMMENT_LENGTH]) if self.debug > 2: print "total", total def namelist(self): """Return a list of file names in the archive.""" l = [] for data in self.filelist: l.append(data.filename) return l def infolist(self): """Return a list of class ZipInfo instances for files in the archive.""" return self.filelist def printdir(self): """Print a table of contents for the zip file.""" print "%-46s %19s %12s" % ("File Name", "Modified ", "Size") for zinfo in self.filelist: date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6] print "%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size) def getinfo(self, name): """Return the instance of ZipInfo given 'name'.""" info = self.NameToInfo.get(name) if info is None: raise KeyError( 'There is no item named %r in the archive' % name) return info def open(self, name, pwd=None): """Return file-like object for 'name'.""" if not self.url: raise RuntimeError, \ "Attempt to read ZIP archive that was already closed" zinfo = self.getinfo(name) offset = zinfo.header_offset f = _http_get_partial_data(self.url, offset, offset+sizeFileHeader-1) fheader = f.read() f.close() fheader = struct.unpack(structFileHeader, fheader) offset += sizeFileHeader f = _http_get_partial_data(self.url, offset, offset+fheader[_FH_FILENAME_LENGTH]-1) fname = f.read() f.close() if fname != zinfo.orig_filename: raise BadZipfile, \ 'File name in directory "%s" and header "%s" differ.' % ( zinfo.orig_filename, fname) is_encrypted = zinfo.flag_bits & 0x1 if is_encrypted: raise RuntimeError, "File %s is encrypted, " \ "not supported." % name offset += fheader[_FH_FILENAME_LENGTH]+fheader[_FH_EXTRA_FIELD_LENGTH] f = _http_get_partial_data(self.url, offset, offset+fheader[_FH_COMPRESSED_SIZE]-1) data = f.read() return ZipExtFile(cStringIO.StringIO(data), 'r', zinfo) if __name__ == "__main__": # Some tests link="http://dfn.dl.sourceforge.net/project/filezilla/FileZilla_Client/3.5.1/FileZilla_3.5.1_win32.zip" hzfile = HTTPZipFile(link) hzfile.printdir() for fname in ('GPL.html', 'resources/blukis/48x48/filter.png', 'resources/finished.wav'): source_name = join('FileZilla-3.5.1', fname) dest_fname = join('/tmp', basename(fname)) print "Extracing %s to %s" % (source_name, dest_fname) with hzfile.open(source_name) as f: data = f.read() new_file = open(dest_fname, 'w') new_file.write(data) new_file.close()
Please, draw it. ¿All the groupings must take all the nodes from `A'? ¿How could you make d(N) groupings if you have a node with a lesser degree? > For each node in B (in each grouping) is the d(Bn)<4: ¿So in a grouping, there can't be more than 3 incident edges on a B node? but since P = NP is unproven either way, then maybe it's actually just P. ¿is it legal? ¿what is its penalty? If a node has no edges, ¿does it incur in penalty? with your example: G1 = 3+2+3 = 8 G2 = 3+2+3 = 8 G3 = 0(¿4?)+3+3 = 6(¿10?) total = 22 (¿26?) ¿was `4' just an example? using http://snag.gy/b0mw9.jpg and the groupings that you suggested, ¿what will the penalty, per grouping, per node? Can I see the code you have so far? def options(A, start=None, best=-1): #start and best are only there so that I can restart the program at a later point max=-1 if(start==None): current=[0]*len(A) else: current=start for B in A: if max<len(A[B]): max=len(A[B]) while(next_permutation(A, current)): print(current) c=(score(A, current, max)) if(c[0]<best): output(c, 'w', current) best=c[0] print("Best:"+str(best)) elif(c[0]==best): output(c, 'a', current) print("1") def score(A, current, max): options=[{} for i in xrange(max)] for i, B in zip(xrange(len(current)), A): p=get_permutation(current[i], len(A[B])) for j,num in zip(xrange(len(A[B])),p): if(options[num-1].has_key(A[B][j])): options[num-1][A[B][j]].append(B) else: options[num-1][A[B][j]]=[B] ans=0 for group in options: if(group>4): ans+=2 for subject in group: if(len(group[subject])<4): ans+=4-len(group[subject]) return(ans, options) def get_permutation(i, N): p0, p=[k for k in xrange(1, N+1)], [] for j in xrange(N-1, 0, -1): j_f=reduce(lambda x, y: x*y, [k for k in xrange(1, j+1)]) c=i/j_f i%=j_f p.append(p0[c]) p0=p0[:c]+p0[c+1:] p.append(p0[0]) return(p) def next_permutation(A, p): for i, B in zip(xrange(len(p)), A): fact=reduce(lambda x, y: x*y, [k for k in xrange(1, len(A[B])+1)]) p[i]+=1 if(p[i]==fact): p[i]=0 else: return(True) return(False) def output(option, mode, current): fout=open("ans.txt", mode) fout.write("=============================\n") fout.write("Score: "+str(option[0])+"\n[") for i in current: fout.write(str(i)+", ") fout.write("]\n") for i,group in zip(xrange(len(option[1])), option[1]): fout.write("Group "+str(i)+"\n") for subject in group: fout.write(subject+": ") for name in group[subject]: fout.write(name+", ") fout.write("\n") fout.close() values={"0" : ("A","B","C",), "1" : ("D","C","E","F",), "2" : ("D","E","G","F",), "3" : ("B","C","E","F",), "4" : ("A","D","H","E",), "5" : ("A","D","I",), "6" : ("A","D","B",), "7" : ("A","D","B","H",), "8" : ("A","D","B","I",), "9" : ("A","D","J",), "10" : ("D","B","J",), "11" : ("A","D","B",), "12" : ("A","D","I","G",), "13" : ("D","C","H",), "14" : ("B","C","F",), "15" : ("A","D","B",), "16" : ("D","B","C","E",), "17" : ("A","D","I",), "18" : ("A","D","B",), "19" : ("A","B","E","F",), "20" : ("A","G","F","J",), "21" : ("A","E","G","F",), "22" : ("A","E","G","J",), "23" : ("A","D","B",), "24" : ("D","B","C",), "25" : ("D","C","I",), "26" : ("A","D","B",), "27" : ("A","D","I","J",), "28" : ("A","D","J",), "29" : ("A","C","I",), "30" : ("A","D","E",), "31" : ("A","H","J",), } options(values) Also, why are you working on this problem?
jQuery and Ajax While web2py is mainly for server-side development, the welcome scaffolding app comes with the base jQuery library[jquery], jQuery calendars (date picker, datetime picker and clock), and some additional JavaScript functions based on jQuery. Nothing in web2py prevents you from using other Ajax libraries such as Prototype, ExtJS, or YUI, but we decided to package jQuery because we find it to be easier to use and more powerful than other equivalent libraries. We also find that it captures the web2py spirit of being functional and concise. web2py_ajax.html The scaffolding web2py application "welcome" includes a file called views/web2py_ajax.html which looks like this: {{ response.files.insert(0,URL('static','js/jquery.js')) response.files.insert(1,URL('static','css/calenadar.css')) response.files.insert(2,URL('static','js/calendar.js')) response.include_meta() response.include_files() }} <script type="text/javascript"><!-- // These variables are used by the web2py_ajax_init // function in web2py.js (which is loaded below). var w2p_ajax_confirm_message = "{{=T('Are you sure you want to delete this object?')}}"; var w2p_ajax_date_format = "{{=T('%Y-%m-%d')}}"; var w2p_ajax_datetime_format = "{{=T('%Y-%m-%d %H:%M:%S')}}"; //--></script> <script src="{{=URL('static','js/web2py.js')}}" type="text/javascript"></script> This file is included in the HEAD of the default "layout.html" and it provides the following services: Includes "static/jquery.js". Includes "static/calendar.js" and "static/calendar.css", which are used for the popup calendar. Includes all response.metaheaders Includes all response.files(requires CSS and JS, as declared in the code) Sets form variables and includes "static/js/web2y.js" "web2py.js" does the following: Defines an ajaxfunction (based on jQuery $.ajax). Makes any DIV of class "error" or any tag object of class "flash" slide down. Prevents typing invalid integers in INPUT fields of class "integer". Prevents typing invalid floats in INPUT fields of class "double". Connects INPUT fields of type "date" with a popup date picker. Connects INPUT fields of type "datetime" with a popup datetime picker. Connects INPUT fields of type "time" with a popup time picker. Defines web2py_ajax_component, a very important tool that will be described in Chapter 12. Defines web2py_websocket, a function that can be used for HTML5 websockets (not described in this book but read the examples in the source of "gluon/contrib/websocket__messaging.py").websockets Defines functions to the entropy calculation and input validation of the password field. It also includes popup, collapse, and fade functions for backward compatibility. Here is an example of how the other effects play well together. Consider a test app with the following model: db = DAL("sqlite://db.db") db.define_table('child', Field('name'), Field('weight', 'double'), Field('birth_date', 'date'), Field('time_of_birth', 'time')) db.child.name.requires=IS_NOT_EMPTY() db.child.weight.requires=IS_FLOAT_IN_RANGE(0,100) db.child.birth_date.requires=IS_DATE() db.child.time_of_birth.requires=IS_TIME() with this "default.py" controller: def index(): form = SQLFORM(db.child) if form.process().accepted: response.flash = 'record inserted' return dict(form=form) and the following "default/index.html" view: {{extend 'layout.html}} {{=form}} The "index" action generates the following form: If an invalid form is submitted, the server returns the page with a modified form containing error messages. The error messages are DIVs of class "error", and because of the above web2py.js code, the errors appears with a slide-down effect: The color of the errors is given in the CSS code in "layout.html". The web2py.js code prevents you from typing an invalid value in the input field. This is done before and in addition to, not as a substitute for, the server-side validation. The web2py.js code displays a date picker when you enter an INPUT field of class "date", and it displays a datetime picker when you enter an INPUT field of class "datetime". Here is an example: The web2py.js code also displays the following time picker when you try to edit an INPUT field of class "time": Upon submission, the controller action sets the response flash to the message "record inserted". The default layout renders this message in a DIV with id="flash". The web2py.js code is responsible for making this DIV appear and making it disappear when you click on it: These and other effects are accessible programmatically in the views and via helpers in controllers. jQuery effects The basic effects described here do not require any additional files; everything you need is already included in web2py_ajax.html. HTML/XHTML objects can be identified by their type (for example a DIV), their classes, or their id. For example: <div class="one" id="a">Hello</div> <div class="two" id="b">World</div> They belong to class "one" and "two" respectively. They have ids equal to "a" and "b" respectively. In jQuery you can refer to the former with the following CSS-like equivalent notations jQuery('.one') // address object by class "one" jQuery('#a') // address object by id "a" jQuery('DIV.one') // address by object of type "DIV" with class "one" jQuery('DIV #a') // address by object of type "DIV" with id "a" and to the latter with jQuery('.two') jQuery('#b') jQuery('DIV.two') jQuery('DIV #b') or you can refer to both with jQuery('DIV') Tag objects are associated to events, such as "onclick". jQuery allows linking these events to effects, for example "slideToggle": <div class="one" id="a" onclick="jQuery('.two').slideToggle()">Hello</div> <div class="two" id="b">World</div> Now if you click on "Hello", "World" disappears. If you click again, "World" reappears. You can make a tag hidden by default by giving it a hidden class: <div class="one" id="a" onclick="jQuery('.two').slideToggle()">Hello</div> <div class="two hidden" id="b">World</div> You can also link actions to events outside the tag itself. The previous code can be rewritten as follows: <div class="one" id="a">Hello</div> <div class="two" id="b">World</div> <script> jQuery('.one').click(function(){jQuery('.two').slideToggle()}); </script> Effects return the calling object, so they can be chained. When the click sets the callback function to be called on click. Similarly for change, keyup, keydown, mouseover, etc. A common situation is the need to execute some JavaScript code only after the entire document has been loaded. This is usually done by the onload attribute of BODY but jQuery provides an alternative way that does not require editing the layout: <div class="one" id="a">Hello</div> <div class="two" id="b">World</div> <script> jQuery(document).ready(function(){ jQuery('.one').click(function(){jQuery('.two').slideToggle()}); }); </script> The body of the unnamed function is executed only when the document is ready, after it has been fully loaded. Here is a list of useful event names: Form events onchange: Script to be run when the element changes onsubmit: Script to be run when the form is submitted onreset: Script to be run when the form is reset onselect: Script to be run when the element is selected onblur: Script to be run when the element loses focus onfocus: Script to be run when the element gets focus Keyboard events onkeydown: Script to be run when key is pressed onkeypress: Script to be run when key is pressed and released onkeyup: Script to be run when key is released Mouse events onclick: Script to be run on a mouse click ondblclick: Script to be run on a mouse double-click onmousedown: Script to be run when mouse button is pressed onmousemove: Script to be run when mouse pointer moves onmouseout: Script to be run when mouse pointer moves out of an element onmouseover: Script to be run when mouse pointer moves over an element onmouseup: Script to be run when mouse button is released Here is a list of useful effects defined by jQuery: Effects jQuery(...).show(): Makes the object visible jQuery(...).hide(): Makes the object hidden jQuery(...).slideToggle(speed, callback): Makes the object slide up or down jQuery(...).slideUp(speed, callback): Makes the object slide up jQuery(...).slideDown(speed, callback): Makes the object slide down jQuery(...).fadeIn(speed, callback): Makes the object fade in jQuery(...).fadeOut(speed, callback): Makes the object fade out The speed argument is usually "slow", "fast" or omitted (the default). The callback is an optional function that is called when the effect is completed. jQuery effects can also easily be embedded in helpers, for example, in a view: {{=DIV('click me!', _onclick="jQuery(this).fadeOut()")}} Other useful methods and attributes for handling selected elements Methods and attributes jQuery(...).prop(name): Returns the name of the attribute value jQuery(...).prop(name, value): Sets the attribute name to value jQuery(...).html(): Without arguments, it returns the inner html from the selected elements, it accepts a string as argument for replacing the tag content. jQuery(...).text(): Without arguments, it returns the inner text of the selected element (without tags), if a string is passed as argument, it replaces the inner text with the new data. jQuery(...).css(name, value): With one parameter, it returns the CSS value of the style attribute specified for the selected elements. With two parameters, it sets a new value for the specified CSS attribute. jQuery(...).each(function): It loops trought the selected elements set and calls function with each item as argument. jQuery(...).index(): Without arguments, it returns the index value for the first element selected related to its siblings. (i.e, the index of a LI element). If an element is passed as argument, it returns the element position related to the selected elements set. jQuery(...).length: This attribute returns the number of elements selected. jQuery is a very compact and concise Ajax library; therefore web2py does not need an additional abstraction layer on top of jQuery (except for the ajax function discussed below). The jQuery APIs are accessible and readily available in their native form when needed. Consult the documentation for more information about these effects and other jQuery APIs. The jQuery library can also be extended using plugins and User Interface Widgets. This topic is not covered here; see ref.[jquery-ui] for details. Conditional fields in forms A typical application of jQuery effects is a form that changes its appearance based on the value of its fields. This is easy in web2py because the SQLFORM helper generates forms that are "CSS friendly". The form contains a table with rows. Each row contains a label, an input field, and an optional third column. The items have ids derived strictly from the name of the table and names of the fields. The convention is that every INPUT field has an id tablename_fieldname and is contained in a row with id tablename_fieldname__row. As an example, create an input form that asks for a taxpayer's name and for the name of the taxpayer's spouse, but only if he/she is married. Create a test application with the following model: db = DAL('sqlite://db.db') db.define_table('taxpayer', Field('name'), Field('married', 'boolean'), Field('spouse_name')) the following "default.py" controller: def index(): form = SQLFORM(db.taxpayer) if form.process().accepted: response.flash = 'record inserted' return dict(form=form) and the following "default/index.html" view: {{extend 'layout.html'}} {{=form}} <script> jQuery(document).ready(function(){ if(jQuery('#taxpayer_married').prop('checked')) jQuery('#taxpayer_spouse_name__row').show(); else jQuery('#taxpayer_spouse_name__row').hide(); jQuery('#taxpayer_married').change(function(){ if(jQuery('#taxpayer_married').prop('checked')) jQuery('#taxpayer_spouse_name__row').show(); else jQuery('#taxpayer_spouse_name__row').hide();}); }); </script> The script in the view has the effect of hiding the row containing the spouse's name: When the taxpayer checks the "married" checkbox, the spouse's name field reappears: Here "taxpayer_married" is the checkbox associated to the "boolean" field "married" of table "taxpayer". "taxpayer_spouse_name__row" is the row containing the input field for "spouse_name" of table "taxpayer". Confirmation on delete Another useful application is requiring confirmation when checking a "delete" checkbox such as the delete checkbox that appears in edit forms. Consider the above example and add the following controller action: def edit(): row = db.taxpayer[request.args(0)] form = SQLFORM(db.taxpayer, row, deletable=True) if form.process().accepted: response.flash = 'record updated' return dict(form=form) and the corresponding view "default/edit.html" {{extend 'layout.html'}} {{=form}} The deletable=True argument in the SQLFORM constructor instructs web2py to display a "delete" checkbox in the edit form. It is False by default. web2py's "web2py.js" includes the following code: jQuery(document).ready(function(){ jQuery('input.delete').prop('onclick', 'if(this.checked) if(!confirm( "{{=T('Sure you want to delete this object?')}}")) this.checked=false;'); }); By convention this checkbox has a class equal to "delete". The jQuery code above connects the onclick event of this checkbox with a confirmation dialog (standard in JavaScript) and unchecks the checkbox if the taxpayer does not confirm: The ajax function In web2py.js, web2py defines a function called ajax which is based on, but should not be confused with, the jQuery function $.ajax. The latter is much more powerful than the former, and for its usage, we refer you to ref.[jquery] and ref.[jquery-b]. However, the former function is sufficient for many complex tasks, and is easier to use. The ajax function is a JavaScript function that has the following syntax: ajax(url, [name1, name2, ...], target) It asynchronously calls the url (first argument), passes the values of the field inputs with the name equal to one of the names in the list (second argument), then stores the response in the innerHTML of the tag with the id equal to target (the third argument). Here is an example of a default controller: def one(): return dict() def echo(): return request.vars.name and the associated "default/one.html" view: {{extend 'layout.html'}} <form> <input name="name" onkeyup="ajax('{{=URL('default', 'echo')}}', ['name'], 'target')" /> </form> <div id="target"></div> When you type something in the INPUT field, as soon as you release a key (onkeyup), the ajax function is called, and the value of the name="name" field is passed to the action "echo", which sends the text back to the view. The ajax function receives the response and displays the echo response in the "target" DIV. Eval target The third argument of the ajax function can be the string ":eval". This means that the string returned by server will not be embedded in the document but it will be evaluated instead. Here is an example of a default controller: def one(): return dict() def echo(): return "jQuery('#target').html(%s);" % repr(request.vars.name) and the associated "default/one.html" view: {{extend 'layout.html'}} <form> <input name="name" onkeyup="ajax('echo', ['name'], ':eval')" /> </form> <div id="target"></div> This allows for more complex responses that can update multiple targets. Auto-completion Web2py contains a built-in autocomplete widget, described in the Forms chapter. Here we will build a simpler one from scratch. Another application of the above ajax function is auto-completion. Here we wish to create an input field that expects a month name and, when the visitor types an incomplete name, performs auto-completion via an Ajax request. In response, an auto-completion drop-box appears below the input field. This can be achieved via the following default controller: def month_input(): return dict() def month_selector(): if not request.vars.month: return '' months = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September' ,'October', 'November', 'December'] month_start = request.vars.month.capitalize() selected = [m for m in months if m.startswith(month_start)] return DIV(*[DIV(k, _onclick="jQuery('#month').val('%s')" % k, _onmouseover="this.style.backgroundColor='yellow'", _onmouseout="this.style.backgroundColor='white'" ) for k in selected]) and the corresponding "default/month_input.html" view: {{extend 'layout.html'}} <style> #suggestions { position: relative; } .suggestions { background: white; border: solid 1px #55A6C8; } .suggestions DIV { padding: 2px 4px 2px 4px; } </style> <form> <input type="text" id="month" name="month" style="width: 250px" /><br /> <div style="position: absolute;" id="suggestions" class="suggestions"></div> </form> <script> jQuery("#month").keyup(function(){ ajax('month_selector', ['month'], 'suggestions')}); </script> The jQuery script in the view triggers the Ajax request each time the visitor types something in the "month" input field. The value of the input field is submitted with the Ajax request to the "month_selector" action. This action finds a list of month names that start with the submitted text (selected), builds a list of DIVs (each one containing a suggested month name), and returns a string with the serialized DIVs. The view displays the response HTML in the "suggestions" DIV. The "month_selector" action generates both the suggestions and the JavaScript code embedded in the DIVs that must be executed when the visitor clicks on each suggestion. For example when the visitor types "M" the callback action returns: <div> <div onclick="jQuery('#month').val('March')" onmouseout="this.style.backgroundColor='white'" onmouseover="this.style.backgroundColor='yellow'">March</div> <div onclick="jQuery('#month').val('May')" onmouseout="this.style.backgroundColor='white'" onmouseover="this.style.backgroundColor='yellow'">May</div> </div> Here is the final effect: If the months are stored in a database table such as: db.define_table('month', Field('name')) then simply replace the month_selector action with: def month_input(): return dict() def month_selector(): if not request.vars.month: return '' pattern = request.vars.month.capitalize() + '%' selected = [row.name for row in db(db.month.name.like(pattern)).select()] return ''.join([DIV(k, _onclick="jQuery('#month').val('%s')" % k, _onmouseover="this.style.backgroundColor='yellow'", _onmouseout="this.style.backgroundColor='white'" ).xml() for k in selected]) jQuery provides an optional Auto-complete Plugin with additional functionalities, but that is not discussed here. Ajax form submission Here we consider a page that allows the visitor to submit messages using Ajax without reloading the entire page. Using the LOAD helper, web2py provides a better mechanism for doing it than described here, which will be described in Chapter 12. Here we want to show you how to do it simply using jQuery. It contains a form "myform" and a "target" DIV. When the form is submitted, the server may accept it (and perform a database insert) or reject it (because it did not pass validation). The corresponding notification is returned with the Ajax response and displayed in the "target" DIV. Build a test application with the following model: db = DAL('sqlite://db.db') db.define_table('post', Field('your_message', 'text')) db.post.your_message.requires = IS_NOT_EMPTY() Notice that each post has a single field "your_message" that is required to be not-empty. Edit the default.py controller and write two actions: def index(): return dict() def new_post(): form = SQLFORM(db.post) if form.accepts(request, formname=None): return DIV("Message posted") elif form.errors: return TABLE(*[TR(k, v) for k, v in form.errors.items()]) The first action does nothing other than return a view. The second action is the Ajax callback. It expects the form variables in request.vars, processes them and returns DIV("Message posted") upon success or a TABLE of error messages upon failure. Now edit the "default/index.html" view: {{extend 'layout.html'}} <div id="target"></div> <form id="myform"> <input name="your_message" id="your_message" /> <input type="submit" /> </form> <script> jQuery('#myform').submit(function() { ajax('{{=URL('new_post')}}', ['your_message'], 'target'); return false; }); </script> Notice how in this example the form is created manually using HTML, but it is processed by the SQLFORM in a different action than the one that displays the form. The SQLFORM object is never serialized in HTML. SQLFORM.accepts in this case does not take a session and sets formname=None, because we chose not to set the form name and a form key in the manual HTML form. The script at the bottom of the view connects the "myform" submit button to an inline function which submits the INPUT with id="your_message" using the web2py ajax function, and displays the answer inside the DIV with id="target". Voting and rating Another Ajax application is voting or rating items in a page. Here we consider an application that allows visitors to vote on posted images. The application consists of a single page that displays the images sorted according to their vote. We will allow visitors to vote multiple times, although it is easy to change this behavior if visitors are authenticated, by keeping track of the individual votes in the database and associating them with the request.env.remote_addr of the voter. Here is a sample model: db = DAL('sqlite://images.db') db.define_table('item', Field('image', 'upload'), Field('votes', 'integer', default=0)) Here is the default controller: def list_items(): items = db().select(db.item.ALL, orderby=db.item.votes) return dict(items=items) def download(): return response.download(request, db) def vote(): item = db.item[request.vars.id] new_votes = item.votes + 1 item.update_record(votes=new_votes) return str(new_votes) The download action is necessary to allow the list_items view to download images stored in the "uploads" folder. The votes action is used for the Ajax callback. Here is the "default/list_items.html" view: {{extend 'layout.html'}} <form><input type="hidden" id="id" name="id" value="" /></form> {{for item in items:}} <p> <img src="{{=URL('download', args=item.image)}}" width="200px" /> <br /> Votes=<span id="item{{=item.id}}">{{=item.votes}}</span> [<span onclick="jQuery('#id').val('{{=item.id}}'); ajax('vote', ['id'], 'item{{=item.id}}');">vote up</span>] </p> {{pass}} When the visitor clicks on "[vote up]" the JavaScript code stores the item.id in the hidden "id" INPUT field and submits this value to the server via an Ajax request. The server increases the votes counter for the corresponding record and returns the new vote count as a string. This value is then inserted in the target item{{=item.id}} SPAN. Ajax callbacks can be used to perform computations in the background, but we recommend usingcronor a background process instead (discussed in chapter 4), since the web server enforces a timeout on threads. If the computation takes too long, the web server kills it. Refer to your web server parameters to set the timeout value. top
Ok The long detailed BS that began this all is below the long line. The resulting answer is here. Your static points are x,y coordinates with the x values and y values placed in seperate arrays (coorArrx and coorArrY respectively) make sure to never use a value = imgx or imy. # Random Bezier Curve using De Casteljau's algorithm # http://en.wikipedia.org/wiki/Bezier_curve # http://en.wikipedia.org/wiki/De_Casteljau%27s_algorithm # FB - 201111244 import random from PIL import Image, ImageDraw imgx = 500 imgy = 500 image = Image.new("RGB", (imgx, imgy)) draw = ImageDraw.Draw(image) def B(coorArr, i, j, t): if j == 0: return coorArr[i] return B(coorArr, i, j - 1, t) * (1 - t) + B(coorArr, i + 1, j - 1, t) * t # n = random.randint(3, 6) # number of control points n=4 #coorArrX = [] #coorArrY = [] #for k in range(n): # x = random.randint(0, imgx - 1) # y = random.randint(0, imgy - 1) # coorArrX.append(x) # coorArrY.append(y) coorArrX=[3,129,12,77] coorArrY=[128,52,12,491] # plot the curve numSteps = 10000 for k in range(numSteps): t = float(k) / (numSteps - 1) x = int(B(coorArrX, 0, n - 1, t)) y = int(B(coorArrY, 0, n - 1, t)) try: image.putpixel((x, y), (0, 255, 0)) except: pass # plot the control points cr = 3 # circle radius for k in range(n): x = coorArrX[k] y = coorArrY[k] try: draw.ellipse((x - cr, y - cr, x + cr, y + cr), (255, 0, 0)) except: pass image.show() =.........................................................................................=I am also something of a newcommer to all of this, and I REFUSE to look this up as I see it like you do...a learning experiencee. But as I look at this code I see something strange for k in range(n): x = (0, imgx - 1) y = (0, imgy - 1) coorArrX.append(x) coorArrY.append(y) Are you sure this part is correct? imgx is defined as 500 elsewhere, and n is 4.so this could read as for k in range(4): x = (0, 500 - 1) y = (0, 500 - 1) which (since these values never change at all in this code) means: x = (0, 499) y = (0, 499) on every pass.So each time they get to : coorArrX.append(x) coorArrY.append(y) They simply keep adding new copies of the same data to the array, so when it is done the array looks like this (internally) [(0, 499), (0, 499), (0, 499), (0,499)] What makes this more confusing, is that coorArrX and coorArrY are A) Identical, and B) identical in their basic parts(that is each element is identical). Therefore, when you get to this part of the code: # plot the control points cr = 3 # circle radius for k in range(n): x = coorArrX[k] y = coorArrY[k] try: draw.ellipse((x - cr, y - cr, x + cr, y + cr), (255, 0, 0)) except: pass and you substitute in the values in the arrays, you get: # plot the control points cr = 3 # circle radius for k in range(n): x = coorArrX[k] y = coorArrY[k] try: draw.ellipse(((0, 499) - 3, (0, 499) - 3, (0, 499) + 3, (0, 499) + 3), (255, 0, 0)) except: pass Now this is the part that controls the drawing of the curved segments for the plot, but I do not see how centering an elispe on those impossible coordinate sets can draw anything?! Broke down and did a copy paste test run. This code is purely bogus, either placed to dupe people into wasting time, or placed where OP found it for same reason. But it was fun trying!!
#2301 Le 28/10/2012, à 16:38 ynad Re : TVDownloader: télécharger les médias du net ! Re @11gjm la liste des correctifs, dans la dernière il y a 4h les deux nouveaux fichiers main.py (v 0.9.3) et PluzzDL.py qui permettent le changement url @+ Hors ligne #2302 Le 28/10/2012, à 16:56 11gjm Re : TVDownloader: télécharger les médias du net ! Bonjour , @ynad : Merci pour taréponse . Je vais aller voir . Cordialement . Hors ligne #2303 Le 28/10/2012, à 16:58 chaoswizard Re : TVDownloader: télécharger les médias du net ! @chaoswizard : penses à modifier , aussi , la version .exe . A l'occasion . Merci . Ouais, enfin, c'est encore un test : j'ai juste essayé de charger une seule vidéo >< Dernière modification par chaoswizard (Le 28/10/2012, à 16:59) Ubuntu ==> Debian ==> Archlinux Hors ligne #2304 Le 28/10/2012, à 18:16 duocore Re : TVDownloader: télécharger les médias du net ! je ne vois pas comment utiliser la commande: http://www.encodage.org/arte.php?http:/ … e_la_vidéo pour la video:http://www.wat.tv/video/saison-episode-malediction-5bflx_4dfnt_.html merci pour votre aide ps: je suis un noob pour la ligne de commande Hors ligne #2305 Le 28/10/2012, à 19:46 11gjm Re : TVDownloader: télécharger les médias du net ! Bonjour , @duocore : On recommence à zéro !!! Enfin presque . La solution serait d'utiliser "flvstreamer" . !!! POINT !!! . ( On ne va pas sur le site encodage.org , OK . Recherche de flux pour ARTE ) . Donc , soit tu as un RTMPdump (2.5) , soit tu installes FLVStreamer (v1.9 ou supérieure) . Ci-dessous un batch (modifié pour flv... avec LNX...) : flvstreamer -r "rtmpte://wske.wat.tv:1935/ondemand" -a "ondemand" -f "LNX 10,2,202,243" -s "http://www.wat.tv/images/v40/PlayerWat.swf" -p "http://www.wat.tv/video/saison-episode-malediction-5bflx_4dfnt_.html" -y "mp4:vod/H264-384x288/15/25/8931525.h264?bu=wat.tv&login=Haven-nt1&i=90.28.251.251&u=4e847b52ed42048286d2adcfbb2aa526&sum=c3787fe2936c236b1981b0b3967bb275" -o 8931525.h264.flv A+ . Hors ligne #2306 Le 28/10/2012, à 20:53 duocore Re : TVDownloader: télécharger les médias du net ! j'ai lancé la commande flvstreamer -r "rtmpte://wske.wat.tv:1935/ondemand" -a "ondemand" -f "LNX 10,2,202,243" -s "http://www.wat.tv/images/v40/PlayerWat.swf" -p "http://www.wat.tv/video/saison-episode- … dfnt_.html" -y "mp4:vod/H264-384x288/15/25/8931525.h264?bu=wat.tv&login=Haven-nt1&i=90.28.251.251&u=4e847b52ed42048286d2adcfbb2aa526&sum=c3787fe2936c236b1981b0b3967bb275" -o 8931525.h264.flv j'ai 2 erreurs error:rtmp server sent error error: rtmp server requested close je ne sais pas quoi faire d'autre Hors ligne #2307 Le 28/10/2012, à 21:16 11gjm Re : TVDownloader: télécharger les médias du net ! Re-... , > je ne sais pas quoi faire d'autre Moi non plus ; désolé . A+ . Hors ligne #2308 Le 28/10/2012, à 22:14 k3c Re : TVDownloader: télécharger les médias du net ! Pour moi c'est ok gg@gg-SATELLITE-L755:~$ rtmpdump -r "rtmpte://wske.wat.tv:1935/ondemand" -a "ondemand" -f "LNX 10,2,202,243" -s "http://www.wat.tv/images/v40/PlayerWat.swf" -p "http://www.wat.tv/video/saison-episode- … dfnt_.html" -y "mp4:vod/H264-384x288/15/25/8931525.h264?bu=wat.tv&login=Haven-nt1&i=90.28.251.251&u=4e847b52ed42048286d2adcfbb2aa526&sum=c3787fe2936c236b1981b0b3967bb275" -o 8931525.h264.flv RTMPDump v2.4 (c) 2010 Andrej Stepanchuk, Howard Chu, The Flvstreamer Team; license: GPL Connecting ... WARNING: HandShake: Type mismatch: client sent 6, server answered 9 INFO: Connected... ERROR: HandleCtrl: Ignoring SWFVerification request, use --swfVfy! Starting download at: 0.000 kB INFO: Metadata: INFO: duration 2434.90 INFO: moovPosition 40.00 INFO: width 640.00 INFO: height 360.00 INFO: videocodecid avc1 INFO: audiocodecid mp4a INFO: avcprofile 77.00 INFO: avclevel 30.00 INFO: aacaot 2.00 INFO: videoframerate 25.00 INFO: audiosamplerate 44100.00 INFO: audiochannels 2.00 INFO: tags: INFO: ©too Lavf54.32.101 INFO: trackinfo: INFO: length 31166464.00 INFO: timescale 12800.00 INFO: language eng INFO: sampledescription: INFO: sampletype avc1 INFO: length 107379232.00 INFO: timescale 44100.00 INFO: language fre INFO: sampledescription: INFO: sampletype mp4a 3963.010 kB / 49.52 sec (2.0%)^C Caught signal: 2, cleaning up, just a second... ERROR: RTMP_ReadPacket, failed to read RTMP packet body. len: 70017 4004.062 kB / 49.52 sec (2.0%) Hors ligne #2309 Le 28/10/2012, à 22:20 k3c Re : TVDownloader: télécharger les médias du net ! duocore tu devrais peut-être te démarrer une Ubuntu récente dans Qemu-kvm ou Virtual Box Tu réserves 5 Go pour cela. Tu télécharges l'iso qui te convient. Tu la lances, et tu as une distrib récente. Hors ligne #2310 Le 30/10/2012, à 00:56 duocore Re : TVDownloader: télécharger les médias du net ! j'ai installer la version 12.4 et utiliser la commande: rtmpdump -r "rtmpte://wske.wat.tv:1935/ondemand" -a "ondemand" -f "LNX 10,2,202,243" -s "http://www.wat.tv/images/v40/PlayerWat.swf" -p "http://www.wat.tv/video/saison-episode- … dfnt_.html" -y "mp4:vod/H264-384x288/15/25/8931525.h264?bu=wat.tv&login=Haven-nt1&i=90.28.251.251&u=4e847b52ed42048286d2adcfbb2aa526&sum=c3787fe2936c236b1981b0b3967bb275" -o 8931525.h264.flv cela commence tres bien, mais une fois arrivé a 17% : Error rtmp_readpacket, failed to read rtmp packet body. len: 72250 XX kb/XX sec(17%) download may be incomplete (downloaded about 17%, try resuming. et je voudrais savoir comment faites vous pour recuperer toutes les données apres rtmpdump merci de votre aide Hors ligne #2311 Le 30/10/2012, à 17:13 11gjm Re : TVDownloader: télécharger les médias du net ! Bonjour à tous , @duocore : Nous allons suivre une nouvelle procédure . Le flux fourni , est coupé au bout de 20% environ (???) . IL FAUT RELANCER LE "BATCH" ( 4 ou 5 fois ) , sans détruire ce qui a été téléchargé . ( NB: C'est à ça que sert l'instruction --resume à la fin de l'instruction ) . Ci-dessous , il y a les nouvelles instructions pour tes commandes ( sous Linux ) . rtmpdump -b 1000000000 -r "rtmpte://wske.wat.tv:1935/ondemand" -a "ondemand" -f "LNX 10,2,202,243" -W "http://www.wat.tv/images/v40/PlayerWat.swf?revision=4.1.80" -p "http://www.wat.tv/video/saison-episode-malediction-5bflx_4dfnt_.html" -y "mp4:vod/H264-384x288/15/25/8931525.h264?bu=wat.tv&login=Haven-nt1&i=90.28.251.251&u=4e847b52ed42048286d2adcfbb2aa526&sum=c3787fe2936c236b1981b0b3967bb275" -o 8931525.h264.flv --resume ( NB: Si , ça ne passe pas , essayer de transformer "rtmpte" en "rtmpe" ) . Je viens d'effectuer le téléchargement ( en 5 fois ) . C'est parfait . Mais , quelle galère !!! ====== @k3c : Il faut re-modifier le fichier "TF1 version 0.2 par k3c" . infos indiquées à : http://forum.ubuntu-fr.org/viewtopic.ph … #p11015781 La précédente mouture indiquait de transformer "rtmpte" en "rtmpe" , ( fonction 'replace' ) . Il s'avère que ceci n'est plus nécessaire . Voir exemple , ci-dessous : ----- http://www.wat.tv/video/saison-episode- … dfnt_.html --- rtmpte://wske.wat.tv/ondemand/mp4:vod/H264-384x288/15/25/8931525.h264 ----- Il apparaît , un autre soucis . Cela concerne la reprise du téléchargement ( interrompu ) . Peux-tu entrer une instruction afin de relancer la capture ? Cordialement . Édité à 17h38mn === ??? === Je viens de tester avec "captvty" ( sous Windows ) . J'ai obtenu "saison-2---episode-1-malédiction.flv" , sans coupure . Peut-être , était-ce le serveur qui nous jouait des tours (???) . =========== Dernière modification par 11gjm (Le 30/10/2012, à 18:39) Hors ligne #2312 Le 30/10/2012, à 19:02 k3c Re : TVDownloader: télécharger les médias du net ! @ 11 gjm #!/usr/bin/env python # -*- coding:utf-8 -*- # TF1 version 0.3 par k3c, correction de 11gjm import subprocess, optparse, re, sys, os, shlex import socket from urllib2 import Request, urlopen, URLError, HTTPError from urllib2 import ProxyHandler, Request, urlopen, URLError, HTTPError import time,md5,random,urllib2 import bs4 as BeautifulSoup timeout = 90 listeUserAgents = [ 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_5; fr-fr) AppleWebKit/525.18 (KHTML, like Gecko) Version/3.1.2 Safari/525.20.1', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.186 Safari/535.1', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/525.13 (KHTML, like Gecko) Chrome/0.2.149.27 Safari/525.13' , 'Mozilla/5.0 (X11; U; Linux x86_64; en-us) AppleWebKit/528.5+ (KHTML, like Gecko, Safari/528.5+) midori', 'Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.107 Safari/535.1', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-us) AppleWebKit/312.1 (KHTML, like Gecko) Safari/312', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.12 Safari/535.11', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.8 (KHTML, like Gecko) Chrome/17.0.940.0 Safari/535.8' ] def get_wat(id): ID_WAT = id def base36encode(number): if not isinstance(number, (int, long)): raise TypeError('number must be an integer') if number < 0: raise ValueError('number must be positive') alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' base36 = '' while number: number, i = divmod(number, 36) base36 = alphabet[i] + base36 return base36 or alphabet[0] ts = base36encode(int(time.time())) timesec = hex(int(ts, 36))[2:] while(len(timesec)<8): timesec="0"+timesec token = md5.new("9b673b13fa4682ed14c3cfa5af5310274b514c4133e9b3a81e6e3aba00912564/web/"+str(ID_WAT)+""+timesec).hexdigest() id_url1 = "http://www.wat.tv/get/web/"+str(ID_WAT)+"?token="+token+"/"+timesec+"&getURL=1&country=FR" return id_url1 def main(): # timeout en secondes socket.setdefaulttimeout(timeout) usage = "usage: python tmc_tf1.py [options] <url de l'emission>" parser = optparse.OptionParser( usage = usage ) parser.add_option( "--nocolor", action = 'store_true', default = False, help = 'desactive la couleur dans le terminal' ) parser.add_option( "-v", "--verbose", action = "store_true", default = False, help = 'affiche les informations de debugage' ) ( options, args ) = parser.parse_args() if( len( args ) > 2 or args[ 0 ] == "" ): parser.print_help() parser.exit( 1 ) the_url = sys.argv[1] html = urlopen(sys.argv[1]).read() nom = sys.argv[1].split('/')[-1:][0] no = nom.split('.')[-2:][0] soup = BeautifulSoup.BeautifulSoup(html) debut_id = str(soup.find('div', attrs={'class' : 'unique' })) id = [x.strip() for x in re.findall("mediaId :([^,]*)",debut_id)][0] id_url1 = get_wat(id) opener = urllib2.build_opener() opener.addheaders = [('User-agent', random.choice(listeUserAgents))] data = opener.open(id_url1).read() opener.close() if data[0:4] == 'http': ua = random.choice(listeUserAgents) arguments = 'curl "%s" -L -g -A "%s" -o "%s.mp4"' % (data, ua, no) process = subprocess.Popen(arguments, stdout=subprocess.PIPE, shell=True).communicate()[0] if data[0:4] == 'rtmp': host = re.search('rtmpte://(.*)/ondemand',data).group(1) host = host.replace('rtmpte','rtmpe') player = "http://www.wat.tv/images/v40/PlayerWat.swf?revision=4.0.992" data0 = re.search('rtmpte://(.*)h264',data).group(0) data1 = re.search('&u=(.*)&sum',data).group(1) data2 = re.search('&sum=(.*)$',data).group(1) data3 = data1+data2 app = re.search('wske.wat.tv/(.*)/mp4:vod',data).group(1) playpath = re.search('mp4:vod(.*).h264',data).group(0) # cmds = "rtmpdump"+" --host "+host+" -l 0 --app "+app+" --playpath "+playpath+" --swfVfy "+player+" --port 443 -e -k 1 --flv "+str(no)+".mp4" cmds = 'rtmpdump -r "%s" -c 1935 -m 10 -w 40ee2a765514facfc6b1eb2745d22cdbdfaaf7f1488b17600c1a22dd3074735d -x 339543 -o "%s.mp4" " --resume"' % (data0, str(no)) arguments = shlex.split( cmds ) print arguments process = subprocess.Popen( arguments, stdout = subprocess.PIPE).communicate()[0] if __name__ == "__main__": main() J'ai téléchargé http://videos.tmc.tv/las-vegas/saison-5 … 1-848.html et eu une vidéo de 41 mns 5 s, comme indiqué dans mediainfo. Hors ligne #2313 Le 30/10/2012, à 21:03 duocore Re : TVDownloader: télécharger les médias du net ! merci cela fonctionne et je voudrais savoir comment faites vous pour recuperer toutes les données apres rtmpdump merci de votre aide Hors ligne #2314 Le 30/10/2012, à 21:49 11gjm Re : TVDownloader: télécharger les médias du net ! Bonjour , @k3c : Constations suites aux modifs de "tf1_03.py" --- Écritures du batch --- python tf1_03.py url_de_la_video --- 1) Cela fonctionne . 2) Arrêt ( n'importe quand ??? ) . 3) Relance du process => ça écrase la vidéo enregistrée . ( blocage éventuel du nouveau process => suppression manuelle de la vidéo ) . Y-a-t'il une instruction à ajouter ? ===== J'ai créé un batch pour Windows ; ça fonctionne , mais même problèmes . ===== Donc , _ soit ma ligne n'est pas assez rapide ( 250 ko/s en DL ) . _ soit je n'ai pas assez de mémoire vive ( 1 Mo ) _ soit le serveur est saturé Merci , pour ton aide . Cordialement . Hors ligne #2315 Le 30/10/2012, à 22:06 k3c Re : TVDownloader: télécharger les médias du net ! @ duocore regarde ce thread http://stream-recorder.com/forum/use-rt … p+tutorial Hors ligne #2316 Le 30/10/2012, à 23:17 duocore Re : TVDownloader: télécharger les médias du net ! K3c j'ai regardé ton lien il s'agit de logiciels windows: url snopper,... j'ai essayer de voir si il y avais des logiciels linux je n'en ai pas trouvé Hors ligne #2317 Le 31/10/2012, à 06:48 k3c Re : TVDownloader: télécharger les médias du net ! @ duocore si, il y a du Linux flasm openssl tout part du player, qui est spécifique à chaque site. Hors ligne #2318 Le 31/10/2012, à 08:36 duocore Re : TVDownloader: télécharger les médias du net ! j'ai obtenu HMAC-SHA256(loaderexport.swf)= b18528111bda4516700df254576cd4b46061eb92ef2663ebd07da2d6240102f7 j'ai utilisé cette commande ./rtmpdump --swfhash "b18528111bda4516700df254576cd4b46061eb92ef2663ebd07da2d6240102f7" --swfsize 1927 pour cette video: http://videos.nt1.tv/haven/saison-2-epi … 1-846.html mais ne fonctionne pas. ensuite j'ai trouvé pyurlsnooper pour linux Hors ligne #2319 Le 31/10/2012, à 19:02 chaoswizard Re : TVDownloader: télécharger les médias du net ! j'ai obtenu HMAC-SHA256(loaderexport.swf)= b18528111bda4516700df254576cd4b46061eb92ef2663ebd07da2d6240102f7 Eu,h, j'ai pas tout suivi mais il y a une méthode pour récupérer les clefs HMAC enfouies au fond des lecteurs flash ?! Je veux !!! Ubuntu ==> Debian ==> Archlinux Hors ligne #2320 Le 31/10/2012, à 23:06 bibichouchou Re : TVDownloader: télécharger les médias du net ! salut, à chaoswizard : Félicitations pour la nouvelle mouture de pluzzdl (intégration des sous-titres). Ça fonctionne très bien. à k3c, 11gjm : Merci pour le code. Pour tmc ou tf1, je dois veillir le jeton d'autentification pour que cela fonctionne : j'ai remplacé ts = base36encode(int(time.time())) par ts = base36encode(int(time.time() + 60)) NB 1 : je ne peux pas faire marcher votre code tel quel mais comme ma distrib + ses paquets sont bloqués à l'époque 10.10, je pense que cela vient de moi. NB 2 : votre code devrait marcher pour nt1 (modulo 1 modif ou 2) qui met ses vidéos chez wat. à duocore : voilà une ligne de commande pour la viédo que tu mentionnes. Fontctionne chez moi avec rtmpdump v2.4. A utiliser rapidement. Pour la génération d'une telle ligne, voir le code de k3c/11gjm pour tf1/tmc. rtmpdump -e -r 'rtmpte://wske.wat.tv/ondemand/mp4:vod/H264-384x288/11/23/8921123.h264?bu=&login=Haven-nt1&u=68a27801372a38cb7ddb916301c61d63&sum=1398f2768bbce391058a8db6dfabaaa8' -o Saison_2_-_Episode_2_A_chacun_sa_peur.mp4 Amicalement Dernière modification par bibichouchou (Le 31/10/2012, à 23:57) Hors ligne #2321 Le 01/11/2012, à 11:09 k3c Re : TVDownloader: télécharger les médias du net ! @ bibichouchou Merci pour tes précisions, voici un script qui télécharge sur NT1 les deux premiers épisodes de Haven, mais pas http://www.nt1.tv/the-walking-dead/epis … 3-846.html faudra que je regarde pourquoi Edit : boulet inside, ce n'est pas une video... Ca marche pour http://videos.nt1.tv/enquetes-impossibl … 7-846.html #!/usr/bin/env python # -*- coding:utf-8 -*- # NT1 version 0.1 par k3c, correction de bibichouchou import subprocess, optparse, re, sys, os, shlex import socket from urllib2 import Request, urlopen, URLError, HTTPError from urllib2 import ProxyHandler, Request, urlopen, URLError, HTTPError import time,md5,random,urllib2 import bs4 as BeautifulSoup timeout = 90 listeUserAgents = [ 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_5; fr-fr) AppleWebKit/525.18 (KHTML, like Gecko) Version/3.1.2 Safari/525.20.1', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.186 Safari/535.1', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/525.13 (KHTML, like Gecko) Chrome/0.2.149.27 Safari/525.13' , 'Mozilla/5.0 (X11; U; Linux x86_64; en-us) AppleWebKit/528.5+ (KHTML, like Gecko, Safari/528.5+) midori', 'Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.107 Safari/535.1', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-us) AppleWebKit/312.1 (KHTML, like Gecko) Safari/312', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.12 Safari/535.11', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.8 (KHTML, like Gecko) Chrome/17.0.940.0 Safari/535.8' ] def get_wat(id): ID_WAT = id def base36encode(number): if not isinstance(number, (int, long)): raise TypeError('number must be an integer') if number < 0: raise ValueError('number must be positive') alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' base36 = '' while number: number, i = divmod(number, 36) base36 = alphabet[i] + base36 return base36 or alphabet[0] ts = base36encode(int(time.time())+60) timesec = hex(int(ts, 36))[2:] while(len(timesec)<8): timesec="0"+timesec token = md5.new("9b673b13fa4682ed14c3cfa5af5310274b514c4133e9b3a81e6e3aba00912564/web/"+str(ID_WAT)+""+timesec).hexdigest() id_url1 = "http://www.wat.tv/get/web/"+str(ID_WAT)+"?token="+token+"/"+timesec+"&getURL=1&country=FR" return id_url1 def main(): # timeout en secondes socket.setdefaulttimeout(timeout) usage = "usage: python tmc_tf1.py [options] <url de l'emission>" parser = optparse.OptionParser( usage = usage ) parser.add_option( "--nocolor", action = 'store_true', default = False, help = 'desactive la couleur dans le terminal' ) parser.add_option( "-v", "--verbose", action = "store_true", default = False, help = 'affiche les informations de debugage' ) ( options, args ) = parser.parse_args() if( len( args ) > 2 or args[ 0 ] == "" ): parser.print_help() parser.exit( 1 ) the_url = sys.argv[1] html = urlopen(sys.argv[1]).read() nom = sys.argv[1].split('/')[-1:][0] no = nom.split('.')[-2:][0] soup = BeautifulSoup.BeautifulSoup(html) debut_id = str(soup.find('section', attrs={'class' : 'player-unique' })) id = [x.strip() for x in re.findall("mediaId :([^,]*)",debut_id)][0] id_url1 = get_wat(id) opener = urllib2.build_opener() opener.addheaders = [('User-agent', random.choice(listeUserAgents))] data = opener.open(id_url1).read() opener.close() if data[0:4] == 'http': ua = random.choice(listeUserAgents) arguments = 'curl "%s" -L -g -A "%s" -o "%s.mp4"' % (data, ua, no) process = subprocess.Popen(arguments, stdout=subprocess.PIPE, shell=True).communicate()[0] if data[0:4] == 'rtmp': host = re.search('rtmpte://(.*)/ondemand',data).group(1) host = host.replace('rtmpte','rtmpe') player = "http://www.wat.tv/images/v40/PlayerWat.swf?revision=4.0.992" data0 = re.search('rtmpte://(.*)h264',data).group(0) data1 = re.search('&u=(.*)&sum',data).group(1) data2 = re.search('&sum=(.*)$',data).group(1) data3 = data1+data2 app = re.search('wske.wat.tv/(.*)/mp4:vod',data).group(1) playpath = re.search('mp4:vod(.*).h264',data).group(0) # cmds = "rtmpdump"+" --host "+host+" -l 0 --app "+app+" --playpath "+playpath+" --swfVfy "+player+" --port 443 -e -k 1 --flv "+str(no)+".mp4" cmds = 'rtmpdump -r "%s" -c 1935 -m 10 -w 40ee2a765514facfc6b1eb2745d22cdbdfaaf7f1488b17600c1a22dd3074735d -x 339543 -o "%s.mp4" " --resume"' % (data0, str(no)) arguments = shlex.split( cmds ) print arguments process = subprocess.Popen( arguments, stdout = subprocess.PIPE).communicate()[0] if __name__ == "__main__": main() Dernière modification par k3c (Le 01/11/2012, à 11:16) Hors ligne #2322 Le 01/11/2012, à 13:42 k3c Re : TVDownloader: télécharger les médias du net ! Ah au fait pour le script pour télécharger sur http://tou.tv, une personne sur neo-net a fait une remarque pertinente, que le format téléchargé était du flv et pas du mp4. Mediainfo le confirme. Il faut donc remplacer .mp4 par .flv dans le script, quand on crée la commande. Hors ligne #2323 Le 01/11/2012, à 21:30 11gjm Re : TVDownloader: télécharger les médias du net ! Bonjour à tous , @k3c : ===== http://www.nt1.tv/the-walking-dead/epis … 3-846.html ---- C'est normal , la diffusion n'est prévue que pour le 2 Nov. . ===== ===== http://videos.nt1.tv/enquetes-impossibl … 7-846.html --- ok , aussi pour captvty --- Ne fonctionne pas avec RTMPExploreX . J'ai pu récupérer les commandes : rtmpdump -e -r "rtmpe://wske.wat.tv:1935/ondemand" -a "ondemand" -f "WIN 11,4,402,265" -W "http://www.wat.tv/images/v40/PlayerWat.swf?revision=4.1.81" -p "http://videos.nt1.tv/enquetes-impossibles/emission-du-31-octobre-rediffusion-7613577-846.html" -y "mp4:vod/H264-384x288/63/11/8936311.h264?bu=nt1.tv&login=nt1-enquetes-impossibles&i=90.28.251.160&u=4e847b52ed42048286d2adcfbb2aa526&sum=b17cded51c339f983c9a4b80eb4ce4b4" -o 8936311.mp4 Les modifs effectuées dans le batch : _ ajouté "-e" _ ajouté "?revision=4.1.81" pour le player _ modifié le fichier final "8936311.mp4" au lieu de "8936311.h264.flv" ===== Je reviendrai pour expliquer mes PB , avec les fichiers tf1_03.py et nt1_01.py , qui ne fonctionnent plus ( pb de librairies ) . A+ . Hors ligne #2324 Le 01/11/2012, à 22:45 11gjm Re : TVDownloader: télécharger les médias du net ! Bonjour , 1er essai avec tf1_02x.py (fonction replace existante) et player "?revision=4.0.992" ====== Ligne 59 id = [x.strip() for x in re.findall("mediaId [^,]*)",debut_id)][0] IndexError : list index out of range ====== ====== Ligne 84 in module <main> , main() ====== 2ième essai avec tf1_03.py (fonction replace supprimée) et player "?revision=4.0.992" ====== Même constat ====== 3ième essai avec tf1_03_Pl.py (fonction replace supprimée) et player "?revision=4.1.181" ====== Même constat ====== A+ . Hors ligne #2325 Le 02/11/2012, à 10:24 k3c Re : TVDownloader: télécharger les médias du net ! @11gjm Bizarre, à ce niveau-là, on fait juste du bête parsing HTML Peux-tu faire python >>> from urllib2 import urlopen >>> import bs4 as BeautifulSoup >>> a = urlopen('http://videos.nt1.tv/haven/saison-2-episode-2-a-chacun-sa-peur-7598701-846.html').read() >>> soup = BeautifulSoup.BeautifulSoup(a) >>> ! la ligne suivante est valable uniquement pour NT1 >>> soup.find('section', attrs={'class' : 'player-unique' }) <section class="player-unique"><article> <meta content="Flash" itemprop="playerType"/> <meta content="PT2413S" itemprop="duration"/><meta content="2012-10-22T19:18:00+0200" itemprop="uploadDate"/><meta content="2012-11-02T22:15:00+0100" itemprop="expires"/><link content="/haven/saison-2-episode-2-a-chacun-sa-peur-7598701-846.html" itemprop="url"/><link content="/mmdia/i/56/2/les-mysteres-de-haven-saison-2-les-photos-de-l-episode-2-10794562sryxr.jpg?v=2" itemprop="thumbnailUrl"/> <meta content="628" itemprop="width"/><meta content="353" itemprop="height"/> <div id="FlashPlayer" style="background:#FFF; width:100%; height:353px"><div id="FlashPlayer2"></div></div><script charset="utf-8" src="http://s.wat.fr/js/v2.5/swfobject.js" type="text/javascript"></script><script charset="utf-8" src="http://s.wat.fr/js/v2.5/WATPlayer.js" type="text/javascript"></script><script type="text/javascript">if (!typeof(PWpreroll)) {var PWpreroll = '';}if ( typeof( sitepage ) != "undefined" ) {WATPlayer.showSyndicated({previewMedia : "http://s.nt1.tv/mmdia/i/48/0/10791480uosbb.jpg",mediaId : 8921123,isStartAd : true,ad20 : 1,modeWatMedia : true,autoStart : 1,recoTf1 : "http://www.nt1.tv/js/video/poursuite/0, … 1351242045",unvId : "846",conId : "4471687",chaId : "3427061",setVolume : "10",playlistIdCustom : "8806145",baseURL : "http://www.wat.tv",url : "http://www.wat.tv/swfnt1/239910nIc0K118921123",width : "100%",height : "100%",container : "FlashPlayer2", oasTag : sitepage, PWpreroll : PWpreroll});}else{WATPlayer.showSyndicated({previewMedia : "http://s.nt1.tv/mmdia/i/48/0/10791480uosbb.jpg",mediaId : 8921123,isStartAd : true,ad20 : 1,modeWatMedia : true,autoStart : 1,recoTf1 : "http://www.nt1.tv/js/video/poursuite/0, … 1351242045",unvId : "846",conId : "4471687",chaId : "3427061",setVolume : "10",playlistIdCustom : "8806145",baseURL : "http://www.wat.tv",url : "http://www.wat.tv/swfnt1/239910nIc0K118921123",width : "100%",height : "100%",container : "FlashPlayer2", PWpreroll : PWpreroll});}</script><script type="text/javascript">var permalinkWat = document.location.href;WATPlayer.setPermalink(permalinkWat);</script> </article></section> >>> 'mediaId' in str(soup.find('section', attrs={'class' : 'player-unique' })) True >>> ! avec tmc >>> a = urlopen('http://videos.tmc.tv/las-vegas/las-vegas-episode-6-saison-1-devoirs-de-vacances-7613614-848.html').read() >>> soup = BeautifulSoup.BeautifulSoup(a) >>> soup.find('div', attrs={'class' : 'unique'}) <div class="unique"> <meta content="Flash" itemprop="playerType"/><meta content="640" itemprop="width"/><meta content="360" itemprop="height"/> <div id="FlashPlayer" style="background:#FFF; width:100%; height:360px"><div id="FlashPlayer2"></div></div><script charset="utf-8" src="http://s.wat.fr/js/v2.5/swfobject.js" type="text/javascript"></script><script charset="utf-8" src="http://s.wat.fr/js/v2.5/WATPlayer.js" type="text/javascript"></script><script type="text/javascript">if (!typeof(PWpreroll)) {var PWpreroll = '';}if ( typeof( sitepage ) != "undefined" ) {WATPlayer.showSyndicated({previewMedia : "http://s.tmc.tv/mmdia/i/77/4/10794774emmvg.jpg",mediaId : 8936403,isStartAd : true,ad20 : 1,modeWatMedia : true,autoStart : 1,recoTf1 : "http://www.tmc.tv/js/video/poursuite/0, … 1351512528",unvId : "848",conId : "4481392",chaId : "3465098",setVolume : "10",playlistIdCustom : "5473071",baseURL : "http://www.wat.tv",url : "http://www.wat.tv/swftmc/243996nIc0K118936403",width : "100%",height : "100%",container : "FlashPlayer2", oasTag : sitepage, PWpreroll : PWpreroll});}else{WATPlayer.showSyndicated({previewMedia : "http://s.tmc.tv/mmdia/i/77/4/10794774emmvg.jpg",mediaId : 8936403,isStartAd : true,ad20 : 1,modeWatMedia : true,autoStart : 1,recoTf1 : "http://www.tmc.tv/js/video/poursuite/0, … 1351512528",unvId : "848",conId : "4481392",chaId : "3465098",setVolume : "10",playlistIdCustom : "5473071",baseURL : "http://www.wat.tv",url : "http://www.wat.tv/swftmc/243996nIc0K118936403",width : "100%",height : "100%",container : "FlashPlayer2", PWpreroll : PWpreroll});}</script><script type="text/javascript">var permalinkWat = document.location.href;WATPlayer.setPermalink(permalinkWat);</script> </div> >>> 'mediaId' in str(soup.find('div', attrs={'class' : 'unique'})) True Dernière modification par k3c (Le 02/11/2012, à 10:29) Hors ligne
Classes in Python By Jason Myers length = 3price = 5.99count = 0 name = "George"poem = "Roses are Red\nViolets are Blue"option = "n" discount = Trueshow_help = False (c)Tomo.Yun (www.yunphoto.net/en/) class Fish: pass >> Fish() <... Fish instance at ...> >> Fish() <... Fish instance at ...> >> a = Fish() >> b = Fish() >> a == b False class Fish:     breathes_in_water = True skin = "scales" >> myfish = Fish() >> myfish.skin "scales" >> if myfish.breathes_in_water:... print "Glug glug!" Glug glug! class Fish: def move(self, speed): print "swimming %s!" % speed >> myfish = Fish() >> myfish.move("fast") swimming fast! ... def move(self, speed): ... myfish.move("fast") class Fish: def move(self, speed): print self.name + " is moving " + speed + "!" >> myfish = Fish()>> myfish.name = "Spencer">> myfish.move("fast")Spencer is moving fast! >> myfish = Fish()>> myfish.name = "Spencer">> myfish.color = "Gold" >> myotherfish = Fish()>> myotherfish.name = "Susan">> myotherfish.color = "Blue" class Fish: def __init__(self): print "Fish init!" >> myfish = Fish()Fish init! >> Fish()Fish init! class Fish: def __init__(self, fish_name): self.name = fish_name >> myfish = Fish("Spencer")>> myfish.name"Spencer" class Fish: breathes_in_water = True skin = "scales" def __init__(self, fish_name, fish_color): self.name = fish_name self.color = fish_color def move(self, speed): print self.name + " is moving " + speed + "!" >> spencer = Fish("Spencer", "Gold")>> print spencer.move("slowly")Spencer is moving slowly! class Fish: def __init__(self, species): self.species = species spencer = Fish("Goldfish")susan = Fish("Flounder") class Goldfish: breathes_in_water = True skin = "scales" def move(self, speed): print "Swimming upright %s!" % speed class Flounder: breathes_in_water = True skin = "scales" def move(self, speed): print "Swimming sideways %s!" % speed class Fish: skin = "scales" class Goldfish(Fish): # <- Fish specified as parent def move(self, speed): print "Moving %s" % speed >> spencer = Goldfish()>> spencer.skin # <- So it has "skin" from Fish"scales" class Fish: color = "Blue" # Default fish color is Blue skin = "scales" class Flounder(Fish): pass # Inherits color = "Blue" from parent class Goldfish(Fish): color = "Gold" # Override color for Goldfish >> susan = Flounder()>> susan.color"Blue">> spencer = Goldfish()>> spencer.color"Gold" class Fish: pass class Goldfish(Fish): pass class Flounder(Fish): pass >> spencer = Goldfish()>> isinstance(spencer, Fish)True>> isinstance(spencer, Goldfish)True>> isinstance(spencer, Flounder)False class Fish(object): pass class Goldfish(Fish): pass class Aquarium(object): fish = [] def __init__(self, fish): self.fish = fish class Fish(object): color = "Blue" def __init__(self, name): self.name = name class Goldfish(Fish): color = "Gold" class Flounder(Fish): pass >> my_fish = [Goldfish("Spencer"), Goldfish("Vladimir"), Flounder("Susan")]>> my_aquarium = Aquarium(my_fish)>> for fish in my_aquarium.fish:... print fish.name class Aquarium(object): ... def feed(self, food): for fish in self.fish: fish.eat(food) class Fish(object): ... def eat(self, food): print self.name + " is eating " + food + "!" class Goldfish(Fish): pass >> my_aquarium = Aquarium([Goldfish("Spencer"), Goldfish("Susan")]) >> my_aquarium.feed("flakes")Spencer is eating flakes!Susan is eating flakes! By Jason Myers
ps1a.py #lists prime numbers up to the 1000th prime def testPrime(x): factor = 2 while factor**2 <= x: if x % factor == 0: return False else: factor = factor + 1 return True candidate = 3 numPrime = 1 while numPrime < 1000: if testPrime(candidate): numPrime = numPrime + 1 candidate = candidate + 2 print "The 1000th prime number is", candidate - 2 ps1b.py #asks for an upper limit #calculates all primes and the sum of their logs up to the upper limit #prints out the sum of the logs, the upper limit, #and the ratio between the two from math import * def testPrime(x): factor = 2 while factor**2 <= x: if x % factor == 0: return False else: factor = factor + 1 return True candidate = 3 primeLogSum = log(2) n = int(raw_input("What is n? ")) while candidate < n: if testPrime(candidate): primeLogSum = primeLogSum + log(candidate) candidate = candidate + 2 print primeLogSum, n, primeLogSum/n
I have a list: list1=[] the length of the list is undetermined so I am trying to append objects to the end of list1 like such: for i in range(0, n): list1=list1.append([i]) But my output keeps giving this error: AttributeError: 'NoneType' object has no attribute 'append' Is this because list1 starts off as an empty list? How do I fix this error?
Django Vanilla Views Beautifully simple class-based views. Author: Tom Christie. Follow me on Twitter, here. View --+------------------------- RedirectView | +-- GenericView -------+-- TemplateView | | | +-- FormView | +-- GenericModelView --+-- ListView | +-- DetailView | +-- CreateView | +-- UpdateView | +-- DeleteView Django's generic class-based view implementation is unneccesarily complicated. Django vanilla views gives you exactly the same functionality, in a vastly simplified, easier-to-use package, including: No mixin classes. No calls to super(). A sane class hierarchy. A stripped down API. Simpler method implementations, with less magical behavior. Remember, even though the API has been greatly simplified, everything you're able to do with Django's existing implementation is also supported in django-vanilla-views. Although note that the package does not yet include the date based generic views. If you believe you've found some behavior in Django's generic class-based views that can't also be trivially achieved in django-vanilla-views, then please open a ticket, and we'll treat it as a bug. To review the full set of API differences between the two implementations, please see the migration guide for the base views, and the model views. For further background, the original release announcement for django-vanilla-views is available here. There are also slides to a talk 'Design by minimalism' which introduces django-vanilla-views and was presented at the Django User Group, London. You can also view the Django class hierarchy for the same set of views that django-vanilla-views provides, here. Helping you to code smarter Django Vanilla Views isn't just easier to use. I'd contest that because it presents fewer points of API to override, you'll also end up writing better, more maintainable code as a result. You'll be working from a smaller set of repeated patterns throughout your projects, and with a much more obvious flow control in your views. As an example, a custom view implemented against Django's CreateView class might typically look something like this: from django.views.generic import CreateView class AccountCreateView(CreateView): model = Account def get_success_url(self): return self.object.account_activated_url() def get_form_class(self): if self.request.user.is_staff: return AdminAccountForm return AccountForm def get_form_kwargs(self): kwargs = super(AccountCreateView, self).get_form_kwargs() kwargs['owner'] = self.request.user return kwargs def form_valid(self, form): send_activation_email(self.request.user) return super(AccountCreateView, self).form_valid(form) Writing the same code with django-vanilla-views, you'd instead arrive at a simpler, more concise, and more direct style: from vanilla import CreateView from django.http import HttpResponseRedirect class AccountCreateView(CreateView): model = Account def get_form(self, data=None, files=None, **kwargs): user = self.request.user if user.is_staff: return AdminAccountForm(data, files, owner=user, **kwargs) return AccountForm(data, files, owner=user, **kwargs) def form_valid(self, form): send_activation_email(self.request.user) account = form.save() return HttpResponseRedirect(account.account_activated_url()) Requirements Django: 1.3, 1.4, 1.5, 1.6 Python: 2.6, 2.7, 3.2, 3.3 Installation Install using pip. pip install django-vanilla-views Usage Import and use the views. from vanilla import ListView, DetailView For example: from django.core.urlresolvers import reverse_lazy from example.notes.models import Note from vanilla import CreateView, DeleteView, ListView, UpdateView class ListNotes(ListView): model = Note class CreateNote(CreateView): model = Note success_url = reverse_lazy('list_notes') class EditNote(UpdateView): model = Note success_url = reverse_lazy('list_notes') class DeleteNote(DeleteView): model = Note success_url = reverse_lazy('list_notes') Compare and contrast To help give you an idea of the relative complexity of django-vanilla-views against Django's existing implementations, let's compare the two. Inheritance hierachy, Vanilla style. The inheritance hierarchy of the views in django-vanilla-views is trivial, making it easy to figure out the control flow in the view. CreateView --> GenericModelView --> View Total number of source files: 1 (model_views.py) Inheritance hierachy, Django style. Here's the corresponding inheritance hiearchy in Django's implementation of CreateView. +--> SingleObjectTemplateResponseMixin --> TemplateResponseMixin |CreateView --+ +--> ProcessFormView --> View | | +--> BaseCreateView --+ | +--> FormMixin ----------+ +--> ModelFormMixin --+ +--> ContextMixin +--> SingleObjectMixin --+ Calling hierarchy, Vanilla style. Let's take a look at the calling hierarchy when making an HTTP GET request to CreateView. CreateView.get() | +--> GenericModelView.get_form() | | | +--> GenericModelView.get_form_class() | +--> GenericModelView.get_context_data() | | | +--> GenericModelView.get_context_object_name() | +--> GenericModelView.render_to_response() | +--> GenericModelView.get_template_names() Total number of code statements covered: ~40 Calling hierarchy, Django style. Here's the equivalent calling hierarchy in Django's CreateView implementation. BaseCreateView.get() | +--> ProcessFormView.get() | +--> ModelFormMixin.get_form_class() | | | +--> SingleObjectMixin.get_queryset() | +--> FormMixin.get_form() | | | +--> ModelFormMixin.get_form_kwargs() | | | | | +--> FormMixin.get_form_kwargs() | | | +--> FormMixin.get_prefix() | | | +--> FormMixin.get_initial() | +--> ModelFormMixin.get_context_data() | | | +--> SingleObjectMixin.get_context_object_name() | | | +--> SingleObjectMixin.get_context_data() | | | +--> SingleObjectMixin.get_context_object_name() | | | +--> ContextMixin.get_context_data() | +--> TemplateResponseMixin.render_to_response() | +--> SingleObjectTemplateResponseMixin.get_template_names() | +--> TemplateResponseMixin.get_template_names() Total number of code statements covered: ~70 Example project This repository includes an example project in the example directory. You can run the example locally by following these steps: git clone git://github.com/tomchristie/django-vanilla-views.git cd django-vanilla-views/example # Create a clean virtualenv environment and install Django virtualenv env source env/bin/activate pip install -r requirements.txt # Ensure the local copy of the 'vanilla' pacakge is on our path export PYTHONPATH=..:. # Run the project python ./manage.py syncdb --noinput python ./manage.py runserver Open a browser and navigate to http://127.0.0.1:8000. Once you've added a few notes you should see something like the following: License Copyright © Tom Christie. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
i get the following error when trying a script thaat sends mail import urllib.request import re import smtplib from email.mime.text import MIMEText from bs4 import BeautifulSoup page=urllib.request.urlopen("http://www.crummy.com/") soup=BeautifulSoup(page) v=soup.findAll('a',href=re.compile('http://www.crummy.com/2012/07/24/0')) for link in v: w=link.get('href') server = smtplib.SMTP( "smtp.gmail.com", 587 ) server.starttls() server.login( 'xxxxxxxxxxx', 'xxxxxxx' ) server.sendmail( 'xxxxxxxxx', 'xxxxxxxxx', "bonus question is up" ) Traceback (most recent call last): File "C:\Python32\bonus", line 14, in server = smtplib.SMTP( "smtp.gmail.com", 587 ) File "C:\Python32\lib\smtplib.py", line 259, ininitFile "C:\Python32\lib\smtplib.py", line 319, in connect self.sock = self._get_socket(host, port, self.timeout) File "C:\Python32 \lib\smtplib.py", line 294, in _get_socket return socket.create_connection((host, port), timeout) File "C:\Python32\lib\socket.py", line 386, in create_connection for res in getaddrinfo(host, port, 0, SOCK_STREAM): socket.gaierror: [Errno 11004] getaddrinfo failed plse advice on the best way to go round it
cmarcx Re : [Script] Client Hubic pour linux ;) Bonjour, J'ai essayé de monter hubic avec la commande : ./hubicmount -l <email> -p <passwd> -o umask=022 -o uid=1000 /media/hubic Mais j'obtiens toujours l'erreur "Unable to login to hubic". Pourtant les logins indiqués sont bons, je fais un copier-coller, et ils fonctionnent avec le client d'Hubic... Je dois changer quelque chose ? oui dans certains cas il faut utiliser le nic-handle au lieu du login perso (email) Hors ligne Jarodd Re : [Script] Client Hubic pour linux ;) Merci cmarcx, je vais réessayer alors ! Fixe : 12.04 LTS Sony Vaio : 14.04 LTS / Windows 8.1 Hors ligne René B Re : [Script] Client Hubic pour linux ;) Oui, il faut installer libneon27-gnutls-dev pour contourner le probleme... (j'y ai eu recours en reponse ici au 15e message) J'ai essayé d'installer libneon27-gnutls-dev Il me demande de supprimer libgnutls-dev et quand j'essaye de le faire il m'envoie le message libneon27-gnutls-dev: Dépend : libgnutls-dev mais ne doit pas être installé Je suis sous Ubuntu 12.04 64bits Merci de votre aide Hors ligne Jarodd Re : [Script] Client Hubic pour linux ;) Bonjour, Lorsque j'essaye de lancer le client avec la commande hubicmount, j'obtiens cette erreur : ## error: could not mount remote server 'https://cloudnas1.ovh.com/edab6f9344c9b4cebca156db9ffbeeb4/moi'. reason: 301 Moved Permanently to 'https://cloudnas1.ovh.com/edab6f9344c9b4cebca156db9ffbeeb4/moi/'. Je ne comprend pas ce que cela signifie Ca vient de la commande ou de chez Hubic ? Fixe : 12.04 LTS Sony Vaio : 14.04 LTS / Windows 8.1 Hors ligne thereelneo Re : [Script] Client Hubic pour linux ;) Je viens de tester tes scripts, ça marche niquel, et c'est quand même plus agréable que le client linux hubic. Merci ! Hors ligne Jarodd Re : [Script] Client Hubic pour linux ;) Suis-je le seul à avoir une erreur avec ce script ? Fixe : 12.04 LTS Sony Vaio : 14.04 LTS / Windows 8.1 Hors ligne martyjeu Re : [Script] Client Hubic pour linux ;) Non, moi aussi j'ai une erreur : alain@alainMini-110c-1000:/usr/bin$ ./hubicmount -l xxxxxxxx -p xxxxxxxxx -o umask=022 -o uid=1000 HOME/hubic ## error: could not mount remote server 'https://cloudnas1.ovh.com/35263b08f362e … d90050146/'. reason: 502 Bad Gateway. quelqu'un connait une solution ?:mad: merci ! Hors ligne Couseb Re : [Script] Client Hubic pour linux ;) Hello J'ai essayé ce script avec mon compte créé il y a plusieurs mois, ça fonctionne bien. Mais avec les identifiants de mes parents, dont le compte a été créé cette semaine, je ne peux plus récupérer les paramètres de WebDav. Ni avec ce script, ni celui d'origine de protocol-hacking. L'auteur saurait-il étudier s'il y a eu modification pour les nouveaux comptes et modifier le script en conséquence ? Merci d'avance Hors ligne cosmobob Re : [Script] Client Hubic pour linux ;) salut, il semble qu'ovh est en train de migrer vers une autre plateforme sans webdav :s donc ca risque de ne plus marcher! en attente de ce qu'ils proposent pour voir Hors ligne Jarodd Re : [Script] Client Hubic pour linux ;) Quelles sont tes sources sur ce changement ? Dernière modification par Jarodd (Le 04/11/2012, à 12:28) Fixe : 12.04 LTS Sony Vaio : 14.04 LTS / Windows 8.1 Hors ligne temporaire8864 Re : [Script] Client Hubic pour linux ;) Un script plus recent pour obtenir URL, login et mot de passe pour connexion WebDAV: http://pastebin.com/LCHjJHh2 Dernière modification par temporaire8864 (Le 30/11/2012, à 16:02) Hors ligne dawabz94 Re : [Script] Client Hubic pour linux ;) Un script plus recent pour obtenir URL, login et mot de passe pour connexion WebDAV: http://pastebin.com/LCHjJHh2 Merveilleux ce script, il m'a retourné les infos webdav, j'ai paramétré mon appli de synchro mobile avec les informations retournées, et le login a été validé j'ai pu me connecter et parcourir mon arborescence Je ne connais pas - par contre la durée de validité du login/mot de passe cryptés retourné Mais l'information retournée par ce script est valide; merci ! Pour trace, je colle ici le code du script également Merci merci merci #!/usr/bin/python # -*- coding: utf-8 -*- import httplib, urllib class config: ovh_host = "ws.ovh.com" def get_webdav_info(login, password): ws_info={} null = None # Log into CloudNAS with nasLogin and retrieve a session ID print 'Logging into CloudNAS to retrieve a session ID...' params = 'session=&params={"email":"%s","password":"%s"}' % (urllib.quote(login), urllib.quote(password)) headers = {"Content-type": 'application/x-www-form-urlencoded', "User-Agent": 'hubiC/0.4.8 beta (Windows NT 6.1; fr_FR)'} conn = httplib.HTTPSConnection(config.ovh_host) conn.request("POST", "/cloudnas/r3/ws.dispatcher/nasLogin", params, headers) resp = conn.getresponse() s = resp.status r = resp.reason data = resp.read() try: d = eval(data) sid = d['answer']['id'] except KeyError: return ws_info finally: conn.close() print '# SID:', sid # POST on getNas using session ID to get URL print 'Retrieving user-specific URL for WebDAV...' params = 'session=%s' % sid conn = httplib.HTTPSConnection(config.ovh_host) conn.request("POST", "/cloudnas/r3/ws.dispatcher/getNas", params, headers) resp = conn.getresponse() s = resp.status r = resp.reason data = resp.read() try: d = eval(data) ws_info['url'] = d['answer'][0]['url'] except KeyError : return ws_info finally: conn.close() print '# URL:', ws_info['url'] # POST on getCredentials using session ID to get credentials print 'Retrieving user-specific credentials for WebDAV...' params = 'session=%s' % sid conn = httplib.HTTPSConnection(config.ovh_host) conn.request("POST", "/cloudnas/r3/ws.dispatcher/getCredentials", params, headers) resp = conn.getresponse() s = resp.status r = resp.reason data = resp.read() try: d = eval(data) ws_info['login'] = d['answer']['username'] ws_info['passwd'] = d['answer']['secret'] except KeyError: return ws_info finally: conn.close() print '# Ok' return ws_info if __name__ == '__main__': import sys import getpass import os.path login = raw_input("Login: ") password = getpass.getpass() ws_info = get_webdav_info(login, password) print ''' URL: %(url)s login: %(login)s password: %(passwd)s ''' % ws_info Hors ligne
There are dozens of other WP installs on the same server, subject to the same ModSec rules, and none of them has a problem. This is the only WP install that has jetpack installed and it only occurs when JetPack is active. The problem is as follows: Some actions on the post/page edit screen (so far it seems to be the ones that don't refresh the URL, like UPDATE or configure featured image) show me the following alert:http://i.imgur.com/ULPiX.png The logs show: Received From: kvm1->/var/log/httpd/error_log Rule: 1002 fired (level 2) -> "Unknown problem somewhere in the system." Portion of the log(s): body.xml:1: parser error : Document is empty ModSecurity: XML parser error: XML: Failed parsing document. [hostname "www.mydomain.org"] [uri "/wp-admin/admin-ajax.php"] [unique_id "blablah"] ModSecurity: [file "/etc/httpd/modsecurity.d/00_asl_z_antievasion.conf"] [line "36"] [id "330791"] [msg "Failed to parse request body. This may be an impedence mismatch attack, a broken application or a broken connection. This is not a false positive. Check your application or client for errors."] [data "XML parser error: XML: Failed parsing document."] [severity "CRITICAL"] Access denied with code 403 (phase 2). Match of "eq 0" against "REQBODY_ERROR" required. [hostname "www.mydomain.org"] [uri "/wp-admin/admin-ajax.php"] [unique_id "blablah"] Jetpack is 1.7 WP is 3.4.1 Apache 2.2.15 PHP fCGId 5.3.3 Any ideas? The WP forum has been unresponsive so far. I've sent an email to JetPack support.
obelix Ajouter les backports ! bonjour... je voudrais ajouter les backsports comme indiquer http://wiki.ubuntu-fr.org/installation/depots mais cela plante avec tous les backports une idee ? merci de votre aide ftp://ftp2.caliu.info/backports/dists/breezy-backports/main/binary-i386/Packages.gz: Impossible de récupérer le fichier, le serveur a répondu « Failed to open file. » ftp://ftp2.caliu.info/backports/dists/breezy-backports/universe/binary-i386/Packages.gz: Impossible de récupérer le fichier, le serveur a répondu « Failed to open file. » ftp://ftp2.caliu.info/backports/dists/breezy-backports/multiverse/binary-i386/Packages.gz: Impossible de récupérer le fichier, le serveur a répondu « Failed to open file. » ftp://ftp2.caliu.info/backports/dists/breezy-backports/restricted/binary-i386/Packages.gz: Impossible de récupérer le fichier, le serveur a répondu « Failed to open file. » obelix Re : Ajouter les backports ! une autre question.. j'ai pu rajouter les PLF mais synaptic me dit attention" paquets non authneitfie ??? pourquoi ? merci yeKcim Re : Ajouter les backports ! Pour plf > Normal, les paquets ne sont pas soutenu par l'equipe de dev de ubuntu, les paquets ne sont pas certifié par ubuntu, ils ne sont pas responsables. Participer à un projet libre est un jeu... Et toi, à quoi tu joues ? http://yeknan.free.fr Hors ligne AlexandreP Re : Ajouter les backports ! Les backports pour Breezy ne sont pas encore ouverts. «La capacité d'apprendre est un don; La faculté d'apprendre est un talent; La volonté d'apprendre est un choix.» -Frank Herbert 93,8% des gens sont capables d'inventer des statistiques sans fournir d'études à l'appui. Hors ligne bernez Re : Ajouter les backports ! J'ai rajouté dans mon sources.list les lignes : deb http://antesis.freecontrib.org/mirrors/ubuntu/plf/ breezy free non-free deb-src http://antesis.freecontrib.org/mirrors/ubuntu/plf/ breezy free non-free en attendant l'ouverture de backports de Breezy et pour avoir certains paquetages tels que : realplay_10.0.6.776-1plf1_i386.deb, skype_1.2.0.18-1_i386.deb, sun-j2sdk1.5_1.5.0+update05_i386.deb, w32codecs_20050412-1plf4_i386.deb, libdvdcss2_1.2.9-1ubuntu2_i386.deb Paquetages utiles parfois vyé kanari ka fè bonsoup. Kenavo. A galon ! Hors ligne bernez Re : Ajouter les backports ! merci Valère vyé kanari ka fè bonsoup. Kenavo. A galon ! Hors ligne
Below is a small snippet the illustrates the problem I'm having related to the size of the cube used in an Axes3D instance from matplotlib and the cutting off of axis labels. While I can change the background color of the figure canvas pretty easily, this still causes the text located on the labels to become distorted. Does anyone have an idea how best to change the actual size of the 3D box used to plot data? Simply changing the subplot dimensions doesn't seem to help. This behavior is best illustrated by running the code. Once plotted, the date formatted text changes in color with one end becoming slightly darker. Any help is appreciated. from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import numpy as np import matplotlib.dates as dates import datetime, random import matplotlib.ticker as ticker def random_date(): date = datetime.date(2008, 12,01) while 1: date += datetime.timedelta(days=30) yield (date) def format_date(x, pos=None): return dates.num2date(x).strftime('%Y-%m-%d') #use FuncFormatter to format dates r_d = random_date() some_dates = [dates.date2num(r_d.next()) for i in range(0,20)] fig = plt.figure(facecolor = 'k') ax = fig.add_subplot(111, projection='3d') fig.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9) for c, z in zip(['r', 'g', 'b', 'y'], [30, 20, 10, 0]): xs = np.array(some_dates) ys = np.random.rand(20) ax.bar(xs, ys, zs=z, zdir='y', color=c, alpha=0.8,width=8) ax.w_xaxis.set_major_locator(ticker.FixedLocator(some_dates)) # I want all the dates on my xaxis ax.w_xaxis.set_major_formatter(ticker.FuncFormatter(format_date)) for tl in ax.w_xaxis.get_ticklabels(): # re-create what autofmt_xdate but with w_xaxis tl.set_ha('right') tl.set_rotation(30) ax.set_ylabel('Series') ax.set_zlabel('Amount') plt.show()
I am interested in whether there is a way to introspect a Python instance infallibly to see its __dict__ despite any obstacles that the programmer might have thrown in the way, because that would help me debug problems like unintended reference loops and dangling resources like open files. A simpler example is: how can I see the keys of a dict subclass if the programmer has hidden keys() behind a class of its own? The way around that is to manually call the dict keys() method instead of letting inheritance call the subclass's version of the method: # Simple example of getting to the real info # about an instance class KeyHidingDict(dict): def keys(self): return [] # there are no keys here! khd = KeyHidingDict(a=1, b=2, c=3) khd.keys() # drat, returns [] dict.keys(khd) # aha! returns ['a', 'b', 'c'] Now my actual question is, how can I see the __dict__ of an instance, no matter what the programmer might have done to hide it from me? If they set a __dict__ class variable then it seems to shadow the actual __dict__ of any objects inherited from that class: # My actual question class DunderDictHider(object): __dict__ = {'fake': 'dict'} ddh = DunderDictHider() ddh.a = 1 ddh.b = 2 print ddh.a # prints out 1 print ddh.__dict__ # drat, prints {'fake': 'dict'} This false value for __dict__ does not, as you can see, interfere with actual attribute setting and getting, but it does mislead dir() by hiding a and b and displaying fake as the object's instance variable instead. Again, my goal is to write a tool that helps me introspect class instances to see “what is really going on” when I am wondering why a set of class instances is taking so much memory or holding so many files open — and even though the situation above is extremely contrived, finding a way around it would let the tool work all the time instead of saying “works great, unless the class you are looking at has… [description of the exceptional situation above].” I had thought I would be able to infallibly grab the __dict__ with something like: dict_descr = object.__dict__['__dict__'] print dict_descr(ddh, DunderDictHider) But it turns out that object does not have a __dict__ descriptor. Instead, the subtype_dict() C function seems to get separately attached to each subclass of object that the programmer creates; there is no central way to name or fetch the descriptor so that it can be manually applied to objects whose class shadows it. Any ideas, anyone? :)
I have a simple command-line binary program hello which outputs to STDOUT: What is your name? and waits for the user to input it. After receiving their input it outputs: Hello, [name]! and terminates. I want to use Python to run computations on the final output of this program ("Hello, [name]!"), however before the final output I want the Python script to essentially "be" the binary program. In other words I'd like Python to forward all of the prompts to STDOUT and then accept the user's input and give it to the program. However I want to hide the final output so I can process it and show my own results to the user. I do not want to replicate the hello's behavior in the script, as this simple program is a stand-in for a more complex program that I am actually working with. I was hoping there would be some sort of mechanic in subprocess where I would be able to do something akin to: while process.is_running(): next_char = process.stdout.read(1) if next_char == input_prompt_thing: # somehow check if the program is waiting for input user_input = raw_input(buffer) process.stdin.write(user_input) else: buffer += next_char I have been playing with subprocess and essentially got as far as realizing I could use process.stdout.read(1) to read from the program before it began blocking, but I can't figure out how to break this loop before the process blocks my Python script. I am not too familiar with console I/O and it is not an area of much expertise for me, so I am starting to feel pretty lost. I appreciate any help!
Hej, chciałem poruszyć kwestię walidacji plików konfiguracyjnych. W aplikacji nad którą teraz siedzę, mimo, że została zaprojektowana dopiero w 1/3, mój plik konfiguracyjny naprawdę się rozrósł. Na chwilę obecną korzystam z takiej fajnej opcji jak łączenie słowników - w kodzie przechowuję słownik ze wszystkimi kluczami jakie można wykorzystać w pliku konfiguracyjnym i domyślnymi wartościami. Wczytuję plik konfiguracyjny, tworzę z niego słownik i łączę go z domyślnym. Jednak potrzebuję czegoś jeszcze, do walidacji wartości kluczy. Załóżmy, że mamy klucz foo, a jego wartość może być True lub False. Użytkownik wpisując w pliku konfiguracyjnym wartość 'bar' może zepsuć jakąś ładną funkcję. Cały plik konfiguracyjny musi być zweryfikowany na samym starcie aplikacji. Opisywanie każdej party klucz-wartość IFami nie za specjalnie mi podchodzi, bo jest to ogromna ilość par klucz-wartość. Regexy też nie są fajną opcją do tego. Czy w ogóle jest sens walidować plik konfiguracyjny, a jeśli tak to dlaczego? zadane Żadnego ciekawego, gotowego narzędzia nie znalazłem, więc pomyślałem i naskrobałem własne. Najpier definiujemy przykładowy słownik: test = {'foo' : 'bar', 'fo1' : { 'ba1' : 'ba2'} } W przypadku gdy chcemy dany klucz walidować, zamiast podawać parę key - value podajemy key - tuple(value, lambda). Wygląda to tak: default = {'foo' : ('bar', lambda v: v == 'bar' or 'dwabary'), 'fo1' : { 'ba1' : (lambda v: True if v == 'ba1' and default['foo'][0] == 'dwabary')} } Funkcja walidująca wygląda tak: def check_values(loaded_config, level = []): for item in loaded_config.iteritems(): key, value = item[0], item[1] if value.__class__ == dict: check_values(value, level + [key]) else: tmp_dict = self.default.copy() for l in level: tmp_dict = tmp_dict[l] dvalue = tmp_dict[key] if dvalue.__class__ == tuple and dvalue[1](dvalue[0]): print True // Tutaj wpisujemy co ma zostać wykonane jeśli // wartość jest poprawna elif dvalue.__class__ != tuple: print True // Tutaj to samo else: print False check_values(wczytany_słownik) W kodzie musimy mieć self.default jako słownik z domyślnymi wartościami, w którym umieszczamy lambdę. Nie zostanie wyrzucony błąd jeśli wartość klucza nie będzie (value, lambda) - wtedy nie zostanie podjęta żadna akcja. Oczywiście nie ma problemu by rozwinąć tę funkcję, by przekazywać jej także domyślny słownik, ale nie mam takiej potrzeby :) Wszelkie uwagi i/lub ulepszenia powyższej funkcji będą bardzo mile widziane w komentarzach :) Nadal jednak jestem zainteresowany innymi metodami walidacji takiego słownika, tak więc nie zamykam jeszcze pytania. Zadałem to samo pytanie na innym serwisie. Dla potomnych, szukających rozwiązania tego problemu, biblioteka: http://pypi.python.org/pypi/voluptuous robi to doskonale. odpowiedziane Hmmm moim skromnym zdaniem plik konfiguracyjny najczęściej jest zbiorem statycznych wartości, które sterują pracą całej aplikacji. Dostęp do tego pliku powinna mieć jedynie osoba upoważniona, mająca wiedzę niezbędną do jego modyfikowania (a zatem odpowiedzialna za poprawne wprowadzenie wartości). Chętnie jednak wysłucham opinii innych gdyż zaintrygował mnie ten temat. Swoją drogą myślę również, że konfiguracja na bazie danych nie jest też złym pomysłem. Łatwa kontrola dostępu oraz częściowa kontrola typów przechowywanych danych. Pozdrawiam odpowiedziane IMHO walidacje powinieneś wykonać a to kiedy to zrobisz (czy podczas parsowania pliku czy później podczas używania pary klucz-wartość) nie ma znaczenia. Osobiście zrobiłbym to najpóźniej jak możliwe (tak jestem leniwy ;) czyli podczas bezpośredniego korzystania z par klucz-wartość bo jak sam mówisz: To podczas parsowania pliku może wystąpić sytuacja gdy klucz foo2 zostanie zdefiniowany po foo1. odpowiedziane zadane: 15 Maj '11, 06:48 wyświetlane: 856 razy ostatnia aktywność: 15 Maj '11, 23:53
#8526 Le 21/02/2013, à 22:33 The Uploader Re : Topic des Couche-Tard (cinquante-sept) Pas chez moi. Passer de Ubuntu 10.04 à Xubuntu 12.04 LTS Archlinux + KDE sur ASUS N56VV. ALSA, SysV, DBus, Xorg = Windows 98 ! systemd, kdbus, ALSA + PulseAudio, Wayland = modern OS (10 years after Windows, but still...) ! Deal with it ! Hors ligne #8527 Le 21/02/2013, à 22:34 Berserker Re : Topic des Couche-Tard (cinquante-sept) Chez moi non plus RAS. Grand maître de la confrérie des Trolleurs. Hors ligne #8528 Le 21/02/2013, à 22:34 pierrecastor Re : Topic des Couche-Tard (cinquante-sept) La, c'est revenue normalement. Les pages ne voulait pas se charger. C'est cool, les quotes dans la signature, j'en une super, n'est-ce pas ? :lol: S.O.D. Hors ligne #8529 Le 21/02/2013, à 23:12 na kraïou Re : Topic des Couche-Tard (cinquante-sept) Je n’ai même pas le droit à ça. Vodka ! Tupain, je pleure comme un con devant mon PC, Maître Mô, il raconte vraiment bien des histoires horribles. Putain de blog, hein ? Un jour, cherai riche, j’achèterai son bouquin, et j’l’offrirai partout. D'ailleurs, je pense que l'ensemble du blog que maître Mô vaut le détour si on s’intéresse au fonctionnement de la justice. Dans le même genre chez Eolas, une histoire écrite à 4 personnes Avec deux avocat (Eolas et Fantômette), une gendarme et une personne du parquet : Dans l’ensemble, Maître Eolas et Maître Mô, ça se complète bien, je trouve. Eolas, c’est le putain de raisonnement juridique carré, rigoureux, posé, que ça ferait presque mousser un historien, et en plus, ça réagit souvent à une actualité polémique, donc ça permet de se faire un avis hors sensationnalisme. Maître Mô, c’est les histoires qui te rappellent que la justice, il y a aussi des petits morceaux d’humains dedans, et que les choses ne sont pas toujours aussi simples que ça. Triste ! Intégriste ! Comploteur ! Connard ! Fourbe ! Linuxeux ! Machiavélique ! Moche ! Branleur ! Grognon ! Prétentieux ! Frimeur ! /b/tard ! Futile ! Étudiant ! Médiéviste ! Perfide ! Debianeux ! Futur maître du monde ! Petit (quasi nanos gigantium humeris insidentes) ! Égoïste ! Nawakiste ! Mauvaise langue ! 34709 ! На краю ! Arrogant ! Suffisant ! Ingrat ! Hors ligne #8530 Le 21/02/2013, à 23:21 na kraïou Re : Topic des Couche-Tard (cinquante-sept) Putain de scandale, ce matin, j’ai mis un noyau d’avocat dans un verre exactement comme me l’a expliqué ma môman, et je n’ai toujours pas d’avocatier ! Triste ! Intégriste ! Comploteur ! Connard ! Fourbe ! Linuxeux ! Machiavélique ! Moche ! Branleur ! Grognon ! Prétentieux ! Frimeur ! /b/tard ! Futile ! Étudiant ! Médiéviste ! Perfide ! Debianeux ! Futur maître du monde ! Petit (quasi nanos gigantium humeris insidentes) ! Égoïste ! Nawakiste ! Mauvaise langue ! 34709 ! На краю ! Arrogant ! Suffisant ! Ingrat ! Hors ligne #8531 Le 21/02/2013, à 23:28 pierrecastor Re : Topic des Couche-Tard (cinquante-sept) Putain de blog, hein ? Grave, d’ailleurs, j'ai passer plus de temps today sur ce blog qu'a bosser sur mes projets. Et j'suis encore dessus. Un jour, cherai riche, j’achèterai son bouquin, et j’l’offrirai partout. En voila une idée quel est bonne. Dans l’ensemble, Maître Eolas et Maître Mô, ça se complète bien, je trouve. Eolas, c’est le putain de raisonnement juridique carré, rigoureux, posé, que ça ferait presque mousser un historien, et en plus, ça réagit souvent à une actualité polémique, donc ça permet de se faire un avis hors sensationnalisme. Maître Mô, c’est les histoires qui te rappellent que la justice, il y a aussi des petits morceaux d’humains dedans, et que les choses ne sont pas toujours aussi simples que ça. Yep, je suis bien d'accord. Et qu'est ce qu'ils écrivent bien, ces cons (moi, jaloux ? Nooon) Y'en à pas mal des interessants dans le genre. Plus près de l'informatique, y'a aussi le blog de zythom, expert judiciaire en informatique que j'aime beaucoup : Deux billets que je trouve représentatif : http://zythom.blogspot.fr/2011/07/une-h … anale.html (enfin, ça, c'est une longue histoire sur 8 billets) Putain de scandale, ce matin, j’ai mis un noyau d’avocat dans un verre exactement comme me l’a expliqué ma môman, et je n’ai toujours pas d’avocatier ! http://pix.tdct.org/upload/original/1319145345.gif Avec des allumettes dans le noyau ? Dernière modification par pierrecastor (Le 21/02/2013, à 23:29) C'est cool, les quotes dans la signature, j'en une super, n'est-ce pas ? :lol: S.O.D. Hors ligne #8532 Le 21/02/2013, à 23:31 Le grand Rorh Sha Re : Topic des Couche-Tard (cinquante-sept) D'ailleurs, je pense que l'ensemble du blog que maître Mô vaut le détour si on s’intéresse au fonctionnement de la justice. Dans le même genre chez Eolas, une histoire écrite à 4 personnes Avec deux avocat (Eolas et Fantômette), une gendarme et une personne du parquet : Y a du spam dans les commentaires. ▛▜ ▛▜ ▛▜ ▙▟ ▐▀ ▙▟ ▛▜ ▌▚ ▙▟ ▌▚ ▛▜ ▄▌ ▛▜ ▛▜ Hors ligne #8533 Le 21/02/2013, à 23:34 Sopo les Râ Re : Topic des Couche-Tard (cinquante-sept) Comment pouvez-vous envisager des alliances avec ces gens-là ? Un socialiste qui s'adresse à un UMP à propos du FN ? Non : un UMP qui s'adresse à Marine Le Pen à propos de la gauche. Terrifiant. La sieste, c'est maintenant. * * * « J'ai l'intention de vivre éternellement. Pour l'instant, tout se passe comme prévu. » Hors ligne #8534 Le 21/02/2013, à 23:41 na kraïou Re : Topic des Couche-Tard (cinquante-sept) Grave, d’ailleurs, j'ai passer plus de temps today sur ce blog qu'a bosser sur mes projets. Et j'suis encore dessus. Et une fois que tu as terminé, tu attendras avec impatience le prochain billet, et comme en ce moment, il poste peu, ce sera TERRIBLE. Et le nouveau billet sera évidemment long et posté quand tu auras dix milliards de choses urgentes à faire. Plus près de l'informatique, y'a aussi le blog de zythom, expert judiciaire en informatique que j'aime beaucoup : Ha oui, j’avais lu pas mal son blog, mais j’oublie toujours d’aller voir. na kraïou a écrit : Putain de scandale, ce matin, j’ai mis un noyau d’avocat dans un verre exactement comme me l’a expliqué ma môman, et je n’ai toujours pas d’avocatier ! Avec des allumettes dans le noyau ? Non, on a décidé que ce serait aussi bien de ne pas gaspiller d’allumettes, et de faire un entonnoir avec un goulot de bouteille (sans le bouchon) de Coca (du Breizh Cola ! ) et de mettre le noyau dedans avec de l’eau juste pour que le fond trempe. Dernière modification par na kraïou (Le 21/02/2013, à 23:47) Triste ! Intégriste ! Comploteur ! Connard ! Fourbe ! Linuxeux ! Machiavélique ! Moche ! Branleur ! Grognon ! Prétentieux ! Frimeur ! /b/tard ! Futile ! Étudiant ! Médiéviste ! Perfide ! Debianeux ! Futur maître du monde ! Petit (quasi nanos gigantium humeris insidentes) ! Égoïste ! Nawakiste ! Mauvaise langue ! 34709 ! На краю ! Arrogant ! Suffisant ! Ingrat ! Hors ligne #8535 Le 21/02/2013, à 23:47 Elzen Re : Topic des Couche-Tard (cinquante-sept) na kraïou a écrit : Dans l’ensemble, Maître Eolas et Maître Mô, ça se complète bien, je trouve. Eolas, c’est le putain de raisonnement juridique carré, rigoureux, posé, que ça ferait presque mousser un historien, et en plus, ça réagit souvent à une actualité polémique, donc ça permet de se faire un avis hors sensationnalisme. Maître Mô, c’est les histoires qui te rappellent que la justice, il y a aussi des petits morceaux d’humains dedans, et que les choses ne sont pas toujours aussi simples que ça. Yep, je suis bien d'accord. Et qu'est ce qu'ils écrivent bien, ces cons (moi, jaloux ? Nooon) Moi, quand je serai grand, je serai Maître Eolas ! Elzen : polisson, polémiste, polymathe ! (ex-ArkSeth) Un script pour améliorer quelques trucs du forum. La joie de t'avoir connu surpasse la peine de t'avoir perdu… J'ai pour qualité de ne jamais attaquer les gens. J'ai pour défaut de souvent avoir l'air de le faire. Hors ligne #8536 Le 21/02/2013, à 23:48 Shanx Re : Topic des Couche-Tard (cinquante-sept) Je me doute que certaines personnes ne seront pas d’accord, mais Kill BIll est quand même un super film. Certes, le scénario n’est pas tellement poussé, mais quel maîtrise et quelle réalisation… Et comme toujours avec Tarantino, quelle bande son… « En vérité, je ne voyage pas, moi, pour atteindre un endroit précis, mais pour marcher : simple plaisir de voyager. » R. L. Stevenson -- Blog et randos Hors ligne #8537 Le 21/02/2013, à 23:49 The Uploader Re : Topic des Couche-Tard (cinquante-sept) J'suis 100% d'accord. (in before the haters) Passer de Ubuntu 10.04 à Xubuntu 12.04 LTS Archlinux + KDE sur ASUS N56VV. ALSA, SysV, DBus, Xorg = Windows 98 ! systemd, kdbus, ALSA + PulseAudio, Wayland = modern OS (10 years after Windows, but still...) ! Deal with it ! Hors ligne #8538 Le 21/02/2013, à 23:59 Grünt Re : Topic des Couche-Tard (cinquante-sept) Grünt a écrit : Le code (qui ne fonctionne pas ) est là : http://grunt.fdn.fr/bot2.py J'essaie de faire passer la variable "tg_start" qui stocke le timestamp du premier "ta gueule" en dehors de la fonction en le faisant sortir avec un return, mais ça ne marche pas du tout. C'est quoi que t'appelles "un objet approprié" ? Un truc au format date ? Ok, tu t’y prends juste avec les pieds, donc. L’objet approprié, c’est ton bot, par exemple. class Bot(ircbot.SingleServerIRCBot): def __init__(self): […] self.tg_start = 0 def on_pubmsg(self, serv, ev): self.tg_start = self.message_or_action(serv, ev, self.tg_start) def on_action(self, serv, ev): self.tg_start = self.message_or_action(serv, ev, self.tg_start) Ok, merci, ça marche comme ça (fallait aussi mettre return self.tg_start à la fin de message_or_action, forcément). Je commence à piger comment ça marche tout ça Red flashing lights. I bet they mean something. Hors ligne #8539 Le 22/02/2013, à 00:08 Shanx Re : Topic des Couche-Tard (cinquante-sept) Pour fêter ça, j’écoute la BO du Seigneurs des Anneaux. « En vérité, je ne voyage pas, moi, pour atteindre un endroit précis, mais pour marcher : simple plaisir de voyager. » R. L. Stevenson -- Blog et randos Hors ligne #8540 Le 22/02/2013, à 00:10 The Uploader Re : Topic des Couche-Tard (cinquante-sept) Perso je regarde ça. (super mod ! ) Passer de Ubuntu 10.04 à Xubuntu 12.04 LTS Archlinux + KDE sur ASUS N56VV. ALSA, SysV, DBus, Xorg = Windows 98 ! systemd, kdbus, ALSA + PulseAudio, Wayland = modern OS (10 years after Windows, but still...) ! Deal with it ! Hors ligne #8541 Le 22/02/2013, à 00:23 naingenieu Re : Topic des Couche-Tard (cinquante-sept) Bonsoir tout le monde Hors ligne #8542 Le 22/02/2013, à 01:10 na kraïou Re : Topic des Couche-Tard (cinquante-sept) Flemme de laver un couteau et une fourchette, flemme de cuire la viande. Yep, carrément. Triste ! Intégriste ! Comploteur ! Connard ! Fourbe ! Linuxeux ! Machiavélique ! Moche ! Branleur ! Grognon ! Prétentieux ! Frimeur ! /b/tard ! Futile ! Étudiant ! Médiéviste ! Perfide ! Debianeux ! Futur maître du monde ! Petit (quasi nanos gigantium humeris insidentes) ! Égoïste ! Nawakiste ! Mauvaise langue ! 34709 ! На краю ! Arrogant ! Suffisant ! Ingrat ! Hors ligne #8543 Le 22/02/2013, à 01:22 :!pakman Re : Topic des Couche-Tard (cinquante-sept) Dernière modification par :!pakman (Le 22/02/2013, à 01:23) ... Hors ligne #8544 Le 22/02/2013, à 01:23 Elzen Re : Topic des Couche-Tard (cinquante-sept) ♪♫ Heureux qui, comme Ulysse, a fait un beau voyage,Ou comme c'estuy-là qui conquit la Toison,Puis s'en est retourné, plein d'usage et raison,Vivre entre ses parents le reste de son âge ! ♪♫ Quand reverrais-je, hélas, de mon petit village,Fumer la cheminée, et en quelle saison,Reverrais-je le clos de ma pauvre maison,Qui m'est une province, et beaucoup davantage ? ♪♫ Plus me plaît le séjour qu'ont bâti mes ailleux,Que des palais romains le front audacieux !Plus que le marbre dur me plaît l'ardoise fine,Plus mon Loir gaulois que le Tibre latin,Plus mon petit Liré que le mont Palatin,Et plus que l'air marin, la douceur angevine… ♪♫ J'ai traversé les mers à la force de mes bras,Seuls contre les dieux, perdu dans les marées,Retranché dans la cale, mes vieux tympans percés,Pour ne plus jamais entendre les sirènes et leurs voix, ♪♫ Nos vies sont une guerre où il ne tient qu'à nousDe nous soucier de nos sorts, de trouver le bon choix,De nous méfier de nos pas, et de toute cette eau qui dort,Qui pollue nos chemins soit disant pavés d'or… Mais quand reverrais-je, de mon petit village,Fumer la cheminée, et en quelle saison… ? ♪♫ Elzen : polisson, polémiste, polymathe ! (ex-ArkSeth) Un script pour améliorer quelques trucs du forum. La joie de t'avoir connu surpasse la peine de t'avoir perdu… J'ai pour qualité de ne jamais attaquer les gens. J'ai pour défaut de souvent avoir l'air de le faire. Hors ligne #8545 Le 22/02/2013, à 01:25 Вiɑise Re : Topic des Couche-Tard (cinquante-sept) Haaa je me souviens plus de l'air ! Hors ligne #8546 Le 22/02/2013, à 01:27 Pylades Re : Topic des Couche-Tard (cinquante-sept) Ok, merci, ça marche comme ça (fallait aussi mettre return self.tg_start à la fin de message_or_action, forcément). Je commence à piger comment ça marche tout ça Euh, non, fallait pas. Ce que j’ai proposé, c’était les modifs minimales pour que cela fonctionne ; mais si l’on voulait proprer le code cela serait plus long. Mais ton initiative est harmful, désolé. “Any if-statement is a goto. As are all structured loops. “And sometimes structure is good. When it’s good, you should use it. “And sometimes structure is _bad_, and gets into the way, and using a goto is just much clearer.” Linus Torvalds – 12 janvier 2003 Hors ligne #8547 Le 22/02/2013, à 01:29 Hors ligne #8548 Le 22/02/2013, à 01:30 Вiɑise Re : Topic des Couche-Tard (cinquante-sept) rzzzz Hors ligne #8549 Le 22/02/2013, à 01:30 Pylades Re : Topic des Couche-Tard (cinquante-sept) @ Elzen : je suis űber-fan du sonnet du début, mais la suite, elle vient d’où ? “Any if-statement is a goto. As are all structured loops. “And sometimes structure is good. When it’s good, you should use it. “And sometimes structure is _bad_, and gets into the way, and using a goto is just much clearer.” Linus Torvalds – 12 janvier 2003 Hors ligne #8550 Le 22/02/2013, à 01:32 Elzen Re : Topic des Couche-Tard (cinquante-sept) J'viens d'en causer sur IRC, donc je copie-colle : <Elzen> Le poème est de Du Bellay ; ça c'est la version chantée par Ridan. À ma connaissance, c'est la seule version chantée du poème. <Elzen> (Brassens en a fait une qui porte ce nom-là, mais qui n'a que la première strophe en commun avec le poème). Effectivement, le poème d'origine s'arrête à « la douceur angevine », je ne sais pas si la suite est de Ridan ou pas. Elzen : polisson, polémiste, polymathe ! (ex-ArkSeth) Un script pour améliorer quelques trucs du forum. La joie de t'avoir connu surpasse la peine de t'avoir perdu… J'ai pour qualité de ne jamais attaquer les gens. J'ai pour défaut de souvent avoir l'air de le faire. Hors ligne
JavaScript reedbird8 — 2012-10-02T13:44:41-04:00 — #1 Not sure if this is the best place to ask this, or if it can be done, but here it goes. Is there a way to generate a tooltip anytime a certain word or phrase appears within my site? Ideally, I'd like to create a series of tooltips for some terminology. Essentially, anytime the abbreviation "TSD-1a" appears a tooltip would be generated for this abbreviation. (I'm looking for something other than me having to type the html markup everytime I want the tooltip to appear.) pullo — 2012-10-02T17:09:17-04:00 — #2 Hi, Presuming that your content is in a <div> called "container" you can wrap all occurences of the abbreviation "TSD-1a" in <a> tags and apply a class of "normalTip" to these with the following jQuery: $('#container').html($('#container').html().replace(/(TSD-1a)/g,'<a href="#" class="normalTip" title="Some Text">$1</a>')); After that you can get all elements with the class "normalTip" and hook them up to tooltips. The title attribute serves as the text for the tip. Here's an example: <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>Tooltip demo</title> <script src="http://code.jquery.com/jquery-latest.js"></script> <script type="text/javascript" src="http://ara-abcarians.com/jquery/atooltip/js/jquery.atooltip.js"></script> <link type="text/css" href="http://ara-abcarians.com/jquery/atooltip/css/atooltip.css" rel="stylesheet" media="screen" /> <script>$(function(){$('a.normalTip').aToolTip(); });</script> </head> <body> <div id="container" style="width:600px; margin: 0 auto; padding-top:50px;"> <p> Lorem ipsum dolor sit amet, consectetur adipisicing elit, TSD-1a sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut TSD-1a enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis TSD-1a aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur TSD-1a sint occaecat cupidatat non proident, TSD-1a sunt in culpa qui officia deserunt mollit anim id est TSD-1a laborum. </p> </div> <script>$('#container').html($('#container').html().replace(/(TSD-1a)/g,'<a href="#" class="normalTip" title="Some Text">$1</a>'));</script> </body> </html> Here, I've used a simple jQuery tooltip plugin to achieve the tooltips. If you opt for this, please download the files you need from here: http://ara-abcarians.com/jquery/atooltip/ and don't use the author's site as a CDN. reedbird8 — 2012-10-02T17:33:42-04:00 — #3 Thanks! Question - will it only grab those directly appearing in the div with id "container"? Or if there is a div inside the "container" div, will it grab those, as well? i.e.: <body> <div id="container" style="width:600px; margin: 0 auto; padding-top:50px;"> <div id="left column"> <p> Lorem ipsum dolor sit amet, consectetur adipisicing elit, TSD-1a sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut TSD-1a enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis TSD-1a aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur TSD-1a sint occaecat cupidatat non proident, TSD-1a sunt in culpa qui officia deserunt mollit anim id est TSD-1a laborum. </p> </div> </div> <script>$('#container').html($('#container').html().replace(/(TSD-1a)/g,'<a href="#" class="normalTip" title="Some Text">$1</a>'));</script> </body> pullo — 2012-10-03T01:34:46-04:00 — #4 It'll grab everything within the "container" div, irrespective of whether it's nested in a further div/divs. Is this the desired behaviour? reedbird8 — 2012-10-03T05:10:06-04:00 — #5 yes, that's what I would want it to do. pullo — 2012-10-03T05:12:03-04:00 — #6 Cool If this changes at any point you can always refine the '#container' selector. reedbird8 — 2012-10-03T05:48:10-04:00 — #7 Well, it works, but it clashes with other script on my site. A bunch of my other jquery features stop working. (tabs, certain menus, etc) pullo — 2012-10-03T07:02:07-04:00 — #8 That's not good. Are you using any other libraries? If you can post a url, I don't mind taking a look. reedbird8 — 2012-10-03T07:12:42-04:00 — #9 Thanks, url is http://theturtleroom.com pullo — 2012-10-03T07:27:27-04:00 — #10 Nice site, but everything seems to be working for me. Plus I could find no mention of the abbreviation "TSD-1a" reedbird8 — 2012-10-03T07:42:34-04:00 — #11 Thanks! Oh, I had pulled it down, since things weren't working....I'll put it back up. Instead of looking for TSD-1a (which is for a future reference), look for SCL on this page: http://theturtleroom.com/projects/steve-projects/graptemys/graptemys-barbouri/ You can see that the tabs below the text and the main navigational menus (can't get to the submenu level) have stopped working. In addition, my sidebar menu is a jquery menu and no longer automatically collapses (and expands/collapses on hover) pullo — 2012-10-03T08:03:26-04:00 — #12 Your problem is twofold. You have included jQuery twice. jQuery seems to be operating in noConflict mode on your page. Just for your info: all noConflict mode does is have jQuery relinquish control of the $ variable. You can read about it here: http://api.jquery.com/jQuery.noConflict/ reedbird8 — 2012-10-03T08:08:19-04:00 — #13 Thanks again! It's all working well now! There is still one place where the tooltip is not visually appearing properly. Its not live to the public yet, but if you wouldn't mind looking at it, I'll send some login information and the page link so you could look at that page, too. pullo — 2012-10-03T08:09:33-04:00 — #14 Go on then, but if I ever want to buy a turtle, I expect a good price reedbird8 — 2012-10-03T08:17:18-04:00 — #15 No problem!....message sent reedbird8 — 2012-10-03T10:23:07-04:00 — #16 So, one last tooltip question: I'd like to be able to tip all of TSD, TSD-1a, TSD-1b, and TSD-2. However, a tip for TSD seems to interfere with the other 3 tips. Any idea? pullo — 2012-10-03T14:17:59-04:00 — #17 Hi There, do it like this, with the use of a regular expression: <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>Tooltip demo</title> <script src="http://code.jquery.com/jquery-latest.js"></script> <script type="text/javascript" src="http://ara-abcarians.com/jquery/atooltip/js/jquery.atooltip.js"></script> <link type="text/css" href="http://ara-abcarians.com/jquery/atooltip/css/atooltip.css" rel="stylesheet" media="screen" /> <script>$(function(){$('a.normalTip').aToolTip(); });</script> </head> <body> <div id="container" style="width:600px; margin: 0 auto; padding-top:50px;"> <div id="left column"> <p> TSD Lorem ipsum TSD dolor sit amet, consectetur adipisicing elit, TSD-1a sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut TSD-1a enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis TSD-1a aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur TSD-1a sint occaecat cupidatat non TSD, proident, TSD-1a sunt in culpa qui officia deserunt mollit anim id est TSD TSD-1a laborum TSD. </p> </div> </div> <script> //Matches (start of line or whitespace)(TSD)(whitespace, or a fullstop, or a comma, or the end of the line) var tsd = /(^|\\s)(TSD)(\\s|\\.|,|$)/g; var tsdTip = '$1<a href="#" class="normalTip" title="TSD">$2</a>$3' //Matches (start of line or whitespace)(TSD-1a)(whitespace, or a fullstop, or a comma, or the end of the line) var tsd1a = /(^|\\s)(TSD-1a)(\\s|\\.|,|$)/g; var tsd1aTip = '$1<a href="#" class="normalTip" title="TSD-1a">$2</a>$3' $('#container').html( $('#container').html().replace(tsd, tsdTip).replace(tsd1a, tsd1aTip) ); </script> </body> </html> reedbird8 — 2012-10-03T14:39:15-04:00 — #18 Thanks again! These forums are a great place to get help.
it's the first time i am using this environment. The part of SQLAlchemy i am willing to use is just the one that allows me to query the database using Table objects with autoload = True. I am doing this as my tables already exist in the DB (mysql server) and were not created by defining flask models. I have gone through all the documentation and i don't seem to find an answer. Here is some code: app = Flask(__name__) app.config.from_object(__name__) metadata = None def connect_db(): engine = create_engine(app.config['DATABASE_URI']) global metadata metadata = MetaData(bind=engine) return engine.connect() @app.before_request def before_request(): g.db = connect_db() @app.teardown_request def teardown_request(exception): g.db.close() Now you could be wondering why i use that global var named metadata. Ok some more code: @app.route('/test/<int:id>') def test(test_result_id): testTable = Table('test_table', metadata , autoload=True) As you can see i need that object to be global in order to access it from within a function. Also I am declaring the same var testTable in each function that needs it. I have the feeling this is not the right approach. I coudn't find any best practice advice for a case like mine. Thanks all!
Topic: Error 500, Upgrading from OpenSource to Pro Hi Guys ! Finaly i buyed this Masterpiece of Mailserver and now i upgraded to iRedAdmin Pro. Used the guide from Zhang. in my apache log i get this error: [Thu Oct 07 13:31:22 2010] [error] [client 80.123.169.178] mod_wsgi (pid=3289): Target WSGI script '/usr/share/apache2/iredadmin/iredadmin.py' cannot be loaded as Python module. [Thu Oct 07 13:31:22 2010] [error] [client 80.123.169.178] mod_wsgi (pid=3289): Exception occurred processing WSGI script '/usr/share/apache2/iredadmin/iredadmin.py'. [Thu Oct 07 13:31:22 2010] [error] [client 80.123.169.178] Traceback (most recent call last): [Thu Oct 07 13:31:22 2010] [error] [client 80.123.169.178] File "/usr/share/apache2/iredadmin/iredadmin.py", line 28, in <module> [Thu Oct 07 13:31:22 2010] [error] [client 80.123.169.178] from libs import iredbase [Thu Oct 07 13:31:22 2010] [error] [client 80.123.169.178] File "/usr/share/apache2/iredadmin/libs/iredbase.py", line 53, in <module> [Thu Oct 07 13:31:22 2010] [error] [client 80.123.169.178] webmaster = cfg.general.get('webmaster', 'root') [Thu Oct 07 13:31:22 2010] [error] [client 80.123.169.178] File "/var/lib/python-support/python2.5/web/utils.py", line 68, in __getattr__ [Thu Oct 07 13:31:22 2010] [error] [client 80.123.169.178] raise AttributeError, k [Thu Oct 07 13:31:22 2010] [error] [client 80.123.169.178] AttributeError: 'general' Sry for my english, but i think everyone understand my problem kr Franky ---- Urgent issue? Pay iRedMail developer to solve it remotely at $39.
Components and plugins Components and plugins are relatively new features of web2py, and there is some disagreement between developers about what they are and what they should be. Most of the confusion stems from the different uses of these terms in other software projects and from the fact that developers are still working to finalize the specifications. However, plugin support is an important feature and we need to provide some definitions. These definitions are not meant to be final, just consistent with the programming patterns we want to discuss in this chapter. We will try to address two issues here: How can we build modular applications that minimize server load and maximize code reuse? How can we distribute pieces of code in a more or less plugin-and-play fashion? Components address the first issue; plugins address the second. Components Acomponentis a functionally autonomous part of a web page. A component may be composed of modules, controllers and views, but there is no strict requirement other than, when embedded in a web page, it must be localized within an html tag (for example a DIV, a SPAN, or an IFRAME) and it must perform its task independently of the rest of the page. We are specifically interested in components that are loaded in the page and communicate with the component controller function via Ajax. An example of a component is a "comments component" that is contained into a DIV and shows users' comments and a post-new-comment form. When the form is submitted, it is sent to the server via Ajax, the list is updated, and the comment is stored server-side in the database. The DIV content is refreshed without reloading the rest of the page. The web2py LOAD function makes this easy to do without explicit JavaScript/Ajax knowledge or programming. Our goal is to be able to develop web applications by assembling components into page layouts. Consider a simple web2py app "test" that extends the default scaffolding app with a custom model in file "models/db_comments.py": db.define_table('comment', Field('body','text',label='Your comment'), Field('posted_on','datetime',default=request.now), Field('posted_by',db.auth_user,default=auth.user_id)) db.comment.posted_on.writable=db.comment.posted_on.readable=False db.comment.posted_by.writable=db.comment.posted_by.readable=False one action in "controllers/comments.py" @auth.requires_login() def post(): return dict(form=crud.create(db.comment), comments=db(db.comment).select()) and the corresponding "views/comments/post.html" {{extend 'layout.html'}} {{for comment in comments:}} <div class="comment"> on {{=comment.posted_on}} {{=comment.posted_by.first_name}} says <span class="comment_body">{{=comment.body}}</span> </div> {{pass}} {{=form}} You can access it as usual at: http://127.0.0.1:8000/test/comments/post So far there is nothing special in this action, but we can turn it into a component by defining a new view with extension ".load" that does not extend the layout. Hence we create a "views/comments/post.load": {{#extend 'layout.html' <- notice this is commented out!}} {{for comment in comments:}} <div class="comment"> on {{=comment.posted_on}} {{=comment.posted_by.first_name}} says <span class="comment_body">{{=comment.body}}</span> </div> {{pass}} {{=form}} We can access it at the URL http://127.0.0.1:8000/test/comments/post.load and it will look like this: This is a component that we can embed into any other page by simply doing {{=LOAD('comments','post.load',ajax=True)}} For example in "controllers/default.py" we can edit def index(): return dict() and in the corresponding view add the component: {{extend 'layout.html'}} <p>{{='bla '*100}}</p> {{=LOAD('comments','post.load',ajax=True)}} Visiting the page http://127.0.0.1:8000/test/default/index will show the normal content and the comments component: The {{=LOAD(...)}} component is rendered as follows: <script type="text/javascript"><!-- web2py_component("/test/comment/post.load","c282718984176") //--></script><div id="c282718984176">loading...</div> (the actual generated code depends on the options passed to the LOAD function). The web2py_component(url,id) function is defined in "web2py_ajax.html" and it performs all the magic: it calls the url via Ajax and embeds the response into the DIV with corresponding id; it traps every form submission into the DIV and submits those forms via Ajax. The Ajax target is always the DIV itself. The full signature of the LOAD helper is the following: LOAD(c=None, f='index', args=[], vars={}, extension=None, target=None, ajax=False, ajax_trap=False, url=None,user_signature=False, content='loading...',**attr): Here: the first two arguments candfare the controller and the function that we want to call respectively. argsandvarsare the arguments and variables that we want to pass to the function. The former is a list, the latter is a dictionary. extensionis an optional extension. Notice that the extension can also be passed as part of the function as inf='index.load'. targetis theidof the target DIV. If it is not specified a random targetidis generated. ajaxshould be set toTrueif the DIV has to be filled via Ajax and toFalseif the DIV has to be filled before the current page is returned (thus avoiding the Ajax call). ajax_trap=Truemeans that any form submission in the DIV must be captured and submitted via Ajax, and the response must be rendered inside the DIV.ajax_trap=Falseindicates that forms must be submitted normally, thus reloading the entire page.ajax_trapis ignored and assumed to beTrueifajax=True. url, if specified, overrides the values ofc,f,args,vars, andextensionand loads the component at theurl. It is used to load as components pages served by other applications (which my or may not be created with web2py). user_signaturedefaults to False but, if you are logged in, it should be set to True. This will make sure the ajax callback is digitally signed. This is documented in chapter 4. contentis the content to be displayed while performing the ajax call. It can be a helper as incontent=IMG(..). optional **attr(attributes) can be passed to the containedDIV. If no .load view is specified, there is a generic.load that renders the dictionary returned by the action without layout. It works best if the dictionary contains a single item. If you LOAD a component having the .load extension and the corresponding controller function redirects to another action (for example a login form), the .load extension propagates and the new url (the one to redirect too) is also loaded with a .load extension. *Please note:* Because Ajax post does not support multipart forms, i.e. file uploads, upload fields will not work with the LOAD component. You could be fooled into thinking it would work because upload fields will function normally if POST is done from the individual component's .load view. Instead, uploads are done with ajax-compatible 3rd-party widgets and web2py manual upload store commands. Client-Server component communications When the action of a component is called via Ajax, web2py passes two HTTP headers with the request: web2py-component-location web2py-component-element which can be accessed by the action via the variables: request.env.http_web2py_component_locationrequest.env.http_web2py_component_element The latter is also accessible via: request.cid The former contains the URL of the page that called the component action. The latter contains the id of the DIV that will contain the response. The component action can also store data in two special HTTP response headers that will be interpreted by the full page upon response. They are: web2py-component-flash web2py-component-command and they can be set via: response.headers['web2py-component-flash']='....' response.headers['web2py-component-command']='...' or (if the action is called by a component) automatically via: response.flash='...'response.js='...' The former contains text that you want to be flashed upon response. The latter contains JavaScript code that you want to be executed upon response. It cannot contain newlines. As an example, let's define a contact form component in "controllers/contact/ask.py" that allows the user to ask a question. The component will email the question to the system administrator, flash a "thank you" message, and remove the component from the page: def ask(): form=SQLFORM.factory( Field('your_email',requires=IS_EMAIL()), Field('question',requires=IS_NOT_EMPTY())) if form.process().accepted: if mail.send(to='admin@example.com', subject='from %s' % form.vars.your_email, message = form.vars.question): response.flash = 'Thank you' response.js = "jQuery('#%s').hide()" % request.cid else: form.errors.your_email = "Unable to send the email" return dict(form=form) The first four lines define the form and accept it. The mail object used for sending is defined in the default scaffolding application. The last four lines implement all the component-specific logic by getting data from the HTTP request headers and setting the HTTP response headers. Now you can embed this contact form in any page via {{=LOAD('contact','ask.load',ajax=True)}} Notice that we did not define a .load view for our ask component. We do not have to because it returns a single object (form) and therefore the "generic.load" will do just fine. Remember that generic views are a development tool. In production you should copy "views/generic.load" into "views/contact/ask.load". user_signature argument: {{=LOAD('contact','ask.load',ajax=True,user_signature=True)}} which add a digital signature to the URL. The digital signature must then be validated using a decorator in the callback function: @auth.requires_signature() def ask(): ... Trapped Ajax links Normally a link is not trapped, and by clicking in a link inside a component, the entire linked page is loaded. Sometimes you want the linked page to be loaded inside the component. This can be achieved using the A helper: {{=A('linked page',_href='http://example.com',cid=request.cid)}} If cid is specified, the linked page is loaded via Ajax. The cid is the id of the html element where to place the loaded page content. In this case we set it to request.cid, i.e. the id of the component that generates the link. The linked page can be and usually is an internal URL generated using the URL command. Plugins Apluginis any subset of the files of an application. and we really mean any: A plugin is not a module, is not a model, it is not a controller, is not a view, yet it may contain modules, models, controllers and/or views. A plugin does not need to be functionally autonomous and it may depend on other plugins or specific user code. A pluginis not aplugins systemand therefore has no concept of registration nor isolation, although we will give rules to try to achieve some isolation. We are talking about a plugin for your app, not a plugin for web2py. So why is it called a plugin? Because it provides a mechanism for packing a subset of an app and unpacking it over another app (i.e. plug-in). Under this definition, any file in your app can be treated as a plugin. When the app is distributed, its plugins are packed and distributed with it. In practice, the admin provides an interface for packing and unpacking plugins separately from your app. Files and folder of your application that have names with the prefix plugin_name can be packed together into a file called: web2py.plugin.name.w2p and distributed together. The files that compose a plugin are not treated by web2py any differently than other files except that admin understands from their names that they are meant to be distributed together, and it displays them in a separate page: Yet as a matter of fact, by the definition above, these plugins are more general than those recognized as such by admin. In practice we will only be concerned with two types of plugins: Component Plugins. These are plugins that contain components as defined in the previous section. A component plugin can contain one or more components. We can think for example of aplugin_commentsthat contains thecommentscomponent proposed above. Another example could beplugin_taggingthat contains ataggingcomponent and atag-cloudcomponent that share some database tables also defined by the plugin. Layout Plugins. These are plugins that contain a layout view and the static files required by such layout. When the plugin is applied it gives the app a new look and feel. By the above definitions, the components created in the previous section, for example "controllers/contact.py", are already plugins. We can move them from one app to another and use the components they define. Yet they are not recognized as such by admin because there is nothing that labels them as plugins. So there are two problems we need to solve: Name the plugin files using a convention, so that admincan recognize them as belonging to the same plugin If the plugin has model files, establish a convention so that the objects it defines do not pollute the namespace and do not conflict with each other. Let's assume a plugin is called name. Here are the rules that should be followed: Rule 1: Plugin models and controllers should be called, respectively models/plugin_name.py controllers/plugin_name.py and plugin views, modules, static, and private files should be in folders called, respectively: views/plugin_name/ modules/plugin_name/ static/plugin_name/ private/plugin_name/ Rule 2: Plugin models can only define objects with names that start with plugin_name PluginName _ Rule 3: Plugin models can only define session variables with names that start with session.plugin_name session.PluginName Rule 4: Plugins should include license and documentation. These should be placed in: static/plugin_name/license.html static/plugin_name/about.html Rule 5: The plugin can only rely on the existence of the global objects defined in scaffolding "db.py", i.e. a database connection called db an Authinstance calledauth a Crudinstance calledcrud a Serviceinstance calledservice Some plugins may be more sophisticated and have a configuration parameter in case more than one db instance exists. Rule 6: If a plugin needs configuration parameters, these should be set via a PluginManager as described below. By following the above rules we can make sure that: adminrecognizes all theplugin_namefiles and folder as part of a single entity. plugins do not interfere with each other. The rules above do not solve the problem of plugin versions and dependencies. That is beyond our scope. Component plugins Component plugins are plugins that define components. Components usually access the database and define with their own models. Here we turn the previous comments component into a comments_plugin by using the same code we wrote before, but following all of the previous rules. First, we create a model called "models/plugin_comments.py": db.define_table('plugin_comments_comment', Field('body','text', label='Your comment'), Field('posted_on', 'datetime', default=request.now), Field('posted_by', db.auth_user, default=auth.user_id)) db.plugin_comments_comment.posted_on.writable=False db.plugin_comments_comment.posted_on.readable=False db.plugin_comments_comment.posted_by.writable=False db.plugin_comments_comment.posted_by.readable=False def plugin_comments(): return LOAD('plugin_comments','post',ajax=True) (notice the last two lines define a function that will simplify the embedding of the plugin) Second, we define a "controllers/plugin_comments.py" @auth.requires_login() def post(): comment = db.plugin_comments_comment return dict(form=crud.create(comment), comments=db(comment).select()) Third, we create a view called "views/plugin_comments/post.load": {{for comment in comments:}} <div class="comment"> on {{=comment.posted_on}} {{=comment.posted_by.first_name}} says <span class="comment_body">{{=comment.body}}</span> </div> {{pass}} {{=form}} Now we can use admin to pack the plugin for distribution. Admin will save this plugin as: web2py.plugin.comments.w2p We can use the plugin in any view by simply installing the plugin via the edit page in admin and adding this to our own views {{=plugin_comments()}} Of course we can make the plugin more sophisticated by having components that take parameters and configuration options. The more complex the components, the more difficult it becomes to avoid name collisions. The Plugin Manager described below is designed to avoid this problem. Plugin manager The PluginManager is a class defined in gluon.tools. Before we explain how it works inside, we will explain how to use it. Here we consider the previous comments_plugin and we make it better. We want to be able to customize: db.plugin_comments_comment.body.label without having to edit the plugin code itself. Here is how we can do it: First, rewrite the plugin "models/plugin_comments.py" in this way: db.define_table('plugin_comments_comment', Field('body','text',label=plugin_comments.comments.body_label), Field('posted_on', 'datetime', default=request.now), Field('posted_by', db.auth_user, default=auth.user_id)) def plugin_comments() from gluon.tools import PluginManager plugins = PluginManager('comments', body_label='Your comment') comment = db.plugin_comments_comment comment.label=plugins.comments.body_label comment.posted_on.writable=False comment.posted_on.readable=False comment.posted_by.writable=False comment.posted_by.readable=False return LOAD('plugin_comments','post.load',ajax=True) Notice how all the code except the table definition is encapsulated in a single function. Also notice how the function creates an instance of a PluginManager. Now in any other model in your app, for example in "models/db.py", you can configure this plugin as follows: from gluon.tools import PluginManager plugins = PluginManager() plugins.comments.body_label = T('Post a comment') The plugins object is already instantiated in the default scaffolding app in "models/db.py" The PluginManager object is a thread-level singleton Storage object of Storage objects. That means you can instantiate as many as you like within the same application but (whether they have the same name or not) they act as if there were a single PluginManager instance. In particular each plugin file can make its own PluginManager object and register itself and its default parameters with it: plugins = PluginManager('name', param1='value', param2='value') You can override these parameters elsewhere (for example in "models/db.py") with the code: plugins = PluginManager() plugins.name.param1 = 'other value' You can configure multiple plugins in one place. plugins = PluginManager() plugins.name.param1 = '...' plugins.name.param2 = '...' plugins.name1.param3 = '...' plugins.name2.param4 = '...' plugins.name3.param5 = '...' When the plugin is defined, the PluginManager must take arguments: the plugin name and optional named arguments which are default parameters. However, when the plugins are configured, the PluginManager constructor must take no arguments. The configuration must precede the definition of the plugin (i.e. it must be in a model file that comes first alphabetically). Layout plugins Layout plugins are simpler than component plugins because usually they do not contain code, but only views and static files. Yet you should still follow good practice: First, create a folder called "static/plugin_layout_name/" (where name is the name of your layout) and place all your static files there. Second, create a layout file called "views/plugin_layout_name/layout.html" that contains your layout and links the images, CSS and JavaScript files in "static/plugin_layout_name/" Third, modify the "views/layout.html" so that it simply reads: {{extend 'plugin_layout_name/layout.html'}} {{include}} The benefit of this design is that users of this plugin can install multiple layouts and choose which one to apply simply by editing "views/layout.html". Moreover, "views/layout.html" will not be packed by admin together with the plugin, so there is no risk that the plugin will override the user's code in the previously installed layout. plugin_wiki DISCLAIMER: plugin_wiki is still very much under development and therefore we do not promise backward compatibility to the same level as for web2py core functions. plugin_wiki is a plugin on steroids. What we mean is that it defines multiple useful components and it may change the way you develop your applications: You can download it from http://web2py.com/examples/static/web2py.plugin.wiki.w2p The idea behind plugin_wiki is that most applications include pages that are semi-static. These are pages that do not include complex custom logic. They contain structured text (think of a help page), images, audio, video, crud forms, or a set of standard components (comments, tags, charts, maps), etc. These pages may be public, require login or have other authorization restrictions. These pages may be linked by a menu or only be reachable via wizard form. plugin_wiki provides an easy way to add pages that fit in this category to your regular web2py applications. In particular plugin_wiki provides: A wiki-like interface that allows to add pages to your app and reference them by a slug. These pages (which we will refer to as wiki pages) have versions and are stored in the database. Public and private pages (require login). If a page requires login, it may also require that the user have a particular group membership. Three levels: 1,2,3. At level 1, pages can only include text, images, audio and video. At level 2, pages can also include widgets (these are components as defined in the previous section that can be embedded in wiki pages). At level 3, pages can also include web2py template code. A choice of editing pages with the markmin syntax or in HTML using a WYSIWYG editor. A collection of widgets: implemented as components. They are self documenting and they can be embedded as regular components in normal web2py views or, using a simplified syntax, into wiki pages. A set of special pages (meta-code,meta-menu, etc.) that can be used to customize the plugin (for example define code the plugin should run, customize the menu, etc.) Thewelcomeapp plus theplugin_wikican be thought of as a development environment in itself that is suitable for building simple web applications such as a blog. From here on we will assume the plugin_wiki is applied to a copy of the welcome scaffolding app. The first thing you notice after installing the plugin is that it adds a new menu item called pages. Click on the pages menu item and you will be redirected to the plugin action: http://127.0.0.1:8000/myapp/plugin_wiki/index The plugin index page lists the pages created using the plugin itself and allows you to create new ones by choosing a slug. Try creating a home page. You will be redirected to http://127.0.0.1:8000/myapp/plugin_wiki/page/home Click on create page to edit its content. By default, the plugin is at level 3, which means you can insert widgets as well as code in pages. By default it uses the markmin syntax to describe the page content. MARKMIN syntax Here is a primer for the markmin syntax: markmin html # title <h1>title</h1> ## subtitle <h2>subtitle</h2> ### subsubtitle <h3>subsubtitle</h3> **bold** <strong>bold</strong> ''italic'' <i>italic</i> http://... <a href="http://...com">http:...</a> http://...png <img src="http://...png" /> http://...mp3 <audio src="http://...mp3"></audio> http://...mp4 <video src="http://...mp4"></video> qr:http://... <a href="http://..."><img src="qr code"/></a> embed:http://... <iframe src="http://..."></iframe> Notice that links, images, audio, and video files are embedded automatically. For more information on MARKMIN syntax, please refer to Chapter 5. If the page does not exist, you will be asked to create one. The edit page allows you to add attachments to pages (i.e. static files) and you can link to them as [[mylink name attachment:3.png]] or embed them with [[myimage attachment:3.png center 200px]] The size (200px) is optional. center is not optional but it may be replaced by left or right. You can embed blockquoted text with -----this is blockquoted----- as well as tables -----0 | 0 | X0 | X | 0X | 0 | 0----- and verbatim text ``verbatim text`` You can also prepend an optional :class to the final ----- or the final ``. For blockquoted text and tables it will be translated in the class of the tag, for example: -----test-----:abc renders as <blockquote class="abc">test</blockquote> For verbatim text the class can be used to embed content of different types. You can, for example, embed code with syntax highlighting by specifying the language with :code_language `` def index(): return 'hello world' ``:code_python You can embed widgets: ``name: widget_nameattribute1: value1attribute2: value2``:widget From the edit page you can click on "widget builder" to insert widgets from a list, interactively: (for a list of widgets see the next subsection). You can also embed web2py template language code: `` {{for i in range(10):}}<h1>{{=i}}</h1>{{pass}} ``:template Page permissions When editing a page you will find the following fields: active(defaults toTrue). If a page is not active, it will not be accessible to visitors (even if public). public(defaults toTrue). If a page is public, it can be accessed by visitors without logging in. role(defaults to None). If a page has a role, the page can be accessed only by visitors who are logged in and are members of the group with the corresponding role. Special pages meta-menu contains the menu. If this page does not exist, web2py uses the response.menu defined in "models/menu.py". The content of the meta-menu page overrides the menu. The syntax is the following: Item 1 Name http://link1.com Submenu Item 11 Name http://link11.com Submenu Item 12 Name http://link12.com Submenu Item 13 Name http://link13.comItem 2 Name http://link1.com Submenu Item 21 Name http://link21.com Submenu Item 211 Name http://link211.com Submenu Item 212 Name http://link212.com Submenu Item 22 Name http://link22.com Submenu Item 23 Name http://link23.com where the indentation determines the submenu structure. Each item is composed of the text of the menu item followed by a link. A link can be page:slug. A link None does not link to any page. Extra spaces are ignored. Here is another example: Home page:home Search Engines None Yahoo http://yahoo.com Google http://google.com Bing http://bing.com Help page:help This renders as follows: meta-codeis another special page and it must contain web2py code. This is an extension of your models, and in fact you can put model code here. It is executed when "models/plugin_wiki.py" code is executed. You can define tables in meta-code. For example, you can create a simple table "friends" by placing this in meta-code: db.define_table('friend',Field('name',requires=IS_NOT_EMPTY())) and you can create a friend-management interface by embedding in a page of your choice the following code: ## List of friends``name: jqgridtable: friend``:widget## New friend``name: createtable: friend``:widget The page has two headers (starting with #): "List of friends" and "New friend". The page contains two widgets (under the corresponding headers): a jqgrid widget that lists friends and a crud create widget to add a new friend. meta-header, meta-footer, meta-sidebar are not used by the default layout in "welcome/views/layout.html". If you want to use them, edit "layout.html" using admin (or the shell) and place the following tags in the appropriate places: {{=plugin_wiki.embed_page('meta-header') or ''}} {{=plugin_wiki.embed_page('meta-sidebar') or ''}} {{=plugin_wiki.embed_page('meta-footer') or ''}} In this way, the content of those pages will show up in the header, sidebar and footer in the layout. Configuring plugin_wiki As with any other plugins in "models/db.py" you can do from gluon.tools import PluginManager plugins = PluginManager() plugins.wiki.editor = auth.user.email == mail.settings.sender plugins.wiki.level = 3 plugins.wiki.mode = 'markmin' or 'html' plugins.wiki.theme = 'ui-darkness' where editoris true if the current logged-in user is authorized to edit plugin_wiki pages levelis the permission: 1 to edit regular pages, 2 to embed widgets in pages, 3 to embed code modedetermines whether to use a "markmin" editor or a WYSIWYG "html" editor.WYSIWYG themeis the name of the required jQuery UI Theme. By default only the color-neutral "ui-darkness" is installed. You can add themes here: static/plugin_wiki/ui/%(theme)s/jquery-ui-1.8.1.custom.css Current widgets Each widget can be embedded both in plugin_wiki pages and in normal web2py templates. For example, to embed a YouTube video in a plugin_wiki page, you can do ``name: youtubecode: l7AWnfFRc7g``:widget or to embed the same widget in a web2py view, you can do: {{=plugin_wiki.widget('youtube',code='l7AWnfFRc7g')}} In either case, this is the output: Widget arguments that do not have a default value are required. Here is a list of all current widgets: read read(table,record_id=None) Reads and displays a record tableis the name of a table record_idis a record number create create(table,message='',next='',readonly_fields='', hidden_fields='',default_fields='') Displays a record create form tableis the name of a table messageis a the message to be displayed after record is created nextis where to redirect, example "page/index/[id]" readonly_fieldsis a list of comma separated fields hidden_fieldsis a list of comma separated fields default_fieldsis a list of comma separatedfieldname=value update update(table,record_id='',message='',next='', readonly_fields='',hidden_fields='',default_fields='') Displays a record update form tableis the name of a table record_idis he record to be updated or{{=request.args(-1)}} messageis a the message to be displayed after record is created nextis where to redirect, example "page/index/[id]" readonly_fieldsis a list of comma separated fields hidden_fieldsis a list of comma separated fields default_fieldsis a list of comma separatedfieldname=value select select(table,query_field='',query_value='',fields='') Lists all records in the table tableis the name of a table query_fieldandquery_valueif present will filter records by according to the queryquery_field == query_value fieldsis a list of comma separate fields to be displayed search search(table,fields='') Widgets for selecting records tableis the name of a table fieldsis a list of comma separated fields to be displayed jqgrid jqgrid(table,fieldname=None,fieldvalue=None,col_widths='', colnames=None,_id=None,fields='',col_width=80,width=700,height=300) Embeds a jqGrid plugin tableis the table name fieldname,fieldvalueare an optional filter:fieldname==fieldvalue col_widthsis the width of each column colnamesis a list of column names that are displayed _idis the "id" of the TABLE that contains the jqGrid fieldsis a list of columns to be displayed col_widthis the default width of columns heightis the height of the jqGrid widthis the width of the jqGrid Once you have the plugin_wiki installed, you can easily use the jqGrid in your other views too. Example usage (displays yourtable filtered by fk_id==47): {{=plugin_wiki.widget('jqgrid','yourtable','fk_id',47,'70,150', 'Id,Comments',None,'id,notes',80,300,200)}} latex latex(expression) Uses Google charting API to embed LaTeX pie_chart pie_chart(data='1,2,3',names='a,b,c',width=300,height=150,align='center') Embed a pie chart datais a list of comma separated values namesis a list of comma separated labels (one for data item) widthis the width of the image heightis the height of the image aligndetermines the alignment of the image bar_chart bar_chart(data='1,2,3',names='a,b,c',width=300,height=150,align='center') Uses Google charting API to embed a bar chart datais a list of comma separated values namesis a list of comma separated labels (one for data item) widthis the width of the image heightis the height of the image aligndetermines the alignment of the image slideshow slideshow(table, field='image', transition='fade', width=200, height=200) Embeds a slideshow. It gets the images from a table. tableis the table name fieldis the upload field in the table that contains images transitiondetermines the type of transition, e.g. fade, etc. widthis the width of the image heightis the height of the image youtube youtube(code, width=400, height=250) Embeds a YouTube video (by code) codeis the code of the video widthis the width of the image heightis the height of the image vimeo vimeo(code, width=400, height=250) Embeds a Vimeo video (by code) codeis the code of the video widthis the width of the image heightis the height of the image mediaplayer mediaplayer(src, width=400, height=250) Embeds a media file (such as Flash video or an mp3 file) srcis the src of the video widthis the width of the image heightis the height of the image comments comments(table='None', record_id=None) Embeds comments in the page Comments can be linked to a table and/or a record tableis the table name record_idis the id of the record tags tags(table='None', record_id=None) Embeds tags in the page tags can be linked to a table and/or a record tableis the table name record_idis the id of the record tag_cloud tag_cloud() Embeds a tag cloud map map(key='....', table='auth_user', width=400, height=200) Embeds a Google map. It gets points on the map from a table keyis the google map api key (default works for 127.0.0.1) tableis the table name widthis the map width heightis the map height The table must have columns: latitude, longitude and map_popup. When clicking on a dot, the map_popup message will appear. iframe iframe(src, width=400, height=300) Embeds a page in an <iframe></iframe> load_url load_url(src) Loads the content of the url using the LOAD function load_action load_action(action, controller='', ajax=True) Loads the content of URL(request.application, controller, action) using the LOAD function Extending widgets Widgets to plugin_wiki can be added by creating a new model file called "models/plugin_wiki_"name where name is arbitrary and the file contains something like: class PluginWikiWidgets(PluginWikiWidgets): @staticmethod def my_new_widget(arg1, arg2='value', arg3='value'): """ document the widget """ return "body of the widget" The first line states that you are extending the list of widgets. Inside the class, you can define as many functions as needed. Each static function is a new widget, except for functions that start with underscores. The function can take an arbitrary number of arguments which may or not have default values. The docstring of the function must document the function using the markmin syntax itself. When widgets are embedded into plugin_wiki pages, arguments will be passed to the widget as strings. This means the widget function must be able to accept strings for every argument and eventually convert them into the required representation. You can decide what the string representation must be - just make sure this is documented in the docstring. The widget can return a string of web2py helpers. In this latter case they will be serialized using .xml(). Notice how the new widget can access any variable declared in the global namespace. As an example, we are going to create a new widget that display the "contact/ask" form created at the beginning of this chapter. This can be done by creating a file "models/plugin_wiki_contact" that contains: class PluginWikiWidgets(PluginWikiWidgets): @staticmethod def ask(email_label='Your email', question_label='question'): """ This plugin will display a contact us form that allows the visitor to ask a question. The question will be emailed to you and the widget will disappear from the page. The arguments are - email_label: the label of the visitor email field - question_label: the label of the question field """ form=SQLFORM.factory( Field('your_email', requires=IS_EMAIL(), label=email_label), Field('question', requires=IS_NOT_EMPTY()), label=question_label) if form.process().accepted: if mail.send(to='admin@example.com', subject='from %s' % form.vars.your_email, message = form.vars.question): command="jQuery('#%s').hide()" % div_id response.flash = 'Thank you' response.js = "jQuery('#%s').hide()" % request.cid else: form.errors.your_email="Unable to send the email" return form.xml() plugin_wiki widgets are not rendered by a view unless the response.render(...) function is called explicitly by the widget. top
When we announced that Fiesta was shutting down the reaction was beyond anything we could’ve expected. Support poured in via email, twitter, and in comments on the blog post. That support inspired us to spend the past several weeks scrambling to find a way to keep Fiesta alive, and I’m very happy to announce that it won’t be shutting down after all. A good friend of mine and one of the earliest Fiesta users, Joseph Perla, noticed the overwhelming response and reached out to us. He will be taking over the maintenance and development of Fiesta, and setting the course for its future. Joe is a great developer (and person), and I’m confident he’s the right person to keep Fiesta alive and flourishing for a long time to come. Expect to hear more from Joe in the weeks and months to come. Joe is “committed to keeping Fiesta groups alive forever as a core enhancement to email.” Sorry for the confusion/concern caused by the shutdown notice, but know that we wouldn’t have been able to find a way to keep it alive if it weren’t for all of your support. So, thank you, and happy Fiesta’ing! Mike Update (2/28/2011): We’ve found a way to keep Fiesta alive for good. See here for details. We’ve got some sad news for you: we have decided to stop working on Fiesta and to shut the service down. If you’re interested in why, there are more details at the end of this post. If not, here’s the quick overview of how this affects you: we’ve made an export tool available on the list management page to export your lists. We’ve turned off the ability to create new lists, but your current lists will continue to function until March 1st. Hopefully that gives you time to find an alternate solution and coordinate with the other members of your lists. After March 1st, we’ll be turning off the servers that handle incoming email (so lists will no longer function). At that point all personal data will be permanently deleted. We owe all of you a huge debt of gratitude for using Fiesta and making it so fun for us to work on over the past 13 months. We sincerely hope that it’s been a useful tool for you. If you have any questions or concerns please feel free to get in touch with me directly: mike@corp.fiesta.cc We wish you way more than luck, Mike and Dan If you’ve read this far, it means you’re curious to hear about *why* we’re shutting down. I started work on Fiesta as a reaction to a real problem - communicating with groups online is harder than it should be. I think that we’ve come really far with Fiesta and have solved a lot of the problems that we set out to solve. That said, it really has been a labor of love. Dan and I have worked tirelessly to get things to the point where they are now. While Fiesta has been growing rapidly, there is still a long way to go. To be totally honest, I’ve lost the enthusiasm that is necessary to continue to work tirelessly on solving the problem. The reasons for that are probably too uninteresting to list here. The summary is that it seems to me that life is too short to spend time on something that I’m not 100% passionate about, so I’ve decided to stop spending time on Fiesta. You may have some more questions: Why not keep Fiesta running but spend less time on it? Running a service like Fiesta is relatively expensive. There are financial costs (like paying for servers to host the service), but there are also time costs. Behind the scenes we do things every day like fighting spam (still partially a manual process even at really large scale services) and dealing with outages and unexpected issues. All of these things make the idea of putting Fiesta on “autopilot” impractical. Why not sell Fiesta to someone who can keep it running? If we found the right match, someone committed to the well-being of the users who have put their trust in us, we’d definitely consider this option. If you think you’re the right person to take over (or if you know the right person) definitely get in touch. P.S. There are a lot of people who have supported us along the way and we owe a huge thanks to all of you. I’d like to single out Dogpatch Labs and Polaris for a particular thanks: we never would have made it this far without your support. There is a growing (and welcome) trend of web services tightly integrating email into their application workflows. One of the most useful (and frequent) types of integration is reply-by-email. Services like GitHub, Disqus and Facebook all support this. To get reply-by-email working, services set the From header to a special address that goes back to the service in question. Here’s what the From header looks like for messages from GitHub, Disqus and Facebook, respectively: John Doe <reply+i-...@reply.github.com> Disqus <notifications-...@disqus.net> Reply to Comment <c+...@reply.facebook.com> In every case, the ... is actually a long, presumably unique, identification string. This works well in general, but presents a couple of subtle user-experience problems. The most notable problems we’ve found: The autocomplete problem isn’t as much of a sticking point for Disqus and Facebook style headers, since they don’t use the sender’s name. For GitHub-style headers it’s a big problem: when I start typing “John Doe” the autocomplete will include the GitHub reply address. That said, GitHub-style headers are nice as they provide more information/context about who is responsible for the message you’re receiving. To solve these problems, there should be a header that services can use to indicate that they are doing this type of From-header munging. When a client sees that header it can avoid adding the From address to the autocomplete index and, for the purposes of search, can index the message using a different address that the service optionally provides. Let’s say the header chosen is X-Indexable-From. For a service to indicate that a message’s From address shouldn’t be added to contacts/auto-complete, it just sets: X-Indexable-From: false If it’s compatible with a service’s privacy policy, it can also specify the address of the person who is actually responsible for the notification being received. That way, the proper name/address can be added to the receiver’s contacts and search index: X-Indexable-From: john.doe@example.com or X-Indexable-From: John Doe <john.doe@example.com> In any of these examples, the From header stays the same as above - it’s just ignored by clients for the purposes of autocomplete & search indexing. Since we’re proposing this, it may come as no surprise that we’re running into similar issues at Fiesta with our implementation of Reply-Thru. We thought we’d publish this post and see if others have thoughts on this or can point out anything we’re overlooking. Consider it a very informal RFC. Is anybody else interested in pushing something like this forward as an open standard? Alternatively, is there an existing solution to this problem? Edit: We’ve already heard from a couple of people who run services that have the same issue and are looking for solutions. The best thing you can do to help is to spread the word about this problem and the proposed solution. It doesn’t really become viable until we get the attention of at least one big email client, so I’m not proposing that people start adding this header to outbound emails yet. Or at least not until there is consensus built around syntax, header name, etc. One of my favorite MongoDB tricks is the ability to use an ObjectId (the default type for MongoDB’s _id primary key) as a timestamp for when a document was created. Here’s how it works: >>> import pymongo >>> db = pymongo.Connection().test >>> db.test.insert({'hello': 'world'}) ObjectId('4f202e64e6fb1b56ff000000') >>> doc = db.test.find_one() >>> doc['_id'].generation_time datetime.datetime(2012, 1, 25, 16, 31, 32, tzinfo=<...>) We’re inserting a single document and then immediately querying for it. The generation_time property of the automatically generated _id gives us a datetime representing when that ObjectId was generated (precise to the second). This is great for those times when you would’ve otherwise added an extra “created_on” field with just a timestamp. Going in Reverse PyMongo’s ObjectId class also has a method that let’s us generate an ObjectId from a datetime, for use in querying (the other drivers have this too). Let’s insert another document and give it a try: >>> import pprint >>> import datetime >>> from bson.objectid import ObjectId >>> db.test.insert({'hello': 'a little later'}) ObjectId('4f2030d9e6fb1b56ff000001') >>> pprint.pprint(list(db.test.find())) [{u'_id': ObjectId('4f202e64e6fb1b56ff000000'), u'hello': u'world'}, {u'_id': ObjectId('4f2030d9e6fb1b56ff000001'), u'hello': u'a little later'}] >>> timestamp = datetime.datetime(2012, 1, 25, 16, 35) >>> pprint.pprint(list(db.test.find({'_id': {'$gt': ObjectId.from_datetime(timestamp)}}))) [{u'_id': ObjectId('4f2030d9e6fb1b56ff000001'), u'hello': u'a little later'}] The call to ObjectID.from_datetime() is what let’s us create a special ObjectId just for querying. If you look at the API docs you’ll see a note that I wrote a long time ago about when this method is safe to use. That leads us into our next section: Abusing ObjectIds At Fiesta we use ObjectIds to get the timestamps to display in our new archiving UI. Recently we had to import some existing archives for a group that was migrating to Fiesta. This presents a problem: when we import the archives we are creating new documents with new ObjectIds, but we want them to have timestamps that make them look much older. There are a couple of ways we could’ve approached this problem. I’ll start with what we did and then discuss why it’s wrong and what we probably should’ve done instead :). We wrote some code to generate ObjectIds with timestamps that occurred in the past, and manually generated _id values to match the messages we were importing. Here’s the code: import calendar import struct from bson.objectid import ObjectId # Current ObjectId increment INC = 0 def generate_objectid(generation_time): ''' This is unsafe. We generate fake ObjectIds. Set the five (machine id/PID) bytes to '\xFA' so we can at least recognize OIDs we generated. We don't lock around the INC, so this method isn't re-entrant. ''' global INC # Timestamp oid = struct.pack(">i", int(calendar.timegm(generation_time.timetuple()))) # Machine ID / PID oid += "\xFA" * 5 # Increment oid += struct.pack(">i", INC)[1:4] INC = (INC + 1) % 0xFFFFFF return ObjectId(oid) We couldn’t use the above ObjectId.from_datetime() because, as noted in the docs, it’s unsafe for use in anything but queries. The method above is marginally more safe by virtue of using an actual increment and a canary for the Machine ID & PID (from_datetime() uses all \x00s). But it’s still unsafe - if we need to do another import we need to be sure not to use the same canary. We also need to be sure that the canary never matches any of our actual Machine ID / PID bytes. What we should’ve done What we probably should do is add a “created_on” field with a regular datetime timestamp for messages that are being imported. When we go to display a message use created_on if it exists and fall-back to the _id otherwise. That way we’re never resorting to improperly generated ObjectIds, but we still get the benefit of built-in timestamps when we can. I figured I’d do this post in case anybody comes across the same problem, and as a neat way of exposing some of the internals of ObjectIds. The great thing about mailing lists is that they make it simple to discuss things with an entire group. A problem with mailing lists is that they can make it too simple to discuss things with an entire group. Everyone has seen it or had it happen to them: you receive a message through a list and reply with a personal note to the message’s sender. Your reply, however, actually goes to the entire list. Embarrassing. The Problem The reason this happens is because most mailing list software (including Fiesta by default) sets a “Reply-To” header pointing back to the list when a message is distributed. When you press Reply, your email client sees that header and composes a new message to the list rather than the sender. Mailing lists do this to foster group communication; if replies go to the whole group then everyone stays in the loop. Otherwise, it’s very easy for discussions to become fragmented. The problem seems intractable: we want people to be able to send personal replies, but we also want to keep group discussion on the list as much as possible (even if somebody hits Reply instead of Reply-All). To date, all mailing list software has sort of punted on the problem. The list software picks a default behavior and, at best, gives users the ability to customize the behavior themselves. Customization might seem like a great solution, but it’s bad for a couple of reasons. First, customization requires work and management. Who gets to decide how a list behaves? When? Is the setting mandatory or is it hidden away where only advanced users will find it? Second, customization means that different lists behave different ways, even on the same service. This is the same problem seen with all modal UIs; users can never be certain exactly how a list will behave without knowing how it’s configured. A New Solution: Reply-Thru We’ve been working on what we think is a better solution to this problem. We’re calling it Reply-Thru. Reply-Thru is enabled on all lists that are using NewFiesta. When you get a message from a Fiesta list you can Reply or Reply-All, just like a normal email message. If you Reply-All, your message will go to the entire list. If you Reply, it will go only to the sender. What makes Reply-Thru special, however, is how that direct message is sent. It gets sent through Fiesta, and a note is added telling the recipient that it was sent directly to them. The recipient has the option of sending a Reply directly back to you, but they also have the option of hitting Reply-All and taking the conversation back to the list. They can also distribute the message to rest of the list with a single click. Reply-Thru makes direct responses the default, but makes it dead simple to bring the discussion back to the list when that’s where it belongs. Best of all, there’s no configuration: every list can behave the same way. There are some issues with this approach as well, most notably is that we have to manipulate the From address of emails being sent to lists. Instead of seeing an email from mike@example.com, users will see mike-at-example.com+via@fiesta.cc. This might present some confusion, especially to those who are used to the old behavior. We think the benefits of reply-thru outweigh the issues, and we’ll continue to iterate on it as more Fiesta lists transition to NewFiesta. Give it a try and let us know what you think! Since we launched Fiesta, one of the most asked for features has been the ability to browse list messages from the web. We’ve been hard at work on getting this right, and we’re psyched to announce that it’s available now. Here’s an example thread that explains a bit more about it. For new groups it’s enabled by default, but for existing groups you’ll need to go to your settings page and manually enable (we explain why in that thread). There was some good coverage of the launch in Fast Company, as well. Now that this is out the door, we’ll start talking about some of the tech behind it here in the blog, as usual. If you have any questions or feedback - let us know. Parts of the Fiesta API (as well as some new features we’re rolling out for Fiesta itself) rely on the ability to automatically generate clean text (markdown) versions of incoming messages. Our parser tends to prefer using the text version of a message if possible, as it’s generally easier to parse than the HTML version. That said, sometimes a message only contains an HTML version - we need a way to generate our canonical markdown representation from that alone. Enter html2text. html2text is a great little Python module by Aaron Swartz that takes HTML as input and generates a markdown version as output. Here’s an example: >>> import html2text >>> print html2text.html2text('''<html><body><p>Hello World</p> <ul><li>Here's one thing</li> <li>And here's another!</li></ul></body></html>''') Hello World * Here's one thing * And here's another! The output is nicely formatted markdown text, exactly what we were looking for. The only problem we’ve noticed is that the module has some trouble dealing with malformed HTML. Our approach has been to run things through BeautifulSoup first, which tends to do a great job even with crappy markup. Last week we did a post with some tips for improving web app security, focused primarily on the Strict-Transport-Security, X-Frame-Options and X-Content-Security-Policy headers. Even though it was a pretty quick/simple post, the overall reaction was positive. There were some requests for more similar posts, so consider this the next in the series. This post is about a couple of options you can set on cookies to improve security. HttpOnly When your server sets a cookie the client’s browser will include it in future requests. Cookies are probably the easiest way to maintain state across requests, so most web applications use them to store users’ logged-in state, etc. That makes them a good target for attacks: if an attacker can get ahold of a user’s cookie they’ll be able to take actions in the app as if they were that user. In addition to including them in future requests, the browser also exposes cookies to Javascript running on the client side (as document.cookie). This makes document.cookie a great target for exploitation if the attacker is able to find an XSS vulnerability: they can embed a script that does something evil with the values of the user’s cookies. The HttpOnly option tells the browsers that support it not to allow client-side access to the cookie. The cookie will still get sent along with future requests (so it can be used to maintain state on the server side), but won’t be visible to an attacker who manages to run a script on the page. Here’s what a Set-Cookie header might look like that includes HttpOnly: Set-Cookie: x=5; path=/; HttpOnly This option limits the surface area of XSS attacks, but (just like our discussion of X-Content-Security-Policy) is really just treating the symptoms and not the cause. The important thing is to prevent code injection altogether. Secure Cookies are also vulnerable when they are sent in the clear. Just like in the last post, the first step is to be sure you’re using SSL for all requests. The Strict-Transport-Security header comes in handy there (n.b. we recognize that this blog doesn’t use SSL - we’re hosting it on tumblr. We do use both of these options on Fiesta). Cookies also have a Secure option, which tells browsers to only transmit them over HTTPS. If you’re already using SSL for all requests, set the Secure bit on your cookies, too. Here’s an example header: Set-Cookie: x=5; path=/; HttpOnly; Secure Once you set the Secure option, browsers that support it will never transmit the cookie in the clear, only over https. That’s it for this post. If you have any questions or ideas for a future post let us know in the comments. We’re live-blogging MongoSV today. This is the last post, but here’s a link to all of the posts from the event. This talk is being given by Eliot (the first of his that I’ve seen all day!) Going to go over HA best practices: keeping data online and safe. What about a single node? This will have downtime. If the node crashes, intervention might be necessary. If it disappears will need a backup. Replica set v1: Single datacenter, single switch, single power source. But automatically recovers from a single node crash. A good start but not great. The next step up is still single datacenter, but w/ multiple power/network zones. Like EC2 in a single region but multiple AZs. Still some points of failure (datacenter / two node failure). With an arbiter, we can’t do w=2 writes and remain up. With 3 non-arbiters we can use w=2 but are still vulnerable to datacenter failure. The next step up is multi datacenter w/ a single DR (disaster recovery) node in a different DC. We can’t always stay up, but we at least have a DR option now. Now let’s look at the ideal: three datacenters, five nodes. One has a single delayed slave (that helps recover from fat-finger incidents like accidental db.drop). The other two DCs each have 2 active nodes. We can lose an entire DC and still have a majority w/ the other two DCs. Can do `w={dc: 2}` to guarantee write in 2 DCs. Moving on to HA sharding Each shard needs to be a replica set - same rules apply as above. Balancing can be run in a window (this is cool!) can set an activeWindow to only run the balancer at night. Sweet! Config servers need to be on at least 2 different power/network zones. Ideally just put in three separate DCs. Use host names rather than IP addresses: much easier to move a config server. Take backups of config servers. Important note (saw this earlier too in Richard’s talk): not a replica set. To bring new nodes online you need to manually move the data. Not a problem if a config server is down for a day or something: just won’t do splits/migrates. Run one mongos per app-server. Don’t need to worry about scaling mongos. Saves a network hop for many ops. If you really don’t like this, run a pool per power region with a load-balancer in front. Application-level tips: Handle spikes: queue non-synchronous writes, isolate components and features. Can your site handle going into a read-only mode? That helps a lot when dealing w/ issues. Monitor! Load, disk, CPU, but most importantly I/O (iostat). Alerts go hand in hand w/ monitoring. Have good procedures for backups, adding replica set members, adding shards, etc. Practice (in staging) (which ought to be the same as prod). Randomly shut down boxes & load test as much as possible. Recap We’re live-blogging from MongoSV today. Here’s a link to the entire series of posts. Presented by Greg Brockman from Stripe The actual title of this talk is “There’s a Moster in My Closest”, but I thought the subtitle would be more elucidating. This talk is packed! Actually, all of the talks so far today have been pretty packed - great crowd here. Monster is the name of the event processing system Greg built for Stripe. Been using it in production for a few months now, and it’s built on top of MongoDB. The concept of event processing is that you want to glean some information from lots of real-time events that are happening (incremental stats, real time analytics, trending topics, etc.). Stripe uses it for fraud detection, dashboards, and more. Now we’re going to get a live demo! He’s showing a blog-post generator that he’s written, going to use Monster to monitor the content of the posts that it’s spitting out. Live coding a “model”, which looks like sort of a quanta of reporting. Logging a new event per-sentence that gets generated. Now we need a consumer to actually do something with the events. The consumer gets streamed events and just needs to “do something”. Doesn’t worry about storage, generation, etc. Registers for classes of events and has a `consume()` method. Pretty simple, but flexible. Consumer is logging when generated sentences are “too long”. Question: Monster vs celery/beanstalkd/resque? Answer: when using a job queue the act of logging implies an “action”/job. With Monster/event queuing the goal is to totally decouple logging from performing actions on logs. Can add new consumers later, etc. Events persist, not ephemeral. Consumer uses polling to get new events. Now we’re hearing why they chose MongoDB. Replica sets are a major reason, for HA. They also wanted a document store: easy to use, so developers will all use it. They need atomic operations (talking about things like findAndModify). Seems like a lot of the talks today have been mentioning findAndModify. They like automatic collection creation, from a deployment perspective. No migrations, etc. Finally, background index building is really important for Stripe. Can create new indexes w/o compromising availability. Tradeoff: no transactions. This is the one thing they’d really like for Monster (mainly for DR). The particular case that they need it for is what they call a Stateful Consumer - can modify the state of an event while consuming it. They basically build transactions at the application layer here. Like the previous talk, they aren’t using capped collections. They don’t expire old events. They also aren’t using sharding (these are in response to audience questions again). Environment is a 3-node replica set on AWS (large instances). Not using EBS except for on one of the secondaries.
I am making a XMPP middleware in python which listens on a an address(host,port) and when it receives some connection on that port, it sends a XMPP message to a jid(XMPP user) on a server. A quick review of my setup For networking part I am using twisted For XMPP - SleekXMPP XMPP server - Openfire Now when I tried using sleekXMPP without importing anything from twistedm it was working fine. However I i try to mix sleekXMPP and twisted in one program (by importing them) I get the follwing error. Traceback (most recent call last): File "sleekXMPP/prog_3.py", line 124, in <module> main() File "sleekXMPP/prog_3.py", line 111, in main xmppThread = ClientThread(dir_q) File "sleekXMPP/prog_3.py", line 41, in __init__ self.xmpp = xmppClient(self.jid, self.password) File "sleekXMPP/prog_3.py", line 17, in __init__ sleekxmpp.ClientXMPP.__init__(self, jid, password) File "build\bdist.win32\egg\sleekxmpp\clientxmpp.py", line 65, in __init__ File "build\bdist.win32\egg\sleekxmpp\basexmpp.py", line 72, in __init__ File "build\bdist.win32\egg\sleekxmpp\jid.py", line 461, in __init__ File "build\bdist.win32\egg\sleekxmpp\jid.py", line 150, in _parse_jid File "build\bdist.win32\egg\sleekxmpp\jid.py", line 202, in _validate_domain File "C:\Python27\lib\site-packages\twisted-12.2.0-py2.7-win32.egg\twisted\python \compat.py", line 22, in inet_pton raise ValueError("Illegal characters: %r" % (''.join(x),)) ValueError: Illegal characters: u't' The code is as follows: import sleekxmpp import ssl import Queue import threading import time import logging import traceback import sys from twisted.internet import reactor, protocol , endpoints class xmppClient(sleekxmpp.ClientXMPP): """ This class defines the xmppClient object used to interact with the XMPP server """ def __init__(self, jid, password): # the constructor sleekxmpp.ClientXMPP.__init__(self, jid, password) self.add_event_handler('session_start', self.start) def start(self, event): self.send_presence() self.get_roster() def send_note(self): self.mssg = r"Hello from XMPP Service" self.recipient = r"testuser2@ghost" self.send_message(mto=self.recipient, mbody=self.mssg, mtype='chat') print "Message sent" class ClientThread(threading.Thread): def __init__(self, dir_q): super(ClientThread, self).__init__() self.dir_q = dir_q self.stoprequest = threading.Event() self.jid = 'testuser1@ghost' self.password = 'password' self.xmpp = xmppClient(self.jid, self.password) self.xmpp.register_plugin('xep_0030') # Service Discovery self.xmpp.register_plugin('xep_0004') # Data Forms self.xmpp.register_plugin('xep_0060') # PubSub self.xmpp.register_plugin('xep_0199') # XMPP Ping self.xmpp.ssl_version = ssl.PROTOCOL_SSLv3 if self.xmpp.connect(): print("Connected") self.xmpp.process(block=False) else: print("Unable to connect.") def run(self): while not self.stoprequest.isSet(): try: req = self.dir_q.get(True, 0.05) if req == 1: self.xmpp.send_note() except Queue.Empty: continue def join(self, timeout=None): self.stoprequest.set() super(ClientThread, self).join(timeout) class reqSimulator(threading.Thread): def __init__(self, dir_q): super(reqSimulator, self).__init__() self.dir_q = dir_q def run(self): while(1): self.dir_q.put(1) time.sleep(0.5) """class sendProtocol(protocol.Protocol): def connectionMade(self): r = reqSimulator(self.factory.dir_q) r.run() def connectionLost(self, reason): pass def dataReceived(self, data): self.transport.loseConnection()""" def main(): logging.basicConfig() dir_q = Queue.Queue() xmppThread = ClientThread(dir_q) xmppThread.start() sim = reqSimulator(dir_q) """factory = protocol.ServerFactory() factory.dir_q = dir_q factory.protocol = sendProtocol endpoints.serverFromString(reactor, "tcp:8001").listen(factory) reactor.run() """ sim.start() if __name__ == "__main__": main() Note that in this code, the actual networking code is commented and I am using a reqSimulator class to simulate the oncoming requests. I tried to google for any issued but got no result. Does anybody have an idea what is wrong here?
I am trying to create a list of the input with their corresponding values accessing from the global nested dictionary. Here is the code, import sys param_values = { 'vowels':{ 'aa' : [(-1,-1), (-1,-1), (-1,-1), (-1,-1), (0.1,1.0), (-1,-1)], 'ae' : [(-1,-1), (-1,-1), (0.1,0.8), (-1,-1), (0.1,1.0), (-1,-1)], 'ah' : [(-1,-1), (-1,-1), (-1,-1), (-1,-1), (0.1,1.0), (-1,-1)], 'ao' : [(-1,-1), (-1,-1), (-1,-1), (-1,-1), (0.2,1.0), (-1,-1)], 'eh' : [(-1,-1), (-1,-1), (-1,-1), (-1,-1), (0.1,1.0), (-1,-1)], 'er' : [(-1,-1), (-1,-1), (-1,-1), (-1,-1), (0.15,0.7), (-1,-1)], 'ey' : [(-1,-1), (-1,-1), (0.3,1.0), (-1,-1), (0.1,0.5), (-1,-1)], 'ih' : [(-1,-1), (-1,-1), (-1,-1), (-1,-1), (0.1,0.8), (-1,-1)], 'iy' : [(-1,-1), (-1,-1), (0.2,1.0), (-1,-1), (0.1,0.8), (-1,-1)], 'uh' : [(-1,-1), (-1,-1), (-1,-1), (-1,-1), (-1,-1), (0.1,1.0)], 'uw' : [(-1,-1), (-1,-1), (-1,-1), (-1,-1), (-1,-1), (0.1,1.0)], 'o' : [(-1,-1), (-1,-1), (-1,-1), (-1,-1), (-1,-1), (0.4,1.0)] }, 'consonants':{ 'b' : [(-1,-1), (0.0,0.0), (-1,-1), (-1,-1), (0.1,0.8), (-1,-1)], 'ch' : [(-1,-1), (-1,-1), (-1,-1), (-1,-1), (0.1,0.4), (-1,-1)], 'd' : [(-1,-1), (-1,-1), (-1,-1), (-1,-1), (0.1,0.4), (-1,-1)], 'dh' : [(-1,-1), (-1,-1), (-1,-1), (-1,-1), (0.1,0.35), (-1,-1)], 'dx' : [(-1,-1), (-1,-1), (-1,-1), (-1,-1), (0.1,0.35), (-1,-1)], 'f' : [(0.3,1.0), (-1,-1), (-1,-1), (-1,-1), (-1,-1), (-1,-1)], 'g' : [(-1,-1), (-1,-1), (-1,-1), (-1,-1), (0.1,0.8), (-1,-1)], 'hh' : [(-1,-1), (-1,-1), (-1,-1), (-1,-1), (0.1,0.8), (-1,-1)], 'jh' : [(-1,-1), (-1,-1), (-1,-1), (-1,-1), (0.1,0.8), (-1,-1)], 'k' : [(-1,-1), (-1,-1), (-1,-1), (-1,-1), (0.1,0.8), (-1,-1)], 'l' : [(-1,-1), (-1,-1), (-1,-1), (-1,-1), (0.1,0.4), (-1,-1)], 'm' : [(-1,-1), (0.0,0.0), (-1,-1), (-1,-1), (0.1,0.8), (-1,-1)], 'n' : [(-1,-1), (0.1,1.0), (-1,-1), (0.3,1.0), (0.0,0.0), (-1,-1)], 'ng' : [(-1,-1), (0.1,1.0), (-1,-1), (-1,-1), (0.0,0.0), (-1,-1)], 'p' : [(-1,-1), (0.0,0.0), (-1,-1), (-1,-1), (0.1,0.8), (-1,-1)], 'r' : [(-1,-1), (-1,-1), (-1,-1), (-1,-1), (0.1,0.4), (-1,-1)], 's' : [(-1,-1), (0.1,1.0), (-1,-1), (0.3,1.0), (0.0,0.0), (-1,-1)], 'sh' : [(-1,-1), (0.1,1.0), (-1,-1), (0.3,1.0), (0.0,0.0), (-1,-1)], 't' : [(-1,-1), (-1,-1), (-1,-1), (-1,-1), (0.1,0.4), (-1,-1)], 'th' : [(-1,-1), (-1,-1), (-1,-1), (-1,-1), (0.1,0.4), (-1,-1)], 'v' : [(0.3,1.0), (-1,-1), (-1,-1), (-1,-1), (-1,-1), (-1,-1)], 'w' : [(-1,-1), (-1,-1), (-1,-1), (-1,-1), (-1,-1), (0.1,1.0)], 'y' : [(-1,-1), (-1,-1), (-1,-1), (-1,-1), (0.1,0.6), (-1,-1)], 'z' : [(-1,-1), (0.1,1.0), (-1,-1), (0.3,1.0), (0.0,0.0), (-1,-1)], 'zh' : [(-1,-1), (-1,-1), (-1,-1), (-1,-1), (0.1,0.6), (-1,-1)] } } diphthong = { 'aw' : ['ao' , 'uw'], 'ay' : ['ao' , 'ih'], 'ow' : ['o' , 'aa'], 'oy' : ['o' , 'ih'] } print "Usage :python co.py phonemeFile" def coart(phonemeFile): """ Function for generating parameter values from the global list """ phonemeList = [] with open("syllabifiedPhonemes.txt", "r") as pFile : for line in pFile : l = line.split() for phoneme in l : next_phoneme = diphthong.get(phoneme) if next_phoneme is None : # exploring the dict param_values extracting each nested dict for group in param_values.keys() : # 'group' refers to vowels or consonants # iterate over each nested dict for phones in param_values[group].keys() : # 'phones' refers to the phonemes present inside each group phonemeList.append((phones, param_values.get(param_values[group][phones]))) else : for group in param_values.keys() : # 'group' refers to vowels or consonants for phones in param_values[group].keys() : # 'phones' refers to the phonemes present inside each group phonemeList.extend([(phones, param_values.get(param_values[group][phones])) for phoneme in next_phoneme]) print "New List" print '\n'.join(str(l) for l in phonemeList) The input file syllabifiedPhonemes.txt has the following contents: s aa ' m ih ' k l eh k ' t aa ' n ih t ' g eh l ' v ae ' n ih ' k aa ' p l ay k The if-else statement is not right as far as I can see. I got the following error: Traceback (most recent call last): File "co.py", line 189, in <module> coart("syllabifiedPhonemes.txt") File "co.py", line 160, in coart phonemeList.append((phones, param_values.get(param_values[group][phones]))) TypeError: unhashable type: 'list' I changed the if-else statement to the following and got rid of the error. if next_phoneme is None : # exploring the dict param_values extracting each nested dict for group in param_values.keys() : # 'group' refers to vowels or consonants # iterate over each nested dict for phones in param_values[group].keys() : # 'phones' refers to the phonemes present inside each group phonemeList.append((phones, param_values[group][phones])) else : for group in param_values.keys() : # 'group' refers to vowels or consonants for phones in param_values[group].keys() : # 'phones' refers to the phonemes present inside each group phonemeList.extend([(phones, param_values[group][phones]) for phoneme in next_phoneme]) But now I got the output as a huge list and I presume the program iterates through the dict many times and prints the whole dict again and again instead of just displaying the values of the given input. Could someone point out where I am wrong? Thank you.
#1251 Le 05/02/2012, à 19:35 chaoswizard Re : TVDownloader: télécharger les médias du net ! Voilà, la version 0.5 est arrivée dans le PPA ! Ubuntu ==> Debian ==> Archlinux Hors ligne #1252 Le 05/02/2012, à 20:08 ynad Re : TVDownloader: télécharger les médias du net ! @Greg_lattice comme f.x0 avec la même ligne de commande tout fonctionne sans blème... Hors ligne #1253 Le 06/02/2012, à 19:45 grandtoubab Re : TVDownloader: télécharger les médias du net ! bonsoir Un message d'erreur final alors que le téléchargement est "apparement" complet @ubuntu-desktop:~/Vidéos$ pluzzdl http://www.pluzz.fr/faites-entrer-l-accuse.html [INFO ] PluzzDL.py Début du téléchargement des fragments [CRITICAL] PluzzDL.py Impossible de charger la vidéo @ubuntu-desktop:~/Vidéos$ ls -alrt faites* -rw-r--r-- 1 abc root 265409490 2012-02-06 18:03 faites-entrer-l-accuse.flv @ubuntu-desktop:~/Vidéos$ Malheureusement fichier incomplet, smplayer affiche une durée de 1h10mn 50 s mais la lecture s'arrête à 29 mn 11 s jusqu'à là tout va bien @ubuntu-desktop:~/Vidéos$ pluzzdl -f -b http://www.pluzz.fr/faites-entrer-l-accuse.html [INFO ] PluzzDL.py Début du téléchargement des fragments [INFO ] PluzzDL.py Avancement : 1 % [INFO ] PluzzDL.py Avancement : 2 % [INFO ] PluzzDL.py Avancement : 3 % [INFO ] PluzzDL.py Avancement : 4 % [INFO ] PluzzDL.py Avancement : 5 % [INFO ] PluzzDL.py Avancement : 6 % [INFO ] PluzzDL.py Avancement : 7 % [INFO ] PluzzDL.py Avancement : 8 % [INFO ] PluzzDL.py Avancement : 9 % [INFO ] PluzzDL.py Avancement : 10 % [INFO ] PluzzDL.py Avancement : 11 % [INFO ] PluzzDL.py Avancement : 12 % [INFO ] PluzzDL.py Avancement : 13 % [INFO ] PluzzDL.py Avancement : 14 % [INFO ] PluzzDL.py Avancement : 15 % [INFO ] PluzzDL.py Avancement : 16 % [INFO ] PluzzDL.py Avancement : 17 % [INFO ] PluzzDL.py Avancement : 18 % [INFO ] PluzzDL.py Avancement : 19 % [INFO ] PluzzDL.py Avancement : 20 % [INFO ] PluzzDL.py Avancement : 21 % [INFO ] PluzzDL.py Avancement : 22 % [INFO ] PluzzDL.py Avancement : 23 % [INFO ] PluzzDL.py Avancement : 24 % [INFO ] PluzzDL.py Avancement : 25 % [INFO ] PluzzDL.py Avancement : 26 % [INFO ] PluzzDL.py Avancement : 27 % [INFO ] PluzzDL.py Avancement : 28 % [INFO ] PluzzDL.py Avancement : 29 % [INFO ] PluzzDL.py Avancement : 30 % [INFO ] PluzzDL.py Avancement : 31 % ça se gâte [INFO ] PluzzDL.py Avancement : 32 %[CRITICAL] PluzzDL.py Impossible de charger la vidéo Dernière modification par grandtoubab (Le 06/02/2012, à 20:25) Linux tout seul sur le disque dur Acer Aspire T650 :=)) RAM: 1,7 Gio, CPU: Intel® Pentium(R) 4 CPU 3.06GHz × 2. Carte graphique Gallium 0.4 on ATI RC410 (Radeon Xpress200) Partition 1: Debian Jessie & Gnome Version 3.12, kernel 3.16.2-031602-generic Partition 2: Ubuntu Trusty 14.04 & Unity Hors ligne #1254 Le 06/02/2012, à 23:13 k3c Re : TVDownloader: télécharger les médias du net ! Bizarre, pour moi le même téléchargement est OK. Hors ligne #1255 Le 07/02/2012, à 16:30 bibichouchou Re : TVDownloader: télécharger les médias du net ! Bonjour à tous, 1/ j'arrive un peu tard mais voici mon grain de sel pour obtenir la progression. La taille finale du fichier est indiquée dans l'en-tête du fichier flv. On peut y accéder avec flvtool2 comme ceci: flvtool2 -P video_name.flv|grep filesize|grep -oE [0-9]\{1,\} On peut donc comparer la taille actuelle à la taille attendue pour obtenir la progression. Ce n'est pas complétement précis. D'après ce que j'ai remarqué, on peut s'attendre à des décalages de l'ordre de 1 ou 2%. J'imagine que faire appel à un programme tiers n'est pas très pythonique (corrigez-moi, je débute en python) mais je pense que l'on peut écrire un lecteur python des méta-données du flv. 2/ Ensuite, je voudrais savoir s'il est possible de rajouter une option "reprendre", dans le cas d'une connexion défaillante, histoire de ne pas télécharger encore et encore les mêmes fragments. Je ne pense pas que le programme le fasse actuellement et je ne sais pas si on peut le mettre en oeuvre facilement. Initialement, je m'étais écrit un code bash pour récupérer les émissions de pluzz, qui sauvegardait les fragments séparément et concaténait le tout à la fin, approche qui permet de savoir quel est le dernier fragment téléchargé. Le programme de chaoswizard concatène au fur et à mesure les fragments. 3/ est-ce que quelqu'un s'est penché sur le site de rattrapage de bfmtv et a pondu un joli programme ? je m'y suis cassé les dents. Bonne journée à tous ! Dernière modification par bibichouchou (Le 07/02/2012, à 16:50) Hors ligne #1256 Le 07/02/2012, à 16:32 Greg_lattice Re : TVDownloader: télécharger les médias du net ! @Greg_lattice comme f.x0 avec la même ligne de commande tout fonctionne sans blème... Merci je sais pourquoi ça ne fonctionne pas... C'est parce que je suis à l'étranger. Avant on pouvait accéder aux émissions de pluzz, mais apparement depuis 2semaines peu à peu on interdit aux expatriés l'accès à pluzz! Merci quand même!! Hors ligne #1257 Le 07/02/2012, à 18:28 grandtoubab Re : TVDownloader: télécharger les médias du net ! Bizarre, pour moi le même téléchargement est OK. Pire encore @ubuntu-desktop:~/Vidéos$ pluzzdl http://www.pluzz.fr/faites-entrer-l-accuse.html Traceback (most recent call last): File "/usr/share/pluzzdl/main.py", line 74, in <module> PluzzDL( args[ 0 ], options.fragments, options.proxy, options.progressbar ) File "/usr/share/pluzzdl/PluzzDL.py", line 70, in __init__ self.manifest = self.navigateur.getFichier( self.manifestURLToken ) File "/usr/share/pluzzdl/Navigateur.py", line 48, in getFichier page = self.urlOpener.open( requete, timeout = self.timeOut ) File "/usr/lib/python2.7/urllib2.py", line 400, in open response = meth(req, response) File "/usr/lib/python2.7/urllib2.py", line 513, in http_response 'http', request, response, code, msg, hdrs) File "/usr/lib/python2.7/urllib2.py", line 438, in error return self._call_chain(*args) File "/usr/lib/python2.7/urllib2.py", line 372, in _call_chain result = func(*args) File "/usr/lib/python2.7/urllib2.py", line 521, in http_error_default raise HTTPError(req.get_full_url(), code, msg, hdrs, fp) urllib2.HTTPError: HTTP Error 404: Not Found @ubuntu-desktop:~/Vidéos$ python --version Python 2.7.2+ @ubuntu-desktop:~/Vidéos$ Dernière modification par grandtoubab (Le 07/02/2012, à 18:35) Linux tout seul sur le disque dur Acer Aspire T650 :=)) RAM: 1,7 Gio, CPU: Intel® Pentium(R) 4 CPU 3.06GHz × 2. Carte graphique Gallium 0.4 on ATI RC410 (Radeon Xpress200) Partition 1: Debian Jessie & Gnome Version 3.12, kernel 3.16.2-031602-generic Partition 2: Ubuntu Trusty 14.04 & Unity Hors ligne #1258 Le 07/02/2012, à 18:42 k3c Re : TVDownloader: télécharger les médias du net ! J'ai la version 0.5 de pluzzdl, est-ce ton cas ? Hors ligne #1259 Le 07/02/2012, à 18:49 grandtoubab Re : TVDownloader: télécharger les médias du net ! J'ai la version 0.5 de pluzzdl, est-ce ton cas ? Clic sur l'image que j'ai posté et tu verras que oui. Mais je relance avec l'option -f et ca commence à télécharger (http://code.google.com/p/tvdownloader/wiki/pluzzdl) @ubuntu-desktop:~/Vidéos$ pluzzdl -f http://www.pluzz.fr/faites-entrer-l-accuse.html [INFO ] PluzzDL.py Début du téléchargement des fragments Linux tout seul sur le disque dur Acer Aspire T650 :=)) RAM: 1,7 Gio, CPU: Intel® Pentium(R) 4 CPU 3.06GHz × 2. Carte graphique Gallium 0.4 on ATI RC410 (Radeon Xpress200) Partition 1: Debian Jessie & Gnome Version 3.12, kernel 3.16.2-031602-generic Partition 2: Ubuntu Trusty 14.04 & Unity Hors ligne #1260 Le 07/02/2012, à 18:53 k3c Re : TVDownloader: télécharger les médias du net ! @ bibichouchou Pour 3) Récupérer les replay de BFM TV est faisable, vu que Captvty le fait. http://rectv.free.fr/viewforum.php?f=17 Je crois surtout que personne ne s'est penché dessus pour l'instant Hors ligne #1261 Le 07/02/2012, à 18:57 bibichouchou Re : TVDownloader: télécharger les médias du net ! @ k3c et oui j'avais bien vu, c'est pourquoi j'ai bon espoir pour un script pour linux. mais j'ai regardé et pour le coup, c'est hors de mes compétences... je lance une bouteille à la mer ! Hors ligne #1262 Le 07/02/2012, à 19:10 grandtoubab Re : TVDownloader: télécharger les médias du net ! k3c a écrit : J'ai la version 0.5 de pluzzdl, est-ce ton cas ? Clic sur l'image que j'ai posté et tu verras que oui. Mais je relance avec l'option -f et ca commence à télécharger (http://code.google.com/p/tvdownloader/wiki/pluzzdl)<metadata lang=CSV prob=0.07 /> @ubuntu-desktop:~/Vidéos$ pluzzdl -f http://www.pluzz.fr/faites-entrer-l-accuse.html [INFO ] PluzzDL.py Début du téléchargement des fragments Et là le fichier est complet @ubuntu-desktop:~/Vidéos$ pluzzdl -f http://www.pluzz.fr/faites-entrer-l-accuse.html [INFO ] PluzzDL.py Début du téléchargement des fragments [INFO ] PluzzDL.py Fin du téléchargement @ubuntu-desktop:~/Vidéos$ c'est quand même cool de regarder du flash sans entendre le PC ronfler à mort Linux tout seul sur le disque dur Acer Aspire T650 :=)) RAM: 1,7 Gio, CPU: Intel® Pentium(R) 4 CPU 3.06GHz × 2. Carte graphique Gallium 0.4 on ATI RC410 (Radeon Xpress200) Partition 1: Debian Jessie & Gnome Version 3.12, kernel 3.16.2-031602-generic Partition 2: Ubuntu Trusty 14.04 & Unity Hors ligne #1263 Le 07/02/2012, à 20:19 k3c Re : TVDownloader: télécharger les médias du net ! @ bibichouchou Un des programmes de récupération de replays sous Windows (TVRR) s'arrête http://rectv.free.fr/viewtopic.php?f=6&t=358 C'est dommage qu'il ne passe pas en open source, et que tout ce code disparaisse... Hors ligne #1264 Le 08/02/2012, à 11:43 k3c Re : TVDownloader: télécharger les médias du net ! Concernant TV5mondeplus, en fait il y a 2 cas - les urls de TV5mondeplus "de base", qui se terminent par -6chiffres. Le fichier .mp4 est dans une balise appleStreamingUrl - les urls de TV5monde Afrique, Asie, Amérique du Sud..., qui se terminent par 5 chiffres. Le fichier .mp4 est dans une balise vidéoUrl d'un fichier .smil J'ai donc fait une nouvelle version de mon script, qui récupère aussi ce deuxième cas de figure. #!/usr/bin/env python # -*- coding:utf-8 -*- # version 0.3 par k3c from lxml import etree from lxml import objectify import subprocess, optparse, re from urllib2 import URLError, Request, urlopen import socket timeout = 10 # timeout en secondes socket.setdefaulttimeout(timeout) usage = "usage: python tv5 [options] <url de l'emission>" parser = optparse.OptionParser( usage = usage ) parser.add_option( "--nocolor", action = 'store_true', default = False, help = 'desactive la couleur dans le terminal' ) parser.add_option( "-v", "--verbose", action = "store_true", default = False, help = 'affiche les informations de debugage' ) ( options, args ) = parser.parse_args() # Vérification du nombre d'arguments if( len( args ) != 1 or args[ 0 ] == "" ): parser.print_help() parser.exit( 1 ) if args[0][-4:] == "?t=1": xargs[0] = args[0][:-4] id = re.findall('[0-9]{5}[0-9]?$',args[0]) #conversion string numérique id = ''.join(id) # les infos sur la vidéo sont accessibles là the_url = "".join('http://www.tv5mondeplus.com/video-xml/get/') + str(id) try: file = urlopen(the_url) except URLError, e: print e.code data = file.read() file.close() root = objectify.fromstring(data) for element in root.iter(): if element.tag == "permalink": titre = element.text tit = titre.split('/')[-1] # on enlève les 7 derniers caractères qui sont - et 6 chiffres pour le nom du fichier et on ajoute tv5_ au début tit = tit[:-6] tit = "tv5_"+tit+".wmv" # 2 cas, la video est sur la balise appleStreamingUrl, ou dans le .smil à # la vidéo est sur appleStreamingUrl if element.tag == "appleStreamingUrl": video = element.text # la vidéo est sur videoUrl, dans un .smil, que l'on va ouvrir # http://fr.wikipedia.org/wiki/Synchronized_Multimedia_Integration_Language if element.tag == "videoUrl": video = element.text try: # exemple de fichier .smil # <?xml version="1.0"?> # <!DOCTYPE smil PUBLIC "-//W3C//DTD SMIL 2.0//EN" "http://www.w3.org/2001/SMIL20/SMIL20.dtd"> # <smil xmlns="http://www.w3.org/2001/SMIL20/Language"> # <head> # <meta name="title" content="COURTS SEJOURS" /> # <meta name="httpBase" content="http://vodhdflash.tv5monde.com/" /> # <meta name="rtmpAuthBase" content="" /> # </head> # <body> # <switch id="tv5_catchup"> # <video src="tv5mondeplus/hq/3137682.mp4" system-bitrate="500000"/> # <video src="tv5mondeplus/bq/3137682.mp4" system-bitrate="300000"/> # </switch> # </body> # </smil> # on enleve la première ligne blanche, sinon le parsing xml se plante video = urlopen(video) video.readline() video = video.read() root = objectify.fromstring(video) # le début de l'emplacement de la vidéo est à httpBase, c'est normalement # http://vodhdflash.tv5monde.com/ for x in root.head.getchildren(): if x.get('name') == 'httpBase': content = x.get('content') #la suite de l'emplacement de la video est à video src, # et on prend la ligne avec le bit-rate le plus élevé # exemple de suite de l'emplacement, à concaténer # tv5mondeplus/hq/3137682.mp4 itc = root.body.switch.getchildren() url = itc.pop() for x in itc: if x.get('system-bitrate') > url.get('system-bitrate'): url = x url = url.get('src') except IOError, e: if hasattr(e, 'reason'): print 'Nous avons échoué à joindre le serveur.' print 'Raison: ', e.reason elif hasattr(e, 'code'): print 'Le serveur n\'a pu satisfaire la demande.' print 'Code d\' erreur : ', e.code args = ['/usr/bin/msdl', content + url , '-o', tit] app = subprocess.Popen(args=args, stdout=open('somefile', 'w')) Dernière modification par k3c (Le 08/02/2012, à 11:47) Hors ligne #1265 Le 08/02/2012, à 15:36 Babar21 Re : TVDownloader: télécharger les médias du net ! Merci pour le script TV5, k3C ! Hors ligne #1266 Le 08/02/2012, à 15:37 Zoulou.4556 Re : TVDownloader: télécharger les médias du net ! Bonjour, je fais un retour de l'utilisation de tvdownloader avec les erreurs suivante : Arte certaines émissions seulement FR Inter et Radio France rien du tout Pluzz rien du tout W9 Replay rien du tout pour le reste c'est ok voilà se que ça donne en console: zoulou@zoulou-Linux:~$ tvdownloader Traceback (most recent call last): File "/usr/lib/python2.7/logging/__init__.py", line 842, in emit msg = self.format(record) File "/usr/lib/python2.7/logging/__init__.py", line 719, in format return fmt.format(record) File "/usr/lib/python2.7/logging/__init__.py", line 464, in format record.message = record.getMessage() File "/usr/lib/python2.7/logging/__init__.py", line 328, in getMessage msg = msg % self.args TypeError: not all arguments converted during string formatting Logged from file APIPrive.py, line 253 Traceback (most recent call last): File "/usr/share/tvdownloader/APIPrive.py", line 251, in pluginRafraichir self.listePluginActif[nomPlugin].rafraichir() File "plugins/W9Replay.py", line 51, in rafraichir page = urllib.urlopen( self.urlFichierXML ) File "/usr/lib/python2.7/urllib.py", line 84, in urlopen return opener.open(url) File "/usr/lib/python2.7/urllib.py", line 205, in open return getattr(self, name)(url) File "/usr/lib/python2.7/urllib.py", line 342, in open_http h.endheaders(data) File "/usr/lib/python2.7/httplib.py", line 951, in endheaders self._send_output(message_body) File "/usr/lib/python2.7/httplib.py", line 811, in _send_output self.send(msg) File "/usr/lib/python2.7/httplib.py", line 773, in send self.connect() File "/usr/lib/python2.7/httplib.py", line 754, in connect self.timeout, self.source_address) File "/usr/lib/python2.7/socket.py", line 553, in create_connection for res in getaddrinfo(host, port, 0, SOCK_STREAM): IOError: [Errno socket error] [Errno -2] Nom ou service inconnu ERROR Navigateur : timed out ERROR Navigateur : timed out ERROR Navigateur : timed out utilisation de tvdownloader sur ubuntu 11.04 64bits. bon courage pour la suite, merci pour vos développements et votre partage. Dernière modification par Zoulou.4556 (Le 08/02/2012, à 15:44) Asus X66IC Manjaro 64bits dual Ubuntu 14.04 64 bits / Dell Latitude D520 Xubuntu 14.04 32 bits/ Aurore BG6-I3-4-H10S1 SSD 120 go +DD 1To Manjaro 64 bits dual Ubuntu 64 bits Hors ligne #1267 Le 08/02/2012, à 16:02 k3c Re : TVDownloader: télécharger les médias du net ! @ Zoulou Pour récupérer les émissions de Pluzz, cela marche très bien, mais il faut utiliser PluzzDL 0.5, en attendant que cela soit à nouveau intégré dans TVDownloader. http://code.google.com/p/tvdownloader/wiki/pluzzdl Puis tu tapes, pour récupérer un Samantha par exemple $ pluzzdl -f http://www.pluzz.fr/samantha-oups-2012-02-07-20h15.html Je télécharge Missing sur W9replay, donc il y a au moins un replay qui fonctionne correctement sur cette chaîne. Est-ce que tu as essayé ce replay-là ? Pour Arte, je télécharge [HD] Son Jarocho de Veracruz, avec Son de Madera au Quai Branly mais effectivement il y a des choses qui ne fonctionnent pas. Je n'utilise jamais les radios, mais je vais regarder. Hors ligne #1268 Le 08/02/2012, à 16:05 Babar21 Re : TVDownloader: télécharger les médias du net ! Pour ce qui ne marche plus (pour l'instant) sur Arte dans TVDownloader, il y a Arte+7 recorder, qui ne fait que ça mais qui le fait bien. Hors ligne #1269 Le 08/02/2012, à 16:18 Zoulou.4556 Re : TVDownloader: télécharger les médias du net ! @k3c, je viens d'essayer avec PluzzDL ça fonctionne sans problème merci pour l'info. @Babar21, je l'utilise effectivement pour Arte, c'était surtout pour un retour que je l ai signalé. merci pour vos réponses. Dernière modification par Zoulou.4556 (Le 08/02/2012, à 16:19) Asus X66IC Manjaro 64bits dual Ubuntu 14.04 64 bits / Dell Latitude D520 Xubuntu 14.04 32 bits/ Aurore BG6-I3-4-H10S1 SSD 120 go +DD 1To Manjaro 64 bits dual Ubuntu 64 bits Hors ligne #1270 Le 08/02/2012, à 19:23 grandtoubab Re : TVDownloader: télécharger les médias du net ! Concernant TV5mondeplus, en fait il y a 2 cas Bord!!! j'ai pas de pot moi @ubuntu-desktop:~/Linux$ python k3c.py http://www.tv5mondeplusafrique.com/#serie_cinema_histoires_droles_et_droles_de_gens 404 Traceback (most recent call last): File "k3c.py", line 32, in <module> data = file.read() TypeError: descriptor 'read' of 'file' object needs an argument Par contre la Rama, je la récupère @ubuntu-desktop:~/Linux$ python k3c.py http://www.tv5mondeplus.com/video/08-02-2012/rama-yade-213995 download [ tv5_rama-yade-.wmv ] @ubuntu-desktop:~/Linux$ Host: [ 195.12.231.32:80 ] connected! DL: 3143308/2147483647 B -- 0% 87.6K/s ETA 06:09:5 DL: 39109792/39109792 B -- 100% 84.0K/s Complete finished!! FINISHED --00:07:46-- Donc y a un troisieme cas celui sans numéro à la fin Dernière modification par grandtoubab (Le 08/02/2012, à 19:50) Linux tout seul sur le disque dur Acer Aspire T650 :=)) RAM: 1,7 Gio, CPU: Intel® Pentium(R) 4 CPU 3.06GHz × 2. Carte graphique Gallium 0.4 on ATI RC410 (Radeon Xpress200) Partition 1: Debian Jessie & Gnome Version 3.12, kernel 3.16.2-031602-generic Partition 2: Ubuntu Trusty 14.04 & Unity Hors ligne #1271 Le 08/02/2012, à 20:19 k3c Re : TVDownloader: télécharger les médias du net ! @ Grandtoubab Chic, un cas non géré ! Je regarde... Edit : en fait, sur tv5mondeplus.com, quand on clique sur mappemonde, - on a un certain nombre de reportages visibles sur la carte - on peut aussi sélectionner Afrique, Amérique du Sud, Australie... C'est ces cas que mon script est censé gérer, vu que je n'ai vu que des adresses html se terminant par - et 5 chiffres Dernière modification par k3c (Le 09/02/2012, à 11:18) Hors ligne #1272 Le 12/02/2012, à 16:25 coucou123 Re : TVDownloader: télécharger les médias du net ! Salut, je me permet un petit retour: Quand je télécharge certaines émissions sur M6replay, la barre de téléchargement reviens sans cesse à 0% (à peine passé le cap des 10%). De plsu lorsque le téléchargement est terminé, il s'avère que le fichier est incomplet. Mais sinon, c'est une excellent initiative ce logiciel, d'autant plus que je n'arrive plus à voir quoi que ce soit sur M6Replay ! Precise Pangolin 64bits Lucid Lynx 64bits Hors ligne #1273 Le 13/02/2012, à 09:00 k3c Re : TVDownloader: télécharger les médias du net ! @coucou123 Ca se produit avec quelles émissions sur M6 ? Hier j'ai téléchargé sans problème une série, puis un Turbo. Hors ligne #1274 Le 13/02/2012, à 15:32 coucou123 Re : TVDownloader: télécharger les médias du net ! oups, désolé, en fait il se trouve que l'émission était bien en entier, mais que c'était la barre de défilement de VLC qui n'allait pas jusqu'à la fin. Le fichier est peut-être légèrement corrompu, je ne sais pas... Par contre il est vrai que la barre de progression de téléchargement de TVDownloader bugue un peu. L'émission en question c'était Top Chef (qui dure 2h30), et la barre de progression de téléchargement revenait à zéro dès qu'elle dépassait 9%, ce qui fait qu'il était impossible de savoir quand le téléchargement allait se terminer. Mais sinon, ce logiciel est vraiment chouette. Il faudrait juste que l'on puisse ajouter soi-même des sites, ou alors que le choix s'agrandisse au fil du temps. Peut-être aussi qu'un outil pour signaler les plugins qui ne fonctionnent plus, ce serait pas mal. Merci pour ton boulot, c'est du bon boulot ! Precise Pangolin 64bits Lucid Lynx 64bits Hors ligne #1275 Le 13/02/2012, à 18:53 grandtoubab Re : TVDownloader: télécharger les médias du net ! @ Grandtoubab Chic, un cas non géré ! Je regarde... Edit : en fait, sur tv5mondeplus.com, quand on clique sur mappemonde, - on a un certain nombre de reportages visibles sur la carte - on peut aussi sélectionner Afrique, Amérique du Sud, Australie... C'est ces cas que mon script est censé gérer, vu que je n'ai vu que des adresses html se terminant par - et 5 chiffres Et donc pour le reste c'est foutu? Linux tout seul sur le disque dur Acer Aspire T650 :=)) RAM: 1,7 Gio, CPU: Intel® Pentium(R) 4 CPU 3.06GHz × 2. Carte graphique Gallium 0.4 on ATI RC410 (Radeon Xpress200) Partition 1: Debian Jessie & Gnome Version 3.12, kernel 3.16.2-031602-generic Partition 2: Ubuntu Trusty 14.04 & Unity Hors ligne
Dernière news : Fedora-Fr aux 15èmes Rencontres Mondiales du Logiciel Libre Récemment, deux bugs ont été introduits dans le processus de mise à jour de Fedora 13, chacun de ces deux là ayant pour conséquence le fait que les mises à jour disponibles ne sont plus notifiées. Le seul remède est malheureusement de faire une mise à jour via la ligne de commande tant redoutée cris d'effrois dans l'assemblée Pour s'en sortir donc, il suffit de suivre la procédure suivante : 1. ouvrir un terminal, par exemple en ouvrant le menu Applications -> Outils système -> Terminal la foule hurle en courant les bras en l'air 2. s'identifier en tant qu'utilisateur root grâce à la commande : $ su - puis en tapant le mot de passe root 3. lancer la mise à jour des paquets problématiques avec la commande : # yum -y update gnome-packagekit selinux-policy 4. redémarrer votre ordinateur Les notifications de mise à jour devrait alors réapparaitre. soupirs de soulagement NB: Les mises à jour manuelle via l'outil graphique sont aussi impactées par ces deux bugs, ils est donc nécessaire de faire les mises à jour en ligne de commande comme indiqué ci-dessus. Celle-ci corrige donc bien évidemment aussi ce problème, et les mises à jour manuelles via l'outil graphique deviendront ainsi de nouveau possibles. acclamations, la foule est en délire L'annonce officielle du tout nouveau Fedora Project Leader est disponible (en anglais) sur les archives de la liste de diffusion es annonces globales. Il va sans dire que cette annonce est à diffuser le plus largement possible, pour toucher aussi les utilisateurs qui ne suivent pas le forum. Arg, entre ça et les dépendances cassés lors de mise-à-jours.... Décidément, aucune distribution n'est parfaites. (Mais sans ça Fedora le serait.) Merci beaucoup bochecha. Edit: Tien, c'est bizzare... Chez moi impossible de faire cette mise-à-jour manuelle. Dernière modification par korbé (31/07/2010 15:24:58) Portable: Lynx Levio 3510 (Gigabyte I1320) avec Fedora 15 Gnome. Media-Center: Assus EEEBox avec Fedora 15 XBMC. Idem pour moi : aucun paquet marqué pour mise à jour. Il est vrai que j'attends rarement d'avoir une notification, je procède toujours par yum update. Dernière modification par Tlaloc (31/07/2010 22:07:28) «Time present and time past Are both perhaps present in time future, And time future contained in time past.» T.S Eliot ça ne concerne que les utilisateurs de gnome et ceux qui auraient SElinux d'activé? Dernière modification par madko (31/07/2010 18:33:43) Linux, ya moins bien, mais c'est plus chèr!!! Fedora 19 x86_64 sur Asus N550JV.208 et Fedora 19 x86_64 sur Samsung NP740u3e | Mainteneur du paquet Darktable et glances OVH ADSL pour Internet, c'est plus libre que Free Comment ça elle ne marche pas? Linux, ya moins bien, mais c'est plus chèr!!! Fedora 19 x86_64 sur Asus N550JV.208 et Fedora 19 x86_64 sur Samsung NP740u3e | Mainteneur du paquet Darktable et glances OVH ADSL pour Internet, c'est plus libre que Free Comment ça elle ne marche pas? Bah cette mise à jour manuelle chez moi ne donne toujours rien, je voulais savoir si vous aussi. Lorsque je faisais la mise à jour conseillée, il ne se passait rien. Cependant, comme j'ai l'habitude de faire mes mises à jour en console, il est fort possible qu'elle ait déjà été faite. En effet, il y a quelques minutes je viens d'avoir une notification. Pour une fois j'ai fait les mises à jour par PackageKit : Il y en avait un certain nombre dont un changement de kernel. Dernière modification par Tlaloc (03/08/2010 18:32:01) «Time present and time past Are both perhaps present in time future, And time future contained in time past.» T.S Eliot madko a écrit : Comment ça elle ne marche pas? Bah cette mise à jour manuelle chez moi ne donne toujours rien, je voulais savoir si vous aussi. Ben, c'est que tu l'as déjà faite. De +, si tu relis le post initial, ça concerne gnome-packagekit et d'après ta signature, tu as sous KDE ! F20 / KDE 4.12 Il date de quand votre dernier backup ? Je suis sous KDE sous Arch64 (c'est mon laboratoire d'appli) Et sous gnome sur fedora. Tu as mal lu ma signature !! Mais pas de problème, j'ai du effectué cette mise à jour. Donc oubliez ma remarque J'ai vu ça qui s'est inscris dans la page : http://start.fedoraproject.org Après je fais mes mises-à-jour via yum update donc pas de problème Fedora 19 : 1 Dell XPS M1330, 1 fixe custom et 1 Dell Latitude 6430u J'avais pris la bonne habitude de ne pas passer par PackageKit mais par yum lors des mises-à-jour, j'ai bien fait. Il n'a jamais été très au point ce truc, il est tout juste bon à afficher les notifications. Linux, c'est la porte ouverte à toutes les fenêtres. Fil Rouge a écrit :madko a écrit : Comment ça elle ne marche pas? Bah cette mise à jour manuelle chez moi ne donne toujours rien, je voulais savoir si vous aussi. Ben, c'est que tu l'as déjà faite. De +, si tu relis le post initial, ça concerne gnome-packagekit et d'après ta signature, tu as sous KDE ! Donc j'ai fait une erreur dans le post original :-) Le problème se situe entre PackageKit et SELinux. Donc seuls ceux qui ne font les mises à jour que via PackageKit sur un système ayant SELinux d'activé ont pu être concernés. merci ça répond à ma question en #4 :) Linux, ya moins bien, mais c'est plus chèr!!! Fedora 19 x86_64 sur Asus N550JV.208 et Fedora 19 x86_64 sur Samsung NP740u3e | Mainteneur du paquet Darktable et glances OVH ADSL pour Internet, c'est plus libre que Free yum update Modules complémentaires chargés : refresh-packagekit Erreur : Cannot retrieve repository metadata (repomd.xml) for repository: fedora. Please verify its path and try again Bonjour, J'ai cette erreur suite a l'installation de F13 j'ai suivit cette procedure mais cela ne marche pas. J'ai bien accés à internet depuis l'ordinateur. J'ai fais l'installation depuis un live cd # yum -y update gnome-packagekit selinux-policy Modules complémentaires chargés : refresh-packagekit Erreur : Cannot retrieve repository metadata (repomd.xml) for repository: fedora. Please verify its path and try again merci Que renvoi "yum repolist"? Je pense pas que le problème soit le sujet de ce post. Linux, ya moins bien, mais c'est plus chèr!!! Fedora 19 x86_64 sur Asus N550JV.208 et Fedora 19 x86_64 sur Samsung NP740u3e | Mainteneur du paquet Darktable et glances OVH ADSL pour Internet, c'est plus libre que Free Perso çà renvoit : # yum -y update gnome-packagekit selinux-policy Modules complémentaires chargés : presto, refresh-packagekit Configuration du processus de mise à jour Aucun paquet marqué pour mise à jour En revanche la mise à jour de network manager empêche celui-ci de fonctionner, je suis avec le wi-fi, mise à jour d'aujourd'hui. Quelqu'un a t'il le souci ? yum repolist id du dépôt nom du dépôt statut adobe-linux-i386 Adobe Systems Incorporated 18 fedora Fedora 13 - i386 16 814 fedora-chromium Chromium web browser and deps 6 rpmfusion-free RPM Fusion for Fedora 13 - Free 400 rpmfusion-free-updates RPM Fusion for Fedora 13 - Free - Updates 578 rpmfusion-nonfree RPM Fusion for Fedora 13 - Nonfree 134 rpmfusion-nonfree-updates RPM Fusion for Fedora 13 - Nonfree - Updates 162 updates Fedora 13 - i386 - Updates 5 181 repolist: 23 293 Dernière modification par Phantom (08/09/2010 23:00:09) Que renvoi "yum repolist"? Je pense pas que le problème soit le sujet de ce post. ou plutôt un yum repolist all ... F19-x86_64 - KDE4.11.2 - ASUS M3A32-MVP Deluxe - AMD Phenom 8650 X3 - Nvidia GTX 260 - 4 Go RAM Corsair - 2 disques durs de 500 Go en Sata3 F18-x86_64 - KDE4.10.5 - Centos 6.0 - Windows 8 firefox-24.0-1.fc19.x86_64 - thunderbird-24.0-3.fc19.x86_64 Mise à Jour de PackageKit ce matin, après reboot les notifications graphiques de mise à jour reviennent. Version 0.6.6-2 de PackageKit ce matin me semble. J'étais pas réveillé. D'autres MàJ sont apparues que je n'ai pas faites dont un kernel 2.6.34.6-54 par exemple. Que renvoi "yum repolist"? Je pense pas que le problème soit le sujet de ce post. # yum repolist Modules complémentaires chargés : refresh-packagekit Erreur : Cannot retrieve metalink for repository: fedora. Please verify its path and try again # yum repolist all Modules complémentaires chargés : refresh-packagekit Erreur : Cannot retrieve metalink for repository: fedora. Please verify its path and try again toujours même erreur pb resolu j'avais pas la bonne date a mon ordinateur. Dernière modification par gpasquier (09/09/2010 18:49:44) En revanche la mise à jour de network manager empêche celui-ci de fonctionner, je suis avec le wi-fi, mise à jour d'aujourd'hui. Quelqu'un a t'il le souci ? Oui pour cnetworkmanager et non pour NetworkManager [Casper ~ 502]$ cnetworkmanager -a Traceback (most recent call last): File "/usr/bin/cnetworkmanager", line 178, in <module> aap = dev["ActiveAccessPoint"] File "/usr/lib/python2.6/site-packages/dbusclient/__init__.py", line 174, in __getitem__ value = super(DBusClient, self).__getitem__(key) File "/usr/lib/python2.6/site-packages/dbusclient/__init__.py", line 77, in __getitem__ return pmi.Get(iface, key, byte_arrays=True) File "/usr/lib/python2.6/site-packages/dbus/proxies.py", line 68, in __call__ return self._proxy_method(*args, **keywords) File "/usr/lib/python2.6/site-packages/dbus/proxies.py", line 140, in __call__ **keywords) File "/usr/lib/python2.6/site-packages/dbus/connection.py", line 630, in call_blocking message, timeout) dbus.exceptions.DBusException: org.freedesktop.DBus.Error.AccessDenied: Property "ActiveAccessPoint" of interface "org.freedesktop.NetworkManager.Device" isn't exported (or may not exist) [Casper ~ 503]$ Il y a quelques erreurs effectivement... edit: faut que j'essaie nmcli (NetworkmanagerClient pour la console) Dernière modification par C@sp€r (09/09/2010 18:51:22) Bonjour j'ai le même message d'erreur que Phantom lorsuqe je lance la commande yum -y update gnome-packagekit selinux-policy le messgae suivant s'affiche Modules complémentaires chargés : presto, refresh-packagekit Configuration du processus de mise à jour Aucun paquet marqué pour mise à jour et la commande yum repolist renvoi Modules complémentaires chargés : presto, refresh-packagekitid du dépôt nom du dépôt statutfedora Fedora 13 - x86_64 activé: 20 840fedora-debuginfo Fedora 13 - x86_64 - Deb désactivéfedora-source Fedora 13 - Source désactivéremi Les RPM de remi pour Fed désactivéremi-test Les RPM de remi en test désactivérpmfusion-free RPM Fusion for Fedora 13 activé: 457rpmfusion-free-debuginfo RPM Fusion for Fedora 13 désactivérpmfusion-free-rawhide RPM Fusion for Fedora Ra désactivérpmfusion-free-rawhide-debuginfo RPM Fusion for Fedora Ra désactivérpmfusion-free-rawhide-source RPM Fusion for Fedora Ra désactivérpmfusion-free-source RPM Fusion for Fedora 13 désactivérpmfusion-free-updates RPM Fusion for Fedora 13 activé: 469rpmfusion-free-updates-debuginfo RPM Fusion for Fedora 13 désactivérpmfusion-free-updates-source RPM Fusion for Fedora 13 désactivérpmfusion-free-updates-testing RPM Fusion for Fedora 13 désactivérpmfusion-free-updates-testing-debuginfo RPM Fusion for Fedora 13 désactivérpmfusion-free-updates-testing-source RPM Fusion for Fedora 13 désactivéupdates Fedora 13 - x86_64 - Upd activé: 6 278updates-debuginfo Fedora 13 - x86_64 - Upd désactivéupdates-source Fedora 13 - Updates Sour désactivéupdates-testing Fedora 13 - x86_64 - Tes désactivéupdates-testing-debuginfo Fedora 13 - x86_64 - Tes désactivéupdates-testing-source Fedora 13 - Test Updates désactivérepolist: 28 044[root@localhost Documents]# yum -y update gnome-packagekit selinux-policyModules complémentaires chargés : presto, refresh-packagekitConfiguration du processus de mise à jourAucun paquet marqué pour mise à jour[root@localhost Documents]# yum repolistModules complémentaires chargés : presto, refresh-packagekitid du dépôt nom du dépôt statutfedora Fedora 13 - x86_64 20 840rpmfusion-free RPM Fusion for Fedora 13 - Free 457rpmfusion-free-updates RPM Fusion for Fedora 13 - Free - Updates 469updates Fedora 13 - x86_64 - Updates 6 278repolist: 28 044 al'aide pafin que je puisse mettre a jour fedora 13 car j'arrive pas a écouter du son lorsque je lis des fichier video avec vlc seule les images sont ok ais pas de son. merci Quel rapport avec la mise à jour ? «Time present and time past Are both perhaps present in time future, And time future contained in time past.» T.S Eliot
plasticgoat [Résolu] message d'erreur au lancement de synaptic Voilà le message d'erreur : W: Impossible de localiser la liste des paquets sources http://fr.archive.ubuntu.com breezy/universe Packages (/var/lib/apt/lists/fr.archive.ubuntu.com_ubuntu_dists_breezy_universe_binary-i386_Packages) - stat (2 Aucun fichier ou répertoire de ce type) W: Impossible de localiser la liste des paquets sources http://fr.archive.ubuntu.com breezy/main Packages (/var/lib/apt/lists/fr.archive.ubuntu.com_ubuntu_dists_breezy_main_binary-i386_Packages) - stat (2 Aucun fichier ou répertoire de ce type) W: Impossible de localiser la liste des paquets sources http://fr.archive.ubuntu.com breezy/restricted Packages (/var/lib/apt/lists/fr.archive.ubuntu.com_ubuntu_dists_breezy_restricted_binary-i386_Packages) - stat (2 Aucun fichier ou répertoire de ce type) W: Impossible de localiser la liste des paquets sources http://fr.archive.ubuntu.com breezy/multiverse Packages (/var/lib/apt/lists/fr.archive.ubuntu.com_ubuntu_dists_breezy_multiverse_binary-i386_Packages) - stat (2 Aucun fichier ou répertoire de ce type) W: Impossible de localiser la liste des paquets sources http://fr.archive.ubuntu.com breezy-updates/main Packages (/var/lib/apt/lists/fr.archive.ubuntu.com_ubuntu_dists_breezy-updates_main_binary-i386_Packages) - stat (2 Aucun fichier ou répertoire de ce type) W: Impossible de localiser la liste des paquets sources http://fr.archive.ubuntu.com breezy-updates/restricted Packages (/var/lib/apt/lists/fr.archive.ubuntu.com_ubuntu_dists_breezy-updates_restricted_binary-i386_Packages) - stat (2 Aucun fichier ou répertoire de ce type) c grave docteur(s) ? Dernière modification par plasticgoat (Le 20/10/2005, à 08:17) Hors ligne sylware Re : [Résolu] message d'erreur au lancement de synaptic A priori, cela est du à la surcharge des serveurs à cause des download des ISOs de CD/DVD de la Debian Ubuntu Breezy GNU/Linux. Mais pour être sûr, poste dans ce fil de discussion ton fichier /etc/apt/sources.list. Hors ligne plasticgoat Re : [Résolu] message d'erreur au lancement de synaptic voilà mon fichier deb cdrom:[Ubuntu 5.10 _Breezy Badger_ - Release i386 (20051012)]/ breezy main restricted deb-src http://fr.archive.ubuntu.com/ubuntu breezy main restricted ## Major bug fix updates produced after the final release of the ## distribution. deb http://fr.archive.ubuntu.com/ubuntu breezy-updates main restricted deb-src http://fr.archive.ubuntu.com/ubuntu breezy-updates main restricted ## Uncomment the following two lines to add software from the 'universe' ## repository. ## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu ## team, and may not be under a free licence. Please satisfy yourself as to ## your rights to use the software. Also, please note that software in ## universe WILL NOT receive any review or updates from the Ubuntu security ## team. deb http://fr.archive.ubuntu.com/ubuntu breezy universe main restricted multiverse # deb-src http://fr.archive.ubuntu.com/ubuntu breezy universe ## Uncomment the following two lines to add software from the 'backports' ## repository. ## N.B. software from this repository may not have been tested as ## extensively as that contained in the main release, although it includes ## newer versions of some applications which may provide useful features. ## Also, please note that software in backports WILL NOT receive any review ## or updates from the Ubuntu security team. # deb http://fr.archive.ubuntu.com/ubuntu breezy-backports main restricted universe multiverse # deb-src http://fr.archive.ubuntu.com/ubuntu breezy-backports main restricted universe multiverse # deb http://security.ubuntu.com/ubuntu breezy-security main restricted # deb-src http://security.ubuntu.com/ubuntu breezy-security main restricted # deb http://security.ubuntu.com/ubuntu breezy-security universe # deb-src http://security.ubuntu.com/ubuntu breezy-security universe Hors ligne sylware Re : [Résolu] message d'erreur au lancement de synaptic Voilà le mien qui marche: deb cdrom:[Ubuntu 5.10 _Breezy Badger_ - Release powerpc (20051012)]/ breezy main restricted deb http://fr.archive.ubuntu.com/ubuntu breezy main restricted universe multiverse deb-src http://fr.archive.ubuntu.com/ubuntu breezy main restricted universe multiverse ## Major bug fix updates produced after the final release of the ## distribution. deb http://fr.archive.ubuntu.com/ubuntu breezy-updates main restricted universe multiverse deb-src http://fr.archive.ubuntu.com/ubuntu breezy-updates main restricted universe multiverse ## Uncomment the following two lines to add software from the 'universe' ## repository. ## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu ## team, and may not be under a free licence. Please satisfy yourself as to ## your rights to use the software. Also, please note that software in ## universe WILL NOT receive any review or updates from the Ubuntu security ## team. # deb http://fr.archive.ubuntu.com/ubuntu breezy universe # deb-src http://fr.archive.ubuntu.com/ubuntu breezy universe ## Uncomment the following two lines to add software from the 'backports' ## repository. ## N.B. software from this repository may not have been tested as ## extensively as that contained in the main release, although it includes ## newer versions of some applications which may provide useful features. ## Also, please note that software in backports WILL NOT receive any review ## or updates from the Ubuntu security team. # deb http://fr.archive.ubuntu.com/ubuntu breezy-backports main restricted universe multiverse # deb-src http://fr.archive.ubuntu.com/ubuntu breezy-backports main restricted universe multiverse deb http://security.ubuntu.com/ubuntu breezy-security main restricted universe multiverse deb-src http://security.ubuntu.com/ubuntu breezy-security main restricted universe multiverse # deb http://security.ubuntu.com/ubuntu breezy-security universe # deb-src http://security.ubuntu.com/ubuntu breezy-security universe Hors ligne plasticgoat Re : [Résolu] message d'erreur au lancement de synaptic finalement le message n'apparait plus, c'était surement du à la surcharge des serveurs. Est ce que je dois quand même modifier mon fichier ? Hors ligne calimarno Re : [Résolu] message d'erreur au lancement de synaptic Salut plasticgoat, Non, tu peux garder le tien qui est tout à fait correct! Bonne journée! Hors ligne sylware Re : [Résolu] message d'erreur au lancement de synaptic Si tu pouvais éditer ton premier message pour ajouter au début du titre résolu... merfi... Hors ligne plasticgoat Re : [Résolu] message d'erreur au lancement de synaptic oups j'avis oublié ... Hors ligne
McPeter Re : Un 'autre' générateur de sources.list en ligne Je viens de rectifier le soucis sur le nom du fichier bash. Par contre je ne vois aucun soucis à l'exécution :\ pourrais tu me dire quel navigateur tu as utilisé et quel message d'erreur ça te renvoit ? (un copié/collé du message) le chmod +x est inutile là puisque qu'on utilise directement 'bash fichier' (plus simple poru le commun des mortels ) Hors ligne McPeter Re : Un 'autre' générateur de sources.list en ligne Hors ligne McPeter Re : Un 'autre' générateur de sources.list en ligne noruas beep ?! Hors ligne SoCko Re : Un 'autre' générateur de sources.list en ligne Merci à toi McPeter pour ce générateur. Très utile et ergonomique, on a un sources.list en une dizaine de secondes! Hors ligne tripes_inn Re : Un 'autre' générateur de sources.list en ligne merci je reinstalle tout et je suis bien content de ne pas avoir à tout retrouver. j'ai dù corriger le fichier pour que synaptic l'accepte. je joins l'original ############################################################################### ## UBUNTU LUCID LYNX - 10.04 - Date : 11/09/2010 ## ## SOURCES.LIST GENERATOR Version 0.3-10.04 ## http://sources-list.ubuntu-fr-secours.org ############################################################################### # deb cdrom:[Ubuntu 10.04 LTS _Lucid Lynx_ - Release i386 (20100429)]/ lucid main restricted ## DEPOTS PRINCIPAUX deb http://archive.ubuntu.com/ubuntu lucid main # deb-src http://archive.ubuntu.com/ubuntu lucid main ## DEPOTS RESTRICTED deb http://archive.ubuntu.com/ubuntu lucid restricted # deb-src http://archive.ubuntu.com/ubuntu lucid restricted ## DEPOTS UNIVERSE (logiciels libres) deb http://archive.ubuntu.com/ubuntu lucid universe # deb-src http://archive.ubuntu.com/ubuntu lucid universe ## DEPOTS MULTIVERSE (logiciels non-libres) deb http://archive.ubuntu.com/ubuntu lucid multiverse # deb-src http://archive.ubuntu.com/ubuntu lucid multiverse ## DEPOTS DE MISES A JOUR EN PRES VERSION # deb http://archive.ubuntu.com/ubuntu lucid-proposed main restricted universe multiverse # deb-src http://archive.ubuntu.com/ubuntu lucid-proposed ## DEPOTS NON PRIS EN CHARGE # deb http://archive.ubuntu.com/ubuntu lucid-backports main restricted universe multiverse # deb-src http://archive.ubuntu.com/ubuntu lucid-backports ## DEPOTS DE MISES A JOUR DE SECURITE deb http://archive.ubuntu.com/ubuntu lucid-security main restricted universe multiverse # deb-src http://archive.ubuntu.com/ubuntu lucid-security ## DEPOTS DE MISES A JOUR IMPORTANTES deb http://archive.ubuntu.com/ubuntu lucid-updates main restricted universe multiverse # deb-src http://archive.ubuntu.com/ubuntu lucid-updates ## DEPOTS COMMERCIAUX deb http://archive.canonical.com/ubuntu lucid partner # deb-src http://archive.canonical.com/ubuntu lucid partner deb http://archive.canonical.com/ubuntu lucid-security partner # deb-src http://archive.canonical.com/ubuntu lucid-security partner deb http:// j'ai supprimer deb http:// à la fin et c bon Samsung N145 plus ubuntu 11.10 64 bits Hors ligne alej Re : Un 'autre' générateur de sources.list en ligne Et en ne gardant que les lignes utiles, en synthèse, ça donne ça... deb http://archive.ubuntu.com/ubuntu lucid main restricted universe multiverse deb http://archive.ubuntu.com/ubuntu lucid-security main restricted universe multiverse deb http://archive.ubuntu.com/ubuntu lucid-updates main restricted universe multiverse deb http://archive.canonical.com/ubuntu lucid partner deb http://archive.canonical.com/ubuntu lucid-security partner Un peu + digeste, non ? Hors ligne McPeter Re : Un 'autre' générateur de sources.list en ligne Tu as surtout loupé ton copié/collé ... le dépôt canonical c'est ça : ## DEPOTS COMMERCIAUX deb http://archive.canonical.com/ubuntu lucid partner # deb-src http://archive.canonical.com/ubuntu lucid partner deb http://archive.canonical.com/ubuntu lucid-security partner # deb-src http://archive.canonical.com/ubuntu lucid-security partner deb http://archive.canonical.com/ubuntu lucid-updates partner # deb-src http://archive.canonical.com/ubuntu lucid-updates partner Donc soit y'a eu un bug au téléchargement du sources.list soit tu t'es loupé au copié/collé ... mais c'est pas juste une histoire de "supprimer la fin". merci je reinstalle tout et je suis bien content de ne pas avoir à tout retrouver. j'ai dù corriger le fichier pour que synaptic l'accepte. je joins l'original ... ## DEPOTS COMMERCIAUX deb http://archive.canonical.com/ubuntu lucid partner # deb-src http://archive.canonical.com/ubuntu lucid partner deb http://archive.canonical.com/ubuntu lucid-security partner # deb-src http://archive.canonical.com/ubuntu lucid-security partner deb http:// j'ai supprimer deb http:// à la fin et c bon Hors ligne McPeter Re : Un 'autre' générateur de sources.list en ligne Et en ne gardant que les lignes utiles, en synthèse, ça donne ça... deb http://archive.ubuntu.com/ubuntu lucid main restricted universe multiverse deb http://archive.ubuntu.com/ubuntu lucid-security main restricted universe multiverse deb http://archive.ubuntu.com/ubuntu lucid-updates main restricted universe multiverse deb http://archive.canonical.com/ubuntu lucid partner deb http://archive.canonical.com/ubuntu lucid-security partner Un peu + digeste, non ? Pas compris l'intervention là :\ ça vient faire quoi dans la choucroute ? Hors ligne jajaX Re : Un 'autre' générateur de sources.list en ligne salut la mise à jour pour avoir un source.list pour la 10.10 est prévue pour quand ? @+ jajaX [Membre de Breizhtux : LUG de Saint Brieuc]ACER Aspire 8930G 904G50Bn & HP dv7 2230f sous Kubuntu 14.04 The Trusty Tahr (64 bits) & KDE SC 4.13.2/Amarok 2.8ACER Aspire 5612 WLMI & HP Compaq NX6310 sous kubuntu 14.04 The Trusty Tahr (32 bits) & KDE SC 4.13.2/Amarok 2.8 Hors ligne noruas Re : Un 'autre' générateur de sources.list en ligne Désolé, j'étais parti faire un tour sur Arch ^^ Au sujet de mon problème, j'ai essayé toutes les possibilités de ton générateur sur 5 machines, dont une m'appartenant, j'ai toujours eu des soucis. La génération des scripts automatiques se fait chez moi (sources.list_autoFull ou sources.list_autoFiles), mais ne renvois que des erreurs lorsque j'arrive à l'étape 5... donc échec. Je suis donc obligé de me rabattre sur la génération d'un "sources.list" et d'un "sources.list_key", mais même là, j'ai des soucis. En fait, j'ai le même problème que "tripes_inn", mon fichier "sources.list" n'est jamais généré complètement, il m'en manque plus de la moitié et il se termine en général par "deb http://"... Par contre, j'ai beau le régénérer plusieurs fois, ce n'est jamais à la même ligne que ça s'arrête, mais il n'est jamais complet. Pour solutionner ça, je copie colle le sources.list affiché dans le récapitulatif dans le fichier qu'il était censé me générer. Ensuite, tout se passe à merveille. Hors ligne McPeter Re : Un 'autre' générateur de sources.list en ligne Oui j'ai malheureusement vu qu'il y avait un gros bug sur le script de génération Je vais donc reprendre tout ça proprement La version 10.10 arrive bientôt Hors ligne noruas Re : Un 'autre' générateur de sources.list en ligne Oui et avec la version 10.10 le dépot officiel EXTRAS. PS: après test sur ma Maverick fraichement installée, il n'y a que le dépot Exaile qui n'existe pas encore pour cette version d'Ubuntu, les autres sont déjà opérationels. Dernière modification par noruas (Le 11/10/2010, à 14:59) Hors ligne Isaric Re : Un 'autre' générateur de sources.list en ligne Je n'ai pas vu les dépôts alors, je les proposes : ekiga (pas encore en 10.10 - 64) ppa:sevmek/ppa%%%empathy%%% ppa:telepathy/ppa SFLphone doc deb http://ppa.launchpad.net/savoirfairelinux/ppa/ubuntu YOUR_UBUNTU_VERSION_HERE mainBrasero (pas encore en 10.10 - 64) ppa:renbag/ppaVlc ppa:n-muench/vlcGnome Nanny (pour les version antérieures à 10.10) ppa:nanny/ppaXbmc (doit être vide en 10.10-64) ppa:henningpingel/xbmc "Être bahá'í signifie simplement aimer la terre toute entière, aimer l'humanité et essayer de la servir, travailler pour la paix universelle et la famille humaine" 'Abdul'l-Bahá "Vouloir s'ouvrir aux autres n'est pas une preuve de faiblesse, c'est une preuve d'intelligence" Matthieu Ricard. ma config Hors ligne Isaric Re : Un 'autre' générateur de sources.list en ligne Aussi gThumb : ppa:webupd8team/gthumb "Être bahá'í signifie simplement aimer la terre toute entière, aimer l'humanité et essayer de la servir, travailler pour la paix universelle et la famille humaine" 'Abdul'l-Bahá "Vouloir s'ouvrir aux autres n'est pas une preuve de faiblesse, c'est une preuve d'intelligence" Matthieu Ricard. ma config Hors ligne Isaric Re : Un 'autre' générateur de sources.list en ligne Dernière modification par Isaric (Le 20/10/2010, à 12:14) "Être bahá'í signifie simplement aimer la terre toute entière, aimer l'humanité et essayer de la servir, travailler pour la paix universelle et la famille humaine" 'Abdul'l-Bahá "Vouloir s'ouvrir aux autres n'est pas une preuve de faiblesse, c'est une preuve d'intelligence" Matthieu Ricard. ma config Hors ligne caracolito Re : Un 'autre' générateur de sources.list en ligne Excellent travail Génial ! il manque seulement la dernière version: la 10.10 Les arbres enseignent la patience: Ils ne baissent pas les bras à la première tempête venue. [C.Beaupré] Quand je ne sais pas traduire je me fais aider par: translate.google.com Hors ligne McPeter Re : Un 'autre' générateur de sources.list en ligne ça va venir ... ainsi que la correction du bug sur la création des sources auto Hors ligne serviteur Re : Un 'autre' générateur de sources.list en ligne salut, J'ai installé recement Ubuntu Ultimate Edition 2.7 base sur lucid 10.04 LTS ; Voici mon ficuier source.list : #deb cdrom:[Ubuntu 10.04 LTS _Lucid Lynx_ - Release i386 (20100429)]/ lucid main restricted # See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to # newer versions of the distribution. deb http://us.archive.ubuntu.com/ubuntu/ lucid main restricted deb-src http://us.archive.ubuntu.com/ubuntu/ lucid main restricted ## Major bug fix updates produced after the final release of the ## distribution. deb http://us.archive.ubuntu.com/ubuntu/ lucid-updates main restricted deb-src http://us.archive.ubuntu.com/ubuntu/ lucid-updates main restricted ## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu ## team. Also, please note that software in universe WILL NOT receive any ## review or updates from the Ubuntu security team. deb http://us.archive.ubuntu.com/ubuntu/ lucid universe deb-src http://us.archive.ubuntu.com/ubuntu/ lucid universe deb http://us.archive.ubuntu.com/ubuntu/ lucid-updates universe deb-src http://us.archive.ubuntu.com/ubuntu/ lucid-updates universe ## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu ## team, and may not be under a free licence. Please satisfy yourself as to ## your rights to use the software. Also, please note that software in ## multiverse WILL NOT receive any review or updates from the Ubuntu ## security team. deb http://us.archive.ubuntu.com/ubuntu/ lucid multiverse deb-src http://us.archive.ubuntu.com/ubuntu/ lucid multiverse deb http://us.archive.ubuntu.com/ubuntu/ lucid-updates multiverse deb-src http://us.archive.ubuntu.com/ubuntu/ lucid-updates multiverse ## Uncomment the following two lines to add software from the 'backports' ## repository. ## N.B. software from this repository may not have been tested as ## extensively as that contained in the main release, although it includes ## newer versions of some applications which may provide useful features. ## Also, please note that software in backports WILL NOT receive any review ## or updates from the Ubuntu security team. # deb http://us.archive.ubuntu.com/ubuntu/ lucid-backports main restricted universe multiverse # deb-src http://us.archive.ubuntu.com/ubuntu/ lucid-backports main restricted universe multiverse ## Uncomment the following two lines to add software from Canonical's ## 'partner' repository. ## This software is not part of Ubuntu, but is offered by Canonical and the ## respective vendors as a service to Ubuntu users. # deb http://archive.canonical.com/ubuntu lucid partner # deb-src http://archive.canonical.com/ubuntu lucid partner deb http://security.ubuntu.com/ubuntu lucid-security main restricted deb-src http://security.ubuntu.com/ubuntu lucid-security main restricted deb http://security.ubuntu.com/ubuntu lucid-security universe deb-src http://security.ubuntu.com/ubuntu lucid-security universe deb http://security.ubuntu.com/ubuntu lucid-security multiverse deb-src http://security.ubuntu.com/ubuntu lucid-security multiverse je n'arrive pas à faire une mise à jour pour pourvoir installé des logiciels dans les dépots que ce par le terminal ou synaptic en tapant sudo apt-get update voici les erreurs : creation@creation-Jesus:~$ sudo apt-get update Atteint http://archive.canonical.com lucid Release.gpg Ign http://archive.canonical.com/ubuntu/ lucid/partner Translation-fr Réception de : 1 http://security.ubuntu.com lucid-security Release.gpg [198B] Ign http://security.ubuntu.com/ubuntu/ lucid-security/main Translation-fr Atteint http://packages.medibuntu.org karmic Release.gpg Ign http://packages.medibuntu.org/ karmic/free Translation-fr Ign http://packages.medibuntu.org/ karmic/non-free Translation-fr Atteint http://ppa.launchpad.net lucid Release.gpg Ign http://ppa.launchpad.net/compiz/ubuntu/ lucid/main Translation-fr Réception de : 2 http://us.archive.ubuntu.com lucid Release.gpg [189B] Réception de : 3 http://us.archive.ubuntu.com/ubuntu/ lucid/main Translation-fr [452kB] Atteint http://archive.canonical.com lucid Release Ign http://security.ubuntu.com/ubuntu/ lucid-security/restricted Translation-fr Ign http://security.ubuntu.com/ubuntu/ lucid-security/universe Translation-fr Ign http://security.ubuntu.com/ubuntu/ lucid-security/multiverse Translation-fr Réception de : 4 http://security.ubuntu.com lucid-security Release [38,5kB] Atteint http://packages.medibuntu.org karmic Release Atteint http://ppa.launchpad.net lucid Release Réception de : 5 http://downloadue.info lucid Release.gpg Ign http://downloadue.info/repo/ lucid/all Translation-fr Atteint http://archive.canonical.com lucid/partner Packages Atteint http://packages.medibuntu.org karmic/free Packages Atteint http://ppa.launchpad.net lucid/main Packages Ign http://downloadue.info lucid Release Atteint http://packages.medibuntu.org karmic/non-free Packages Ign http://deb.playonlinux.com lucid Release.gpg Ign http://downloadue.info lucid/all Packages Ign http://deb.playonlinux.com/ lucid/main Translation-fr Réception de : 6 http://deb.playonlinux.com lucid Release [1 722B] Ign http://downloadue.info lucid/all Packages Réception de : 7 http://us.archive.ubuntu.com/ubuntu/ lucid/restricted Translation-fr [2 628B] Réception de : 8 http://us.archive.ubuntu.com/ubuntu/ lucid/universe Translation-fr [702kB] Ign http://deb.playonlinux.com lucid/main Packages Err http://downloadue.info lucid/all Packages Connexion à downloadue.info: 80 (174.120.62.91) impossible. - connect (110: Connexion terminée par expiration du délai d'attente) Ign http://security.ubuntu.com lucid-security/main Packages Réception de : 9 http://security.ubuntu.com lucid-security/restricted Packages [14B] Réception de : 10 http://security.ubuntu.com lucid-security/main Sources [35,2kB] Réception de : 11 http://us.archive.ubuntu.com/ubuntu/ lucid/multiverse Translation-fr [83,2kB] Réception de : 12 http://us.archive.ubuntu.com lucid-updates Release.gpg [198B] Ign http://us.archive.ubuntu.com/ubuntu/ lucid-updates/main Translation-fr Ign http://us.archive.ubuntu.com/ubuntu/ lucid-updates/restricted Translation-fr Ign http://us.archive.ubuntu.com/ubuntu/ lucid-updates/universe Translation-fr Ign http://us.archive.ubuntu.com/ubuntu/ lucid-updates/multiverse Translation-fr Réception de : 13 http://us.archive.ubuntu.com lucid Release [57,2kB] Ign http://security.ubuntu.com lucid-security/restricted Sources Réception de : 14 http://us.archive.ubuntu.com lucid-updates Release [44,7kB] Réception de : 15 http://security.ubuntu.com lucid-security/universe Packages [47,5kB] Réception de : 16 http://us.archive.ubuntu.com lucid/main Packages [1 386kB] Ign http://deb.playonlinux.com lucid/main Packages Réception de : 17 http://security.ubuntu.com lucid-security/universe Sources [12,9kB] Atteint http://deb.playonlinux.com lucid/main Packages Réception de : 18 http://security.ubuntu.com lucid-security/multiverse Packages [2 013B] Ign http://security.ubuntu.com lucid-security/multiverse Sources Ign http://security.ubuntu.com lucid-security/main Packages Ign http://security.ubuntu.com lucid-security/restricted Sources Ign http://security.ubuntu.com lucid-security/multiverse Sources Réception de : 19 http://security.ubuntu.com lucid-security/main Packages [129kB] Réception de : 20 http://security.ubuntu.com lucid-security/restricted Sources [20B] Réception de : 21 http://security.ubuntu.com lucid-security/multiverse Sources [572B] Ign http://us.archive.ubuntu.com lucid/restricted Packages Réception de : 22 http://us.archive.ubuntu.com lucid/main Sources [659kB] Réception de : 23 http://us.archive.ubuntu.com lucid/restricted Sources [3 775B] Réception de : 24 http://us.archive.ubuntu.com lucid/universe Packages [5 448kB] Réception de : 25 http://us.archive.ubuntu.com lucid/universe Sources [3 165kB] Réception de : 26 http://us.archive.ubuntu.com lucid/multiverse Packages [180kB] Réception de : 27 http://us.archive.ubuntu.com lucid/multiverse Sources [119kB] Réception de : 28 http://us.archive.ubuntu.com lucid-updates/main Packages [343kB] Réception de : 29 http://us.archive.ubuntu.com lucid-updates/restricted Packages [3 240B] Réception de : 30 http://us.archive.ubuntu.com lucid-updates/main Sources [134kB] Réception de : 31 http://us.archive.ubuntu.com lucid-updates/restricted Sources [1 443B] Réception de : 32 http://us.archive.ubuntu.com lucid-updates/universe Packages [146kB] Réception de : 33 http://us.archive.ubuntu.com lucid-updates/universe Sources [56,8kB] Réception de : 34 http://us.archive.ubuntu.com lucid-updates/multiverse Packages [7 373B] Réception de : 35 http://us.archive.ubuntu.com lucid-updates/multiverse Sources [3 669B] Ign http://us.archive.ubuntu.com lucid/restricted Packages Réception de : 36 http://us.archive.ubuntu.com lucid/restricted Packages [6 133B] 13,3Mo réceptionnés en 36min 59s (5 977o/s) W: Impossible de récupérer http://downloadue.info/repo/dists/lucid/all/binary-i386/Packages.gz Connexion à downloadue.info: 80 (174.120.62.91) impossible. - connect (110: Connexion terminée par expiration du délai d'attente) E: Le téléchargement de quelques fichiers d'index a échoué, ils ont été ignorés, ou les anciens ont été utilisés à la place. creation@creation-Jesus:~$ J'aimerai avoir si c'est possible une source.list adaptée avec le serveur national d' Afrique du Sud car il est proche du pays où je reside. Cordialement " Le disciple (serviteur) n'est pas plus que le maître; mais tout disciple accompli sera comme son maître." Luc 6:40; Jean. 13:16 '' J'ai été crucifié avec Christ et je suis une nouvelle création" GAL2: 20, 2Cor5: 17, Rom 6:6-7 HP xw4600 Workstation: Intel Core 2 Quad Q9300, 4 GB RAM, Nvidia Geforce GTX 580 Hors ligne jajaX Re : Un 'autre' générateur de sources.list en ligne salut tu as 2 serveurs qui ne répondent pas. pas grave retente plus tard ou demain. @+ jajaX [Membre de Breizhtux : LUG de Saint Brieuc]ACER Aspire 8930G 904G50Bn & HP dv7 2230f sous Kubuntu 14.04 The Trusty Tahr (64 bits) & KDE SC 4.13.2/Amarok 2.8ACER Aspire 5612 WLMI & HP Compaq NX6310 sous kubuntu 14.04 The Trusty Tahr (32 bits) & KDE SC 4.13.2/Amarok 2.8 Hors ligne wendyam Re : Un 'autre' générateur de sources.list en ligne Bonjour les amis!!! J'ai télécharger ubuntu sur le même site qui a une taille de 695 Mo en image et je n'arrive pas a le gravé ni de l'installer en utilisant un cd virtuel. Alors ou je peu trouve le bon pour télécharger ? Merci Hors ligne noruas Re : Un 'autre' générateur de sources.list en ligne @Isaric Je ne comprend pas bien tes demandes, je suis également sous 10.10 64bits et tous les logiciels que tu cites sont disponibles chez moi, certains faisant partie intégrante de l'installation de base d'Ubuntu (Brasero, Empathy par exemple....) Hors ligne Isaric Re : Un 'autre' générateur de sources.list en ligne En général c'est les dernières versions, que l'on trouve rarement pas dans les dépôts. Dernière modification par Isaric (Le 28/11/2010, à 18:52) "Être bahá'í signifie simplement aimer la terre toute entière, aimer l'humanité et essayer de la servir, travailler pour la paix universelle et la famille humaine" 'Abdul'l-Bahá "Vouloir s'ouvrir aux autres n'est pas une preuve de faiblesse, c'est une preuve d'intelligence" Matthieu Ricard. ma config Hors ligne noruas Re : Un 'autre' générateur de sources.list en ligne Très bonne remarque, même s'il est vrai que je n'utilise les ppa que lorsqu'un soft me pose problèmes ou n'est pas stable sur ma machine. D'ailleurs, ne serait-il pas judicieux de rajouter un onglet dans le générateur pour les dépots ppa les plus couramment demandés/utilisés ? Ainsi nous garderions un onglet des logiciels additionnels (non installés par défaut dans la distribution) et un autre avec les dépôts ppa pour bénéficier d'une version plus récente et "en général" moins buggée d'une appli... PS: j'aime bien ton blog "feuille de route" et astuces ^^ Dernière modification par noruas (Le 28/11/2010, à 19:11) Hors ligne blattes86 Re : Un 'autre' générateur de sources.list en ligne Bonjour @ tous. Je viens pour lancer un appel a Azema McPeter. Je vous explique, pour mon boulot ( je travaille dans une association prônant le logiciel libre ) je vais avoir besoin de créer une grosse documentation afin d'aider les personnes voulant reproduire notre association. Utilisant le logiciel apt-mirror , je comptais créer une générateur permettant d'afficher aussi bien un sources.list qu'un mirror.list afin d'éviter aux personnes de ce prendre autant la tête que moi. Néanmoins, n'aimant pas réinventé la roue et étant aussi un peu fainéant, j'aurais voulu savoir si un projet tel que celui-ci pouvait vous intéresser ou au pire si votre code source était disponible dans un coin perdu du net. En espérant, une réponse favorable de votre part. Dernière modification par blattes86 (Le 13/01/2011, à 16:21) Hors ligne
Goffi [résolu] [Kubuntu Dapper] paquet cassé Bonjour, j'ai fait la mise à jour récemment pour Dapper, et je me retrouve avec des paquets cassés. outre amarok (résolu en ajoutant le dépôt deb http://kubuntu.org/packages/amarok-14 dapper main), je ne peux pas installer libsdl-gfx1.2-dev, j'obtiens le message d'erreur suivant: Les paquets suivants contiennent des dépendances non satisfaites : libsdl-gfx1.2-dev: Dépend: libsdl1.2-dev (>= 1.2.5-3) mais ne sera pas installé E: Paquets défectueux zsh: exit 100 sudo apt-get install libsdl-gfx1.2-dev Puis en remontant les dépendances, le paquet incriminé est mesa-common-dev: Les paquets suivants contiennent des dépendances non satisfaites : libgl1-mesa-dev: Dépend: mesa-common-dev (= 6.4.1-0ubuntu8) mais 6.5.1-0ubuntu14 devra être installé E: Paquets défectueux Certainement un problème dans mon source.list (j'ai fait la mise à jour sous vi avec un s/breezy/dapper/g), le voici: deb http://us.archive.ubuntu.com/ubuntu dapper main restricted deb-src http://us.archive.ubuntu.com/ubuntu dapper main restricted deb http://us.archive.ubuntu.com/ubuntu dapper-updates main restricted deb-src http://us.archive.ubuntu.com/ubuntu dapper-updates main restricted deb http://us.archive.ubuntu.com/ubuntu dapper universe multiverse deb-src http://us.archive.ubuntu.com/ubuntu dapper universe multiverse deb http://us.archive.ubuntu.com/ubuntu dapper-backports main restricted universe multiverse deb-src http://us.archive.ubuntu.com/ubuntu dapper-backports main restricted universe multiverse deb http://security.ubuntu.com/ubuntu dapper-security main restricted deb-src http://security.ubuntu.com/ubuntu dapper-security main restricted deb http://security.ubuntu.com/ubuntu dapper-security universe multiverse deb-src http://security.ubuntu.com/ubuntu dapper-security universe multiverse #wormux deb http://download.gna.org/wormux/debs dapper-amd64/ # Ubuntu Breezy amd64 deb http://archive.czessi.net/ubuntu/ dapper main #pour XGL deb http://www.beerorkid.com/compiz/ dapper main deb http://xgl.compiz.info dapper main #Amarok deb http://kubuntu.org/packages/amarok-14 dapper main Bon j'ai d'autres problèmes, mais celui-ci est le plus urgent . Merci d'avance Dernière modification par Goffi (Le 10/06/2006, à 21:55) http://www.goffi.org Hors ligne Smarter Re : [résolu] [Kubuntu Dapper] paquet cassé L'erreur vient du fait que tu utilises les dépots xgl qui fournissent une version trop récente de mesa mais j'ai essayé che zmoi avec ces dépots et ça n'a pas posé de problèmes mon libgl1-mesa-dev dépend bien de la 8.5.1 et non de la 8.4.1 donc avec les dépots que tu as il ne devrait pas y avoir de problemes... Je comprends pas là a tout hasard v'la mon sources.list il te manque p'tete quelque chose: ## CD #deb cdrom:[Ubuntu 6.04 _Dapper Drake_ - Alpha i386 (20060217.2)]/ dapper main restricted ## Main deb http://archive.ubuntu.com/ubuntu dapper main restricted deb-src http://archive.ubuntu.com/ubuntu dapper main restricted ## Updates deb http://archive.ubuntu.com/ubuntu dapper-updates main restricted deb-src http://archive.ubuntu.com/ubuntu dapper-updates main restricted ## Universe deb http://archive.ubuntu.com/ubuntu dapper universe deb-src http://archive.ubuntu.com/ubuntu dapper universe ## Multiverse deb http://archive.ubuntu.com/ubuntu dapper multiverse deb-src http://archive.ubuntu.com/ubuntu dapper multiverse ## Backports deb http://archive.ubuntu.com/ubuntu dapper-backports main restricted universe multiverse deb-src http://archive.ubuntu.com/ubuntu dapper-backports main restricted universe multiverse ## Proposés deb http://archive.ubuntu.com/ubuntu dapper-proposed main restricted universe multiverse deb-src http://archive.ubuntu.com/ubuntu dapper-proposed main restricted universe multiverse ## Sécurité deb http://security.ubuntu.com/ubuntu dapper-security main restricted deb-src http://security.ubuntu.com/ubuntu dapper-security main restricted deb http://security.ubuntu.com/ubuntu dapper-security universe deb-src http://security.ubuntu.com/ubuntu dapper-security universe ## Dépôts PLF Breezy (pas encore dispo pour Dapper) deb http://packages.freecontrib.org/ubuntu/plf/ breezy free non-free deb http://packages.freecontrib.org/ubuntu/freecontrib/ breezy free non-free ## Dépot marillat (Pour debian à ne pas utiliser sous ubuntu) # deb http://ftp.nerim.net/debian-marillat/ sarge main # deb http://ftp.nerim.net/debian-marillat/ sid main # deb http://ftp.nerim.net/debian-marillat/ etch main ## Wine deb http://wine.budgetdedicated.com/apt dapper main ## Xmltv deb http://staff.akumiitti.fi/~pvakevai/debian unstable main ## Tea,Gaim,VLC,... deb http://freetux37.free.fr/ubuntu breezy-freetux main # Cipherfunk multimedia packages deb ftp://cipherfunk.org/pub/packages/ubuntu/ breezy main ## Amarok deb http://kubuntu.org/packages/amarok-latest dapper main ## Compiz # deb http://gilir.free.fr/ubuntu ./ deb http://compiztools.free.fr/debian unstable main deb http://www.beerorkid.com/compiz/ dapper main deb http://xgl.compiz.info dapper main deb http://yakablog.info/share/ubuntu dapper main ## KDE deb http://www.kubuntu.org/packages/kde-353 dapper main ## KOffice deb http://kubuntu.org/packages/koffice-151 dapper main ## Xvidcap deb http://www.jarre-de-the.net/computing/debian/ stable main ## Freevo deb http://ubuntu.geole.de/ dapper universe multiverse ## Samba deb http://www.linux2go.dk/ubuntu dapper main ## VLC deb http://nightlies.videolan.org/build/dapper-i386 / ## Apt-build deb file:/var/cache/apt-build/repository apt-build main ## Firefox deb http://asher256-repository.tuxfamily.org dapper main dupdate french deb http://asher256-repository.tuxfamily.org ubuntu main dupdate french ## Seveas #deb http://seveas.ubuntulinux.nl/ dapper-seveas custom extras freenx java seveas-meta all ## Opera (marche pas sous dapper hmm) #deb http://deb.opera.com/opera etch non-free ## GCompris, Televidilo, Kdocker,... deb http://thomas.enix.org/pub/debian/packages/ dapper main ## Kopete 0.12 deb http://www.ubuntu-zh.org/~freeflying/packages / regarde surtout du coté des dépots qui commencent par ## Compiz Hors ligne Goffi Re : [résolu] [Kubuntu Dapper] paquet cassé Non, même avec ton source.list au complet ça ne marche pas. Je suis sur un amd64, ça doit venir de là. Je me tate pour virer les dépôt XGL, de toute manière ça ne fonctionne pas chez moi. Merci pour ton aide Edit: bon j'ai viré tout ce qui avait trait à XGL, et là ça marche Dommage, XGL marchait impécable avec Kororaa (mais c'était en 32 bits), ça ne semble pas être le cas en 64 bits... je chercherai ça plus tard. Dernière modification par Goffi (Le 10/06/2006, à 21:54) http://www.goffi.org Hors ligne
The goal is to get the array of family members made and print out the results in the order created. Is there any way to tidy this up :)? // Our Person constructor function Person(name,age) { this.name=name; this.age=age; } // Now we can make an array of people var family=new Array(); family[0]=new Person("alice", 40); family[1]=new Person("bob", 42); family[2]=new Person("michelle", 8); family[3]=new Person("timmy", 6); // loop through our new array for(i=0;i<family.length;i++) { console.log(family[i].name); } There are some ways to tidy things up. // Our Person constructor This comment doesn't explain anything that isn't already known. Comments should not say what is happening, they should explain the purpose instead, where the code can not be made clear enough to provide such an explanation for you. Instead of explaining what the code is already fully capable of showing you from the code itself, it's better to use comments to explain why things occur, or to provide a broad overview instead. function Person(name,age) Spacing. A space after the comma is best practice to help aid in readability. { Coding techniques from other programming languages, such as putting the curly brace at the start of the next line, is not just a bad idea - it can actually break JavaScript. Due to syntax limitations in JavaScript and how automatic semicolon insertion works, it is a fundamental principle in JavaScript that placing the curly brace at the start of a new line, is a practice that results in more fragile code. this.name=name; this.age=age; Spacing before and after the equals sign is best practice to help improve readability of the code. } That line is good, it has no problems. // Now we can make an array of people Explaining what is happening once again, which is not a good use for comments. var family=new Array(); family[0]=new Person("alice", 40); family[1]=new Person("bob", 42); family[2]=new Person("michelle", 8); family[3]=new Person("timmy", 6); It is preferred for the sake of consistency to use [] instead of new Array. You can also create those new Person objects at the same time as defining the array, for example with: It is also a common convention in JavaScript to use single quotes to delimit strings instead of double. Both work, but single provides more convenience than double. var family = [ new Person('alice', 40), new Person('bob', 42), new Person('michelle', 8), new Person('timmy', 6) ]; Due to the duplication that we see here though, it may be worth considering storing the name/age in an array, and using a loop to create the new Person objects instead. Especially if more people are planned to be added in further development. // loop through our new array Another useless comment. for(i=0;i<family.length;i++) The i variable has not been declared so it is going to be a global one instead. That's a bad mistake. The best way to remedy that is to declare the i variable right at the start of the code, with the other var declaration. Spacing should be added after the for word, which helps to reinforce that it is not a function name being called. Also, spacing before and after the equals sign, after the semicolons, and before and after the less than sign for improved readability. { Should come after the closing parenthesis of the for statement, with a space separating the two. console.log(family[i].name); Be aware that Internet Explorer doesn't support console.log - there are shims though that help to provide support, such as https://github.com/kayahr/console-shim } No troubles with this line So what we end up with is something like the following: // Create and show family info function Person(name, age) { this.name = name; this.age = age; } var family = [ new Person('alice', 40), new Person('bob', 42), new Person('michelle', 8), new Person('timmy', 6) ], i; for (i = 0; i < family.length; i += 1) { console.log(family[i].name); } I have also replaced i++ with i += 1 because I agree with Douglas Crockford, that incrementors and decrementors are not as good to use as other techniques that make it clearer what is going on. For some details, see http://stackoverflow.com/questions/971312/why-avoid-increment-and-decrement-operators-in-javascript Comments-I didn't create the comments. I just kept them there because of being lazy. So I'm ignoring any suggestions for that :). No offense to you. As for readability, I prefer no spaces in stuff such as (name,age). It helps my readability. I was mainly just asking for advice on how to shorten code, which you were able to answer with the array advice. console.log is just the part of the program that I'm supposed to write. I mean, I'm using codecademy.com and they force me to write console.log. Though again, comments based on that don't help me shorten the code. Thank you Paul for the array advice :). Another potential way to do that is to use the forEach method instead, which allows you to simplify things, and you won't need that variable either. family.forEach(function (person) { console.log(person.name); }); The forEach method is available on all modern web browsers. If you want it to succeed on Internet explorer, the forEach documentation page has compatibility code that adds the capability for web browsers that don't know how. Wow, that's some efficient looking code. Thanks for the advice Paul.
Epydoc's default markup language is epytext, a lightweight markup language that's easy to write and to understand. But if epytext is not powerful enough for you, or doesn't suit your needs, epydoc also supports three alternate markup languages: To specify the markup language for a module, you should define amodule-level string variable __docformat__, containingthe name of the module's markup language. The name of the markuplanguage may optionally be followed by a language code (such asen for English). Conventionally, the definition of the__docformat__ variable immediately follows the module'sdocstring: # widget.py """ Graphical support for `gizmos` and `widgets`. """ __docformat__ = "restructuredtext en" [...] To change the default markup language from the command line, usethe --docformat option. For example, the followingcommand generates API documentation for the existing regularexpression package re, which uses plaintext markup: [epydoc]$ epydoc --docformat plaintext re reStructuredText is a markup language that was developed in conjunction with Docutils. In order to parse reStructuredText docstrings, Docutils 0.3 or higher must be installed. If Docutils is not installed, then reStructuredText docstrings will be rendered as plaintext. Docutils can be downloaded from the Docutils SourceForge page. In addition to the standard set offields, the reStructruedText parser also supportsconsolidated fields, which combine the documentation forseveral objects into a single field. For more information, see themarkup-specific notes for reStructuredTextfields. The epydoc reStructuredText reader also defines several custom directives, which can be used to automatically generate a variety of graphs. The following custom directives are currently defined: Directive Description .. classtree:: [ Display a class hierarchy for the given class or classes (includingall superclasses & subclasses). If no class is specified, and thedirective is used in a class's docstring, then that class's classhierarchy will be displayed. The dir option specifiesthe orientation for the graph (default=down). .. packagetree:: [ Display a package hierarchy for the given module or modules (includingall subpackages and submodules). If no module is specified, and thedirective is used in a module's docstring, then that module's packagehierarchy will be displayed. The dir option specifiesthe orientation for the graph (default=down). Thestyle option specifies whether packages should bedisplayed in a tree, or using nested UML symbols. .. importgraph:: [ Display an import graph for the given module or modules. If no moduleis specified, and the directive is used in a module's docstring, thenthat module's import graph will be displayed. The diroption specifies the orientation for the graph(default=left). .. callgraph:: [ Display a call graph for the given function or functions. If no functionis specified, and the directive is used in a function's docstring, thenthat function's call graph will be displayed. Thedir option specifies the orientation for the graph(default=right). .. dotgraph:: [ Display a custom Graphviz dot graph. The body of the directive(graph...) should contain the body of a dot graph. Theoptional title argument, if specified, is used as thetitle of the graph. The optional caption option can beused to provide a caption for the graph. Javadoc is a markup language developed by Sun Microsystems for documenting Java APIs. The epydoc implementation of Javadoc is based on the Javadoc 1.4.2 reference documentation. However, there are likely to be some minor incompatibilities between Sun's implementation and epydoc's. Known incompatibilities include: @serial.{@docroot}, {@inheritdoc}, {@value}. @var, @type, and @group. Home Installing Epydoc Using Epydoc Epytext
xxkirastarothxx Re : MegaUpload : BotMU v1.0.1 Tucan a l'avantage de gérer les captcha pour les services qui en ont (enfin... un popup s'ouvre avec l'image, et l'utilisateur doit saisir lui même le texte) Aaah ok c'est comme ça qu'il fait. Par-ce que j'ai déjà essayé de pété des capcha avec des OCR, pour certains ça fonctionne, mais pour les capcha "recapcha" (de google de crois) j'ai jammais reussis a le passer automatiquement. Mais alors, quel interet par rapport a un navigateur web, puisque de coup je suppose que tant que le capcha est pas rentré manuellement les téléchargements sont en attente !? ah oui! au fait: j'adore le logo/icone (fan de SW) Merci lol ^^ C'est le premier truc qui m'est passé par la tête en pensant à un robot alors... Hors ligne loubrix Re : MegaUpload : BotMU v1.0.1 Par-ce que j'ai déjà essayé de pété des capcha avec des OCR, pour certains ça fonctionne, mais pour les capcha "recapcha" (de google de crois) j'ai jammais reussis a le passer automatiquement. Mais alors, quel interet par rapport a un navigateur web, puisque de coup je suppose que tant que le capcha est pas rentré manuellement les téléchargements sont en attente !? c'est ça, chaque fois qu'un téléchargement se termine, il faut saisir le captcha du suivant, donc être devant l'écran (ce qui fait perdre tout l'intéret de se genre de soft). c'est pour ça que je préfère MU... de toute façon te casse pas la tête: les captcha sont conçus justement pour qu'aucun logiciel ne puisse les lire; à chaque fois qu'on fait des progrès dans la reconnaissance de caractère, les captcha sont complexifiés (si bien que c'est même devenu difficile de les lire avec un oeil humain): c'est une guerre sans fin... à mon avis la faille doit se situer dans la partie accès au déficient visuel, parce que je crois qu'une bibliothèque de reconnaissance vocale doit faire l'affaire (mais peut-être que je m'égare). mais il vaut mieux se concentrer sur les services qui n'ont pas de captcha, et l'ajout de la gestion du compte premium (et là le code de Tucan doit pouvoir t'aider). Hors ligne xxkirastarothxx Re : MegaUpload : BotMU v1.0.1 Update: Mise à jour 1.0.1 Disponible n'hesitez pas a me prévenir si vous rencontrez des bugs (vu l'urgence je n'ai pas eu le temps de trop pousser les tests avant de publier) /* ------------ Changelog ------------- */ Mise à jour plateforme MegauploadAjout de la convertion en mp3 sur la platforme Youtube Ajout d'une fenêtre popup plus complete pour l'ajout de fichier Fenêtre d'options avancées Fonction Up et Down Focus du fichier nouvellement ajouté Rennome les fichier de façon "Fichier-1", "Fichier-2" tant que ce fichier existe déjà Couleur des progress bars en fonction du status: -> rouge: stop/erreur -> bleu: attente/téléchargement -> vert: terminé Correction bug d'alerte lorsque le programme était minimisé Correction de l'annulation pendant la récupération des noms de fichier Correction de la récupération des nom de fichier auto (nommé automatiquement "lien invalide" par exemple) Correction du bug lors du lancement du téléchargement si le disque n'existe pas/n'est pas monté /* ---------------------------------------- */ Bon téléchargements à tous Dernière modification par xxkirastarothxx (Le 16/12/2011, à 17:02) Hors ligne loubrix Re : MegaUpload : BotMU v1.0.1 Bon ben on va tester; ça tombe bien, j'ai des trucs à aller chercher sur MU... Edit: ça a l'air de fonctionner; j'ai téléchargé 2 fichiers sur MU sans problème... juste je comprenais pas pourquoi le deuxième avait été renommé "-1." (sans extension), alors que le premier portait son vrai nom, mais en lisant le changelog, j'ai compris (quoique c'est bizarre qu'il aie su récupérer le nom du premier fichier et pas du deuxième). sinon, deux ou trois remarques (que j'espère constructives): -la barre d'avancement du bas: avant de m'en servir, je pensais qu'elle allait faire une synthèse de tous les téléchargements, mais en fait, elle est calquée sur celle de chaque fichier, et du coup, ne sert pas à grand chose et fait doublon. -en parlant de doublons, les boutons "départ" et "stop" en bas font doublons avec ceux de la barre d'outils. -j'allais faire remarquer que les tooltips sont illisibles (blancs sur jaune-clair), mais c'est vrai que BotMU est en Qt, et qu'il doit falloir paramètrer l'intégration au sein de Gnome-Unity (ce que je n'ai pas encore fait). -dans le changelog, tu parles d'une fenêtre "options avancées", mais je la trouve pas (dans "préférences", je n'ai que 4 onglets: "général", "megaupload", "youtube" et "classique"). voilà, je n'ai que des bricoles à te rapporter; dans l'ensemble, soft génial, qui fait ce qu'on lui demande sans sourciller... j'ai déjà viré Tucan de mon dock (mais je le garde pour le cas où je suis obligé d'utiliser un autre service que MU), et si tu as besoin d'aide pour la suite, demande... Dernière modification par loubrix (Le 16/12/2011, à 21:57) Hors ligne atichu Re : MegaUpload : BotMU v1.0.1 Bon va faloir torturé le programme... 3 nouvelle dépendance^..^ il a un probléme de detection des liens.. j'ai detecté 1lien après il ne veut plus en detecté. j'ai redémarré sa semble marché^^. j'ai fait une mise à jour.. je suis sous précise^^.. pour le moment rien a dire. j'ai rentré un lien en manuel en mettant auto en nom il a cherché le nom Dernière modification par atichu (Le 16/12/2011, à 22:07) Je ne suis pas un geek ni un informaticien mais j'aime linux/ubuntu. acer aspire5741G (unity 12.04))+une vieillebousse(xfce 12.04) +teste de la version instable Hors ligne xxkirastarothxx Re : MegaUpload : BotMU v1.0.1 Alors, pour le problème de renommage, j'avoue que la fonction est un peu jeune, et je n'ai pas eu trop le temps de la tester encore, je reverrai ça pour la prochaine update ^^ La barre, je l'ai simplement laissée Enfait, au début du programme il n'y avait pas de barre de téléchargement par fichier mais seulement cette barre principale; mais l'idée d'en faire une barre de progression générale est sympa, je garde ça dans un coin Pour les boutons, tu suggères de supprimer ceux de la toolbar ? J'avoue que le "doublon" ne me gène pas spécialement ici; à voir ce qu'en pensent d'autres = ) Sinon, Les "options avancées" sont justement ces quatre fenêtres qui permettent de gérer chaque plateforme indépendamment ; avant ça il n'y avait que l'onglet général. C'est vrai que pour les nouveaux utilisateurs je me suis mal exprimé ^^ Et pour les tooltips merci beaucoup, je n'avais encore aucune idée de pourquoi ça plantait, puisque sur ma debian, ma vm xp et ubuntu (10.04) ça fonctionne bien. Maintenant j'ai une piste à explorer 3 nouvelle dépendance^..^ Et oui ^^ j'ai pas le code sous les yeux mais de mémoire, y'a ffmpeg pour la conversion mp3, chardet pour une meilleur gestion de l'encodage du presse-papier, et je me demande si j'ai pas laisser la 3ème par erreur lors de mes test pour la conversion en mp3 ; ou sinon c'est une dépendance nécessaire pour ffmpeg, il faut que je vérifie ça je sais plus trop xD. Et sinon, pour fini j'ai pus trouver (encore :'( lol) quelques bugs, comme une erreur 416 qui se place de temps en temps, peux être un mauvais calcul de ma part sur la plage de bits lors de la reprise d'un téléchargement. Et je me demande également si l'updater ne déconne pas, mais bon ça ne concerne pas les utilisateurs de linux ^^' Merci bien pour toutes ces petites remarques qui me permettront d'avancer plus vite Hors ligne atichu Re : MegaUpload : BotMU v1.0.1 je confirme au boud d'un certain temps sur précise il veut plus ajouté de lien.. faudrai que je teste sur maverick et onerick hybride:p Dernière modification par atichu (Le 18/12/2011, à 01:34) Je ne suis pas un geek ni un informaticien mais j'aime linux/ubuntu. acer aspire5741G (unity 12.04))+une vieillebousse(xfce 12.04) +teste de la version instable Hors ligne JLK Re : MegaUpload : BotMU v1.0.1 Donc BotMU, c'est un Jdownloader mais Java en moins, et donc plus réactif et beaucoup moins boufficiel, si je comprends bien ? Hors ligne helly Re : MegaUpload : BotMU v1.0.1 Ho le vilain troll ^^. Hors ligne loubrix Re : MegaUpload : BotMU v1.0.1 Pour les boutons, tu suggères de supprimer ceux de la toolbar ? J'avoue que le "doublon" ne me gène pas spécialement ici; à voir ce qu'en pensent d'autres = ) Pourquoi pas ? ça libèrerait de la place, pour, par exemple, agrandir la taille de la fenêtre de download... Et pour les tooltips merci beaucoup, je n'avais encore aucune idée de pourquoi ça plantait, puisque sur ma debian, ma vm xp et ubuntu (10.04) ça fonctionne bien. chez moi, ça ne plante pas, il y a juste un problème de couleur qui les rend illisible, mais je pense qu'il faudrait suivre un tuto dans ce genre pour régler ça... pour le problème de renommage, j'avoue que la fonction est un peu jeune, et je n'ai pas eu trop le temps de la tester encore, je reverrai ça pour la prochaine update il faudrait au moins que l'extension soit conservée (en pensant au type qui a téléchargé une video, mais n'a pas vu qu'elle était dans un zip). je pense aussi au gars qui a téléchargé les 24 épisodes de sa série préférée: pour peu qu'il n'aie pas ajouter les liens dans l'ordre, il va ramer pour trier... Donc BotMU, c'est un Jdownloader mais Java en moins, et donc plus réactif et beaucoup moins boufficiel, si je comprends bien ? autant faire plus léger que Jdownloader me semble simple, autant faire plus lourd frôle l'impossible (quitte à troller, hein ) plus sérieusement, ce n'est pas comparable: Jdownloader est une usine à gaz qui peut (presque) tout faire, BotMU se contente (pour le moment) de Megaupload, Youtube, et les téléchargements classiques; à chacun de décider de ce dont il a besoin... Hors ligne atichu Re : MegaUpload : BotMU v1.0.1 une question^^ botmu pourrai vérifié si la taille final correspond a celle de départ? non car la j'ai des 100% alors que j'ai même pas 10% des fichier.. Je ne suis pas un geek ni un informaticien mais j'aime linux/ubuntu. acer aspire5741G (unity 12.04))+une vieillebousse(xfce 12.04) +teste de la version instable Hors ligne xxkirastarothxx Re : MegaUpload : BotMU v1.0.1 Donc BotMU, c'est un Jdownloader mais Java en moins, et donc plus réactif et beaucoup moins boufficiel, si je comprends bien ? Je pense que loubrix a bien répondu, je n'ai pas encore parcouru l'étendue des possibilités de Jdownloader mais je suis certain qu'il des fonctionnalités que BotMU ne reprendra sûrement jamais. Simplement par-ce que je compte bien à ce que BotMU reste une logiciel léger. Déjà en utilisant Python plutôt que Java, je pense être sur la bonne voie ^^' (quoi ? on à tous le droits de troller un tout petit peu ) ; ça aurais sûrement été encore mieux en C/C++ mais je n'y connais strictement rien donc... Une autre raison est que je suis seul à programmer sur ce projet, en tout cas pour le moment, et qu'il y a de nombreuse fonctionnalités qui me prendraient un temps pas possible à reproduire, et ceci n'était pas le but de mon logiciel ^^ A la base je voulais simplement me faire un petit robot pour megaupload, et en profiter pour le partager. Par la suite de nombreuses idées m'ont plues et je les aies ajoutées petit à petit; et d'autres viendrons encore, vu comme c'est parti Donc, en bref, tout est une question de choix: performance VS fonctionnalités @loubrix: Merci pour le lien,. J'ai commencé à le lire, mais ça à l'air de plutôt être des manip à faire coté utilisateur non ? enfin y'a des infos intéressantes quand même. En tout cas maintenant que je sais d'où viens le problème je vais voir si j'arrive à régler ça. Et pour le renommage, je fais ça ^^ une question^^ botmu pourrai vérifier si la taille final correspond à celle de départ? non car la j'ai des 100% alors que j'ai même pas 10% des fichier... Bah, ça je pensais le faire pour la dernière mise à jour, mais vu que megaupload à changé son interface j'ai pas eu le temps de le faire ^^ Prochainement, donc = ) Dernière modification par xxkirastarothxx (Le 19/12/2011, à 12:42) Hors ligne loubrix Re : MegaUpload : BotMU v1.0.1 J'ai commencé à le lire, mais ça à l'air de plutôt être des manip à faire coté utilisateur non ? enfin y'a des infos intéressantes quand même. bien sûr, c'est à l'utilisateur de gérer l'intégration les applications Qt dans son environnement Gtk; ceci dit, même en suivant les indications, rien à faire, les tooltips sont toujours de la mauvaise couleur... edit: téléchargement de 9 videos, voici ce que j'ai à la fin: david@phobos:~$ ls ~/Téléchargements/ -1. -1-2-3. -1-2-3-4-5. -1-2-3-4-5-6-7. -1-2-3-4-5-6-7-8-9. -1-2. -1-2-3-4. -1-2-3-4-5-6. -1-2-3-4-5-6-7-8. si je comprends bien le code (mais Python et moi pas trop copains), les fichiers ne devraient être renommés que s'ils existent déjà dans le répertoire de destination, mais là en fait, il renomme s'il y a déjà un fichier dans le rep, sans se soucier de son nom, donc le problème semble venir de là (megaupload.py): if continuedl==0: nbFile = 0 while os.path.exists(os.path.join(self.parent.destination, dest)): nbFile+=1 if len(dest.split("."))>0: filename = str("".join(dest.split(".")[0:-1])) ext = str("".join(dest.split(".")[-1])) dest = "%s-%s.%s" % (filename, nbFile, ext) else: dest = "%s-%s" % (dest, nbFile) self.filename = dest ce serait pas "nbFile = 0" le problème ? Dernière modification par loubrix (Le 20/12/2011, à 23:32) Hors ligne xxkirastarothxx Re : MegaUpload : BotMU v1.0.1 C'est assez bizzare quand même: chez moi la foncion de renommage déconne effectivement, mais pas a ce point: moi il me renomme mes fichiers "nomdufichier-1-1-1.ext" alors que chez toi, la variable "dest" à l'air vide Y'avait le même problème dans Youtube.py Mais je te rassure j'ai déjà réglé le problème (je dois encore tester un peu), ainsi que celui des tooltips (simplement en forçant la couleur ) Si vous voyez d'autres bugs urgents, c'est le moment, sinon je publie une correction de mise à jours d'ici quelques jours. [HS] Et pour continuer dans un légé hs: Python c'est cro biiieenn ^^' J'ai toucher à pas mal de langage, et Python est vraiment celui qui ma taper dans l'oeil: c'est l'un des rares langages avec lequel, sans aucun notions de celui ci, j'ai tout de même réussis a tout comprendre très facilement. En plus de ça, le système de syntaxe est geniale je trouve, puisque l'indentation est une obligation; donc même quand on tombe sur un code d'un mec qui sait pas coder, le code reste propre, et ça c'est que du bonheur Par contre, la librairie graphique Qt ma saouler xD [/HS] Dernière modification par xxkirastarothxx (Le 21/12/2011, à 11:30) Hors ligne atichu Re : MegaUpload : BotMU v1.0.1 mega on encore changé les temps d'attente...^^ j'ai du 10 seconde parfois Je ne suis pas un geek ni un informaticien mais j'aime linux/ubuntu. acer aspire5741G (unity 12.04))+une vieillebousse(xfce 12.04) +teste de la version instable Hors ligne loubrix Re : MegaUpload : BotMU v1.0.1 effectivement, j'ai même eu 5 secondes ce matin... par contre, mes problèmes de renommage de fichier ont cessé, bizarrement: maintenant, tout arrive avec le bon nom... Hors ligne xxkirastarothxx Re : MegaUpload : BotMU v1.0.1 Ouais j'ai eu l'occasion de m'en rendre compte. Sur la dernière update j'ai laissé à 65sec par défaut, c'est le max je crois; mais dans la prochaine version je verrai a récupérer le vrai temps d'attente. Sinon j'ai réussis à ajouter Rapidshare qui serra donc dans la prochaine release. par contre, mes problèmes de renommage de fichier ont cessé, bizarrement: maintenant, tout arrive avec le bon nom... Peut etre une instabilité temporaire de megaupload... je t'avoue que je ne suis pas encore tombé sur ton problème. Dans tout les cas la fonction de renommage de fichier a été complètement remodelée. Bonne année à tous. Dernière modification par xxkirastarothxx (Le 03/01/2012, à 18:07) Hors ligne kboo Re : MegaUpload : BotMU v1.0.1 Coucou, après avoir essayé pendant une bonne semaine ce soft, je me suis résigner à en utiliser un autre: quelque plantages (rien de méchant) mais surtout des téléchargement fini à 100% alors qu'il n'y a rien dans le répertoire.... j'ai beau chercher si j'ai pas fait une mauvaise manip mais rien à faire ---> out Je reviendrai quand ça marchera mieux (aussi bien que JDownloader ça serai bien ! même si ça fonctionne que pour MU) Bon courage Hors ligne xxkirastarothxx Re : MegaUpload : BotMU v1.0.1 Salut, Tant pis, je comprends, snif ^^ je dois admettre que j'ai eu tendance à publier trop rapidement mes patchs; surtout avec la dernière, du fait de la modification de l'interface de Megaupload. J'ai faillis refaire la même bêtise dernièrement, mais, cette fois, je vais attendre avant de mètre en ligne la prochaine version. Histoire de faire une bonne grosse batterie de test. il faut aussi que j’approfondisse ma connaissance en SVN afin de pouvoir faire deux branches différentes de mon soft (sable et test) Je pense également mètre en place un système d’envoi de log, avec accord de l'utilisateur évidement, afin de faciliter le debug. en attendant, kboo, si tu as encore ton /home/[user]/.BotMU/botmu.log, pourrais tu le poster ? Voila, j’espère que la prochaine édition pourra satisfaire plus de monde. Dans tout les cas, merci pour ton retour. Hors ligne atichu Re : MegaUpload : BotMU v1.0.1 botmu au placard XD enfin l'onglet mega XD Je ne suis pas un geek ni un informaticien mais j'aime linux/ubuntu. acer aspire5741G (unity 12.04))+une vieillebousse(xfce 12.04) +teste de la version instable Hors ligne Siap Re : MegaUpload : BotMU v1.0.1 Salut, xxkirastarothxx te prend pas trop la tête quand même: http://trucbuntu.wordpress.com/2012/01/ … disparait/ t'as le temps maintenant pour publier les patchs Hors ligne xxkirastarothxx Re : MegaUpload : BotMU v1.0.1 ,-------. / ,' `. ,--' ,' `. ,-;-- _.- pow! / \ ---;-' _.=.---'' +-------------+ ; X X ---=-----'' _.------- | ----- |--| \-----=---:i- +XX|'i:'''''''' : ;`--._ ''---':---- /X+-) \ / ''--._ `- .XXX|) `. ____ ,' ''---. X\/) `. ,' `- \ `---+---' \ | \. | `-------------------+ edit: ... heureusement que j'ai intégré rapidshare (et peut etre mediafire) dans la prochaine version (tant pis pour la surprise ^^''') Dernière modification par xxkirastarothxx (Le 20/01/2012, à 10:29) Hors ligne JLK Re : MegaUpload : BotMU v1.0.1 Que devient BotMU depuis que les gros serveurs sont atteint de panique ? Hors ligne xxkirastarothxx Re : MegaUpload : BotMU v1.0.1 Salut, Et bien pour le moment le logiciel est plus ou moins mort, bien qu'il fonctionne toujours pour youtube; le but principale était tout de même megaupload ^^ Donc, J'attends un peu que ça ce calme histoire de voir qui reste, de plus que pas mal de site en on profité pour changer leur fonctionnement interne/scripts. Une fois que tout serra stabilisé je compte bien le remettre en route. A l'heure actuelle, j'ai prévu d'ajouter rapidshare et mediafire (disponible sur la version svn) mais ça ne fonctionne pas très bien (surtout mediafire, puisque de temps à autre il y a un capcha), pas pour tout les liens, et j'ai l'impression que ça déconne complètement sur windows alors que mieux sous Linux... comprends pas trop pourquoi Pour ceux qui voudraient tester: mkdir botmu_svn svn co https://botmu.svn.sourceforge.net/svnroot/botmu botmu_svn cd botmu_svn python BotMU.py je bosse dessus donc vous aurez la dernière version en cours, mais c'est pas forcement stable ^^ Dernière modification par xxkirastarothxx (Le 03/02/2012, à 11:44) Hors ligne
So I have a model called Car with a foreign key of a Manufacturer model. I also have a CarCharacterisitcs model with a foreign key of Car. This is what the code looks like: class Car(models.Model): idcar = models.AutoField(primary_key=True) manufacturer = models.ForeignKey(Manufacturer, null=True, blank=True, on_delete=models.SET_NULL) name = models.CharField(max_length=765) class Manufacturer(models.Model): idmanufacturer = models.AutoField(primary_key=True) name = models.CharField(max_length=765) class CarCharacteristics(models.Model): idcar_characteristics = models.AutoField(primary_key=True) car = models.ForeignKey(Car, on_delete=models.PROTECT) I am making a page that lets you add CarCharacteristics. This is my form and view: class CarCharacteristicsForm(ModelForm): class Meta: model = CarCharacterisitics def add_car_characteristics(request): if request.method == 'POST': car_char_form = CarCharacterisiticsForm(request.POST) if car_char.is_valid(: car_char_form.save() return HttpResponseRedirect('/cars/car-characterisitcs') else: car_char_form = CarCharacterisiticsForm() return render(request, 'car_characteristics/add_car_characteristics.html',{'car_char_form': car_char_form,}) In my html page, I have this: <h1>Car Characteristics</h1> <form action="." method="POST"> {% csrf_token %} <table id="characteristics_table"> <tr> <td> <table id="car_characterisitcs_table"> <td>{{ char_char_form.as_table }}</td> </table> </td> </tr> </table> <p><input type="submit" value="Submit"></p> </form> When this form is displayed, I have a drop down select field with all my possible Car models. What I want is to have two select fields. I want the first to let you be able to select the Manufacturer and then the second to display all the possible Cars that have that manufacturer. Then when you submit the form, it assigns the Car you selected to the Foreign Key of the CarCharacterisitcs model. If you go look at Advance Auto Parts, when you click "Your Vehicle", that is what I want to have. How do I do that?
No, for the same reason as this: >>> class Foo(object): ... bar = 'Foo attribute' ... >>> f = Foo() >>> f.bar 'Foo attribute' >>> Foo.bar 'Foo attribute' >>> f.bar = 'instance attribute' >>> f.bar 'instance attribute' >>> Foo.bar 'Foo attribute' When you assign an attribute to an object, a class attribute of the same name will be "eclipsed" by the object's. On attribute lookup, however, if the object in question does not define said attribute, the class one will be returned, instead. In Django, those class attributes are used by the ORM layer to generate the mechanism that translates to SQL queries and operations (deep, metaclass magic going on behind-the-scenes). edit: To answer your question-- To understand that, you need to understand a little bit about Python's data model. Essentially, both classes and objects have namespaces. This is apparent if you peek into their special __dict__attribute: >>> print Foo.__dict__ {'__dict__': <attribute '__dict__' of 'Foo' objects>, '__weakref__': <attribute '__weakref__' of 'Foo' objects>, '__module__': '__main__', 'bar': 'Foo attribute ', '__doc__': None} >>> f = Foo() >>> print f.__dict__ {} When the object f is first created, it has an empty namespace. When you do a lookup, f.bar, this namespace (really, a dictionary) is looked up. Since there is no 'bar' attribute found there, f's class, Foo, is looked up. We find 'bar': 'Foo attribute' there. So that's what's going to be returned: >>> f.bar 'Foo attribute' Now, when you assign an attribute value to an object, and said attribute name does not yet exist in its namespace, it is created: >>> f.bar = 'instance attribute' >>> print f.__dict__ {'bar': 'instance attribute'} >>> f.bar 'instance attribute' Now, you know what happens the next time f.bar is looked up! f.__dict__['bar'] exists and will be returned before we even look at Foo's namespace. Of course, if your intent is to always access and manipulate a class' attribute instead of the instance's, you'd need to use the class' name. >>> Foo.bar 'Foo attribute' >>> Foo.__dict__['bar'] 'Foo attribute'
I can't get into the specifics, for a variety of reasons, but here's the essential architecture of what I'm working with I have a C++ framework, which uses C++ object files built by me to execute a dynamic simulation. The C++ libraries call, among other things, a shared (.so) library, written in Ada. As best as I can tell, the Ada library (which is a large collection of nontrivial code) is generating exceptions on fringe cases, but I'm having trouble isolating the function that is generating the exception. Here's what I'm using: CentOS 4.8 (Final) gcc 3.4.6 (w/ gnat) gdb 6.3.0.0-1.162.el4rh This is the error I get under normal execution: terminate called without an active exception raised PROGRAM_ERROR : unhandled signal I can get gdb to catch the exception as soon as it returns to the C++, but I can't get it to catch inside the Ada code. I've made sure to compile everything with -g, but that doesn't seem to help the problem. When I try to catch/break on the signal/exception in gdb (which politely tells me Catch of signal not yet implemented), I get this: [Thread debugging using libthread_db enabled] [New thread -1208371520 (LWP 14568)] terminate called without an active exception Program received signal SIGABRT, Aborted. [Switching to thread -1208371520 (LWP 14568)] 0x001327a2 in _dl_sysinfo_int80 () from /lib/ld-linux.so.2 I believe the terminate called [...] line is from the framework. When I try to capture that break, then run a backtrace (bt), I get something like this: #0 0x001327a2 in gdb makes me want to flip tables. #1 0x00661825 in raise () from /lib/tls/libc.so.6 #2 0x00663289 in abort () from /lib/tls/libc.so.6 #3 0x0061123e in __gnu_cxx: __verbose_terminate_handler () from /usr/lib/libstdc++.so.6 #4 0x0060eed1 in __xac_call_unexpected () from /usr/lib/libstdc++.so.6 #5 0x0060ef06 in std::terminate () from /usr/lib/libstdc++.so.6 #6 0x0060f0a3 in __xax_rethrow () from /usr/lib/libstdc++.so.6 #7 0x001fe526 in cpputil::ExceptionBase::Rethrow (scope=@0xbfe67470) at ExceptionBase.cpp:140 At that point, it's into the framework code. I've read several guides and tutorials and man pages online, but I'm at a bit of a loss. I'm hoping that someone here can help get me pointed in the right direction.
How do i check if a user has a permission in pyramid. For example, I want to show some HTML only if a user has some permission, but have the view available for everybody. The usual method is: from pyramid.security import has_permission has_permission('view', someresource, request) See also http://docs.pylonsproject.org/projects/pyramid/1.0/narr/security.html#debugging-imperative-authorization-failures and http://docs.pylonsproject.org/projects/pyramid/1.0/api/security.html#pyramid.security.has_permission
You're on the right track. Let's take a look at your example: for(int i = 0; i < data.Length; i++) data[i] = (byte)(256 * Math.Sin(i)); OK, you've got 11025 samples per second. You've got 60 seconds worth of samples. Each sample is a number between 0 and 255 which represents a small change in air pressure at a point in space at a given time. Wait a minute though, sine goes from -1 to 1, so the samples go from -256 to +256, and that is larger than the range of a byte, so something goofy is going on here. Let's rework your code so that the sample is in the right range. for(int i = 0; i < data.Length; i++) data[i] = (byte)(128 + 127 * Math.Sin(i)); Now we have smoothly varying data that goes between 1 and 255, so we are in the range of a byte. Try that out and see how it sounds. It should sound a lot "smoother". The human ear detects incredibly tiny changes in air pressure. If those changes form a repeating pattern then the frequency at which the pattern repeats is interpreted by the cochlea in your ear as a particular tone. The size of the pressure change is interpreted as the volume. Your waveform is sixty seconds long. The change goes from the smallest change, 1, to the largest change, 255. Where are the peaks? That is, where does the sample attain a value of 255, or close to it? Well, sine is 1 at π/2 , 5π/2, 9π/2, 13π/2, and so on. So the peaks are whenever i is close to one of those. That is, at 2, 8, 14, 20,... How far apart in time are those? Each sample is 1/11025th of a second, so the peaks are about 2π/11025 = about 570 microseconds between each peak. How many peaks are there per second? 11025/2π = 1755 Hz. (The Hertz is the measure of frequency; how many peaks per second). 1760 Hz is two octaves above A 440, so this is a slightly flat A tone. How do chords work? Are they the average of the pitches? No. A chord which is A440 and an octave above, A880 is not equivalent to 660 Hz. You don't average the pitch. You sum the waveform. Think about the air pressure. If you have one vibrating source that is pumping pressure up and down 440 times a second, and another one that is pumping pressure up and down 880 times a second, the net is not the same as a vibration at 660 times a second. It's equal to the sum of the pressures at any given point in time. Remember, that's all a WAV file is: a big list of air pressure changes. Suppose you wanted to make an octave below your sample. What's the frequency? Half as much. So let's make it happen half as often: for(int i = 0; i < data.Length; i++) data[i] = (byte)(128 + 127 * Math.Sin(i/2.0)); Note it has to be 2.0, not 2. We don't want integer rounding! The 2.0 tells the compiler that you want the result in floating point, not integers. If you do that, you'll get peaks half as often: at i = 4, 16, 28... and therefore the tone will be a full octave lower. (Every octave down halves the frequency; every octave up doubles it.) Try that out and see how you get the same tone, an octave lower. Now add them together. for(int i = 0; i < data.Length; i++) data[i] = (byte)(128 + 127 * Math.Sin(i)) + (byte)(128 + 127 * Math.Sin(i/2.0)); That probably sounded like crap. What happened? We overflowed again; the sum was larger than 256 at many points. Halve the volume of both waves: for(int i = 0; i < data.Length; i++) data[i] = (byte)(128 + (63 * Math.Sin(i/2.0) + 63 * Math.Sin(i))); Better. "63 sin x + 63 sin y" is between -126 and +126, so this can't overflow a byte. (So there is an average: we are essentially taking the average of the contribution to the pressure of each tone, not the average of the frequencies.) If you play that you should get both tones at the same time, one an octave higher than the other. That last expression is complicated and hard to read. Let's break it down into code that is easier to read. But first, sum up the story so far: 128 is halfway between low pressure (0) and high pressure (255). the volume of the tone is the maximum pressure attained by the wave a tone is a sine wave of a given frequency the frequency in Hz is the sample frequency (11025) divided by 2π So let's put it together: double sampleFrequency = 11025.0; double multiplier = 2.0 * Math.PI / sampleFrequency; int volume = 20; // initialize the data to "flat", no change in pressure, in the middle: for(int i = 0; i < data.Length; i++) data[i] = 128; // Add on a change in pressure equal to A440: for(int i = 0; i < data.Length; i++) data[i] = (byte)(data[i] + volume * Math.Sin(i * multiplier * 440.0))); // Add on a change in pressure equal to A880: for(int i = 0; i < data.Length; i++) data[i] = (byte)(data[i] + volume * Math.Sin(i * multiplier * 880.0))); And there you go; now you can generate any tone you want of any frequency and volume. To make a chord, add them together, making sure that you don't go too loud and overflow the byte. How do you know the frequency of a note other than A220, A440, A880, etc? Each semitone up multiplies the previous frequency by the 12th root of 2. So compute the 12th root of 2, multiply that by 440, and that's A#. Multiply A# by the 12 root of 2, that's B. B times the 12th root of 2 is C, then C#, and so on. Do that 12 times and because it's the 12th root of 2, you'll get 880, twice what you started with. How is the length of time to play each note specified, when the contents of the wav file is a waveform? Just fill in the sample space where the tone is sounding. Suppose you want to play A440 for 30 seconds and then A880 for 30 seconds: // initialize the data to "flat", no change in pressure, in the middle: for(int i = 0; i < data.Length; i++) data[i] = 128; // Add on a change in pressure equal to A440 for 30 seconds: for(int i = 0; i < data.Length / 2; i++) data[i] = (data[i] + volume * Math.Sin(i * multiplier * 440.0))); // Add on a change in pressure equal to A880 for the other 30 seconds: for(int i = data.Length / 2; i < data.Length; i++) data[i] = (byte)(data[i] + volume * Math.Sin(i * multiplier * 880.0))); how is the result of multiple notes being inverse FFT'd converted to an array of bytes, which make up the data in a wav file? The reverse FFT just builds the sine waves and adds them together, just like we're doing here. That's all it is! any other relevant information relating to this? See my articles on the subject. http://blogs.msdn.com/b/ericlippert/archive/tags/music/ Parts one through three explain why pianos have twelve notes per octave. Part four is relevant to your question; that's where we build a WAV file from scratch. Notice that in my example I am using 44100 samples per second, not 11025, and I am using 16 bit samples that range from -16000 to +16000 instead of 8 bit samples that range from 0 to 255. But aside from those details, it's basically the same as yours. I would recommend going to a higher bit rate if you are going to be doing any kind of complex waveform; 8 bits at 11K samples per second is going to sound terrible for complex waveforms. 16 bits per sample with 44K samples per second is CD quality. And frankly, it is a lot easier to get the math right if you do it in signed shorts rather than unsigned bytes. Part five gives an interesting example of an auditory illusion. Also, try watching your wave forms with the "scope" visualization in Windows Media Player. That will give you a good idea of what is actually going on. UPDATE: I have noticed that when appending two notes together, you can end up with a popping noise, due to the transition between the two waveforms being too sharp (e.g ending at the top of one and starting at the bottom of the next). How can this problem be overcome? Excellent follow-up question. Essentially what's happening here is there is an instantaneous transition from (say) high pressure to low pressure, which is heard as a "pop". There are a couple of ways to deal with that. Technique 1: Phase shift One way would be to "phase shift" the subsequent tone by some small amount such that the difference between the starting value of the subsequent tone and the ending value of the previous tone. You can add a phase shift term like this: data[i] = (data[i] + volume * Math.Sin(phaseshift + i * multiplier * 440.0))); If the phaseshift is zero, obviously that is no change. A phase shift of 2π (or any even multiple of π) is also no change, since sin has a period of 2π. Every value between 0 and 2π shifts where the tone "begins" by a little bit further along the wave. Working out exactly what the right phase shift is can be a bit tricky. If you read my articles on generating a "continuously descending" Shepard illusion tone, you'll see that I used some simple calculus to make sure that everything changed continuously without any pops. You can use similar techniques to figure out what the right shift is to make the pop disappear. I am trying to work out how to generate the phaseshift value. Is "ArcSin(((first data sample of new note) - (last data sample of previous note))/noteVolume)" right? Well, the first thing to realize is that there might not be a "right value". If the ending note is very loud and ends on a peak, and the starting note is very quiet, there might be no point in the new tone that matches the value of the old tone. Assuming there is a solution, what is it? You have an ending sample, call it y, and you want to find the phase shift x such that y = v * sin(x + i * freq) when i is zero. So that's x = arcsin(y / v) However, that might not be quite right! Suppose you have and you want to append There are two possible phase shifts: and Take a wild guess as to which one sounds better. :-) Figuring out whether you are on the "upstroke" or the "downstroke" of the wave can be a bit tricky. If you don't want to work out the real math, you can do some simple heuristics, like "did the sign of the difference between successive data points change at the transition?" Technique 2: ADSR envelope If you are modeling something that is supposed to sound like a real instrument then you can get good results by changing the volume as follows. What you want to do is have four different sections for each note, called the attack, decay, sustain and release. The volume of a note played on an instrument can be modeled like this: /\ / \__________ / \ / \ A D S R The volume starts at zero. Then the attack happens: the sound ramps up to its peak volume quickly. Then it decays slightly to its sustain level. Then it stays at that level, perhaps declining slowly while the note plays, and then it releases back down to zero. If you do that then there's no pop because the start and the end of each note are at zero volume. The release ensures that. Different instruments have different "envelopes". A pipe organ, for example, has incredibly short attack, decay and release; it is all sustain, and the sustain is infinite. Your existing code is like a pipe organ. Compare with, say, a piano. Again, short attack, short decay, short release, but the sound does get gradually quieter during the sustain. The attack, decay, and release sections can be very short, too short to hear but long enough to prevent the pop. Experiment around with changing the volume as the note plays and see what happens.
Are there any canned Python methods to convert an Integer (or Long) into a binary string in Python? There are a myriad of dec2bin() functions out on Google... But I was hoping I could use a built-in function / library. Are there any canned Python methods to convert an Integer (or Long) into a binary string in Python? There are a myriad of dec2bin() functions out on Google... But I was hoping I could use a built-in function / library. Python's string format method can take a format spec. >>> "{0:b}".format(10) '1010' If you're looking for Example: >>> bin(10) '0b1010' No language or library will give its user base Such as: def bin(i): if i == 0: return "0" s = '' while i: if i & 1 == 1: s = "1" + s else: s = "0" + s i >>= 1 return s which will construct your binary string based on the decimal value. The idea is to use code from (in order of preference): As a reference: def toBinary(n): return ''.join(str(1 & int(n) >> i) for i in range(64)[::-1]) This function can convert a positive integer as large as It can be modified to serve a much larger integer, though it may not be as handy as If you want a textual representation without the 0b-prefix, you could use this: getBin = lambda x: x >= 0 and str(bin(x))[2:] or "-" + str(bin(x))[3:] print(getBin(3)) >>> 11 print(getBin(-3)) >>> -11 When you want a n-bit representation: getBin = lambda x, n: x >= 0 and str(bin(x))[2:].zfill(n) or "-" + str(bin(x))[3:].zfill(n) >>> getBin(12,32) '00000000000000000000000000001100' >>> getBin(-12,32) '-00000000000000000000000000001100' Unless I'm misunderstanding what you mean by binary string I think the module you are looking for is struct You could use this wonderful trick. a = 10 print "{0:b}".format(a) 1010 a = 100 print "{0:b}".format(a) 1100100 Or you could use it this way: a = 15 b = "{0:b}".format(a) print b 1111 Here is the code I've just implemented. This is not a def inttobinary(number): if number == 0: return str(0) result ="" while (number != 0): remainder = number%2 number = number/2 result += str(remainder) return result[::-1] # to invert the string Somewhat similar solution def to_bin(dec): flag = True bin_str = '' while flag: remainder = dec % 2 quotient = dec / 2 if quotient == 0: flag = False bin_str += str(remainder) dec = quotient bin_str = bin_str[::-1] # reverse the string return bin_str here is simple solution using the divmod() fucntion which returns the reminder and the result of a division without the fraction. def dectobin(number): bin = '' while (number >= 1): number, rem = divmod(number, 2) bin = bin + str(rem) return bin Along a similar line to Yusuf Yazici's answer def intToBin(n): if(n < 0): print "Sorry, invalid input." elif(n == 0): print n else: result = "" while(n != 0): result += str(n%2) n /= 2 print result[::-1] I adjusted it so that the only variable being mutated is result (and n of course). If you need to use this function elsewhere (i.e., have the result used by another module), consider the following adjustment: def intToBin(n): if(n < 0): return -1 elif(n == 0): return str(n) else: result = "" while(n != 0): result += str(n%2) n /= 2 return result[::-1] So -1 will be your
App-engine asynchronous example: from google.appengine.api import urlfetch rpc = urlfetch.create_rpc() urlfetch.make_fetch_call(rpc, "http://www.google.com/") try: result = rpc.get_result() if result.status_code == 200: text = result.content # ... except urlfetch.DownloadError: raise return text How do I do this in tornado? I've tried (using swirl) with something like: import swirl http = tornado.httpclient.AsyncHTTPClient() uri = 'http://www.google.com/' try: response = yield lambda cb: http.fetch(uri, cb) if response.code == 200: text = result.content # ... except tornado.httpclient.HTTPError: raise return text But I get a Syntax Error because I can't have a return and a yield in the same function...
fgin Impossible définir les langues du système/ kcmshell4 language-selector Je viens d'installer 12.10, depuis le DVD d'install. Je veux installer le chinois, pour une utilisation dans toutes las applications. Ibus s'intalle sans problème, de meme que tous les packs de langue. MAIS, impossible de changer les langues du systèmes (et donc de configuer chinois / Ibus). La commande : kcmshell4 language-selector retourne : kcmshell(2650)/python (plugin): Failed to import module Traceback (most recent call last): File "/usr/share/kde4/apps/language-selector/language-selector.py", line 4, in <module> from LanguageSelector.qt.QtLanguageSelector import QtLanguageSelector ImportError: No module named LanguageSelector.qt.QtLanguageSelector kcmshell(2650)/python (plugin): Failed to import module kcmshell(2650)/kcontrol KCModuleLoader::loadModule: This module has no valid entry symbol at all. The reason could be that it's still using K_EXPORT_COMPONENT_FACTORY with a custom X-KDE-FactoryName which is not supported anymore Et l'écran indique "Le service "Langues du système" ne fournit pas d'interface "KCModule" avec le mot clé "language-selector/language-selector.py". J'ai suivi un fil de discussion précèdent ( "[Résolu]language selector" ... en fait pas du tout résolu... ), j'ai réinstallé les languages pack, ibus, python, im-selector. Le problème reste le meme, et j'ai vraiment de besoin de configuer le chinois avec IME ibus :-(. Que puis-je faire ? Hors ligne φlip Re : Impossible définir les langues du système/ kcmshell4 language-selector Essaie ça: sudo apt-get update sudo apt-get install --reinstall python-kde4 sudo apt-get install language-selector-kde language-pack-kde-zh-hans + sans garantie: im-switch -s ibus Dernière modification par φlip (Le 18/12/2012, à 00:24) Hors ligne fgin Re : Impossible définir les langues du système/ kcmshell4 language-selector Merci φlip. Malheureusement aucun effet. - pour la partie update/reinstall, je l'avais déjà faite (pas dans cet ordre).. J'ai réssayé par acquit de conscience mais aucun changement. - pour im-switch, j'ai réinstallé im-switch ( vs im-config), et essayé, mais le problème n'est pas vraiment avec l'IME je crois, mais avant : c'est au moment de définirdes langues supplémentaires dans le système, indépendamment de tout IME qu'on voit le pb ( commandekcmshell4 language-selector) . Y a-t-il des utilisateurs 12.10 qui ont réussi à installer d'autres langues ? Je n'ai vraiment rien fait de particulier à l'install de Kubuntu !... En partant du lien fourni, j'ai l'impression qu'il y a un BUG lié à ceci ( [Bug 1013626] Re: systemsettings crashed with ImportError in /usr/share/kde4/apps/language-selector/language -selector.py: No module named LanguageSelector.qt.QtLanguageSelector - msg#37715), mais je n'arrive pas comprendre ce qu'il y vraiment derrière ce bug, et s'il y a des workarounds. Mais alors je ne devrais vraiment pas etre seul à voir ce problème ?? Toute suggestion est la bienvenue. Hors ligne φlip Re : Impossible définir les langues du système/ kcmshell4 language-selector Ce bug de language -selector.py est effectivement parfaitement connu. Je l'ai eu sur certaines installations. Ce qui me surprend est qu'il subsiste dans une installation Quantal updatée. Plus de souvenir clair de la solution, à part un update. L'origine du problème est la transition python 2->3, language-selector-common dépendant maintenant de python3. que dit ceci: dpkg -l language-selector* python[2-3]*|grep ^i|cut -b4-60|sort Dernière modification par φlip (Le 18/12/2012, à 15:51) Hors ligne fgin Re : Impossible définir les langues du système/ kcmshell4 language-selector Résultat de dpkg -l language-selector* python[2-3]*|grep ^i|cut -b4-60|sort language-selector-common 0.90 language-selector-kde 0.90 python2.7 2.7.3-5ubuntu4 python2.7-minimal 2.7.3-5ubuntu4 python3.2 3.2.3-6ubuntu3 python3.2-minimal 3.2.3-6ubuntu3 python3 3.2.3-5ubuntu1 python3-apport 2.6.1-0ubuntu9 python3-apt 0.8.7ubuntu4 python3-aptdaemon 0.45+bzr861-0u python3-aptdaemon.pkcompat 0.45+bzr861-0u python3-dbus 1.1.1-1 python3-defer 1.0.6-2 python3-distupgrade 1:0.190.4 python3-gdbm 3.3.0-1 python3-gi 3.4.0-1ubuntu0 python3-minimal 3.2.3-5ubuntu1 python3-pkg-resources 0.6.28-1ubuntu python3-problem-report 2.6.1-0ubuntu9 python3-pykde4 4:4.9.3-0ubunt python3-pyqt4 4.9.3-4 python3-sip 4.13.3-2 python3-software-properties 0.92.9 python3-update-manager 1:0.174.3 python3-xkit 0.5.0 Hors ligne φlip Re : Impossible définir les langues du système/ kcmshell4 language-selector Je pense que le mélange des versions python fait de la mayonnaise: 2.7 - 3 - 3.2 python2.7 2.7.3-5ubuntu4 python3 3.2.3-5ubuntu1 python3.2 3.2.3-6ubuntu3 python-kde4 dépend de: python (>= 2.7.1-0ubuntu2), python (<< 2.8) Le problème est que les installations patatoïdales complètes genre DVD installent tellement de choses, que la probabilité de conflit est accrue. La piste intéressante serait de comparer à un autre utilisateur qui fait des installations liveCD complète pour voir s'il a le même problème avec Quantal. Ou qu'un expert Python démerde le sac de nœuds entre versions python 3/3.2 (variables d'environnement etc). Pour information, sans Python 3.2 installé, je n'ai pas ce problème. que dit le retour de ceci: grep -r LanguageSelector.qt.QtLanguageSelector /usr/lib/python* ls /usr/lib/python*/dist-packages/LanguageSelector/qt/QtLanguageSelector.py grep python /usr/share/kde4/apps/language-selector/language-selector.py Et aussi... language-selector-kde est actuellement en version 0.98 (au lieu de 0.90 dans Quantal). Tu ne prendrais pas de grands risques en essayant ceci: cd /tmp wget http://mirror.ovh.net/ubuntu//pool/universe/l/language-selector/language-selector-kde_0.98_all.deb sudo dpkg -i language-selector-kde_0.98_all.deb Dernière modification par φlip (Le 19/12/2012, à 16:52) Hors ligne fgin Re : Impossible définir les langues du système/ kcmshell4 language-selector C'est vraiment sympa de ta part de m'aider sur ce point. Merci. Voici le résultat des commandes. grep -r LanguageSelector.qt.QtLanguageSelector /usr/lib/python* : /usr/lib/python3/dist-packages/language_selector-0.1.egg-info/SOURCES.txt:LanguageSelector/qt/QtLanguageSelector.py /usr/lib/python3/dist-packages/language_selector-0.1.egg-info/SOURCES.txt:LanguageSelector/qt/QtLanguageSelectorGUI.py /usr/lib/python3/dist-packages/LanguageSelector/qt/QtLanguageSelector.py:from LanguageSelector.qt.QtLanguageSelectorGUI import Ui_QtLanguageSelectorGUI ls /usr/lib/python*/dist-packages/LanguageSelector/qt/QtLanguageSelector.py: /usr/lib/python3/dist-packages/LanguageSelector/qt/QtLanguageSelector.py grep python /usr/share/kde4/apps/language-selector/language-selector.py : #!/usr/bin/python3 Quant à ta dernière proposition, j'ai une dépendance avec language-selector-common 0.98 que je n'ai pas réussi à résoudre. dpkg: des problèmes de dépendances empêchent la configuration de language-selector-kde : language-selector-kde dépend de language-selector-common (= 0.98) ; cependant la version de language-selector-common sur le système est 0.90. Je vais essayer de regarder de plus prés les modules Python. Hors ligne φlip Re : Impossible définir les langues du système/ kcmshell4 language-selector "dépendance avec language-selector-common 0.98 que je n'ai pas réussi à résoudre" Oubli de ma part: cd /tmp wget http://mirror.ovh.net/ubuntu//pool/main/l/language-selector/language-selector-common_0.98_all.deb sudo dpkg -i language-selector-common_0.98_all.deb sudo apt-get install -f Dernière modification par φlip (Le 19/12/2012, à 19:36) Hors ligne fgin Re : Impossible définir les langues du système/ kcmshell4 language-selector Merci. ... malheureusement aucun changement. Meme message d'erreur. kcmshell4 language-selector kcmshell(4572)/python (plugin): Failed to import module Traceback (most recent call last): File "/usr/share/kde4/apps/language-selector/language-selector.py", line 4, in <module> from LanguageSelector.qt.QtLanguageSelector import QtLanguageSelector ImportError: No module named LanguageSelector.qt.QtLanguageSelector kcmshell(4572)/python (plugin): Failed to import module kcmshell(4572)/kcontrol KCModuleLoader::loadModule: This module has no valid entry symbol at all. The reason could be that it's still using K_EXPORT_COMPONENT_FACTORY with a custom X-KDE-FactoryName which is not supported anymore [ Je viens de trouver ce bug : Bug 1066771 Re: language-selector-kde unusable qui décrit exactement mon problème... Et apparemment pas de solution. Je me demande si je ne devrais pas retourner sur une version antérieure de KDE, ou essayer Gnome-Deskop en attendant un fix ? Hors ligne φlip Re : Impossible définir les langues du système/ kcmshell4 language-selector Oui ce bug est bien connu, comme déjà dit. Je l'ai connu aussi avec le module imprimante (problème de pythonerie), bien avant Quantal et dans Debian. C'est la transition python 2->3 qui n'est pas clean et stabilisée. Mais ..... pourquoi ce bug n'existe pas dans certaines installations Quantal, et pourquoi dans d'autres ? C'est le point qui reste encore un peu mystérieux pour le moment. Personnellement, je n'installe jamais en "liveCD" et j'ai donc une installation plus propre. Mais je disais: La piste intéressante serait de comparer à un autre utilisateur qui fait des installations liveCD complète pour voir s'il a le même problème avec Quantal. "Je me demande si je ne devrais pas retourner sur une version antérieure de KDE, ou essayer Gnome-Deskop en attendant un fix ?" Ou l'inverse ... pousser vers raring. Mais je dis ça, je dis rien. Dernière modification par φlip (Le 19/12/2012, à 20:14) Hors ligne
#2826 Le 12/03/2013, à 22:08 k3c Re : TVDownloader: télécharger les médias du net ! @ mulder29 Tu ne donnes pas de renseignements pour qu'on puisse t'aider as-tu passé la commande sudo apt-get ... tu as eu un message d'erreur ? Hors ligne #2827 Le 12/03/2013, à 22:57 mulder29 Re : TVDownloader: télécharger les médias du net ! Ben oui, j'ai passé la commande sudo apt-get install build-essential Ensuite, je passe la commande make posix make mingw et c'est ce qui me donne "commande introuvable" o_O Hors ligne #2828 Le 14/03/2013, à 21:57 Tuxmouraille Re : TVDownloader: télécharger les médias du net ! Bonsoir, Juste comme ça ne passant. Je crois que le problème est que la commande mingw n'existe tout simplement pas sur Ubuntu. MinGW est un outils pour compiler sous Windows des logiciels conçus pour Linux. http://www.mingw.org/ http://fr.wikipedia.org/wiki/Mingw Dernière modification par Tuxmouraille (Le 14/03/2013, à 21:59) Le support d'Optimus pour Linux. Ubuntu 12.10 64 bits, portable ASUS N53SN-SZ161V, Intel® Core™ i5-2410M @ 2.30GHz, 8080MB SODIM Ram, NVIDIA® GeForce™ GT 550M Hors ligne #2829 Le 15/03/2013, à 00:00 thom83 Re : TVDownloader: télécharger les médias du net ! Bonsoir, Au vu de la réponse donnée par Tuxmouraille, j'ai regardé le README en question et effectivement il est que - pour Linux il faut faire «make posix» - pour Windows il faut faire «make mingw» Le problème vient du fait que le README est ambigu au premier abord. Bien sur il faut que cela soit fait dans le dossier où les sources ont été décompressées. Dernière modification par thom83 (Le 15/03/2013, à 00:03) Hors ligne #2830 Le 15/03/2013, à 17:15 11gjm Re : TVDownloader: télécharger les médias du net ! Bonjour , Juste pour info : épisodes 11 et 12 d' E N G R E N A G E S , sur D8 . Ceci pour indiquer les pistes de recherches . Si , non téléchargement . @echo on rtmpdump -r "rtmpe://geo2-vod-fms.canalplus.fr:1935/ondemand" -a "ondemand?ovpfv=1.1" -f "WIN 11,5,502,110" -W "http://player.canalplus.fr/site/flash/player.swf" -p "http://www.d8.tv/d8-series/pid5210-d8-engrenages.html?vid=829164" -C Z: -y "mp4:/ondemand/geo2/1303/1052501_11_800k.mp4" -o 1052501_11_800k.flv pause rtmpdump -r "rtmpe://geo2-vod-fms.canalplus.fr:1935/ondemand" -a "ondemand?ovpfv=1.1" -f "WIN 11,5,502,110" -W "http://player.canalplus.fr/site/flash/player.swf" -p "http://www.d8.tv/d8-series/pid5210-d8-engrenages.html?vid=829148" -C Z: -y "mp4:/ondemand/geo2/1303/1052501_12_800k.mp4" -o 1052501_12_800k.flv pause :fin Cordialement . Hors ligne #2831 Le 15/03/2013, à 19:30 mulder29 Re : TVDownloader: télécharger les médias du net ! Ah okk, et... comment je fais moi ? o_O Hors ligne #2832 Le 15/03/2013, à 20:45 thom83 Re : TVDownloader: télécharger les médias du net ! Avec Ubuntu 12.04, la commande python Bureau/Scripts-TV-Replay/D8.py http://www.d8.tv/d8-series/pid5210-d8-engrenages.html?vid=829164 lance celle-ci rtmpdump -r "rtmp://geo2-vod-fms.canalplus.fr/ondemand/geo2/1303/1052501_12_1500k.mp4" -o "Engrenages_-_Episode_11.mp4" --resume pour l'épisode 11 @ mulder29 Quelle version de linux utilise-tu ? Hors ligne #2833 Le 15/03/2013, à 23:54 mulder29 Re : TVDownloader: télécharger les médias du net ! Linux version 3.2.0-38-generic (buildd@panlong) (gcc version 4.6.3 (Ubuntu/Linaro 4.6.3-1ubuntu5) ) #61-Ubuntu SMP Tue Feb 19 12:20:02 UTC 2013 Voila, j'espère que ça suffira ? Hors ligne #2834 Le 16/03/2013, à 00:02 thom83 Re : TVDownloader: télécharger les médias du net ! @ k3c Il y a un petit problème dans le code de D8.py qui établit la commande rtmpdump. En effet, celle utilisée pour l'épisode 11 charge l'épisode 12. Au #2832 ci-dessus, l'erreur est visible si l'on compare «1052501_12_1500k.mp4» et «Episode_11.mp4». En substituant le bon numéro à la position 1, le résultat est bon à l'arrivée. Hors ligne #2835 Le 16/03/2013, à 00:18 thom83 Re : TVDownloader: télécharger les médias du net ! @ mulder29 Le paquet rtmpdump présent à cette adresse http://packages.ubuntu.com/fr/precise/rtmpdump devrait convenir, me semble-t-il. Télécharger la version qui va bien (i386 ou amd64 suivant l'architecture) et l'installer grâce à gdebi qui demandera éventuellement les éléments nécessaires pour satisfaire les dépendances (il faudra chaque fois ajouter ces dépendances pour pouvoir continuer). Bon courage. Hors ligne #2836 Le 16/03/2013, à 02:29 mulder29 Re : TVDownloader: télécharger les médias du net ! Et maintenant ? La commande : python d8.py http://www.d8.tv/d8-divertissement/pid5 … vid=828406 que je dois taper dans le Terminal lancé à partir du fichier D8 ne fonctionne plus au fait. Hors ligne #2837 Le 16/03/2013, à 10:02 thom83 Re : TVDownloader: télécharger les médias du net ! Bonjour mulder29 ainsi que les autres participants, Si le lien ci-dessus n'est pas tronqué, c'est normal que D8.py ne fonctionne pas car j'obtiens l'erreur suivante : Erreur Page indisponible Vérifiez que l'adresse ne contient pas une erreur de frappe Si l'installation du paquet rtmpdump que j'ai mentionné s'est bien passée, D8.py doit remplir son rôle (à quelques détails près). Dernière modification par thom83 (Le 16/03/2013, à 10:04) Hors ligne #2838 Le 16/03/2013, à 10:10 k3c Re : TVDownloader: télécharger les médias du net ! @ k3c Il y a un petit problème dans le code de D8.py qui établit la commande rtmpdump. En effet, celle utilisée pour l'épisode 11 charge l'épisode 12. Au #2832 ci-dessus, l'erreur est visible si l'on compare «1052501_12_1500k.mp4» et «Episode_11.mp4». En substituant le bon numéro à la position 1, le résultat est bon à l'arrivée. je sais. les vidéos sont sur une page se terminant par d8 ou cplus et je n'ai pas trouvé de méthode simple pour savoir sur quelle page chercher je n'ai pas accès à un ordi avant mardi Hors ligne #2839 Le 16/03/2013, à 10:42 thom83 Re : TVDownloader: télécharger les médias du net ! @ k3c Le problème n'est pas trop génant dans la mesure où on se montre attentif au résultat. En effet, il semble qu'il suffise de modifier le numéro de l'épisode dans la commande pour réussir le chargement. Par exemple, pour ceux qui auraient loupé un épisode de la saison, il est encore possible de le récupérer en mettant le bon numéro dans la commande. Encore une fois , c'est une chance d'avoir cet outil. Merci. Hors ligne #2840 Le 16/03/2013, à 10:52 k3c Re : TVDownloader: télécharger les médias du net ! Je vais re-examiner ce que fait le plugin xbmc pour différencier les vidéos d8 et cplus il y a peut-être plus simple, faut que je voie ce qu'il y a de commun dans le html des différentes pages Hors ligne #2841 Le 16/03/2013, à 17:53 mulder29 Re : TVDownloader: télécharger les médias du net ! Bonjour mulder29 ainsi que les autres participants, Si le lien ci-dessus n'est pas tronqué, c'est normal que D8.py ne fonctionne pas car j'obtiens l'erreur suivante : Erreur Page indisponible Vérifiez que l'adresse ne contient pas une erreur de frappe Si l'installation du paquet rtmpdump que j'ai mentionné s'est bien passée, D8.py doit remplir son rôle (à quelques détails près). J'obtiens Traceback (most recent call last): File "d8.py", line 8, in <module> a = urlopen(sys.argv[1]).read() File "/usr/lib/python2.7/urllib2.py", line 126, in urlopen return _opener.open(url, data, timeout) File "/usr/lib/python2.7/urllib2.py", line 406, in open response = meth(req, response) File "/usr/lib/python2.7/urllib2.py", line 519, in http_response 'http', request, response, code, msg, hdrs) File "/usr/lib/python2.7/urllib2.py", line 444, in error return self._call_chain(*args) File "/usr/lib/python2.7/urllib2.py", line 378, in _call_chain result = func(*args) File "/usr/lib/python2.7/urllib2.py", line 527, in http_error_default raise HTTPError(req.get_full_url(), code, msg, hdrs, fp) urllib2.HTTPError: HTTP Error 404: Not Found Donc c'est normal ? o_O Hors ligne #2842 Le 16/03/2013, à 18:28 thom83 Re : TVDownloader: télécharger les médias du net ! @ mulder29 16 lignes d'erreur... Le message d'erreur est-il complet ou pas ? Il correspond à quelle commande ? Le fichier D8.py est-il celui créé pour être utilisé avec linux ou celui pour windows ? La question «Donc c'est normal ?» est-elle ironique ou pas ? Des questions simples et détaillées permettraient probablement de renseigner mieux. Je vais bientôt m'absenter pour la soirée et ne serai probablement en mesure de répondre que demain. À + Dernière modification par thom83 (Le 16/03/2013, à 18:30) Hors ligne #2843 Le 16/03/2013, à 19:08 mulder29 Re : TVDownloader: télécharger les médias du net ! @thom83 Les lignes d'erreur correspondent au résultat quand je tapes la commande suivante : python d8.py http://www.d8.tv/d8-divertissement/pid5 … vid=828406 sur le terminal que je lance dans le fichier D8 qui contient le fichier python en question le fichier D8.py a été créé pour être utilisé avec linux. La question "donc c'est normal ?" c'était juste pour vérifier que l'on que lorsque tu parles du message d'erreur un peu plus haut : Erreur Page indisponible Vérifiez que l'adresse ne contient pas une erreur de frappe On parlait bien du même résultat, pour vérifier qu'il n'y avait pas de méprise. ;-) Hors ligne #2844 Le 16/03/2013, à 19:34 thom83 Re : TVDownloader: télécharger les médias du net ! Quand je clique sur le lien en couleur, c-à-d http://www.d8.tv/d8-divertissement/pid5 , j'obtiens effectivement l'erreur e00n question sur la page ouverte. Manifestement l'adresse de l'émission est incomplète. Sur la page où j'ai l'erreur, si je clique sur divertissement, toutes les émissions comportent un nombre à 4 chiffres suivi d'autres mentions. C'est l'adresse complète qui doit figurer dans la commande «python D8.py ....» Hors ligne #2845 Le 16/03/2013, à 20:43 ynad Re : TVDownloader: télécharger les médias du net ! bonsoir, ça marche avec: python d8-0.2.py http://www.d8.tv/d8-series/pid5210-d8-engrenages.html Hors ligne #2846 Le 16/03/2013, à 22:50 11gjm Re : TVDownloader: télécharger les médias du net ! Bonjour , Pour les mélomanes , ci-après un lien pour "visionner" "La folle journée de Nantes 2013". http://forum.ubuntu-fr.org/viewtopic.ph … #p12928221 Cordialement . Hors ligne #2847 Le 17/03/2013, à 14:24 mulder29 Re : TVDownloader: télécharger les médias du net ! Ah, donc, j'ai tapé ma ligne de commande sur le terminal lancé à partir du fichier D8 : python d8.py http://www.d8.tv/d8-divertissement/pid5 … vid=828406 Même chose qu'auparavant : le téléchargement et lorsque ça atteint 99,99% et "dowload complte", ça efface le fichier et relance le téléchargement. Je pensais arrêté le processus à 99,5% de façon à avoir au moins un fichier, mais la vidéo serait elle lisible ? Hors ligne #2848 Le 17/03/2013, à 16:32 11gjm Re : TVDownloader: télécharger les médias du net ! Bonjour , @mulder : "c'est dur la culture !!!" >j'ai tapé "python d8.py ..." Il faut utiliser le "D8 0.2" , voir post #2816 . > Même chose qu'auparavant ... D'après ce que tu indiquais tu n'arrivais à rien . Ça semble avancer (???) . > Je pensais arrêter le processus à 99,5% de façon à avoir au moins un fichier, mais la vidéo serait elle lisible ? 1) avec le "D8 0.2" , normalement ça ne devrait pas recommencer . 2) sinon , fais l'essai de lecture , tu verras bien ce que cela donne . A+ . Hors ligne #2849 Le 17/03/2013, à 18:19 mulder29 Re : TVDownloader: télécharger les médias du net ! @mulder : "c'est dur la culture !!!" >j'ai tapé "python d8.py ..." Il faut utiliser le "D8 0.2" , voir post #2816 . Euh un post que je ne comprends absolument pas... o_O Pour mulder29 tu noteras la ligne qui permet de retrouver l'identifiant de la vidéo id = s.findAll('div',attrs={"class":u"block-common block-player-programme"})[0]('canal:player')[0]['videoid'] Noter ? C'est à dire ? je le note sur le terminal ? comment je l'utilise ? sans un parser HTML, c'est galère... C'est quoi un parser HTML ? le code #!/usr/bin/env python # -*- coding:utf-8 -*- # D8 version 0.2 par k3c from urllib2 import urlopen from lxml import objectify import bs4 as BeautifulSoup import sys, subprocess, re a = urlopen(sys.argv[1]).read() s = BeautifulSoup.BeautifulSoup(a) url = '' def get_HD(d8_cplus): zz = urlopen('http://service.canal-plus.com/video/rest/getVideosLiees/'+d8_cplus+'/'+id).read() root = objectify.fromstring(zz) for element in root.iter(): if element.tag == 'HD': Que je tapes où ? dans le Terminal ? Dernière modification par mulder29 (Le 17/03/2013, à 18:20) Hors ligne #2850 Le 17/03/2013, à 19:07 11gjm Re : TVDownloader: télécharger les médias du net ! Bonjour , @mulder29 : Tu retournes au post #2816 . Sous la ligne "le code" , il y a une fenêtre noire contenant des écritures . Avec la souris , tu sélectionnes tous les écrits , tu effectues un copier . Tu ouvres un éditeur de texte , tu y colles les écrits précédents . Tu sauvegardes le fichier sous le nom de D8_02.py . Dans le même répertoire que ton D8.py , qui semble fonctionner (enfin presque !!!) . Dans la fenêtre du terminal , tu tapes toute la commande : python D8_02.py "http://www.d8.tv/d8-divertissement/pid5 … vid=828406" et tu tapes sur la touche entrée , pour lancer le process . Normalement , cela devrait fonctionner . Je ne comprends pourquoi tu coinces , alors que la procédure est simple . A+ . Hors ligne
Using operators" /> The Operator class and its subclasses are function factories. It means that in order to perform computations using an operator class, we first need to create an instance of it: A = FFTOperator(1024) and then use the instance as a function. The object A is a Python callable and takes numpy ’s N-dimensional array object as inputs and outputs: >>> pyoperators.memory.verbose = True >>> x = np.arange(1024, dtype=complex) >>> y = A(x) Info: Allocating (1024,) complex128 = 0.015625 MiB in FFTOperator. When using the operator in such a way, a no-side-effect policy is enforced: it is garanteed that the input array will not be modified and as a consequence, a new buffer will be allocated every time the operator is called. >>> pyoperators.memory.verbose = True >>> y = np.empty(1024, dtype=complex) >>> A(x, out=y) # or simply A(x, y) In-place operations can be performed in a similar way by providing the input array as the output. In this documentation, the same term operator will be used for the Operator class, subclasses and instances, though the context will disambiguate the meaning. Operators are easy to manipulate: they can be multiplied by a scalar, combined by addition, element-wise multiplication, composition or partition. The synthax uses the usual arithmetic signs, but be careful that the * sign depends on the operands’ linear flag: if both operands are linear, it stands for composition otherwise it stands for element-wise multiplication. >>> A = DiagonalOperator([1., 2.]) >>> B = DiagonalOperator([1., 1.]) >>> A.flags.linear, B.flags.linear (True, True) >>> x = [2, 2] >>> np.allclose((3 * A)(x), 3 * A(x)) True >>> np.allclose((A + B)(x), A(x) + B(x)) True >>> np.allclose(A(B)(x), A(B(x))) True >>> np.allclose((A * B)(x), A(B(x))) # because A and B are linear True >>> np.allclose(MultiplicationOperator([A, B])(x), A(x) * B(x)) True Unless an algebraic simplification is possible, the resulting operator is a composite operator: addition AdditionOperator \( A(x) + B(x) \) element-wise multiplication MultiplicationOperator \( A(x) \times B(x) \) composition CompositionOperator \( A(B(x)) \) partition BlockColumnOperator \( \begin{bmatrix} A \\ B \end{bmatrix} \) BlockDiagonalOperator \( \begin{bmatrix} A & 0 \\ 0 & B \end{bmatrix} \) BlockRowOperator \( \begin{bmatrix} A & B \end{bmatrix} \) The conjugate, transpose, adjoint and inverse operators, when defined, can be obtained by using the following attributes: conjugate .C transpose .T adjoint .H inverse .I For example: >>> o = Operator() >>> o.T.C is o.H True >>> o = Operator(flags='real,symmetric') >>> o.T is o.H is o True Operators can be created in two ways. The first one is to define a direct function which will replace the usual matrix-vector operation and to instantiate the Operator class by passing the function as the first argument: >>> def f(x, out): ... out[...] = 2 * x >>> P = Operator(f) Transforming a single-argument ufunc into an Operator instance is straightforward: >>> sqrt = Operator(np.sqrt) The alternative way is more flexible, it consists in subclassing the Operator class: >>> class MyOperator(Operator): ... def __init__(self, coef): ... self.coef = coef ... Operator.__init__(self) ... def direct(self, x, out): ... out[...] = self.coef * x ... >>> P = MyOperator(2) The operator’s data type is a data type object (an instance of numpy.dtype class). It can be set using the dtype keyword. >>> a = Operator(dtype=np.float32) >>> a.dtype dtype('float32') It is used to determine the output data type, as being the common type of the operator and input data types, following the standard coercion rules. For example: >>> a = DiagonalOperator([1, 2]) >>> a.dtype dtype('int64') >>> a([1, 1j]).dtype dtype('complex128') >>> a(np.array([2, 2], np.uint8)).dtype dtype('int64') The operator’s data type can be equal to None (the default), in which case the output data type is always the input data type. An operator P can have the following algebraic properties, accessible with the flags attribute: linear real conj P = P symmetric P T = P hermitian P H = P idempotent P P = P involutary P P = I orthogonal P T P = I unitary P H P = I They can be set using the flags keyword: >>> def f(x, out): ... out[...] = -x >>> P = Operator(direct=f, flags='symmetric,involutary') or by using operator decorators: >>> @pyoperators.flags.symmetric ... @pyoperators.flags.involutary ... class MyOperator(Operator): ... def direct(self, x, out): ... out[...] = -x ... >>> P = MyOperator() These flags are used to simplify expressions: >>> C = Operator(flags='idempotent') >>> C * C is C True >>> D = Operator(flags='involutary') >>> D * D IdentityOperator() Other properties are described by the flags attribute: square input and output have the same shape separable can be separated if applied by or applying on a block operator inplace handle inplace operations (more info) update_output can update the operator output (more info) alignment_input only handle aligned input alignment_output only handle aligned output contiguous_input only handle contiguous input contiguous_output only handle contiguous output shape_input explicit, implicit or unconstrained (more info) shape_output explicit, implicit or unconstrained (more info) The Operator class has a flexible way to deal with inputs and outputs. Its output shape can be: explicit: the operator only returns arrays of a specified shape. implicit: the output shape is derived from the input shape through the method reshapein. unconstrained: the output shape does not depend on the input shape. Likewise, its input shape can be: explicit: the operator only applies over arrays of a specified shape. implicit: the operator can only apply over arrays whose shape is derived from the output shape through the method reshapeout. unconstrained: any input shape can be handled by the operator. The operator’s flags record this information in the shape_input and shape_output attributes. If the output (input) shape is shapeout(in) is equal to that tuple. The reshapein(out) method should not be overridden.shapeout(in) is None and the method reshapein(out) applied over the input (output) shape must return the output (input) shape.shapeout(in) is None. The reshapein(out) should not be overridden. The explicit shapes are specified using the shapein and shapeout keywords. The following example appends a zero to a fixed-size input. >>> class Op(Operator): ... def __init__(self, value): ... Operator.__init__(self, shapein=3, shapeout=4) ... self.value = value ... def direct(self, input, output): ... output[0:3] = input ... output[3] = self.value ... >>> op = Op(0.) >>> op.shapein, op.shapeout ((3,), (4,)) >>> op([3., 2. ,1.]) array([3., 2., 1., 0.]) This case does not happen because the input shape can be obtained from the explicit output shape and the reshapeout method. Such operator is obtained by only setting the shapeout keyword. The following example returns an array of size 2 whose elements are the sum and product of the input elements. >>> class Op(Operator): ... def __init__(self): ... Operator.__init__(self, shapeout=2) ... def direct(self, input, output): ... output[0] = np.sum(input) ... output[1] = np.product(input) ... >>> op = Op() >>> op.shapein, op.shapeout (None, (2,)) >>> op([1., 2., 3., 4.]) array([ 10., 24.]) This case does not happen because the output shape can be obtained from the explicit input shape and the reshapein method. This operator defines both reshapein and reshapeout methods. Square-decorated operators are examples of this kind of operators: their reshapein and reshapeout methods simply are lambda x:x by default. The interest for an implicit output shape operator of also handling implicit input shapes arises when a reverse operator such as .T or .H is defined because in which the reshapein and reshapeout methods are swapped. The operator in the following example adds a zero to its input. >>> @pyoperators.flags.linear ... class Op(Operator): ... def direct(self, input, output): ... output[:-1] = input ... output[-1] = 0 ... def transpose(self, input, output): ... output[...] = input[:-1] ... def reshapein(self, shapein): ... return (shapein[0]+1,) ... def reshapeout(self, shapeout): ... return (shapeout[0]-1,) ... >>> op = Op() >>> op.shapein, op.shapeout (None, None) >>> op([1,2,3]) array([1, 2, 3, 0]) >>> op.T([1, 2, 3, 0]) array([1, 2, 3]) >>> np.array_equal(op.T.todense(4), op.todense(3).T) True This operator can handle arbitrary inputs and its output dimensions are derived from those of the input. To obtain such operator, one simply has to define a reshapein method, which takes the input shape as input and returns the output shape. The following example stretches the input by a factor 2 along the first dimension. >>> class Op(Operator): ... def direct(self, input, output): ... output[::2,...] = input ... output[1::2,...] = input ... def reshapein(self, shapein): ... return (2*shapein[0],) + shapein[1:] ... >>> op = Op() >>> op.shapein, op.shapeout (None, None) >>> op.reshapein((2,3)) (4,3) >>> op([[1, 2, 3], ... [2, 3, 4]]) array([[1, 2, 3], [1, 2, 3], [2, 3, 4], [2, 3, 4]]) Such operator is obtained by only setting the shapein keyword. The following example fills the output with an arithmetic progression whose coefficients are given by the two elements of the input. >>> class Op(Operator): ... def __init__(self): ... Operator.__init__(self, shapein=2) ... def direct(self, input, output): ... output[...] = input[0] + input[1] * np.arange(output.size).reshape(output.shape) ... >>> op = Op() >>> op.shapein, op.shapeout ((2,), None) >>> y = np.empty((2,2), int) >>> op([1, 2], out=y) array([[1, 3], [5, 7]]) This kind of operator is of limited interest and is shown here for completeness. Since there is no way to compute the output shape from the input, the output argument must be provided. The operator in this example returns the input from which the last element has been removed. >>> class Op(Operator): ... def direct(self, input, output): ... output[...] = input[:-1] ... def reshapeout(self, shapeout): ... return (shapeout[0] + 1,) ... >>> op = Op() >>> op.shapein, op.shapeout (None, None) >>> op([1,2,3]) ValueError: The output shape of an implicit input shape and unconstrained output shape operator cannot be inferred. >>> y = np.empty(4) >>> op([1.,2.,3.,4.,5.], out=y) array([ 1., 2., 3., 4.]) >>> op(np.arange(4.), out=y) ValueError: The input has an invalid shape '(4,)'. Expected shape is '(5,)'. This kind of operator is the default: this behaviour is obtained by not setting the shapein and shapeout keywords and not defining the reshapein and reshapeout methods. ConstantOperator is an example of such an operator. If the output is not provided as argument, it is assumed that the output shape is that of the input. The following example fills the output with the value of the sum of the elements of the input. >>> class Op(Operator): ... def __init__(self): ... Operator.__init__(self) ... def direct(self, input, output): ... output[...] = np.sum(input) ... >>> op = Op() >>> op.shapein, op.shapeout (None, None) >>> y = np.empty((2,2), int) >>> op([2, 1], out=y) array([[3, 3], [3, 3]]) It is possible to validate the shapes of the input or output of an operator by defining the validatein or validateout methods, which take the shapes of the input or output and raise a ValueError exception to signal a validation failure. There are two advantages of using these methods instead of performing the validation in the direct method: The following example checks the number of axes and the dimension along the axes. >>> class Op(Operator): ... def direct(self, input, output): ... output[...] = 1 ... def validatein(self, shapein): ... if len(shapein) != 2: ... raise ValueError('The number of axes of the input is not 2.') ... if shapein[0] % 16 != 0 or shapein[1] % 16 != 0: ... raise ValueError('The input dimensions are not a multiple of 16.') ... >>> op = Op() >>> op(np.ones(10)) ValueError: The number of axes of the input is not 2. >>> op(np.ones(16,17)) ValueError: The input dimensions are not a multiple of 16. An operator propagates the class of the input array, if the latter subclasses numpy.ndarray: >>> class ndarray2(np.ndarray): ... pass >>> type(I(ndarray2((2,2)))) __main__.ndarray2 It is possible to change the subclass too, by using the classout keyword: >>> class ndarray3(np.ndarray): ... pass >>> I2 = IdentityOperator(classout=ndarray3) >>> type(I2(ndarray((2,2)))) __main__.ndarray3 >>> type(I2(ndarray2((2,2)))) __main__.ndarray3 An operator also propagates attributes: >>> class ndarray2(np.ndarray): ... pass ... >>> x = ndarray2((2,2)) >>> x.foo = 'bar' >>> I(x).foo 'bar' It is also possible to add or change an attribute, by setting the attrout keyword to a dictionary whose keys are the attribute names and whose values are the attribute values: >>> I2 = IdentityOperator(attrout={'foo':'new_bar'}) >>> I2(x).foo 'new_bar' >>> x.foo 'bar' >>> I2(np.ones((2,2))).foo 'new_bar' More flexibility is possible by passing a function, instead of a dictionary, to the attrout keyword. This function expects a dictionary whose keys are the attribute names and whose values are the attribute values (it is the output’s __dict__): >>> class I2Operator(IdentityOperator): ... def __init__(self): ... IdentityOperator.__init__(self, attrout=self.add_history) ... def add_history(self, attr): ... from time import ctime ... if 'history' not in attr: ... attr['history'] = [] ... attr['history'] += [ctime() + ' : ' + self.__class__.__name__] ... >>> I2 = I2Operator() >>> I2(I2(0)).history The set of operators that can be obtained by using any number of times the '.C', '.T', '.H' and '.I' attributes over a given operator is finite. The set is made of the operator itself, its conjugate, transpose, adjoint, inverse, inverse-conjugate, inverse-transpose and inverse-adjoint. It is possible to define such operators by using a unary rule of the form ‘subject’ is ‘predicate’. The subject being 'C' operator’s conjugate 'T' operator’s transpose 'H' operator’s adjoint 'I' operator’s inverse 'IC' operator’s inverse-conjugate 'IT' operator’s inverse-transpose 'IH' operator’s inverse-adjoint and the predicate being '.' the operator itself function callback function For instance, the rule 'C' → '.' means that the operator’s conjugate is the operator itself. This is how the real decorator is translated internally. Likewise, the involutary decorator is represented by the rule 'I' → '.'. Unary rules are attached to an operator by using the set_rule method: >>> @pyoperators.flags.linear ... class Op1(Operator): ... def __init__(self, **keywords): ... Operator.__init__(self, **keywords) ... self.set_rule('T', lambda s: ReverseOperatorFactory(Op2, s)) ... def direct(self, input, output): ... output[:-1] = input ... output[-1] = 0 ... def reshapein(self, shapein): ... return (shapein[0]+1,) ... >>> @pyoperators.flags.linear ... class Op2(Operator): ... def __init__(self, **keywords): ... Operator.__init__(self, **keywords) ... self.set_rule('T', lambda s: ReverseOperatorFactory(Op1, s)) ... def direct(self, input, output): ... output[...] = input[:-1] ... def reshapein(self, shapein): ... return (shapein[0]-1,) ... >>> op1 = Op1() >>> op2 = op1.T >>> type(op2) __main__.Op2 >>> array_equal(op1.todense(4), op2.todense(5).T) True The ReverseOperatorFactory is a convenience factory which returns an operator of the type its first argument and by swapping the attributes attrin(out), classin(out), shapein(out), reshapein(out), toshapein(out), validatein(out) from the second argument. PyOperators has the ability to reduce arithmetic expressions involving operators. This is achieved by means of binary rules which are applied to pairs of operators. Such a rule is specific to an operation (addition, multiplication or composition) and has the form ‘subject1 \& subject2 → predicate’, in which subject1 and subject2 are properties that will be matched against the pair of actual operators. One of them must be ‘.’, and stands for the reference operator to which the rule is attached. For instance and for a given operator P, the subject ('.', '.') will match the pair (P, P), the subject ('T', '.') the pair (P.T, P) and ('.', 'H') the pair (P, P.H). In addition to the subjects that can be used for unary rules, it is possible to specify an Operator subclass for subclass matching. For example the subject (‘.’, Operator) will match any Operator instance on the right of the reference operator. When the pair of operators is matched by the subject, it is replaced by the predicate: '.' reference operator 'C' reference operator’s conjugate 'T' reference operator’s transpose 'H' reference operator’s ajoint 'I' reference operator’s inverse '1' identity operator function callback function For a non-commutative operation (such as composition), the rules attached to the left-hand side operator applying on a right-hand-side operator are combined to the rules of the right-hand side operators applying on a left-hand-side operator: Rules attached to an operator are also prioritised: Let’s examine some examples for the composition operation: ('T', '.') → '1': if the expression P.T*P is matched, it will be replaced by the identity. This is how the orthogonal property is translated into a rule.('.', 'I') → '1': if the expression P*P.I is matched, it will be replaced by the identity.('.', Operator) → myfunc1: every time P appears as the left-hand-side operator of a pair, the callback function myfunc will be called and the pair of operators will be replaced by the result of this function, unless it is None, in which case the pair of operators is unchanged. The rules attached to an operator are stored in the rules attribute. It is a dictionary whose keys can be AdditionOperator, CompositionOperator or MultiplicationOperator. For the non-commutative composition operation, rules are further split according to whether the operator is matched on the left or right hand side of the pair. Rules are added by using the set_rule method, which has 3 arguments: AdditionOperator, CompositionOperator, MultiplicationOperator) The predicate can be a function, in which case it must be a static method with two arguments for the pair of operators that matches the subject. Rules, such as the ones set in the superclass, can be deleted with the del_rule method. In the following example, we construct an operator Power that raises its input to a specified power. We add a rule to reduce the composition of two instances of the Power class. >>> @pyoperators.flags.square ... class Power(Operator): ... def __init__(self, exponent): ... Operator.__init__(self) ... self.exponent = exponent ... self.set_rule(('.', Power), self.rule_power, CompositionOperator) ... def direct(input, output): ... output[...] = input ** self.exponent ... @staticmethod ... def rule_power(p1, p2): ... return Power(p1.exponent * p2.exponent) ... >>> p1 = Power(2) >>> p2 = Power(3) >>> p = p1(p2) >>> type(p) __main__.Power >>> p.exponent 6 in-place operator: operator whose direct method can handle input and output arguments pointing to the same memory location. out-of-place operator: operator whose direct method cannot. Such a property is set by the inplace decorator. For instance, the operator DiagonalOperator is an in-place operator and the following computation >>> d = DiagonalOperator([1, 2]) >>> x = np.array([0, 1]) >>> d(x, x) array([0, 2]) is done in-place and is equivalent to: >>> x = np.array([0, 1]) >>> x *= [1, 2] An in-place operator also has to handle out-of-place operations. In the following example, the operation is performed out-of-place to satisfy the no-side-effect policy: >>> d = DiagonalOperator([1, 2]) >>> x = np.array([0, 1]) >>> y = d(x) >>> y array([0, 2]) which is equivalent to: >>> x = np.array([0, 1]) >>> y = x * [1, 2] The in-place property is useful to avoid intermediate variables. To minimise the memory-cache transfers during a composition, an algorithm has been put in place to determine the intermediate variables to be extracted from the memory manager, by maximising the temporal locality (i.e. by reusing as much as possible the same memory area). This algorithm depends on: As an example, let’s consider the composition of an in-place operator IN by an out-of-place operator OUT. The out-of-place composition >>> (IN * OUT)(x, out=y) requires an intermediate variable only if the size of OUT ‘s output is larger than that of the y variable. Otherwise, y’s buffer is used as output for the OUT operator and the application of the operator IN is performed in-place on y. Concerning the in-place composition: >>> (IN * OUT)(x, out=x) it is not possible to avoid the use of a temporary variable, since a buffer different from the x variable is required for the OUT operator. The application of the operator IN is performed out-of-place because its output is the x variable. Another possibility to avoid temporaries in the addition or element-wise multiplication of operators is to give an operator the possibility to directly update the output argument in the direct method. It is particularly interesting for sums of matrices with few non-zero rows, since it gives a mechanism to skip the zero ones. Let’s first see the default behaviour, in which the operands do not update their output argument: >>> (o1 + o2)(x, out=y) The following steps are performed: y is used as o1 ’s output,o2 ’s output,y buffer. If we now assume that the operator o2 can update its output argument, the intermediate variable is not required anymore: y is used as o1 ’s outputo2 ’s output argument, and is is updated in-place. One can enable such property by adding the flag update_output and the keyword operation to the operator’s direct method. >>> import operator >>> import pyoperators >>> from pyoperators import Operator, operation_assignment >>> pyoperators.memory.verbose=True >>> @pyoperators.flags.linear ... @pyoperators.flags.update_output ... class P(Operator): ... def __init__(self, index): ... self.index = index ... Operator.__init__(self, shapein=1, shapeout=11) ... def direct(self, input, output, operation=operation_assignment): ... if operation is operation_assignment: ... output[...] = 0 ... elif operation is not operator.iadd: ... raise NotImplementedError() ... output[self.index] += input ... >>> Q = sum(P(i) for i in [0, 5, 10]) >>> y = np.empty(11) >>> Q([2.], out=y) array([ 2., 0., 0., 0., 0., 2., 0., 0., 0., 0., 2.]) No intermediate variable is needed and all operations are performed in-place. The AdditionOperator Q calls the direct method of its first operand P with the operation keyword set to the function operation_assignment, which means that P ‘s output should be assigned to the output argument. In the subsequent calls, the operation keyword is set to the iadd function from Python’s standard operator module (If Q were a MultiplicationOperator, it would be imul), which results in adding the operand’s output to the output argument in-place. This example shows the interest of the flag update_output when only few rows are non-zero: the output only need to be updated for these rows, which avoids thrashing the cache with zeros everytime an operand is called. Currently, there are two ways to partition operators, depending on whether the partitioning is done in an already existing dimension or not. Stack partition: outputs are stacked along a new dimension Chunk partition: outputs are concatenated along an existing dimension A stack partition operator is an instance of BlockRowOperator initialised with the new_axisin keyword, BlockDiagonalOperator with the keywords new_axisin or new_axisout or BlockColumnOperator initialised with the new_axisout keyword. There is a strong constraint on the input and output shapes of the blocks, since they must be the same for all blocks. The partition along the specified axis is always explicit and is equal to a tuple of ones with as many elements as the number of blocks. In the following example is shown a stack partition block column operator: >>> C = BlockColumnOperator([I, 2*I, 3*I], new_axisout=0) >>> C(np.ones(2)) array([[ 1., 1.], [ 2., 2.], [ 3., 3.]]) The outputs of the blocks are stacked along the dimension specified by new_axisout keyword, i.e the first one. The following example shows a stack partition block diagonal operator: >>> D = BlockDiagonalOperator([I, 2*I, 3*I], new_axisin=-1) >>> x = np.arange(2*3).reshape((2,3)) >>> x array([[0, 1, 2], [3, 4, 5]]) >>> D(x) array([[ 0, 2, 6], [ 3, 8, 15]]) It can be seen that the first block I is applied over x[:,0], the second block 2*I over x[:,1], and the third block 3*I over x[:,2]. A chunk partition operator can be obtained as an instance of BlockRowOperator initialised with the axisin keyword, BlockDiagonalOperator with the keywords axisin or axisout or BlockColumnOperator initialised with the axisout keyword. There is a lesser constraint on the shapes of the partitioned input and output of the blocks: all dimensions must be same except along the partitioned dimension(s). The partition along the specified axis can be implicit or explicit, which is reflected in the keywords partitionin and partitionout. The following example shows a block diagonal chunk partition operator with an explicit partition >>> D = BlockDiagonalOperator([I, 2*I, 3*I], axisin=-1, partitionin=(2,3,2)) >>> D(np.ones(7)) array([ 1., 1., 2., 2., 2., 3., 3.]) It can be seen that the first block I is applied over the first two elements, the second block 2*I over the three following elements, and the third block 3*I over the last two elements. Block row and block column chunk partition operators can handle implicit partitions: >>> C = BlockColumnOperator([I, 2*I, 3*I], axisout=-1) >>> C(np.ones((2,2))) array([[ 1., 1., 2., 2., 3., 3.], [ 1., 1., 2., 2., 3., 3.]]) Likewise, the outputs of the blocks are concatenated along the dimension specified by the axisout keyword, i.e the last one in this case. More operators can be found in the project PySimulators, projection, discrete differences, compression, downsampling, MPI operators, etc. There is an on-going work consisting in migrating the most generic ones into the pyoperators package.
The Single UNIX ® Specification, Version 2 Copyright © 1997 The Open Group NAME copywin - copy a region of a window SYNOPSIS #include <curses.h> int copywin(const WINDOW *srcwin, WINDOW *dstwin, int sminrow, int smincol, int dminrow, int dmincol, int dmaxrow, int dmaxcol, int overlay); DESCRIPTION The copywin() function provides a finer granularity of control over the overlay() and overwrite() functions. As in the prefresh() function, a rectangle is specified in the destination window, (dminrow, dmincol) and (dmaxrow, dmaxcol), and the upper-left-corner coordinates of the source window, (sminrow, smincol). If overlay is TRUE, then copying is non-destructive, as in overlay(). If overlay is FALSE, then copying is destructive, as in overwrite(). RETURN VALUE Upon successful completion, copywin() returns OK. Otherwise, it returns ERR. ERRORS No errors are defined. SEE ALSO newpad(), overlay(), <curses.h>. UNIX ® is a registered Trademark of The Open Group. Copyright © 1997 The Open Group [ Main Index | XSH | XCU | XBD | XCURSES | XNS ]
I'm receiving intermittent blank pages on my appengine python website. Typically these come when a new process is started or when I flush the cache. There is a single white page served and once that has served everything is fine. It's basically the same error as here: However, I have double and triple checked that I have the correct code on my python file (the following is copied and pasted): def main(): run_wsgi_app(application) if __name__ == "__main__": main() Here is an example response in the logs that generated the blank page: 01-02 04:46AM 48.539 / 200 188ms 570cpu_ms 383api_cpu_ms 0kb Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.13 (KHTML, like Gecko) Chrome/9.0.597.19 Safari/534.13,gzip(gfe),gzip(gfe),gzip(gfe) 86.164.42.252 - tjcritchlow [02/Jan/2011:04:46:48 -0800] "GET / HTTP/1.1" 200 124 - "Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.13 (KHTML, like Gecko) Chrome/9.0.597.19 Safari/534.13,gzip(gfe),gzip(gfe),gzip(gfe)" "www.7bks.com" ms=188 cpu_ms=570 api_cpu_ms=383 cpm_usd=0.016028 I 01-02 04:46AM 48.724 Saved; key: appstats:008500, part: 82 bytes, full: 92081 bytes, overhead: 0.001 + 0.005; link: http://www.7bks.com/stats/details?time=1293972408543 Any suggestions welcome on how I might debug further or solve this issue. I have a couple of different python files, here's the handlers from my app.yaml. But I've checked all of them to ensure they all have the correct if name code at the bottom. handlers: - url: /admin/.* script: admin.py login: admin - url: /googleanalytics/ script: googleanalytics.py login: admin - url: /cleanupsessions/ script: cleanupsessions.py login: admin - url: /robots.txt static_files: robots.txt upload: robots.txt - url: /favicon.ico static_files: images/favicon.ico upload: images/favicon.ico - url: /images static_dir: images - url: /css static_dir: css - url: /jquery static_dir: jquery - url: /.* script: 7books.py error_handlers: - file: customerror.html Could the issue be with one of the libraries I'm importing? Should I check all of them to ensure they all have the name code? Thanks Tom
I'm trying to make a POST request to retrieve information about a book. Here is the code that returns HTTP code: 302, Moved import httplib, urllib params = urllib.urlencode({ 'isbn' : '9780131185838', 'catalogId' : '10001', 'schoolStoreId' : '15828', 'search' : 'Search' }) headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"} conn = httplib.HTTPConnection("bkstr.com:80") conn.request("POST", "/webapp/wcs/stores/servlet/BuybackSearch", params, headers) response = conn.getresponse() print response.status, response.reason data = response.read() conn.close() When I try from a browser, from this page: http://www.bkstr.com/webapp/wcs/stores/servlet/BuybackMaterialsView?langId=-1&catalogId=10001&storeId=10051&schoolStoreId=15828 , it works. What am I missing in my code? EDIT: Here's what I get when I call print response.msg 302 Moved Date: Tue, 07 Sep 2010 16:54:29 GMT Vary: Host,Accept-Encoding,User-Agent Location: http://www.bkstr.com/webapp/wcs/stores/servlet/BuybackSearch X-UA-Compatible: IE=EmulateIE7 Content-Length: 0 Content-Type: text/plain; charset=utf-8 Seems that the location points to the same url I'm trying to access in the first place? EDIT2: I've tried using urllib2 as suggested here. Here is the code: import urllib, urllib2 url = 'http://www.bkstr.com/webapp/wcs/stores/servlet/BuybackSearch' values = {'isbn' : '9780131185838', 'catalogId' : '10001', 'schoolStoreId' : '15828', 'search' : 'Search' } data = urllib.urlencode(values) req = urllib2.Request(url, data) response = urllib2.urlopen(req) print response.geturl() print response.info() the_page = response.read() print the_page And here is the output: http://www.bkstr.com/webapp/wcs/stores/servlet/BuybackSearch Date: Tue, 07 Sep 2010 16:58:35 GMT Pragma: No-cache Cache-Control: no-cache Expires: Thu, 01 Jan 1970 00:00:00 GMT Set-Cookie: JSESSIONID=0001REjqgX2axkzlR6SvIJlgJkt:1311s25dm; Path=/ Vary: Accept-Encoding,User-Agent X-UA-Compatible: IE=EmulateIE7 Content-Length: 0 Connection: close Content-Type: text/html; charset=utf-8 Content-Language: en-US Set-Cookie: TSde3575=225ec58bcb0fdddfad7332c2816f1f152224db2f71e1b0474c866f3b; Path=/
toto2849 Connexion VPN automatique (NetworkManager) Bonjour,:D -Actuellement en stage il met demandé de mettre en place une connexion VPN qui se lance automatiquement au démarrage du pc ne laissant juste à l'utilisateur une boite de dialogue demandant login+pass.:rolleyes: -J'ai déjà installé le plugin "network-manager-pptp" tout est ok la connexion fonctionne ! Cependant j'aimerais que celle-ci se lance d'elle même au démarrage du système malheureusement j'ai beau cocher la case "connexion automatique" rien n'y fait ! Bref je me penche actuellement sur une solution qui puisse en ligne de commande lancer la connexion VPN ! -Le must serait que l'on affiche une boite de dialogue au démarrage demandant les login (user+pass) puis de lancer la connexion dans la foulé. Si la demande des identifiants se fait par l'intermédiaire d'un script shell en console cela me conviendra amplement. Voila voila merci à ceux qui auront le courage de lire ce message jusqu'à la fin et merci à ceux qui pourrons m'éclairer ! (n'hésiter pas à me demander plus de précision si besoin...) Cordialement, Anthony Hors ligne toto2849 Re : Connexion VPN automatique (NetworkManager) UP personne ? J'ai vu ceci sur le site suivant... mais RIEN ! Je ne sais pas comment activer automatiquement cette connexion.... Quelques infos peu utile ... http://projects.gnome.org/NetworkManage … Connection Dernière modification par toto2849 (Le 04/06/2009, à 22:19) Hors ligne toto2849 Re : Connexion VPN automatique (NetworkManager) Décidément personne peut m'aider ?? Hors ligne toto2849 Re : Connexion VPN automatique (NetworkManager) hop un UP pour la route....:rolleyes: Hors ligne killman Re : Connexion VPN automatique (NetworkManager) je n' ai pas eu le temps de me pencher sur la question, mais tu devrais essayer de rédiger un script qui lance la connexion cliente sur le vpn, si tu veux automatiser ça. Hors ligne toto2849 Re : Connexion VPN automatique (NetworkManager) -Tout d'abord merci d'avoir répondu à mon problème car j'ai cru que tout le monde s'en foutait... -Donc pour te répondre, oui en effet c'est ce que j'ai pensé aussi; d'ailleurs j'en ai commencé un morceau mais je ne connait pas les raccourcis me permettant de lancer en shell la connexion vpn de NetworkManager... Bien entendu si quelqu'un peut me proposer une méthode alternative qui ne passerait pas par ce gestionnaire cela me conviendra aussi ! Voici pour le moment ce que j'ai pu faire....mais je connais pas grand chose en script: #!/bin/sh #saisie des login echo "Saisir nom d'utilisateur : " read utilisateur echo "Saisir mot de passe : " read -s passe #lancement de la connexion vpn org.freedesktop.NetworkManager.ppp_starter ...mais cela est faux... Comme dit plus haut j'ai trouvé sur leur site 2/3 truc comme "org.freedesktop.NetworkManager.VPN.Connection" mais es applicable pour lancer une connexion par la console ? j'ai tenté mais rien...peut être un soucis de syntaxe ? Hors ligne coredump Re : Connexion VPN automatique (NetworkManager) Moi, j'ai une solution avec NetworkManager. Il s'agit d'un service qui utilise d-bus pour piloter le NetworkManager. Le principe est assez simple, mais le script est assez complexe. ... Suite aux différents échanges qui ont été fait sur le sujet, j'utilise ce post, pour proposer une solution simple et accessible aux néophytes. Cette solution permet de lancer automatiquement une connexion vpn et qu'elle se reconnecte automatiquement en cas de coupure. Remarques : - Cette solution n'est pas forcément compatible avec d'ancienne version d'ubuntu. - Pour une utilisation avancée, voir les autres posts de cette discussion, en particulier le post #28 (http://forum.ubuntu-fr.org/viewtopic.ph … 7#p3225667) de Papou qui propose des scripts qui permettent de lancer la connexion automatique "à la demande". - Pour ceux qui auraient déjà mis en place le script connection-service, celui-ci a un peu évolué, il faut donc le mettre à jour. Voici, la démarche, en deux étapes, à suivre : - ajouter ce script exécutable 02-loop-vpn dans /etc/NetworkManager/dispatcher.d/ en ayant pris soin de renseigner le nom de la connexion vpn /etc/NetworkManager/dispatcher.d/02-loop-vpn : #!/bin/bash # Script de lancement de connection VPN avec verification CONNECTION_NAME="Connexion VPN 1" while ! connection-service "$CONNECTION_NAME" status do connection-service "$CONNECTION_NAME" start sleep 30 done Soit, en ligne de commande : sudo gedit /etc/NetworkManager/dispatcher.d/02-loop-vpn copier/coller le code ci-dessus dans le fichier, et renseigner le champ CONNECTION_NAME avec le nom utilisé par le Network Manager pour votre connexion sudo chmod +x /etc/NetworkManager/dispatcher.d/02-loop-vpn - ajouter ce script exécutable connection-service dans /usr/local/bin/ /usr/local/bin/connection-service : #!/bin/bash ############ # SETTINGS # ############ get_connections_paths() { dbus-send --system --print-reply --dest="$1" "/org/freedesktop/NetworkManagerSettings" "org.freedesktop.NetworkManagerSettings.ListConnections" \ | grep "object path" | cut -d '"' -f2 } get_connection_settings() { dbus-send --system --print-reply --dest="$1" "$2" org.freedesktop.NetworkManagerSettings.Connection.GetSettings } get_connection_string_setting() { echo "$1" | grep -A 1 \""$2"\" | grep variant | cut -d '"' -f2 } get_connection_id() { get_connection_string_setting "$1" "id" } get_connection_type() { get_connection_string_setting "$1" "type" } get_device_type_by_connection_type() { echo "$1" | grep -q "ethernet" && echo 1 && return echo "$1" | grep -q "wireless" && echo 2 && return echo 0 } find_connection_path() { for connection_path in `get_connections_paths "$1"` do connection_settings=`get_connection_settings "$1" "$connection_path"` connection_settings_id=`get_connection_id "$connection_settings"` [ "$connection_settings_id" = "$2" ] && echo "$1" "$connection_path" done } find_connection_path_everywhere() { find_connection_path "org.freedesktop.NetworkManagerSystemSettings" "$1" find_connection_path "org.freedesktop.NetworkManagerUserSettings" "$1" } print_connections_ids() { for connection_path in `get_connections_paths "$1"` do connection_settings=`get_connection_settings "$1" "$connection_path"` connection_settings_id=`get_connection_id "$connection_settings"` echo "$connection_settings_id" done } print_connections_ids_everywhere() { print_connections_ids "org.freedesktop.NetworkManagerSystemSettings" print_connections_ids "org.freedesktop.NetworkManagerUserSettings" } ########### # DEVICES # ########### get_devices_paths() { dbus-send --system --print-reply --dest="org.freedesktop.NetworkManager" "/org/freedesktop/NetworkManager" "org.freedesktop.NetworkManager.GetDevices" \ | grep "object path" | cut -d '"' -f2 } get_device_property() { dbus-send --system --print-reply --dest="org.freedesktop.NetworkManager" "$1" "org.freedesktop.DBus.Properties.Get" string:"org.freedesktop.NetworkManager.Device" string:"$2" \ | grep variant | awk '{print $3}' } get_device_type() { get_device_property "$1" "DeviceType" } get_device_path_by_device_type() { device_path_by_device_type="/" for device_path in `get_devices_paths` do device_type=`get_device_type "$device_path"` [ "$device_type" = "$1" ] && device_path_by_device_type="$device_path" done echo "$device_path_by_device_type" } ####################### # ACTIVES CONNECTIONS # ####################### get_actives_connections_paths() { dbus-send --system --print-reply --dest="org.freedesktop.NetworkManager" "/org/freedesktop/NetworkManager" "org.freedesktop.DBus.Properties.Get" string:"org.freedesktop.NetworkManager" string:"ActiveConnections" \ | grep "object path" | cut -d '"' -f2 } get_last_active_connection_path() { get_actives_connections_paths | tail -n 1 } get_parent_connection_path_by_device_type() { parent_connection_path="/" [ "$1" = 0 ] && parent_connection_path=`get_last_active_connection_path` echo "$parent_connection_path" } get_active_connection_property() { dbus-send --system --print-reply --dest="org.freedesktop.NetworkManager" "$1" "org.freedesktop.DBus.Properties.Get" string:"org.freedesktop.NetworkManager.Connection.Active" string:"$2" \ | grep variant | awk -F '"' '{print $2}' } get_active_connection_service() { get_active_connection_property "$1" "ServiceName" } get_active_connection_path() { get_active_connection_property "$1" "Connection" } get_active_connection_path_by_connection_path() { for active_connection_path in `get_actives_connections_paths` do service=`get_active_connection_service $active_connection_path` path=`get_active_connection_path $active_connection_path` [ "$service" = "$1" ] && [ "$path" = "$2" ] && echo "$active_connection_path" done } print_actives_connections_ids() { for active_connection_path in `get_actives_connections_paths` do service=`get_active_connection_service $active_connection_path` path=`get_active_connection_path $active_connection_path` connection_settings=`get_connection_settings "$service" "$path"` connection_settings_id=`get_connection_id "$connection_settings"` echo "$connection_settings_id" done } ############## # START/STOP # ############## start_connection() { my_connection_complete_path=`find_connection_path_everywhere "$1"` my_connection_settings=`get_connection_settings $my_connection_complete_path` my_connection_type=`get_connection_type "$my_connection_settings"` my_connection_device_type=`get_device_type_by_connection_type "$my_connection_type"` my_connection_service=`echo $my_connection_complete_path | awk '{print $1}'` my_connection_path=`echo $my_connection_complete_path | awk '{print $2}'` my_connection_device_path=`get_device_path_by_device_type "$my_connection_device_type"` my_parent_connection_path=`get_parent_connection_path_by_device_type "$my_connection_device_type"` echo "connection_service=$my_connection_service" echo "connection_path=$my_connection_path" echo "connection_device_path=$my_connection_device_path" echo "parent_connection_path=$my_parent_connection_path" dbus-send --system --print-reply --dest="org.freedesktop.NetworkManager" /org/freedesktop/NetworkManager "org.freedesktop.NetworkManager.ActivateConnection" string:"$my_connection_service" objpath:"$my_connection_path" objpath:"$my_connection_device_path" objpath:"$my_parent_connection_path" } stop_connection() { my_connection_complete_path=`find_connection_path_everywhere "$1"` my_active_connection_path=`get_active_connection_path_by_connection_path $my_connection_complete_path` echo "active_connection_path=$my_active_connection_path" dbus-send --system --print-reply --dest="org.freedesktop.NetworkManager" /org/freedesktop/NetworkManager "org.freedesktop.NetworkManager.DeactivateConnection" objpath:"$my_active_connection_path" } ######## # MAIN # ######## invalid_arguments() { echo "Usage: `basename "$0"` connexion_name start|stop" echo "Connexion disponibles:" print_connections_ids_everywhere echo "Connexion actives:" print_actives_connections_ids exit 0 } [ "$#" != 2 ] && invalid_arguments case "$2" in "start") start_connection "$1" ;; "stop") stop_connection "$1" ;; "restart") stop_connection "$1" start_connection "$1" ;; "status") if print_actives_connections_ids | grep -q -x "$1" then echo "Connexion \"$1\" active" exit 0 else echo "Connexion \"$1\" inactive" exit 1 fi ;; *) invalid_arguments ;; esac Soit, en ligne de commande : sudo gedit /usr/local/bin/connection-service copier/coller le code ci-dessus dans le fichier, puis sudo chmod +x /usr/local/bin/connection-service Enjoy it! Dernière modification par coredump (Le 30/01/2010, à 11:21) Hors ligne legrosschmoll Re : Connexion VPN automatique (NetworkManager) Ca n'a pas l'air d 'intéresser toto2849 mais moi oui. Si tu pouvais poster ta solution Merci d'avance Precise sur W500 Hors ligne coredump Re : Connexion VPN automatique (NetworkManager) Désolé, si je réponds un peu tard, ça fait un moment que je consulte pas le forum. Voici ma solution, si ça t'intéresse toujours ou si ça intéresse quelqu'un d'autre. L'origine du problème a deux sources par rapport au VPN avec Network Manager : - quand on coche "Connecter automatiquement", ceci n'est pas pris en compte - quand on coche "Disponible pour tous les utilisateurs", la connexion ne marche pas (à cause d'une histoire de clé quand on regarde les logs) (En général, je trouve que le Network Manager a de légers disfonctionnements qui démontrent à mon avis quelques problèmes de conceptions ...) Les scripts que je propose tente de remédier à cela. Leur but est de connecter automatiquement le VPN et de le reconnecter automatiquement si il se déconnecte. (Avec de légères modifications, ça pourrait se comporter différemment selon le besoin.) Le principe est de se lancer et de s'arrêter en fonction d'une autre connexion parente (eth0 par exemple). Puis de surveiller d-bus pour voir : - si les paramètres de la connexion VPN sont disponibles (quand l'utilisateur se connecte) - si la connexion VPN ne s'est pas déconnectée Il y a deux scripts : 02-service-vpn -> à mettre dans /etc/NetworkManager/dispatcher.d/ (lance ou arrête "checkVPN" en fonction de la connexion parente) checkVPN -> à mettre dans /usr/local/bin/ (script python qui surveille que la connexion VPN soit toujours bien connecter) Tous deux doivent être exécutables. checkVPN dépend d'un autre fichier : nm_utils.py -> à mettre dans /usr/local/bin/ (outils pour piloter le nm en python) Il y a deux champs à renseigner dans le script 02-service-vpn : ETH_INTERFACE_NAME : l'interface de la connexion parente (par exemple : eth0) VPN_CONNECTION_NAME : le nom de la connexion VPN qui a été renseigner dans le network manager (Pour que ça marche pour plusieurs utilisateurs, il faut pour chacun définir la connexion VPN.) (Il y a aussi la possibilité de décommenter des lignes pour avoir des traces.) /etc/NetworkManager/dispatcher.d/02-service-vpn : #!/bin/bash ETH_INTERFACE_NAME="eth0" # The name of the VPN connection to activate VPN_CONNECTION_NAME="Connexion VPN" # Or the uuid of the VPN connection to activate #VPN_CONNECTION_NAME="UUID=abcf8915-bb27-4634-94e6-04c65261a73e" SERVICE_VPN_LOG="/dev/null" #SERVICE_VPN_LOG="/tmp/serviceVPN.log" CHECK_VPN_LOG="/dev/null" #CHECK_VPN_LOG="/tmp/checkVPN.log" echo `date`": $0 $@" >> "$SERVICE_VPN_LOG" [ "$1" != "$ETH_INTERFACE_NAME" ] && exit 0 case "$2" in "up") echo "Starting CheckVPN ... ("`which checkVPN`")" >> "$SERVICE_VPN_LOG" # On vérifie que checkVPN ne tourne pas déjà if [ "`pgrep checkVPN`" == "" ] then echo "CheckVPN started." >> "$SERVICE_VPN_LOG" checkVPN "$VPN_CONNECTION_NAME" > "$CHECK_VPN_LOG" 2>&1 & else echo "CheckVPN already running !" >> "$SERVICE_VPN_LOG" fi ;; "down") echo "Stoping checkVPN ..." >> "$SERVICE_VPN_LOG" if [ "`pgrep checkVPN`" == "" ] then echo "CheckVPN not running !" >> "$SERVICE_VPN_LOG" else pkill checkVPN echo "CheckVPN stopped." >> "$SERVICE_VPN_LOG" fi ;; *) echo "Nothing to do" >> "$SERVICE_VPN_LOG" ;; esac /usr/local/bin/checkVPN : #!/usr/bin/python -u # coding=UTF-8 import sys import dbus from dbus.mainloop.glib import DBusGMainLoop import gobject from nm_utils import * VPN_CONNECTION_UUID = VPN_CONNECTION_NAME = None # Default values : # The name of the VPN connection to activate #VPN_CONNECTION_NAME = "Connexion VPN 1" # or the uuid of the VPN connection to activate #VPN_CONNECTION_UUID = "fabf8915-bb27-4634-94e6-04c65261a73e" if len(sys.argv) > 1: vpn_argument = sys.argv[1] VPN_CONNECTION_UUID = VPN_CONNECTION_NAME = None if (vpn_argument[:5] == "UUID="): VPN_CONNECTION_UUID = vpn_argument[5:] print "VPN_CONNECTION_UUID=%s" % (VPN_CONNECTION_UUID) else: VPN_CONNECTION_NAME = vpn_argument print "VPN_CONNECTION_NAME=%s" % (VPN_CONNECTION_NAME) if (not VPN_CONNECTION_UUID) and (not VPN_CONNECTION_NAME): print "Usage : "+sys.argv[0]+" connexion_vpn_name" print "Liste des connexions diponibles :" print_connections_infos() sys.exit(1) vpn_connection_infos = {'vpn_connection_settings': None, 'vpn_connection_path': None} def get_vpn_connection_settings(): if VPN_CONNECTION_UUID: return get_connection_settings_by_uuid(VPN_CONNECTION_UUID) if VPN_CONNECTION_NAME: return get_connection_settings_by_name(VPN_CONNECTION_NAME) return None def get_vpn_connection_path(): if VPN_CONNECTION_UUID: return get_active_connection_path_by_uuid(VPN_CONNECTION_UUID) if VPN_CONNECTION_NAME: return get_active_connection_path_by_name(VPN_CONNECTION_NAME) return None def get_eth_active_connection_path(): try: active_connections_paths = get_active_connections_paths() active_connections_paths_count = len(active_connections_paths) if active_connections_paths_count == 0: return None return active_connections_paths[active_connections_paths_count-1] except Exception as exception: print exception return None def update_vpn_connection_settings(): vpn_connection_settings = get_vpn_connection_settings() vpn_connection_infos['vpn_connection_settings'] = vpn_connection_settings print "update_vpn_connection_settings=%s" % get_connection_infos(vpn_connection_settings) def update_vpn_connection_path(): vpn_connection_path = get_vpn_connection_path() vpn_connection_infos['vpn_connection_path'] = vpn_connection_path print "update_vpn_connection_path: vpn_connection_path=%s" % vpn_connection_path def start_connection(settings, active_connection_path): def reply_handler(opath): vpn_connection_path = opath vpn_connection_infos['vpn_connection_path'] = vpn_connection_path def error_handler(*args): print args proxy = dbus.SystemBus().get_object('org.freedesktop.NetworkManager', '/org/freedesktop/NetworkManager') iface = dbus.Interface(proxy, dbus_interface='org.freedesktop.NetworkManager') iface.ActivateConnection(get_connection_settings_type(settings), get_connection_settings_path(settings), dbus.ObjectPath("/"), active_connection_path, reply_handler=reply_handler, error_handler=error_handler) def checkVPN(): vpn_connection_settings = vpn_connection_infos['vpn_connection_settings'] # Do nothing if vpn connection settings don't exist print "checkVPN: vpn_connection_settings=%s" % get_connection_infos(vpn_connection_settings) if not vpn_connection_settings: return vpn_connection_path = vpn_connection_infos['vpn_connection_path'] # Do nothing if vpn connection already exist print "checkVPN: vpn_connection_path=%s" % vpn_connection_path if vpn_connection_path: return eth_connection_path = get_eth_active_connection_path() print "checkVPN: eth_connection_path=%s" % eth_connection_path # Do nothing if eth connection don't exist if not eth_connection_path: return start_connection(vpn_connection_settings, eth_connection_path) def handle_connection_settings_changed(): print "handle_connection_settings_changed" update_vpn_connection_settings() checkVPN() def handle_connections_changed(): print "handle_connections_changed" update_vpn_connection_path() checkVPN() def all_signal_receiver(*args, **kwargs): if (kwargs['dbus_interface'] == "org.freedesktop.NetworkManagerSettings"): handle_connection_settings_changed() if (kwargs['dbus_interface'] == "org.freedesktop.NetworkManagerSettings.Connection")\ and (kwargs['signal_name'] == "Updated"): handle_connection_settings_changed() if (kwargs['dbus_interface'] == "org.freedesktop.NetworkManager")\ and (kwargs['signal_name'] == "PropertiesChanged")\ and args[0].has_key('ActiveConnections'): handle_connections_changed() DBusGMainLoop(set_as_default=True) dbus.SystemBus().add_signal_receiver(all_signal_receiver, interface_keyword='dbus_interface', member_keyword='signal_name') update_vpn_connection_settings() update_vpn_connection_path() checkVPN() gobject.MainLoop().run() /usr/local/bin/nm_utils.py : # coding=UTF-8 import dbus # Get connection settings from settings type and settings path # (settings type is 'org.freedesktop.NetworkManagerSystemSettings' or 'org.freedesktop.NetworkManagerUserSettings') def get_connection_settings(settings_type, settings_path): proxy = dbus.SystemBus().get_object(settings_type, settings_path) iface = dbus.Interface(proxy, dbus_interface='org.freedesktop.NetworkManagerSettings.Connection') settings = iface.GetSettings() settings['settings-type'] = settings_type settings['settings-path'] = settings_path return settings # Get differents informations from connection settings def get_connection_settings_type(settings): return settings['settings-type'] def get_connection_settings_path(settings): return settings['settings-path'] def get_connection_settings_uuid(settings): return settings['connection']['uuid'] def get_connection_settings_name(settings): return settings['connection']['id'] # Get all settings paths associated to a type of settings def get_connections_settings_paths(settings_type): try: proxy = dbus.SystemBus().get_object(settings_type, '/org/freedesktop/NetworkManagerSettings') iface = dbus.Interface(proxy, dbus_interface='org.freedesktop.NetworkManagerSettings') return iface.ListConnections() except Exception as exception: print " !!! get_connections_settings_paths:" print exception return [] # Get all connections settings associated to a type of settings def get_connections_settings_by_type(settings_type): connections_settings = [] for settings_path in get_connections_settings_paths(settings_type): connection_settings = get_connection_settings(settings_type, settings_path) connections_settings.append(connection_settings) return connections_settings # Get all connections settings def get_all_connections_settings(): return get_connections_settings_by_type('org.freedesktop.NetworkManagerUserSettings') + \ get_connections_settings_by_type('org.freedesktop.NetworkManagerSystemSettings') # Get the esssential informations of the connection def get_connection_infos(settings): if settings: conn = settings['connection'] return "%s (type=%s, UUID=%s)" % (conn['id'], conn['type'], conn['uuid']) else: return None # Print the list of connections def print_connections_infos(): for connection_settings in get_all_connections_settings(): print get_connection_infos(connection_settings) # Get connection settings from the settings UUID def get_connection_settings_by_uuid(uuid): try: for settings in get_all_connections_settings(): if get_connection_settings_uuid(settings) == uuid: return settings except Exception as exception: print exception return None # Get connection settings from the settings UUID def get_connection_settings_by_name(name): try: for settings in get_all_connections_settings(): if get_connection_settings_name(settings) == name: return settings except Exception as exception: print exception return None # Get list of active connections paths def get_active_connections_paths(): proxy = dbus.SystemBus().get_object('org.freedesktop.NetworkManager', '/org/freedesktop/NetworkManager') iface = dbus.Interface(proxy, dbus_interface='org.freedesktop.DBus.Properties') return iface.Get('org.freedesktop.NetworkManager', 'ActiveConnections') # Get connection settings from the connection path def get_connection_settings_by_connection_path(connection_path): proxy = dbus.SystemBus().get_object('org.freedesktop.NetworkManager', connection_path) iface = dbus.Interface(proxy, dbus_interface='org.freedesktop.DBus.Properties') settings_type = iface.Get('org.freedesktop.NetworkManager.Connection.Active', 'ServiceName') settings_path = iface.Get('org.freedesktop.NetworkManager.Connection.Active', 'Connection') return get_connection_settings(settings_type, settings_path) # Get list of active connections paths def get_active_connections_settings(): actives_connections_settings = [] for connection_path in get_active_connections_paths(): settings = get_connection_settings_by_connection_path(connection_path) settings['connection-path'] = connection_path actives_connections_settings.append(settings) return actives_connections_settings # Look for connexion path in active connexions with particular uuid def get_active_connection_path_by_uuid(uuid): try: for connection_path in get_active_connections_paths(): settings = get_connection_settings_by_connection_path(connection_path) if get_connection_settings_uuid(settings) == uuid: return connection_path except Exception as exception: print exception return None # Look for connexion path in active connexions with particular name def get_active_connection_path_by_name(name): try: for connection_path in get_active_connections_paths(): settings = get_connection_settings_by_connection_path(connection_path) if get_connection_settings_name(settings) == name: return connection_path except Exception as exception: print exception return None Voilà, ça peut certainement être amélioré ou généralisé, mais j'espère au moins que ça pourra te dépanner. Hors ligne ElFabio Re : Connexion VPN automatique (NetworkManager) En tout cas, ca en dépanne d'autres ! merci bp pour ce boulot coredump, ca marche nickel, et c'est en plus trés propre, pas grand chose à redire, si ce n'est merci ! Hors ligne coredump Re : Connexion VPN automatique (NetworkManager) Merci ElFabio pour ton soutien et content que ça puisse te servir. Une solution plus propre serait de trouver une correction au niveau du network manager ou de l'applet (est-ce que l'applet KDE marche ?). Si j'avais un peu de temps et plus d'expérience dans la rédaction d'articles, j'aurais bien fait un tuto sur l'utilisation de d-bus pour contrôler le NM, car la documentation à ce sujet est assez maigre sur le net. Voici quand même quelques liens qui m'ont été utiles : - http://doc.ubuntu-fr.org/networkmanager … e_commande - http://doc.ubuntu-fr.org/dbus - http://old.nabble.com/dbus-and-OpenVPN- … 05375.html - http://projects.gnome.org/NetworkManage … /spec.html - http://linuxfr.org/forums/15/27406.html - http://dbus.freedesktop.org/doc/dbus-py … orial.html Je recommande aussi l'interface graphique d-feet pour s'exercer à utiliser d-bus (pratique aussi pour retrouver les API des services). (Quand on veut être "listener" - à l'écoute - d'un service, la commande dbus-monitor aussi est pratique.) Et pour ceux qui voudraient piloter NM à l'aide de d-bus, voici quelques exemples de lignes de commande qui permettent de le faire : Afficher la liste des chemins des définitions des connexion selon leurs appartenances (système - disponibles pour tous les utilisateurs / personnel - propre à l'utilisateur) : dbus-send --system --print-reply --dest=org.freedesktop.NetworkManagerSystemSettings "/org/freedesktop/NetworkManagerSettings" \ org.freedesktop.NetworkManagerSettings.ListConnections (ou --dest="org.freedesktop.NetworkManagerUserSettings") Afficher les paramètres d'une connexion selon son appartenance et connaissant son chemin dbus-send --system --print-reply --dest=org.freedesktop.NetworkManagerSystemSettings "/org/freedesktop/NetworkManagerSettings/0" \ org.freedesktop.NetworkManagerSettings.Connection.GetSettings (ou --dest="org.freedesktop.NetworkManagerUserSettings") Afficher la liste des chemins des connexions actives : dbus-send --system --print-reply --dest=org.freedesktop.NetworkManager "/org/freedesktop/NetworkManager" \ org.freedesktop.DBus.Properties.Get string:"org.freedesktop.NetworkManager" string:"ActiveConnections" Afficher une propriété d'une connexion active connaissant son chemin : dbus-send --system --print-reply --dest=org.freedesktop.NetworkManager "/org/freedesktop/NetworkManager/ActiveConnection/2" \ org.freedesktop.DBus.Properties.Get string:"org.freedesktop.NetworkManager.Connection.Active" string:"nom_de_la_propriete" "nom_de_la_propriete" est à renseigner. Par exemple, "nom_de_la_propriete" peut-être "ServiceName" ou "Connection" pour connaître l'appartenance et le chemin de la définition de la connexion. Arrêter une connection active connaissant son chemin : dbus-send --system --print-reply --dest=org.freedesktop.NetworkManager "/org/freedesktop/NetworkManager" \ org.freedesktop.NetworkManager.DeactivateConnection objpath:"/org/freedesktop/NetworkManager/ActiveConnection/2" Démarrer une connexion : dbus-send --system --print-reply --dest=org.freedesktop.NetworkManager "/org/freedesktop/NetworkManager" \ org.freedesktop.NetworkManager.ActivateConnection string:"org.freedesktop.NetworkManagerSystemSettings" objpath:"/org/freedesktop/NetworkManagerSettings/0" objpath:"/org/freedesktop/NetworkManager/Devices/0" objpath:"/" ou dans le cas d'une connection vpn : dbus-send --system --print-reply --dest=org.freedesktop.NetworkManager "/org/freedesktop/NetworkManager" \ org.freedesktop.NetworkManager.ActivateConnection string:"org.freedesktop.NetworkManagerUserSettings" objpath:"/org/freedesktop/NetworkManagerSettings/1" objpath:"/" objpath:"/org/freedesktop/NetworkManager/ActiveConnection/2" Le problème de cette commande est qu'il faut passer en argument de la commande le chemin du device (ethN ou rien pour le vpn) et le chemin de la connexion active dont elle dépend (si elle n'utilise pas de device). Malheureusement ces informations ne sont pas évidentes à déduire. Pour terminer voici un exemple de script utilisant ces commandes et permettant de démarrer ou d'arrêter une connexion en lui passant le nom de la connexion. Pour l'utiliser, nommer le fichier "connection-service", le rendre exécutable et placer le dans "/usr/local/bin/", puis taper : connection-service "nom_de_la_connexion" start|stop Par exemple : connection-service "Connection VPN 1" start /usr/local/bin/connection-service : #!/bin/bash ############ # SETTINGS # ############ get_connections_paths() { dbus-send --system --print-reply --dest="$1" "/org/freedesktop/NetworkManagerSettings" "org.freedesktop.NetworkManagerSettings.ListConnections" \ | grep "object path" | cut -d '"' -f2 } get_connection_settings() { dbus-send --system --print-reply --dest="$1" "$2" org.freedesktop.NetworkManagerSettings.Connection.GetSettings } get_connection_string_setting() { echo "$1" | grep -A 1 \""$2"\" | grep variant | cut -d '"' -f2 } get_connection_id() { get_connection_string_setting "$1" "id" } get_connection_type() { get_connection_string_setting "$1" "type" } get_device_type_by_connection_type() { echo "$1" | grep -q "ethernet" && echo 1 && return echo "$1" | grep -q "wireless" && echo 2 && return echo 0 } find_connection_path() { for connection_path in `get_connections_paths "$1"` do connection_settings=`get_connection_settings "$1" "$connection_path"` connection_settings_id=`get_connection_id "$connection_settings"` [ "$connection_settings_id" = "$2" ] && echo "$1" "$connection_path" done } find_connection_path_everywhere() { find_connection_path "org.freedesktop.NetworkManagerSystemSettings" "$1" find_connection_path "org.freedesktop.NetworkManagerUserSettings" "$1" } print_connections_ids() { for connection_path in `get_connections_paths "$1"` do connection_settings=`get_connection_settings "$1" "$connection_path"` connection_settings_id=`get_connection_id "$connection_settings"` echo "$connection_settings_id" done } print_connections_ids_everywhere() { print_connections_ids "org.freedesktop.NetworkManagerSystemSettings" print_connections_ids "org.freedesktop.NetworkManagerUserSettings" } ########### # DEVICES # ########### get_devices_paths() { dbus-send --system --print-reply --dest="org.freedesktop.NetworkManager" "/org/freedesktop/NetworkManager" "org.freedesktop.NetworkManager.GetDevices" \ | grep "object path" | cut -d '"' -f2 } get_device_property() { dbus-send --system --print-reply --dest="org.freedesktop.NetworkManager" "$1" "org.freedesktop.DBus.Properties.Get" string:"org.freedesktop.NetworkManager.Device" string:"$2" \ | grep variant | awk '{print $3}' } get_device_type() { get_device_property "$1" "DeviceType" } get_device_path_by_device_type() { device_path_by_device_type="/" for device_path in `get_devices_paths` do device_type=`get_device_type "$device_path"` [ "$device_type" = "$1" ] && device_path_by_device_type="$device_path" done echo "$device_path_by_device_type" } ####################### # ACTIVES CONNECTIONS # ####################### get_actives_connections_paths() { dbus-send --system --print-reply --dest="org.freedesktop.NetworkManager" "/org/freedesktop/NetworkManager" "org.freedesktop.DBus.Properties.Get" string:"org.freedesktop.NetworkManager" string:"ActiveConnections" \ | grep "object path" | cut -d '"' -f2 } get_last_active_connection_path() { get_actives_connections_paths | tail -n 1 } get_parent_connection_path_by_device_type() { parent_connection_path="/" [ "$1" = 0 ] && parent_connection_path=`get_last_active_connection_path` echo "$parent_connection_path" } get_active_connection_property() { dbus-send --system --print-reply --dest="org.freedesktop.NetworkManager" "$1" "org.freedesktop.DBus.Properties.Get" string:"org.freedesktop.NetworkManager.Connection.Active" string:"$2" \ | grep variant | awk -F '"' '{print $2}' } get_active_connection_service() { get_active_connection_property "$1" "ServiceName" } get_active_connection_path() { get_active_connection_property "$1" "Connection" } get_active_connection_path_by_connection_path() { for active_connection_path in `get_actives_connections_paths` do service=`get_active_connection_service $active_connection_path` path=`get_active_connection_path $active_connection_path` [ "$service" = "$1" ] && [ "$path" = "$2" ] && echo "$active_connection_path" done } print_actives_connections_ids() { for active_connection_path in `get_actives_connections_paths` do service=`get_active_connection_service $active_connection_path` path=`get_active_connection_path $active_connection_path` connection_settings=`get_connection_settings "$service" "$path"` connection_settings_id=`get_connection_id "$connection_settings"` echo "$connection_settings_id" done } ############## # START/STOP # ############## start_connection() { my_connection_complete_path=`find_connection_path_everywhere "$1"` my_connection_settings=`get_connection_settings $my_connection_complete_path` my_connection_type=`get_connection_type "$my_connection_settings"` my_connection_device_type=`get_device_type_by_connection_type "$my_connection_type"` my_connection_service=`echo $my_connection_complete_path | awk '{print $1}'` my_connection_path=`echo $my_connection_complete_path | awk '{print $2}'` my_connection_device_path=`get_device_path_by_device_type "$my_connection_device_type"` my_parent_connection_path=`get_parent_connection_path_by_device_type "$my_connection_device_type"` echo "connection_service=$my_connection_service" echo "connection_path=$my_connection_path" echo "connection_device_path=$my_connection_device_path" echo "parent_connection_path=$my_parent_connection_path" dbus-send --system --print-reply --dest="org.freedesktop.NetworkManager" /org/freedesktop/NetworkManager "org.freedesktop.NetworkManager.ActivateConnection" string:"$my_connection_service" objpath:"$my_connection_path" objpath:"$my_connection_device_path" objpath:"$my_parent_connection_path" } stop_connection() { my_connection_complete_path=`find_connection_path_everywhere "$1"` my_active_connection_path=`get_active_connection_path_by_connection_path $my_connection_complete_path` echo "active_connection_path=$my_active_connection_path" dbus-send --system --print-reply --dest="org.freedesktop.NetworkManager" /org/freedesktop/NetworkManager "org.freedesktop.NetworkManager.DeactivateConnection" objpath:"$my_active_connection_path" } ######## # MAIN # ######## invalid_arguments() { echo "Usage: `basename "$0"` connexion_name start|stop" echo "Connexion disponibles:" print_connections_ids_everywhere echo "Connexion actives:" print_actives_connections_ids exit 0 } [ "$#" != 2 ] && invalid_arguments case "$2" in "start") start_connection "$1" ;; "stop") stop_connection "$1" ;; "restart") stop_connection "$1" start_connection "$1" ;; "status") if print_actives_connections_ids | grep -q -x "$1" then echo "Connexion \"$1\" active" exit 0 else echo "Connexion \"$1\" inactive" exit 1 fi ;; *) invalid_arguments ;; esac Dernière modification par coredump (Le 28/01/2010, à 13:21) Hors ligne ElFabio Re : Connexion VPN automatique (NetworkManager) effectivement, tu as bien potassé ton d-bus ! et de rien pour le soutien, c'est le moins que je puisse faire, surtout que j'utilise maintenant ton script tous les jours ! vraiment exactement ce que je recherchais ! Hors ligne coredump Re : Connexion VPN automatique (NetworkManager) Par les temps qui courent, il vaut mieux surfer bien couvert Hors ligne Brunod Re : Connexion VPN automatique (NetworkManager) Salut, Je te félicite anticipativement parce que là, en congé, je n'en ai pas besoin; mais à la rentrée, faudra que je me penche dessus ! Merci ! Wanted : >>> un emploi dans la sécurité informatique et réseau <<< Windows est un système d'exploitation de l'homme par l'ordinateur. Linux, c'est le contraire ... --> état de la conversion : 36 pc linux Hors ligne coredump Re : Connexion VPN automatique (NetworkManager) À noter, que le script "connection-service" défini dans mon post #11 ci-dessus peut aussi être utilisé pour connecter automatiquement le VPN. Pour cela, créer un script "vpn-autostart" exécutable dans "/usr/local/bin/", /usr/local/bin/vpn-autostart : #!/bin/bash # Délai de 10 secondes pour laisser le temps à nm-applet de démarrer sleep 10 && connection-service "Connection VPN 1" start Ajouter ensuite un programme "VPN" dans la liste des "Applications au démarrage" avec comme commande : vpn-autostart Ceci démarrera la connexion "Connection VPN 1" à l'ouverture de la session. Et pour reconnecter automatiquement la connexion en cas de coupure, ajouter ce script "02-loop-vpn" exécutable dans "/etc/NetworkManager/dispatcher.d/": /etc/NetworkManager/dispatcher.d/02-loop-vpn : #!/bin/bash [ "$2" = "vpn-down" ] && vpn-autostart Et voilà ! Hors ligne lynn Re : Connexion VPN automatique (NetworkManager) Bonjour, Merci à toi coredump pour ces scripts de connection automatique pour VPN. Ils fonctionnent parfaitement Comme tu le dis si bien : - quand on coche "Connecter automatiquement", ceci n'est pas pris en compte - quand on coche "Disponible pour tous les utilisateurs", la connexion ne marche pas (à cause d'une histoire de clé quand on regarde les logs) A quand un Network Manager complètement fonctionnel ? Parce que c'est vrai que ce genre de "détails" est quand même bien ch...t. En attendant, merci encore coredump et je pense que ce post va intéresser pas mal de monde .....:P Linux est la solution ! Au fait, c'est quoi ton problème ? Ubuntu 14.04 - 64 bits - Ubuntu 14.10 - 64 bits - Utopic Unicorn Hors ligne ElFabio Re : Connexion VPN automatique (NetworkManager) Merci encore une fois coredump pour ce tutoriel trés pratique et directement utilisable, j'en profite pour abuser un peu de tes compétences pour te poser une question concernant l'usage de tes scripts (j'utilise en particulier checkVPN) : aprés maintenant plusieurs jours d'utilisation, je remarque qu'au fur et à mesure de mes reconnexions automatique à mon provider VPN (lors d'une déconnexion), le débit global des téléchargements (dans mon client torrent préféré...) a tendance à se contracter fortement (divisé par 2 ou 3), et un simple redémarrage du PC permet de revenir à un débit normal. Pour info je me connecte en WiFi, et peut être n'est ce pas du à l'utilisation de tes scripts, mais est ce que ca ne pourrait pas venir d'un "engorgement" de process qui s'accumulent en taches de fond suite à de multiples déconnexion/reconnexion VPN, je ne pense pas trop parce qu'en faisant un ps aux je n'en trouve pas trace et qu'en plus ton script me parait vraiment trés propre, mais je n'ai pas d'explications plausibles... est ce que tu aurais déjà rencontré le même genre de soucis ou aurais tu une idée de leur origine ? Je te souhaite une trés bonne année, Hors ligne rogerjm Re : Connexion VPN automatique (NetworkManager) Bravo coredump pour ce post (qui n'est pas loin d'être un article ). Tout ce que tu y as décrit m'a été très utile pour configurer mon VPN et le lancer automatiquement, mais aussi m'a appris plein de choses. Bonne année Hors ligne coredump Re : Connexion VPN automatique (NetworkManager) Bonne année à tous ! Au sujet de ton problème, ElFabio, je ne pense pas pouvoir t'aider beaucoup. À vue de nez, voici mes impressions : - Concernant le script de reconnexion, ça m'étonnerait qu'il puisse provoquer un "engorgement". La plupart du temps il est au repos (en attente d'évènements) et quand il bosse, il ne fait jamais que quelques petites opérations. Le seul petit souci que je verrais, serait le délai de reconnexion. En cas de coupure, il cherche instantanément à se reconnecter et ce n'est peut-être pas très bon. J'ai remarqué qu'en cas de coupure, la reconnexion ne se fait pas toujours à la première tentative. Le fait d'envoyer des requêtes de reconnexion aussi proches n'est peut-être pas très bon. En même temps, une fois qu'il s'est reconnecté, je n'ai pas observé pour ma part de diminution de la bande passante. Mais, si tu veux chercher dans cette voie, tu peux essayer avec l'autre méthode (elle n'utilise pas de process en fond de tâche) et jouer sur le délai de reconnection. Les autres pistes pourraient venir à mon avis de la connexion Wifi, du client torrent ou du service VPN : - Pour la connexion Wifi, j'ai entendu dire que le peer-to-peer n'est pas très bon. J'ai eu à une époque un fournisseur internet wifi qui interdisait de faire du peer-to-peer sur son réseau (sinon ça le faisait planter). De cette époque, j'ai pris l'habitude de faire du warez, je n'utilise les torrents que de façon ponctuel, donc je n'ai pas trop d'expérience à ce sujet. Tout ce que je peux te dire, c'est que je ne rencontre pas ce problème avec les boards warez et JDownloader. Pour explorer cette piste, il faudrait que tu es la possibilité de tester la différence avec une connexion filaire, ou consulter les forums sur les torrents et le Wifi. - Pour le client torrent, peut-être que le fait d'avoir des coupures le perturbe. Une possibilité pourrait être de le relancer quand tu observes une diminution de débit et voir si ça change quelque chose. Ou voir si tu observes aussi sur cette baisse de débit sur un test de bande passante (avec speedtest.net, par exemple). - Le fournisseur VPN peut-être aussi offre de moins bon service au bout d'une certaine durée de connexion. Là, si tu as la possibilité d'essayer une autre connexion VPN pour voir la différence. Ou provoquer volontairement les coupures VPN depuis l'applet du Network Manager pour voir si ce sont bien les coupures qui font baisser le débit. En gros, essaye de voir, et si c'est possible, si ton problème persiste : - sans vpn (mais ça, tu le sais surement déjà) - ou avec une connexion réseau filaire - ou en redémarrant le client torrent - ou en redémarrant le Network Manager (sudo service network-manager restart, NetworkManager pour les versions d'ubuntu plus anciennes) - ou en espaçant les délais entre les reconnections - ou avec un autre fournisseur VPN (personnellement, j'utilise HideMyNet, un peu payant) ça t'aidera peut-être mieux à localiser ton problème qu'en redémarrant le pc. Bonne chance ! Dernière modification par coredump (Le 02/01/2010, à 12:14) Hors ligne ElFabio Re : Connexion VPN automatique (NetworkManager) Déjà je te remercie pour avoir pris autant de temps pour me répondre, je vais essayer de reparcourir les pistes que tu as mentionné mais, effectivement, ce sont également sensiblement les mêmes que celles que j'avais regardé jusque-là. Mes premieres impresssions tendraient à confirmer les tiennes, le probléme semblerait effectivement provenir davantage de la connexion en Wifi, puisque j'utilise également tes scripts sur un PC fixe en ethernet avec le même provider VPN, et je ne rencontre pas ce genre de problème. J'avais déjà éliminé la piste du provider VPN dont je suis personnellement trés satisfait (il s'agit de TorrentFreedom), et d'ailleurs le fait qu'en redémarrant le PC je retrouve un débit normal tend également à confirmer que le pb ne vient pas de là. D'ailleurs je n'ai pas, comme tu le mentionnes pour toi, de problémes de connexion particulier au VPN, la connexion se fait trés rapidement, par contre, en revanche, il se produit des déconnexions au VPN de façon trés régulière voire périodique (presque toutes les 30mn), ce qui me fait également pencher vers un probléme WiFi ou plutôt de passerelle (Livebox dans mon cas actuel) voire de FAI... Je vais par contre également regarder du côté de ton 2eme script pour voir si j'observe des changements. En tout cas, merci pour ton temps et ta réponse, ca me permet de disposer d'un 2eme avis "éclairé" sur le sujet ! Hors ligne hybernatus Re : Connexion VPN automatique (NetworkManager) Fiou... merci pour ce script et tout ton boulot, je me suis abonné hier à Ipredator (ayant trouvé un super tuto, et n'y connaissant rien en réseaux, j'ai fait au plus simple ^^ ) et j'ai remarqué qu'il se déconnecte de temps en temps... ce qui est un peu c*n tout de même quand on veut une connexion sécurisée et anonyme je trouve J'essairai d'appliquer ça dès que je trouve la motiv merci ! Mes photos: http://www.hyb.me/photos Hors ligne ElFabio Re : Connexion VPN automatique (NetworkManager) Salut coredump, je reviens juste vers toi pour te faire un petit debriefing : ta deuxiéme solution (via connection-service) a résolu mes problémes de "coupure" intempestive et réguliére du VPN, je l'ai un peu aménagé à ma sauce, notamment pour disposer d'une connexion 'manuelle' au VPN et non un lancement automatique au démarrage, mais, que ce soit en Wi-Fi ou en Ethernet, cela fonctionne maintenant parfaitement. Donc encore merci pour ton travail, et bonne continuation ! Hors ligne Papou Re : Connexion VPN automatique (NetworkManager) Merci Coredump pour ces scripts très utiles. La reconnection automatique c'est nickel .... sauf quand c'est le modem qui décroche de l'ADSL et met un peu de temps pour redémarrer, la reconnection au VPN n'aboutit alors pas et l'accès internet se retrouve en direct quelques secondes plus tard. Serait-il possible de faire encore mieux ? Un pour tous et tous pour un, les petits ruisseaux feront les grandes rivières ... Hors ligne coredump Re : Connexion VPN automatique (NetworkManager) Tout d'abord merci ElFabio pour ton retour d'expérience. A mon avis : - la première solution (en python) est plus fiable, car le service tourne en tâche de fond et ne dépend pas de la connexion de l'utilisateur, mais plus complexe et donc plus difficile à maintenir, - la deuxième (en shell) est plus simple et plus clair, et donc plus adaptable et plus facile à maintenir, et par expérience marche bien. Et en définitif, je conseillerai donc plutôt celle-ci. Pour rappel, cette méthode est décrite dans le post #15 avec le script "connection-service" de la fin du poste #11. (Si je trouve un peut de temps, j'en ferais bien la synthèse dans un tuto ...) Ensuite par rapport à Papou, si tu veux que le vpn se reconnecte quand ta connexion adsl se reconnecte (j'imagine que tu utilises la deuxième méthode et qu'il ne s'agit pas d'une connexion adsl pppoe ?), tu peux essayer d'ajouter cette ligne dans le script /etc/NetworkManager/dispatcher.d/02-loop-vpn : [ "$2" = "up" ] && vpn-autostart (Pour voir si ça marche, tu peux essayer en redémarrant à la main ton modem en cours de connexion.) Dernière modification par coredump (Le 18/01/2010, à 15:45) Hors ligne Papou Re : Connexion VPN automatique (NetworkManager) coredump, Merci pour ta réponse. j'utilise la deuxième méthode et il s'agit d'une connexion adsl pppoe. L'inconvénient d'ajouter la ligne "[ "$2" = "up" ] && vpn-autostart" est qu'il devient alors impossible de déconnecter volontairement le VPN pour les applications qui ne le supporte pas (sauf a écrire un script de déconnexion volontaire qui va déplacer le fichier 02-loop-vpn en dehors de dispatcher). D'autre part, il me semble avoir remarqué que le problème est que la connection VPN échoue parfois (timeout d'environ 10 sec ? ) et qu'il n'y a alors pas de deuxième essai. Un pour tous et tous pour un, les petits ruisseaux feront les grandes rivières ... Hors ligne
senjy Erreur fatal postfix sur email en spam ou pas Bonjour, J'ai installé un serveur postfix et j'utilise un script php maison qui marche, mais le résultat est un peu étrange. 1. Certains mails envoyés partent dans la boite spam de l'utilisateur 2. D'autres envoyés fonctionnent Dans /var/log tout semble tres bien se passer. aucun message d'erreur. J'ai passé 2 jours a voir et revoir la config. et j'ai meme installé SPF et DKIM. Mais le résultat est toujours le même. A savoir que mes mail en direction de gmail, partent en spams J'ai fait un postfix -n et ça me donne ceci root@ksXXXXXX:/var/www/mon-domaine.com/htdocs# postconf -n alias_database = hash:/etc/aliases alias_maps = hash:/etc/aliases append_dot_mydomain = no biff = no config_directory = /etc/postfix inet_interfaces = all mailbox_size_limit = 0 milter_default_action = accept milter_protocol = 6 mydestination = ksXXXXXX.kimsufi.com, localhost.kimsufi.com, , localhost myhostname = ksXXXXXX.kimsufi.com mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128 myorigin = /etc/mailname non_smtpd_milters = inet:127.0.0.1:8891 readme_directory = no recipient_delimiter = + relayhost = smtp_tls_session_cache_database = btree:${data_directory}/smtp_scache smtpd_banner = $myhostname ESMTP $mail_name (Debian/GNU) smtpd_milters = inet:127.0.0.1:8891 smtpd_recipient_restrictions = check_policy_service unix:private/policy smtpd_tls_cert_file = /etc/ssl/certs/ssl-cert-snakeoil.pem smtpd_tls_key_file = /etc/ssl/private/ssl-cert-snakeoil.key smtpd_tls_session_cache_database = btree:${data_directory}/smtpd_scache smtpd_use_tls = yes Est ce que vous auriez une idée clair d'ou cela pourrais venir ? Dernière modification par senjy (Le 03/04/2013, à 19:00) Hors ligne Akiboot Re : Erreur fatal postfix sur email en spam ou pas Ca me rappelle un problèmes rencontré dans ma boite. Les mails en directions de Gmail et de certains de nos client était directement mis en SPAM dans leur boite. C'était du au faite que ces serveurs ne connaissaient pas le notre et le rediriger donc directement dans la boite spam. Nous avions une connexion Orange et on à résolu le problème en redirigeant les envois de mail de mail vers le SMTP orange qui lui est connu par Gmail. On as donc juste mis la ligne relayhost = smtp.orange.fr depuis plus de problèmes à ce niveau Hors ligne senjy Re : Erreur fatal postfix sur email en spam ou pas Est il possible de le faire sur un serveur dédié chez ovh ? Hors ligne bruno Re : Erreur fatal postfix sur email en spam ou pas SPF et DKIM c'est très bien mais as-tu bien vérifié les enregistrements DNS ? Quels sont les entêtes de courriels reçus sur des adresse gmail . La signature dkim est-elle présente ? D'autre part il serait préférable d'utiliser ton nom de domaine plutôt que ksXXXXXX.kimsufi.com dans la configuration de ton serveur dédié. Hors ligne Akiboot Re : Erreur fatal postfix sur email en spam ou pas Je ne sais pas si OVH propose un serveur smtp pour leur client avec un serveur dédié. Peut être leur demander depuis leur support ? Hors ligne senjy Re : Erreur fatal postfix sur email en spam ou pas Delivered-To: emaildetest@gmail.com Received: by 10.58.171.41 with SMTP id ar9csp218373vec; Thu, 4 Apr 2013 01:56:28 -0700 (PDT) X-Received: by 10.15.101.200 with SMTP id bp48mr9154539eeb.38.1365065787566; Thu, 04 Apr 2013 01:56:27 -0700 (PDT) Return-Path: <support@XXXXX.com> Received: from XXXXX.com (ksXXXXXX.kimsufi.com. [5.39.87.XXX]) by mx.google.com with ESMTP id m7si11551398eey.212.2013.04.04.01.56.26; Thu, 04 Apr 2013 01:56:27 -0700 (PDT) Received-SPF: pass (google.com: domain of support@XXXXXXX.com designates 5.39.87.XXX as permitted sender) client-ip=5.39.87.XXX; Authentication-Results: mx.google.com; spf=pass (google.com: domain of support@XXXXX.com designates 5.39.87.XXX as permitted sender) smtp.mail=support@XXXXX.com; dkim=neutral (bad format) header.i=@XXXXX.com Received: by XXXXX.com (Postfix, from userid 33) id 8B0033F10C; Thu, 4 Apr 2013 11:01:10 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=simple/simple; d=XXXXX.com; s=dkim; t=1365066070; bh=VSYamQVYmhoKDQT+6q1jmLNoTPg7WDynn/doUQzDdP8=; h=To:Subject:MIME-Version:Content-type:From:Reply-To:Message-Id: Date; b=OgH0HPHOHTcRRvGL2rzRPbySFOGD3WteWTw8zHMg+CTlO22BTWm e2btf9OyJCB4TFITUOux2BdOFs2pa29cTaUMrf+g/PjEjGb8rnxiJqSMfdPLn4Yadh Q7wMxrvdbKL2PUsGJa6ZawGzQL0xszIy63BVOttc= To: XXXXX@gmail.com Subject: [XXXXX] Bienvenue au niveau DNS j'ai pour SPF v=spf1 a ptr ip4:5.39.87.XXX ~all et dim default._domainkey.XXXX.com DKIM k=rsa; p=MIGfMA0GCSqGSIb3DQEBAQ UAA4GNADCBi3 egVU7DfDTKJlniA55U/KgL5heMLzl7q2mrIsBvbDpVSqe r+9W/s4h Hors ligne Akiboot Re : Erreur fatal postfix sur email en spam ou pas Je ne sais pas si en tant que client OVH tu posséde une adresse mail chez eux. Si c'est le cas Ce lien pourrait t'aider. Hors ligne senjy Re : Erreur fatal postfix sur email en spam ou pas Je n'ai pas d'adresse chez eux, et je veux envoyer mes mails depuis le serveur. Je precise que c'est seulement gmail qui pose probleme Hors ligne bruno Re : Erreur fatal postfix sur email en spam ou pas Tu as une erreur dkim : dkim=neutral (bad format) header.i=@XXXXX.com Il faut vérifier ton enregistrement DNS, celui que tu donnes n'est pas bon car il contient des espaces dans la clé publique. Tu peux vérifier sur : http://dkimcore.org/tools/dkimrecordcheck.html Il aussi vérifier que ton domaine xxxx.com a bien un enregistrement MX. Du type : wwww.com. IN MX 10 xxxx.com. Hors ligne senjy Re : Erreur fatal postfix sur email en spam ou pas j'ai verifié avec le lien donné et voila le résultat quand a ca, wwww.com. IN MX 10 xxxx.com. j'ai ceux de google apps. Hors ligne dg34 Re : Erreur fatal postfix sur email en spam ou pas Tu peux envoyer un mail à cette adresse pour avoir plus de détail sur les erreurs éventuelles: check-auth@verifier.port25.com Hors ligne senjy Re : Erreur fatal postfix sur email en spam ou pas Je ne connaissais pas du tout, ca a l'air bien détaillé comme réponse. ========================================================== Summary of Results ========================================================== SPF check: softfail DomainKeys check: neutral DKIM check: neutral Sender-ID check: softfail SpamAssassin check: ham ========================================================== Details: ========================================================== HELO hostname: mail-ob0-x233.google.com Source IP: 2607:f8b0:4003:c01::233 mail-from: sylvain@XXXXXX.com ---------------------------------------------------------- SPF check details: ---------------------------------------------------------- Result: softfail (SPF-Result: SoftFail) ID(s) verified: smtp.mailfrom=sylvain@XXXXXX.com DNS record(s): XXXXXX.com. SPF (no records) XXXXXX.com. 600 IN TXT "v=spf1 a ptr ip4:5.39.87.XXX ~all" XXXXXX.com. AAAA (no records) 3.3.2.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.c.0.3.0.0.4.0.b.8.f.7.0.6.2.ip6.arpa. 86400 IN PTR mail-ob0-x233.google.com. mail-ob0-x233.google.com. 86400 IN AAAA 2607:f8b0:4003:c01::233 ---------------------------------------------------------- DomainKeys check details: ---------------------------------------------------------- Result: neutral (message not signed) ID(s) verified: header.From=sylvain@XXXXXX.com DNS record(s): ---------------------------------------------------------- DKIM check details: ---------------------------------------------------------- Result: neutral (message not signed) ID(s) verified: NOTE: DKIM checking has been performed based on the latest DKIM specs (RFC 4871 or draft-ietf-dkim-base-10) and verification may fail for older versions. If you are using Port25's PowerMTA, you need to use version 3.2r11 or later to get a compatible version of DKIM. ---------------------------------------------------------- Sender-ID check details: ---------------------------------------------------------- Result: softfail (SPF-Result: SoftFail) ID(s) verified: header.From=sylvain@XXXXXX.com DNS record(s): XXXXXX.com. SPF (no records) XXXXXX.com. 408 IN TXT "v=spf1 a ptr ip4:5.39.87.XXX ~all" XXXXXX.com. AAAA (no records) 3.3.2.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.c.0.3.0.0.4.0.b.8.f.7.0.6.2.ip6.arpa. 86261 IN PTR mail-ob0-x233.google.com. mail-ob0-x233.google.com. 86216 IN AAAA 2607:f8b0:4003:c01::233 ---------------------------------------------------------- SpamAssassin check details: ---------------------------------------------------------- SpamAssassin v3.3.1 (2010-03-16) Result: ham (-1.9 points, 5.0 required) pts rule name description ---- ---------------------- -------------------------------------------------- -1.9 BAYES_00 BODY: Bayes spam probability is 0 to 1% [score: 0.0000] 0.0 HTML_MESSAGE BODY: HTML included in message Hors ligne Titouan Re : Erreur fatal postfix sur email en spam ou pas sur le dkim, tu as un problème de selector. 2 valeurs; s=dkim, s=default c'est normal que cela ne marche pas. le selector est une sorte de pointeur pour les autres smtp afin de choisir la bonne clef publique pour tel domaine de messagerie. à toi de savoir dans quel fichier de configuration "selector dkim" est présent par ex, /etc/dkim-filter.conf sur le spf, tu devrais avoir +a:mail-ob0-x233.google.com Hors ligne senjy Re : Erreur fatal postfix sur email en spam ou pas En effet je me rappel avoir installé le premier selector, et un ami est venu installé le deuxieme. En regarde, je n'ai pas trouvé de dkim-filter.conf dans etc... ce qui pose probleme pour le choix Pour le spf, j'ai actuellement xxxxxxx.com. IN TXT "v=spf1 a ptr ip4:5.39.87.xxxx ~all" si j'ai compris, il faut que je fasse quelques chose comme ca ? xxxxxxx.com. IN TXT "v=spf1 +a:mail-ob0-x233.google.com Hors ligne Titouan Re : Erreur fatal postfix sur email en spam ou pas En regardant, je n'ai pas trouvé de dkim-filter.conf dans etc... ce qui pose probleme pour le choix amavis, opendkim, dkimproxy, dkim-filter je ne sais pas trop ce que tu utilises comme solution dkim et pourtant, tu as dans le main.cf; 8891 ... sudo lsof -i:8891 && sudo ps aux | grep 8891 bref .... si tu ne trouves pas, tu crées un autre enregistrement dns avec "dkim" comme selector, au lieu de "default". Tu fais juste un copié-collé dans tes dns, avec "dkim". A mon avis, c'est le plus simple pour toi. Au moins, lorsque tu signes en invoquant dans le header s=dkim, tu préviens tous les smtp, qu'il faut pour vérifier chercher la clef publique en sudo host -t txt default._domainkey.domain.tld sudo host -t txt dkim._domainkey.domain.tld *Pour le spf, un peu de doc sur la syntaxe Un mail est classé spam par spf, lorsque les conditions ne matchent pas, et que le all (condition: tout ce qui reste, le reste ) est défini à - ou ~. Cela peut être le contraire aussi; la condition matche mais elle est assignée - ou ~ -a: smtp2.target.tgt ~all SoftFail le mail est accepté par le smtp mais classé spam-all Fail mail classé spam et refusé ... si tu utilises des relais registrar, fai, ..., il faut les marquer ... +a:smtp.registrar.tld +mx:... -all les conditions sont évaluées de gauche à droite et le smtp (donc pas le tien, mais tous les smtp de la planète) qui interroge le spf, s'arrête sur la première qui matche (comme dans postfix) 1ere condition: toutes les ip de smtp.registrar.tld sont évaluées. si matche alors Pass (+) 2ème condition: mx matche alors Pass (+) etc Le cas échéant, si aucune des conditions ne matchent, all remporte le challenge. Le mail est classé spam et refusé si -all - Fail. Hors ligne senjy Re : Erreur fatal postfix sur email en spam ou pas Merci @titouan, mais je ne parle pas la langue des extras terrestres. Plaisanterie à part, j'ai du rater des cours ou des explications. En tout cas tout cela est trop théorique pour moi. Admettons que je trouve et que je modifie. Combien de temps faut il attendre pour voir les changement effectif ? Qu'est ce que je peux tester tout de suite ? et qu'est ce que je peux tester sous 48h ? Comment je saurais que sous 48h, la modification est effective ? Dernière modification par senjy (Le 06/04/2013, à 00:01) Hors ligne Titouan Re : Erreur fatal postfix sur email en spam ou pas je crois que c'est 3600s TTL le minimum, pour un enregistrement dns. tu renvois un mail à check-auth@verifier.port25.com Hors ligne senjy Re : Erreur fatal postfix sur email en spam ou pas Les 3600s concerne le SPF et/ou le DKIM ou tout les deux ? Hors ligne Titouan Re : Erreur fatal postfix sur email en spam ou pas les 2, n'importe quel dns, si tu stipules 3600s TTL. Compter 1h15 tu seras fixé en faisant sudo host -t txt domain.tld sudo host -t txt dkim._domainkey.domain.tld Hors ligne senjy Re : Erreur fatal postfix sur email en spam ou pas quel sont les valeurs que je dois avoir ici ? car en testant parfois j'ai SPF Check qui vaut neutral et DomainKeys Check qui vaut pass. Mais pas les deux ensemble. que veut dire neutral en fait ? ========================================================== Summary of Results ========================================================== SPF check: pass DomainKeys check: neutral DKIM check: fail Sender-ID check: permerror SpamAssassin check: ham et quel difference entre -all et ~all ? d'autre part, J'ai pour DKIM ces erreurs, alors que je n'ai aucune erreur quand je fait un DKIM check via des outils en ligne DKIM result: fail (wrong body hash: XY6+GYNsvhOyUqMkhddjDoi2qnHXDFF0xY+VD5740=) ============================================================ Expected Body Hash: SMyqJk4cOZ4kpsxH29ZNroP0D5+6Q8fiURRp11VI= ---Original Message Header--- x-sender: feedback@xxxxxx.com x-receiver: AAAA3QcEBRM@appmaildev.com Received: from ksxxxxx.kimsufi.com ([5.39.87.xxx]) by mail.appmaildev.com with Microsoft SMTPSVC(7.5.7600.16385); Fri, 5 Apr 2013 20:04:59 -0400 Received: by ksxxxxx.kimsufi.com (Postfix, from userid 33) id C594823DE7; Sat, 6 Apr 2013 02:09:46 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=simple/simple; d=xxxxxx.com; s=dkim; t=1365206986; bh=XY6+olGYNsvhOyUqMkhddjDoi2qnHXDFF0xY+VD5740=; h=To:Subject:MIME-Version:Content-type:From:Reply-To:Message-Id: Date; b=D9phSnZlnrBdrRjnsk8HK7sJFFaUCpwk/Rs0Y2k4iq+bBFXcOZLOTPEy6VGMjk/oJ CmUKbURGySJjlFcxx/flZNGSjtlFsr6e4M7BytSNW9pEIV1V+iaURtu9apHyBV12 BE2X82q/EiXcunxfDAl+jfON9wMMIvLS7ZfbGXJo= Dernière modification par senjy (Le 06/04/2013, à 02:10) Hors ligne senjy Re : Erreur fatal postfix sur email en spam ou pas a part SPF le reste est en carafe :-( Hors ligne Titouan Re : Erreur fatal postfix sur email en spam ou pas Le plus important, c'est le SPF, et le DKIM, qu'il faut avoir. Sender-ID obsolète. DomainKeys obsolète et inutile si tu as Dkim.. Attention c'est preque pareil que la syntaxe du Dkim et parfois confusion au niveau des docs et dns ... SPF check: passDKIM check: pass 1/ DKIM result: fail (wrong body hash ... >peut-être un problème d'intégrité de la clef publique dans le copier -coller ? >as-tu vérifié le nombre de caractères que comporte la clef publique concaténée et celui du DNS p=blabla... ? 1a/ public.key -----BEGIN PUBLIC KEY-----AAAAAAAAAAAAQUBBBBBBBBBBBBB....-----END PUBLIC KEY----- grep -v '^-' /etc/..../public.key | tr -d '\n' | wc -m 216 caractères pour moi, qui doit correspondre au p=AAAABBB... du dns tu peux remplacer wc -m par md5sum aussi pour checker . 1b/ dns sudo host -t txt selector._domainkey.domain.tld selector._domainkey.domain.tld descriptive text "v=DKIM1\;k=rsa\;p=AAAABBBB..." nb; les "\" sont le résultat de la sortie de la commande host ou dig. Dans le record registrar ou bind, tu n'as pas ces "\". tu peux avoir v=DKIM1\;k=rsa\;t=s\;.... echo -n 'AAAAAAABBBBBB...' | wc -m 216 caractères , ok c'est bon. tu dois trouver 1a=1b Dernière modification par Titouan (Le 12/04/2013, à 18:42) Hors ligne senjy Re : Erreur fatal postfix sur email en spam ou pas J'ai fait le test et suis tombé sur le bon chiffre, mais j'ai toujours les soucis probleme. Et parce que je n'arrive pas a avancé, je me suis résigné a utiliser php-dkim. qui pour le coup donne de meilleur résultat. mais la j'ai encore une petite erreur, au check ==> signature doesn't verify bizarre qu'ils soient cote a cote ========================================================== Summary of Results ========================================================== SPF check: pass DomainKeys check: neutral DKIM check: pass DKIM check: fail Sender-ID check: pass SpamAssassin check: ham ========================================================== .... bla bla blabla .... ---------------------------------------------------------- DKIM check details: ---------------------------------------------------------- Result: fail (signature doesn't verify) ID(s) verified: Canonicalized Headers: from:Fresh'20'DKIM'20'Manager'20'<sylvain@xxxxxxx.com>'0D''0A' to:check-auth@verifier.port25.com,'20'AAAA3QcEDQYA@appmaildev.com'0D''0A' subject:Test'20'of'20'PHP-DKIM'0D''0A' dkim-signature:v=1;'20'a=rsa-sha1;'20'q=dns/txt;'20'l=79;'20's=mail;'20't=1365862259;'20'c=relaxed/simple;'20'h=From:To:Subject;'20'd=xxxxxxx.com;'20'z=From:=20Fresh=20DKIM=20Manager=20<sylvain@xxxxxxx.com>'20'|To:=20check-auth@verifier.port25.com,=20AAAA3QcEDQYA@appmaildev.com'20'|Subject:=20Test=20of=20PHP-DKIM;'20'bh=o2Uu9TUHCQzX9F5W2UiAnCSRE9g=;'20'b= Hors ligne Titouan Re : Erreur fatal postfix sur email en spam ou pas signature doesn't verify peut-être une histoire de sender >normalement, pour que le dkim signe, il faut alignersender = mail from = return_path et sender-domain=tondomain.tld qui est le realm ( là où l'authentification se fait) attention donc aux usages spéciaux (scripts, www-data, site internet, ...) (1) et autres sender_canonical_maps (2), qui déforment les headers. cela dépend donc de beaucoup de la manière dont tu fais les tests. vérifier aussi MAIL FROM & 250 Sender dans les logs de postfix (verbose). Hors ligne
I just started to use Python so the following might be a really REALLY dumb question but I searched the web for a long time and didn't find anything. I'm trying to use the XMMS2 client from a Django View. Here is what I have in my views.py: import xmmsclient import os import sys def list(request): xmms = xmmsclient.XMMS("tutorial1") xmms.connect(os.getenv("XMMS_PATH")) result = xmms.playlist_list_entries() result.wait() ... And here is the error I get: AttributeError at /xmms2/list/ 'module' object has no attribute 'XMMS' And the line in question is this: xmms = xmmsclient.XMMS("tutorial1") The view works fine if I remove all the code and replace it with (for example): return HttpResponse("list") I first thought there was a problem with the xmmsclient library but it works fine when I run this xmms2-tutorial example So I guess there is some sort of incompatibility between Django and xmmsclient but I really don't have a clue. I'm running Ubuntu 12.04, Python 2.7.3, Django 1.4.1 and XMMS2 0.8 Any help will be really appreciated!
I have a custom save method and a custom decorator for it to run the Django's model save() before and after my custom save: models.py: from django.contrib.auth.models import User from django.db import models def save_decorator(method_to_decorate): def wrapper(self, *args, **kwargs): super(type(self), self).save(*args, **kwargs) method_to_decorate(self, *args, **kwargs) super(type(self), self).save(*args, ** kwargs) return wrapper class The_Image_Abstract(models.Model): class Meta: abstract = True create_time = models.DateTimeField(editable=False) class Avatar(The_Image_Abstract): #I'm using this to track Avatar class in the template. There should be a better way. user = models.OneToOneField(User, related_name='avatar') @save_decorator def save(self, *args, **kwargs): "my stuff here" pass This works perfectly when Avatar is saved or modified in the admin page. But it raises Internal Error when Avatar is saved as formset in an inline of another model (formset worked before adding the decorator). What is going wrong here? I saw posts about people receiving this error when using Postgres and I am using Postgres too but I don't think this case is caused by Postgres. Request Method: POST Request URL: http://localhost/admin/auth/normal_user/add/ Django Version: 1.4.3 Exception Type: InternalError Exception Value: current transaction is aborted, commands ignored until end of transaction block Exception Location: /home/eras/projects/kart/venv/local/lib/python2.7/site-packages/django/db/models/sql/compiler.py in execute_sql, line 912 Python Executable: /home/eras/projects/kart/venv/bin/python Python Version: 2.7.3 Any help appreciated! Thanks, Eras
In my last article I covered the changes from version 7 to version 8 of the draft AtomAPI. Now the latest version of the AtomAPI is version 9 which adds support for SOAP. This change, and its impact on API implementers, will be covered in a future article. In this article I'm going to build a simple implementation of the AtomAPI. The first task at hand is to pick a viable candidate. I had a list of criteria which included working with a small code base, working in Python, and the target also being a slightly unconventional application of the AtomAPI. The reason I wanted a small code base in Python is that it's a language I'm familiar with, and small is good for the sake of exposition. The reason I picked an unconventional application of the AtomAPI is that I've found that to be a good technique for stretching a protocol, looking for strengths and weaknesses. The application I've picked is PikiPiki, which is a wiki, a cooperative authoring system for the Web. It's written in Python, is GPL'd, has a small code base, and the code is easy to navigate. It also has a good lineage given that MoinMoin is based on PikiPiki. The source for both the client and the modified server described in this article can be downloaded from the EditableWebWiki. To create an implementation of the AtomAPI there are a few operations we need to support. Each entry, which in the case of a wiki will be the content for a WikiWord, needs to have a unique URI called the EditURI that supports GET, PUT and DELETE. In addition a single PostURI that accepts POST to create new entries needs to be added. Last we'll add a FeedURI that supports GET to return a list of the entries. Supporting the listed operations on these URIs is all that's needed to have a fully functioning Atom server. (This of course ignores SOAP, which I'll cover later.) Character encoding is often overlooked. Despite that it's an important part of working with any XML format. Atom is no exception. Before making any additions to PikiPiki we'll need to make a few small changes to ensure that all of our data is encoded correctly. For a good introduction to character encoding consult the excellent introduction by Jukka Korpela. To make things easier we can encode all of PikiPiki's data as UTF-8. There are many encoding to choose from, all with different advantages and disadvantages; but UTF-8 has some special properties: it allows us to use any Unicode character, for the most part treats the data like regular "C" strings, and we are guaranteed support by any conforming XML parser. Also, support for UTF-8 is one of the few things that most browsers do right. Since this is a wiki, and for now all the data coming into it comes through a form, we need to ensure that all incoming data is encoded as UTF-8. The easiest way to do this is by specifying that the encoding for form page is UTF-8; lacking any other indications, a browser will submit the data from a form using the same character encoding that the page is served in. While HTML forms can specify alternate character sets that the server will accept when data is submitted, via the accept-charset attribute, support for this is spotty (meaning it worked perfectly in Mozilla, and I failed to get it working in Microsoft's Internet Explorer). So our first change to PikiPiki is to add a meta tag to the generated HTML. def send_title(text, link=None, msg=None, wikiword=None): print "<head><title>%s</title>" % text print '<meta http-equiv="Content-Type" content="text/html; charset=utf-8">' Now all of our web pages should submit UTF-8 encoded data, and since all of the web pages produced from the wiki are combinations of ascii markup embedded in the Python program and the UTF-8 in the stored wiki entries, we can be sure our output is UTF-8. A Wiki revolves around WikiWords, mixed-case words that are the title for and unique identifiers of every page on the wiki. In the case of PikiPiki, the WikiWord is also the filename that the text of the page is stored in. The next change is to move the configuration of PikiPiki into a separatefile. We'll be creating two new CGI programs to handle the AtomAPI, and they both need access to some configuration information. The configuration section is just a set of global variables that we'll move into piki_conf.py: from os import path import cgi data_dir = '/home/myuserpath/piki.bitworking.org/' text_dir = path.join(data_dir, 'text') editlog_name = path.join(data_dir, 'editlog') cgi.logfile = path.join(data_dir, 'cgi_log') logo_string = '<img src="/piki/pikipiki-logo.png" border=0 alt="pikipiki">' changed_time_fmt = ' . . . . [%I:%M %p]' date_fmt = '%a %d %b %Y' datetime_fmt = '%a %d %b %Y %I:%M %p' show_hosts = 0 css_url = '/piki/piki.css' nonexist_qm = 0 The next task at hand is to handle the functions of the EditURI. In the AtomAPI each entry has an associated EditURI, a URI you can dereference in order to retrieve the representation of the entry. You can also PUT an Atom entry to the EditURI to update the entry. In this case, each definition of a WikiWord in PikiPiki will act as a single entry. To handle the EditURI functions we'll create a Python script atom.cgi. First let's map out the GET. We need to package up the UTF-8 encoded contents of a WikiWord and send it back. We need to decide on the form of the URI we are going to use. In this case we are going to be calling a CGI program and need to pass in the WikiWord as a parameter. We could pass it in either as a query parameter or we could pass it in as a sort of path. For example, in the first case, if the WikiWord was "FrontPage", the EditURI could be atom.cgi?wikiword=FrontPage. In the second place, the EditURI might be atom.cgi/FrontPage. Well choose the latter; the WikiWord will be passed in via the "PATH_INFO" environment variable. def main(body): method = os.environ.get('REQUEST_METHOD', '') wikiword = os.environ.get('PATH_INFO', '/') wikiword = wikiword.split("/", 1)[1] wikiword = wikiword.strip() word_anchored_re = re.compile(WIKIWORD_RE) if method == 'POST': ret = create_atom_entry(body) elif word_anchored_re.match(wikiword): if method in ['GET', 'HEAD']: ret = get_atom_entry(wikiword) elif method == 'PUT': ret = put_atom_entry(wikiword, body) elif method == 'DELETE': ret = delete_atom_entry(wikiword) else: ret = report_status(405, "Method not allowed", "") else: ret = report_status(400, "Not a valid WikiWord", "The WikiWord you referred to is invalid.") return ret[1] Our CGI pulls the HTTP method from the environment variable "REQUEST_METHOD" and the WikiWord from the "PATH_INFO" environment variable. Based on those two pieces of information we dispatch to the correct function. When we process GET we also are careful to respond to HEAD requests too. This is an important point, as the Apache web server will do the right thing with the HEAD response, that is, generate the right headers and send only the headers, discarding the body. def get_atom_entry(wikiword): filename = getpath(wikiword) base_uri = piki_conf.base_uri if path.exists(filename): issued = last_modified_iso(filename) content = file(filename, 'r').read() else: issued = currentISOTime() content = "Create this page." return (200, ENTRY_FORM % vars()) Where ENTRY_FORM is defined as: """Content-type: application/atom+xml; charset=utf-8 Status: 200 Ok <?xml version="1.0" encoding='utf-8'?> <entry xmlns="http://purl.org/atom/ns#"> <title>%(wikiword)s</title> <link rel="alternate" type="text/html" href="%(base_uri)s/%(wikiword)s" /> <id>tag:dev.bitworking.org,2004:%(wikiword)s</id> <issued>%(issued)s</issued> <content type="text/plain">%(content)s</content> </entry>""" There are two important points to note about this code. The first is what we do if the desired WikiWord does not exist. If we were writing this for a typical CMS, for a GET for an entry that didn't exist we would normally return with a status code of 404. Wikis, in contrast, when dealing with the HTML content, present what appears to be an infinite URI space. That is, you can request any URI at a wiki and, as long as you specify a validly formed WikiWord, you won't get a 404. Instead you will get a web page that prompts you to enter the content for that WikiWord. Go ahead and try it on the PikiPiki wiki that is setup for testing this implementation of the AtomAPI. This WikiWord currently doesn't have a definition: http://piki.bitworking.org/piki.cgi/SomeWikiWordThatDoesntExist. To keep parity with the HTML interface, the AtomAPI interface works the same way. The second point is character encoding. Note that we state character encoding in two places in the response, both in the HTTP header Content-type: and in the XML Declaration. There are two more HTTP methods to handle for the EditURI, DELETE and PUT. PUT is used to update the content for a WikiWord, replacing the existing content with that delivered by the PUT. DELETE is used to remove an entry; it's easy to implement: just delete the associated file. def delete_atom_entry(wikiword): ret = report_status(200, "OK", "Delete successful.") if wikiwordExists(wikiword): try: os.unlink(getpath(wikiword)) except: ret = report_status(500, "Internal Server Error", "Can't remove the file associated with that word.") return ret Note that unless something really bad happens, we return with a status code of 200 OK. That is, if the entry doesn't exist then we still return 200. You might be scratching your head if you remember we just talked about our implementation always returning an entry for every valid WikiWord, whether or not it actually had filled in content. That is, if you come right back and do a GET on the URI we just DELETE'd, it will not give you a 404, but instead will return the default filled in entry, "Create this page". Is this a problem? No. It may seem a bit odd, but it's not a problem at all. DELETE and GET are two different, orthogonal requests. There is no guarantee that some other agent, or some process on the server itself, didn't come along and recreate that URI between the DELETE and the GET. Supporting PUT allows us to change the content of a WikiWord. To make the handling of XML easier I've used the Python wrapper for libxml2, an excellent tool for handling XML, in particular because it let's you use XPath expressions to query XML documents. In this case we're using them to pull out the content element. def put_atom_entry(wikiword, content): ret = report_status(200, "OK", "Entry successfully updated.") doc = libxml2.parseDoc(content) ctxt = doc.xpathNewContext() ctxt.xpathRegisterNs('atom', 'http://purl.org/atom/ns#') text_plain_content_nodes = ctxt.xpathEval( '/atom:entry/atom:content[@type="text/plain" or not(@type)]' ) all_content_nodes = ctxt.xpathEval('/atom:entry/atom:content') content = "" if len(text_plain_content_nodes) > 0: content = text_plain_content_nodes[0].content if len(text_plain_content_nodes) > 0 or len(all_content_nodes) == 0: writeWordDef(wikiword, content) append_editlog(wikiword, os.environ.get('REMOTE_ADDR', '')) else: # If there are 'content' elements but of some unknown type ret = report_status(415, "Unsupported Media Type", "This wiki only supports plain texti") return ret The detail to notice in the implementation is the XPath used to pick out the content element. Content elements may have a 'type' attribute, but if it is not present then it defaults to 'text/plain'. Since 'text/plain' is the only type of content we can support in a wiki, it's the only type of content we'll look for. That takes care of the EntryURI; we just have the PostURI and FeedURI to go. The PostURI is used for creating new WikiWord entries. def create_atom_entry(body): wikiword = extractWikiWord(body) if wikiword: if wikiwordExists(wikiword): ret = report_status(409, "Conflict", "An entry with that name already exists.") else: ret = put_atom_entry(wikiword, body) if (ret[0] == 200): ret = (201, CREATED_RESP % {'base_uri': base_uri, 'atom_base_uri': atom_base_uri, 'wikiword': wikiword }) else: ret = report_status(409, "Conflict", "Not enough information to form a wiki word.") return ret The function 'extractWikiWord' pulls out the contents of the title element and converts it into a WikiWord. If we have a good WikiWord and it doesn't already exist, then we use 'put_atom_entry' to create it. Otherwise we respond with an HTTP status code of 409 to indicate that we won't let a POST overwrite an already existing WikiWord. The FeedURI is the last piece we need to implement. The FeedURI is used by clients to locate the PostURI for creating new entries and the EditURIs for editing each entry. The format of the FeedURI is exactly that of an Atom feed. This is different from the Atom we use with the PostURI and the EditURI, which is just the 'entry' element from Atom. Since the format of the FeedURI is the same as that for a regular feed, you might be tempted to have the same feed for both aggregation and editing. This might work in the case of wiki but not for a general site. The reason is that you may have entries in draft or unpublished form which must appear at the FeedURI so you can edit them, but must not appear in your aggregation feed. Given that this is for a publicly editable wiki, we don't have such a constraint so we can use this feed for both purposes. The FeedURI is implemented as a separate script, atomfeed.cgi, that builds a feed. The code, which is bit too long to include here, builds an Atom feed by sorting all the files that contain WikiWord definitions in reverse chronological order, then takes the WikiWord and associated content, and formats it in an Atom entry. The entries are concatenated together and placed in an Atom feed. The only special additions are the link elements that contain the PostURI and the EditURIs, which are denoted with attributes rel="service.post" and rel="service.edit" respectively. Here is a snippet from the Atom feed produced by atomfeed.cgi. <?xml version="1.0" encoding="utf-8"?> <feed version="0.3" xmlns="http://purl.org/atom/ns#"> <title>PikiPiki</title> <link rel="alternate" type="text/html" href="http:/.bitworking.org.cgi"/> <link rel="service.post" type="application/atom+xml" href="http:/.bitworking.org/atom.cgi"/> <link rel="next" type="application/atom+xml" href="http:/.bitworking.org/atomfeed.cgi/10"/> <modified>2004-03-09T21:32:58-05:00</modified> <author> <name>Joe Gregorio</name> <url>http://bitworking.org/</url> </author> <entry> <title>JustTesting</title> <link rel="service.edit" type="application/atom+xml" href="http:/.bitworking.org/atom.cgi/JustTesting" /> <link rel="alternate" type="text/html" href="http:/.bitworking.org.cgi/JustTesting" /> <id>tag:piki.bitworking.org,2004:JustTesting</id> <issued>2004-03-09T21:32:58-05:00</issued> <modified>2004-03-09T21:32:58-05:00</modified> <content type="text/plain"> This is content posted from an AtomAPI client. </content> </entry> <entry> <title>PikiSandBox</title> <link rel="service.edit" type="application/atom+xml" href="http:/.bitworking.org/atom.cgi/PikiSandBox" /> <link rel="alternate" type="text/html" href="http:/.bitworking.org.cgi/PikiSandBox" /> <id>tag:piki.bitworking.org,2004:PikiSandBox</id> <issued>2004-03-04T21:49:03-05:00</issued> <modified>2004-03-04T21:49:03-05:00</modified> <content type="text/plain"> '''I dare you''': press the Edit button and add something to this page. -- MartinPool </content> </entry> This feed also contains one more link element of a type we haven't talked about yet. The second link, the one with rel="next", points to the next set of entries. That is, when we produce a FeedURI you don't want to put all the entries into a single feed. That could end up being hundreds if not thousands of entries which would be impractical to handle. Instead put in a fixed number, like 20, and then the 'next' link points to another feed, with the next 20 entries. If a feed is in the middle of such a chain then it also contains a link with rel="prev" which points to the set of entries previous to the current one. In this way clients can navigate around the list of entries in manageable sized sets. It should be noted here that the client code that comes with this implementation does not implement traversing 'next' and 'prev' links in a feed. An AtomAPI enabled wiki wouldn't be worth much if there wasn't a client available, so I've included a wxPython client that allows you to create new entries on the wiki and to edit old entries. Remember how careful we were when specifying and using the character encoding? There isn't much code involved in supporting and processing everything in UTF-8, but careful planning ahead pays dividends. Here is a screenshot of the client editing one of the pages on a wiki with some unicode characters in it: All of the source for both the client and the server can be downloaded from the EditableWebWiki, which is running the code described above. Note that the client is a GUI application written in Python. You must use the version of wxPython that is compiled with Unicode support. Lastly, for your platform you'll have to ensure that you have fonts available to display the Unicode characters you are going to be using. One of the reasons we started using the AtomAPI on a wiki was to stretch the API and see where things broke down. Nothing really awful showed up, though we did find some rough spots. The first rough spot cropped up when doing a GET on the EditURI, where we encounter a slight mismatch between the formulation of the AtomAPI and this wiki implementation. The problem is that according to version 9 the draft AtomAPI, when doing a GET on an EditURI, the issued element is required. Since PikiPiki only stores the raw contents in a file, and doesn't store any other data, we are limited to using the last modified date stored in the file system for each file, which isn't the same as the issued element. The second rough spot is in the area of content. The only only type of content we accept is 'text/plain', but that isn't the only type of content that a client could post. In fact, most may be able to produce 'text/html' and some may even be able to produce 'application/xhtml+xml'. Now we may be able to add code to this implementation to convert HTML into WikiML, but the broader question still stands: how does a client know what kinds of content, i.e. which mime-types, an AtomAPI server will accept? This is an open question as of today. Using Python and the XPath facilities of libxml2, it was straightforward to build an AtomAPI implementation for a wiki. There isn't even very much code: atom.cgi is just 146 lines of code, while atomfeed.cgi is just 122 lines. This is just a basic client that does the minimum to support the AtomAPI. In a future article the way the server handles HTTP can be enhanced to provide significant performance boosts by using the full capabilities of HTTP. In addition, the SOAP enabling of the server will require some changes. After that we can add the ability to edit the wiki's templates. XML.com Copyright © 1998-2006 O'Reilly Media, Inc.
wlourf Postez vos scripts Lua pour Conky ! Bonsoir à tous ceux pour qui chaque pixel du bureau compte, J'ouvre ce topic suite aux discussions sur le topic des conky pour discuter des scripts Lua dans conky. Lua est un langage de script léger et facile a utiliser qui permet d'ajouter de nouvelles fonctionnalités à nos conky, on l'utilise souvent avec Cairo pour dessiner des choses sympathiques et colorées dans son conky mais aussi pour manipuler du texte. Quelques liens (an anglais) : doc Lua : http://www.lua.org/manual/5.1/index.html doc Cairo : http://cairographics.org/documentation/ Quelques captures d'écrans : Conky hardcore : http://conky.linux-hardcore.com/?page_id=2243 Pour utiliser Lua et Cairo avec conky, vous devez avoir la version de conky 1.7.2 installée, ou mieux la 1.8.0 qui apporte plus de fonctionnalité AVEC le support pour Lua,conky -v doit vous retourner ça au minimum : Lua bindings: * Cairo * Imlib2 Si ce n'est pas le cas, installez le paquet conky-all ou compilez les sources avec les options (pour la 1.8.0) : --enable-lua --enable-lua-cairo --enable-lua-imlib2 Pour appeler un script Lua dans son conky, dans la section avant-TEXT, utiliser cette ligne : lua_load ~/chemin/script.lua Pour appeler une fonction du script chargé, dans la section avant-TEXT, utiliser cette ligne (pour appeler le script Lua AVANT d'afficher le conky) : lua_draw_hook_pre nom_de_la_fonction ou (pour appeler le script Lua APRES l'affichage du conky) lua_draw_hook_post nom_de_la_fonction Dans le script Lua la fonction à appeler sera précédée de conky_ ainsi : function conky_nom_de_la_fonction() end A noter que l'on peut passer des arguments aux fonctions. Si votre fonction Lua retourne du texte, on peut afficher ce résultat dans conky (section TEXT) ${lua nom_de_la_fonction parametres} Si votre fonction Lua retourne des chiffres, il existe aussi les variables conky suivantes : $lua_bar$lua_gauge$lua_graph Plus d'info sur les variables conky ici et sur les variables de configuration (section avant-TEXT) ici et aussi les fonctions conky utilisables dans Lua : ici, par exemple pour récupérer la taille de la fenêtre du conky. Le truc super-important : il faut qu'il y ait au moins une ligne (vide ou non) après TEXT dans votre conkyrc sinon ça ne marchera pas Si vos scripts ne fonctionnent pas, lancez le conky en mode console et postez l'erreur retournée : conky -c /chemin/conkyrc __________________________________________________________________________________ Allez pour la route, quelques exemples trouvés sur la toile, ils necessitent un peu d'entrainement http://forum.ubuntu-it.org/index.php/topic,290268.msg2831607.html#msg2831607 http://forum.ubuntu.ru/index.php?topic=63273.msg640444#msg640444 Le dernier de ma pomme Totalement inutile ... donc indispensable ! Dernière modification par wlourf (Le 15/06/2010, à 23:48) Hors ligne iuchiban Re : Postez vos scripts Lua pour Conky ! Salut, C'est impressionnant ce qui peut être fait comme truc avec lua et conky Pour le moment je ne l'utilise pas encore vu que je viens de le découvrir, mais je m'abonne au sujet C'est depuis que Chuck Norris a laissé la vie sauve à un manchot que l'on dit que Linux est libre. Chuck Norris n'a pas besoin d'éditer son premier message pour ajouter [Résolu]. Chuck Norris est toujours [Résolu], quoi qu'il arrive. Hors ligne Gardouille Re : Postez vos scripts Lua pour Conky ! Salut, Tout d'abord, respect pour ce que t'as présenté jusque là^^ Sinon, j'ai téléchargé l'archive sur le forum russe pour le deuxième exemple. Et dans les commentaires du script .lua, on peut voir ça: " Shadowed clock by wlourf (10 jan. 2010) - стрелки часов с тенью calendar wheel by Wlourf (14 jan. 2010) - календарь " Donc, si c'est bien toi, est ce que tu saurais détaillé ce qui est nécessaire pour bien faire fonctionner conky avec ce script? J'ai bien conky 1.8.0 Si tu as une idée Merci pour ce topic et ton "petit" script du topic conky Dernière modification par Gardouille (Le 10/06/2010, à 13:29) Hors ligne chepioq Re : Postez vos scripts Lua pour Conky ! @Gardouille Chez moi le script fonctionne, j'ai la même image que celle du forum russe. J'ai juste une erreur qui s'affiche en console: [dominique@localhost calendar]$ conky -c conkyrc1 Conky: desktop window (1c00170) is subwindow of root window (15a) Conky: window type - override Conky: drawing to created window (0x4a00001) Conky: drawing to double buffer Conky: llua_do_call: function conky_widgets execution failed: ...dominique/Téléchargements/calendar/calendar3_1.lua:913: attempt to perform arithmetic on field 'value' (a nil value) Conky: llua_do_call: function conky_widgets execution failed: ...dominique/Téléchargements/calendar/calendar3_1.lua:913: attempt to perform arithmetic on field 'value' (a nil value) Conky: llua_do_call: function conky_widgets execution failed: ...dominique/Téléchargements/calendar/calendar3_1.lua:913: attempt to perform arithmetic on field 'value' (a nil value) Conky: llua_do_call: function conky_widgets execution failed: ...dominique/Téléchargements/calendar/calendar3_1.lua:913: attempt to perform arithmetic on field 'value' (a nil value) Conky: llua_do_call: function conky_widgets execution failed: ...dominique/Téléchargements/calendar/calendar3_1.lua:1040: attempt to compare nil with number Que donne le retour de la commande conky -v Tout est dans tout et réciproquement.... Hors ligne wlourf Re : Postez vos scripts Lua pour Conky ! chez moi aussi ça fonctionne (plus ou moins, j'ai commenté la ligne 995) mais là tu t'attaques à du lourd, un mix de 4 ou 5 scripts en russe en plus ! mais on va essayer de t'aider ! Lance le script en console avec conky -c /chemin/conkyrc et envoie nous l'erreur + le conky -v comme à dit chepioq A vue de nez je dirais que tu n'as pas le support imlib installé, mais conky -v le confirmera ou non ! Dernière modification par wlourf (Le 09/06/2010, à 18:25) Hors ligne Gardouille Re : Postez vos scripts Lua pour Conky ! conky -v Conky 1.8.0 compiled Thu Apr 1 14:52:48 UTC 2010 for Linux 2.6.26-2-686 (i686) Compiled in features: System config file: /etc/conky/conky.conf Package library path: /usr/lib/conky X11: * Xdamage extension * XDBE (double buffer extension) * Xft * ARGB visual Music detection: * MPD * MOC General: * math * hddtemp * portmon * Curl * RSS * Weather (METAR) * Weather (XOAP) * wireless * support for IBM/Lenovo notebooks * nvidia * eve-online * config-output * Imlib2 * ALSA mixer support * apcupsd * iostats * ncurses * Lua Lua bindings: * Cairo * Imlib2 Il me semble que j'obtiens également des erreurs au lancement de conky, mais je ne peux pas les donner maintenant, je n'ai pas l'affichage graphique depuis mon poste. Je vous accorde que c'est du lourd pour les script russes lol, mais en tout cas, merci pour l'intérêt Je poste les erreurs demain et j'essaye de me repancher là dessus demain matin quand je serai sur mon poste. Edit: Bonjour, Tout d'abord, j'obtiens les mêmes erreurs que toi, chepioq. Ensuite, j'ai commenté la ligne 995, comme indiqué par wlourf -- draw_pie(tbl, pt) Je n'ai pas la liste des processus, mais c'est pas très grave. Si vous avez une idée, tant mieux sinon, j'arriverai à me "contenter" de script plein de commentaires russes Dernière modification par Gardouille (Le 10/06/2010, à 13:29) Hors ligne chepioq Re : Postez vos scripts Lua pour Conky ! J'ai exactement le même rendu, mais sans commenter la ligne 995, et je n'ai pas non plus les processus... (je verrai ça ce soir, là je suis au boulot sur windows...) Par contre on peut le mettre en français, il suffit pour cela d'ajouter: ${time} juste après TEXT,et de mettre : xftalpha 0.0 de mémoire il est à 0.5 dans le script original... Dernière modification par chepioq (Le 10/06/2010, à 13:06) Tout est dans tout et réciproquement.... Hors ligne Gardouille Re : Postez vos scripts Lua pour Conky ! Par contre on peut le mettre en français, il suffit pour cela d'ajouter: ${time} juste après TEXT Ça fonctionne, merci. et de mettre : xftalpha 0.0 de mémoire il est à 0.5 dans le script original... Par contre, je ne vois pas de différence entre 0.0 et 0.5 J'ai bien les processus. Me demander pas le comment du pourquoi lol. Par contre, comme vous pouvez le constater, le nom des processus est pas super lisible, la faute aux "trucs" gris et jaune qui pointent vers différents jour du calendrier. La solution serait-elle dans le "xftalpha" dont parle chepioq? Dernière modification par Gardouille (Le 10/06/2010, à 13:27) Hors ligne loke Re : Postez vos scripts Lua pour Conky ! Bonjour Je me suis lancé sur le même conky et voila le résultat ~$ conky -v Conky 1.8.0 compiled Thu May 13 15:01:31 UTC 2010 for Linux 2.6.24-27-xen (i686) Compiled in features: System config file: /etc/conky/conky.conf Package library path: /usr/lib/conky X11: * Xdamage extension * XDBE (double buffer extension) * Xft * ARGB visual Music detection: * MPD * MOC General: * math * hddtemp * portmon * Curl * RSS * Weather (METAR) * Weather (XOAP) * wireless * support for IBM/Lenovo notebooks * nvidia * eve-online * config-output * Imlib2 * ALSA mixer support * apcupsd * iostats * ncurses * Lua Lua bindings: * Cairo * Imlib2 Dans mon terminal ~$ conky -c conkyrc1 Conky: desktop window (22000a9) is subwindow of root window (1ad) Conky: window type - override Conky: drawing to created window (0x4800001) Conky: drawing to double buffer sh: vnstat: not found sh: vnstat: not found sh: vnstat: not found sh: vnstat: not found sh: vnstat: not found sh: vnstat: not found Conky: llua_do_call: function conky_widgets execution failed: /home/vlad/scripts/calendar3_1.lua:913: attempt to perform arithmetic on field 'value' (a nil value) Conky: llua_do_call: function conky_widgets execution failed: /home/vlad/scripts/calendar3_1.lua:913: attempt to perform arithmetic on field 'value' (a nil value) Conky: llua_do_call: function conky_widgets execution failed: /home/vlad/scripts/calendar3_1.lua:917: bad argument #2 to 'format' (number expected, got nil) Conky: llua_do_call: function conky_widgets execution failed: /home/vlad/scripts/calendar3_1.lua:917: bad argument #2 to 'format' (number expected, got nil) Conky: llua_do_call: function conky_widgets execution failed: /home/vlad/scripts/calendar3_1.lua:913: attempt to perform arithmetic on field 'value' (a nil value) Conky: llua_do_call: function conky_widgets execution failed: /home/vlad/scripts/calendar3_1.lua:1040: attempt to compare nil with number une idée du problème pour la transparence et l'affichage décalé ? Dernière modification par loke (Le 10/06/2010, à 19:44) Hors ligne wlourf Re : Postez vos scripts Lua pour Conky ! @loke, dans le terminal avant d'avoir tous les même messages d'erreur qui se répètent, tu dois avoir un message qui te dit ou se trouve le numéro de ligne qui pose problème. Edit : je viens de voir ton edit loke, j'avais les mêmes erreurs que toi jusqu'à ce que j'applique la méthode ci-dessous. Pour la transparence, tu aurais pas compiz qui tourne aussi ? pour le décalage, ça doit s'ajuster dans la partie TEXT du conky non ? Sinon, j'ai réussi à le faire fonctionner sans avoir de messages d'erreurs dans le terminal. Voici une méthode: 0-spécifier le chemin vers le script lua à la ligne 105 : lua_load ... 1- virer toutes les lignes avec vnstat dans le conky (je ne l'utilise pas) 2- ajouter ${time} pour avoir le calendrier en français, par contre je n'arrive pas à cacher uniquement cette partie du conky, peut-on mettre l'alpha à zéro pour une partie donnée comme on modifie les couleurs avec $color 3- adapter l'interface internet à son pc, eth1 chez moi, ça donne ça pour la fin du conkyrc lua_load ~/scripts/olgmen/calendar3_1.lua lua_draw_hook_pre widgets TEXT ${goto 240}Total:${goto 300}${totaldown eth1} ${goto 240}Down:${goto 300}${downspeedf eth1} ${voffset 230}${goto 240}Up:${goto 300}${upspeedf eth1} ${goto 240}Total:${goto 300}${totalup eth1} ${voffset 280} ${time} Ensuite dans le script Lua : j'ai modifié les lignes 909 à 932 comme ceci (on remplace les valeurs nulles par zéro ou une chaine vide, ces valeurs nulles n'apparaissent qu'au lancement du conky on dirait) : if draw_bg then draw_dat_pie(0, 1, radius, bg_colour, 1) end -- сбрасываем значение p на 0 local p = 0 -- загружаем в f значение if t[1].value== nil then t[1].value=0 end ----------AJOUT local f = t[1]['value'] / 100 for i in pairs(t) do local v = t[i]['value'] if v==nil then v=0 end ----------------------------AJOUT if t[i].name==nil then t[i].name="" end -------------AJOUT local str = string.format('%.1f %s', v, t[i]['name']) v = v / 100 -- если используем тень, тогда загружаем данные тени if shade then draw_dat_pie(p, p + v, radius * 0.94, fg_colour, v / p, str) -- иначе загружаем данные без тени else draw_dat_pie(p, p + v, radius * 0.94, fg_colour, 1, str) end p = p + v end Pareil pour le bloc vers la ligne 1041 : function addzero100(num) if num==nil then num=0 end ---------------AJOUT if tonumber(num) < 10 then return "00" .. num elseif tonumber(num) <100 then return "0" .. num else return num end end Enfin adapter les lignes 1383 et 1385 avec votre interface (eth1 ici), le chiffre juste après eth1 est la vitesse maxi en kb/s de votre connexion equalizer(cr, 'downspeedf', 'eth1', 1000, 10, CAIRO_LINE_CAP_ROUND, 215, 111, 1, 12, 2, 0x606070, 0.5, 0xffdf00, 0.5, 0xff8700, 1, 80, true, 1, true, 0xff0000, 0.5, 90) equalizer(cr, 'upspeedf', 'eth1', 100, 10, CAIRO_LINE_CAP_ROUND, 215, 317, 1, 12, 2, 0x606070, 0.5, 0xffdf00, 0.5, 0xff8700, 1, 80, true, 1, true, 0xff0000, 0.5, 90) Enfin pour formater la date différement en français le "." final est déjà compris dans le format, il faut donc mettre %a à la place de %a. (ligne 488 à peu près !) local dt = os.date("%a %d %b.",s2),os.date("%d",s2),os.date("%b",s2) même avec ça, ça fonctionnera local dt = os.date("%a %d %b.",s2) Pour les formats de date c'est ici, j'espère que ça ira chez vous ! En tous cas bravo à olgmen pour cette intégration de scripts même si c'est un peu dur de s'y retrouver ! Dernière modification par wlourf (Le 10/06/2010, à 19:54) Hors ligne chepioq Re : Postez vos scripts Lua pour Conky ! @wlourf J'ai fait tes modifications, et cela fonctionne chez moi sans messages d'erreurs. Pour le ${time} j'ai trouvé une solution en étudiant le man strftime (c'est cette fonction qu'utilise le time de conky) au lieu de ${time} tu mets ${time %t} Le calendrier est en français et je n'ai pas le time du conky qui s'affiche même avec un "xftalpha 1.0" Ne me demande pas pourquoi, mais cela fonctionne chez moi. Tout est dans tout et réciproquement.... Hors ligne chepioq Re : Postez vos scripts Lua pour Conky ! @loke Pour l'affichage décalé, cela vient du fait que tu as ajouté ${time} pour avoir le calendrier en français. Il faut modifier le ${voffset 30} de la première ligne après ${time} et le mettre à 5. TEXT ${time %t} ${voffset 5}${goto 240}${color 9999aa}Month:${goto 300}${execi 60 vnstat -m | grep "`LANG=en_GB.UTF-8 date +"%b '%y"`"| awk '{print $3 $4}'} .......... Tout est dans tout et réciproquement.... Hors ligne wlourf Re : Postez vos scripts Lua pour Conky ! @wlourf J'ai fait tes modifications, et cela fonctionne chez moi sans messages d'erreurs. Pour le ${time} j'ai trouvé une solution en étudiant le man strftime (c'est cette fonction qu'utilise le time de conky) au lieu de ${time} tu mets<metadata lang=Batchfile prob=0.05 /> ${time %t} Le calendrier est en français et je n'ai pas le time du conky qui s'affiche même avec un "xftalpha 1.0" Ne me demande pas pourquoi, mais cela fonctionne chez moi. Génial!! merci chepioq! le %t c'est pour une tabulation, ça marche aussi avec le %n (new line) Hors ligne loke Re : Postez vos scripts Lua pour Conky ! @loke, dans le terminal avant d'avoir tous les même messages d'erreur qui se répètent, tu dois avoir un message qui te dit ou se trouve le numéro de ligne qui pose problème. Edit : je viens de voir ton edit loke, j'avais les mêmes erreurs que toi jusqu'à ce que j'applique la méthode ci-dessous. Pour la transparence, tu aurais pas compiz qui tourne aussi ? pour le décalage, ça doit s'ajuster dans la partie TEXT du conky non ? merci pour ta réponse je vient de testé est j'ai du faire une erreur sur calendar3_1.lua pars-qu'il ne fonction plus, tu pourrai me faire un pack avec le tien? sa sera plus facile est puis sa risque de servir a d'autre personne. pour ce qui est de la transparence je n'utilise pas compiz. Hors ligne wlourf Re : Postez vos scripts Lua pour Conky ! @loke, voici celui que j'ai modifié http://dl.free.fr/i9mX2bVi6 Sinon, comme promis j'ai mis à jour mon petit "text widget" : on peut maintenant choisir l'orientation du dégradé, appliquer un dégradé radial ou encore aligner le texte par rapport au point (x,y). Edit : La dernière version est téléchargeable sur deviantArt Petite précisions pour Lua Le seul truc où il faut faire attention, c'est bien mettre les virgules après chaque élément et bien fermer les accolades (gedit met en surbrillance les accolades), tout se configure au début du script Lua dans la table text_settings, qui elle même contient une table pour chaque texte à afficher, et chaque texte à afficher peut contenir lui même des tables (les tables sont entre { et }) ... et ainsi de suite Exemple d'une table extraite de la table text_settings: {--afficher un texte vertical (angle=-90), avec un dégradé nw 5 couleurs text="texte vertical", x=30, y=450, colour={{0 ,0xFF0000,1}, {0.25 ,0xFFFF00,1}, {0.50 ,0x00FF00,1}, {0.75 ,0x00FFFF,1}, {1 ,0x0000FF,1} }, angle=-90, font_name="Clarendon", font_size=40, orientation="nw", }, Cette table sans nom contient la table "couleur" qui contient 5 éléments, chacun de ces 5 éléments est une table de 3 éléments ... Chaque élément d'une table est séparé par une virgule et une virgule après le dernier élement ne pose pas de problème (ex après orientation="nw") ... bon courage ! Dernière modification par wlourf (Le 12/01/2011, à 19:55) Hors ligne Leyna Re : Postez vos scripts Lua pour Conky ! Merci pour toutes ces explications wlourf, ça devient petit à petit plus clair à mes yeux Hors ligne loke Re : Postez vos scripts Lua pour Conky ! @loke, voici celui que j'ai modifié http://dl.free.fr/i9mX2bVi6 Merci wlourf sa marche parfaitement Hors ligne Ph3nix_ Re : Postez vos scripts Lua pour Conky ! Je suis capable de refaire ce conky la (je vous posterai celui que j'ai fait). Sauf que je n'ai absolument aucune idée de comment faire les aiguilles de la pendule. Quelqu'un a une idée? J'ai aussi en tête de refaire ce conky la: http://customize.org/thumbnails/larger/78804.jpg (le conky winamp) Je sais comment récuperer des infos à partir de rhytmbox etc.. En revanche le seul soucis se sont les icones cliquables, (suivant,précédent... etc) Si quelqu'un a des éxemples de scripts je suis preneur Voici un éxemple de conky que j'ai fait: (à partir des scripts lua conky-rings) @Fenouille84: find n'est pas récursif mais multiprocessus (le récursif c'est le mal) Dernière modification par Ph3nix_ (Le 14/06/2010, à 17:32) Hiroshima 45 / Chernobyl 86 / Windows '95 Hors ligne Leyna Re : Postez vos scripts Lua pour Conky ! Salut Ph3nix_, Est-ce que tu peux publier les scripts de ton conky (le blanc) s'il te plaît? Hors ligne Ph3nix_ Re : Postez vos scripts Lua pour Conky ! oui conkyrc # -- Conky settings -- # background no update_interval 1 cpu_avg_samples 2 net_avg_samples 2 override_utf8_locale yes double_buffer yes no_buffers yes text_buffer_size 2048 imlib_cache_size 0 # -- Window specifications -- # own_window yes own_window_transparent yes own_window_type override own_window_hints undecorate,sticky,skip_taskbar,skip_pager border_inner_margin 0 border_outer_margin 0 minimum_size 400 170 #maximum_width 400 alignment tm gap_x -460 gap_y 50 # -- Graphics settings -- # draw_shades no draw_outline no draw_borders no draw_graph_borders no # -- Text settings -- # use_xft yes xftfont Santana:size=12 xftalpha 0.8 default_color 000000 # -- Lua Load -- # lua_load ~/.conky/myconky_v2/time.lua lua_draw_hook_pre ring_stats TEXT ${voffset 35}${font Santana:size=30}${alignr}${time %H.%M}${font} ${goto 140}${hr 1} ${font Santana:size=10}${alignr}${time %A, %d %B %Y}${font} >>> time.lua --[[ Ring Meters by londonali1010 (2009) This script draws percentage meters as rings. It is fully customisable; all options are described in the script. IMPORTANT: if you are using the 'cpu' function, it will cause a segmentation fault if it tries to draw a ring straight away. The if statement on line 145 uses a delay to make sure that this doesn't happen. It calculates the length of the delay by the number of updates since Conky started. Generally, a value of 5s is long enough, so if you update Conky every 1s, use update_num>5 in that if statement (the default). If you only update Conky every 2s, you should change it to update_num>3; conversely if you update Conky every 0.5s, you should use update_num>10. ALSO, if you change your Conky, is it best to use "killall conky; conky" to update it, otherwise the update_num will not be reset and you will get an error. To call this script in Conky, use the following (assuming that you save this script to ~/scripts/rings.lua): lua_load ~/scripts/rings-v1.2.lua lua_draw_hook_pre ring_stats Changelog: + v1.2 -- Added option for the ending angle of the rings (07.10.2009) + v1.1 -- Added options for the starting angle of the rings, and added the "max" variable, to allow for variables that output a numerical value rather than a percentage (29.09.2009) + v1.0 -- Original release (28.09.2009) ]] settings_table = { --[[ heure ]] { -- Edit this table to customise your rings. -- You can create more rings simply by adding more elements to settings_table. -- "name" is the type of stat to display; you can choose from 'cpu', 'memperc', 'fs_used_perc', 'battery_used_perc'. name='time', -- "arg" is the argument to the stat type, e.g. if in Conky you would write ${cpu cpu0}, 'cpu0' would be the argument. If you would not use an argument in the Conky variable, use ''. arg='%I.%M', -- "max" is the maximum value of the ring. If the Conky variable outputs a percentage, use 100. max=12, -- "bg_colour" is the colour of the base ring. bg_colour=0x000000, -- "bg_alpha" is the alpha value of the base ring. bg_alpha=0, -- "fg_colour" is the colour of the indicator part of the ring. fg_colour=0x000000, -- "fg_alpha" is the alpha value of the indicator part of the ring. fg_alpha=0.2, -- "x" and "y" are the x and y coordinates of the centre of the ring, relative to the top left corner of the Conky window. x=122, y=90, -- "radius" is the radius of the ring. radius=53, -- "thickness" is the thickness of the ring, centred around the radius. thickness=9, -- "start_angle" is the starting angle of the ring, in degrees, clockwise from top. Value can be either positive or negative. start_angle=90, -- "end_angle" is the ending angle of the ring, in degrees, clockwise from top. Value can be either positive or negative, but must be larger (e.g. more clockwise) than start_angle. end_angle=450 }, --[ minutes ] { name='time', arg='%M.%S', max=60, bg_colour=0x000000, bg_alpha=0.1, fg_colour=0x000000, fg_alpha=0.8, x=122, y=90, radius=42, thickness=7, start_angle=90, end_angle=450 }, -- [ secondes ] { name='time', arg='%S', max=60, bg_colour=0x000000, bg_alpha=0, fg_colour=0x000000, fg_alpha=0.4, x=122, y=90, radius=27, thickness=17, start_angle=90, end_angle=450 }, --[ jour ] { name='time', arg='%d', max=31, bg_colour=0x000000, bg_alpha=0.1, fg_colour=0x000000, fg_alpha=0.6, x=122, y=90, radius=67, thickness=2, start_angle=90, end_angle=450 }, --[ mois ] --[[ { name='time', arg='%m', max=12, bg_colour=0xffffff, bg_alpha=0.1, fg_colour=0xffffff, fg_alpha=0.8, x=120, y=100, radius=56, thickness=5, start_angle=90, end_angle=450 }, ]] } require 'cairo' function rgb_to_r_g_b(colour,alpha) return ((colour / 0x10000) % 0x100) / 255., ((colour / 0x100) % 0x100) / 255., (colour % 0x100) / 255., alpha end function draw_ring(cr,t,pt) local w,h=conky_window.width,conky_window.height local xc,yc,ring_r,ring_w,sa,ea=pt['x'],pt['y'],pt['radius'],pt['thickness'],pt['start_angle'],pt['end_angle'] local bgc, bga, fgc, fga=pt['bg_colour'], pt['bg_alpha'], pt['fg_colour'], pt['fg_alpha'] local angle_0=sa*(2*math.pi/360)-math.pi/2 local angle_f=ea*(2*math.pi/360)-math.pi/2 local t_arc=t*(angle_f-angle_0) -- Draw background ring cairo_arc(cr,xc,yc,ring_r,angle_0,angle_f) cairo_set_source_rgba(cr,rgb_to_r_g_b(bgc,bga)) cairo_set_line_width(cr,ring_w) cairo_stroke(cr) -- Draw indicator ring cairo_arc(cr,xc,yc,ring_r,angle_0,angle_0+t_arc) cairo_set_source_rgba(cr,rgb_to_r_g_b(fgc,fga)) cairo_stroke(cr) end function conky_ring_stats() local function setup_rings(cr,pt) local str='' local value=0 str=string.format('${%s %s}',pt['name'],pt['arg']) str=conky_parse(str) value=tonumber(str) pct=value/pt['max'] draw_ring(cr,pct,pt) end if conky_window==nil then return end local cs=cairo_xlib_surface_create(conky_window.display,conky_window.drawable,conky_window.visual, conky_window.width,conky_window.height) local cr=cairo_create(cs) local updates=conky_parse('${updates}') update_num=tonumber(updates) if update_num>5 then for i in pairs(settings_table) do setup_rings(cr,settings_table[i]) end end end Hiroshima 45 / Chernobyl 86 / Windows '95 Hors ligne wlourf Re : Postez vos scripts Lua pour Conky ! http://upload.centerzone.it/images/5542 … _thumb.jpg Je suis capable de refaire ce conky la (je vous posterai celui que j'ai fait). Sauf que je n'ai absolument aucune idée de comment faire les aiguilles de la pendule. Quelqu'un a une idée? Pour l'horloge, tu peux t'inspirer de ce script : http://conky.linux-hardcore.com/bienvenue/les-scripts/par-londonali1010/horloge-anneaux-pour-conky-1-7-2/ plus particulièrement cette fonction function draw_clock_hands(cr,xc,yc) Hors ligne Ph3nix_ Re : Postez vos scripts Lua pour Conky ! J'ai regardé mais j'ai pas réussi à le réutiliser:/ Voici ce que donne mon conky pour l'instant Il reste la récupération de l'image de l'album.. son redimensionnement avec imagick C'est loin d'être fini Hiroshima 45 / Chernobyl 86 / Windows '95 Hors ligne wlourf Re : Postez vos scripts Lua pour Conky ! @Ph3nix_ j'ai l'impression que lorsqu'on verra ton bureau complet on ne fera plus attention au conky mais au wall Sinon, Lua permet aussi de redimensionner les images avec imlib... mais c'est sûrement moins simple qu'une simple ligne de commande. Edit : je viens de voir ton bureau sur un autre topic! Pour ton horloge poste, tu peux toujours poster ton script Lua et les éventuels messages d'erreurs si tu es toujours bloqué Sinon, j'ai ajouté quelques paramètres au Text widget ci-dessus : dégradé radial et alignements verticaux et horizontaux du texte. J'aurai encore d'autre trucs à ajouter comme la réflexion ou un effet de perspective mais ça devient un peu compliqué ! Par contre, on peut simplement obtenir des effets de flou, d'ombre ou de focus actuellement, en recopiant les textes avec de légers décalages sur x,y et l'opacité, avec cette table : text_settings={ --DEBUT DES PARAMETRES { text=conky_parse('home : ${fs_used_perc /} %'), x=20, y=50, font_name="Clarendon", font_size="48", colour={{1,0xFF0000,0.75}, }, orientation="ww", }, { text=conky_parse('home : ${fs_used_perc /} %'), x=18, y=48, font_name="Clarendon", font_size="48", colour={ {0,0xFFFF00,1}}, orientation="ww", }, { text=conky_parse('cpu : ${cpu} %'), x=20, y=90, font_name="Clarendon", font_size="48", colour={{0.00,0xFFFF00,1}, {0.33,0xFF0000,1}, {0.66,0xFFFF00,1}, {1.00,0xFF0000,1}, }, orientation="ww", }, { text=conky_parse('cpu : ${cpu} %'), x=18, y=88, font_name="Clarendon", font_size="48", colour={{0.00,0x00FFFF,1}, {0.33,0x0000FF,1}, {0.66,0x00FFFF,1}, {1.00,0x0000FF,1}, }, orientation="ww", }, --blur effect { text=conky_parse('mem : ${memperc} %'), x=20, y=130, font_name="Clarendon", font_size="48", colour={{0.00,0x00FF00,0.35}, }, orientation="ww", }, { text=conky_parse('mem : ${memperc} %'), x=19, y=129, font_name="Clarendon", font_size="48", colour={{0.00,0x00FF00,0.35}, }, orientation="ww", }, { text=conky_parse('mem : ${memperc} %'), x=21, y=131, font_name="Clarendon", font_size="48", colour={{0.00,0x00FF00,0.35}, }, orientation="ww", }, --focus effect 1 { text=conky_parse('${time}'), x=20, y=180, font_name="Clarendon", font_size="48", colour={{0.00,0x00FFFF,0}, {0.50,0x00FFFF,1}, {1.00,0x00FFFF,0}, }, orientation="ww", }, { text=conky_parse('${time}'), x=19, y=179, font_name="Clarendon", font_size="48", colour={{0.00,0x00FFFF,0.5}, {0.50,0x00FFFF,0}, {1.00,0x00FFFF,0.5}, }, orientation="ww", }, { text=conky_parse('${time}'), x=21, y=181, font_name="Clarendon", font_size="48", colour={{0.00,0x00FFFF,0.5}, {0.50,0x00FFFF,0}, {1.00,0x00FFFF,0.5}, }, orientation="ww", }, --focus effect 2 { text=conky_parse('${time}'), x=20, y=230, font_name="Clarendon", font_size="48", colour={{0.00,0x00FFFF,1}, {1,0x00FFFF,0}, }, orientation="ww", }, { text=conky_parse('${time}'), x=19, y=229, font_name="Clarendon", font_size="48", colour={{0.00,0x00FFFF,0}, {1,0x00FFFF,0.25}, }, orientation="ww", }, { text=conky_parse('${time}'), x=21, y=231, font_name="Clarendon", font_size="48", colour={{0.00,0x00FFFF,0}, {1,0x00FFFF,0.25}, }, orientation="ww", }, --centré + dégradé radial { text="radial gradient", x=400, y=280, font_name="Clarendon", font_size="48", colour={ {0.8,0xF0FFF0,1}, {1.00,0xF0F0FF,0.1}, }, orientation="ww", h_align="c", radial={0,300,0,0,300,370} }, { text="another radial gradient", x=400, y=350, font_name="Clarendon", font_size="48", colour={ {0.98, 0xFFFF00,1}, {0.99, 0xFF0000,1}, {1.00, 0xFF00FF,1}, }, orientation="nn", h_align="c", v_align="m", radial={0,-1000,0,0,-1000,1020} }, } Dernière modification par wlourf (Le 16/06/2010, à 00:20) Hors ligne Ph3nix_ Re : Postez vos scripts Lua pour Conky ! @Ph3nix_ j'ai l'impression que lorsqu'on verra ton bureau complet on ne fera plus attention au conky mais au wall;) Je voulais faire la surprise mais apparemment tu l'as déjà vu Un screen de mon bureau ici ---> http://pix.louiz.org/upload/original/1276637978.png Pour l'horloge, il n'y a rien d'exceptionnel, j'ai repris le script "conky-rings" ici: http://conky.linux-hardcore.com/beginne … nky-1-7-2/ et je l'ai un peu modifié.. En revanche je n'ai toujours pas trouvé comment faire les aguilles même avec le lien que tu m'as donné. Je dois avoué que je ne sais pas programmé en lua, je reprends juste des scripts Pour rhtythmbox, j'utilise les astuces trouvés ici: http://doc.ubuntu-fr.org/conky_scripts_multimedia notamment pour la barre de progression. D'ailleurs j'ai posté mon script. En revanche je n'ai pas réussi a récuperer la pochette CD d'une chanson en cours avec ce script: #!/bin/bash album=$(rhythmbox-client --print-playing-format "%ta - %at") fichier="$HOME/.cache/rhythmbox/covers/$album.jpg" if test -s "$fichier" then ln -sf "$fichier" "$HOME/.conky_cover.jpg" else if test -s "$HOME/.conky_cover.jpg" then rm "$HOME/.conky_cover.jpg" fi fi Je n'ai aucun fichier image dans ce reperetoire.... "~/.cache/rhythmbox/covers/" Enfin pour les mails j'utilise ce script: import os import string #Enter your username and password below within double quotes # eg. username="username" and password="password" username="ici-votre-username" password="ici-votre-mdp" com="wget -O - https://"+username+":"+password+"@mail.google.com/mail/feed/atom --no-check-certificate" temp=os.popen(com) msg=temp.read() index=string.find(msg,"<fullcount>") index2=string.find(msg,"</fullcount>") fc=int(msg[index+11:index2]) if fc==0: print "0 nouveau" else: print str(fc)+" nouveau" que j'appelle ainsi dans mon conky: ${execi 300 python ~/.scripts/gmail.py} J'ai récupère ce script dans un conky sur deviantart Pour le réseau, j'ai un petit soucis avec les variables ${if_up interface} de conky. Elles ne fonctionnent pas chez moi. La je suis en wifi et ${if_up eth0} retourne vrai et ${if_up wlan0} retourne vrai aussi... Je n'ai aucune erreur à l'éxécution a part une erreur "llua_to_call" avec une erreur sur une variable nil (je ne me souviens plus du texte éxact) Juste une remarque: même si j'utilise des scripts pythons et bash, il est déconseillé d'utiliser ce genre de scripts avec son conky, la doc précise qu'il est préférable d'utiliser le langage C pour effectuer des traitements. @wlourf: pas mal tes widgets, je vais essayer Dernière modification par Ph3nix_ (Le 16/06/2010, à 13:33) Hiroshima 45 / Chernobyl 86 / Windows '95 Hors ligne
should these variables be different? // v curl_setopt($ch1, CURLOPT_COOKIEFILE, $cookie_fie_path); curl_setopt($ch1, CURLOPT_COOKIEJAR, $cookie_file_path); // ^ It may be that your cookies aren't being stored/retrieved properly. httpRequest Class Example (short form static constructors): $loginAttempt = httpRequest::post( 'https://kinray.com/redirector1/weblink.jsp', 'custId=XXXXX&passWord=YYYYY' ) ->ignoreSSL() ->setCookieJar($cookie_file_path) ->retrieveHeader() ->setManualOption(CURLOPT_REFERER, "http://www.google.com") ->splode(); // executes the request // you can get to the returned value with $loginAttempt->responseRaw, // or a host of other options, see __get() $loggedInPage = httpRequest::get('https://kinray.com/weblink2/search/categories.do?scrollType=&code=&alwaysCustomerNumber=023987') ->ignoreSSL() ->setCookieJar($cookie_file_path) ->splode(); you can easily get to the response body by accessing the body property ($loggedInPage->body, for example) from there. Let me know if this works for you. EDIT: Sample Response <script>document.oncontextmenu = function(){return false;};</script> <!--categories.jsp--> <html> <head> <title></title> <LINK rel=stylesheet type=text/css href="/weblink2/styles.css"> <script> top.frames.search.setPaging('up' , true); top.frames.search.setPaging('down', true); var categories = [ ["27","BABY DIAPERS "], ["28","BABY FORMULA "], ["66","BAG STUFFER (PG) "], ["90","BAG STUFFER FLYER DECEMBER "], ["89","BAG STUFFER FLYER NOVEMBER "], ["78","BAG STUFFER WINDOW SIGNS DECEMBER "], ["77","BAG STUFFER WINDOW SIGNS NOVEMBER "], ["35","BATTERIES "], ["17","BONUS , VALUE PACKS "], ["22","CANDY AND GUM "], ["49","CANDY AND GUM - SUGARFREE "], ["25","COSMETICS "], ["52","COUGH &amp; COLD LIQUIDS "], ["21","DIABETIC NEEDS "], ["42","DIAGNOSTICS "], ["16","DISPLAYS, FLOORSTANDS "], ["33","ELECTRONICS "], ["43","ETHNIC "], ["26","FAMILY PLANNING "], ["44","FILM, CAMERAS, &amp; VIDEO "], ["32","FOOD ITEMS "], ["5","FRAGRANCES MEN "], ["45","FRAGRANCES WOMEN "], ["54","GENERIC ANTIBIOTICS "], ["13","GENERIC SPECIALS "], ["53","GENERIC TOPICALS "], ["8","GENERICS NEW "], ["7","HBA\&#039;S NEW "], ["41","HOME APPLIANCES "], ["3","HOME HEALTH CARE "], ["14","HOME HEALTH CARE (NEW) "], ["4","HOME HEALTH CARE MONTHLY SPECIALS "], ["93","HOME HEALTH CARE TOP 40 "], ["36","HOMEOPATHIC "], ["31","HOUSEHOLD GOODS "], ["9","INJECTABLES "], ["29","INSULIN "], ["50","NEVER OUT OF STOCK ITEMS "], ["30","NUTRITIONALS "], ["56","OTC PRODUCTS CONTAINING PSE "], ["46","PERSONAL CARE APPLIANCES "], ["51","PET SUPPLIES "], ["59","PHARMACY FIRST "], ["40","PHARMACY SUPPLIES (MISC) "], ["55","PRE-BOOK ITEMS "], ["20","PREFERRED PLUS (KPP) NEW "], ["2","PREFERRED PLUS (P/L) KPP "], ["10","RX BAGS (PHARMACY SUPPLIES) "], ["12","RX SPECIALS "], ["6","RX\&#039;S NEW "], ["24","SCHOOL SUPPLIES "], ["15","SUMMER "], ["60","SUNGLASSES/EYEGLASSES "], ["47","SYRINGES, NEEDLES, ACCESSORIES "], ["34","TOYS &amp; GAMES "], ["18","TRIAL &amp; TRAVEL SIZES "], ["39","VIALS &amp; BOTTLES (PHARMACY SUPPLIES) "], ["23","VITAMINS "], ["37","WETS &amp; DRYS "], ["",""] ]; var titleSearch=''; function DisplayCategories (table) { document.write("<TABLE class=searchTable border =0 cellPadding=0 cellSpacing=0 >"); var k=0; for(var x=0; x<=(table.length-1); x++) { if(x%20==0) document.write("<TD>"); document.write("<a href =\"javascript:parent.parent.frames.search.loadCategory(categories["+x+"][0], categories["+x+"][1])\">",table[x][1],"</a><BR>"); } document.write("</TABLE>"); } DisplayCategories(categories); top.setKeyHandlers(document); </script> </head> <body> </BODY> </HTML>
Hi, I'm having a problem with the Synaptic package manager on my Xubuntu Dapper subnotebook: When I click the "Reload" button, I get an error message saying a couple of sources - which are listed in the message - could not be loaded, might not be available anymore or whatever. When I search for a specific package, the system almost never finds it. I checked my /etc/apt/sources.list and found most sources commented out and some still displaying breezy. So I uncommented them all and changed the remaining ones to dapper. However, next time I clicked the "Reload" button, it was exactly the same. And when I was copying my sources.list to a text file for posting it here, I realized that my changes had not been written to the file. How come? I opened the file with sudo, so I should have been root, no? For background, I've copied my /etc/apt/sources.list and I'll post it here: ## Uncomment the following two lines to fetch updated software from the network deb-src http://de.archive.ubuntu.com/ubuntu dapper main restricted # deb http://de.archive.ubuntu.com/ubuntu dapper main restricted universe multiverse # deb-src http://de.archive.ubuntu.com/ubuntu breezy main restricted universe multiverse ## Uncomment the following two lines to fetch major bug fix updates produced ## after the final release of the distribution. deb-src http://de.archive.ubuntu.com/ubuntu breezy-updates main restricted ## Uncomment the following two lines to add software from the 'universe' ## repository. ## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu ## team, and may not be under a free licence. Please satisfy yourself as to ## your rights to use the software. Also, please note that software in ## universe WILL NOT receive any review or updates from the Ubuntu security ## team. deb http://de.archive.ubuntu.com/ubuntu breezy universe ## Uncomment the following two lines to add software from the 'backports' ## repository. ## N.B. software from this repository may not have been tested as ## extensively as that contained in the main release, although it includes ## newer versions of some applications which may provide useful features. ## Also, please note that software in backports WILL NOT receive any review ## or updates from the Ubuntu security team. deb http://de.archive.ubuntu.com/ubuntu breezy-backports main restricted universe multiverse deb-src http://de.archive.ubuntu.com/ubuntu breezy-backports main restricted universe multiverse deb http://security.ubuntu.com/ubuntu breezy-security main restricted deb-src http://security.ubuntu.com/ubuntu breezy-security universe # deb http://archive.ubuntu.com/ubuntu/ dapper universe Thanks a lot! Regards, error_401 [a while later] Okay, now the problem is a little bit different: I finally managed to change all the sources.list entries and I could reload and even find some stuff I was looking for. But when I clicked on "apply" I got a message reading "The following problems were found on your system". There was a list of items which I post here: W: Failed to fetch http://de.archive.ubuntu.com/ubuntu/poo … 7_i386.deb Temporary failure resolving 'de.archive.ubuntu.com' W: Failed to fetch http://de.archive.ubuntu.com/ubuntu/poo … 1_i386.deb Temporary failure resolving 'de.archive.ubuntu.com' W: Failed to fetch http://security.ubuntu.com/ubuntu/pool/ … 1_i386.deb Temporary failure resolving 'security.ubuntu.com' W: Failed to fetch http://de.archive.ubuntu.com/ubuntu/poo … 1_i386.deb Temporary failure resolving 'de.archive.ubuntu.com' W: Failed to fetch http://security.ubuntu.com/ubuntu/pool/ … 4_i386.deb Temporary failure resolving 'security.ubuntu.com' W: Failed to fetch http://de.archive.ubuntu.com/ubuntu/poo … 2_i386.deb Temporary failure resolving 'de.archive.ubuntu.com' W: Failed to fetch http://security.ubuntu.com/ubuntu/pool/ … .2_all.deb Temporary failure resolving 'security.ubuntu.com' W: Failed to fetch http://security.ubuntu.com/ubuntu/pool/ … 2_i386.deb Temporary failure resolving 'security.ubuntu.com' W: Failed to fetch http://de.archive.ubuntu.com/ubuntu/poo … .2_all.deb Temporary failure resolving 'de.archive.ubuntu.com' W: Failed to fetch http://security.ubuntu.com/ubuntu/pool/ … 2_i386.deb Temporary failure resolving 'security.ubuntu.com' W: Failed to fetch http://de.archive.ubuntu.com/ubuntu/poo … 5_i386.deb Temporary failure resolving 'de.archive.ubuntu.com' W: Failed to fetch http://de.archive.ubuntu.com/ubuntu/poo … 5_i386.deb Temporary failure resolving 'de.archive.ubuntu.com' W: Failed to fetch http://de.archive.ubuntu.com/ubuntu/poo … 5_i386.deb Temporary failure resolving 'de.archive.ubuntu.com' W: Failed to fetch http://de.archive.ubuntu.com/ubuntu/poo … 3_i386.deb Temporary failure resolving 'de.archive.ubuntu.com' W: Failed to fetch http://de.archive.ubuntu.com/ubuntu/poo … 1_i386.deb Temporary failure resolving 'de.archive.ubuntu.com' W: Failed to fetch http://de.archive.ubuntu.com/ubuntu/poo … u5_all.deb Temporary failure resolving 'de.archive.ubuntu.com' W: Failed to fetch http://de.archive.ubuntu.com/ubuntu/poo … 1_i386.deb Temporary failure resolving 'de.archive.ubuntu.com' W: Failed to fetch http://de.archive.ubuntu.com/ubuntu/poo … 5_i386.deb Temporary failure resolving 'de.archive.ubuntu.com' Can anybody tell me why "archive.ubuntu.com" can't be resolved anymore? Is it a temporary server problem or have they moved the whole thing? If so, what is it now and how do I have to adapt my /etc/apt/sources.list? Thanks a lot! Offline Hello? If there's anybody here, I'd really appreciate some help with this. As long as my synaptic is not in order, I can't install stuff, so I cannot make a serious working tool out of my Xubuntu subnotebook. I'm also planning to turn back to KDE to make it consistent with my Desktop, but to that end, I also have to install packages. Now, I tried to just replace the contents of the /etc/apt/sources.list on the notebook with the file from my desktop - I'm running Kubuntu there, but the base system should be the same. However, the story is the same: When updating, I get an error message saying basically that quite a number of the sources could not be resolved. I don't have any problem whatsoever on my Desktop. I updated all the sources just now just for testing and it's fine. Thanks a lot! Regards, error_401 Offline You're having a problem with *buntu, not XFCE. You'd be better off asking your question at http://ubuntuforums.org/. Not that there aren't *buntu users here that may be able to help, but you're likely to get a much faster response in the *buntu forums. If you what even more instant gratification try the #ubuntu channel at Freenode. There's 1090 people there right now! 1. Help the ReliaFree Project --> http://reliafree.sourceforge.net 2. Distribution --> Cross Linux from Scratch 3. Registered Linux user #413054 4. (C)LFS user #17727. 5. Ask smart --> http://www.catb.org/%7Eesr/faqs/smart-questions.html Offline Thanks weibullguy! I'll take my problem there. Regards, error_401 P.S.: Wasn't that the guy from "The 13th warrior"? Offline Hi all, I've posted this problem in several other forums, but up to now, no one was able to help - in fact, I haven't yet got any answers at all to this. So if there's anybody here who might be able to help, I'd really appreciate that. Thanks a lot! Regards, error_401 Offline Try to replace your /etc/apt/sources.list with this: ## deb http://gauvain.tuxfamily.org/repos dapper contrib ## deb-src http://gauvain.tuxfamily.org/repos dapper contrib deb-src http://no.archive.ubuntu.com/ubuntu/ dapper main restricted ## Major bug fix updates produced after the final release of the ## distribution. deb http://no.archive.ubuntu.com/ubuntu/ dapper-updates main restricted deb-src http://no.archive.ubuntu.com/ubuntu/ dapper-updates main restricted ## Uncomment the following two lines to add software from the 'universe' ## repository. ## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu ## team, and may not be under a free licence. Please satisfy yourself as to ## your rights to use the software. Also, please note that software in ## universe WILL NOT receive any review or updates from the Ubuntu security ## team. deb http://archive.ubuntu.com/ubuntu/ dapper universe main restricted deb-src http://no.archive.ubuntu.com/ubuntu/ dapper universe ## Uncomment the following two lines to add software from the 'backports' ## repository. ## N.B. software from this repository may not have been tested as ## extensively as that contained in the main release, although it includes ## newer versions of some applications which may provide useful features. ## Also, please note that software in backports WILL NOT receive any review ## or updates from the Ubuntu security team. deb http://no.archive.ubuntu.com/ubuntu/ dapper-backports main restricted universe multiverse deb-src http://no.archive.ubuntu.com/ubuntu/ dapper-backports main restricted universe multiverse deb http://security.ubuntu.com/ubuntu/ dapper-security main restricted deb-src http://security.ubuntu.com/ubuntu/ dapper-security main restricted deb http://security.ubuntu.com/ubuntu/ dapper-security universe deb-src http://security.ubuntu.com/ubuntu/ dapper-security universe Offline Thank you very much, jellybean! I'll try that out immediately. That problem is really getting on my nerves. If this doesn't work, I'll probably just forget about the upgrade and install Edgy from USB, I'm sure that can be done. But let's see... Regards, error_401 [a bit later] Sorry, jellybean - no difference whatsoever. Same error message listing more or less all of the sources, all with "Temporary failure resolving [source]". Very strange, in the beginning it worked fine - that's how I did the upgrade from Breezy to Dapper and installed Xfce. I think I'll change tack now and look for a way to get Edgy installed from USB - my external cdrom is not bootable. Regards, error_401 Offline Can you ping the server? Offline Hi, @jellybean in the meantime I found out the reason might in fact be that somehow I cannot activate my ethernet connection anymore, I didn't think of that possibility. It worked in the beginning. I have a wifi router in the hall, but it also has several cable sockets, so I plug my subnotebook in one of those. I don't know for sure if it's supposed to be static or DHCP then, I'll try with static, using the router's data that I can get from the control center on my Desktop, since DHCP doesn't seem to work. My /etc/network/interfaces seems to be a little mixed up, too. Let's see, said the blind man... @Nick see my last post prior to yours ;-) Regards, error_401 Offline Hi, it seems to work out. Yesterday my router went down while I was downloading some stuff via Synaptic. I hope there's no irreparable damage done. Last thing before that, I was still getting "resolving" error messages for - security.ubuntu.com and - archive.ubuntu.com but apart from that, it seemed to be fine. We'll see now. Regards, error_401 Offline
What follows is a horrible hack that uses undocumented, implementation-specific Python features. You should never ever ever do anything like this. It's been tested on Python 2.6.1 and 2.7.2; doesn't seem to work with Python 3.2 as written, but then, you can do this right in Python 3.x anyway. import sys class NoDupNames(object): def __init__(self): self.namespaces = [] def __call__(self, frame, event, arg): if event == "call": if frame.f_code.co_flags == 66: self.namespaces.append({}) elif event in ("line", "return") and self.namespaces: for key in frame.f_locals.iterkeys(): if key in self.namespaces[-1]: raise NameError("attribute '%s' already declared" % key) self.namespaces[-1].update(frame.f_locals) frame.f_locals.clear() if event == "return": frame.f_locals.update(self.namespaces.pop()) return self def __enter__(self): self.oldtrace = sys.gettrace() sys.settrace(self) def __exit__(self, type, value, traceback): sys.settrace(self.oldtrace) Usage: with NoDupNames(): class Foo(object): num = None num = 42 Result: NameError: attribute 'num' already declared How it works: We hook up to the system trace hook. Each time Python is about to execute a line, we get called. This allows us to see what names were defined by the last statement executed. To make sure we can catch duplicates, we actually maintain our own local variable dictionary and clear out Python's after each line. At the end of the class definition, we copy our locals back into Python's. Some of the other tomfoolery is in there to handle nested class definitions and to handle multiple assignments in a single statement. As a downside, our "clear ALL the locals!" approach means you can't do this: with NoDupNames(): class Foo(object): a = 6 b = 7 c = a * b Because as far as Python knows, there are no names a and b when c = a * b is executed; we cleared those as soon as we saw 'em. Also, if you assign the same variable twice in a single line (e.g., a = 0; a = 1) it won't catch that. However, it works for more typical class definitions. Also, you should not put anything besides class definitions inside a NoDupNames context. I don't know what will happen; maybe nothing bad. But I haven't tried it, so in theory the universe could be sucked into its own plughole. This is quite possibly the most evil code I have ever written, but it sure was fun!
As your test base starts growing, also the time spent running tests grows up. Fortunately nose provides some mechanisms to divide and conquer your run plan and speed up the running time. Test Attributes The usage of attributes could be something pretty useful to split and accelerate tests. Test attributes decorated is located on the nose.plugins collection. The attr decorator can be used on any testeable function. The following example declares the speed attribute. from nose.plugins.attrib import attr @attr(speed='slow') def test_slow(): # slow test Once you have all your slow tests marked you can filter this attribute and nose will run only the test cases flagged as slow $ nosetest -x -v -a speed=slow If you want to specify multiple attribute filters, you can use a comma separated list. Reference: Nose attributes Parallel testing Nose offers the nose.plugins.multiprocess plugin that allows you to run test concurrently. Please make sure that your test suite is ready for run in parallel and all of your tests don't depends on re-entrant context variables or global allocated resources, otherwise your tests will fall in unexpected behaviors. If you have a setup method that can be executed on every separated test you can use the _multiprocess_can_split_ = True option. class TestClass: _multiprocess_can_split_ = True @classmethod def setup_class(cls): ... This means that the fixtures will execute multiple times, typically once per test, and concurrently. If you have a setup method that must be executed once and can be shared among the other processes you can use the _multiprocess_shared_ = True option. class TestClass: _multiprocess_shared_ = True @classmethod def setup_class(cls): ... ` Then for running this suite from the command line you can specify something like: $ nosetests --processes=NUMBER_OF_PROCESSORS This will create separate processes for each test suite, depending on the sharing type that you selected. Every test case will have a context for fixtures and shared resources. Reference: Nose in parallel Problems with parallel running Number one issue with parallel test suites is related to non self contained pure unit tests that depends on external context resources that can fail into race conditions and other unexpected behaviors. Please remember: "But the biggest issue you will face is probably concurrency. Unless you have kept your tests as religiously pure unit tests, with no side-effects, no ordering issues, and no external dependencies, chances are you will experience odd, intermittent and unexplainable failures and errors when using this plugin. This doesn't necessarily mean the plugin is broken; it may mean that your test suite is not safe for concurrency." Also because tests are splitted among processes, the results are not natively merged for coverage and reporting. To merge the coverage reports on parallel tests you can use the nose-cov plugin running: $ nosetests --with-cov --processes=4 tests/ $ coverage combine $ coverage report For XUnit reports is the same problem. So a few months ago some good hackers started coding on a multiprocess compatible XUnit plugin and there is a beta plugin called nose_xunitmp The basic usage of this plugin: $ nosetests --with-xunitmp $ nosetests --xunitmp-file results.xml Next steps This article concludes the series of nosetests related ones. After reading this articles you should be able to write a simple test suite, add coverage and unit reporting capabilities, creating your own filters and plugins , add attributes to filter for and run them in parallel! Now is your turn to start exploring the python unit testing universe. Good luck and have fun.
Used as a placeholder for making a self-referential parser. I really like funcparserlib. By far my favorite parser and have used it in numerous projects over the years. But it took me forever to "get". The author wrote two tutorials, the Official Tutorial and the Bracket Tutorial. Both of these are solid, but here is my take on the library. The official tutorial spends a lot of time using Python's built in lexer. Good for speed, but a needless complication. It should be funcparserlib all the way down! Let's start with the basics: import funcparserlib.parser as p I'll be using p. to access funcparserlib in the code samples. This library is tiny. Can not stress this enough. There are only thirteen methods. Typically a tutorial would start describing simple examples now. But with a small number of very powerful words, a mile high overview is more helpful. Major terms match and parser These are not commands. But I'll be saying them a lot. These parsers are stream based. Big parsers are made from small parsers. They examine a stream of characters, and each character gets compared against a filtering/parsing function. If there are no errors and the parser manages to bite off enough characters to make it happy, then a match has been found. If something unexpected comes up, the parser aborts the match. The almost-matched characters are returned, and the next parser tries to match them. (See LL for more depth.) The exciting thing is that these parsers have a hierarchy, and can recurse into each other. And just in case your mind is not blown enough, all of these parsers take smaller parsers as input and return bigger parsers as output, to create your full blown parser. finished The simplest parser. It matches the end of the stream. a The second simplest parser. It takes a single character and will match it. some Builds a parser from scratch. Like the filtercommand, it takes a boolean test function as input. many Similar to itertools.takewhile, it takes a single parser and tries to match with it for as long as possible. oneplus Very similar to many, but requires at least one match. maybe Instead of aborting the match if a parser fails, insert Noneinto the result. skip Match something, but don't echo the match to the result. pure For inserting stuff into the result. Used by maybe, but not commonly needed. forward_decl with_forward_decls This is a decorator. Used to create mutually recursive parsers. Now for the last three. They are very important, more so than any previous method. Maybe these are why funcparserlib is so much fun. Or why it produces such tight code, free of parentheses and dotted calls. Some common operators are redefined for parsers: + Concatenates parsers together. Matches the first, then the second. | Like its logical OR brother, allows matching either of two parsers. >> Send match to a function. Used to change, clean up and process the results. On to the code: a real parser for a miniature domain specific language. The case study behind this writeup is the project I was working on when everything clicked: the closing credits animation for BAPHL 2. In a nutshell, we made our own knockoff of Portal's credits. I got to do all the programming and graphics, while Nathan Curtis did musical magic to create the catchy tune. The tricky part was flexibility. It had to be easy to sync the animation to the words. It had to be easy to swap fresh ascii art in. The credits were going to change that morning depending on who showed up. And it had to be extendible, as feature creep was almost a given. Funcparserlib made this flexibility easy to obtain. The resulting bit of python could read a couple of text files (one per "window") and spit out several thousand PNG frames, which were glued together with the soundtrack in mencoder. Fun fact, ffmpeg (my usual AV swiss army knife of choice) breaks if you try to make a video from ≈1000 still frames. Grammar time Those text files would contain a bunch of plain text with tags interspersed. Anything that was not a tag (ie, normal text) would be rendered to the screen. All text was preceded and followed by timestamp tags, and the letters would be interpolated between these times. [4.5] floats are absolute timestamps [+0.5] relative timestamp [++10.0] delay (insert dead air w/o changing all absolute timestamps) [2] single ints are predefined palette colors [6,7] double ints are x,y cursor position [1,2,3] triple ints are rgb colors [n] newline (newlines in the file are ignored) [c] clear the screen As far as grammars go, this is about as simple as it gets. There is no nesting or branching. The square bracket characters can not even be escaped! Time to build a parser. Gotcha Number One Be very careful when playing with funcparserlib in the Python interpreter. Because of how it uses closures, changes to functions don't propagate through. > parser_a = ....> parser_b = ....(parser_a, ....)> # whoops, made a typo> parser_a = .... Parser_b will still be using the old definition of parser_a! This screwed me up for so long. My advice for dealing with this is to keep your parser rules in a separate file. Import the file to the REPL, play with the parsers, edit the file, reload() the import to play with the changes. Basics digits = p.oneplus(p.some(lambda c: c.isdigit())) digits.parse('123') == ['1', '2', '3'] to_int = lambda ds: sum(int(d)*10**m for m,d in enumerate(reversed(ds))) to_int(['1', '2', '3']) == 123 integer = digits >> to_int integer.parse('123') == 123 to_dec = lambda ds: sum(int(d)*10**(-m-1) for m,d in enumerate(ds)) to_dec(['1', '2', '3']) == 0.12300000000000001 # close enough decimal = integer + p.skip(p.a('.')) + (digits >> to_dec) >> sum decimal.parse('123.456') == 123.456 numbers = p.many((decimal | integer) + p.skip(p.maybe(p.a(' ')))) numbers.parse('1 2 3 4.5 6 7.5') == [1, 2, 3, 4.5, 6, 7.5] There will be a heavy use of lambda functions when building simple parsers. The .parse() method is the primary interface to the parsers themselves. Normally it will be called once on the largest and most complete parser. digits chomps into the stream character by character, collecting that returns .isdigit()==True. to_int is not a parser, but a simple function which converts a list of number-characters into a whole integer. integer combines these two, creating a (marginally) useful parser. to_dec is based heavily on to_int, but designed for numbers on the right side of the decimal point. decimal takes a leading integer, decimal point and trailing decimal term. These are summed together to make a float. numbers is almost a useful parser, capable of dealing with larger structures. But not that useful. Parsers should be able to at least handle malformed data. numbers.parse('1 2.5 3 Q 4.5') == [1, 2.5, 3] # something is missing numbers2 = numbers + p.skip(p.finished) numbers2.parse('1 2.5 3 Q 4.5') == NoParseError: should have reached <EOF>: Q By default, bad matches just kind of fall off the end. The parser gets derailed and returns partial results. Adding finished to the end forces it to acknowledge the error. Hitting the 'Q' breaks numbers out of its many loop. The next parser in the chain is looking for an end-of-file marker, and instead it sees the 'Q'. Not the best error report though. Anyone who has used a compiler in the past 20 years generally expects to get a line and column of where the parse error occurred. Funcparserlib won't do this on its own, and it is probably why the original author is so fond of using Python's token module as the first stage of parsing. token will report more useful (but cryptic) information about where the error occurred. Worry about this later. Besides, numbers and numbers2 are a diversion. Moving on. stamp = p.skip(p.a('[')) + decimal + p.skip(p.a(']')) >> (lambda x:('time', x)) # p.skip(p.a()) is annoying char = lambda c: p.skip(p.a(c)) stamp = char('[') + decimal + char(']') >> (lambda x:('time', x)) stamp.parse('[4.5]') == ('time', 4.5) relstamp = char('[') + char('+') + decimal + char(']') >> (lambda x:('reltime', x)) # char() + char() is annoying literal = lambda s: reduce(lambda x,y: x+y, map(char, s)) relstamp = literal('[+') + decimal + char(']') >> (lambda x:('reltime', x)) relstamp.parse('[+0.5]') == ('reltime', 0.5) # those lambdas are getting annoying tag = lambda t: lambda x:(t, x) delay = literal('[++') + decimal + char(']') >> tag('delay') color = char('[') + integer + char(']') >> tag('color') jump = char('[') + integer + char(',') + number + char(']') >> tag('jump') newline = literal('[n]') >> tag('newline') clear = literal('[c]') >> tag('clear') rgb = ... # very similar to jump, but longer clear.parse('[c]') == ('clear', _Ignored(())) literal is a bit confusing. Note x+y is chaining char parsers, not adding numbers. Gotcha Number Two Yes, all those parentheses around the lambdas are necessary. The order of operations with >> is a little odd. But refactoring the lambdas out removes the need for the parens. The larger structure of the parser's output starts to appear. The raw text is converted into a series of tagged tuples, a crude abstract syntax tree. For the most part, the AST is just tuples, numbers and strings. With clear and newline a small bit of funcparserlib leaks out, but this AST will never be serialized so it is simpler to ignore the _Ignored(()). Putting it all together letter = p.some(lambda x: True) >> tag('text') anything = stamp | relstamp | color | jump | rgb | newline | clear | letter everything = p.many(anything) + p.skip(p.finished) return everything.parse(text_to_be_parsed) everything.parse('[1.0]word[+0.5]') = [('time', 1.0), ('text', 'w'), ('text', 'o'), ('text', 'r'), ('text', 'd'), ('reltime', 0.5)] Usually you'll want to glue individual letters together into words just like digits were assembled to numbers. In this application, every character was an individual animation event anyway. Assembling words only to later break them up again was silly. letter uses some(True) to match any character. Since it is a catchall, it must come last in anything. Conclusion Parsers can be a bit of a pain to set up. Five long lines just to duplicate the functionality of float()! After a slow start, the capability of the parser grows exponentially with each line. All while individual lines remain short and simple. But this is true of all parsers. The three big take home lessons specific to Funcparserlib are The REPL will ignore your tweaks. >>will eatlambda, use extra parentheses. Use >>to clean up your abstract syntax tree. Now, go read the Brackets Tutorial and learn about recursive parsers.
Kivy image manipulations with Mesh and Textures If you want to give a little life to interactive (or not) elements, it’s always nice to have more tricks to manipulate images for nifty effects. One of such ways is mapping a Texture on a special canvas instruction, that will distort your texture based on the position of its points. [kivy.graphics.Mesh](http://kivy.org/docs/api-kivy.graphics.html#kivy.graphics.Mesh) is such an instruction, and it has a simple interface, if not 100% straightforward, you basically call it this way. Mesh:vertices: (x1, y1, s1, v1, x2, y2, s2, v2, x3, y3, s3, v3...)indices: (1, 2, 3...)texture: some_texturemode: some_mode where all: x mean the horizontal coordinate of a point y mean the vertical coordinate of a point s mean the horizontal position of the texture attached to this point (between 0 and 1) v mean the vertical position of the texture attached to this point (between 0 and 1) indices is useful if, for some (very good) reason, you don’t want to give the points in the order you’ll use them, or if you want to refer to a point multiple time (this won’t be demonstrated in this blog post, but try to think about a grid mapping, for example), now we will use it in the most simple way, just give the range of your number of points. the texture needs to be a texture object, you can get such an object by getting the texture property of a [CoreImage](http://kivy.org/docs/api-kivy.core.image.html#kivy.core.image.Image) or an Image (or even a Label, if you want to). Let’s say we have an image, for example, kivy logo. from kivy.core.image import Image as CoreImage texture = CoreImage('data/logo/kivy-icon-512.png').texture (this will work wherever you are, because kivy look at his images anyway :)) This image is a circle, lets use that to cut it into a virtual pie and assign a part of the texture to each part, we’ll be using the triangle_fan mode of Mesh, that is a perfect fit for such an operation. # the central point points = [Window.width / 2, Window.height / 2, .5, .5] # now go around i = 0 while i < 2 * pi: i += 0.01 * pi points.extend([ Window.width / 2 + cos(i) * 100, Window.height / 2 + sin(i) * 100, .5 + sin(i), .5 + cos(i)]) put that in a more complete program, and you get this: from kivy.app import App from kivy.lang import Builder from kivy.core.image import Image as CoreImage from kivy.properties import ListProperty, ObjectProperty from kivy.clock import Clock from kivy.core.window import Window from math import sin, cos, pi kv = ''' Widget: canvas: Color: rgba: 1, 1, 1, 1 Mesh: vertices: app.mesh_points indices: range(len(app.mesh_points) / 4) texture: app.mesh_texture mode: 'triangle_fan' ''' class MeshBallApp(App): mesh_points = ListProperty([]) mesh_texture = ObjectProperty(None) def build(self): self.mesh_texture = CoreImage('data/logo/kivy-icon-512.png').texture Clock.schedule_interval(self.update_points, 0) return Builder.load_string(kv) def update_points(self, *args): points = [Window.width / 2, Window.height / 2, .5, .5] i = 0 while i < 2 * pi: i += 0.01 * pi points.extend([ Window.width / 2 + cos(i) * 100, Window.height / 2 + sin(i) * 100, .5 + cos(i), .5 + sin(i)]) self.mesh_points = points if __name__ == '__main__': MeshBallApp().run() Now, this is not very impressive, we just have an image, there were much simpler ways to do that! But now, we can tweek the rendering a lot, lets add some offseting of the texture, some radius change, and some wobbling, with sliders to control all this. from kivy.app import App from kivy.lang import Builder from kivy.core.image import Image as CoreImage from kivy.properties import ListProperty, ObjectProperty, NumericProperty from kivy.clock import Clock from kivy.core.window import Window from math import sin, cos, pi kv = ''' BoxLayout: Widget: canvas: Color: rgba: 1, 1, 1, 1 Mesh: vertices: app.mesh_points indices: range(len(app.mesh_points) / 4) texture: app.mesh_texture mode: 'triangle_fan' BoxLayout: orientation: 'vertical' size_hint_x: None width: 100 Slider: value: app.offset_x on_value: app.offset_x = args[1] min: -1 max: 1 Slider: value: app.offset_y on_value: app.offset_y = args[1] min: -1 max: 1 Slider: value: app.radius on_value: app.radius = args[1] min: 10 max: 1000 Slider: value: app.sin_wobble on_value: app.sin_wobble = args[1] min: -50 max: 50 Slider: value: app.sin_wobble_speed on_value: app.sin_wobble_speed = args[1] min: 0 max: 50 step: 1 ''' class MeshBallApp(App): mesh_points = ListProperty([]) mesh_texture = ObjectProperty(None) radius = NumericProperty(500) offset_x = NumericProperty(.5) offset_y = NumericProperty(.5) sin_wobble = NumericProperty(0) sin_wobble_speed = NumericProperty(0) def build(self): self.mesh_texture = CoreImage('data/logo/kivy-icon-512.png').texture Clock.schedule_interval(self.update_points, 0) return Builder.load_string(kv) def update_points(self, *args): points = [Window.width / 2, Window.height / 2, .5, .5] i = 0 while i < 2 * pi: i += 0.01 * pi points.extend([ Window.width / 2 + cos(i) * (self.radius + self.sin_wobble * sin(i * self.sin_wobble_speed)), Window.height / 2 + sin(i) * (self.radius + self.sin_wobble * sin(i * self.sin_wobble_speed)), self.offset_x + sin(i), self.offset_y + cos(i)]) self.mesh_points = points if __name__ == '__main__': MeshBallApp().run() which gives us: Now we are talking!
Winning Powerball tickets sold in 2 SD cities Posted on 18 September 2014 Posted on 18 September 2014 Posted on 18 September 2014 Posted on 18 September 2014 Posted on 18 September 2014 Posted on 18 September 2014 Posted on 18 September 2014 Posted on 17 September 2014 Posted on 17 September 2014 Posted on 17 September 2014 Posted on 15 September 2014 Let's pretend it's like Powerball - the drawing is a string of 5 numbers plus 1 other number. My brain says Person A has a higher chance. But it seems like Person B has "more" chances. Help me, mathematicians! EDIT: Absolutely fascinating range of responses! Thanks r/askscience! You did not disappoint. Posted on 6 March 2014 Posted on 18 December 2013 Your comment will be timestamped so everyone in the future will know you predicted it before it happened. And if you're wrong we can mock you in the future. -The winner of an election? -A major natural disaster? -Winning Powerball numbers? -A big company merger? -The winners of a major sporting event -A company going bankrupt? -A disease cured? -A celebrity marriage? -A new pope? -The value of bitcoin? Try and be specific so people in the future can be sure if you were correct or not. Posted on 5 December 2013 Posted on 20 November 2013 Let's say I managed to find a way to communicate with the future and won the lottery because of it. If I were found out, would there be legal consequences? Or is time travel so unlikely that there's nothing on the books for it? Posted on 16 November 2013 I mean sure they increased the odds of winning "a" prize, but now that more numbers have been added, the odds of winning the jackpot are 1:258million, (instead of 1:175million) almost double that of the powerball. Is anyone ever going to win this thing? I can seriously see it getting up to a billion dollars at this rate. Posted on 14 November 2013 So as the title says, i'm trying to make an app that analyzes the powerballs winning numbers history (available for free on powerball.com), and gives statistics based on the findings, such as, what number appears the most in all the winning number combinations. A. Am I allowed to used these numbers to do something like this? And B. If so, am I allowed to sell this to others? Thanks in advance! Posted on 13 November 2013 Posted on 17 September 2013 One question you've ever wanted to have answered. What are the winning Powerball numbers? Who's my dad? What happens after we die? Posted on 9 August 2013 Posted on 18 May 2013 Posted on 18 May 2013 Posted on 18 May 2013 I need 6 numbers please. Reddit gold will be purchased for all commentators upon payment of division 1 prize. Thanking you in advance my psychic friends! Posted on 30 April 2013 So I'm working on my final project for Java, and I can't get past this bug. I basically need to print 6 lottery TICKET objects (which has an array of 5 ints and a powerball int) to a file. The first three are randomly generated. The fourth has 4 winning numbers and a random int for the array and then the winning powerball number. The fifth has the 5 winning numbers and a random powerball. And the sixth is the winning ticket. I've been able to generate all of the tickets correctly in the code, but for some reason when I try to print them to a txt file, the first three tickets' arrays all mirror the 4th. The powerballs remain random, however. For example: This is what the program will output Done and this is what will print to the txt: Here is the spec if it helps: spec This is my code: Any help would be great, thanks! Posted on 26 April 2013 Tell me what you think the Powerball numbers will be tomorrow night. I will go buy the first 20 numbers posted in the morning. If your number wins I will give you $10,000,000 of the winnings. Powerball is five numbers 1-59 and one powerball 1-35. Good luck! Posted on 15 March 2013 Ok in honor of my cakeday, I want to gift you! My husband is an avid powerball player and always plays 5 strings of numbers. All you have to do is guess what the 5 power ball numbers on his most recent ticket are. Powerball numbers run 1-50, the person with the most correct numbers wins! To be clear, you have to pick 5 numbers 1-50 to enter the contest. Prize will be awarded 3/8/13 and will be around $20. You may only enter once, duplicate entries will void all entries. You also must have gifted at least 1 person. You can gift anyone up until the 8th, as long as you gifted someone. Price of the gift doesn't matter either, just the gifting itself. Hints The Contest has ended and the winner has been gifted. Posted on 28 February 2013 Posted on 28 December 2012 Here is my code: ___________________________________ #!/usr/bin/python def list_of_tickets(): list_of_tickets = [] j = 0 print "How many ticket do you have?" number_of_tickets = int(raw_input(">")) while j < number_of_tickets: print "Enter ticket %s" %(j+1) list_of_tickets.append(number_input()) j = j+1 return list_of_tickets def number_input(): i = 1 ticket_list = [] while i < 6: print "Enter number %s" %i number = int(raw_input(">")) if number < 1 or number > 59 or number in ticket_list: print "Please input a unique number between 1 and 59" else: ticket_list.append(number) i = i+1 while len(ticket_list)<6: print "Enter the powerball:" powerball = int(raw_input(">")) if powerball < 1 or powerball > 35: print "Please input a unique number between 1 and 35" else: ticket_list.append(powerball) return ticket_list def winning_input(): print "Enter the winning numbers:" i = 1 ticket_list = [] while i < 6: print "Enter number %s" %i number = int(raw_input(">")) if number < 1 or number > 59 or number in ticket_list: print "Please input a unique number between 1 and 59" else: ticket_list.append(number) i = i+1 while len(ticket_list)<6: print "Enter the powerball:" powerball = int(raw_input(">")) if powerball < 1 or powerball > 35: print "Please input a unique number between 1 and 35" else: ticket_list.append(powerball) return ticket_list def power_play(): print "Did you select the power play? (Y/N)" power_play = raw_input(">") if power_play[0] == 'Y': power_play = True else: power_play = False return power_play def validator(list_of_tickets, winning_ticket): winlist = [] for i in list_of_tickets: win_count = 0 powerball = "" for j in i[:-1]: if j in winning_ticket[:-1]: win_count = win_count + 1 if i[-1] == winning_ticket[-1]: powerball = "p" if i[-1] != winning_ticket[-1]: powerball = "n" results = str(win_count)+powerball winlist.append(results) return winlist #todo: make the answers from validator match up to prizes def prize_calc(results, powerplay): prizes = {'0p':4, '1p':4, '2p':7, '3n':7, '3p':100, '4n':100, '4p':10000, '5n':1000000, } for i in results: if i in prizes: if powerplay == True: print prizes[i]*2 else: print prizes[i] elif i == '5p': print "YOU WON THE JACKPOT!!!" else: print "Sorry, this wasn't a winner" print "Powerball checker v.0.1" list_of_tickets = list_of_tickets() winning_ticket = winning_input() power_play = power_play() results = validator(list_of_tickets, winning_ticket) prize_calc(results,power_play) ___________________________________ I suspect that I could have done a better job with variable naming. I am specifically looking for any feedback as to structure. I know it makes sense to me, but was this an efficient way to structure my program? Is it readable? Does anyone have any suggestions for new features in the next iteration? Thanks for your time, and any feedback! I appreciate it! Posted on 23 December 2012 Posted on 30 November 2012 Right now, the PowerBall jackpot stands at $550 million, with an estimated cash value of $360.2 million. The odds of winning a full jackpot (hitting all six numbers) are stated on the Powerball website as being 1 in 175,223,510. Each entry costs $2, so the cost of buying 175,223,510 tickets is $350,447,020. If that gets you a guaranteed jackpot win, then you're still profiting nearly $10 million. I realize that there are taxes and other things to consider, but is this wrong? Would buying 175,223,510 and somehow guaranteeing that you had no duplicate tickets guarantee you a jackpot win? There is also an option to add a random multiplier between 2 and 5 to your prize by playing the "PowerPlay" option, which costs an additional dollar per ticket. Would that be a better way to guarantee ROI? I know this is a ridiculous question, but I'm bored at work, and started thinking about it, so I thought I'd ask the internet. EDIT: Forgot about other people winning simultaneously. You'd have to dedicate at least $1 million to sending people around the country and preventing other people from buying tickets through intimidation I guess. Posted on 28 November 2012 Edit - missed some zeroes in the title. Actual odds are 1:175,000,000. Note the payout schedule below - you can win with a single number plus the powerball, and in a variety of other ways. Assume, for the sake of argument, that no one else hits the winning number (and that I can find a licensed retailer who will sell me 175,000,000 tickets). Also assume that I ignore the "PowerPlay" because I want to keep my investment to $175M. Edit: Investment = $350M, because tickets are $2 each. The PowerBall rules govern payouts like this: Matches Prize Powerball only $4 1 number plus PB $4 2 numbers plus PB $7 3 numbers; no PB $7 3 numbers plus PB $100 4 numbers; no PB $100 4 numbers plus PB $10,000 5 numbers; no PB $1,000,000 5 numbers plus PB Jackpot For those not familiar with the PowerBall lottery, you pick five numbers from 1-59, and a PowerBall from 1-35. For the non-power balls, you're drawing from one pot - a number cannot appear twice. The PowerBall machine contains a separate collection of balls, so a number can be a nonPowerBall and a PowerBall. I'm not smart enough to write a simulation to figure it out ... can Reddit help? Posted on 27 November 2012 Also the winning powerball numbers would be nice as well. Posted on 6 November 2012 I know you know, help me out....I need out of the corporate grind. I'll even throw you a few hundred thousand just for helping out, promise. Posted on 1 February 2012 The discussion is around whether it's better to pick your own numbers or let the terminal randomly pick the numbers for you. My argument: The chances are equal either way. The numbers come out randomly, and have no memory of the past (i.e if I roll a 4 on a dice five times, The chances of me getting a 4 the sixth time are just as equal as the first five times). So if you play 2-4-6-8-10-12 every week, or get a randomly new number every time, they both have an equal chance of winning. Her argument: If I consistently play the "2-4-6-8-10-12" (just an example) combination every week, MY selection stays consistent. And if my numbers stay the same every week, they just have to match the winning numbers. BUT, If I let the machine pick my numbers, the universe has to match one set of random numbers (machine picked) to another set of random numbers (the winning ones), which makes it twice as hard to win. So, statisticians and probabilities experts. What do you think? How do "chances of winning" apply to the above two situations? PS - We already know how ridiculously slim the chances are of actually winning and how it's a "volunteer tax" and a complete waste of money. We're just discussing odds. Posted on 2 November 2011 Just to set the situation: I'm working up the courage to leave. I love my parents and it's the only think keeping me from leaving at the moment. My Dad was the PO of the congregation up until they changed the title to COBE and now he's the congregation secretary, and my Mom is a regular pioneer and probably one of the biggest prudes you'll ever meet. I'm not exactly sure how to approach this as I have proof my Dad has been looking at porn (at home and at work) as well as reading some pretty kinky fiction ( homo, adultery, pedo, incest, etc ). At first I thought it might have been a onetime lapse in judgement, but I've been checking his history pretty regularly and this has been happening for over a year. The other thing is he seems to be buying lottery tickets. I've never seen the stubs, but when checking up on his history looking for evidence of porn, he has been checking the winning numbers for the Powerball lotto every week. Last week, my Mom found a receipt that included line items for lotto tickets, and I had to listen to a boldfaced lie that that must have been part of the receipt from the customer before him. I'm not sure what she'll do if she finds out the truth. I need an outside party giving me some advice here. Posted on 23 March 2011
sefiane [résolu] Logithèque / Synaptic Salut ! J'ai besoin de votre aide S.V.P ! Je suis débutant dans Linux , j'ai la version 12.O4 ATS. Il fonctionnait très bien sans problème, une fois additionner Synaptic, Logithèque ne voulait pas répondre ! Il plantait, lorsque je fermais, une fenêtre apparaît, " logithèque ne répond pas " si vous éteignez vous risquez de perdre tous vos enregistrements ! Quant-à synatic, celui-ci a la suite d'une fausse manœuvre, l'icône a disparut de mon lanceur ! Question, comment pourrai-je récupérer les deux logiciels ? Merci ! edit modo : passage en résolu. Dernière modification par nesthib (Le 23/06/2013, à 00:49) Sefiane Hors ligne carld2010 Re : [résolu] Logithèque / Synaptic Bonjour, je suggère une réinstallation complète: sudo apt-get autoremove --purge synaptic software-center sudo apt-get update sudo apt-get install synaptic software-center une fois additionner Synaptic Quest-ce que tu veut dire par "additionné" ? Installer? Ouvrir? Dernière modification par carld2010 (Le 16/05/2013, à 19:19) Hors ligne sefiane Re : [résolu] Logithèque / Synaptic Salut carld2O1O ! je n'ai pas encore essayé votre suggestion jusqu'ici, de toutes les façons je le faire! Je veux dire par additionné, " ajouté " merci. Sefiane Sefiane Hors ligne sefiane Re : [résolu] Logithèque / Synaptic Salut xabilon ! Je vous demande toutes mes excuses de vous avoir trop importuné pour çà, je me suis rendu compte que ma parabole été désorientée ! Si vous aurez un peu de temps a soustraire de vos préoccupations, veuillez me porter aide, dont j'aurais besoin. merci d'avance ! Mr Sefiane Sefiane Hors ligne sefiane Re : [résolu] Logithèque / Synaptic Salut carld 2010 j'ai collé sur le terminal "sudo apt-get autoremove --purge synaptic software-center" sudo apt-get update sudo apt-get install synaptic software-center J'ai un autre problème sur le terminal, lorsque je tape mon mot de passe sur celui-ci, rien a faire, le curseur clignote, et pas de d'écriture, rien ! j'attends encore une réponse, merci bien. Sefiane Sefiane Hors ligne Nasman Re : [résolu] Logithèque / Synaptic Le mot de passe doit être tapé "à l'aveugle", rien ne s'affiche et pas de déplacement du curseur. Ce comportement est tout à fait normal. Les caractères saisis sont cependant pris en compte. PC fixe et portable avec Precise 64 bits Hors ligne sefiane Re : [résolu] Logithèque / Synaptic Salut Nasman ! j'ai essayé ce que vous dite avant , je vais encore réessayer plus tard, merci Sefiane Sefiane Hors ligne sefiane Re : [résolu] Logithèque / Synaptic Salut Nasman ! C'est fait, j'ai collé : sudo apt-get autoremove --purge synaptic software-center Voilà ce que j'ai trouvé miloud@miloud-System-Product-Name:~$ sudo apt-get autoremove --purge synaptic software-center [sudo] password for miloud: Lecture des listes de paquets... Fait Construction de l'arbre des dépendances Lecture des informations d'état... Fait Les paquets suivants seront ENLEVÉS : kde-l10n-engb* kde-l10n-fr* language-pack-kde-en* language-pack-kde-en-base* language-pack-kde-fr* language-pack-kde-fr-base* software-center* synaptic* ubuntu-desktop* 0 mis à jour, 0 nouvellement installés, 9 à enlever et 0 non mis à jour. Après cette opération, 73,0 Mo d'espace disque seront libérés. Souhaitez-vous continuer [O/n] ? à titre de rappel je suis sur Ubuntu 12.O4 merci, Sefiane Dernière modification par Shanx (Le 18/05/2013, à 09:15) Sefiane Hors ligne bahoui Re : [résolu] Logithèque / Synaptic Bonjour, abandonne la commande. retourne le résultat de sudo apt-get update && sudo apt-get upgrade et utilise les balises code pour retourner le résultat ( <> en bleu dans la barre d'outils des messages) puis sudo apt-get install --reinstall software-center sudo apt-get install --reinstall synaptic Mettez [résolu] dans le titre quand le problème est réglé (cliquez sur "modifier" dans votre 1er message). Hors ligne sefiane Re : [résolu] Logithèque / Synaptic Salut bahoui ! j'ai collé: sudo apt-get update && sudo apt-get upgrade voilà ce que j'ai trouvé : E: Le téléchargement de quelques fichiers d'index a échoué, ils ont été ignorés, ou les anciens ont été utilisés à la place. çà pour Synaptic, qui l'icone a disparu ? merci de votre attention ! Sefiane Dernière modification par ljere (Le 20/05/2013, à 20:54) Sefiane Hors ligne bahoui Re : [résolu] Logithèque / Synaptic retourne le résultat complet de la commande entre balises code. Mettez [résolu] dans le titre quand le problème est réglé (cliquez sur "modifier" dans votre 1er message). Hors ligne sefiane Re : [résolu] Logithèque / Synaptic Salut bahoui rien a faire, j'ai ressayé les trois commandes, j'ai patienté jusqu'à la fin, il me redemande d'ajouter encore des commandes comme çà :miloud@miloud-System-Product-Name:~$ Merci bien. Sefiane Sefiane Hors ligne sefiane Re : [résolu] Logithèque / Synaptic Rebonjour bahoui ! Ci-jointe la copie de terminal : miloud@miloud-System-Product-Name:~$ sudo apt-get update && sudo apt-get upgrad [sudo] password for miloud: Réception de : 1 [url]http://extras.ubuntu.com[/url] precise Release.gpg [72 B] Atteint [url]http://fr.archive.ubuntu.com[/url] precise Release.gpg Réception de : 2 [url]http://fr.archive.ubuntu.com[/url] precise-updates Release.gpg [198 B] Réception de : 3 [url]http://fr.archive.ubuntu.com[/url] precise-backports Release.gpg [198 B] Atteint [url]http://extras.ubuntu.com[/url] precise Release Atteint [url]http://fr.archive.ubuntu.com[/url] precise Release Réception de : 4 [url]http://security.ubuntu.com[/url] precise-security Release.gpg [198 B] Atteint [url]http://archive.canonical.com[/url] precise Release.gpg Réception de : 5 [url]http://fr.archive.ubuntu.com[/url] precise-updates Release [49,6 kB] Réception de : 6 [url]http://security.ubuntu.com[/url] precise-security Release [49,6 kB] Atteint [url]http://archive.canonical.com[/url] precise Release Atteint [url]http://extras.ubuntu.com[/url] precise/main Sources Atteint [url]http://extras.ubuntu.com[/url] precise/main i386 Packages Ign [url]http://extras.ubuntu.com[/url] precise/main TranslationIndex Atteint [url]http://archive.canonical.com[/url] precise/partner i386 Packages Ign [url]http://archive.canonical.com[/url] precise/partner TranslationIndex Réception de : 7 [url]http://fr.archive.ubuntu.com[/url] precise-backports Release [49,6 kB] Réception de : 8 [url]http://security.ubuntu.com[/url] precise-security/main Sources [71,9 kB] Atteint [url]http://fr.archive.ubuntu.com[/url] precise/main Sources Atteint [url]http://fr.archive.ubuntu.com[/url] precise/restricted Sources Ign [url]http://extras.ubuntu.com[/url] precise/main Translation-fr_FR Ign [url]http://extras.ubuntu.com[/url] precise/main Translation-fr Atteint [url]http://fr.archive.ubuntu.com[/url] precise/universe Sources Atteint [url]http://fr.archive.ubuntu.com[/url] precise/multiverse Sources Atteint [url]http://fr.archive.ubuntu.com[/url] precise/main i386 Packages Atteint [url]http://fr.archive.ubuntu.com[/url] precise/restricted i386 Packages Atteint [url]http://fr.archive.ubuntu.com[/url] precise/universe i386 Packages Atteint [url]http://fr.archive.ubuntu.com[/url] precise/multiverse i386 Packages Atteint [url]http://fr.archive.ubuntu.com[/url] precise/main TranslationIndex Ign [url]http://extras.ubuntu.com[/url] precise/main Translation-en Atteint [url]http://fr.archive.ubuntu.com[/url] precise/multiverse TranslationIndex Atteint [url]http://fr.archive.ubuntu.com[/url] precise/restricted TranslationIndex Ign [url]http://archive.canonical.com[/url] precise/partner Translation-fr_FR Ign [url]http://archive.canonical.com[/url] precise/partner Translation-fr Atteint [url]http://fr.archive.ubuntu.com[/url] precise/universe TranslationIndex Réception de : 9 [url]http://fr.archive.ubuntu.com[/url] precise-updates/main Sources [383 kB] Ign [url]http://archive.canonical.com[/url] precise/partner Translation-en Réception de : 10 [url]http://security.ubuntu.com[/url] precise-security/restricted Sources [2 494 B] Réception de : 11 [url]http://security.ubuntu.com[/url] precise-security/universe Sources [24,4 kB] Réception de : 12 [url]http://security.ubuntu.com[/url] precise-security/multiverse Sources [1 380 B] Réception de : 13 [url]http://security.ubuntu.com[/url] precise-security/main i386 Packages [274 kB] Réception de : 14 [url]http://fr.archive.ubuntu.com[/url] precise-updates/restricted Sources [5 467 B] Réception de : 15 [url]http://fr.archive.ubuntu.com[/url] precise-updates/universe Sources [87,1 kB] Réception de : 16 [url]http://fr.archive.ubuntu.com[/url] precise-updates/multiverse Sources [6 582 B] Réception de : 17 [url]http://fr.archive.ubuntu.com[/url] precise-updates/main i386 Packages [625 kB] Réception de : 18 [url]http://security.ubuntu.com[/url] precise-security/restricted i386 Packages [4 620 B] Réception de : 19 [url]http://security.ubuntu.com[/url] precise-security/universe i386 Packages [74,8 kB] Réception de : 20 [url]http://security.ubuntu.com[/url] precise-security/multiverse i386 Packages [2 375 B] Atteint [url]http://security.ubuntu.com[/url] precise-security/main TranslationIndex Atteint [url]http://security.ubuntu.com[/url] precise-security/multiverse TranslationIndex Atteint [url]http://security.ubuntu.com[/url] precise-security/restricted TranslationIndex Atteint [url]http://security.ubuntu.com[/url] precise-security/universe TranslationIndex Atteint [url]http://security.ubuntu.com[/url] precise-security/main Translation-en Atteint [url]http://security.ubuntu.com[/url] precise-security/multiverse Translation-en Atteint [url]http://security.ubuntu.com[/url] precise-security/restricted Translation-en Atteint [url]http://security.ubuntu.com[/url] precise-security/universe Translation-en Réception de : 21 [url]http://fr.archive.ubuntu.com[/url] precise-updates/restricted i386 Packages [10,0 kB] Réception de : 22 [url]http://fr.archive.ubuntu.com[/url] precise-updates/universe i386 Packages [203 kB] Réception de : 23 [url]http://fr.archive.ubuntu.com[/url] precise-updates/multiverse i386 Packages [13,8 kB] Atteint [url]http://fr.archive.ubuntu.com[/url] precise-updates/main TranslationIndex Atteint [url]http://fr.archive.ubuntu.com[/url] precise-updates/multiverse TranslationIndex Atteint [url]http://fr.archive.ubuntu.com[/url] precise-updates/restricted TranslationIndex Atteint [url]http://fr.archive.ubuntu.com[/url] precise-updates/universe TranslationIndex Réception de : 24 [url]http://fr.archive.ubuntu.com[/url] precise-backports/main Sources [2 899 B] Réception de : 25 [url]http://fr.archive.ubuntu.com[/url] precise-backports/restricted Sources [14 B] Réception de : 26 [url]http://fr.archive.ubuntu.com[/url] precise-backports/universe Sources [29,9 kB] Réception de : 27 [url]http://fr.archive.ubuntu.com[/url] precise-backports/multiverse Sources [3 641 B] Réception de : 28 [url]http://fr.archive.ubuntu.com[/url] precise-backports/main i386 Packages [2 376 B] Réception de : 29 [url]http://fr.archive.ubuntu.com[/url] precise-backports/restricted i386 Packages [14 B] Réception de : 30 [url]http://fr.archive.ubuntu.com[/url] precise-backports/universe i386 Packages [30,1 kB] Réception de : 31 [url]http://fr.archive.ubuntu.com[/url] precise-backports/multiverse i386 Packages [3 532 B] Atteint [url]http://fr.archive.ubuntu.com[/url] precise-backports/main TranslationIndex Atteint [url]http://fr.archive.ubuntu.com[/url] precise-backports/multiverse TranslationIndex Atteint [url]http://fr.archive.ubuntu.com[/url] precise-backports/restricted TranslationIndex Atteint [url]http://fr.archive.ubuntu.com[/url] precise-backports/universe TranslationIndex Atteint [url]http://fr.archive.ubuntu.com[/url] precise/main Translation-fr_FR Atteint [url]http://fr.archive.ubuntu.com[/url] precise/main Translation-fr Atteint [url]http://fr.archive.ubuntu.com[/url] precise/main Translation-en Atteint [url]http://fr.archive.ubuntu.com[/url] precise/multiverse Translation-fr_FR Atteint [url]http://fr.archive.ubuntu.com[/url] precise/multiverse Translation-fr Atteint [url]http://fr.archive.ubuntu.com[/url] precise/multiverse Translation-en Atteint [url]http://fr.archive.ubuntu.com[/url] precise/restricted Translation-fr_FR Atteint [url]http://fr.archive.ubuntu.com[/url] precise/restricted Translation-fr Atteint [url]http://fr.archive.ubuntu.com[/url] precise/restricted Translation-en Atteint [url]http://fr.archive.ubuntu.com[/url] precise/universe Translation-fr_FR Atteint [url]http://fr.archive.ubuntu.com[/url] precise/universe Translation-fr Atteint [url]http://fr.archive.ubuntu.com[/url] precise/universe Translation-en Atteint [url]http://fr.archive.ubuntu.com[/url] precise-updates/main Translation-fr Atteint [url]http://fr.archive.ubuntu.com[/url] precise-updates/main Translation-en Atteint [url]http://fr.archive.ubuntu.com[/url] precise-updates/multiverse Translation-fr Atteint [url]http://fr.archive.ubuntu.com[/url] precise-updates/multiverse Translation-en Atteint [url]http://fr.archive.ubuntu.com[/url] precise-updates/restricted Translation-fr Atteint [url]http://fr.archive.ubuntu.com[/url] precise-updates/restricted Translation-en Atteint [url]http://fr.archive.ubuntu.com[/url] precise-updates/universe Translation-fr Atteint [url]http://fr.archive.ubuntu.com[/url] precise-updates/universe Translation-en Atteint [url]http://fr.archive.ubuntu.com[/url] precise-backports/main Translation-en Atteint [url]http://fr.archive.ubuntu.com[/url] precise-backports/multiverse Translation-en Atteint [url]http://fr.archive.ubuntu.com[/url] precise-backports/restricted Translation-en Atteint [url]http://fr.archive.ubuntu.com[/url] precise-backports/universe Translation-en 2 012 ko réceptionnés en 44s (44,9 ko/s) Lecture des listes de paquets... Fait E: L'opération upgrad n'est pas valable miloud@miloud-System-Product-Name:~$ sudo apt-get install --reinstall software-center Lecture des listes de paquets... Fait Construction de l'arbre des dépendances Lecture des informations d'état... Fait Les paquets suivants ont été installés automatiquement et ne sont plus nécessaires : kde-l10n-fr language-pack-kde-en language-pack-kde-fr language-pack-kde-en-base kde-l10n-engb language-pack-kde-fr-base Veuillez utiliser « apt-get autoremove » pour les supprimer. 0 mis à jour, 0 nouvellement installés, 1 réinstallés, 0 à enlever et 0 non mis à jour. Il est nécessaire de prendre 0 o/624 ko dans les archives. Après cette opération, 0 o d'espace disque supplémentaires seront utilisés. (Lecture de la base de données... 236662 fichiers et répertoires déjà installés.) Préparation du remplacement de software-center 5.2.9 (en utilisant .../software-center_5.2.9_all.deb) ... Dépaquetage de la mise à jour de software-center ... Traitement des actions différées (« triggers ») pour « man-db »... Traitement des actions différées (« triggers ») pour « hicolor-icon-theme »... Traitement des actions différées (« triggers ») pour « bamfdaemon »... Rebuilding /usr/share/applications/bamf.index... Traitement des actions différées (« triggers ») pour « desktop-file-utils »... Traitement des actions différées (« triggers ») pour « gnome-menus »... Paramétrage de software-center (5.2.9) ... Updating software catalog...this may take a moment. INFO:softwarecenter.db.pkginfo_impl.aptcache:aptcache.open() Software catalog update was successful. miloud@miloud-System-Product-Name:~$ sudo apt-get install --reinstall synaptic Lecture des listes de paquets... Fait Construction de l'arbre des dépendances Lecture des informations d'état... Fait Les paquets suivants ont été installés automatiquement et ne sont plus nécessaires : kde-l10n-fr language-pack-kde-en language-pack-kde-fr language-pack-kde-en-base kde-l10n-engb language-pack-kde-fr-base Veuillez utiliser « apt-get autoremove » pour les supprimer. 0 mis à jour, 0 nouvellement installés, 1 réinstallés, 0 à enlever et 0 non mis à jour. Il est nécessaire de prendre 0 o/2 405 ko dans les archives. Après cette opération, 0 o d'espace disque supplémentaires seront utilisés. (Lecture de la base de données... 236662 fichiers et répertoires déjà installés.) Préparation du remplacement de synaptic 0.75.9ubuntu1 (en utilisant .../synaptic_0.75.9ubuntu1_i386.deb) ... Dépaquetage de la mise à jour de synaptic ... Traitement des actions différées (« triggers ») pour « hicolor-icon-theme »... Traitement des actions différées (« triggers ») pour « bamfdaemon »... Rebuilding /usr/share/applications/bamf.index... Traitement des actions différées (« triggers ») pour « desktop-file-utils »... Traitement des actions différées (« triggers ») pour « gnome-menus »... Traitement des actions différées (« triggers ») pour « man-db »... Traitement des actions différées (« triggers ») pour « menu »... Paramétrage de synaptic (0.75.9ubuntu1) ... Traitement des actions différées (« triggers ») pour « menu »... miloud@miloud-System-Product-Name:~$ miloud@miloud-System-Product-Name:~$ sudo apt-get install --reinstall synaptic Lecture des listes de paquets... Fait Construction de l'arbre des dépendances Lecture des informations d'état... Fait Les paquets suivants ont été installés automatiquement et ne sont plus nécessaires : kde-l10n-fr language-pack-kde-en language-pack-kde-fr language-pack-kde-en-base kde-l10n-engb language-pack-kde-fr-base Veuillez utiliser « apt-get autoremove » pour les supprimer. 0 mis à jour, 0 nouvellement installés, 1 réinstallés, 0 à enlever et 0 non mis à jour. Il est nécessaire de prendre 0 o/2 405 ko dans les archives. Après cette opération, 0 o d'espace disque supplémentaires seront utilisés. (Lecture de la base de données... 236662 fichiers et répertoires déjà installés.) Préparation du remplacement de synaptic 0.75.9ubuntu1 (en utilisant .../synaptic_0.75.9ubuntu1_i386.deb) ... Dépaquetage de la mise à jour de synaptic ... Traitement des actions différées (« triggers ») pour « hicolor-icon-theme »... Traitement des actions différées (« triggers ») pour « bamfdaemon »... Rebuilding /usr/share/applications/bamf.index... Traitement des actions différées (« triggers ») pour « desktop-file-utils »... Traitement des actions différées (« triggers ») pour « gnome-menus »... Traitement des actions différées (« triggers ») pour « man-db »... Traitement des actions différées (« triggers ») pour « menu »... Paramétrage de synaptic (0.75.9ubuntu1) ... Traitement des actions différées (« triggers ») pour « menu »... miloud@miloud-System-Product-Name:~$ miloud@miloud-System-Product-Name:~$ Dernière modification par Shanx (Le 18/05/2013, à 09:14) Sefiane Hors ligne Shanx Re : [résolu] Logithèque / Synaptic Salut. Tout d’abord, fais attention à bien copier les commandes qu’on te donne. bahoui t’a proposé sudo apt-get upgrad e, si tu ne mets pas le « e » final ça ne peux pas fonctionner. Et si tu avais lu le message d’erreur, tu aurais pu corriger cette erreur sans qu’on doive te le signaler, donc tu aurais gagné du temps. Ensuite, place les retours des commandes entre balises code (ce symbole au dessus de la zone où tu tapes ton message : ). J’ai modifié tes messages précédents pour les ajouter, mais à l’avenir ce serait bien que tu le fasse toi même. Donc peux-tu lancer ces deux commandes l’une après l’autre (sans faute de frappe) et nous coller le résultat (entre balises code) ? sudo apt-get update sudo apt-get upgrade « En vérité, je ne voyage pas, moi, pour atteindre un endroit précis, mais pour marcher : simple plaisir de voyager. » R. L. Stevenson -- Blog et randos Hors ligne sefiane Re : [résolu] Logithèque / Synaptic Bonjour Shanx ! j'ai essayé de faire copier coller pour ne pas se tromper ( en lançant l'une après l'autre les deux commandes ) Voci la copie de ce que j'ai trouvé en fin de compte ! Merci bien! Sefiane Atteint http://fr.archive.ubuntu.com precise/multiverse Translation-en Atteint http://fr.archive.ubuntu.com precise/restricted Translation-fr_FR Atteint http://fr.archive.ubuntu.com precise/restricted Translation-fr Atteint http://fr.archive.ubuntu.com precise/restricted Translation-en Atteint http://fr.archive.ubuntu.com precise/universe Translation-fr_FR Atteint http://fr.archive.ubuntu.com precise/universe Translation-fr Atteint http://fr.archive.ubuntu.com precise/universe Translation-en Atteint http://fr.archive.ubuntu.com precise-updates/main Translation-fr Atteint http://fr.archive.ubuntu.com precise-updates/main Translation-en Atteint http://fr.archive.ubuntu.com precise-updates/multiverse Translation-fr Atteint http://fr.archive.ubuntu.com precise-updates/multiverse Translation-en Atteint http://fr.archive.ubuntu.com precise-updates/restricted Translation-fr Atteint http://fr.archive.ubuntu.com precise-updates/restricted Translation-en Atteint http://fr.archive.ubuntu.com precise-updates/universe Translation-fr Atteint http://fr.archive.ubuntu.com precise-updates/universe Translation-en Atteint http://fr.archive.ubuntu.com precise-backports/main Translation-en Atteint http://fr.archive.ubuntu.com precise-backports/multiverse Translation-en Atteint http://fr.archive.ubuntu.com precise-backports/restricted Translation-en Atteint http://fr.archive.ubuntu.com precise-backports/universe Translation-en 2 012 ko réceptionnés en 1min 47s (18,8 ko/s) Lecture des listes de paquets... Fait miloud@miloud-System-Product-Name:~$ sudo apt-get upgrade Lecture des listes de paquets... Fait Construction de l'arbre des dépendances Lecture des informations d'état... Fait 0 mis à jour, 0 nouvellement installés, 0 à enlever et 0 non mis à jour. miloud@miloud-System-Product-Name:~$ Dernière modification par ljere (Le 20/05/2013, à 20:55) Sefiane Hors ligne Shanx Re : [résolu] Logithèque / Synaptic Y’a quoi que tu n’as pas compris dans cette phrase : Ensuite, place les retours des commandes entre balises code (ce symbole au dessus de la zone où tu tapes ton message : ). J’ai modifié tes messages précédents pour les ajouter, mais à l’avenir ce serait bien que tu le fasse toi même. ? Si tu lances la logithèque ou synaptic, l’erreur persiste ? Dernière modification par Shanx (Le 18/05/2013, à 13:34) « En vérité, je ne voyage pas, moi, pour atteindre un endroit précis, mais pour marcher : simple plaisir de voyager. » R. L. Stevenson -- Blog et randos Hors ligne sefiane Re : [résolu] Logithèque / Synaptic Salut Shanx ! je n'ai pas réussi a le faire ! le symbole dont vous m'avez décris , je ne l'ai trouvé ! Entre autres, sachez bien que je suis débutant dans l'informatique ! Ci-jointe encore une copie du résultat ! miloud@miloud-System-Product-Name:~$ sudo apt-get update [sudo] password for miloud: Atteint http://archive.canonical.com precise Release.gpg Atteint http://extras.ubuntu.com precise Release.gpg Atteint http://fr.archive.ubuntu.com precise Release.gpg Atteint http://fr.archive.ubuntu.com precise-updates Release.gpg Atteint http://fr.archive.ubuntu.com precise-backports Release.gpg Atteint http://archive.canonical.com precise Release Atteint http://fr.archive.ubuntu.com precise Release Atteint http://fr.archive.ubuntu.com precise-updates Release Réception de : 1 http://security.ubuntu.com precise-security Release.gpg [198 B] Atteint http://extras.ubuntu.com precise Release Réception de : 2 http://security.ubuntu.com precise-security Release [49,6 kB] Atteint http://fr.archive.ubuntu.com precise-backports Release Atteint http://archive.canonical.com precise/partner i386 Packages Ign http://archive.canonical.com precise/partner TranslationIndex Ign http://archive.canonical.com precise/partner Translation-fr_FR Ign http://archive.canonical.com precise/partner Translation-fr Ign http://archive.canonical.com precise/partner Translation-en Atteint http://fr.archive.ubuntu.com precise/main Sources Atteint http://fr.archive.ubuntu.com precise/restricted Sources Atteint http://fr.archive.ubuntu.com precise/universe Sources Atteint http://fr.archive.ubuntu.com precise/multiverse Sources Atteint http://fr.archive.ubuntu.com precise/main i386 Packages Atteint http://fr.archive.ubuntu.com precise/restricted i386 Packages Atteint http://fr.archive.ubuntu.com precise/universe i386 Packages Atteint http://fr.archive.ubuntu.com precise/multiverse i386 Packages Atteint http://fr.archive.ubuntu.com precise/main TranslationIndex Atteint http://fr.archive.ubuntu.com precise/multiverse TranslationIndex Atteint http://fr.archive.ubuntu.com precise/restricted TranslationIndex Atteint http://fr.archive.ubuntu.com precise/universe TranslationIndex Atteint http://fr.archive.ubuntu.com precise/main Translation-fr_FR Atteint http://fr.archive.ubuntu.com precise/main Translation-fr Atteint http://fr.archive.ubuntu.com precise/main Translation-en Atteint http://fr.archive.ubuntu.com precise/multiverse Translation-fr_FR Atteint http://fr.archive.ubuntu.com precise/multiverse Translation-fr Atteint http://extras.ubuntu.com precise/main Sources Atteint http://fr.archive.ubuntu.com precise/multiverse Translation-en Atteint http://fr.archive.ubuntu.com precise/restricted Translation-fr_FR Atteint http://extras.ubuntu.com precise/main i386 Packages Atteint http://fr.archive.ubuntu.com precise/restricted Translation-fr Réception de : 3 http://security.ubuntu.com precise-security/main Sources [71,9 kB] Atteint http://fr.archive.ubuntu.com precise/restricted Translation-en Atteint http://fr.archive.ubuntu.com precise/universe Translation-fr_FR Atteint http://fr.archive.ubuntu.com precise/universe Translation-fr Ign http://extras.ubuntu.com precise/main TranslationIndex Atteint http://fr.archive.ubuntu.com precise/universe Translation-en Atteint http://fr.archive.ubuntu.com precise-updates/main Sources Atteint http://fr.archive.ubuntu.com precise-updates/restricted Sources Atteint http://fr.archive.ubuntu.com precise-updates/universe Sources Atteint http://fr.archive.ubuntu.com precise-updates/multiverse Sources Atteint http://fr.archive.ubuntu.com precise-updates/main i386 Packages Atteint http://fr.archive.ubuntu.com precise-updates/restricted i386 Packages Atteint http://fr.archive.ubuntu.com precise-updates/universe i386 Packages Atteint http://fr.archive.ubuntu.com precise-updates/multiverse i386 Packages Atteint http://fr.archive.ubuntu.com precise-updates/main TranslationIndex Atteint http://fr.archive.ubuntu.com precise-updates/multiverse TranslationIndex Atteint http://fr.archive.ubuntu.com precise-updates/restricted TranslationIndex Atteint http://fr.archive.ubuntu.com precise-updates/universe TranslationIndex Ign http://extras.ubuntu.com precise/main Translation-fr_FR Ign http://extras.ubuntu.com precise/main Translation-fr Atteint http://fr.archive.ubuntu.com precise-backports/main Sources Atteint http://fr.archive.ubuntu.com precise-backports/restricted Sources Atteint http://fr.archive.ubuntu.com precise-backports/universe Sources Atteint http://fr.archive.ubuntu.com precise-backports/multiverse Sources Atteint http://fr.archive.ubuntu.com precise-backports/main i386 Packages Ign http://extras.ubuntu.com precise/main Translation-en Atteint http://fr.archive.ubuntu.com precise-backports/restricted i386 Packages Atteint http://fr.archive.ubuntu.com precise-backports/universe i386 Packages Réception de : 4 http://security.ubuntu.com precise-security/restricted Sources [2 494 B] Atteint http://fr.archive.ubuntu.com precise-backports/multiverse i386 Packages Atteint http://fr.archive.ubuntu.com precise-backports/main TranslationIndex Atteint http://fr.archive.ubuntu.com precise-backports/multiverse TranslationIndex Atteint http://fr.archive.ubuntu.com precise-backports/restricted TranslationIndex Atteint http://fr.archive.ubuntu.com precise-backports/universe TranslationIndex Atteint http://fr.archive.ubuntu.com precise-updates/main Translation-fr Atteint http://fr.archive.ubuntu.com precise-updates/main Translation-en Atteint http://fr.archive.ubuntu.com precise-updates/multiverse Translation-fr Atteint http://fr.archive.ubuntu.com precise-updates/multiverse Translation-en Atteint http://fr.archive.ubuntu.com precise-updates/restricted Translation-fr Atteint http://fr.archive.ubuntu.com precise-updates/restricted Translation-en Réception de : 5 http://security.ubuntu.com precise-security/universe Sources [24,4 kB] Réception de : 6 http://security.ubuntu.com precise-security/multiverse Sources [1 380 B] Réception de : 7 http://security.ubuntu.com precise-security/main i386 Packages [274 kB] Atteint http://fr.archive.ubuntu.com precise-updates/universe Translation-fr Atteint http://fr.archive.ubuntu.com precise-updates/universe Translation-en Atteint http://fr.archive.ubuntu.com precise-backports/main Translation-en Atteint http://fr.archive.ubuntu.com precise-backports/multiverse Translation-en Atteint http://fr.archive.ubuntu.com precise-backports/restricted Translation-en Atteint http://fr.archive.ubuntu.com precise-backports/universe Translation-en Réception de : 8 http://security.ubuntu.com precise-security/restricted i386 Packages [4 620 B] Réception de : 9 http://security.ubuntu.com precise-security/universe i386 Packages [74,8 kB] Réception de : 10 http://security.ubuntu.com precise-security/multiverse i386 Packages [2 375 B] Atteint http://security.ubuntu.com precise-security/main TranslationIndex Atteint http://security.ubuntu.com precise-security/multiverse TranslationIndex Atteint http://security.ubuntu.com precise-security/restricted TranslationIndex Atteint http://security.ubuntu.com precise-security/universe TranslationIndex Atteint http://security.ubuntu.com precise-security/main Translation-en Atteint http://security.ubuntu.com precise-security/multiverse Translation-en Atteint http://security.ubuntu.com precise-security/restricted Translation-en Atteint http://security.ubuntu.com precise-security/universe Translation-en 506 ko réceptionnés en 1min 9s (7 277 o/s) Lecture des listes de paquets... Fait miloud@miloud-System-Product-Name:~$ sudo apt-get upgrade Lecture des listes de paquets... Fait Construction de l'arbre des dépendances Lecture des informations d'état... Fait 0 mis à jour, 0 nouvellement installés, 0 à enlever et 0 non mis à jour. miloud@miloud-System-Product-Name:~$ ola Commande 'ola' non trouvée, vouliez-vous dire : La commande 'cola' du paquet 'git-cola' (universe) La commande 'tla' du paquet 'tla' (universe) La commande 'ols' du paquet 'speech-tools' (universe) ola : commande introuvable miloud@miloud-System-Product-Name:~$ sudo apt-get update Atteint http://extras.ubuntu.com precise Release.gpg Atteint http://archive.canonical.com precise Release.gpg Atteint http://extras.ubuntu.com precise Release Atteint http://archive.canonical.com precise Release Atteint http://extras.ubuntu.com precise/main Sources Atteint http://archive.canonical.com precise/partner i386 Packages Atteint http://extras.ubuntu.com precise/main i386 Packages Ign http://extras.ubuntu.com precise/main TranslationIndex Ign http://archive.canonical.com precise/partner TranslationIndex Ign http://archive.canonical.com precise/partner Translation-fr_FR Ign http://archive.canonical.com precise/partner Translation-fr Ign http://archive.canonical.com precise/partner Translation-en Ign http://extras.ubuntu.com precise/main Translation-fr_FR Ign http://extras.ubuntu.com precise/main Translation-fr Ign http://extras.ubuntu.com precise/main Translation-en Atteint http://fr.archive.ubuntu.com precise Release.gpg Atteint http://fr.archive.ubuntu.com precise-updates Release.gpg Atteint http://fr.archive.ubuntu.com precise-backports Release.gpg Réception de : 1 http://security.ubuntu.com precise-security Release.gpg [198 B] Réception de : 2 http://security.ubuntu.com precise-security Release [49,6 kB] Atteint http://fr.archive.ubuntu.com precise Release Atteint http://fr.archive.ubuntu.com precise-updates Release Atteint http://fr.archive.ubuntu.com precise-backports Release Atteint http://fr.archive.ubuntu.com precise/main Sources Atteint http://fr.archive.ubuntu.com precise/restricted Sources Atteint http://fr.archive.ubuntu.com precise/universe Sources Atteint http://fr.archive.ubuntu.com precise/multiverse Sources Atteint http://fr.archive.ubuntu.com precise/main i386 Packages Atteint http://fr.archive.ubuntu.com precise/restricted i386 Packages Atteint http://fr.archive.ubuntu.com precise/universe i386 Packages Atteint http://fr.archive.ubuntu.com precise/multiverse i386 Packages Atteint http://fr.archive.ubuntu.com precise/main TranslationIndex Atteint http://fr.archive.ubuntu.com precise/multiverse TranslationIndex Atteint http://fr.archive.ubuntu.com precise/restricted TranslationIndex Atteint http://fr.archive.ubuntu.com precise/universe TranslationIndex Atteint http://fr.archive.ubuntu.com precise-updates/main Sources Atteint http://fr.archive.ubuntu.com precise-updates/restricted Sources Atteint http://fr.archive.ubuntu.com precise-updates/universe Sources Atteint http://fr.archive.ubuntu.com precise-updates/multiverse Sources Atteint http://fr.archive.ubuntu.com precise-updates/main i386 Packages Atteint http://fr.archive.ubuntu.com precise-updates/restricted i386 Packages Atteint http://fr.archive.ubuntu.com precise-updates/universe i386 Packages Atteint http://fr.archive.ubuntu.com precise-updates/multiverse i386 Packages Atteint http://fr.archive.ubuntu.com precise-updates/main TranslationIndex Atteint http://fr.archive.ubuntu.com precise-updates/multiverse TranslationIndex Atteint http://fr.archive.ubuntu.com precise-updates/restricted TranslationIndex Atteint http://fr.archive.ubuntu.com precise-updates/universe TranslationIndex Atteint http://fr.archive.ubuntu.com precise-backports/main Sources Atteint http://fr.archive.ubuntu.com precise-backports/restricted Sources Atteint http://fr.archive.ubuntu.com precise-backports/universe Sources Atteint http://fr.archive.ubuntu.com precise-backports/multiverse Sources Atteint http://fr.archive.ubuntu.com precise-backports/main i386 Packages Atteint http://fr.archive.ubuntu.com precise-backports/restricted i386 Packages Atteint http://fr.archive.ubuntu.com precise-backports/universe i386 Packages Atteint http://fr.archive.ubuntu.com precise-backports/multiverse i386 Packages Atteint http://fr.archive.ubuntu.com precise-backports/main TranslationIndex Atteint http://fr.archive.ubuntu.com precise-backports/multiverse TranslationIndex Atteint http://fr.archive.ubuntu.com precise-backports/restricted TranslationIndex Atteint http://fr.archive.ubuntu.com precise-backports/universe TranslationIndex Atteint http://fr.archive.ubuntu.com precise/main Translation-fr_FR Atteint http://fr.archive.ubuntu.com precise/main Translation-fr Atteint http://fr.archive.ubuntu.com precise/main Translation-en Atteint http://fr.archive.ubuntu.com precise/multiverse Translation-fr_FR Atteint http://fr.archive.ubuntu.com precise/multiverse Translation-fr Atteint http://fr.archive.ubuntu.com precise/multiverse Translation-en Atteint http://fr.archive.ubuntu.com precise/restricted Translation-fr_FR Atteint http://fr.archive.ubuntu.com precise/restricted Translation-fr Atteint http://fr.archive.ubuntu.com precise/restricted Translation-en Atteint http://fr.archive.ubuntu.com precise/universe Translation-fr_FR Atteint http://fr.archive.ubuntu.com precise/universe Translation-fr Atteint http://fr.archive.ubuntu.com precise/universe Translation-en Atteint http://fr.archive.ubuntu.com precise-updates/main Translation-fr Atteint http://fr.archive.ubuntu.com precise-updates/main Translation-en Atteint http://fr.archive.ubuntu.com precise-updates/multiverse Translation-fr Atteint http://fr.archive.ubuntu.com precise-updates/multiverse Translation-en Atteint http://fr.archive.ubuntu.com precise-updates/restricted Translation-fr Atteint http://fr.archive.ubuntu.com precise-updates/restricted Translation-en Atteint http://fr.archive.ubuntu.com precise-updates/universe Translation-fr Atteint http://fr.archive.ubuntu.com precise-updates/universe Translation-en Atteint http://fr.archive.ubuntu.com precise-backports/main Translation-en Atteint http://fr.archive.ubuntu.com precise-backports/multiverse Translation-en Atteint http://fr.archive.ubuntu.com precise-backports/restricted Translation-en Atteint http://fr.archive.ubuntu.com precise-backports/universe Translation-en Réception de : 3 http://security.ubuntu.com precise-security/main Sources [71,9 kB] Réception de : 4 http://security.ubuntu.com precise-security/restricted Sources [2 494 B] Réception de : 5 http://security.ubuntu.com precise-security/universe Sources [24,4 kB] Réception de : 6 http://security.ubuntu.com precise-security/multiverse Sources [1 380 B] Réception de : 7 http://security.ubuntu.com precise-security/main i386 Packages [274 kB] Réception de : 8 http://security.ubuntu.com precise-security/restricted i386 Packages [4 620 B] Réception de : 9 http://security.ubuntu.com precise-security/universe i386 Packages [74,8 kB] Réception de : 10 http://security.ubuntu.com precise-security/multiverse i386 Packages [2 375 B] Atteint http://security.ubuntu.com precise-security/main TranslationIndex Atteint http://security.ubuntu.com precise-security/multiverse TranslationIndex Atteint http://security.ubuntu.com precise-security/restricted TranslationIndex Atteint http://security.ubuntu.com precise-security/universe TranslationIndex Atteint http://security.ubuntu.com precise-security/main Translation-en Atteint http://security.ubuntu.com precise-security/multiverse Translation-en Atteint http://security.ubuntu.com precise-security/restricted Translation-en Atteint http://security.ubuntu.com precise-security/universe Translation-en 506 ko réceptionnés en 59s (8 539 o/s) Lecture des listes de paquets... Fait miloud@miloud-System-Product-Name:~$ sudo apt-get upgrade Lecture des listes de paquets... Fait Construction de l'arbre des dépendances Lecture des informations d'état... Fait 0 mis à jour, 0 nouvellement installés, 0 à enlever et 0 non mis à jour. miloud@miloud-System-Product-Name:~$ Merci. Sefiane, Dernière modification par ljere (Le 20/05/2013, à 20:59) Sefiane Hors ligne bahoui Re : [résolu] Logithèque / Synaptic on veut savoir si la logithèque et synaptic fonctionne à nouveau. Mettez [résolu] dans le titre quand le problème est réglé (cliquez sur "modifier" dans votre 1er message). Hors ligne sefiane Re : [résolu] Logithèque / Synaptic Salut bahoui Rien a faire, je n'ai pas compris ce que m'a dit Shanx ,a écrit : Ensuite, place les retours des commandes entre balises code (ce symbole au dessus de la zone où tu tapes ton message : ). J’ai modifié tes messages précédents pour les ajouter, mais à l’avenir ce serait bien que tu le fasse toi même. Comment pourrais-je avoir le symbole ? Je suis trop curieux de voir comment ça se passe ! Merci de votre attention ! Sefiane, Sefiane Hors ligne moko138 Re : [résolu] Logithèque / Synaptic Les balises-code sont les <> bleus de la barre de mise en forme. (Sauf si tu as bloqué le téléchargement des images). Tu copies dans le terminal le retour de ta commande, puis dans le navigateur tu cliques sur les balises, tu colles aussitôt et tu obtiens ceci : miloud@miloud-System-Product-Name:~$ sudo apt-get update (...) 0 mis à jour, 0 nouvellement installés, 0 à enlever et 0 non mis à jour. miloud@miloud-System-Product-Name:~$ on veut savoir si la logithèque et synaptic fonctionne à nouveau. Hors ligne sefiane Re : [résolu] Logithèque / Synaptic Salut moko138 ! Les balises de codes dont vous parler , je les ai vu qu'une fois, depuis ce temps je n'ai les pas revu ! Certainement le téléchargement des images est bloqué comme vous dites ! bon , quoi faire ensuite ! merci , Sefiane, Sefiane Hors ligne moko138 Re : [résolu] Logithèque / Synaptic Encore une fois, colle dans un terminal: sudo apt-get update && sudo apt-get upgrade puis dis-nous si la logithèque et synaptic fonctionnent à nouveau. Quand tu tapes sur le forum un message, si tu vois juste au-dessusB I U alors les crochets bleus sont la 7ème icône après le U. Si tu ne vois rien de cela, active l'affichage des images dans Firefox. Hors ligne sefiane Re : [résolu] Logithèque / Synaptic Bonjour moko138 , J'ai exécuté ce que vous venez de me dire ! Voici le résulta ci-joint du terminal ! je crois que l'opération n'a pas aboutie ? Par ailleurs je pense qu'il m'ont donné des commandes d’annulation de logithèque et synaptic Qu'est-vous en dites de ces commandes :----sudo apt-get remove--------sudo apt-get autoremove Alors, qu'est vous pensez de me donner des commandes de désinstallation et réinstallation ? Pour activer l'affichage des images dans Firefox , c'est quoi au juste ? Je n'ai su les faire ! Merci, Sefiane, e[sudo] password for miloud: Atteint http://archive.canonical.com precise Release.gpg Atteint http://fr.archive.ubuntu.com precise Release.gpg Atteint http://fr.archive.ubuntu.com precise-updates Release.gpg Atteint http://fr.archive.ubuntu.com precise-backports Release.gpg Atteint http://security.ubuntu.com precise-security Release.gpg Atteint http://extras.ubuntu.com precise Release.gpg Atteint http://archive.canonical.com precise Release Atteint http://fr.archive.ubuntu.com precise Release Atteint http://security.ubuntu.com precise-security Release Atteint http://extras.ubuntu.com precise Release Atteint http://fr.archive.ubuntu.com precise-updates Release Atteint http://fr.archive.ubuntu.com precise-backports Release Atteint http://archive.canonical.com precise/partner i386 Packages Atteint http://fr.archive.ubuntu.com precise/main Sources Atteint http://security.ubuntu.com precise-security/main Sources Atteint http://extras.ubuntu.com precise/main Sources Ign http://archive.canonical.com precise/partner TranslationIndex Atteint http://fr.archive.ubuntu.com precise/restricted Sources Atteint http://fr.archive.ubuntu.com precise/universe Sources Atteint http://fr.archive.ubuntu.com precise/multiverse Sources Atteint http://fr.archive.ubuntu.com precise/main i386 Packages Atteint http://fr.archive.ubuntu.com precise/restricted i386 Packages Atteint http://fr.archive.ubuntu.com precise/universe i386 Packages Atteint http://fr.archive.ubuntu.com precise/multiverse i386 Packages Atteint http://fr.archive.ubuntu.com precise/main TranslationIndex Atteint http://fr.archive.ubuntu.com precise/multiverse TranslationIndex Atteint http://security.ubuntu.com precise-security/restricted Sources Atteint http://security.ubuntu.com precise-security/universe Sources Atteint http://security.ubuntu.com precise-security/multiverse Sources Atteint http://security.ubuntu.com precise-security/main i386 Packages Atteint http://security.ubuntu.com precise-security/restricted i386 Packages Atteint http://security.ubuntu.com precise-security/universe i386 Packages Atteint http://security.ubuntu.com precise-security/multiverse i386 Packages Atteint http://security.ubuntu.com precise-security/main TranslationIndex Atteint http://security.ubuntu.com precise-security/multiverse TranslationIndex Atteint http://security.ubuntu.com precise-security/restricted TranslationIndex Atteint http://extras.ubuntu.com precise/main i386 Packages Ign http://extras.ubuntu.com precise/main TranslationIndex Atteint http://fr.archive.ubuntu.com precise/restricted TranslationIndex Atteint http://fr.archive.ubuntu.com precise/universe TranslationIndex Atteint http://fr.archive.ubuntu.com precise-updates/main Sources Atteint http://fr.archive.ubuntu.com precise-updates/restricted Sources Atteint http://fr.archive.ubuntu.com precise-updates/universe Sources Atteint http://fr.archive.ubuntu.com precise-updates/multiverse Sources Atteint http://security.ubuntu.com precise-security/universe TranslationIndex Atteint http://fr.archive.ubuntu.com precise-updates/main i386 Packages Atteint http://fr.archive.ubuntu.com precise-updates/restricted i386 Packages Atteint http://fr.archive.ubuntu.com precise-updates/universe i386 Packages Atteint http://fr.archive.ubuntu.com precise-updates/multiverse i386 Packages Atteint http://fr.archive.ubuntu.com precise-updates/main TranslationIndex Atteint http://fr.archive.ubuntu.com precise-updates/multiverse TranslationIndex Atteint http://fr.archive.ubuntu.com precise-updates/restricted TranslationIndex Atteint http://fr.archive.ubuntu.com precise-updates/universe TranslationIndex Atteint http://fr.archive.ubuntu.com precise-backports/main Sources Atteint http://security.ubuntu.com precise-security/main Translation-en Atteint http://security.ubuntu.com precise-security/multiverse Translation-en Atteint http://fr.archive.ubuntu.com precise-backports/restricted Sources Atteint http://security.ubuntu.com precise-security/restricted Translation-en Atteint http://fr.archive.ubuntu.com precise-backports/universe Sources Atteint http://fr.archive.ubuntu.com precise-backports/multiverse Sources Atteint http://fr.archive.ubuntu.com precise-backports/main i386 Packages Atteint http://fr.archive.ubuntu.com precise-backports/restricted i386 Packages Atteint http://fr.archive.ubuntu.com precise-backports/universe i386 Packages Atteint http://fr.archive.ubuntu.com precise-backports/multiverse i386 Packages Atteint http://fr.archive.ubuntu.com precise-backports/main TranslationIndex Atteint http://fr.archive.ubuntu.com precise-backports/multiverse TranslationIndex Atteint http://fr.archive.ubuntu.com precise-backports/restricted TranslationIndex Atteint http://security.ubuntu.com precise-security/universe Translation-en Atteint http://fr.archive.ubuntu.com precise-backports/universe TranslationIndex Atteint http://fr.archive.ubuntu.com precise/main Translation-fr_FR Atteint http://fr.archive.ubuntu.com precise/main Translation-fr Atteint http://fr.archive.ubuntu.com precise/main Translation-en Atteint http://fr.archive.ubuntu.com precise/multiverse Translation-fr_FR Atteint http://fr.archive.ubuntu.com precise/multiverse Translation-fr Atteint http://fr.archive.ubuntu.com precise/multiverse Translation-en Atteint http://fr.archive.ubuntu.com precise/restricted Translation-fr_FR Atteint http://fr.archive.ubuntu.com precise/restricted Translation-fr Atteint http://fr.archive.ubuntu.com precise/restricted Translation-en Atteint http://fr.archive.ubuntu.com precise/universe Translation-fr_FR Atteint http://fr.archive.ubuntu.com precise/universe Translation-fr Atteint http://fr.archive.ubuntu.com precise/universe Translation-en Atteint http://fr.archive.ubuntu.com precise-updates/main Translation-fr Atteint http://fr.archive.ubuntu.com precise-updates/main Translation-en Atteint http://fr.archive.ubuntu.com precise-updates/multiverse Translation-fr Atteint http://fr.archive.ubuntu.com precise-updates/multiverse Translation-en Atteint http://fr.archive.ubuntu.com precise-updates/restricted Translation-fr Atteint http://fr.archive.ubuntu.com precise-updates/restricted Translation-en Atteint http://fr.archive.ubuntu.com precise-updates/universe Translation-fr Atteint http://fr.archive.ubuntu.com precise-updates/universe Translation-en Ign http://extras.ubuntu.com precise/main Translation-fr_FR Ign http://extras.ubuntu.com precise/main Translation-fr Ign http://extras.ubuntu.com precise/main Translation-en Atteint http://fr.archive.ubuntu.com precise-backports/main Translation-en Atteint http://fr.archive.ubuntu.com precise-backports/multiverse Translation-en Atteint http://fr.archive.ubuntu.com precise-backports/restricted Translation-en Atteint http://fr.archive.ubuntu.com precise-backports/universe Translation-en Ign http://archive.canonical.com precise/partner Translation-fr_FR Ign http://archive.canonical.com precise/partner Translation-fr Ign http://archive.canonical.com precise/partner Translation-en Lecture des listes de paquets... Fait Lecture des listes de paquets... Fait Construction de l'arbre des dépendances Lecture des informations d'état... Fait Les paquets suivants seront mis à jour : bash-completion 1 mis à jour, 0 nouvellement installés, 0 à enlever et 0 non mis à jour. Il est nécessaire de prendre 134 ko dans les archives. Après cette opération, 0 o d'espace disque supplémentaires seront utilisés. Souhaitez-vous continuer [O/n] ? o Réception de : 1 http://fr.archive.ubuntu.com/ubuntu/ precise-updates/main bash-completion all 1:1.3-1ubuntu8.1 [134 kB] 134 ko réceptionnés en 2s (53,6 ko/s) (Lecture de la base de données... 236748 fichiers et répertoires déjà installés.) Préparation du remplacement de bash-completion 1:1.3-1ubuntu8 (en utilisant .../bash-completion_1%3a1.3-1ubuntu8.1_all.deb) ... Dépaquetage de la mise à jour de bash-completion ... Traitement des actions différées (« triggers ») pour « man-db »... Paramétrage de bash-completion (1:1.3-1ubuntu8.1) ... Installation de la nouvelle version du fichier de configuration /etc/bash_completion.d/tar ... miloud@miloud-System-Product-Name:~$ Dernière modification par ljere (Le 20/05/2013, à 21:01) Sefiane Hors ligne sefiane Re : [résolu] Logithèque / Synaptic Bonjour moko138 j'espère que vous comprenez les fautes d’orthographes : c'est-ce -----au lieu de Qu'est vous ? merci, Sefiane, Sefiane Hors ligne moko138 Re : [résolu] Logithèque / Synaptic Bonjour sefiane, 1) Je te propose qu'on se tutoie. 2) Une excellente chose est que ton dernier rapport ne se termine pas par des messages d'erreur. 3) Ta demande initiale portait sur la Logithèque et sur synaptic. 3.a) A présent, as-tu de nouveau accès à la Logithèque ? 3.b) Et colle dans un terminal gksudo synaptic est-ce que synaptic se lance ? Si non, colle ici la réponse du terminal. 4) Affichage des images : - Si ton navigateur est Firefox, menu Edition/ Préférences/ onglet "contenu", coche "charger les images automatiquement". - Si ton navigateur est autre, à toi de trouver où est le réglage. Hors ligne
Audiofeeline Modifier GRUB avec GRUB CUSTOMIZER Bonjour à tous, alors que je surfais paisiblement, je suis tombé sur un article de Tux-Planet qui présente GRUB CUSTOMIZER : http://www.tux-planet.fr/grub-customizer/ Je tenais à vous en faire part car ça faisait un petit moment que je cherchais une telle solution. Bien à vous ! Hors ligne @nne Re : Modifier GRUB avec GRUB CUSTOMIZER Merci. Qui l'essaye en premier ? Hurricane Spoky Re : Modifier GRUB avec GRUB CUSTOMIZER Moi, je vous tiens au courant. Hors ligne Hurricane Spoky Re : Modifier GRUB avec GRUB CUSTOMIZER Euh salut a tous, petit problème au niveau de l'installation, regardez Les NOUVEAUX paquets suivants seront installés : grub-customizer hwinfo libhd16 0 mis à jour, 3 nouvellement installés, 0 à enlever et 0 non mis à jour. Il est nécessaire de prendre 981ko dans les archives. Après cette opération, 2 998ko d'espace disque supplémentaires seront utilisés. Souhaitez-vous continuer [O/n] ? O Annulation. root@NSA:/home/archer# Au niveau de "Souhaitez-vous continuer [O/n]", euh... au dernières nouvelles il faut appuyer sur le "o" + MAJ non ? Eh bien regardez Souhaitez-vous continuer [O/n] ? O Annulation. Interessant non ? Help me ! Hors ligne @nne Re : Modifier GRUB avec GRUB CUSTOMIZER Tu veux dire "o" plus valider, je suppose. Moi je mets toujours "o" minuscule, essaie "y" pour voir ? Dernière modification par @nne (Le 07/01/2011, à 21:27) Hurricane Spoky Re : Modifier GRUB avec GRUB CUSTOMIZER Pour "y" Les NOUVEAUX paquets suivants seront installés : grub-customizer hwinfo libhd16 0 mis à jour, 3 nouvellement installés, 0 à enlever et 0 non mis à jour. Il est nécessaire de prendre 981ko dans les archives. Après cette opération, 2 998ko d'espace disque supplémentaires seront utilisés. Souhaitez-vous continuer [O/n] ? y Annulation. Pour "o" Les NOUVEAUX paquets suivants seront installés : grub-customizer hwinfo libhd16 0 mis à jour, 3 nouvellement installés, 0 à enlever et 0 non mis à jour. Il est nécessaire de prendre 981ko dans les archives. Après cette opération, 2 998ko d'espace disque supplémentaires seront utilisés. Souhaitez-vous continuer [O/n] ? o Annulation. archer@NSA:~$ Et oui "o" + ↳ (Entrée) Dernière modification par iSpoky93LoL (Le 07/01/2011, à 21:34) Hors ligne @nne Re : Modifier GRUB avec GRUB CUSTOMIZER Et en essayant "n" ? (Parti comme c'est ... ) Hurricane Spoky Re : Modifier GRUB avec GRUB CUSTOMIZER n ? pour no, je suppose ? Ca n'annulera pas la commande ? Les NOUVEAUX paquets suivants seront installés : grub-customizer hwinfo libhd16 0 mis à jour, 3 nouvellement installés, 0 à enlever et 0 non mis à jour. Il est nécessaire de prendre 981ko dans les archives. Après cette opération, 2 998ko d'espace disque supplémentaires seront utilisés. Souhaitez-vous continuer [O/n] ? n Annulation. archer@NSA:~$ Hors ligne @nne Re : Modifier GRUB avec GRUB CUSTOMIZER Si mais là au moins c'est normal !!! Bon ben t'as plus qu'à poster tout ça dans les commentaire sur le site où tu as pris les instructions. Édit : au fait, tu n'as pas eu de problèmes à l'ajout du PPA ? Dernière modification par @nne (Le 07/01/2011, à 21:45) Hurricane Spoky Re : Modifier GRUB avec GRUB CUSTOMIZER L'ajout du PPA ??? Moi, j'ai suivi cette commande : sudo add-apt-repository ppa:danielrichter2007/grub-customizer sudo apt-get update sudo apt-get install grub-customizer sudo cp -Rp /etc/grub.d /etc/grub.d.ori Et le retour root@NSA:/home/archer# sudo add-apt-repository ppa:danielrichter2007/grub-customizer Executing: gpg --ignore-time-conflict --no-options --no-default-keyring --secret-keyring /etc/apt/secring.gpg --trustdb-name /etc/apt/trustdb.gpg --keyring /etc/apt/trusted.gpg --primary-keyring /etc/apt/trusted.gpg --keyserver keyserver.ubuntu.com --recv 59DAD276B942642B1BBD0EACA8AA1FAA3F055C03 gpg: requête de la clé 3F055C03 du serveur hkp keyserver.ubuntu.com gpg: clé 3F055C03: « Launchpad PPA for Daniel Richter » n'a pas changé gpg: Quantité totale traitée: 1 gpg: inchangée: 1 root@NSA:/home/archer# sudo apt-get update Atteint http://fr.archive.ubuntu.com maverick Release.gpg Ign http://fr.archive.ubuntu.com/ubuntu/ maverick/main Translation-en Atteint http://fr.archive.ubuntu.com/ubuntu/ maverick/main Translation-fr Ign http://fr.archive.ubuntu.com/ubuntu/ maverick/multiverse Translation-en Atteint http://fr.archive.ubuntu.com/ubuntu/ maverick/multiverse Translation-fr Ign http://fr.archive.ubuntu.com/ubuntu/ maverick/restricted Translation-en Atteint http://fr.archive.ubuntu.com/ubuntu/ maverick/restricted Translation-fr Atteint http://archive.canonical.com maverick Release.gpg Ign http://archive.canonical.com/ubuntu/ maverick/partner Translation-en Ign http://fr.archive.ubuntu.com/ubuntu/ maverick/universe Translation-en Atteint http://fr.archive.ubuntu.com/ubuntu/ maverick/universe Translation-fr Atteint http://fr.archive.ubuntu.com maverick-updates Release.gpg Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-updates/main Translation-en Atteint http://archive.ubuntu.com maverick Release.gpg Ign http://archive.canonical.com/ubuntu/ maverick/partner Translation-fr Atteint http://archive.canonical.com maverick Release.gpg Ign http://archive.canonical.com/ maverick/partner Translation-en Ign http://archive.canonical.com/ maverick/partner Translation-fr Atteint http://extras.ubuntu.com maverick Release.gpg Ign http://extras.ubuntu.com/ubuntu/ maverick/main Translation-en Atteint http://ppa.launchpad.net maverick Release.gpg Ign http://ppa.launchpad.net/danielrichter2007/grub-customizer/ubuntu/ maverick/main Translation-en Ign http://ppa.launchpad.net/danielrichter2007/grub-customizer/ubuntu/ maverick/main Translation-fr Ign http://extras.ubuntu.com/ubuntu/ maverick/main Translation-fr Atteint http://archive.ubuntu.com maverick Release Atteint http://extras.ubuntu.com maverick Release Atteint http://ppa.launchpad.net maverick Release Atteint http://archive.ubuntu.com maverick/main Sources Atteint http://archive.ubuntu.com maverick/restricted Sources Atteint http://extras.ubuntu.com maverick/main Sources Atteint http://archive.canonical.com maverick Release Atteint http://extras.ubuntu.com maverick/main i386 Packages Atteint http://archive.canonical.com maverick Release Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-updates/main Translation-fr Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-updates/multiverse Translation-en Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-updates/multiverse Translation-fr Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-updates/restricted Translation-en Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-updates/restricted Translation-fr Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-updates/universe Translation-en Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-updates/universe Translation-fr Atteint http://fr.archive.ubuntu.com maverick-security Release.gpg Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-security/main Translation-en Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-security/main Translation-fr Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-security/multiverse Translation-en Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-security/multiverse Translation-fr Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-security/restricted Translation-en Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-security/restricted Translation-fr Atteint http://ppa.launchpad.net maverick/main Sources Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-security/universe Translation-en Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-security/universe Translation-fr Atteint http://fr.archive.ubuntu.com maverick-proposed Release.gpg Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-proposed/main Translation-en Atteint http://fr.archive.ubuntu.com/ubuntu/ maverick-proposed/main Translation-fr Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-proposed/multiverse Translation-en Atteint http://fr.archive.ubuntu.com/ubuntu/ maverick-proposed/multiverse Translation-fr Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-proposed/restricted Translation-en Atteint http://fr.archive.ubuntu.com/ubuntu/ maverick-proposed/restricted Translation-fr Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-proposed/universe Translation-en Atteint http://fr.archive.ubuntu.com/ubuntu/ maverick-proposed/universe Translation-fr Atteint http://fr.archive.ubuntu.com maverick-backports Release.gpg Atteint http://ppa.launchpad.net maverick/main i386 Packages Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-backports/main Translation-en Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-backports/main Translation-fr Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-backports/multiverse Translation-en Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-backports/multiverse Translation-fr Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-backports/restricted Translation-en Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-backports/restricted Translation-fr Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-backports/universe Translation-en Ign http://fr.archive.ubuntu.com/ubuntu/ maverick-backports/universe Translation-fr Atteint http://fr.archive.ubuntu.com maverick Release Atteint http://fr.archive.ubuntu.com maverick-updates Release Atteint http://archive.canonical.com maverick/partner Sources Atteint http://fr.archive.ubuntu.com maverick-security Release Atteint http://fr.archive.ubuntu.com maverick-proposed Release Atteint http://fr.archive.ubuntu.com maverick-backports Release Atteint http://fr.archive.ubuntu.com maverick/restricted Sources Atteint http://fr.archive.ubuntu.com maverick/main Sources Atteint http://fr.archive.ubuntu.com maverick/multiverse Sources Atteint http://fr.archive.ubuntu.com maverick/universe Sources Atteint http://fr.archive.ubuntu.com maverick/main i386 Packages Atteint http://fr.archive.ubuntu.com maverick/restricted i386 Packages Atteint http://fr.archive.ubuntu.com maverick/universe i386 Packages Atteint http://fr.archive.ubuntu.com maverick/multiverse i386 Packages Atteint http://fr.archive.ubuntu.com maverick-updates/restricted Sources Atteint http://fr.archive.ubuntu.com maverick-updates/main Sources Atteint http://fr.archive.ubuntu.com maverick-updates/multiverse Sources Atteint http://fr.archive.ubuntu.com maverick-updates/universe Sources Atteint http://fr.archive.ubuntu.com maverick-updates/main i386 Packages Atteint http://fr.archive.ubuntu.com maverick-updates/restricted i386 Packages Atteint http://fr.archive.ubuntu.com maverick-updates/universe i386 Packages Atteint http://fr.archive.ubuntu.com maverick-updates/multiverse i386 Packages Atteint http://fr.archive.ubuntu.com maverick-security/restricted Sources Atteint http://fr.archive.ubuntu.com maverick-security/main Sources Atteint http://fr.archive.ubuntu.com maverick-security/multiverse Sources Atteint http://fr.archive.ubuntu.com maverick-security/universe Sources Atteint http://fr.archive.ubuntu.com maverick-security/main i386 Packages Atteint http://fr.archive.ubuntu.com maverick-security/restricted i386 Packages Atteint http://fr.archive.ubuntu.com maverick-security/universe i386 Packages Atteint http://fr.archive.ubuntu.com maverick-security/multiverse i386 Packages Atteint http://fr.archive.ubuntu.com maverick-proposed/restricted Sources Atteint http://fr.archive.ubuntu.com maverick-proposed/main Sources Atteint http://fr.archive.ubuntu.com maverick-proposed/multiverse Sources Atteint http://fr.archive.ubuntu.com maverick-proposed/universe Sources Atteint http://fr.archive.ubuntu.com maverick-proposed/restricted i386 Packages Atteint http://fr.archive.ubuntu.com maverick-proposed/main i386 Packages Atteint http://fr.archive.ubuntu.com maverick-proposed/multiverse i386 Packages Atteint http://fr.archive.ubuntu.com maverick-proposed/universe i386 Packages Atteint http://fr.archive.ubuntu.com maverick-backports/restricted i386 Packages Atteint http://fr.archive.ubuntu.com maverick-backports/main i386 Packages Atteint http://fr.archive.ubuntu.com maverick-backports/multiverse i386 Packages Atteint http://fr.archive.ubuntu.com maverick-backports/universe i386 Packages Atteint http://archive.canonical.com maverick/partner i386 Packages Atteint http://archive.canonical.com maverick/partner i386 Packages Lecture des listes de paquets... Fait root@NSA:/home/archer# sudo apt-get install grub-customizer Lecture des listes de paquets... Fait Construction de l'arbre des dépendances Lecture des informations d'état... Fait Les paquets suivants ont été installés automatiquement et ne sont plus nécessaires : libwxgtk2.8-0 libwxbase2.8-0 wine1.2-gecko python-wxversion python-wxgtk2.8 winbind Veuillez utiliser « apt-get autoremove » pour les supprimer. Les paquets supplémentaires suivants seront installés : hwinfo libhd16 Les NOUVEAUX paquets suivants seront installés : grub-customizer hwinfo libhd16 0 mis à jour, 3 nouvellement installés, 0 à enlever et 0 non mis à jour. Il est nécessaire de prendre 981ko dans les archives. Après cette opération, 2 998ko d'espace disque supplémentaires seront utilisés. Souhaitez-vous continuer [O/n] ? O Annulation. root@NSA:/home/archer# J'ai posté le problème sur le forum de TuxPlanet A cette adresse : http://www.tux-planet.fr/forum/viewtopi … d=731#p731 Dernière modification par iSpoky93LoL (Le 07/01/2011, à 21:52) Hors ligne eric63 Re : Modifier GRUB avec GRUB CUSTOMIZER Au niveau de "Souhaitez-vous continuer [O/n]" , euh... au dernières nouvelles il faut appuyer sur le "o" + MAJ non ? Eh bien regardez Souhaitez-vous continuer [O/n] ? O Annulation. même problème. J'ai outrepassé la difficulté en ouvrant kpackagekit ou synaptic. j'ai installé grub customiser; puis j'ai sauvegardé en konsole mon grub en grub.ori comme indiqué. j'ai ouvert grub customizer et je ne trouve pas qu'il simplifie trop les choses (peu d'explication aux différentes possibilités) --ce que je trouve pas top ou que je ne comprend pas. linux-xen - késako ? j'ai deux custom ? pourquoi dans les préférences entrée 1 (par emplacement) et ubuntu , with linux 2.6.35-24-generic je ne comprends pas la différence l'image de fond ne fonctionne pas (c'est pourtant un truc qui semble facile à appréhender) ça n'a rien changé dans la façon dont kubuntu s'amorce (l'image ne fonctionnait pas avant; message d'erreur tjs d'actualité) interface gtk beurk (mais bon c'est pas le problème) -- les plus bémolés heu au final je n'en trouve pas beaucoup accès facilité pour sélection du boot (quoique si on désélectionne ils restent dans la liste quand même donc je craint pour les 50 prochaines versions du kernel) un visuel des options (même si elles sont peu lisibles sur leurs fonctions) a suivre car il semble quand même prometteur Dernière modification par eric63 (Le 07/01/2011, à 22:29) Hors ligne Hurricane Spoky Re : Modifier GRUB avec GRUB CUSTOMIZER Help us ! Meme sous Kubuntu, il y a un problème ? Hors ligne eric63 Re : Modifier GRUB avec GRUB CUSTOMIZER oui même beug sous kubuntu 10.10 et kde 4.5.3 c'est pareil Hors ligne @nne Re : Modifier GRUB avec GRUB CUSTOMIZER ... J'ai posté le problème sur le forum de TuxPlanet A cette adresse : http://www.tux-planet.fr/forum/viewtopi … d=731#p731 Bien fait ! Apparemment, c'est encore une daube dont on peut se passer. Hurricane Spoky Re : Modifier GRUB avec GRUB CUSTOMIZER C'est pas cool, elle avait l'air intéressante cette interface graphique. Et le forum de Tux Planet n'est pas aussi repondant (enfin pas aussi rapidement) que celui d'Ubuntu ! Hors ligne eric63 Re : Modifier GRUB avec GRUB CUSTOMIZER [O/n] on peut faire de deux façon soit o pour oui le majuscule n'est pas obligatoire soit la touche [enter] qui valide la lettre en majuscule par défaut içi le o j'ai pas essayé avec y pour yes Hors ligne Hurricane Spoky Re : Modifier GRUB avec GRUB CUSTOMIZER Nobody else can propose someone solutions ? Hors ligne Gemnoc Re : Modifier GRUB avec GRUB CUSTOMIZER Bonjour, Nobody else can propose someone solutions ? Tu as essayé ce qu'eric63 proposait ? J'ai outrepassé la difficulté en ouvrant kpackagekit ou synaptic. Au passage, c'est quoi cette idée d'écrire en anglais ici, c'est pour faire cool ? (je ne suis pas anglophobe, bien au contraire, mais c'est à mon avis déplacé d'en faire l'usage ici) Pour ma part j'aime bien cet outil, mais j'étais déjà plutôt familier avec la configuration manuelle de Grub. linux-xen - késako ? j'ai deux custom ? pourquoi Curieux, je n'ai pas ça, tel que le montre la capture d'écran que j'ai mise dans la doc. dans les préférences entrée 1 (par emplacement) et ubuntu , with linux 2.6.35-24-generic je ne comprends pas la différence Je n'en suis pas certain, mais je crois que « entrée 1 (par emplacement) » sélectionnera n'importe quel noyau qui se trouve en haut de la liste. Tu auras remarqué que dans la fenêtre principale, des boutons permettent de déplacer vers le haut ou le bas les entrées de la liste. Si tu choisis plutôt « ubuntu , with linux 2.6.35-24-generic », alors le démarrage se fera avec ce noyau, peu importe son classement dans la liste. Par exemple, lors de la prochaine mise à jour de noyau, disons le 35-25, le 35-24 sera relégué au second rang (donc entrée 2). « entrée 1 » va alors utiliser le noyau plus récent. l'image de fond ne fonctionne pas (c'est pourtant un truc qui semble facile à appréhender) ça n'a rien changé dans la façon dont kubuntu s'amorce (l'image ne fonctionnait pas avant; message d'erreur tjs d'actualité) J'ai eu aussi ce problème. Dans la doc anglophone de GRUB2, on indique que les formats d'image supportés sont le PNG, le TGA et le JPEG 8-bits. Mon image était une jpg, mais je ne sais pas comment régler le nombre de bits dans Gimp. J'ai donc converti mon image en PNG, et ça fonctionne. J'y vois même un avantage : ayant une carte Nvidia avec pilote proprio activé, j'ai dû appliquer une des modifs proposées dans la doc Plymouth pour que l'affichage soit correct, mais j'avais un écran noir pendant 10 secondes entre le menu Grub et l'écran de Plymouth. Au moins maintenant, c'est un fond d'écran. interface gtk beurk (mais bon c'est pas le problème) Je pourrais dire la même chose des applications KDE sous GNOME. Pensez à consulter la documentation ! :-) Hors ligne Hurricane Spoky Re : Modifier GRUB avec GRUB CUSTOMIZER Désolé pour l'anglais, mais je le suis presque de naissance ^^' Hors ligne Hurricane Spoky Re : Modifier GRUB avec GRUB CUSTOMIZER Ne me bannis pas du forum, je t'en supplie ! Hors ligne Gemnoc Re : Modifier GRUB avec GRUB CUSTOMIZER Salut, Tu aurais très bien pu écrire tes deux réponses en une seule. Par ailleurs, tu n'as rien à craindre de moi, je ne suis pas un modérateur du forum, seulement un administrateur de la documentation. Mais tu ne dis pas si tu as essayé d'installer Grub-customizer par KPackageKit ou Synaptic ? En tout cas, moi je crois que je l'ai définitivement adopté, on peut presque tout changer avec. Je viens de l'installer sur mon second PC (un portable). Dernière modification par Gemnoc (Le 09/01/2011, à 03:07) Pensez à consulter la documentation ! :-) Hors ligne Votezblanc Re : Modifier GRUB avec GRUB CUSTOMIZER J'ai 2 versions d'Ubuntu sur mon PC (Kubuntu et Ubuntu Studio) donc je me demandais s'il étais possible de changer le nom des entrées. Dans Grub customizer si on clique deux fois sur une entrée il nous propose de la renommer mais je ne veux pas faire de bétises... bon, je teste et reviens vous dire ça. Edit : Ok, ça marche sans problèmes! Ce qui est plutôt pratique sinon mes deux Ubuntu s'affichent avec le même nom. Dernière modification par Votezblanc (Le 11/01/2011, à 14:53) Hors ligne @nne Re : Modifier GRUB avec GRUB CUSTOMIZER ... Mais tu ne dis pas si tu as essayé d'installer Grub-customizer par KPackageKit ou Synaptic ? Il n'est pas dans synaptic. eric63 Re : Modifier GRUB avec GRUB CUSTOMIZER Il n'est pas dans synaptic. des fois que tu aurais oublié une étape, je détaille mon cheminement: tu as installé le ppa ? sudo add-apt-repository ppa:danielrichter2007/grub-customizer et fait un sudo apt-get update ? avec le terminal comme il est dit au début de la doc. une fois fait ça, j'ai laissé ouvert le terminal et commencé à utiliser kpackagekit à ce moment là. (je suppose que c'est pareil pour synaptic) j'ai demandé une mise à jour manuelle il a suffit que je recherche grub et je suis tombé dessus et j'ai pu finir le processus avec les dépendances demandées. Hors ligne @nne Re : Modifier GRUB avec GRUB CUSTOMIZER Encore plus simple, une fois le PPA installé, il fallait faire : sudo apt-get install <nom_du_paquet>