id
stringlengths 5
11
| text
stringlengths 0
146k
| title
stringclasses 1
value |
|---|---|---|
doc_23530200
|
When I try to request the /result route I get the error below:
Error: Can't set headers after they are sent.
at validateHeader (_http_outgoing.js:494:11)
at ServerResponse.setHeader (_http_outgoing.js:501:3)
at ServerResponse.header (/home/cabox/workspace/APIs/movie_search_app/node_modules/express/lib/response.js:767:10)
at ServerResponse.contentType (/home/cabox/workspace/APIs/movie_search_app/node_modules/express/lib/response.js:595:15)
at ServerResponse.send (/home/cabox/workspace/APIs/movie_search_app/node_modules/express/lib/response.js:145:14)
at Request._callback (/home/cabox/workspace/APIs/movie_search_app/app.js:18:13)
at Request.self.callback (/home/cabox/workspace/APIs/movie_search_app/node_modules/request/request.js:186:22)
at emitTwo (events.js:126:13)
at Request.emit (events.js:214:7)
at Request.<anonymous> (/home/cabox/workspace/APIs/movie_search_app/node_modules/request/request.js:1163:10)
The full code in my app.js file is below:
var express = require("express");
var app = express();
var request = require("request");
app.set("view engine", "ejs")
app.get("/", function(req, res){
res.send("Hello, it workds! ... and this is the home page");
});
app.get("/results", function(req, res){
res.send("Hello, it workds!");
request("http://omdbapi.com/?s=california&apikey=thewdb", function(error, response, body){
if (!error && response.statusCode ==200) {
var data = JSON.parse(body);
res.send(results.search(0).title);
res.render("results", {data: data});
}
});
});
app.listen(3000,function(){
console.log("Movie App has started!!!");
});
Sincerely hope someone can help provide some guidance on how to handle/resolve this error.
A: You send already a string as response and try later to send the rendered page to the client, too, which isn't working...
app.get("/results", function(req, res){
[...]
if (!error && response.statusCode ==200) {
[...]
// HERE IS YOUR PROBLEM
res.send(results.search(0).title);
res.render("results", {data: data});
}
});
What does "res.render" do, and what does the html file look like?
http://expressjs.com/en/api.html
A: @moneydhaze thanks for your response. You made me look through the code thoroughly and start simply. It appears the res.send and res.render were conflicting. When I took out the res.render and simplified the code, it worked. See the simplified code below:
var express = require("express");
var app = express();
var request = require("request");
app.set("view engine", "ejs")
app.get("/", function(req, res){
res.send("This is the Home Page")
});
app.get("/results", function(req, res){
request("http://www.omdbapi.com/?s=guardians+of+the+galaxy&apikey=thewdb", function(error, response, body){
if (!error && response.statusCode ==200){
res.send(body);
}
})
});
app.listen(3000,function(){
console.log("Movie App has started!!!");
});
| |
doc_23530201
|
@Named
@ApplicationScoped
@AnBasicDAO
public class BasicDAOImpl implements BasicDAO, Serializable {
/**
*
*/
private static final long serialVersionUID = 1L;
@PersistenceContext(unitName="generalPU")
protected EntityManager entityManager;
This is my persistence.xml:
<persistence-unit name="generalPU" transaction-type="RESOURCE_LOCAL">
<provider>org.hibernate.ejb.HibernatePersistence</provider>
<non-jta-data-source>java:/comp/env/jdbc/testeDS</non-jta-data-source>
<properties>
<property name="hibernate.show_sql" value="false" />
<property name="hibernate.format_sql" value="false" />
<property name="hibernate.hbm2ddl.auto" value="update" />
<property name="hibernate.cache.use_second_level_cache"
value="false" />
<property name="hibernate.cache.use_query_cache" value="false" />
<property name="hibernate.jdbc.batch_size" value="50" />
</properties>
</persistence-unit>
This is my resource in server.xml (tomcat):
<Context docBase="/home/usertest/Programas/apache-tomcat-7.0.59/webapps/cardoso" path="/cardoso" reloadable="false" source="org.eclipse.jst.jee.server:cardoso">
<Resource name="jdbc/testeDS" auth="Container" type="javax.sql.DataSource"
username="postgres"
password="pgadmin"
driverClassName="org.postgresql.Driver"
url="jdbc:postgresql://localhost:5432/teste"
maxTotal="25"
maxIdle="10"
validationQuery="select 1" />
</Context>
My context.xml:
<?xml version="1.0" encoding="UTF-8"?>
<Context>
<ResourceLink global="jdbc/sabrecadoDS" name="jdbc/testeDS" type="javax.sql.DataSource"/>
<Manager pathname=""/> <!-- disables storage of sessions across restarts -->
<Resource name="BeanManager"
auth="Container"
type="javax.enterprise.inject.spi.BeanManager"
factory="org.jboss.weld.resources.ManagerObjectFactory"/>
</Context>
Is something wrong? The EntityManager is always null. Some time ago I used Spring (now CDI), and in applicationContext I defined it and it worked normally.
A: CDI doesn't understand @PersistenceContext. So change it to:
@Inject
protected EntityManager entityManager;
But then you would have to define the EntityManager somewhere, lets say:
import javax.enterprise.context.ApplicationScoped;
import javax.enterprise.inject.Disposes;
import javax.enterprise.inject.Produces;
import javax.inject.Inject;
import javax.persistence.EntityManager;
import javax.persistence.EntityManagerFactory;
import javax.persistence.Persistence;
import org.apache.deltaspike.jpa.api.transaction.TransactionScoped;
public class CdiApplicationConfig {
@Inject
private EntityManagerFactory emf;
@Produces
@ApplicationScoped
public EntityManagerFactory createEntityManagerFactory() {
EntityManagerFactory emf = Persistence.createEntityManagerFactory("generalPU");
return emf;
}
public void close(@Disposes EntityManagerFactory emf) {
emf.close();
}
@Produces
@TransactionScoped // is a bit better than @RequestScoped because it won't allow perform injection outside transaction context
public EntityManager createEntityManager() {
return emf.createEntityManager();
}
public void close(@Disposes EntityManager em) {
if (em.isOpen()) {
em.close();
}
}
}
Above code uses DeltaSpike for transaction management - but you can use whatever you want instead.
One more tip: I'm not sure if slash is allowed between java:/comp in the datasource name referenced inside persistence.xml, so please check also:
<non-jta-data-source>java:comp/env/jdbc/testeDS</non-jta-data-source>
As you can see, CDI is very similar to Spring.
| |
doc_23530202
|
stream.on is not a function
at destroyer (internal/streams/pipeline.js:27:10)
at internal/streams/pipeline.js:79:12
at Array.map (<anonymous>)
at pipeline (internal/streams/pipeline.js:76:28)
at Promise (internal/util.js:274:30)
at new Promise (<anonymous>)
at pipeline (internal/util.js:273:12)
on line of pipeLinePromisified.
Can someone explain what is wrong here?
const fs = require("fs");
const util = require("util");
const { pipeline } = require("stream");
const writeStreamPromisified = util.promisify(fs.createWriteStream);
function someFunction(req) {
//extract bnary data req.on('data')
req.on('end', async () => {
let fileName = 'something.gz';
//define streams and cipher with the secret key
const iv = crypto.randomBytes(16);
const secret = req.headers["secret"];
const cipher = crypto.createCipheriv(
config.algorithmToEncrypt,
secret,
iv
);
const compressStream = zlib.createGzip();
let writeStream = fs.createWriteStream(
`${config.uploadFolder}/${fileName}.gz`
);
try {
//trying to pipe result which is binary data to cipher to be encrypted
// then compres it using gzipstream and finally write in writestream
req.body = result; // result is binary data
await pipeLinePromisified(
req,
cipher,
compressStream,
writeStream
);
return resolve({
code: 201,
payload: {
message: "Success!",
fileName: `${fileName}.gz`,
filePath: `somePath`
}
});
} catch (err) {
console.log(err);
return reject({
code: 400,
payload: {
error: `Error`
}
});
}
})
}
| |
doc_23530203
|
$st = $db->query("select cnt, link from rolls where def = 1 limit 1");
$str = $st->fetch()['cnt'];
$link = $st->fetch()['link'];
echo $str; // this works
echo $link; // doesn't work - nothing is echoed
How to get both values?
A: You can use fetch_assoc() like that:
$st = $db->query("select cnt, link from rolls where def = 1 limit 1");
$str = $st->fetch_assoc();
echo $str['cnt'].' '.$str['link '];
| |
doc_23530204
|
I can't manually do:
GRANT CONNECT ON DATABASE X TO readonly;
GRANT CONNECT ON DATABASE Y TO readonly;
GRANT CONNECT ON DATABASE Z TO readonly;
so on so forth, as I have many databases.
Is there an equivalent to something like:
GRANT CONNECT ON DATABASE * TO readonly;
My overall hope, is to grant connect and read only (select access) to the readonly user of this database to certain members of my organisation.
Best Regards,
Neil D.
A: With psql, you can use \gexec:
SELECT format('GRANT CONNECT ON DATABASE %I TO readonly;', datname) FROM pg_database \gexec
\gexec interprets each line of the query result as an SQL statement and executes it.
A: For pg 14:
GRANT pg_read_all_data TO your_user;
Then it can connect to all database, as read-only.
Tips:
*
*Seems the user can still create tables for itself in public schema, though can't write existing tables owned by other user.
| |
doc_23530205
|
OSError: [Errno 107] Transport endpoint is not connected
Upon googling, I found that this is because the connection may have dropped. But I run both the client and server side of the program on the same machine itself. I tried connecting again from the client end and I get this:
OSError: [Errno 106] Transport endpoint is already connected
indicating that the previous connection is still intact. I am pretty confused as to what is happening and how to make it work. Here is a screenshot which shows what I am trying to do and the problem:
A: I tested your code with a little change on python 3.5.0 and it works:
I think the trick is in sock.accept() method which returns a tuple:
socket.accept() Accept a connection. The socket must be bound to an
address and listening for connections. The return value is a pair
(conn, address) where conn is a new socket object usable to send and
receive data on the connection, and address is the address bound to
the socket on the other end of the connection.
server
#server
>>> import socket
>>> sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
>>> sock.bind(("localhost", 8081))
>>> sock.listen(2)
>>> conn, addr = sock.accept()
>>> data= conn.recv(1024).decode("ascii")
client:
#client
>>> import socket
>>> sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
>>> sock.connect(("localhost",8081))
>>> sock.send("hi".encode())
2
>>> sock.send("hiiiiiii".encode())
8
>>> sock.send(("#"*1020).encode())
1020
A: I've been getting the same error, and I realized that it's because of one small issue: In the handle_client function, I put server.recv instead of conn.recv, so the server.py program was trying to receive data from its own side.
Below is the corrected version:
server.py:
import socket
import threading
def handle_client(conn, addr):
while True:
msg_length = conn.recv(64).decode("utf-8") # I got the error when I put server.recv
if msg_length:
msg = conn.recv(int(msg_length)).decode('utf-8') # Here too
print(msg)
def start():
server.listen()
while True:
conn, addr = server.accept()
thread = threading.Thread(target=handle_client, args=(conn, addr))
thread.start()
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((socket.gethostbyname(socket.gethostname()), 5050))
start()
client.py:
import socket
import threading
def send(msg):
client.send(str(len(msg)).encode('utf-8').ljust(64))
client.send(msg.encode('utf-8'))
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((socket.gethostbyname(socket.gethostname()), 5050))
send("Hello world!")
| |
doc_23530206
|
My code is below:
class Pokayokes_model extends CI_Model {
public function get( $name = false, $number = false )
$this->db->select('*');
$this->db->from('poke_yoke p');
$this->db->join('rysunek r', 'r.nazwa_rys=p.nazwa_rys', 'left');
$this->db->where('r.nazwa_rys',$name);
$query = $this->db->get();
foreach ($query->result_array() as $row)
{
echo $res1 = $row['nazwa_art'];
echo $res2 = $row['kolory_art'];
$start = 7;
$end = 7 + $number;
for ($i = $start; $i < $end; $i++)
{
$j = $i - 6;
echo "<br>";
echo $res3 = $row['py_'.$j.''];
}
$qResults[] = $res1.','.$res2.','.$res3;
echo "<pre>";
var_dump($qResults);
echo "</pre>";
}
return ?;
}
I know that I need to use something like this:
foreach ($query->result() as $row)
{
echo $row->title;
echo $row->name;
echo $row->body;
}
but I don't know how to make this line $row['py_'.$j.''] an object.
In the database I have columns like py_1, py_2, py_3, etc., but I don't need all of them every time. Sometimes I need only 3, sometimes 10, so I need to generate them dynamically.
15.08.15 edit:
I have been trying for the last 2 days to make this code work, but it doesn't.
I think this code is not right.
I need to make this part (below) an object, but I don't know how.
for ($i = $start; $i < $end; $i++)
{
$j = $i - 6;
$res3 = $row['py_'.$j.''];
}
Because I want use this:
foreach ($query->result() as $row)
{
echo $row->title;
echo $row->name;
echo $row->body;
}
I have one more question...
How do I display the result from this loop? Do I need to add it to an array? What do I need to write next to get the result?
I want to output be like this at the end:
[
{"nazwa_art":"11_13_1532","kolory_art":"C,M,Y,K","py_1":"Y","py_2":"Y","py_3":"M","py_4":"M","py_5":"M","py_6":"M"},
{"nazwa_art":"11_13_1512","kolory_art":"C,M,Y,K","py_1":"Y","py_2":"Y","py_3":"M","py_4":"M","py_5":"x","py_6":"x"}
]
21.08.15 edit:
I made another modification of my code and it looks like this:
class Pokayokes_model extends CI_Model {
public function get( $pokayokeName = false, $number = false )
{
$this->db->select('*');
$this->db->from('poke_yoke p');
$this->db->join('rysunek r', 'r.nazwa_rys=p.nazwa_rys', 'left');
$this->db->where('r.nazwa_rys',$pokayokeName);
$query = $this->db->get();
foreach ($query->result_array() as $row)
{
$res1 = $row['nazwa_art'];
$res2 = $row['kolory_art'];
$qResults = array();
$qResults['nazwa_art'] = $res1;
$qResults['kolory_art'] = $res2;
$start = 7;
$end = 7 + $number;
for ($i = $start; $i < $end; $i++)
{
$j = $i - 6;
$res3 = $row['py_'.$j.''];
$qResults['py_'.$j.''] = $res3;
}
}
echo json_encode($qResults);
}
}
And the output looks like this:
{"nazwa_art":"11_15_0492","kolory_art":"C,M,Y,K,PBlue5575,PGreenYellow5576","py_1":"x","py_2":"PGreenYellow5576","py_3":"PGreenYellow5576","py_4":"PBlue5575","py_5":"PBlue5575" }
but there should be 3 results, not only one.
And I don't know why it is null in Angular.
A: Use as follows
class Pokayokes_model extends CI_Model {
public function get( $name = false, $number = false )
{
$this->db->select('*');
$this->db->from('poke_yoke p');
$this->db->join('rysunek r', 'r.nazwa_rys=p.nazwa_rys', 'left');
$this->db->where('r.nazwa_rys',$name);
$query = $this->db->get();
foreach ($query->result_array() as $row)
{
$res1 = $row['nazwa_art'];
$res2 = $row['kolory_art'];
$start = 7;
$end = 7 + $number;
for ($i = $start; $i < $end; $i++)
{
$j = $i - 6;
$res3 = $row['py_'.$j.''];
}
$qResults[] = $res1.','.$res2.','.$res3;
}
echo json_encode(['result'=>$qResults]);
}
}
| |
doc_23530207
|
mix.webpackConfig(webpack => {
return {
plugins: [
new webpack.ProvidePlugin({
$: 'jquery',
jQuery: 'jquery',
'window.jQuery': 'jquery',
Popper: ['popper.js', 'default'],
})
]
};
});
mix.sass('resources/assets/styles/index.scss', 'public/css/app.css')
.js('resources/assets/scripts/index.js', 'public/js/app.js')
.copyDirectory('resources/assets/static', 'public/static')
.version()
.sourceMaps();
and package.json:
"devDependencies": {
"jquery": "^3.3.1",
"axios": "^0.18.0",
"bootstrap": "^4.1.3",
"bootstrap-datepicker": "^1.7.1",
"browser-sync": "^2.24.6",
"browser-sync-webpack-plugin": "^2.0.1",
"chart.js": "^2.7.2",
"cross-env": "^5.2.0",
"datatables": "^1.10.18",
"easy-pie-chart": "^2.1.7",
"font-awesome": "4.7.0",
"fullcalendar": "^3.9.0",
"jquery-sparkline": "^2.4.0",
"jvectormap": "^2.0.4",
"laravel-mix": "^2.1.11",
"load-google-maps-api": "^1.2.0",
"lodash": "^4.17.10",
"masonry-layout": "^4.2.2",
"perfect-scrollbar": "^1.1.0",
"popper.js": "^1.14.3",
"skycons": "^1.0.0",
"vue": "^2.5.16"
}
and in my blade footer script:
@section('footer')
<script type="text/javascript">
if (typeof jQuery != 'undefined') { alert(jQuery.fn.jquery); }
$(function() {
$('#cc-number').validateCreditCard(function(result) {
$('.log').html('Card type: ' + (result.card_type == null ? '-' : result.card_type.name)
+ '<br>Valid: ' + result.valid
+ '<br>Length valid: ' + result.length_valid
+ '<br>Luhn valid: ' + result.luhn_valid);
});
});
</script>
@endsection
After I run npm run dev and load my page, I receive the error:
Uncaught ReferenceError: $ is not defined
and my alert(jQuery.fn.jquery); is not triggered
How do I load jQuery from npm so that I can then use it in inline HTML code in Blade?
A: I use mix.copy instead of mix.js for jQuery and it works great!
example:
mix.copy('node_modules/jquery/dist/jquery.min.js', 'public/vendor/jquery/jquery.min.js');
A: in app.blade.php
<script src="{{ asset('js/app.js') }}" defer></script>
remove defer, so make it
<script src="{{ asset('js/app.js') }}" ></script>
https://github.com/laravel/framework/issues/17634#issuecomment-375473990
A: Try this in your main.js file
global.$ = global.jQuery = require('jquery');
A: I had this issue in Laravel 5.7 when I tried using some of the default views that it provided. I was trying to use JavaScript (which required using jQuery) in some blade view templates that extended the default layout/app.blade.php template.
But for me the problem was not the window.$ not being assigned the window.jQuery library, because it was being added as far as the resources/js/bootstrap.js file was concerned. (This is a file that appears to be precompiled into public/js/app.js by Laravel Mix. You can see this is the case by looking into webpack.mix.js, within it, you'll find this statement:
mix.js('resources/js/app.js', 'public/js')
where
resources/js/app.js requires resources/js/bootstrap.js .
If you wish to manually compile this for yourself first run:
npm install then npm run dev when in development mode, or npm run prod when in production mode.
but anyways (I've digressed) ....
It turned out that the issue was here:
resources/views/layouts/app.blade.php
There was an instance of script tag with a defer attribute like so:
<script src="{{ asset('js/app.js') }}" defer></script>
That defer attribute was causing all the problems.
So I removed it like so:
<script src="{{ asset('js/app.js') }}"></script>
and the problem of jQuery not being defined was solved.
I prefer to defer my JavaScript loading by simply moving the script tags closer to the end of body HTML tag anyways.
| |
doc_23530208
|
package
{
public MyClass
{
var myA:Number = 10 ; //<< initializing here
public function MyClass()
{
myA = 20; //<< initializing here
}
}
}
Which one of the above is the right way to follow?
A: At a conference I attended, I was advised, during a session about optimization, that it is unwise to do many assignments and operations in the constructor, or when declaring variables outside functions; for, the compiler does not put those sections through any vigorous optimization.
This would leave me to believe that it is best to declare your variables outside of your functions, and then assign them in an initialization function, unless these were variables passed along to the constructor as parameters and you want to avoid passing them along again.
package{
public MyClass {
private var myA1:Number; //declare here
public var myA2:Number; //declare here
public function MyClass(arg1:Number = 10):void{
myA1 = arg1;//assigns myA1 a Number passed into the constructor, or 10
init();
}
public init():void{
myA2 = 20; //assigns myA2 a value of 20
}
}
}
As well, after a search on google I found this article that seems to agree.
http://voices.yahoo.com/flash-actionscript-3-optimization-guide-part-1-4793274.html
Keep in mind, that you may just want to do things a certain way that makes things easier for you, and then optimize later; for, just getting it done is more important than style imho.
Hope that helps.
| |
doc_23530209
|
I've configured the agent, then I put the ssh url on jenkins.
I've configured the ssh on github as well.
Failed to connect to repository : Command "git ls-remote -h -- git@github.com:user/maven-project.git HEAD" returned status code 128:
stdout:
stderr: git@github.com: Permission denied (publickey).
fatal: Could not read from remote repository.
Please make sure you have the correct access rights
and the repository exists
A: The issue was solved that way
In Windows, Jenkins will use the SSH key of the user it is running as, which is located in the %USERPROFILE%.ssh folder (on XP, that would be C:\Documents and Settings\USERNAME.ssh, and on 7 it would be C:\Users\USERNAME.ssh). Therefore, you need to force Jenkins to run as the user that has the SSH key configured. To do that, right click on My Computer, and hit "Manage". Click on "Services". Go to Jenkins, right click, and select "Properties". Under the "Log On" tab, choose the user Jenkins will run as, and put in the username and password (it requires one). Then restart the Jenkins service by right clicking on Jenkins (in the services window), and hit "Restart".
Jenkins does not support passphrases for SSH keys. Therefore, if you set one while running the initial Github configuration, rerun it and don't set one.
A: I'm going to say you don't have the credentials configured properly as you did not mention that.
A similar Stack Overflow response is here, for a slightly different worded error. Similar issue here.
The Jenkins site and others have good examples on setting up Jenkins with GitHub.
| |
doc_23530210
|
<script type="text/javascript">
var __cs = __cs || [];
__cs.push(["setCsAccount", "code"]);
</script>
<script type="text/javascript" async src="https://...cs.min.js"></script>
I tried to add it to one of the themes i have as a library by using
(function ($) {
Drupal.behaviors.myModuleBehavior = {
attach: function (contect, settings) {
document.write('<script type="text/javascript">...</script>');
}
}
})
But nothing happened, there is another theme in the website with the following files
I'm not sure I'm going about it the correct way since I'm new to Drupal, but the most important thing to me is that I prefer not to add any modules or outside libraries to the website
A: Override the html.html.twig of whatever theme your theme is inheriting from and just put your javascript in the head section.
*
*find what theme your theme is inheriting from, it will say in your themes .info.yml file, look for "base theme". For example HERE you can see that the classy theme is inheriting from the stable theme.
*Copy the html.html.twig file from your base theme and put it in your theme, keeping the directory structure is probably a good idea, but not absolutely necessary. If you can't find the file in your base theme, or your theme does not have a base theme, you could just use the html.html.twig from one of the core themes, eg the stable themes html.html.twig file.
*Add your javascript to the head section of your html.html.twig file. Add the first code that you posted, probably no need for drupal behaviors.
| |
doc_23530211
|
My Urls:
url(r'^$', HomePageView.as_view(), name="home_page"),
url(r'^profile/(?P<pk>\d+)/$', MemberUpdate.as_view(template_name="profile.html"), name="profile_page"),
url(r'^signup/$', SignupUser.as_view(), name="signup_user"),
My views:
class HomePageView(TemplateView):
template_name = "index.html"
login_form = AuthenticationForm()
signup_form = UserCreationForm()
def get_context_data(self, **kwargs):
context = super(HomePageView, self).get_context_data(**kwargs)
context['login_form'] = self.login_form
context['signup_form'] = self.signup_form
context['signup_action'] = reverse("signup_user")
return context
class SignupUser(CreateView):
model = settings.AUTH_USER_MODEL
form_class = MemberForm
template_name = "index.html"
def get_success_url(self):
return reverse("profile_page", args=[self.object.pk])
def form_invalid(self, form):
# here, how do I pass form.errors back to the home_page?
# I know messages framework is possible but this isn't as
# easy to add each error to its respective field as
# form.errors would be.
return HttpResponseRedirect(reverse('home_page'))
...and I would have a third view for the login form's POST data. Is this an acceptable way to manage my forms/views, or am I better off simply writing one overall view that distinguishes between the signup and login form within its post method with an if statment?
| |
doc_23530212
|
Table 1
AA
BB
CC
DD
EE
Table 2
bb
aa
bb1
bb2
cc1
cc2
cc3
I need help to do the below steps using Excel VBA code
*
*Use Table 1 and loop thru each data in table 1 and compare to Table 2
*If table 2 only have 1 match, just replace the Table 1 data from the table 2 value on the same row of data from table 1
*If there are multiple matches from table 2, then prompt the user to select which data from table 2 needs to be written in table 1
Matching Criteria are as follows
AA should match to aa,aa1,aa2,,,,,,
BB should match bb,bb1,bb2,,,,,,,,
Below is the code that I have written
Private Sub CommandButton2_Click()
Dim attr1 As Range, data1 As Range
Dim item1, item2, item3, lastRow, lastRow2
Dim UsrInput, UsrInput2 As Variant
Dim Cnt As Integer, LineCnt As Integer
Dim MatchData(1 To 9000) As String
Dim i As Integer, n As Integer, j As Integer, p As Integer
Dim counter1 As Integer, counter2 As Integer
Dim match1(1 To 500) As Integer
Dim matchstr1(1 To 500) As String
Dim tmpstr1(1 To 500) As String
Dim storestr(1 To 500) As String
Dim tmpholderstr As String
counter1 = 1
counter2 = 0
j = 0
p = 0
tmpholderstr = ""
For i = 1 To 500
storestr(i) = ""
Next i
For i = 1 To 500
tmpstr1(i) = ""
Next i
For i = 1 To 500
matchstr1(i) = ""
Next i
For i = 1 To 500
match1(i) = 0
Next i
For i = 1 To 9000
MatchData(i) = ""
Next i
UsrInput = InputBox("Enter Atribute Column")
UsrInput2 = InputBox("Enter Column Alphabet to compare")
With ActiveSheet
lastRow = .Cells(.Rows.Count, UsrInput).End(xlUp).Row
'MsgBox lastRow
End With
With ActiveSheet
lastRow2 = .Cells(.Rows.Count, UsrInput2).End(xlUp).Row
'MsgBox lastRow
End With
Set attr1 = Range(UsrInput & "2:" & UsrInput & lastRow)
Set data1 = Range(UsrInput2 & "2:" & UsrInput2 & lastRow2)
'Debug.Print lastRow
'Debug.Print lastRow2
For Each item1 In attr1
item1.Value = Replace(item1.Value, " ", "")
Next item1
For Each item1 In attr1
If item1.Value = "" Then Exit For
counter1 = counter1 + 1
item1.Value = "*" & item1.Value & "*"
For Each item2 In data1
If item2 = "" Then Exit For
If item2 Like item1.Value Then
counter2 = counter2 + 1
match1(counter2) = counter1
matchstr1(counter2) = item2.Value
tmpstr1(counter2) = item1.Value
Debug.Print item1.Row
Debug.Print "match1[" & counter2; "] = " & match1(counter2)
Debug.Print "matchstr1[" & counter2; "] = " & matchstr1(counter2)
Debug.Print "tmpstr1[" & counter2; "] = " & tmpstr1(counter2)
End If
Next item2
Next item1
' Below is the code that go thru the array and try to write to table 1
' But it is not working as expected.
For n = 1 To 500
If matchstr1(n) = "" Then Exit For
If match1(n) <> match1(n + 1) Then
Range("K" & match1(n)) = matchstr1(n)
Else
i = 0
For j = n To 300
If matchstr1(j) = "" Then Exit For
i = i + 1
If match1(j) = match1(j + 1) Then
tmpstr1(i) = matchstr1(j)
End If
Next j
End If
Next n
End Sub
A: Try the following. Your two tables are supposed to be in a sheet named "MyData", where there is also a command button (CommandButton2). Add also a UserForm (UserForm1), and in that UserForm add another command button (CommandButton1).
In the module associated with CommandButton2, copy the following code:
Public vMyReplacementArray() As Variant
Public iNumberOfItems As Integer
Public vUsrInput As Variant, vUsrInput2 As Variant
Public lLastRow As Long, lLastRow2 As Long
Public rAttr1 As Range, rData1 As Range, rItem1 As Range, rItem2 As Range
Public iCounter1 As Integer
Sub Button2_Click()
vUsrInput = InputBox("Enter Atribute Column")
vUsrInput2 = InputBox("Enter Column Alphabet to compare")
With ActiveSheet
lLastRow = .Cells(.Rows.Count, vUsrInput).End(xlUp).Row
End With
With ActiveSheet
lLastRow2 = .Cells(.Rows.Count, vUsrInput2).End(xlUp).Row
End With
Set rAttr1 = Range(vUsrInput & "2:" & vUsrInput & lLastRow)
Set rData1 = Range(vUsrInput2 & "2:" & vUsrInput2 & lLastRow2)
ReDim vMyReplacementArray(1 To 1) As Variant
For Each rItem1 In rAttr1
For Each rItem2 In rData1
If (InStr(1, rItem2, rItem1, vbTextCompare)) > 0 Then
vMyReplacementArray(UBound(vMyReplacementArray)) = rItem1.Value & "-" & rItem2.Value
ReDim Preserve vMyReplacementArray(1 To UBound(vMyReplacementArray) + 1) As Variant
End If
Next rItem2
Next rItem1
iNumberOfItems = UBound(vMyReplacementArray) - LBound(vMyReplacementArray)
UserForm1.Show
End Sub
And in the Userform, the following:
Dim k As Integer
Private Sub UserForm_initialize()
Dim myElements() As String
Dim theLabel As Object
Dim rad As Object
Class1 = ""
k = 1
For i = 1 To iNumberOfItems
myElements = Split(vMyReplacementArray(i), "-")
If myElements(0) <> Class1 Then
Set theLabel = UserForm1.Controls.Add("Forms.Label.1", "Test" & i, True)
theLabel.Caption = myElements(0)
theLabel.Left = 80 * k
theLabel.Width = 20
theLabel.Top = 10
k = k + 1
j = 1
End If
Set rad = UserForm1.Controls.Add("Forms.OptionButton.1", "radio" & j, True)
If j = 1 Then
rad.Value = True
End If
rad.Caption = myElements(1)
rad.Left = 80 * (k - 1)
rad.Width = 60
rad.GroupName = k - 1
rad.Top = 50 + 20 * j
j = j + 1
Class1 = myElements(0)
Next i
End Sub
Private Sub CommandButton1_Click()
Dim ctrl As MSForms.Control
Dim dict(5, 1)
Dim i
'## Iterate the controls, and associates the GroupName to the Button.Name that's true.
i = 0
For Each ctrl In Me.Controls
If TypeName(ctrl) = "OptionButton" Then
If ctrl.Value = True Then
dict(i, 0) = ctrl.GroupName
dict(i, 1) = ctrl.Caption
i = i + 1
End If
End If
Next
'For i = 0 To k
'MsgBox "grupo: " & dict(i, 0) & "elem: " & dict(i, 1)
'Next
j = 0
For i = 1 To iNumberOfItems
myElements = Split(vMyReplacementArray(i), "-")
For Each rItem1 In rAttr1
If rItem1 = myElements(0) Then
rItem1 = dict(j, 1)
j = j + 1
End If
Next
Next i
End Sub
| |
doc_23530213
|
My app does a SQL query and returns an int value for each month's number of entries, and if it doesn't find any it returns 0.
//scale variable is an int
//ene is a return value from another method that runs before
//in case there are no students in january I dont have to graph anything
//so this 'if' doesnt run
Scale = 50;
if (Ene != 0) {
System.out.println(Ene + " ene stdds");
// this prints out 11 ene stdds
double EneBH = 449 * (Ene / Scale);
int EneBHeight = (int) Math.round(EneBH);
int EneBYLocal = 612 - EneBHeight;
EneP.setBounds(76, EneBYLocal, 7, EneBHeight);
EneP.setVisible(true);
} else {
//if the last if didnt run I want to know if it hidd the label
System.out.println("HIDDEN ENE");
EneP.setVisible(false);
}
*EneP is the very first JLabel for graphing; it looks kinda gray and is at the enero zone.
*EneP prints out 11 students but never shows up; it doesn't print HIDDEN ENE, it just doesn't show up
*EneP will have the same code as the other JLabels if I solve it or you solve it, please
A: I have found a Solution for My Own Problem
The solution was easy: it turns out that if you divide Ene (the return variable from a query that is saved as an integer value) by the Scale variable, which is also an integer, it causes Java to return 0
So I have discovered this on my own by placing a system.out.print() after every line and printing the values of every variable.
I noticed 11/50 shouldn't return 0, so I changed the scale variable to be a double and still returned 0; but I changed both and now it works just right.
Scale = 50;
if (Ene != 0) {
System.out.println(Ene + " ene stdds");
// this prints out 11 ene stdds
double EneBH = 449 * (Ene / Scale);
int EneBHeight = (int) Math.round(EneBH);
int EneBYLocal = 612 - EneBHeight;
EneP.setBounds(76, EneBYLocal, 7, EneBHeight);
EneP.setVisible(true);
} else {
//if the last if didn't run I want to know if it hid the label
System.out.println("HIDDEN ENE");
EneP.setVisible(false);
}
enter image description here
| |
doc_23530214
|
This is my Set Alarm Activity:
@Override
public void onItemClick(AdapterView<?> parent, View view, int position, long id) {
Intent intent = new Intent(Intent.ACTION_GET_CONTENT);
Uri uri = Uri.parse(Environment.getExternalStorageDirectory().getPath()
+ "/Audio/");
intent.setDataAndType(uri, "audio/*");
startActivityForResult(intent, 1);
}
@Override
protected void onActivityResult(int requestCode,int resultCode,Intent data){
if(requestCode == 1){
if(resultCode == RESULT_OK){
//the selected audio.
Uri uri = data.getData();
Intent n = new Intent(this, AlarmReceiver.class);
n.putExtra("song", uri.toString());
}
}
super.onActivityResult(requestCode, resultCode, data);
}
Code for Broadcast Receiver:
@Override
public void onReceive(Context context, Intent intent) {
MediaPlayer mp;
Intent in = new Intent();
Uri notification = RingtoneManager.getDefaultUri(RingtoneManager.TYPE_NOTIFICATION);
Ringtone r = RingtoneManager.getRingtone(context.getApplicationContext(), notification);
r.play();
Uri ir = in.getParcelableExtra("song");
mp = MediaPlayer.create(context, ir);
mp.start();
}
Logcat
08-21 04:05:35.011 1553-1571/system_process E/BluetoothAdapter: Bluetooth binder is null
08-21 04:05:35.012 1553-1571/system_process E/KernelCpuSpeedReader: Failed to read cpu-freq
java.io.FileNotFoundException: /sys/devices/system/cpu/cpu0/cpufreq/stats/time_in_state: open failed: ENOENT (No such file or directory)
at libcore.io.IoBridge.open(IoBridge.java:452)
at java.io.FileInputStream.<init>(FileInputStream.java:76)
at java.io.FileInputStream.<init>(FileInputStream.java:103)
at java.io.FileReader.<init>(FileReader.java:66)
at com.android.internal.os.KernelCpuSpeedReader.readDelta(KernelCpuSpeedReader.java:49)
at com.android.internal.os.BatteryStatsImpl.updateCpuTimeLocked(BatteryStatsImpl.java:8002)
at com.android.server.am.BatteryStatsService.updateExternalStats(BatteryStatsService.java:1366)
at com.android.server.am.BatteryStatsService$BatteryStatsHandler.handleMessage(BatteryStatsService.java:125)
at android.os.Handler.dispatchMessage(Handler.java:102)
at android.os.Looper.loop(Looper.java:148)
at android.os.HandlerThread.run(HandlerThread.java:61)
at com.android.server.ServiceThread.run(ServiceThread.java:46)
Caused by: android.system.ErrnoException: open failed: ENOENT (No such file or directory)
at libcore.io.Posix.open(Native Method)
at libcore.io.BlockGuardOs.open(BlockGuardOs.java:186)
at libcore.io.IoBridge.open(IoBridge.java:438)
at java.io.FileInputStream.<init>(FileInputStream.java:76)
at java.io.FileInputStream.<init>(FileInputStream.java:103)
at java.io.FileReader.<init>(FileReader.java:66)
at com.android.internal.os.KernelCpuSpeedReader.readDelta(KernelCpuSpeedReader.java:49)
at com.android.internal.os.BatteryStatsImpl.updateCpuTimeLocked(BatteryStatsImpl.java:8002)
at com.android.server.am.BatteryStatsService.updateExternalStats(BatteryStatsService.java:1366)
at com.android.server.am.BatteryStatsService$BatteryStatsHandler.handleMessage(BatteryStatsService.java:125)
at android.os.Handler.dispatchMessage(Handler.java:102)
at android.os.Looper.loop(Looper.java:148)
at android.os.HandlerThread.run(HandlerThread.java:61)
at com.android.server.ServiceThread.run(ServiceThread.java:46)
08-21 04:05:35.012 1553-1571/system_process E/KernelUidCpuTimeReader: Failed to read uid_cputime
java.io.FileNotFoundException: /proc/uid_cputime/show_uid_stat: open failed: ENOENT (No such file or directory)
at libcore.io.IoBridge.open(IoBridge.java:452)
at java.io.FileInputStream.<init>(FileInputStream.java:76)
at java.io.FileInputStream.<init>(FileInputStream.java:103)
at java.io.FileReader.<init>(FileReader.java:66)
at com.android.internal.os.KernelUidCpuTimeReader.readDelta(KernelUidCpuTimeReader.java:71)
at com.android.internal.os.BatteryStatsImpl.updateCpuTimeLocked(BatteryStatsImpl.java:8031)
at com.android.server.am.BatteryStatsService.updateExternalStats(BatteryStatsService.java:1366)
at com.android.server.am.BatteryStatsService$BatteryStatsHandler.handleMessage(BatteryStatsService.java:125)
at android.os.Handler.dispatchMessage(Handler.java:102)
at android.os.Looper.loop(Looper.java:148)
at android.os.HandlerThread.run(HandlerThread.java:61)
at com.android.server.ServiceThread.run(ServiceThread.java:46)
Caused by: android.system.ErrnoException: open failed: ENOENT (No such file or directory)
at libcore.io.Posix.open(Native Method)
at libcore.io.BlockGuardOs.open(BlockGuardOs.java:186)
at libcore.io.IoBridge.open(IoBridge.java:438)
at java.io.FileInputStream.<init>(FileInputStream.java:76)
at java.io.FileInputStream.<init>(FileInputStream.java:103)
at java.io.FileReader.<init>(FileReader.java:66)
at com.android.internal.os.KernelUidCpuTimeReader.readDelta(KernelUidCpuTimeReader.java:71)
at com.android.internal.os.BatteryStatsImpl.updateCpuTimeLocked(BatteryStatsImpl.java:8031)
at com.android.server.am.BatteryStatsService.updateExternalStats(BatteryStatsService.java:1366)
at com.android.server.am.BatteryStatsService$BatteryStatsHandler.handleMessage(BatteryStatsService.java:125)
at android.os.Handler.dispatchMessage(Handler.java:102)
at android.os.Looper.loop(Looper.java:148)
at android.os.HandlerThread.run(HandlerThread.java:61)
at com.android.server.ServiceThread.run(ServiceThread.java:46)
08-21 04:05:35.012 1553-1571/system_process E/KernelWakelockReader: neither /proc/wakelocks nor /d/wakeup_sources exists
08-21 04:05:35.017 1553-1571/system_process W/BatteryStatsImpl: Couldn't get kernel wake lock stats
08-21 04:05:35.086 2268-10915/com.google.android.gms D/DropBoxEntryAddedChimeraService: User is not opted-in to Usage & Diagnostics.
08-21 04:05:54.610 1553-1575/system_process W/ProcessCpuTracker: Skipping unknown process pid 11191
08-21 04:05:54.610 1553-1575/system_process W/ProcessCpuTracker: Skipping unknown process pid 11196
08-21 04:06:00.026 1553-1567/system_process I/ProcessStatsService: Prepared write state in 1ms
08-21 04:06:00.052 1553-1565/system_process W/BroadcastQueue: Skipping deliver [foreground] BroadcastRecord{2cfd4a7 u-1 android.intent.action.TIME_TICK} to ReceiverList{53b0aef 10154 myapps.wycoco.com.alarmapp/10064/u0 remote:8835bce}: process crashing
08-21 04:06:47.744 2268-11950/com.google.android.gms I/EventLogChimeraService: Aggregate from 1471750607524 (log), 1471750607524 (data)
08-21 04:06:47.857 2268-11954/com.google.android.gms D/DropBoxEntryAddedChimeraService: User is not opted-in to Usage & Diagnostics.
08-21 04:07:00.053 1553-1712/system_process W/BroadcastQueue: Skipping deliver [foreg
A: Create a global variable for song uri on your Set Alarm class.
Uri songUri;
On your onItemClick method change setDataAndType(uri, "*/*"); to setDataAndType(uri, "audio/*"); so the chooser may only display audio files for the user.
You will also have to change startActivity(Intent.createChooser(intent, "Open")); to startActivityForResult(Intent.createChooser(intent, "Open"), 1); this will allow you to get whatever was chosen from the chooser.
Override onActivityResult on the same class. Then set the songUri global variable.
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
super.onActivityResult(requestCode, resultCode, data);
if (data != null) {
songUri = data.getData();
}
}
finally, put extra on the Intent that is pointing to the AlarmReceiver.class
intent.putExtra("song",songUri);
on your Broadcast Receiver class change mp = MediaPlayer.create(context, R.raw.closer); to this
try{
mp = MediaPlayer.create(context, (Uri)intent.getParcelableExtra("song"));
}catch(NullPointerException e) {
mp = MediaPlayer.create(context, R.raw.closer);
}
A NullPointerException will be triggered if no song is chosen that's because intent.getParcelableExtra("song") will be null. If a song is chosen place it on the try block and get the extra.
A: Try this one.
@Override
public void onReceive(Context context, Intent intent)
{
AudioManager audio = (AudioManager) getSystemService(Context.AUDIO_SERVICE);
MediaPlayer player = MediaPlayer.create(getApplicationContext(), RingtoneManager.getDefaultUri(RingtoneManager.TYPE_NOTIFICATION));
try {
player.setVolume((float) (audio.getStreamVolume(AudioManager.STREAM_NOTIFICATION) / 7.0),
(float) (audio.getStreamVolume(AudioManager.STREAM_NOTIFICATION) / 7.0));
} catch (Exception e) {
e.printStackTrace();
}
player.start();
}
| |
doc_23530215
|
['+user_name(dp.grantee_principal_id)+'] ;' from
sys.database_permissions dp inner join sys.certificates sa on
sa.certificate_id = dp.major_id
A: You need to add COLLATE DATABASE_DEFAULT in the JOINS.
SELECT 'GRANT '+permission_name+' ON ASSEMBLY::['+ sa.name +'] to ['+user_name(dp.grantee_principal_id)+'] ;'
FROM sys.database_permissions dp
INNER JOIN sys.certificates sa ON sa.certificate_id COLLATE DATABASE_DEFAULT = dp.major_id COLLATE DATABASE_DEFAULT
A: Maybe this will work.
select 'GRANT '+permission_name+' ON ASSEMBLY::['+ sa.name +'] to
['+user_name(dp.grantee_principal_id)+'] ;'
from sys.database_permissions dp inner join sys.certificates sa on
sa.certificate_id COLLATE DATABASE_DEFAULT = dp.major_id COLLATE DATABASE_DEFAULT
| |
doc_23530216
|
Is there a way to have a ListView that's not in a ScrollView or has the scrolling disabled?
A: Another way to do this is to wrap the ListView in whatever custom scrolling solution you've cooked up. If you simply set the height of the ListView to be exactly as high as all the rows (using the list view's LayoutParams), then it will act like a normal view and won't be able to scroll; therefore you can add it to another scroll view. The difficulty with this method is that you have to know the height of your list ahead of time, so you have to know the height of each row. Also, this will create all the rows at once and so you won't be able to take advantage of the view-recycling feature.
If you don't have an easy way of calculating the height ahead of time, you can trick the ListView into doing it for you by overriding onMeasure and giving it your own height spec:
// Calculate height of the entire list by providing a very large
// height hint. But do not use the highest 2 bits of this integer;
// those are reserved for the MeasureSpec mode.
int expandSpec = MeasureSpec.makeMeasureSpec(Integer.MAX_VALUE >> 2,
MeasureSpec.AT_MOST);
super.onMeasure(widthMeasureSpec, expandSpec);
A: A ListView is not in a ScrollView. A ListView does scrolling instrinsically. I do not believe you can disable scrolling through a simple API. If so, your choices are either to subclass ListView and try to find ways to override scrolling behaviors, clone ListView and eliminate scrolling behaviors, or write your own AdapterView that renders things the way you want.
| |
doc_23530217
|
aoi@aoi:~$ sudo apt-get install python3-sqlalchemy
[sudo] password for aoi:
Reading package lists... Done
Building dependency tree
Reading state information... Done
The following packages were automatically installed and are no longer required:
libgio-cil libmono-corlib4.0-cil libmono-system-security4.0-cil libmono-system-xml4.0- cil libmono-i18n-west4.0-cil cli-common guile-1.8-libs
libmono-system-configuration4.0-cil librhythmbox-core5 mono-runtime libgdu-gtk0 libavahi-ui-gtk3-0 mono-4.0-gac mono-gac
linux-headers-3.5.0-23-generic libglib2.0-cil linux-headers-3.5.0-23 libming1 thunderbird-globalmenu libmusicbrainz3-6 libmono-i18n4.0-cil
libmono-security4.0-cil libmono-system4.0-cil
Use 'apt-get autoremove' to remove them.
Suggested packages:
python-sqlalchemy-doc
The following NEW packages will be installed:
python3-sqlalchemy
0 upgraded, 1 newly installed, 0 to remove and 81 not upgraded.
Need to get 450 kB of archives.
After this operation, 2,808 kB of additional disk space will be used.
Get:1 http://ph.archive.ubuntu.com/ubuntu/ precise-updates/main python3-sqlalchemy all 0.7.4-1ubuntu0.1 [450 kB]
Fetched 450 kB in 40s (11.0 kB/s)
Selecting previously unselected package python3-sqlalchemy.
(Reading database ... 346102 files and directories currently installed.)
Unpacking python3-sqlalchemy (from .../python3-sqlalchemy_0.7.4-1ubuntu0.1_all.deb) ...
Setting up python3-sqlalchemy (0.7.4-1ubuntu0.1) ...
Did I do something wrong?
A: This gets ya sqlalchemy version 0.8.4 for python 2.x
ubuntu is a lot slow in keeping packages up to date
sudo apt-get install python3-sqlalchemy
So for Python 3 / Ubuntu peeps here is how to install the latest version of sqlalchemy
sudo apt-get install python3-pip
# Now you have pip3
sudo apt-get install python3-all-dev
# Required for sqlalchemy's c extensions to be installed
sudo pip3 install SQLAlchemy
# note using pip, not pip3, will install for Python 2.7
:~$ python3
>>> import sqlalchemy
>>> sqlalchemy.__version__
>>> '0.9.8'
>>> print("Who's the man?")
>>> You are baby!
>>> quit()
A: By using apt-get you have installed sqlalchemy in the default directory for Ubuntu's default Python 3, which is 3.2. If you search in the directory /usr/lib/python3/dist-packages you'll find the sqlalchemy module (or just type locate sqlalchemy). However, this isn't where a custom Python will look for its modules. You'll need to download the source and compile it with the correct Python, something like /opt/python3.3/bin/python3.3 setup.py install in the source directory for sqlalchemy. See the instructions here: http://docs.python.org/3.3/install
A: It seems that the python3-sqlalchemy is outdated. You can try to use pip3 to get the latest version instead.
sudo pip3 install sqlalchemy
should do the trick.
| |
doc_23530218
|
If you take a closer look at this, a for loop is executed once per element of arr_2, and the list's extend() is called on each iteration. It turns out that processing becomes extremely slow when arr_2 gets long.
Would it be possible to speed this up by constructing the array more efficiently?
# -*- coding: utf-8 -*-
import numpy as np
arr_1 = np.array([[0, 0, 1], [0, 0.5, -1], [-1, 0, -1], [0, -0.5, -1], [1, 0, -1]])
arr_2 = np.array([[0, 1, 2], [0, 1, 2]])
all_arr = []
for p in arr_2:
all_arr = [
(arr_1[0], p), (arr_1[1], p), (arr_1[2], p),
(arr_1[0], p), (arr_1[1], p), (arr_1[4], p),
(arr_1[0], p), (arr_1[2], p), (arr_1[3], p),
(arr_1[0], p), (arr_1[3], p), (arr_1[4], p),
(arr_1[1], p), (arr_1[2], p), (arr_1[4], p),
(arr_1[2], p), (arr_1[3], p), (arr_1[4], p)]
all_arr.extend(all_arr)
vtype = [('type_a', np.float32, 3), ('type_b', np.float32, 3)]
res = np.array(all_arr, dtype=vtype)
print(res)
A: I couldn't figure out why you used this indexing for arr_1 so I just copied it
import numpy as np
arr_1 = np.array([[0, 0, 1], [0, 0.5, -1], [-1, 0, -1], [0, -0.5, -1], [1, 0, -1]])
arr_2 = np.array([[0, 1, 2], [0, 1, 2]])
weird_idx = np.array([0,1,2,0,1,4,0,2,3,0,3,4,1,2,4,2,3,4])
weird_arr1 = arr_1[weird_idx]
all_arr = [(weird_arr1[i],arr_2[j]) for j in range(len(arr_2)) for i in range(len(weird_arr1)) ]
vtype = [('type_a', np.float32, 3), ('type_b', np.float32, 3)]
res = np.array(all_arr, dtype=vtype)
you can also repeat the arrays
arr1_rep = np.tile(weird_arr1.T,2).T
arr2_rep = np.repeat(arr_2,weird_arr1.shape[0],0)
res = np.empty(arr1_rep.shape[0],dtype=vtype)
res['type_a']=arr1_rep
res['type_b']=arr2_rep
A: Often with structured arrays it is faster to assign by field instead of the list of tuples approach:
In [388]: idx = [0,1,2,0,1,4,0,2,3,0,3,4,1,2,4,2,3,4]
In [400]: res1 = np.zeros(36, dtype=vtype)
In [401]: res1['type_a'][:18] = arr_1[idx]
In [402]: res1['type_a'][18:] = arr_1[idx]
In [403]: res1['type_b'][:18] = arr_2[0]
In [404]: res1['type_b'][18:] = arr_2[1]
In [405]: np.allclose(res['type_a'], res1['type_a'])
Out[405]: True
In [406]: np.allclose(res['type_b'], res1['type_b'])
Out[406]: True
| |
doc_23530219
|
I want to use manage.py to do some things, but when I run something like
su zulip ./manage.py shell
But I get this error message:
./manage.py: line 2: syntax error near unexpected token `('
./manage.py: line 2: `from __future__ import (print_function)'
Does anyone have a idea what's wrong? Thanks in advance!
A: This is an incorrect usage of su; it caused su to launch bash ./manage.py shell, which tried to interpret manage.py as a Bash script. You meant one of these:
su zulip -c './manage.py shell'
sudo -u zulip ./manage.py shell
| |
doc_23530220
|
enqueueWork(context, MessagesRetentionKotlin::class.java, UNIQUE_JOB_ID, work)
in the kotlin, but not in the Scala. Here I am sharing my Scala code for the class extending JobIntentService, will be great if someone can help me out. Thanks.
class MessagesRetention(context: Context, work: Intent) extends MessagesRetentionTrait with JobIntentService{
override def onCreate():Unit= {
super.onCreate()
Log.d(TAG, "onCreate")
}
override def onHandleWork(intent: Intent):Unit= {
Log.d(TAG, "onHandleWork")
val input: String = intent.getStringExtra("inputExtra")
for (i <- 0 to 10) {
Log.d(TAG, "$input - $i")
if (isStopped) return
SystemClock.sleep(1000)
}
}
override def onDestroy():Unit= {
super.onDestroy()
Log.d(TAG, "onDestroy")
val serviceIntent = new Intent(this, MessagesRetentionImpl.getClass)
serviceIntent.putExtra("inputExtra", "Test")
MessagesRetention.enqueueWorkk(this, serviceIntent)
}
}
object MessagesRetention {
private val TAG = "MessagesRetention"
val UNIQUE_JOB_ID = 10101
def enqueueWorkk(context: Context, work: Intent):Unit= {
enqueueWork(context, MessagesRetentionImpl.getClass, UNIQUE_JOB_ID, work)
}
}
A: Being a newbie in scala, I was not passing the right service context to the JobIntentService's enqueueWork method. Instead of using .getclass method I used the classOf[T] for service class passing and it worked.
enqueueWork(context, classOf[MessagesRetention], UNIQUE_JOB_ID, work)
| |
doc_23530221
|
output_zones = {'CHANGE' : {}}
I would like to get:
{'zone_name': {... a set of dictionaries...}}
What is the correct syntax? This code is wrong:
zone_list = zone_name
output_zones = {f"{zone_list}:", {}}
output_zones[zone_list].update(zone_info)
A: try this:
output_zones = {'{}'.format(zone_list): {}}
| |
doc_23530222
|
How can I fix this?
| |
doc_23530223
|
sum<IntList<1, 2, 3>>();
As it's known that C++ standards don't allow function partial specialization, I would like to use C++20 concept/requires to the similar stuff as function partial specialization.
Here is my code:
#include <iostream>
template<int...N>
class IntList;
template<int...N>
concept IsIntList = IntList<N...>{};
template<typename T>
int sum() {
return 0;
}
template<int...N>
requires IsIntList<N...>
int sum() {
return (N + ...);
}
int main() {
std::cout << sum<IntList<1, 2>>() << std::endl;
return 0;
}
But it could not produce what I want. Put my code in C++ Insights. The first sum is instantiated, instead of the second sum.
Here is the result of C++Insights:
#include <iostream>
template<int...N>
class IntList;
template<int...N>
concept IsIntList = IntList<N...>{};
template<typename T>
int sum() {
return 0;
}
/* First instantiated from: insights.cpp:21 */
#ifdef INSIGHTS_USE_TEMPLATE
template<>
int sum<IntList<1, 2> >()
{
return 0;
}
#endif
template<int...N>
requires IsIntList<N...>
int sum() {
return (N + ...);
}
int main()
{
std::cout.operator<<(sum<IntList<1, 2> >()).operator<<(std::endl);
return 0;
}
What's the correct way to solve this problem? Thanks!
A: The central problem you're encountering is that you have a type template parameter whose type you want to be constrained to being some specialization of some template. That's not a thing you can do with a requires clause. At least, not easily.
It's best to avoid this problem. You're only encountering it because you insist that sum's template parameter must be some specialization of IntList instead of the integers themselves directly. The best way to handle this is by ditching this assumption:
template<int... Ints>
constexpr int sum(IntList<Ints...>)
{ return (0 + ... + Ints); }
You then call this function as so: sum(IntList<1, 2>{}). Note that IntList needs to have a constexpr default constructor.
A: Your definition of the concept of IsIntList is wrong, it only evaluates the value of IntList<N...>{}, and since IntList is not a bool type, IsIntList always return false.
You should use template partial specialization to define the IsIntList.
template<int...N>
class IntList;
template<class T>
inline constexpr bool IsIntList = false;
template<int...N>
inline constexpr bool IsIntList<IntList<N...>> = true;
For a specialized version of the sum, just constrain IsIntList<T> to be true, then you can extract the value of the IntList with the help of a tag class and template lambda to calculate the sum.
template<class>
struct tag{};
template<class T>
requires IsIntList<T>
int sum() {
return []<int...N>(tag<IntList<N...>>)
{ return (N + ... + 0); }(tag<T>{});
}
Demo.
| |
doc_23530224
|
My Controller:
namespace GBCustomer\Http\Controllers;
use GBCustomer\Customer;
use Illuminate\Http\Request;
class CustomerController extends Controller
{
public function listCustomers()
{
$customers = DB::table('customers')->paginate(10);
return view('customer.index',['customers' => $customers]);
}
}
My route
Route::get('/customer/list', 'CustomerController@listCustomers');
My view (part)
<table id="datatable-buttons" class="table table-striped table-bordered"
cellspacing="0" width="100%">
<thead>
<tr>
<th>ID Customer</th>
<th>Firstname</th>
<th>Lastname</th>
<th>Email</th>
</tr>
<tbody>
@foreach( $customers as $customer )
<tr role="row">
<td> <a href="customer/show/{{ $customer->id_customer }}">
{{ $customer->id_customer }} </a></td>
<td>{{ $customer->firstname }}</td>
<td>{{ $customer->lastname }}</td>
<td>{{ $customer->email }}</td>
</tr>
@endforeach
</tbody>
</table>
If I put DB::table('customers')->paginate(1500); it shows 1500 results paginated in 15 results per page.
If I put DB::table('customers')->paginate(2000); it shows 2000 results paginated in 15 results per page.
If I put DB::table('customers')->paginate(); it shows 15 results paginated in 15 results per page.
If I change this line for:
$customers = Customer::all(); show all results (about 22000) but in one page!
?¿?¿
Thanks in advance
| |
doc_23530225
|
It seems the cause of the problem is because of a line of code @mediaSingle('cover_img'). When I removed this line of code, the error message did not pop up.
I doubted if the version of the packages installed are different so I have tried building my project without using docker and run php composer update to test it. But it worked well.
How to fix this problem? Thank you!
My create-fields.blade.php
<div class="box-body">
<select class="selectpicker" style="width:100%;" name="title_color">
<option value="0c15c0" data-icon="color1" >#0c15c0</option>
</select>
<br>
@mediaSingle('cover_img')
<p>Cover Image Size (320px x 215px)</p>
{!! Form::normalTextarea('title', 'Title', $errors) !!}
{!! Form::normalTextarea('content', 'Content', $errors) !!}
<div class="form-group ">
<label>Date</label>
<input type="date" name="date" required="required">
</div>
</div>
| |
doc_23530226
|
My objective is to load the Control in GridViewrow based on the property in the viewmodel.
Sample Code:
Here is the sample xaml which I tried.
<ListView Margin="10" Name="lvUsers">
<ListView.View>
<GridView x:Name="gridview">
<GridViewColumn Header="Type">
<GridViewColumn.CellTemplate>
<DataTemplate>
<ContentControl>
<ContentControl.Style>
<Style TargetType="{x:Type ContentControl}">
<Style.Triggers>
<DataTrigger Binding="{Binding IsImage}" Value="True">
<Setter Property="ContentTemplate">
<Setter.Value>
<DataTemplate>
<TextBlock Text="Text goes here"
Foreground="Red"/>
</DataTemplate>
</Setter.Value>
</Setter>
</DataTrigger>
<DataTrigger Binding="{Binding IsImage}" Value="False">
<Setter Property="ContentTemplate">
<Setter.Value>
<DataTemplate>
<TextBlock Text="{Binding Itemsource}"/>
</DataTemplate>
</Setter.Value>
</Setter>
</DataTrigger>
</Style.Triggers>
</Style>
</ContentControl.Style>
</ContentControl>
</DataTemplate>
</GridViewColumn.CellTemplate>
</GridViewColumn>
</GridView>
</ListView.View>
</ListView>
And my sample xaml.cs code
public partial class MainWindow : Window
{
public MainWindow()
{
InitializeComponent();
List<myClass> mc = new List<myClass>();
mc.Add(new myClass() { Itemsource = "test", IsImage = false });
mc.Add(new myClass() { Itemsource = "test", IsImage = true });
lvUsers.ItemsSource = mc;
}
}
class myClass
{
public string Itemsource { get; set; }
public bool IsImage { get; set; }
}
Issue: DataTriggers are working as expected but the binding inside the Triggers is giving me Empty
<TextBlock Text="{Binding Itemsource}"/>
I am expecting the above line to display test in the respective row but it displaying Empty row.
A: Couldn't figure out why the TextBlock's DataContext is null. Its probably due to the custom content template. But any how you can fix this by searching for the ContentControl ancestor type like this.
<DataTrigger Binding="{Binding IsImage}" Value="False">
<Setter Property="ContentTemplate">
<Setter.Value>
<DataTemplate>
<TextBlock Text="{Binding DataContext.Itemsource, RelativeSource={RelativeSource FindAncestor, AncestorType=ContentControl}}"/>
</DataTemplate>
</Setter.Value>
</Setter>
</DataTrigger>
This will fetch the data from the parent controls data binding.
Edit
It's because of ContentControl, you need to do content binding like - <ContentControl Content="{Binding}">. Then data context will be available as it is and <TextBlock Text="{Binding Itemsource}"/> will work.
<ContentControl Content="{Binding}">
<ContentControl.Style>
<Style TargetType="{x:Type ContentControl}">
<Style.Triggers>
<DataTrigger Binding="{Binding IsImage}" Value="True">
<Setter Property="ContentTemplate">
<Setter.Value>
<DataTemplate>
<TextBlock Text="Text goes here"
Foreground="Red"/>
</DataTemplate>
</Setter.Value>
</Setter>
</DataTrigger>
<DataTrigger Binding="{Binding IsImage}" Value="False">
<Setter Property="ContentTemplate">
<Setter.Value>
<DataTemplate>
<TextBlock Text="{Binding Itemsource}"/>
</DataTemplate>
</Setter.Value>
</Setter>
</DataTrigger>
</Style.Triggers>
</Style>
</ContentControl.Style>
</ContentControl>
| |
doc_23530227
|
Location
"10007 (40.71363051943297, -74.00913138370635)"
"10002 (40.71612146793143, -73.98583147024613)"
"10012 (40.72553802086304, -73.99789641059084)"
"10009 (40.72664935898081, -73.97911148500697)"
I need to separate them into three different columns like Zipcode, Latitude and Longitude.
I tried to doing this
extract(Location, c("Zip-Code","Latitude", "Longitude"), "\\(([^,]+), ([^)]+)\\)")
I want to use the latitude and longitude to plot the map using ggmap
Thanks
A: s.tmp = "10007 (40.71363051943297, -74.00913138370635)"
For ZIP:
gsub('([0-9]+) .*', '\\1', s.tmp)
For latitude:
gsub('.*\\((.*),.*', '\\1', s.tmp)
For longitude:
gsub('.*, (.*)\\).*', '\\1', s.tmp)
A: Basic regex extraction:
library(purrr)
c("10007 (40.71363051943297, -74.00913138370635)", "10002 (40.71612146793143, -73.98583147024613)",
"10012 (40.72553802086304, -73.99789641059084)", "10009 (40.72664935898081, -73.97911148500697)") %>%
stringi::stri_match_all_regex("([[:digit:]]+)[[:space:]]+\\(([[:digit:]\\.\\-]+),[[:space:]]+([[:digit:]\\.\\-]+)\\)") %>%
map_df(dplyr::as_data_frame) %>%
dplyr::select(zip=V2, latitude=V3, longitude=V4)
## # A tibble: 4 × 3
## zip latitude longitude
## <chr> <chr> <chr>
## 1 10007 40.71363051943297 -74.00913138370635
## 2 10002 40.71612146793143 -73.98583147024613
## 3 10012 40.72553802086304 -73.99789641059084
## 4 10009 40.72664935898081 -73.97911148500697
More readable:
library(purrr)
library(stringi)
library(dplyr)
library(purrr)
dat <- c("10007 (40.71363051943297, -74.00913138370635)",
"10002 (40.71612146793143, -73.98583147024613)",
"10012 (40.72553802086304, -73.99789641059084)",
"10009 (40.72664935898081, -73.97911148500697)")
zip <- "([[:digit:]]+)"
num <- "([[:digit:]\\.\\-]+)"
space <- "[[:space:]]+"
lp <- "\\("
rp <- "\\)"
comma <- ","
match_str <- zip %s+% space %s+% lp %s+% num %s+% comma %s+% space %s+% num %s+% rp
dat %>%
stri_match_all_regex(match_str) %>%
map_df(as_data_frame) %>%
select(zip=V2, latitude=V3, longitude=V4)
| |
doc_23530228
|
StreamSource source = new StreamSource(new StringReader(MESSAGE));
StreamResult result = new StreamResult(System.out);
webServiceTemplate.sendSourceAndReceiveToResult("http://someUri",
source, new SoapActionCallback("someCallBack"), result);
return result;
I get the result, But I want to extract it to some sort of xml or even as a string (Just want to see the contents in order to generate the response).
How can I do this?
A: Try this one:
try {
StreamSource source = new StreamSource(new StringReader("<xml>blabla</xml>"));
StringWriter writer = new StringWriter();
StreamResult result = new StreamResult(writer);
TransformerFactory tFactory = TransformerFactory.newInstance();
Transformer transformer = tFactory.newTransformer();
transformer.transform(source,result);
String strResult = writer.toString();
} catch (Exception e) {
e.printStackTrace();
}
A: You can get the reader of your StreamSource by using getReader(). You should then be able to use read(char[] cbuf) to write the contents of the stream to a character array which can easily be converted into a string and printed to the console if you wish.
A: If none of these works, try this
System.out.println(result.getOutputStream().toString());
Assuming you have this kind of structure ,
private static StreamResult printSOAPResponse(SOAPMessage soapResponse) throws Exception {
TransformerFactory transformerFactory = TransformerFactory.newInstance();
Transformer transformer = transformerFactory.newTransformer();
Source sourceContent = soapResponse.getSOAPPart().getContent();
System.out.print("\nResponse SOAP Message = ");
StreamResult result = new StreamResult(System.out);
transformer.transform(sourceContent, result);
return result;
}
You can try this way , although the same thing, wanted to point it out clearly
System.out.println(printSOAPResponse(soapResponse).getOutputStream().toString());
A: If you use Spring you could also use this way:
import org.springframework.core.io.Resource;
import org.apache.commons.io.IOUtils;
....
@Value("classpath:/files/dummyresponse.xml")
private Resource dummyResponseFile;
....
public String getDummyResponse() {
try {
if (this.dummyResponse == null)
dummyResponse = IOUtils.toString(dummyResponseFile.getInputStream(),StandardCharsets.UTF_8);
} catch (IOException e) {
logger.error("Fehler in Test-Service: {}, {}, {}", e.getMessage(), e.getCause(), e.getStackTrace());
throw new RuntimeException(e);
}
return dummyResponse;
}
| |
doc_23530229
|
-+ src/
---+ index.ts
---+ foobar.ts
---+ routes/
-----+ router.d.ts
-----+ router.ts
How would I be able to import only the router.ts file in index.ts and import router.d.ts in foobar.ts?
A: This isn't possible. TypeScript's module resolution algorithm will always prefer the .ts file over a .d.ts file of the same name, since it assumes the latter is a build output of the former. You should name your files differently if you want them to both be in the same compilation.
| |
doc_23530230
|
double fitness(const double x[], const int &dim) {
double sum = 0.0;
double x1[dim];
...
return sum;
}
The same code runs without an error on g++ 4.8. So how can I do the same thing under Visual Studio 2015.
Is my problem with compiler or code implementation?
A: double x1[dim]; is a VLA (variable length array). It is not standard C++.
The reason why it works on gcc is that gcc has an extension which allows VLAs. VS2015 doesn't, so that's why it doesn't compile (it has its own set extensions though, just not that one).
The best alternative is a std::vector:
std::vector<double> x1(dim); //array of size dim
If you can't use that, you can still use a manual dynamic array (although that is not recommended):
double* x1 = new double[dim];
delete[] x1; //Don't forget to delete it when you are done
//Alternatively, create a class which wraps the dynamic array, so you can use RAII
A: In the past, when I wanted to use VLA on MSVC, a workaround was to use alloca() whenever I wanted or needed VLA feature on a compiler that does not support VLA.
| |
doc_23530231
|
// Question code: the asker wants the constructor argument `txt` to reach
// MyInnerClass. NOTE(review): as written this cannot compile — `foo`
// declares no base class whose constructor accepts a MyInnerClass; the
// snippet is illustrative only.
class foo
{
// `txt` is received here but never forwarded to MyInnerClass — that is
// the question being asked.
public foo(String txt) : base(new MyInnerClass()) { }
private class MyInnerClass
{ }
}
The problem is that I want to pass my String txt into MyInnerClass, but I don't know how. The value of String txt comes from another class that will access this class foo. Any idea how?
A: You can either create a settable property on MyInnerClass, or give it a constructor that takes a string, as you've done with foo.
// Answer code: forward `txt` through a MyInnerClass constructor so the
// nested class can keep its own copy of the string.
class foo
{
// Pass the string straight into the inner class instance handed to base.
public foo(String txt) : base(new MyInnerClass(txt)) { }
private class MyInnerClass
{
// Private copy of the text supplied by the outer constructor.
private string text;
public MyInnerClass(string txt)
{
this.text = txt;
}
}
}
A: just pass it into constructor of base.
// Answer code: same idea — hand `txt` to the base class via a
// MyInnerClass constructor argument.
// Fixed: in the original paste the trailing "//some code lines } }"
// comment swallowed the closing braces, leaving the snippet unparseable.
class foo {
    public foo(String txt) :
        base(new MyInnerClass(txt)) {
    }

    private class MyInnerClass {
        // some code lines
    }
}
| |
doc_23530232
|
The following code work greatly with file or directory under POSIX system:
def flock(fd, blocking=False, exclusive=False):
    """Acquire a POSIX advisory lock on *fd* via fcntl.flock.

    Args:
        fd: open file object (or raw descriptor) to lock.
        blocking: wait for the lock when True; otherwise fcntl raises
            OSError/BlockingIOError immediately if the lock is contended.
        exclusive: take an exclusive (write) lock instead of the default
            shared (read) lock.
    """
    mode = fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH
    if not blocking:
        mode |= fcntl.LOCK_NB
    fcntl.flock(fd, mode)
But I only find a way to perform lock access for file, not directory with the following code:
def flock(fd, blocking=False, exclusive=False):
    """Lock *fd* on Windows via msvcrt.locking.

    Bug fix: the original flag selection was inverted — per the Python
    msvcrt docs, LK_NBLCK is the NON-blocking mode (raises OSError at once
    if the region is locked) while LK_LOCK is the blocking one (retries
    once per second, up to 10 times, before raising OSError).

    Note: msvcrt.locking has no shared/exclusive distinction, so the
    `exclusive` parameter is accepted only for signature compatibility
    with the POSIX variant and is otherwise ignored.
    """
    if blocking:
        # Blocking request: LK_LOCK retries before giving up.
        flags = msvcrt.LK_LOCK
    else:
        # Non-blocking request: LK_NBLCK fails immediately when contended.
        flags = msvcrt.LK_NBLCK
    # Lock the file's current extent. NOTE(review): an empty file yields a
    # zero-byte region — consider max(size, 1) if that case matters.
    msvcrt.locking(fd.fileno(), flags, os.path.getsize(fd.name))
Have you got any idea how to improve this code and be able to lock directory access ?
Bertrand
A: I don't believe it's possible to use flock() on directories in windows. PHPs docs on flock() indicate that it won't even work on FAT32 filesystems.
On the other hand, Windows already tends to not allow you to delete files/directories if any files are still open. This, plus maybe using ACLs intelligently, might get you a 95% equivalent solution.
A: You probably can do something like this to indirectly lock a directory with the latter flock function.
for file in os.listdir(dir):
f = open(file)
flock(f)
This is a limited version, since the user will be able to create new files in the directory.
A: Yep you are right, at least I can try to lock every file of the directory but it can be painful because I need to walk into all the subdirectories of my directory.
In POSIX system it's easy because directories are seen like files so, no problem with that. But in Windows when I try to open a directory, it doesn't really like that.
open(dirname)
raises exception:
OSError: [Errno 13] Permission denied: dirname
I am not really sure my solution is actually the good way to do it.
| |
doc_23530233
|
The usual requirements for such a class are that it should be startable, stoppable and restartable. Does anyone see any issues with this pattern that I use?
/**
 * A startable / stoppable / restartable worker.
 *
 * Fixes applied (both pointed out in the answers below):
 *  - {@code _exit} is now {@code volatile}, so a stop() issued from another
 *    thread is guaranteed to become visible to the worker loop; without it
 *    the Java memory model allows the worker to cache the stale value.
 *  - start() and stop() are {@code synchronized}, closing the race in which
 *    two concurrent start() calls could both see {@code _thread == null}
 *    and leak an untracked Thread.
 */
public class MyThread implements Runnable {

    // volatile: written by the controlling thread, read by the worker loop.
    private volatile boolean _exit = false;
    private Thread _thread = null;

    /** Starts the worker thread if one is not already running. */
    public synchronized void start () {
        _exit = false;
        if (_thread == null) {
            _thread = new Thread(this, "MyThread");
            _thread.start();
        }
    }

    /** Worker loop: runs until stop() raises the exit flag. */
    public void run () {
        while (!_exit) {
            //do something
        }
    }

    /** Signals the worker to exit and interrupts any blocking call it is in. */
    public synchronized void stop () {
        _exit = true;
        if (_thread != null) {
            _thread.interrupt();
            _thread = null;
        }
    }
}
I am looking for comments around if I am missing something, or if there is a better way to write this.
A: I would advise against use of the Thread class directly. The Executors framework available since Java 5 simplifies a lot of the issues involved in threading. The idea is that your class would perform the task required, and all of the threading functionality is managed by an Executor, saving you the effort of dealing with the complexity of threading.
A good intro can on the Java Executors framework can be found here.
A: *
*Make the boolean flag volatile.
*When you call stop don't interrupt the thread, but just set the _exit flag to true.
*If you are going to interrupt, then put a try/catch/finally around the while loop and catch the interrupt exception, cleanup the state of the objects you're working with and exit. And be careful not to cause a deadlock!
*Finally, you could use a CountDownLatch, or something of the sort, in order to signal that the thread finished.
The other thing is contention... you're not showing anything that will be modified by the thread, so depending on what gets modified you might have to synchronize (lock, etc..).
A: Well, the class itself is not thread safe. That's not necessarily a problem as long as that's documented and observed in code. If it's not you could lose references to Thread objects that will run in parallel, if two consumers get inside the start() method at the same time.
Flags used as semaphores should also of course be made volatile.
The API of the class is a little bit strange. You implement Runnable, saying to other classes "use my run method to invoke me" but then mimic the start method of a full Thread object. You may want to hide the run method inside an inner class. Otherwise it's somewhat confusing how one is intended to use the object.
And as always, any pattern that involves the words new Thread() rather than using a pool is somewhat suspect in the abstract. Would need to know about what you're actually doing with it to really comment intelligently on that though.
A: 1) You should declare _exit as volatile to prevent thread visibility issues.
If stop() may be called by multiple threads, _thread should also be volatile.
2) The call to interrupt will throw an InterruptedException only for interruptible blocking operations. You may need to take more actions, depending on what blocking operations you perform in the thread
3) If you want the class instances to be re-usable, you should set _exit to false in the start() method.
A: I'd prefer a guarded block (http://java.sun.com/docs/books/tutorial/essential/concurrency/guardmeth.html) on 'this'. You can notify the thread to come out of the loop very quickly, and then check the 'finished' var again. Where you'd normally use Thread.sleep(x), you use this.wait(x) with a synchronized (this) block around the entire loop. You also need to be in a synchronized (this) block to call this.notifyAll().
A: The _exit variable should be volatile. Also, it would be useful practice to follow a more normal coding convention. :-)
| |
doc_23530234
|
Here is the front-end code:
/**
 * Front-end call that saves edits to an artwork by POSTing the fields as a
 * JSON body to the `updateArtwork` cloud function.
 *
 * @param artworkId - id of the artwork document to update
 * @param title - new title
 * @param medium - new medium
 * @param description - optional new description
 * @param price - optional new price
 * @param printPrice - optional new print price
 * @returns the parsed JSON response body
 * @throws Error with a generic user-facing message on any failure
 */
async function updateArtwork(
artworkId: string,
title: string,
medium: string,
description?: string,
price?: number,
printPrice?: number
): Promise<string> {
try {
const res = await fetch(`${config.apiUrl}updateArtwork`, {
method: 'POST',
body: JSON.stringify({
artworkId,
title,
description,
price,
printPrice,
medium,
}),
headers: new Headers({ 'Content-Type': 'application/json' }),
});
// NOTE(review): fetch() resolves with a Response object even for HTTP
// error statuses, so `res` is always truthy and the else branch is
// unreachable — `res.ok` is presumably the intended guard. Confirm.
if (res) {
return res.json();
} else {
throw new Error('Could not save changes to this artwork');
}
} catch (err) {
// The original error is discarded and replaced with a generic message.
throw new Error('Could not save changes to this artwork');
}
}
Here's the firebase function:
/**
 * HTTPS cloud function that updates an artwork document in Firestore with
 * the fields sent in the request body.
 *
 * NOTE(review): this is the broken version discussed in the question. Only
 * the success path is wrapped in cors(), so the CORS preflight request is
 * apparently what reaches doChecks() — the logged headers below include
 * access-control-request-method and the body is {}. The accepted fix is to
 * wrap the entire handler in cors(); see the edited version further down.
 */
export const updateArtwork = functions.https.onRequest(
async (request, response) => {
// Logs body/headers/content-type for debugging (see doChecks below).
doChecks(request);
const {
artworkId,
title,
description,
price,
printPrice,
medium,
} = request.body;
try {
await db.collection('artworks').doc(artworkId).update({
title,
description,
price,
printPrice,
medium,
});
return cors(request, response, () => {
return response
.status(200)
.json('Successfully saved changes to artist');
});
} catch (error) {
// NOTE(review): status(500) only sets the code; nothing ever calls
// send()/end(), so this error response is never completed.
return response.status(500);
}
}
);
The function doChecks() just logs various things about the request to the console, including the headers and the body. Bafflingly, according to these logs request.body is {}, even though when I check the dev tools network tab, the body is populated. Similarly, the logs tell me that there is no content-type header on the request, while dev tools tells me the content-type is application/json, which is as expected.
In addition, the above call in the front-end makes two API calls, as opposed to the expected one. I know it does this because I did the following:
*
*set a breakpoint on the const res = await fetch... line
*hit that breakpoint and checked the dev tools network tab to confirm that no requests have yet been made
*clicked "step over" in dev tools to step to the next line (if (res)...)
*checked the network tab and saw two POST calls to the same endpoint
What makes this even more strange is that none of the above code has changed recently. It's possible that recent environment changes (upgrading npm packages, etc.) have somehow messed things up, but it's unclear what effect they would have.
Any help on this would be greatly appreciated!
EDIT
Here is more detail on the doChecks() function mentioned above. Here is the function:
// Dumps request diagnostics — body, headers, and the content-type header —
// to the console so we can see what the function actually received.
const doChecks = (request: any) => {
  const sections: Array<[string, unknown]> = [
    ['request.body:', request.body],
    ['request.headers:', request.headers],
  ];
  for (const [label, value] of sections) {
    console.log(label);
    console.log(value);
  }
  console.log(`content type is ${request.headers['content-type']}`);
};
For the above call, here is what is logged:
> request.body:
> {}
> request.headers:
> {
> host: 'localhost:5001',
> connection: 'keep-alive',
> accept: '*/*',
> 'access-control-request-method': 'POST',
> 'access-control-request-headers': 'content-type,pragma',
> origin: 'http://localhost:3001',
> 'sec-fetch-mode': 'cors',
> 'sec-fetch-site': 'same-site',
> 'sec-fetch-dest': 'empty',
> referer: 'http://localhost:3001/cart',
> 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36',
> 'accept-encoding': 'gzip, deflate, br',
> 'accept-language': 'en-US,en;q=0.9'
> }
> content type is undefined
A: After much tearing out of hair, I've found what looks to be the solution. I simply wrapped all the business logic of the firebase function inside the cors() call as below. None of the docs or examples I've seen have done this, so I'm not sure why this works. If anyone knows, please let me know!
/**
 * Working version of the artwork-update function: the entire handler is
 * wrapped in cors(), so the middleware can answer the CORS preflight
 * (OPTIONS) request before the Firestore logic runs — which is why this
 * fixes the empty-body symptom described in the question.
 */
export const updateArtwork = functions.https.onRequest(
async (request, response) => {
return cors(request, response, async () => {
try {
const {
artworkId,
title,
description,
price,
printPrice,
medium,
} = request.body;
await db.collection('artworks').doc(artworkId).update({
title,
description,
price,
printPrice,
medium,
});
} catch (err) {
// NOTE(review): this inner cors() wrapper is redundant — execution is
// already inside the outer cors() callback; responding directly would
// behave the same.
return cors(request, response, () => {
response.status(500).end(err);
});
}
response.status(200).send('Successfully saved changes to artist');
});
}
);
| |
doc_23530235
|
Example: We've been using Expert PDF components in websites from http://www.html-to-pdf.net
The product has a managed ephtmltopdf.dll assembly that relies on the unmanaged "helper" DLL epengine.dll also sitting alongside in the web app's bin/ folder, and sometimes an exception is thrown when the app starts up...
Under Kentico 9 (and earlier versions) an epengine exception is thrown and appears in the Kentico Event Log whenever the website starts up.
Under Kentico 10 an epengine exception occurs and prevents the website from running altogether.
I'm trying to correlate the difference between these two behaviours under the same component configuration.
This is the epengine CMS Event log entry under Kentico 9 and earlier versions (does not prevent website from running):
Event type: Error
Event time: 7/18/2017 4:00:06 AM
Source: Discovery
Event code: E:\Kentico_V9\CMS\bin\epengine.dll
User ID: 65
User name: public
Description: Could not load file or assembly 'epengine.dll' or one of its dependencies. The module was expected to contain an assembly manifest.
The file E:\Kentico_V9\CMS\bin\epengine.dll is not an assembly or the assembly was compiled for a later version of the .NET runtime.
Machine name: OX
Event URL: /register/all
URL referrer: /Public-(1)/Search-Results
User agent: Mozilla/5.0 (Windows NT 6.1; Trident/7.0; BOIE9;ENUS; rv:11.0) like Gecko
Under Kentico 10 this is the epengine error that prevents the site from running.
*** Assembly Binder Log Entry (17/07/2017 @ 4:36:56 PM) ***
The operation failed.
Bind result: hr = 0x80131018. No description available.
Assembly manager loaded from: C:\Windows\Microsoft.NET\Framework\v4.0.30319\clr.dll
Running under executable C:\Windows\SysWOW64\inetsrv\w3wp.exe
--- A detailed error log follows.
=== Pre-bind state information ===
LOG: DisplayName = epengine
(Partial)
WRN: Partial binding information was supplied for an assembly:
WRN: Assembly Name: epengine | Domain ID: 2
WRN: A partial bind occurs when only part of the assembly display name is provided.
WRN: This might result in the binder loading an incorrect assembly.
WRN: It is recommended to provide a fully specified textual identity for the assembly,
WRN: that consists of the simple name, version, culture, and public key token.
WRN: See whitepaper http://go.microsoft.com/fwlink/?LinkId=109270 for more information and common solutions to this issue.
LOG: Appbase = file:///C:/inetpub/wwwroot/website/CMS/
LOG: Initial PrivatePath = C:\inetpub\wwwroot\website\CMS\bin
LOG: Dynamic Base = C:\Windows\Microsoft.NET\Framework\v4.0.30319\Temporary ASP.NET Files\root\672d45d4
LOG: Cache Base = C:\Windows\Microsoft.NET\Framework\v4.0.30319\Temporary ASP.NET Files\root\672d45d4
LOG: AppName = f7cc5d08
Calling assembly : (Unknown).
===
LOG: This bind starts in default load context.
LOG: Using application configuration file: C:\inetpub\wwwroot\website\CMS\web.config
LOG: Using host configuration file: C:\Windows\Microsoft.NET\Framework\v4.0.30319\aspnet.config
LOG: Using machine configuration file from C:\Windows\Microsoft.NET\Framework\v4.0.30319\config\machine.config.
LOG: Policy not being applied to reference at this time (private, custom, partial, or location-based assembly bind).
LOG: Attempting download of new URL file:///C:/Windows/Microsoft.NET/Framework/v4.0.30319/Temporary ASP.NET Files/root/672d45d4/f7cc5d08/epengine.DLL.
LOG: Attempting download of new URL file:///C:/Windows/Microsoft.NET/Framework/v4.0.30319/Temporary ASP.NET Files/root/672d45d4/f7cc5d08/epengine/epengine.DLL.
LOG: Attempting download of new URL file:///C:/inetpub/wwwroot/website/CMS/bin/epengine.DLL.
LOG: Assembly download was successful. Attempting setup of file: C:\inetpub\wwwroot\website\CMS\bin\epengine.dll
LOG: Entering download cache setup phase.
ERR: Error extracting manifest import from file (hr = 0x80131018).
ERR: Setup failed with hr = 0x80131018.
ERR: Failed to complete setup of assembly (hr = 0x80131018). Probing terminated.
*** Assembly Binder Log Entry (17/07/2017 @ 4:36:56 PM) ***
The operation failed.
Bind result: hr = 0x80131018. No description available.
Assembly manager loaded from: C:\Windows\Microsoft.NET\Framework\v4.0.30319\clr.dll
Running under executable C:\Windows\SysWOW64\inetsrv\w3wp.exe
--- A detailed error log follows.
=== Pre-bind state information ===
LOG: DisplayName = epengine
(Partial)
WRN: Partial binding information was supplied for an assembly:
WRN: Assembly Name: epengine | Domain ID: 2
WRN: A partial bind occurs when only part of the assembly display name is provided.
WRN: This might result in the binder loading an incorrect assembly.
WRN: It is recommended to provide a fully specified textual identity for the assembly,
WRN: that consists of the simple name, version, culture, and public key token.
WRN: See whitepaper http://go.microsoft.com/fwlink/?LinkId=109270 for more information and common solutions to this issue.
LOG: Appbase = file:///C:/inetpub/wwwroot/website/CMS/
LOG: Initial PrivatePath = C:\inetpub\wwwroot\website\CMS\bin
LOG: Dynamic Base = C:\Windows\Microsoft.NET\Framework\v4.0.30319\Temporary ASP.NET Files\root\672d45d4
LOG: Cache Base = C:\Windows\Microsoft.NET\Framework\v4.0.30319\Temporary ASP.NET Files\root\672d45d4
LOG: AppName = f7cc5d08
Calling assembly : (Unknown).
===
LOG: This bind starts in default load context.
LOG: Using application configuration file: C:\inetpub\wwwroot\website\CMS\web.config
LOG: Using host configuration file: C:\Windows\Microsoft.NET\Framework\v4.0.30319\aspnet.config
LOG: Using machine configuration file from C:\Windows\Microsoft.NET\Framework\v4.0.30319\config\machine.config.
LOG: Policy not being applied to reference at this time (private, custom, partial, or location-based assembly bind).
LOG: Attempting download of new URL file:///C:/Windows/Microsoft.NET/Framework/v4.0.30319/Temporary ASP.NET Files/root/672d45d4/f7cc5d08/epengine.DLL.
LOG: Attempting download of new URL file:///C:/Windows/Microsoft.NET/Framework/v4.0.30319/Temporary ASP.NET Files/root/672d45d4/f7cc5d08/epengine/epengine.DLL.
LOG: Attempting download of new URL file:///C:/inetpub/wwwroot/website/CMS/bin/epengine.DLL.
LOG: Assembly download was successful. Attempting setup of file: C:\inetpub\wwwroot\website\CMS\bin\epengine.dll
LOG: Entering download cache setup phase.
ERR: Error extracting manifest import from file (hr = 0x80131018).
ERR: Setup failed with hr = 0x80131018.
ERR: Failed to complete setup of assembly (hr = 0x80131018). Probing terminated.
Whenever I try any of the following in both Kentico 9 (or earlier) and in 10, the epengine error always surfaces earlier in Kentico 10 preventing the website from running instead of showing inside the Kentico CMS Event Log.
*
*use the Nuget sources for epengine instead
*use to the latest release of the epengine component and try older versions too (between Expert 9.0.5 - 11.0)
*upgrade/downgrade .NET versions
*use same app pool settings, .NET version, ACL permissions (and vary these to test)
*A new/base install of Kentico 10 (still surfaces the error earlier)
The difference in timing of error expression seems to be whether I use this component in Kentico 9 or 10.
What I would like to know is if there's a difference in assembly loading, probing, or handling of exceptions that might help explain why this component's error would stop the Kentico 10 ASP.NET website from loading, but NOT stop a Kentico 9 website from loading with the same IIS & .NET configuration.
(Note that I'm also tackling the PDF component error head on by contacting the vendor - ultimately resolution would be the best solution).
A: Not ideal. This solution is a workaround to the problem of the ephtmltopdf.dll and epengine.dll assemblies throwing an error when loading from the app's bin/ folder during web application spin-up causing the Kentico 10 website to not load.
This solution is based on @rocky's comment under the original question.
This effectively causes the Expert PDF component to load after the Kentico site is already running.
Not all usages of the PDF generator have been tested. The code sample here will successfully download the given URL as a PDF document using Expert PDF component in an environment in which a direct reference to the component does not work. (it works on my computer)
Steps to work around -
*
*Remove all assembly references from your application for the Expert PDF components; also remove the C# using statements. You will get compile errors where the PDF classes and constructs are used in code.
*Dynamically load the Expert PDF assembly from outside the bin when you need it (as shown in the code sample - see Assembly.LoadFile).
*Dynamically instantiate the component (as shown in the code sample - see dynamic + CreateInstance )
*The remainder of your Expert PDF code can remain the same.
// In ~/TestPdf.aspx.cs code-behind page, inside a Kentico 10 website
namespace CMSApp
{
using System;
//using ExpertPdf.HtmlToPdf; // << Namespace no longer available to C# compiler.
using System.Reflection;
// Demo page for the workaround: load the Expert PDF assembly at runtime
// (after the Kentico site has started) instead of referencing it at
// compile time, which crashed the Kentico 10 app on startup.
public partial class test1 : System.Web.UI.Page
{
// Downloads a URL as a PDF and streams it to the browser as a file
// attachment, using a dynamically loaded PdfConverter.
protected void Page_Load(object sender, EventArgs e)
{
// var converter = new ExpertPdf.HtmlToPdf.PdfConverter(); // << Type no longer available to C# compiler.
// Dynamically load the Expert PDF Assembly, Type and an instance...
// NOTE(review): LoadFile with an absolute path is brittle across
// environments — see the addendum below, which switches to probing
// via CMSDependencies and Assembly.Load by simple name.
Assembly assemb = System.Reflection.Assembly.LoadFile(@"C:\KenticoBaseInstalls\Kentico10.2-app\LibMore\ExpertPdf-HtmlToPdf-v11.0.0\Bin\.NET_4.0\ephtmltopdf.dll");
dynamic converter = assemb.CreateInstance("ExpertPdf.HtmlToPdf.PdfConverter", true);
// Continue to use old PDF code but without compiler type checks and VS Editor Intellisense.
byte[] pdfBytes = converter.GetPdfBytesFromUrl("https://www.iana.org/domains/reserved");
// Stream the bytes back as a downloadable attachment.
Response.ClearHeaders();
Response.ContentType = "application/octet-stream";
Response.AppendHeader("Content-Disposition", "attachment; filename=example.pdf");
Response.BinaryWrite(pdfBytes);
Response.Flush();
// End() aborts further page processing so nothing else is appended.
Response.End();
}
}
}
NOTE: There are other constructs in the ExpertPDF assembly that will fail and which you will have to resolve like the above example such as:
*
*static UnitsConverter.PixelsToPoints(..)
*HtmlToPdfArea class
*ImageArea class
*PdfPageSize enum
*etc.
Addendum Notes to Solution
In the above code sample System.Reflection.Assembly.LoadFile(..) is prone to location issue between different environments or if the file is moved. A more robust substitute is to use application base subdirectories to get the assembly as follows. Note that Kentico 10 uses this strategy and we are piggybacking on their CMS folder structure:
Create an additional subfolder in ~/CMSDependencies that can be probed in the website as follows by adding the Expert PDF DLLs (both managed and unmanaged) into it. You will end up with this tree structure.
+---CMSDependencies
+---ExpertPdfHtmlToPdf.11.0.0
epengine.dll
ephtmltopdf.dll
Add the name of the Expert Pdf folder to the privatePath of the probing element in web.config, separating it from the existing values with a semicolon rather than replacing them.
<runtime>
<assemblyBinding xmlns="urn:schemas-microsoft-com:asm.v1">
<probing privatePath="CMSDependencies\Newtonsoft.Json.6.0.0.0;CMSDependencies\ExpertPdfHtmlToPdf.11.0.0"/>
</assemblyBinding>
</runtime>
Load the assembly in C# by simple name as such:
Assembly assemb = System.Reflection.Assembly.Load(new AssemblyName("ephtmltopdf"));
A: The issue was resolved by hotfixing the website to Kentico 10.0.29 (highest hotfix at time of this writing), as per suggestion from Kentico Support.
Communication included below for reference. Bold is my emphasis on the solution.
From: Kentico Support
Sent: July-25-17 9:46 AM
To: John Kane
Subject: RE: Expert PDF component not compatible with Kentico 10 unless loaded dynamically Ticket:0072002734
Hello John,
Thank you for your message.
Upon some investigation it is possible that this issue was fixed in Kentico 10 hotfix 10.0.25. Could you please try applying the latest hotfix?
The bug was related to the application start and our developers think this could be related.
Also, could you please check all occurrences of native dll references, including a web.config file. This comment is seems to be related and accurate - Could not load file or assembly '\bin\ABCpdf8-64.dll' or one of its dependencies. The module was expected to contain an assembly manifest
Best regards,
Juraj Ondrus
Tech. support lead
I've also verified the Expert PDF component runs properly on a hotfixed base Kentico 10 install by downloading a URL and converting it to a PDF document programatically. (See Expert PDF website and documentation for code samples.)
Quickest way to Repro this Issue
Because we are in the middle of a Kentico upgrade from 8.2 to 10 and solving multiple issues with custom code & custom components, I'm leaving the simplest repro scenario here that proves the problem when using Expert PDF, and the solution, for anybody else who might have a similar issue, during an upgrade or first install.
Repro Issue
*
*Use the Kentico 10 installer to create a new website application.
*Verify the website is running Kentico 10 with no hotfixes, will report as 10.0.0.
*Using Nuget, reference the ExpertPdfHtmlToPdf package (version 9.5 through 11 is what I tinkered with).
*Compile and run the website - the website does not load and instead displays a .NET error with a "epengine" message like posted in the question.
Repro Solution
*
*Apply the latest available hotfix to the Kentico 10 install, following Kentico hotfix instructions.
*Verify site now reports as 10.0.x where x is the applied hotfix number. Ensure it's >= #25 as mentioned by Kentico support.
*Compile and run the website - the error is gone and the site runs properly.
| |
doc_23530236
|
I want to prevent the user of the device from being able to copy the firmware code from the device via ADB.
It's a USER build variant.
The user must have ADB access and can't limit it to specific commands.
Currently to get the code he could just do, for example:
adb pull /system/framework/services.jar
How can I modify AOSP to limit this access? Preferably l would like to prevent access to any other way to get the code from a running device.
Note:
*
*I know obfuscation is an option, rather have a stronger prevention.
*The user is prevented from going into bootloader mode.
A: You can restrict shell process to which all partitions it can get access to. This can be achieved by making the changes in SELinux policy.
Reference:
https://source.android.com/security/selinux/customize
| |
doc_23530237
|
Can this be achieved without development intervention on the application side?
Is this possible to do it with Tomcat? Thank you.
A: You can do it through Tomcat default web.xml configuration
<session-config>
<session-timeout>10</session-timeout> <!-- 10 minutes -->
</session-config>
For full reference: https://tomcat.apache.org/tomcat-5.5-doc/appdev/web.xml.txt
The file can be located in conf/web.xml (relative to your tomcat installation)
Edit
You can also invalidate a given session using session.invalidate() method
A: You can set the session timeout when you create a session using setMaxInactiveInterval(int interval);
either make it constant or fetch it from the database, depending on each user's required timeout.
| |
doc_23530238
|
A: Just change inside launch.json:
"stopOnEntry": false,
| |
doc_23530239
|
A: If the Javascript is yours then instead of calling the alert you could instead call a function that calls Objective C and invoke an iOS native alert.
If the Javascript isn't yours then using UIWebView you can inject some Javascript to override the default behaviour and change it to call an iOS native alert i.e. something like this
// Replace the page's alert() so its message is forwarded to native code:
// navigating to a custom scheme is intercepted by the UIWebView delegate's
// shouldStartLoadWithRequest, where the message is extracted and shown as
// a native iOS alert.
// Fixed: the original had a stray trailing quote after `message`, which
// made the snippet a syntax error.
window.alert = function (message) {
    window.location = "myScheme://" + message;
};
Then look for myScheme and extract message in UIWebView's shouldStartLoadWithRequest
| |
doc_23530240
|
I have referred to various examples, one of the closest is here: Using NSPredicate to filter an NSArray based on NSDictionary keys. However, I don't wish to have a value to the key. My problem is that I want to find the key first. I tried different syntaxes, but it did not help.
What I have done so far:
NSString *key = @"open_house_updated_endhour";
NSPredicate *predicateString = [NSPredicate predicateWithFormat:@"%K", key];
//NSPredicate *predicateString = [NSPredicate predicateWithFormat:@"%K contains [cd]", key]; Doesn't work.
//NSPredicate *predicateString = [NSPredicate predicateWithFormat:@"%K == %@", key]; Won't work because it expects a value here.
NSLog(@"predicate %@",predicateString);
NSArray *filtered = [updatedDateAndTime filteredArrayUsingPredicate:predicate]; // updatedDateAndTime is the NSMutableArray
A: A dictionary delivers nil as value for absent key. So simply compare the key against nil.
NSPredicate *predicateString = [NSPredicate predicateWithFormat:@"%K==NULL", key]; // Dictionaries not having the key
NSPredicate *predicateString = [NSPredicate predicateWithFormat:@"%K!=NULL", key]; // Dictionaries having the key
A: Try it:
// Build a one-element array of dictionaries and filter it on the "name" key.
NSArray *Myarray = [NSArray arrayWithObject:[NSMutableDictionary dictionaryWithObject:@"my hello string" forKey:@"name"]];
NSArray *filteredarray = [Myarray filteredArrayUsingPredicate:[NSPredicate predicateWithFormat:@"(name == %@)", @"my hello string"]];
// Fixed: NSLog's format argument must be an NSString literal (@"..."),
// not a C string — the original was missing the leading @.
NSLog(@"%@", filteredarray);
Another example :
NSString *mycategory = @"iamsomeone";
NSArray *myitems = @[@{ @"types" : @[@"novel", @"iamsomeone", @"dog"] },
@{ @"types" : @[@"cow", @"iamsomeone-iam", @"dog"] },
@{ @"types" : @[@"cow", @"bow", @"cat"] }];
NSPredicate *mypredicate = [NSPredicate predicateWithBlock:^BOOL(id evaluatedObject, NSDictionary *bindings) {
NSArray *categories = [evaluatedObject objectForKey:@"types"];
return [categories containsObject:mycategory];
}];
NSArray *outpuArray = [myitems filteredArrayUsingPredicate:mypredicate];
NSLog(@"hello output:%@",outpuArray);
| |
doc_23530241
|
// A tree node: a named, typed element that owns zero or more child lists.
// NOTE(review): SimpleType is declared elsewhere, and these members are
// private by default — the snippet is illustrative question code.
class SimpleNode
{
string Name;
SimpleType Type;
List<SimpleList> Children;
}
// An intermediate grouping level between a node and its child nodes;
// each SimpleNode owns SimpleLists, which in turn own SimpleNodes.
class SimpleList
{
//some list-based properties
List<SimpleNode> Items;
}
I need to be able to look up and edit nodes without resorting to:
Node.Children[0].Items[0].Children[1].Items[3]
or the like. I attempted this via a function that returned an element of the tree, but editing the returned element didn't seem to affect the actual tree. Adding to the complexity is that I don't want to preclude two identical nodes to exist in different points in the tree.
As is obvious, this is my first time with a tree structure and could really use some help.
Upon further investigation (per Drew), the composite pattern concept makes sense, espcially the method forwarding/aggregate return concepts. The trick here is that I might have two Nodes with identical identifiers (Type & Name) at two different points in the tree.
The code that didn't work was something like (it's long gone):
Node GetNode(Type type, string name)
{ // returns node }
I tried something like:
Node nodeToEdit = GetNode(params);
nodeToEdit.Name = "New Name";
or
nodeToEdit.Children.Add(new Node());
but it didn't seem to take in the tree. If it should have, let me know because I must have missed something.
A: As also stated in the comments, when working with trees you sooner or later end up walking the tree and performing actions on the items. Typically, you use some sort of recursive function that receives a node as a parameter and calls itself with the children.
In your sample, there are two kinds of items in the tree that do not share a common interface. This adds some complexity as you have to inspect two types of items.
A simple function to walk the tree is shown in the sample below. The method receives a node as a parameter and the level of the node. Also, there are two callbacks: one for the SimpleNode items and another that is called for the SimpleList items. As the process of walking the tree is the same, you can share this code and provide specific methods that handle the elements of the tree for a scenario.
/// <summary>
/// Pre-order recursive walk of the SimpleNode/SimpleList tree rooted at
/// <paramref name="node"/>, invoking the optional callbacks for every node
/// and every list encountered. Levels advance by 2 per node generation
/// because the intermediate SimpleList occupies its own level (+1).
/// </summary>
void TreeWalk(SimpleNode node, int level, Action<SimpleNode, int> inspectNode, Action<SimpleList, int> inspectList)
{
    // An absent subtree contributes nothing.
    if (node == null)
        return;

    // Visit the node itself before descending.
    if (inspectNode != null)
    {
        inspectNode(node, level);
    }

    foreach (var childList in node.Children)
    {
        // The list sits one level below its owning node.
        if (inspectList != null)
        {
            inspectList(childList, level + 1);
        }

        // And the list's items sit one level below the list.
        foreach (var childNode in childList.Items)
            TreeWalk(childNode, level + 2, inspectNode, inspectList);
    }
}
As a node is required, the methods first validates the input parameter. Then it calls the callback method that can perform the action on the SimpleNode. Examples for such an action could be to change the node value if certain criteria are met.
The method then inspects the children of the node. First, it calls the callback method for the SimpleList items and afterwards iterates the Items of the simpleList. For each node, it calls itself again for the child. The level is also incremented.
The following sample shows how to change a node value if certain criteria are met. As there is no action to perform for the SimpleList items, null is used as parameter for the corresponding callback.
// Change node "11" to "11x"
var nameToLookFor = "11";
var newName = "11x";
TreeWalk(data, 0,
(node, level) => {
if (node.Name == nameToLookFor)
node.Name = newName;
},
null);
For testing reasons, it is helpful to print the contents of the tree. You can achieve this with another set of callback methods:
TreeWalk(root, 0,
(node, level) => Console.WriteLine(new String(' ', level * 2) + string.Format("Node: {0} - {1}", node.Name, node.Type)),
(list, level) => Console.WriteLine(new String(' ', level * 2) + "List"));
| |
doc_23530242
|
// Change-event handler for the file <input>: reads the chosen image with a
// FileReader, then uploads the original File object once the read finishes.
onFilePicked (e) {
const files = e.target.files
const fr = new FileReader ()
fr.readAsDataURL(files[0])
// NOTE(review): the FileReader's data-URL result is never used — the raw
// File is what gets submitted — so the read only delays the upload until
// the file is readable. Attaching the 'load' listener after calling
// readAsDataURL() still works because the event fires asynchronously.
fr.addEventListener('load', () => {
let imageFile = files[0] // this is an image file that can be sent to server...
this.submit(imageFile);
})
}
When user changes/chooses file, this function gets called, then submit function in which I got something like this:
this.formData = new FormData();
this.formData.append('image', imageFile);
await axios.post(url, this.formData);
Now, in api, I got something like this:
file_get_contents($request->file('image'))
Problem: This throws an exception stating that file_get_contents(): Filename cannot be empty. The real issue is that from desktop Chrome, it works fine (not throwing any exception), but from mobile Chrome, this is what happens.
I am also using Sentry Error handling and that thrown exception I talked about I receive it in Sentry. Sentry also tells me what the image looks like in request:
{
client_filename: IMG_20190908_131814.jpg,
client_media_type: ,
size: 0
}
| |
doc_23530243
|
from
<LinearLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:layout_marginStart="@dimen/_10sdp"
android:layout_marginRight="@dimen/_10sdp"
android:gravity="center"
android:orientation="horizontal">
<ImageView
android:id="@+id/MapCenter"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_above="@id/rvMap"
android:layout_centerInParent="true"
android:src="@drawable/map_center" />
<androidx.recyclerview.widget.RecyclerView
android:id="@+id/rvMap"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:layout_marginBottom="@dimen/_40sdp" />
</RelativeLayout>
to
<LinearLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:layout_marginStart="@dimen/_10sdp"
android:layout_marginRight="@dimen/_10sdp"
android:gravity="center"
android:orientation="horizontal">
<ImageView
android:id="@+id/MapCenter"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_above="@id/rvMap"
android:layout_centerInParent="true"
android:src="@drawable/map_center" />
<androidx.recyclerview.widget.RecyclerView
android:id="@+id/rvMap"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:layout_marginBottom="@dimen/_40sdp" />
</fragment>
When I changed to fragment, it is not properly showing the inner components; please let me know how to use UI components inside a fragment.
A: Your LinearLayout widget is not closed; do this:
<LinearLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:layout_marginStart="@dimen/_10sdp"
android:layout_marginRight="@dimen/_10sdp"
android:gravity="center"
android:orientation="horizontal">
<ImageView
android:id="@+id/MapCenter"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_above="@id/rvMap"
android:layout_centerInParent="true"
android:src="@drawable/map_center" />
<androidx.recyclerview.widget.RecyclerView
android:id="@+id/rvMap"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:layout_marginBottom="@dimen/_40sdp" />
</LinearLayout>
| |
doc_23530244
|
I have checked using WCF tracing and Fiddler and the data is definitely going over the wire. Seems to be when the XML is deserialized into .net objects something goes wrong and some properties get set to null.
I've tried rebuilding the proxy in case some mismatch there.
EDIT: OK all fixed now. The data objects were POCO's generated from the EF T4 template and which have a separate field for the foreign key id's. I was forgetting to set this on the client, as soon as I did I started to get the complex properties returned on the server. Not exactly sure why though.
A: Have you used DataMember on these properties? and if the type is class, you should use DataContract on these types too.
Actually, when you use WCF tracing, you can see the xml.
| |
doc_23530245
|
The scripts we have take YAML input from our end users and build out VMware ESXi clusters to their specification. We are trying to expand the scripts so that we can apply different configuration depending on the type of cluster the user specifies in the YAML. We want the end user to be able to expand this out to create as many clusters as needed. All the while applying different configuration based on the type of cluster they input. We also want to be able to easily expand Cluster"X"Type out in the future for other types we eventually define.
YAML input examples:
Cluster1: <Name>
Cluster1Type: <Basic, DR, or Replicate>
Cluster2: <Name>
Cluster2Type: <Basic, DR, or Replicate>
I know I could do this in a fairly unclean manner of hardcoding a very long if and statement. Something like:
If ($Cluster1Type -eq 'DR') {<Code to execute on $Cluster1>}
ElseIf ($Cluster1Type -eq 'Replicate') {<Code to execute on $Cluster1>}
Else {<Code to execute on $Cluster1>}
If ($Cluster2Type -eq 'DR') {<Code to execute on $Cluster2>}
ElseIf ($Cluster2Type -eq 'Replicate') {<Code to execute on $Cluster2>}
Else {<Code to execute on $Cluster2>}
I know there has to be a better way to go about this. vSphere 6.5 can have up to 64 clusters per vCenter if I remember right, and I definitely don't want to hardcode 64 if-else statements every time we need to check what cluster type the end user has assigned to a specific cluster name. I have been looking for a clean solution but my inexperience is making it challenging to find an answer on my own.
I was also thinking it may be possible to use a variable array for the cluster names and then prompt the user executing our PowerShell scripts to input the cluster type for each cluster name they input into the array. I still think there might be an even better way than this though? Possibly a method of running a loop on every ClusterX and ClusterXType variable in an incremental method?
A: Are you saying something like this?
This is assuming the user is only allowed to enter one cluster type at a time.
# Specify the number of cluster nodes to create
$ClusterCount = Read-Host -Prompt 'Enter the number of guests to create'
# Enter a cluster type to create
$ClusterType = Read-Host -Prompt 'Enter the type - Basic, DR, Replicate'
1..$ClusterCount |
ForEach{
"Working cluster type $ClusterType on new node name Cluster$PSITEM"
<#
If ($ClusterType -eq 'DR') {"Code to execute on Cluster$PSItem"}
ElseIf ($ClusterType -eq 'Replicate') {"Code to execute on Cluster$PSItem"}
Else {<Code to execute on $Cluster1>}
#>
}
# Results
Enter the number of guests to create: 3
Enter the type - Basic, DR, Replicate: Basic
Working cluster type Basic on new node name Cluster1
Working cluster type Basic on new node name Cluster2
Working cluster type Basic on new node name Cluster3
A: You can use the New-Variable command to create a Variable that uses another variable for the name
$iteration = 1
New-Variable -Name Cluster$iteration
This Creates a variable named $Cluster1
Get-Variable C*
Name Value
---- ----
Cluster1
A: We ended up instead, creating an array of objects in YAML. Then importing the YAML into our scripts and calling each by Clusters.Name / Clusters.Type. Thanks for the help everyone definitely got me learning various ways to accomplish this task or similar tasks.
Clusters:
- Name: XXXXX
Type: XXXXX
| |
doc_23530246
|
e.g.:
I have columns:
X Y Z
-1 1 2
2 2 -1
3 -1 3
I want to use avg() for all values except for -1. I CANNOT use where<> -1 as each row contains it once.
A: Use WHERE to filter away the values you don't want to include in your average, for example to include all numbers in the average except for -1:
SELECT
(SELECT AVG(x) FROM table1 WHERE x >= 0) AS x,
(SELECT AVG(y) FROM table1 WHERE y >= 0) AS y,
(SELECT AVG(z) FROM table1 WHERE z >= 0) AS z
Note that if you really want to include all numbers except -1 as you said in your question then you should change the WHERE clause to x <> -1 but I doubt that this is what you want.
A: Try the following:
SELECT AVG(IF(x <> -1, x, NULL)) AS avgX,
AVG(IF(y <> -1, y, NULL)) AS avgY,
AVG(IF(z <> -1, x, NULL)) AS avgZ
FROM mytable;
| |
doc_23530247
|
I've made a project on ruby on rails(rails 3.2.11)(ruby 1.9.3)
The gems i'm using have higher version.
Now i've to commit the project on beta version of my site.
In the beta version, the gems are installed but they are of a lower version, and of course I can't update the gems there because other projects depend on them and they would stop working.
Please help me--
Here is my gem file
gem 'rails', '3.2.11'
gem 'will_paginate'
gem 'mysql2'
gem 'devise', '1.1.rc0'
gem 'json'
group :assets do
gem 'sass-rails', '~> 3.2.3'
gem 'coffee-rails', '~> 3.2.1'
gem 'uglifier', '>= 1.0.3'
end
gem 'jquery-rails'
and here's my environment.rb
#### Load the rails application
require File.expand_path('../application', __FILE__)
#### Initialize the rails application
OfficeSpace::Application.initialize!
Please tell me how i can commit this project on beta.(Tell what changes i've to make in my project as i can't make any on beta)
These are the gems beta is using
abstract (1.0.0)
actionmailer (3.2.8, 3.0.4, 2.3.6)
actionpack (3.2.8, 3.0.9, 3.0.4, 2.3.6)
actionwebservice (1.2.6)
activemodel (3.2.8, 3.0.9, 3.0.4, 3.0.3)
activerecord (3.2.8, 3.0.4, 2.3.6, 2.3.2)
activerecord-import (0.2.9)
activeresource (3.2.8, 3.0.4, 2.3.6, 2.3.2)
activesupport (3.2.8, 3.0.4, 3.0.3, 2.3.6)
addressable (2.3.2, 2.2.6)
algorithms (0.3.0)
amazon-ec2 (0.9.17, 0.9.15)
ar-extensions (0.9.5, 0.9.2)
arel (3.0.2, 3.0.0, 2.0.10)
atk (1.1.6)
atom (0.3)
attr_required (0.0.5)
autoparse (0.3.2, 0.2.3)
aweber (1.5.0)
aws-s3 (0.6.2)
aws-ses (0.4.4, 0.4.2)
bayes_motel (0.1.0)
bitly (0.6.1)
blekko (0.0.3)
bluecloth (2.1.0)
builder (3.0.0, 2.1.2)
bundler (1.0.10)
cairo (1.12.3)
childprocess (0.3.6)
cobravsmongoose (0.0.2)
columnize (0.3.2)
configatron (2.10.0)
cookiejar (0.3.0)
crack (0.1.8)
createsend (2.5.0)
curb (0.7.18, 0.7.10)
daemon_controller (0.2.5)
daemons (1.1.9)
data_objects (0.10.7)
diff-lcs (1.1.3)
dm-core (1.2.0)
dm-do-adapter (1.2.0)
dm-sqlite-adapter (1.2.0)
do_sqlite3 (0.10.7)
domain_name (0.5.3)
em-http-request (1.0.3)
em-socksify (0.2.1)
em-twitter (0.1.4)
erubis (2.7.0, 2.6.6)
eventmachine (1.0.0.rc.4)
extlib (0.9.15)
facebooker2 (0.0.11, 0.0.10)
faraday (0.7.4)
faraday_middleware (0.8.8, 0.7.0)
fastercsv (1.5.5)
fastthread (1.0.7)
fb_graph (2.6.4, 2.4.1)
feedtools (0.2.29)
ffi (1.2.0)
file-tail (1.0.5)
gcm (0.0.2)
gcm_on_rails (0.1.3)
geocoder (1.1.6, 1.1.3)
geoip (1.1.2)
geokit (1.6.5)
gibbon (0.3.5)
glib2 (1.1.6)
google-api-client (0.4.6, 0.3.0)
google-search (1.0.2)
google_alerts (0.0.1)
google_plus (0.2.0)
googleajax (1.0.1)
googlebase (0.2.1)
googlereader (0.0.4)
grabz_it (0.0.4)
grabzit (1.1.0)
has_vimeo_video (0.0.5)
hashie (1.2.0, 1.1.0)
hashr (0.0.22)
highline (1.6.2)
hike (1.2.1)
hominid (3.0.2)
hpricot (0.8.3)
htmlentities (4.3.1)
httmultiparty (0.3.6)
http_parser.rb (0.5.3)
httpadapter (1.0.1)
httparty (0.8.3)
httpauth (0.2.0)
httpclient (2.2.4)
hubspot (0.0.2)
i18n (0.6.0, 0.5.0, 0.4.0)
imgkit (1.3.7)
instagram (0.8.5)
journey (1.0.4, 1.0.3)
json (1.5.1, 1.4.6)
jwt (0.1.5, 0.1.4)
koala (1.5.0)
launchy (2.1.2, 2.0.5)
libwebsocket (0.1.7.1)
libxml-ruby (1.1.4)
linecache (0.43)
linkedin (0.3.7)
locale (2.0.5)
mail (2.4.4, 2.4.0, 2.2.19)
mechanize (2.5.1)
memcache-client (1.8.5)
mime-types (1.18, 1.16)
mislav-will_paginate (2.3.11)
mogli (0.0.37)
multi_json (1.3.7, 1.0.0)
multi_xml (0.5.3, 0.2.2)
multipart-post (1.1.3)
mysql (2.8.1)
n_gram (0.0.1)
net-http-digest_auth (1.2.1)
net-http-persistent (2.7)
nokogiri (1.5.5, 1.4.4, 1.4.3.1)
nori (1.1.3)
ntlm-http (0.1.1)
oauth (0.4.5, 0.4.4, 0.4.3)
oauth2 (0.8.0, 0.5.2, 0.5.0)
omniauth (1.0.1)
omnicontacts (0.2.1)
pango (1.1.6)
passenger (3.0.2, 2.2.15)
payment (1.0.1)
pkg-config (1.1.4)
polyglot (0.3.3, 0.3.2, 0.3.1)
r_hapi (0.1.2)
rack (1.4.4, 1.4.1, 1.2.5, 1.2.3, 1.1.0)
rack-cache (1.2, 1.1)
rack-mount (0.6.14, 0.6.13)
rack-oauth2 (1.0.0, 0.14.2)
rack-openid (1.3.1)
rack-protection (1.3.2)
rack-ssl (1.3.2)
rack-test (0.6.1, 0.5.7)
rails (3.0.9, 3.0.4, 2.3.6)
railties (3.2.8, 3.0.9, 3.0.4)
rake (0.9.2.2, 0.9.2, 0.8.7)
rbx-require-relative (0.0.5)
rdoc (3.12, 3.9.1)
RedCloth (4.2.8)
rest-client (1.6.7)
rest-open-uri (1.0.0)
retryable (1.3.1)
rspec (2.11.0)
rspec-core (2.11.1)
rspec-expectations (2.11.1)
rspec-mocks (2.11.1)
ruby-debug (0.10.4)
ruby-debug-base (0.10.4)
ruby-hmac (0.4.0)
ruby-openid (2.2.2)
ruby-openid-apps-discovery (1.2.0)
rubygems-update (1.5.0)
rubyzip (0.9.9)
sanitize (2.0.3)
selenium-webdriver (2.27.1)
signet (0.4.1)
simple-rss (1.2.3)
simple_oauth (0.1.9, 0.1.5)
simple_youtube (3.0.0)
sinatra (1.3.3)
spell_checker (0.0.2)
sprockets (2.1.3, 2.1.2)
spruz (0.2.5)
sqlite3 (1.3.6, 1.3.4)
stemmer (1.0.1)
supermodel (0.1.6)
syntax (1.0.0)
thor (0.16.0, 0.14.6)
tilt (1.3.3)
tmail (1.2.7.1)
treetop (1.4.10, 1.4.9)
trollop (2.0)
truncate_html (0.5.5)
tweetstream (2.1.0)
twitter (1.6.2)
twitter_oauth (0.4.3)
tzinfo (0.3.33, 0.3.29, 0.3.24)
unf (0.0.5)
unf_ext (0.0.5)
uuidtools (2.1.3)
vapir-common (1.10.1)
vapir-firefox (1.10.1)
vimeo (1.5.3)
webrobots (0.0.13)
websocket (1.0.4)
whatlanguage (1.0.0)
will_paginate (2.3.11)
xml-simple (1.1.1, 1.0.14, 1.0.12)
yajl-ruby (1.1.0)
yamler (0.1.0)
yard (0.8.3)
youtube_it (2.1.7)
youtube_search (0.1.6)
And these are the gems my project is using
Gems included by the bundle:
* actionmailer (3.2.11)
* actionpack (3.2.11)
* activemodel (3.2.11)
* activerecord (3.2.11)
* activeresource (3.2.11)
* activesupport (3.2.11)
* arel (3.0.2)
* builder (3.0.4)
* bundler (1.2.3)
* coffee-rails (3.2.2)
* coffee-script (2.2.0)
* coffee-script-source (1.4.0)
* devise (1.1.rc0)
* erubis (2.7.0)
* execjs (1.4.0)
* hike (1.2.1)
* i18n (0.6.1)
* journey (1.0.4)
* jquery-rails (2.2.1)
* json (1.7.7)
* mail (2.4.4)
* mime-types (1.21)
* multi_json (1.6.0)
* mysql2 (0.3.11)
* polyglot (0.3.3)
* rack (1.4.5)
* rack-cache (1.2)
* rack-ssl (1.3.3)
* rack-test (0.6.2)
* rails (3.2.11)
* railties (3.2.11)
* rake (10.0.3)
* rdoc (3.12.1)
* sass (3.2.5)
* sass-rails (3.2.6)
* sprockets (2.2.2)
* thor (0.17.0)
* tilt (1.3.3)
* treetop (1.4.12)
* tzinfo (0.3.35)
* uglifier (1.3.0)
* warden (0.10.7)
* will_paginate (3.0.4)
So you see there's an incompatibility. How can I resolve that issue?
A: Sounds like you need to use rvm. It allows you to have multiple gemsets that you can assign to a project, giving you the ability to pick what version of gems each project uses. Check out: https://rvm.io
| |
doc_23530248
|
A: DTA is very good when it has a sufficient workload to operate on.
However, for one or two odd queries, DTA can't suggest very good solutions.
I suggest reviewing the suggested indexes and checking whether they need to be created.
Do not create all suggested indexes, as that may have an adverse effect on the overall system.
I believe that if your SELECT queries are very slow, it is likely due to a missing index on the columns in your WHERE clause. Choose the best option from DTA, review it, and then create your indexes and stats as required.
| |
doc_23530249
|
The red light Fix button even no luck.
I have checked Jupyter Notebook and it's already installed. Here is the screenshot:
Any idea how to fix this or am i doing something wrong?
| |
doc_23530250
|
> echo "$OUTPUT"
sn: name1
uid: uname1
mail: user1@mail.com
roomNumber: e2
sn: name2
uid: uname2
mail: user2@mail.com
roomNumber: e2
sn: name3
uid: uname3
roomNumber: e2
sn: name4
uid: uname4
mail: user4@mail.com
roomNumber: e2
I'm using awk to process each user into a single line so that it ends up like this:
name1|uname1|user1@mail.com|e2
name2|uname2|user2@mail.com|e2
name3|uname3||e2
name4|uname4|user4@mail.com|e2
The trouble is my code below can't handle the missing mail attribute, so it reuses the variable from the previous user and looks like this:
name1|uname1|user1@mail.com|e2
name2|uname2|user2@mail.com|e2
name3|uname3|user2@mail.com|e2
name4|uname4|user4@mail.com|e2
The awk command used is:
echo "$OUTPUT" | awk -v OFS='|' '{split($0,a,": ")} \
/^sn:/{sn=a[2]} \
/^uid:/{uid=a[2]} \
/^mail:/{mail=a[2]} \
/^roomNumber:/{room=a[2]; print sn, uid, mail, room}'
Is there a way to handle a missing attribute such as mail in the example above please?
Thanks.
A: As you notice, your input is very well structured in records. Each record is separated by a set of blank lines. You can exploit this with awk.
The idea in the following is to read each multi-line record which has key-value pairs of the form (key: value)
sn: name2
uid: uname2
mail: user2@mail.com
roomNumber: e2
We will tell awk to extract that information accordingly and store it in an array data. We will then use this array to rebuild the data in the way you want. If a key does not exist in the record, it will return a empty value when requested:
awk 'BEGIN{RS=""; FS="\n"; OFS="|"}
{ delete data; }
{ for(i=1;i<=NF;++i) {
match($i,/: +/);
key=substr($i,1,RSTART-1); value=substr($i,RSTART+RLENGTH);
data[key]=value }
}
{ print data["sn"], data["uid"], data["mail"], data["roomNumber"] }' file
This method is very generic and extremely flexible if you want to change anything later on.
On the presented example, this outputs:
name1|uname1|user1@mail.com|e2
name2|uname2|user2@mail.com|e2
name3|uname3||e2
name4|uname4|user4@mail.com|e2
A: Assuming your input lines per record are always ordered as shown and it's only the email field that could ever be missing:
$ awk -v RS= -F': |\n' -v OFS='|' '{print $2, $4, (NF>6 ? $6 : ""), $NF}' file
name1|uname1|user1@mail.com|e2
name2|uname2|user2@mail.com|e2
name3|uname3||e2
name4|uname4|user4@mail.com|e2
A: Just set the vars to the empty string after printing:
$ awk -v OFS='|' '{split($0,a,": ")}
/^sn:/{sn=a[2]}
/^uid:/{uid=a[2]}
/^mail:/{mail=a[2]}
/^roomNumber:/{room=a[2]; print sn, uid, mail, room; sn=uid=mail=room=""}' file
name1|uname1|user1@mail.com|e2
name2|uname2|user2@mail.com|e2
name3|uname3||e2
name4|uname4|user4@mail.com|e2
| |
doc_23530251
|
#przejscie
{
width: 340px;
display: block;
opacity: 0;
max-width: 100%;
margin: 0 auto;
position: relative;
top: 500px;
transition: transform 20s, background 50s;
-o-transition: -o-transform 16s, background 0.5s;
-moz-transition: -moz-transform 0.5s, background 0.5s;
-webkit-transition: -webkit-transform 3s, opacity 2s, background 0.5s;
}
#przejscie:target
{
transform: translate(-200);
-o-transform: scale(1.2);
-moz-transform: scale(1.2);
-webkit-transform: translate(0px,-300px );
opacity: 1;
}
and my form:
<html>
<head>
<script src="{% static 'js/artyom.min.js' %}"></script>
<script src="{% static 'js/jquery-2.1.4.min.js' %}"></script>
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css" integrity="sha384-1q8mTJOASx8j1Au+a5WDVnPi2lkFfwwEAa8hDDdjZlpLegxhjVME1fgjWPGmkzs7" crossorigin="anonymous">
<link rel="stylesheet" type="text/css" href="{% static 'css/animacje.css' %}"/>
</head>
<h1><a href="#przejscie">Welcome register in our application</a></h1>
<div id="przejscie" class="container col-sm-3 srodek col-md-offset-2">
<form method="post" action="/PPProject-web/register" class="form-signin">
<label for="email" class="sr-only">Adres e-mail</label>
<input name="email" class="form-control" id="email" type="email" placeholder="E-mail" required>
<label for="imie" class="sr-only">Imię</label>
<input name="imie" class="form-control" id="imie" placeholder="Imię" required>
<label for="nazwisko" class="sr-only">Adres e-mail</label>
<input name="nazwisko" class="form-control" id="nazwisko" placeholder="Nazwisko" required>
<label for="username" class="sr-only">Nazwa użytkownika</label>
<input name="username" class="form-control" id="username" placeholder="Nazwa użytkownika" required>
<label for="password" class="sr-only">Adres e-mail</label>
<input name="password" type="password" class="form-control" id="password" placeholder="Hasło" required>
<button class="btn btn-lg btn-primary btn-block" type="submit">Zarejestruj się</button>
</form>
</div>
</html>
OK, the target works fine but I still can't center this div.
A: Off the top of my head, your document appears to be missing a <body> tag.
Edit: Ok, I think it definitely has something to do with the absolute positioning, and the fact that your form is taking up 100% of the width.
margin: auto looks at the width of the element including padding and borders, calculates the difference between that and the width of the container, and "fills it in" equally on both sides with margin. So if the form is taking up the whole width of the body then there won't be any margins.
Absolute positioning takes an element "out of flow," essentially causing it to ignore other elements. The browser doesn't know how to calculate the margin in that case so it treats it as zero.
Perhaps you could try position:relative instead? Or you can set zero for all four position properties to center horizontally and vertically. (See this answer: css absolute position won't work with margin-left:auto margin-right: auto.)
I tested it adding position: relative and width:50% to #przejscie and it seemed to work.
Further reading: https://developer.mozilla.org/en-US/docs/Web/CSS/position#Absolute_positioning
A: The automatic margin (margin: 0 auto) has no reference space to center within when using absolute positioning. You need to change your position from "absolute" to "relative".
if you need to use absolute positioning I recommend this code:
#przejscie{
display: block;
max-width: 100%;
width:400px;
margin:0 auto;
position: relative;
background:salmon;
}
#przejscie2{
width:400px;
position:relative;
background:red;
}
.cont-form{
width:100%;
height:auto;
display: flex;
justify-content: center;
background:yellow;
}
<div id="przejscie" class="container col-sm-3 srodek col-md-offset-2">
<form method="post" action="/PPProject-web/register" class="form-signin">
<label for="email" class="sr-only">Adres e-mail</label>
<input name="email" class="form-control" id="email" type="email" placeholder="E-mail" required>
<label for="imie" class="sr-only">Imię</label>
<input name="imie" class="form-control" id="imie" placeholder="Imię" required>
<label for="nazwisko" class="sr-only">Adres e-mail</label>
<input name="nazwisko" class="form-control" id="nazwisko" placeholder="Nazwisko" required>
<label for="username" class="sr-only">Nazwa użytkownika</label>
<input name="username" class="form-control" id="username" placeholder="Nazwa użytkownika" required>
<label for="password" class="sr-only">Adres e-mail</label>
<input name="password" type="password" class="form-control" id="password" placeholder="Hasło" required>
<button class="btn btn-lg btn-primary btn-block" type="submit">Zarejestruj się</button>
</form>
</div>
<br><br>
<div class="cont-form">
<div id="przejscie2" class="container col-sm-3 srodek col-md-offset-2">
<form method="post" action="/PPProject-web/register" class="form-signin">
<label for="email" class="sr-only">Adres e-mail</label>
<input name="email" class="form-control" id="email" type="email" placeholder="E-mail" required>
<label for="imie" class="sr-only">Imię</label>
<input name="imie" class="form-control" id="imie" placeholder="Imię" required>
<label for="nazwisko" class="sr-only">Adres e-mail</label>
<input name="nazwisko" class="form-control" id="nazwisko" placeholder="Nazwisko" required>
<label for="username" class="sr-only">Nazwa użytkownika</label>
<input name="username" class="form-control" id="username" placeholder="Nazwa użytkownika" required>
<label for="password" class="sr-only">Adres e-mail</label>
<input name="password" type="password" class="form-control" id="password" placeholder="Hasło" required>
<button class="btn btn-lg btn-primary btn-block" type="submit">Zarejestruj się</button>
</form>
</div>
</div>
A: You need to define the width of the element you are centering
| |
doc_23530252
|
it is my code in javascript:
// Uploads the file selected in #uploadFile to the p1.aspx/uploadPic endpoint
// as multipart/form-data via jQuery AJAX. Does nothing if no file is chosen.
function UploadFile() {
// Strip any fake path prefix the browser adds, keeping only the file name.
var fileName = $('#uploadFile').val().replace(/.*(\/|\\)/, '');
if (fileName != "") {
var formData = new FormData();
formData.append('file', $('input[type=file]')[0].files[0]);
$.ajax({
url: 'p1.aspx/uploadPic',
data: formData,
// Do not transform the FormData and let the browser set the
// multipart boundary header itself.
processData: false,
contentType: false,
type: 'POST',
success: function (dt) {
// NOTE(review): ASP.NET page [WebMethod]s expect JSON bodies, so a
// multipart POST may never bind to uploadPic's parameter — confirm.
alert(dt.d);
}
});
}
}
code in p1.aspx page:
[WebMethod]
public static string uploadPic(HttpPostedFile file)
{
return file.FileName;
}
It does not work and does not return anything! What is wrong? How can I get the image on the server?
Best regards.
| |
doc_23530253
|
Great thanks.
Benja
A: You can do it with geotif images like so How to accurately make a map for a map-overlay
| |
doc_23530254
|
The can be seen here http://imgur.com/a/hQqVP the green node is not effected by the physics simulation.
// set scene up
self.anchorPoint = CGPointMake(0.5, 0.5);
self.backgroundColor = [SKColor grayColor];
self.view.showsPhysics = YES;
// set up the two bodies to be connected
SKSpriteNode* testSpriteOne = [[SKSpriteNode alloc] initWithColor:[SKColor yellowColor] size:NODE_SIZE];
testSpriteOne.physicsBody = [SKPhysicsBody bodyWithRectangleOfSize:NODE_SIZE];
testSpriteOne.position = CGPointMake(-20, -10);
testSpriteOne.physicsBody.dynamic = YES;
[self addChild:testSpriteOne];
SKSpriteNode* testSpriteTwo = [[SKSpriteNode alloc] initWithColor:[SKColor greenColor] size:NODE_SIZE];
testSpriteTwo.physicsBody = [SKPhysicsBody bodyWithRectangleOfSize:NODE_SIZE];
testSpriteTwo.position = CGPointMake(93, 166);
testSpriteTwo.physicsBody.dynamic = NO;
[self addChild:testSpriteTwo];
// set up the joint
SKPhysicsJointLimit* ropeJoint = [SKPhysicsJointLimit jointWithBodyA:testSpriteTwo.physicsBody bodyB:testSpriteOne.physicsBody anchorA:testSpriteTwo.position anchorB:testSpriteOne.position];
[self.physicsWorld addJoint:ropeJoint];
My scene's anchor point is (0.5, 0.5), but if it is set to (0, 0) the physics simulation works, but is not in the right position. The other workaround solution is to create temporary alternate positions that are offset by the scaled width and height of the scene and use those in the creation of the joint. The code below shows this.
// works with offset
CGPoint AlternatePosition1 = CGPointMake(testSpriteOne.position.x + self.scene.size.width * self.scene.anchorPoint.x, testSpriteOne.position.y + self.scene.size.height * self.scene.anchorPoint.y);
CGPoint AlternatePosition2 = CGPointMake(testSpriteTwo.position.x + self.scene.size.width * self.scene.anchorPoint.x, testSpriteTwo.position.y + self.scene.size.height * self.scene.anchorPoint.y);
SKPhysicsJointLimit* ropeJoint = [SKPhysicsJointLimit jointWithBodyA:testSpriteOne.physicsBody bodyB:testSpriteTwo.physicsBody anchorA:AlternatePosition1 anchorB:AlternatePosition2];
First, I'm not sure why this works. The resulting points are not in the scene coordinates. Also, this "solution" will not work if for example the sprites are contained in a world node and the world node is changing its position in the scene. Even when the nodes positions in the scene are used in SKPhysicsJoint.
So, is there any way to have SKPhysicsJointLimit function properly in a scene with an anchor of (0.5,0.5) without having to offset the position of the nodes.
| |
doc_23530255
|
But even with the new version installed, it is not available to be setup as default JRE.
I tried some eclipse instructions to JDK 15 (#1 and #2), but without success with JBoss.
Does anyone have a tip?
Thank you
Solution: Use Eclipse 2020-09 (4.17)
A: Your error same with this error: Target is not a JDK root. System library was not found. Eclipse Oxygen 4.7 + Java9 error
So you have to find a support extension, if one exists and is available.
A: Make sure you have Eclipse 2020-09 (4.17) with the Java 15 Support for Eclipse 2020-09 (4.17) installed.
| |
doc_23530256
|
This is data.json:
{
"id": 1,
"name": "name1"
},
{
"id": 2,
"name": "name2"
}
This is index.js:
var express = require('express');
var app = express();
app.get('/user/:id', function(req, res) {
// do stuff here
})
app.listen(3000)
A: First of all, your JSON has to be an array, so change it to
[
{
"id": 1,
"name" : "name1"
},
{
"id" : 2,
"name" : "name2"
}
]
and then on your express csode
var express = require('express');
var fs = require('fs'); // util to read file
var app = express();
/**
JSON.parse is a function to parse string with JSON format to JavaScript object
fs.readFileSync is a function to read file synchronously, first parameter is the path to the file,
second parameter is the encoding type
*/
var users = JSON.parse(fs.readFileSync('path/to/data.json', 'UTF-8'));
// users now is [{ id: 1, name : "name1" }, { id : 2, name : "name2" }]
// users[0].name will give you name1
app.get('/user/:id', function(req, res) {
var id = +req.params.id; // will contain data from :id; the + parses the string to an integer
var user = users.find(u => u.id === id); // find user from users using .find method
// NOTE(review): if no user matches, `user` is undefined and the response
// body is empty — confirm that is the desired behavior.
res.send(user); // send the data
})
app.listen(3000);
| |
doc_23530257
|
//Code
public List<TelerikBookingRequestingColumns> Select(int startRowIndex, int maximumRows, string sortExpressions, List<GridFilterExpression> filterExpressions)
{
int numberOfObjectsPerPage = maximumRows;
List<TelerikBookingRequestingColumns> objList = new List<TelerikBookingRequestingColumns>();
objList.AddRange(list);
if (!String.IsNullOrEmpty(sortExpressions))
{
string firstExpression = sortExpressions.Split(',')[0];
var queryResultorderby = (from listObject in objList
orderby firstExpression
select listObject);
objList= queryResultorderby.ToList<TelerikBookingRequestingColumns>();
}
var queryResultPage = (from listObject in objList
.Skip(startRowIndex)
.Take(numberOfObjectsPerPage)
select listObject);
return queryResultPage.ToList<TelerikBookingRequestingColumns>();
}
In the above code, firstExpression will be something like (empid asc) or (empid desc), i.e. a column name plus the sort direction. But this doesn't work.
How can I achieve this?
A: orderby requires lambda for key selector:
public static IOrderedEnumerable<TSource> OrderBy<TSource, TKey>(
this IEnumerable<TSource> source,
Func<TSource, TKey> keySelector
)
but you are trying to pass simple string. This will not work. Try to use Dynamic Linq if you want to sort results based on some string.
Get System.Linq.Dynamic from NuGet and apply sorting this way (assume firstExpression looks like propertyname + asc/desc):
var firstExpression = "empid asc";
var queryResultorderby = objList.AsQueryable()
.OrderBy(firstExpression);
| |
doc_23530258
|
This is my current component:
<GiftedChat text={emergencyText} onInputTextChanged={this.setEmergencyText} />
I could override the render methods, but how do I then render the original input?
A: Since react-native-gifted-chat does not have a disable prop for the input, it provides one more component, InputToolbar; if we render a null value to it, we can almost disable the sending feature. Just try the code below.
<InputToolbar
render={null}
text={emergencyText}
onInputTextChanged={this.setEmergencyText}
/>
It serves your purpose.
A: You can use the disableComposer prop.
<GiftedChat disableComposer={true} text={emergencyText} onInputTextChanged={this.setEmergencyText} />
| |
doc_23530259
|
I'm pretty sure it is not possible out-of-the-box, but I know that NHibernate is pretty hackable.
The cache region system doesn't really support the flexibility you have when you can instantiate a cache dependency as you put data into the cache.
Here's what the code might look like:
Session.CreateCriteria<Foo>("foo")
.SetCacheable()
.Add(Restriction.Eq("foo.Name", fooName))
.AddCacheDependency(new MyCustomCacheDependency(fooName))
.List<Foo>();
A: Cache dependencies are handled by the cache providers, not the core.
There are some examples of DB-based expiration in the documentation for the SysCache2 provider.
| |
doc_23530260
|
ListTile(
title: Text("Location"),
subtitle: if (_currentPosition != null) Text(_currentAddress),
trailing: IconButton(...),
),
this gives an error "Expected an identifier. dart(missing_identifier)"
A: You have to do it like this:
subtitle: _currentPosition == null?
Text('No position found') //code if above statement is true
:Text(_currentAddress), //code if above statement is false
| |
doc_23530261
|
public class ImageFileWatcher : IHostedService, IDisposable
{
private readonly ILogger _logger;
private readonly IConfiguration _configuration;
FileSystemWatcher _watcher;
public ImageFileWatcher(ILogger<ImageFileWatcher> logger, IConfiguration configuration)
{
_logger = logger;
_configuration = configuration;
// IConfigurationBuilder builder = new ConfigurationBuilder()
// .AddJsonFile("appsettings.json")
// .AddEnvironmentVariables();
// this._configuration = builder.Build();
}
public Task StartAsync(CancellationToken cancellationToken)
{
var watcherConfig = _configuration["WatcherConfig:filePath"];//this is where it comes up as null.
_logger.LogInformation($"Image File watcher started on path{watcherConfig}");
_watcher = new FileSystemWatcher(watcherConfig);
_watcher.Created += OnNewImage;
_watcher.EnableRaisingEvents = true;
return Task.CompletedTask;
}
Here is the appsettings.json file
{
"Logging": {
"LogLevel": {
"Default": "Information",
"Microsoft": "Warning",
"Microsoft.Hosting.Lifetime": "Information"
}
},
"WatcherConfig":{
"filePath":"/Users/###/Documents/Development/ImageDrop"
}
}
Here is the Program.cs file
/// <summary>
/// Application entry point: builds the generic host that runs the
/// ImageFileWatcher hosted service.
/// </summary>
public class Program
{
    public static void Main(string[] args)
    {
        CreateHostBuilder(args).Build().Run();
    }

    public static IHostBuilder CreateHostBuilder(string[] args) =>
        Host.CreateDefaultBuilder(args)
            .ConfigureAppConfiguration((hostContext, configBuilder) =>
            {
                // Only register configuration sources here; the host calls
                // Build() itself. The original code ended the chain with
                // configBuilder.Build(), which produced a throwaway
                // IConfiguration and was never needed. Note that
                // CreateDefaultBuilder already loads appsettings.json and
                // appsettings.{Environment}.json, so the main effect of these
                // lines is adding the prefixed environment variables.
                configBuilder
                    .AddJsonFile("appsettings.json", optional: true, reloadOnChange: true)
                    .AddJsonFile("appsettings.Production.json", optional: true, reloadOnChange: true)
                    .AddEnvironmentVariables(prefix: "ImageService_");
            })
            .ConfigureServices((hostContext, services) =>
            {
                services.AddHostedService<ImageFileWatcher>();
            });
}
Here is the second attempt at the Program class:
public class Program
{
public static void Main(string[] args)
{
CreateHostBuilder(args).Build().Run();
}
public static IHostBuilder CreateHostBuilder(string[] args) =>
Host.CreateDefaultBuilder(args)
.ConfigureServices((hostContext, services) =>
{
IConfiguration builder = new ConfigurationBuilder()
.AddJsonFile("appsettings.json", optional: true, reloadOnChange: true)
.AddJsonFile("appsettings.Production.json", optional: true, reloadOnChange: true)
.AddEnvironmentVariables(prefix: "ImageService_")
.Build();
hostContext.Configuration = builder;
services.AddHostedService<ImageFileWatcher>();
});
}
A: Given that you are using the default appsettings.json setup, the use of CreateDefaultBuilder() should mean you have nothing extra to configure.
There's a couple of notes which may help:
*
*Remove any custom configuration setup as you don't need it (because of CreateDefaultBuilder)
*Make sure that the appsettings.json file is copied to the build output. If not, in Visual Studio, you can right-click on the file and set "Copy to Output Directory" to "Copy if newer".
Alternatively, if you want to configure manually, note that:
*
*When configuring the host, adding your config providers, you don't need to call the Build() method.
*In your second example of Program the configuration is being set up in ConfigureServices, which is not where it should be. I guess this is why it was necessary to use the line hostContext.Configuration = builder which should not be required.
A: You must read App settings in Program.cs while building Host service. Check out what Host.CreateDefaultBuilder does, and if you decide to not use the default builder you might need to load the settings implicit.
If you heave further questions, please attach the Program.cs file, as the IConfiguration depends on its initialization not the usage in the service.
A: I've also faced this problem in a similar approach.
Glad "dotnet clean" and "dotnet build" worked for you.
Below, just wanna to contribute with a different solution.
*
*Right button on appsettings.json > Properties > Copy to Output Directory > Copy always
*Program
static class Program
{
public static IConfigurationRoot Configuration { get; private set; }
static async Task Main(string[] args)
{
using IHost host = CreateHostBuilder(args).Build();
}
static IHostBuilder CreateHostBuilder(string[] args) =>
Host.CreateDefaultBuilder(args)
.ConfigureAppConfiguration((hostingContext, configuration) =>
{
configuration.Sources.Clear();
configuration
.AddJsonFile("appsettings.json", optional: false, reloadOnChange: true);
IConfigurationRoot configurationRoot = configuration.Build();
Configuration = configurationRoot;
}).ConfigureServices((services) =>
{
services.RegisterServices();
});
}
*
*Created a class called AppSettings and other class called EndPoint to interact with appsettings.json data
public class EndPoint
{
public string UrlWebApi { get; set; }
}
public class AppSettings
{
public EndPoint EndPoints { get; set; }
public string Application { get; set; }
public AppSettings()
{
IConfigurationBuilder builder = new ConfigurationBuilder();
builder.AddJsonFile(Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "appsettings.json"));
var root = builder.Build();
root.Bind(this);
}
}
*
*Example of use of Appsettings class gettings URL from appsettings.json
public class MyAppService : IMyAppService
{
private readonly AppSettings _appSettings;
private readonly ILogger<MyAppService> _logger;
public MyAppService(AppSettings appSettings, ILogger<MyAppService> logger)
{
_appSettings = appSettings;
_logger = logger;
}
public async Task<string> ShowAppSettingsUse()
{
_logger.LogInformation("Getting endpoint URL from AppSettings...");
var urlFromAppSettings = _appSettings.EndPoints.UrlWebApi;
return urlFromAppSettings;
}
}
}
| |
doc_23530262
|
List=[df1,df2,df3].
and each dataFrame has the following structure. df1 has the structure
column1 column2 column3
4 3 4
4 5 7
7 6 6
8 6 4
df2 has the structure
column1 column2 column3
4 3 4
7 5 7
7 6 5
8 6 4
df3 has the structure
column1 column2 column3
4 3 5
4 1 7
7 6 6
8 6 4
I would like to compare the content of df1 column1 and column2(for each row) with the contain df2 (column1 and column2) and df3 (column1 and column2)
I wrote something thought about something like this:
for i in range(len(List)):# iterate through the list
for j in range(len(List[0].index.values)):# iterate through the the whole dataFrame
#I would like to so something like: if df1[column1][row1]=df2[column1][row1] then do ....
# now i dont know how to iterate through all the dataFrames simulatanously to compare the content of of column 1 and column 2(for each row k) of df1 with the content of column 1 and column 2 of df2 and column 1 and column 2 of df3.
I am stuck there
A: First, create dataframes with the data provided
# Build three example DataFrames that differ in a few cells, so they can be
# compared element-wise afterwards.
import pandas as pd

df1 = pd.DataFrame({
    'column1': [4, 4, 7, 8],
    'column2': [3, 5, 6, 6],
    'column3': [4, 7, 6, 4],
})
print(df1)
#    column1  column2  column3
# 0        4        3        4
# 1        4        5        7
# 2        7        6        6
# 3        8        6        4

# Use .loc[row, column] for single-cell writes. Chained indexing such as
# df2['column1'][1] = 7 may assign into a temporary copy (the classic
# SettingWithCopyWarning) and raises under pandas' copy-on-write mode,
# so it must not be used for assignment.
df2 = df1.copy()
df2.loc[1, 'column1'] = 7
df2.loc[2, 'column3'] = 5
print(df2)
#    column1  column2  column3
# 0        4        3        4
# 1        7        5        7
# 2        7        6        5
# 3        8        6        4

df3 = df1.copy()
df3.loc[1, 'column2'] = 1
df3.loc[0, 'column3'] = 5
print(df3)
#    column1  column2  column3
# 0        4        3        5
# 1        4        1        7
# 2        7        6        6
# 3        8        6        4
Then, to get a dataframe of the same shape, with a boolean value indicating which
entries are equal in both dataframes
print(df1.eq(df2))
# column1 column2 column3
# 0 True True True
# 1 False True True
# 2 True True False
# 3 True True True
To get a series of booleans indicating for which columns all the
corresponding rows are equal in both dataframes
print(df1.eq(df2).all())
# column1 False
# column2 True
# column3 False
# dtype: bool
To get a series of booleans indicating for which rows all the corresponding
columns are equal in both dataframes
print(df1.eq(df2).all(axis='columns'))
# 0 True
# 1 False
# 2 False
# 3 True
# dtype: bool
To get a single boolean indicating wheter all corresponding entries are equal
in both dataframes
print(df1.equals(df2))
# False
If you need to combine every pair of dataframes and compare them, you can use
from itertools import combinations
List = [df1, df2, df3]
for a, b in combinations(enumerate(List, 1), 2):
print(f'df{a[0]}.equals(df{b[0]}): ', a[1].equals(b[1]))
# df1.equals(df2): False
# df1.equals(df3): False
# df2.equals(df3): False
| |
doc_23530263
|
FROM tableName
WHERE 'value%' IN/LIKE (col1, col2, col3, col4)
I want to find single value in multiple columns with like condition on value. That value with 'value%' must be found in col1 OR col2 OR ..... like this.
I have a big query with 10 columns to find, so need optimised query for this.
Thank you in advance.
A: You can do this:
SELECT *
FROM tableName
WHERE
col1 LIKE 'value%' OR
col2 LIKE 'value%' OR ..
Hope this helps.
A: Either you create a nice long query with union for each of the searches (MySQL is not really good at optimising or conditions) or you need to create fulltext index on all the columns involved and do a Boolean search.
The union query will look like the following:
select * from tablename where col1 LIKE 'value%'
union
select * from tablename where col2 LIKE 'value%'
...
The Boolean search on the fulltext index would look like as follows:
select * FROM tablename WHERE MATCH (col1,col2,...)
AGAINST ('+value*' IN BOOLEAN MODE);
| |
doc_23530264
|
What could be wrong here?
Link to Application: http://simateriaisportal.appspot.com
(Conta -> Dashboard).
The code i posted before was working correctly, the problem comes when content (dashboard) is filled dynamicaly
A: Have you tried to use firebug to find out where the padding is coming from?
A: What you've posted here renders similarly in Chrome and Firefox. That points to YUI Base or YUI Reset (or something else outside of this code).
Also, the Firefox render has extra space at the top & bottom of the white rows and at the top of the table itself. So, it's not just about a div.
A: I don't really know what to look for in your link, but your div "panelDB" has padding of 5px top and bottom, and your table inside it has a margin of 10px top and bottom... that seems to be what you're talking about... remove those two and you're good to go :)
A: Looks like you got it working; you should post the solution and mark it as the answer. Maybe we can all learn something.
| |
doc_23530265
|
{"status_code":200,"status_txt":"OK","data":{"img_name":"8zN9G.jpg","img_url":"http:\/\/s1.uploads.im\/8zN9G.jpg","img_view":"http:\/\/uploads.im\/8zN9G.jpg","img_width":"954","img_height":"1421","img_attr":"width=\"954\" height=\"1421\"","img_size":"327.8 KB","img_bytes":335711,"thumb_url":"http:\/\/s1.uploads.im\/t\/8zN9G.jpg","thumb_width":360,"thumb_height":536,"source":"base64 image string","resized":"0","delete_key":"8c9bd7ab84a7bd6f"}}
and I don't know how to parse this data and store it in the database. I want to insert the URL of the uploaded image into my database. Please help me.
A: you can use json_decode
$response = '{"status_code":200,"status_txt":"OK","data":{"img_name":"8zN9G.jpg","img_url":"http:\/\/s1.uploads.im\/8zN9G.jpg","img_view":"http:\/\/uploads.im\/8zN9G.jpg","img_width":"954","img_height":"1421","img_attr":"width=\"954\" height=\"1421\"","img_size":"327.8 KB","img_bytes":335711,"thumb_url":"http:\/\/s1.uploads.im\/t\/8zN9G.jpg","thumb_width":360,"thumb_height":536,"source":"base64 image string","resized":"0","delete_key":"8c9bd7ab84a7bd6f"}}';
$arr = json_decode($response, true);
echo "<pre>";
print_r($arr);
echo $arr['data']['img_url'];
OUPUT :
Array
(
[status_code] => 200
[status_txt] => OK
[data] => Array
(
[img_name] => 8zN9G.jpg
[img_url] => http://s1.uploads.im/8zN9G.jpg
[img_view] => http://uploads.im/8zN9G.jpg
[img_width] => 954
[img_height] => 1421
[img_attr] => width="954" height="1421"
[img_size] => 327.8 KB
[img_bytes] => 335711
[thumb_url] => http://s1.uploads.im/t/8zN9G.jpg
[thumb_width] => 360
[thumb_height] => 536
[source] => base64 image string
[resized] => 0
[delete_key] => 8c9bd7ab84a7bd6f
)
)
http://s1.uploads.im/8zN9G.jpg
| |
doc_23530266
|
A: Try to change the base class of your forms from Form to KryptonForm.
| |
doc_23530267
|
As shown in the figure above, how to set the mouse to move to the column highlight to change the color of the column, and restore the original color after the mouse is moved out
| |
doc_23530268
|
$scope.search={"className":{"Id":"101","Class":"business"}};
The above data coming from another control,now i'm accessing Class property like $scope.search.className.Class; , But i want convert property Class to lowercase like $scope.search.className.class; in angularjs only.
A: You may be able to achieve it by iterating over the entire object and writing a function to convert all properties to lowercase :
// Rename every own property of `obj` to its lowercase form, in place.
// The original snippet referenced undefined variables (`a[s]` instead of
// `obj[prop]`) and, for names that are already lowercase, would assign the
// value to itself and then delete it, losing the property entirely.
// Snapshot the keys first so mutating `obj` during iteration is safe, and
// skip names that are already lowercase.
for (const prop of Object.keys(obj)) {
  const lower = prop.toLowerCase();
  if (lower !== prop) {
    obj[lower] = obj[prop];
    delete obj[prop];
  }
}
Keep in mind this will not behave correctly if you have property names in your object with just the case difference like : {foo:"bar",Foo:"baz"}
| |
doc_23530269
|
//Used for connecting to database and executing queries.
//Index 0 of input string must be the query, Index 1 must be the tablename we demand
//We can only gather data from 1 table for each query, so if you need data from several tablecolumns, use multiple queries like:
//[0] = query, [1] = tablename, [2] = 2nd query, [3] = 2nd tablename, [4] = 3rd query, [5] = 3rd table name ... and so on (each query must come with a tablename)
// Connects to the database and executes the supplied statements off the UI thread.
// Varargs layout: [0] = query, [1] = result column name, [2] = 2nd query,
// [3] = 2nd column name, ... (each query is paired with a column name).
// Pass an empty/blank column name for statements that return no rows
// (INSERT, UPDATE, DELETE).
public class DBHandler extends AsyncTask<String, Void, List<String>>
{
    // Receives the collected results once onPostExecute runs.
    public AsyncResponse delegate;

    @Override
    protected List<String> doInBackground(String... query)
    {
        List<String> result = new ArrayList<String>();
        Connection conn = null;
        Statement st = null;
        try {
            Class.forName("org.postgresql.Driver");
            conn = DriverManager.getConnection("jdbc:postgresql://192.168.200.300:5439/dbname?user=anonymous&password=secretpw");
            st = conn.createStatement();
            // Queries live at even indices; the column to read follows at i + 1.
            for (int i = 0; i + 1 < query.length; i += 2)
            {
                if (query[i] == null || query[i].isEmpty())
                    continue;
                String columnName = query[i + 1];
                // st.execute() handles both kinds of statements: it returns true
                // for SELECT (a ResultSet is available) and false for
                // INSERT/UPDATE/DELETE. The original code called executeQuery()
                // for everything, which throws SQLException on UPDATE statements
                // ("No results were returned by the query") — that exception
                // escaped the for loop into the catch block, which is exactly
                // why the loop stopped after the first UPDATE.
                boolean hasResultSet = st.execute(query[i]);
                if (hasResultSet && columnName != null && !columnName.trim().isEmpty())
                {
                    ResultSet rs = st.getResultSet();
                    try {
                        while (rs.next())
                            result.add(rs.getString(columnName));
                    } finally {
                        rs.close();
                    }
                }
                // Separator entry keeps the results of consecutive queries apart.
                // Replace " " with another marker only after fixing all callers.
                if (i + 2 < query.length)
                    result.add(" ");
            }
            System.out.println("End of AsyncTask");
        }
        catch (SQLException ex)
        {
            ex.printStackTrace();
        }
        catch (Exception e)
        {
            e.printStackTrace();
        }
        finally
        {
            // Close resources even when a statement throws mid-loop;
            // the original leaked the statement and connection on failure.
            try { if (st != null) st.close(); } catch (SQLException ignored) { }
            try { if (conn != null) conn.close(); } catch (SQLException ignored) { }
        }
        return result;
    }

    // onPostExecute returns the query result in a List.
    // The AsyncResponse delegate forwards the result to interested classes
    // (e.g. "Auslieferung") which implement the interface.
    @Override
    protected void onPostExecute(List<String> result)
    {
        super.onPostExecute(result);
        System.out.println("Result: " + result.toString());
        if (!result.isEmpty())
            delegate.processFinish(result);
    }
}
There is a for-loop in this Async Task.
for (int i = 0; i <= query.length-1; i = i+2)
And now finally I can explain my issue:
I usually use SELECT queries, sometimes I use an INSERT query (which can be done by a single query), but when I parse an Update Query, my for-loop stops iterating after the first pass, so i+2 never happens. The update queries look like this:
String updatequeries[] = {UPDATE delivery SET contactperson = 'Jon Doe' WHERE officeid = 5, " ", UPDATE delivery SET contactemail = 'abd@def.gh' WHERE officeid = 5, " "};
Why does this for loop stop running right after the first run? The debugger does not show anything unusual, everything was parsed correctly and there are no queries missing. Updating a table does not return any results, but nothing depends on result values here. I tried to run 20 update queries in a single string variable, but the for loop stops after the first iteration anyway. No issues are displayed in the debugger or in the logs. Have I overlooked something, or is there anything I don't know? Might this be a bug? Please help me! This issue drives me crazy.
| |
doc_23530270
|
<customer>
<name>Müller</name>
</customer>
I parse the file using following code:
File xmlFile = new File("file.xml");
DocumentBuilderFactory dbFactory = DocumentBuilderFactory.newInstance();
DocumentBuilder dBuilder = dbFactory.newDocumentBuilder();
Document doc = dBuilder.parse(xmlFile)
And get the error that the entity ü is referenced but not declared. What I want is that the entry is being read but not parsed in any way, I want to get the value as it is written in the file.
How do I do that?
A: I tried setting:
dbFactory.setExpandEntityReferences(false);
but this doesn't work.
If you can't modify your xml content (using UTF-8 the xml can contain u umlaut), you might be able to add a DTD:
<!DOCTYPE definition [
<!ENTITY uuml "ü">
]>
If you can't modify your xml file, load the xml contents and prepend the DTD:
String dtd = "<!DOCTYPE definition [\n<!ENTITY uuml 'ü'>\n]>\n",
contents = <load xmlFile>;
Reader reader = new StringReader(dtd + contents);
InputSource src = new InputSource(reader);
Document doc = dBuilder.parse(src);
| |
doc_23530271
|
function hello(){}
document.write(typeof hello); // outputs function
document.write("<br/>");
document.write(hello.name); // outputs name
whereas
var hello = function(){}
document.write(typeof hello); // outputs function
document.write("<br/>");
document.write(hello.name); // doesn't output anything
Can you explain why? It seems that functions are not the same.
A: In the second example, the variable hello points to an anonymous function expression.
You can write named function expressions too
var foo = function bar() {};
console.log(foo.name); // "bar"
Using a named function expression is great for recursing in strict mode, but in old versions of IE you'll find the identifier leaks into the namespace.
A: When you use the second form, declare it this way:
var hello = function hello() {}
And you'll be able to get to the name property. It may seem redundant, but this is a pretty good habit, and we enforce it where we work. It makes stack traces easier to follow, and any minifier worth its salt will remove the actual name unless it's referred to. The names don't have to match.
| |
doc_23530272
|
The error:
"Unable to connect to any of the specified MySQL hosts"
The code:
string constring = "SERVER=198.49.72.34;PORT=3306;DATABASE=e_hearing;Uid=district_courts;Pwd=*googleisgood#";
try
{
MySqlConnection con;
con = new MySqlConnection();
con.ConnectionString = constring;
//con.ConnectionString = ConfigurationManager.ConnectionStrings["constring"].ToString();
con.Open();
MessageBox.Show("success");
}
catch(MySql.Data.MySqlClient.MySqlException ex)
{
MessageBox.Show(ex.Message);
}
A: First eliminate any possibility of a network issue by using Telnet to connect to the TCP address and Port, as described here http://blog.industrialnetworking.com/2011/09/using-telnet-to-test-open-ports.html
Second eliminate any coding errors by moving the database to the workstation machine, and replacing the IP address with localhost. Since you will always be able to talk to your local host TCP address, this will eliminate any network issues.
| |
doc_23530273
|
In order to do this the file needs to be brought into the DER format.
Using OpenSSL this can be done with:
openssl rsa -in private_key -pubout -outform DER
Piping this into python with:
base64.b32encode(hashlib.sha1(sys.stdin.read()[22:]).digest()[:10]).lower()'
will return the address correctly.
However I would like to perform the same using only python. My problem is that using the pycrypto module the DER output is different and the address therefore incorrect.
key = RSA.importKey(keyfile.read()).publickey()
print(key.exportKey(format='DER'))
Will result in a different output than the openssl call.
Is this just a matter of implementation that allows different results? Or am I making a mistake somewhere?
Any help would be appreciated
A: I was looking for something similar and, as of March 2019, OpenSSL recommends using pyca/cryptography instead of the crypto module. (source)
Here after is then what you intend to do: convert PEM to DER
# Convert an unencrypted RSA private key from PEM to DER with pyca/cryptography.
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization

# Read and parse the PEM-encoded private key (password=None: key is unencrypted).
with open("id_rsa", "rb") as keyfile:
    pem_key = serialization.load_pem_private_key(
        keyfile.read(),
        None,
        default_backend(),
    )

# Re-serialize the very same key, this time DER-encoded, still unencrypted,
# in the traditional OpenSSL private-key layout.
der_key = pem_key.private_bytes(
    serialization.Encoding.DER,
    serialization.PrivateFormat.TraditionalOpenSSL,
    serialization.NoEncryption(),
)

# Persist the DER bytes to disk.
with open("key.der", "wb") as outfile:
    outfile.write(der_key)
A: I wanted to convert a certificate file (not the key file) between DER and PEM, but Google took me here. Thanks to @alleen1's answer, I can convert a certificate or key from DER to PEM and vice versa.
Step one, load the file.
Step two,save it to the format you want.
I omit the process of obtaining the "pem_data" and "der_data"; you can get them from a file or anywhere else. They should be bytes, not strings — use the .encode() method when needed.
from cryptography import x509
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.backends import default_backend
# Step one, load the file.
# Load key file
# PEM
key = serialization.load_pem_private_key(pem_data, None, default_backend())
# DER
key = serialization.load_pem_private_key(der_data, None, default_backend())
# Load cert file
# PEM
cert = x509.load_pem_x509_certificate(pem_data, default_backend())
# DER
cert = x509.load_der_x509_certificate(der_data, default_backend())
# Step two,save it to the format you want.
# PEM key
key_val = key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.NoEncryption()
)
# DER key
key_val = key.private_bytes(
serialization.Encoding.DER,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.NoEncryption()
)
# PEM cert
cert_val = cert.public_bytes(serialization.Encoding.PEM)
# DER cert
cert_val = cert.public_bytes(serialization.Encoding.DER)
A: convert certificate to der using python
first we load the file
cert_file = keyfile.read()
Then we convert it to pem format
from OpenSSL import crypto
cert_pem = crypto.load_certificate(crypto.FILETYPE_PEM, cert_file)
now we are generating the der-output
i.e.: output equals to openssl x509 -outform der -in certificate.pem -out certificate.der.
cert_der = crypto.dump_certificate(crypto.FILETYPE_ASN1, cert_pem)
A: The inital question is: "Exact the public key from private key", this because the openSSL command states "pubout" in initial question.
Using OpenSSL this can be done with: (note that "pubout" defines OUTPUT as public key only)
openssl ALGORITHM_USED -in private_key -pubout -outform DER
But with Python cryptography module you can exact the public key from private key (note this seems applicable for RSA and EC based cryptography).
With Python:
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.backends import default_backend
# Create private key (example uses elliptic curve encryption)
priv_key = ec.generate_private_key(ec.SECP256K1, default_backend())
pub_key = priv_key.public_key()
pub_key_pem = pub_key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
with open('public_key.pem', 'wb') as outfile:
outfile.write(public_key_pem)
More info on cryptography documentation: https://cryptography.io/en/latest/hazmat/primitives/asymmetric/ec/#cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePublicKey
| |
doc_23530274
|
This is the changes that I have tried to implement with no success. Can someone shine a light on how to proceed?
// Conditional function checking for authorized users
function is_authorized_user() {
if ( is_user_logged_in() ) {
$user = wp_get_current_user();
$caps = $user->allcaps;
if ( ( isset($caps['edit_product']) && $caps['edit_product'] ) ||
array( 'custom_user_role1', 'custom_user_role2', $user->roles ) )
return true;
} else
return false;
}
How to make it work for an array of user roles, instead of just one? Any help is appreciated.
A: As you have 2 arrays to compare:
*
*your 2 custom roles (in an array)
*the current user roles (that is an array)
you can use array_intersect() php function to make it work this way:
// Conditional function checking for authorized users.
// Grants access when the current user can edit products or holds one of the
// two custom roles; anonymous visitors are always rejected.
function is_authorized_user() {
    if ( ! is_user_logged_in() ) {
        return false;
    }

    $user = wp_get_current_user();
    $caps = $user->allcaps;

    // Capability check: 'edit_product' must be present and truthy.
    if ( ! empty( $caps['edit_product'] ) ) {
        return true;
    }

    // Role check: any overlap between the allowed roles and the user's roles.
    $allowed_roles = array( 'custom_user_role1', 'custom_user_role2' );
    return (bool) array_intersect( $allowed_roles, $user->roles );
}
It should work now for multiple user roles.
| |
doc_23530275
|
List<string> department = new List<string>();
Add the data one by one is working but I cannot push the multiple data into the list:
using System;
namespace Linq_Basics
{
class Program
{
static void Main(string[] args)
{
List<names> names = new List<names>();
names.Add(new names { Firstname = "viki", Lastname = "Amar" });
names.Add(new names { Firstname = "Sankar", Lastname = "Pandi" });
names.Add(new names { Firstname = "Bala", Lastname = "Murugan" });
foreach (var v in names)
{
Console.WriteLine("The Firstname is {0} and the last name is {1}", v.Firstname, v.Lastname);
}
List<string> department = new List<string>();
department.Add(new List<string> { "ece", "cse" });
Console.ReadKey();
}
}
class names
{
public string Firstname { get; set; }
public string Lastname { get; set; }
}
}
A: Use List<T>.AddRange method to add sequence of strings to list of strings:
department.AddRange(new List<string> { "ece", "cse" });
Or simply call Add twice to avoid new list creation:
department.Add("ece");
department.Add("cse");
Or use collection initializer if you want to hold only these two strings in department list:
var department = new List<string> { "ece", "sce" };
A: List<string> department = new List<string> { "ece", "cse" };
A: department defines a list which holds the string type. You are trying to add a List<string> to it instead of a string:
List<string> department = new List<string>();
department.Add(new List<string> { "ece", "cse" });
Needs to be:
department.Add("ece")
//etc...
Or:
department = new List<string>() { "ece", "cse" };
Or:
department.AddRange(new List<string> { "ece", "cse" });
| |
doc_23530276
|
Here's what I did
list=[]
for i in range(2):
userinput = input("Enter some numbers: ")
x = userinput.split()
list.append(x)
print(list)
The output is this
Enter some numbers: 1 2
Enter some numbers: 3 4
[['1', '2'], ['3', '4']]
...but I need it to be formatted [[1, 2], [3, 4]]
I'm very new python, and I feel like Ive exhausted all my options, I couldnt find anything online that works (tried the join, replace etc.) , unless its impossible with my code, please help
A: Use x = [int(i) for i in userinput.split()]; you have to convert the input strings to integers.
A: You'll need to convert each value to an int to avoid the quotes (the values entered by the end user are strings):
list=[]
for i in range(2):
userinput = input("Enter some numbers: ")
x = userinput.split()
list.append([int(v) for v in x])
print(list)
Output:
Enter some numbers: 3 2 1
Enter some numbers: 4 5 6
[[3, 2, 1], [4, 5, 6]]
| |
doc_23530277
|
For example, make it shrink and fly away and the appear from under?
With jQuery, javascript or CSS?
A: Sure! It doesn’t matter if it’s a font icon or an SVG icon, you can still animate the element with CSS.
Here’s an example animation:
@keyframes fly {
0% {
transform: scale(1);
}
25% {
transform: scale(0.5);
}
100% {
transform: scale(0.5) translate(100vw, -100vh);
}
}
.plane {
display: inline-block;
fill: #e24145;// for demo purposes, only applies to SVGs
&.is-active {
animation-name: fly;
animation-duration: 1.5s;
animation-fill-mode: forwards;
animation-timing-function: ease-in;
}
}
Working demo http://codepen.io/tedw/pen/PZXjYv
A: JQuery Animation
Here's an example using JQuery and CSS. I'm using a rocket icon () because the plane icon doesn't display reliably on my computer (✈️ ← all I can see is a rectangular box).
$(document).ready(function(){
$("#plane-icon").click(function(){
$(this).animate({
left:'180px',
top:'-20px',
fontSize:'20px'
},2000);
$(this).animate({
left:'0px',
top:'180px',
fontSize:'100px'
},0);
$(this).animate({
left:'0px',
top:'80px',
},1000);
});
});
.icon-wrap {
width:180px;
height:180px;
font-size:100px;
cursor:pointer;
position:relative;
overflow:hidden;
}
#plane-icon {
position:absolute;
top:80px;
left:0px;
}
<script src="https://ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js"></script>
<div class="icon-wrap">
<span id="plane-icon"></span>
</div>
CSS Animation
Here's the same animation done with CSS keyframe animation instead of JQuery's animate() function. The animationend event trigger is used to reset the CSS class of the object after it has finished animating.
$(document).ready(function(){
$('body').on('webkitAnimationEnd oanimationend oAnimationEnd msAnimationEnd animationend',
function(){
$('#plane-icon').removeClass('launched');
}
);
$('#plane-icon').click(function(){
$(this).addClass('launched');
});
});
.icon-wrap {
width:180px;
height:180px;
font-size:100px;
overflow:hidden;
}
#plane-icon {
display:block;
cursor:pointer;
transform:translate(0px,80px) scale(1);
}
#plane-icon.launched {
animation-name:rocket;
animation-duration:3s;
cursor:default;
}
@keyframes rocket {
0% {
transform:translate(0px,80px) scale(1);
}
66% {
transform:translate(120px,-80px) scale(0.1);
}
67% {
transform:translate(120px,180px) scale(0.1);
}
68% {
transform:translate(0px,180px) scale(1);
}
100% {
transform:translate(0px,80px) scale(1);
}
}
<script src="https://ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js"></script>
<div class="icon-wrap">
<span id="plane-icon"></span>
</div>
| |
doc_23530278
|
I have this in single.php:
<?php get_sidebar('left'); ?>
And my sidebar-left.php is:
<div class="sidebar-left">
<?php if ( !function_exists('dynamic_sidebar') || !dynamic_sidebar('sidebar1') ) : ?>
<?php endif; ?>
</div>
A: just use css media query:
@media (max-width:500px) {
.sidebar-left {
display: none;
}
}
A: There are two ways to do that:
*
*Use css media queries:
@media (max-width: 500px) {
.sidebar-left {
display: none;
}
}
*Use jQuery:
if ($(window).width() < 500) {
$( ".sidebar-left" ).remove();
}
It is not possible to prevent WordPress (on the server side) from generating the sidebar HTML code, because WordPress does not have information about the width of your browser window. You can read more about this problem here:
Getting the screen resolution using PHP
A: There is another possibility which I'm not 100% sure it does what you need, but you could remove it in general for mobile devices.
Wordpress has the function wp_is_mobile(), so you can create a condition to not call the sidebar if the user is coming from a mobile device - which usually have the smaller screens.
It should look something like this:
<?php
if (!wp_is_mobile() ) {
// if device is not mobile get sidebar
get_sidebar('left');
}
?>
Like that you would only show the sidebar when the user is not coming from a mobile device.
Here is the wordpress the function reference:
https://codex.wordpress.org/Function_Reference/wp_is_mobile
Edit:
Also be aware that if you change it in single.php you only change it for the view of a single post. Otherwise I think you would also need to edit page.php for page views and so on..
| |
doc_23530279
|
public class Dodgem extends JPanel implements KeyListener, ActionListener
{
public Dodgem()
{
addKeyListener(this);
setFocusable(true);
setPreferredSize(new Dimension(640,480));
player = new Projectile();
counter = 0;
t = new Timer(5, this);
t.start();
}
@Override
public void keyTyped(KeyEvent e)
{
}
@Override
public void keyPressed(KeyEvent e)
{
System.out.println("Key Pressed");
if(e.getKeyCode() == KeyEvent.VK_UP)
{
player.setVelY(-2);
}
if(e.getKeyCode() == KeyEvent.VK_DOWN)
{
player.setVelY(2);
}
if(e.getKeyCode() == KeyEvent.VK_LEFT)
{
player.setVelX(-2);
}
if(e.getKeyCode() == KeyEvent.VK_RIGHT)
{
player.setVelX(2);
}
}
@Override
public void keyReleased(KeyEvent e)
{
if(e.getKeyCode() == KeyEvent.VK_UP || e.getKeyCode() == KeyEvent.VK_DOWN)
{
player.setVelY(0);
}
if(e.getKeyCode() == KeyEvent.VK_LEFT || e.getKeyCode() == KeyEvent.VK_RIGHT)
{
player.setVelX(0);
}
}
A: Just looking at the column to the right of this question, you can see there are at least 10 questions titled "KeyListener isn't working?" or something similar. Take a look around before posting.
As for a solution, KeyListeners get events from whatever is in focus, from my experience. Adding them to a JPanel on a JFrame won't return any events, but adding them directly to the JFrame may help.
| |
doc_23530280
|
if(true) { ...code here }
This will always resolve to true and run the code within, so why even have it?
I'm code reviewing a .js file for a colleague and questioning whether or not this is necessary. The statement indeed resolves to true and runs the code within. Without the condition it also runs the code, so why check an absolute truthy condition?
| |
doc_23530281
|
For example:
std::wstring* myString = &(L"my basic sentence" + some_wstring_var + L"\r\n");
The above does not work, but the below does work:
std::wstring temp = (L"my basic sentence" + some_wstring_var + L"\r\n");
std::wstring* myString = &temp;
A: In the first example you are getting the address of a temporary. After that line has executed, the wstring object you assigned myString to isn't available anymore (these are so-called rvalues, by the way). I think it should be obvious that in the second example you have a real object (an lvalue), which is valid as long as it doesn't run out of scope.
To overcome this limitation with the scope you can directly create a wstring on the heap, this might better suite your situation but without further information this is hard to tell:
std::wstring* myString = new std::wstring(L"my basic sentence" + some_wstring_var + L"\r\n");
The newly created wstring will me initialized with the contents of the temporary rvalue.
Just do not forget to destroy the pointer after you are done with it.
With C++11 things have become more complicated, so temporaries can be reused more often for performance reasons. But this topic is very tough and would exceed the scope of this question. I just wanted to mention it because it might interest you as well. For a really great explanation take a look at this SO question: What are move semantics?
A: std::wstring* myString = &(L"my basic sentence" + some_wstring_var + L"\r\n");
It points to a temporary object that its life time ends at the semicolon, so dereferencing the pointer and using it is undefined behavior.
std::wstring temp = (L"my basic sentence" + some_wstring_var + L"\r\n");
std::wstring* myString = &temp;
It points to a temporary object, but the life time is longer and dereferecing it is valid while that temporary object exists.
A: Of course you cannot assign the adress (that's what the & is) of a string constant to a variable. Have you thought about it? Where will the pointer point to?
| |
doc_23530282
|
Any idea why this error is happening and how can I result this
let app = XCUIApplication()
let alert = XCUIElement()
let existsPredicate = NSPredicate(format: "exists == true")
let systemAlert = alert.buttons["OK"]
testCase.addUIInterruptionMonitorWithDescription("Enable Notifications") { (alert) -> Bool in
alert.buttons["OK"].tap()
return true
}
self.buttons["Enable notifications"].tap()
testCase.expectationForPredicate(existsPredicate, evaluatedWithObject: systemAlert, handler: nil)
testCase.waitForExpectationsWithTimeout(5, handler: nil)
app.tap()
self.tabBars.buttons["Nearby"].tap()
A: Your alert is not a valid element from your app, as you've initialized it directly instead of derived it from your XCUIApplication instance. Therefore, the systemAlert that you pass into expectationForPredicate is not a valid XCUIElement either.
Change the assignment of alert to be derived from app, and that should fix your problem.
let alert = app.alerts.elementBoundByIndex(0)
| |
doc_23530283
|
ID
1
3
3
6
7
1
1
16
X
I want to loop through for each line and if it matches 1, extract the row number. In my code it works for two digit numbers (11, 12, 13...) but for single digit it does not work. It produces an empty output file. i tried with the substring substr($data[$j],0 ); to catch the whole string in the field but it doesn't work at all then. How can I fix this?
print "\nRead list file in\n";
open(FILE, "list.txt") or die("Unable to open file");
my @data = <FILE>; # this is the whole filestored
my $data_size = @data;
##name the input
print "Which number do you want to sort out?";
my $input = <>;
chomp($input);
print "\n";
## for each line of the file get the column numbers to cut based on input number
open OUTPUT, ">$input.txt";
print OUTPUT "0 \n";
for (my $j =0; $j < $data_size; $j++){
my $match = substr($data[$j],0,2);
if ($match eq $input){
print OUTPUT "$j\n";
}
}
print "data is in the files ";
A: I think you need to chomp() the array @data after you read it in to remove linefeeds, like this:
print "\nRead list file in\n";
open(FILE, "list.txt") or die("Unable to open file");
my @data = <FILE>; # this is the whole filestored
chomp @data;
A: The immediate problem is that the date you have read into @data has newlines at the end of each line, while you are using chomp on the string input from the terminal so that can never match.
If you are matching a two-digit number then substr $data[$j], 0, 2 will remove the newline for you, but anything short remains untouched.
You could just chomp @data, but there are another few details that could be improved, so this is a rewrite that should be useful
use strict;
use warnings;

print "\nRead list file in\n";
open my $in_fh, '<', 'list.txt' or die "Unable to open 'list.txt' for input: $!";
# Strip newlines at read time so numeric comparison below works for any digit count
chomp(my @data = <$in_fh>);
close $in_fh;

# Fixed: the prompt previously read "sort out_fh?" — a search-and-replace
# leftover from renaming the output handle that leaked into the user-facing string.
print "Which number do you want to sort out?";
chomp(my $input = <>);
print "\n";

open my $out_fh, '>', "$input.txt" or die "Unable to open '$input.txt' for output: $!";
print $out_fh "0 \n";

my $found;
for my $j (0 .. $#data) {
    # Extract the first run of digits; lines with no number (e.g. "X") are skipped
    my ($match) = $data[$j] =~ /(\d+)/;
    next unless $match;
    if ($match == $input) {
        print $out_fh "$j\n";   # record the (0-based) row number of each match
        $found = 1;
    }
}
print "Data is in the files\n" if $found;
| |
doc_23530284
|
But I don't want to specify each column name of the table; I want to get the distinct data of every column in a separate result set.
+----+------+---------+-----------+
| Id | name | Address | City |
+----+------+---------+-----------+
| 1 | A | Max | Rajasthan |
| 2 | A | Min | Delhi |
| 1 | A | Max | Rajathan |
| 1 | A | Min | UP |
+----+------+---------+-----------+
This is the code of my Stored Procedure for getting different result set of each column
create proc sp_task1 @table varchar(20)
as
begin
exec('
select distinct id FROM ' +@table+'
')
exec('
select distinct name FROM ' +@table+'
')
exec('
select distinct address FROM ' +@table+'
')
exec('
select distinct city FROM ' +@table+'
')
end
exec sp_task1 @table = 'table1'
This is what I get in result when I Execute the SP.
+----+
| id |
+----+
| 1 |
| 2 |
+----+
+------+
| name |
+------+
| A |
+------+
+---------+
| Address |
+---------+
| Max |
| Min |
+---------+
+-----------+
| city |
+-----------+
| Rajasthan |
| Delhi |
+-----------+
Now, I want to do this dynamically without specifying the column names.
Please give me any kind of help regarding this issue.
A: You can use below query to do this.
TEST SETUP
CREATE TABLE Test(id int, name varchar(20), city varchar(20));
INSERT INTO Test
values(1,'abc','chennai'),
(2,'abc','bangalore');
CREATE PROCEDURE USP_GetDistinct(@TableName SYSNAME,@TableSchema SYSNAME )
AS
BEGIN
DECLARE @SQL NVARCHAR(MAX)
SET @SQL = (
SELECT 'SELECT DISTINCT ' +
Column_Name +
' FROM ' + QUOTENAME(TABLE_SCHEMA) + '.' + QUOTENAME(Table_Name) + CHAR(13)+ CHAR(10) + 'GO'+CHAR(13)+ CHAR(10)
FROM INFORMATION_SCHEMA.COLUMNS C
WHERE TABLE_NAME = @TableName AND table_schema = @TableSchema
FOR XML PATH(''), Type
).value('.', 'varchar(max)')
--PRINT @SQL
EXECUTE (@SQL)
END
Execute procedure
EXEC USP_GetDistinct @TableName='Test', @TableSchema='dbo'
Result set
+----+
| id |
+----+
| 1 |
| 2 |
+----+
+------+
| name |
+------+
| abc |
+------+
+-----------+
| city |
+-----------+
| bangalore |
| chennai |
+-----------+
| |
doc_23530285
|
I considered maybe this was because these functions were running asynchronously, and I was asking to get values that weren't yet stored in the cache, but that does not explain why it wouldn't print 'got data' ever.
My console logs-> 'start'->'data saved'->'end' (and no 'got data', ever)
In redis-cli-> flushall->get test->(nil)->run app.js(in the other terminal)->get test->"1, 2, 3, 4, 5"
I get no errors at all, the code runs, but does not do what I want it to.
Also, don't know if this is relevant, but when connecting to the Redis server, just Redis.createClient() only created a client but did not connect and when I looked it up, the general idea I got was that newer versions did not connect automatically and you had to manually redisClient.connect().
I struggled with this a bit at the start but seem to have sorted out that problem. I just thought I'd mention it — if I messed up somehow, please correct me, as I'm pretty new to Node.js and coding in general.
My code:
const redisClient = Redis.createClient();
redisClient.connect();
const data = [1, 2, 3, 4, 5];
app.get('/', async(req, res, next) => {
console.log('start')
await redisClient.set('test', data);
console.log('data saved');
await redisClient.get('test', (error, test)=>{
console.log('got data');
console.log(test);
});
console.log('end');
});
Thanks!
A: I have seen your code. Based on my investigation you should remove the callback and keep await only while you get the data from redis.
I have investigated more this issue and have found that client.get() and client.set() function runs asynchronously. Hence it would achieve this way.
client.set('foo', 'bar', (err, reply) => {
if (err) throw err;
console.log(reply);
client.get('foo', (err, reply) => {
if (err) throw err;
console.log(reply);
});
});
But everytime is not the same use-case that we should set and get the value immediately.
To get rid of this, Following are the options.
*
*Promises and async/await
*you can promisify a subset of node_redis functions one at a time using native Node.js promises and util.promisify:
example:
const redis = require('redis');
const { promisify } = require('util');
const runApplication = async () => {
const client = redis.createClient();
const setAsync = promisify(client.set).bind(client);
const getAsync = promisify(client.get).bind(client);
await setAsync('foo', 'bar');
const fooValue = await getAsync('foo');
console.log(fooValue);
};
I have used the await here and solve an issue. In addition to that you can use redis.get().then() also to fetch the data rather than a callback.
I am also attaching the link with an example provided by redis repo
https://github.com/redis/node-redis/blob/master/examples/connect-as-acl-user.js
Following is the code, I have tested and it is working fine now.
redis.js
const redis = require("redis");
const redisClient = redis.createClient({
url: "redis://host:6379",
password: "password",
});
redisClient.connect();
// const { promisify } = require("util");
// promisify(redisClient.get).bind(redisClient);
// promisify(redisClient.set).bind(redisClient);
module.exports = redisClient;
index.js
const express = require("express");
const app = express();
const redisClient = require("./redis");

// Cache the value passed as ?p=... under the key "test".
app.get("/set", async (req, res, next) => {
  try {
    const data = req.query.p;
    await redisClient.set("test", data);
    res.status(200).json({
      message: "data cached",
      data: data,
    });
  } catch (err) {
    console.error(err);
    res.status(500).json({
      message: "Something went wrong",
    });
  }
});

// Read the cached value back. node-redis v4 returns a Promise from get(),
// so a plain `await` is all that is needed — no callback, no extra .then().
app.get("/get", async (req, res, next) => {
  try {
    // Fixed: this previously referenced `reddisClient` (typo), which would
    // throw a ReferenceError at runtime and always hit the catch branch.
    const data = await redisClient.get("test");
    res.status(200).json({
      message: "Cached data retrieved",
      data,
    });
  } catch (err) {
    console.error(err);
    res.status(500).json({
      message: "Something went wrong",
    });
  }
});

app.listen(process.env.PORT || 3000, () => {
  console.log("Node server started");
});
Please find attached a screenshot of the output.
So the final thought is that, when we are using callback and wants to execute the code synchronously you should either use callback inside callback (but it is created callback hell, so it would not suggested anymore) or you should use promise/async await/native promisify library of nodejs.
Please visit below link to get the simplest understanding and example.
https://docs.redis.com/latest/rs/references/client_references/client_nodejs/
Hope my question clear your mind. I am happy to accept the relevant suggestion to improve an answer.
| |
doc_23530286
|
I am using AEM 6.1. If I open the page in classic UI, it looks good. But when I open it in touch UI, it's not loading properly.
*
*I already have a property "sling:resourceSuperType = wcm/foundation/components/page"
*Some of the content is coming on the page.
*It can be a JS issue, but even if I remove all the js files from my component, the issue is still there.
Anyone experienced the same issue?
A: This issue might be due to the 'Apache Sling Main Servlet' default configuration.
And also we can observe the error message 'components could not be loaded' or 'out of memory' in browser console when we are in Touch UI editor Mode.
By default 'Apache Sling Main Servlet' is configured with 1000 calls per request. If we increase this limit then the issue will be resolved.
For updating this follow the steps.
1. Go to the http://<host>:<port-number>/system/console/configMgr (ex: http://localhost:4502/system/console/configMgr)
2. Ctrl + F and Search for "Apache Sling Main Servlet".
3. Change the value of 'Number of Calls per Request' from 1000 (default value) to 1500 or 2000.
4. Click on Save.
5. Now you can go to Touch UI edit mode and can be able to edit the components.
A: Check in your CRXDE under the folder /libs/wcm/core/content and look for a folder called "editor.html" with the sibling of just "editor". If you delete the editor.html folder and keep "editor", then your page will start working again.
| |
doc_23530287
| ||
doc_23530288
|
#include <fstream>
#include <string>
#include <vector>
#include <iostream>
using namespace std; // not recommended
double averageCalc(string[],int);
int main () {
double average;
string line;
ifstream myfile ("array_pgmdata.txt");
//int index = 0; // not needed
//string myArray[index]; // UB - if it even compiles, it's a VLA of size 0.
std::vector<std::string> myArray; // use this instead to be able to grow it
// dynamically
if (myfile) // open and in a good state
{
// while (! myfile.eof() ) // It'll not be eof when you've read the last line
// only when you try to read beynd the last line,
// so you'll add "line" one extra time at the end
// if you use that. Use this instead:
while(getline(myfile, line))
{
// myArray[index++] << line; // you have 0 elements in the array and
// can't add to it in any way
myArray.push_back(line);
}
}
else cout << "Unable to open file";
for(size_t idx=0; idx < myArray.size(); ++idx) {
std::cout << myArray[idx] << "\n";
}
average = averageCalc(myArray[], line); // error here
return 0;
}
double averageCalc(string nums[], int count)
{
int a, total, elements, averaged1, averaged2;
// string averaged2;
for(a = 0; a < count; a++)
{
total+=a;
elements++;
}
averaged1 = total / elements;
return averaged2;
}
A: There's a few problems here. Firstly, your function averageCalc expects a parameter of type string[] which is an array of strings. When you call the function, you are trying to pass it a std::vector<string>, which is not an array of strings, it is a class. Presumably, you would want to change your function to take in a vector, like so:
double averageCalc( const std::vector<string> & nums ); // no need for size now
The other issue you have is in calling your function. When you call it, you pass myArray[] as a parameter, which is the error your compiler is giving you. This is not valid syntax; you simply want to pass in myArray.
A: I think that the error occurs because you first create the array with std::vector<std::string> myArray;, so the data is of string type, but when you want to calculate the average value the function expects a numeric value (int, double, etc.) in order to perform the math. Either change the string to an int, or use a function to convert it:
int main()
{
string s = "12345";
// object from the class stringstream
stringstream geek(s);
// The object has the value 12345 and stream
// it to the integer x
int x = 0;
geek >> x;
// Now the variable x holds the value 12345
cout << "Value of x : " << x;
return 0;
}
| |
doc_23530289
|
What's the best way to put an icon in one of the tabs and have the tab text below it (keeping in mind best practices and scalability).
https://codepen.io/anon/pen/BzAEQb
<html>
<head>
<!-- Material Design Lite -->
<script src="https://code.getmdl.io/1.1.3/material.min.js"></script>
<link rel="stylesheet" href="https://code.getmdl.io/1.1.3/material.indigo-pink.min.css">
<!-- Material Design icon font -->
<link rel="stylesheet" href="https://fonts.googleapis.com/icon?family=Material+Icons">
</head>
<body>
<!-- Simple header with fixed tabs. -->
<div class="mdl-layout mdl-js-layout mdl-layout--fixed-header
mdl-layout--fixed-tabs">
<header class="mdl-layout__header">
<div class="mdl-layout__header-row">
<!-- Title -->
<span class="mdl-layout-title">Title</span>
</div>
<!-- Tabs -->
<div class="mdl-layout__tab-bar mdl-js-ripple-effect">
<a href="#fixed-tab-1" class="mdl-layout__tab is-active"><i class="material-icons">person</i><span class="MY-CLASS-2">TAB 1</span> </a>
<a href="#fixed-tab-2" class="mdl-layout__tab">Tab 2</a>
<a href="#fixed-tab-3" class="mdl-layout__tab">Tab 3</a>
</div>
</header>
<div class="mdl-layout__drawer">
<span class="mdl-layout-title">Title</span>
</div>
<main class="mdl-layout__content">
<section class="mdl-layout__tab-panel is-active" id="fixed-tab-1">
<div class="page-content"><!-- Your content goes here --></div>
</section>
<section class="mdl-layout__tab-panel" id="fixed-tab-2">
<div class="page-content"><!-- Your content goes here --></div>
</section>
<section class="mdl-layout__tab-panel" id="fixed-tab-3">
<div class="page-content"><!-- Your content goes here --></div>
</section>
</main>
</div>
</body>
</html>
A: html
<div class="item">
<img src=""/>
<span class="caption">Tab</span>
</div>
css
div.item {
vertical-align: top;
display: inline-block;
text-align: center;
width: 50px;
}
img {
width: 30px;
height: 30px;
background-color: blue;
}
.caption {
display: block;
}
http://jsfiddle.net/ZhLk4/360/
A: I can do it by structuring one of the divs like so:
<a href="#scroll-tab-1" class="mdl-layout__tab is-active MY-CLASS-1"><i class="material-icons">person</i><span class="MY-CLASS-2">Profile</span> </a>
And adding the following styles:
.MY-CLASS-1 {
position: relative;
width: 60px;
}
.MY-CLASS-2 {
display: block;
position: absolute;
bottom: -12px;
font-size: smaller;
left: 198px;
}
https://codepen.io/anon/pen/XKEQZr
How can I make this code better overall?
A:
ul,
ul li {
margin: 0;
padding: 0;
list-style-type: none;
}
ul li {
display: inline-block;
}
ul li a {
background: #efefef;
padding: 0 15px;
text-decoration: none;
line-height: 40px;
display: inline-block;
}
ul li a span {
background: url('http://findicons.com/files/icons/2255/cologne/32/user.png') no-repeat #ccc;
width: 30px;
height: 30px;
margin: 0 15px 0 0;
display: inline-block;
vertical-align: middle;
}
<html>
<head>
</head>
<body>
<ul>
<li><a href=""><span></span>aaaa</a></li>
<li><a href=""><span></span>bbbb</a></li>
<li><a href="">cccc</a></li>
<li><a href=""><span></span>dddd</a></li>
</ul>
</body>
</html>
| |
doc_23530290
|
RadkyData.Columns.Add("ČísloDokladuDodavatele", typeof(string));
RadkyData.Columns.Add("Množství", typeof(string));
RadkyData.Columns.Add("NákupníCena", typeof(string));
RadkyData.Columns.Add("PřepočtováJednotka", typeof(string));
RadkyData.Columns.Add("Přepočtovýkoeficient", typeof(string));
RadkyData.Columns.Add("DoporučenáCena", typeof(string));
RadkyData.Columns.Add("KódZboží", typeof(string));
RadkyData.Columns.Add("Ean", typeof(string));
RadkyData.Columns.Add("Název", typeof(string));
RadkyData.Columns.Add("DPH", typeof(string));
RadkyData.Columns.Add("OS1", typeof(string));
RadkyData.Columns.Add("OS2", typeof(string));
RadkyData.Columns.Add("OS3", typeof(string));
RadkyData.Columns.Add("OS4", typeof(string));
RadkyData.Columns.Add("OS5", typeof(string));
RadkyData.Columns.Add("US1", typeof(string));
RadkyData.Columns.Add("US2", typeof(string));
RadkyData.Columns.Add("US3", typeof(string));
RadkyData.Columns.Add("Oddělení", typeof(string));
RadkyData.Columns.Add("MěrnáJednotka", typeof(string));
RadkyData.Columns.Add("Barva", typeof(string));
RadkyData.Columns.Add("Velikost", typeof(string));
RadkyData.Columns.Add("Délka", typeof(string));
RadkyData.Columns.Add("Šířka", typeof(string));
RadkyData.Columns.Add("TypDokladu(PNS)", typeof(string));
RadkyData.Columns.Add("Datum OD", typeof(string));
RadkyData.Columns.Add("Datum DO", typeof(string));
RadkyData.Columns.Add("Značka", typeof(string));
RadkyData.Columns.Add("Sezona", typeof(string));
RadkyData.Columns.Add("Výrobní č.", typeof(string));
RadkyData.Columns.Add("Popis_Rozpis", typeof(string));
RadkyData.Columns.Add("Sufix EAN", typeof(string));
I need SUM column "Množství" and column "NákupníCena", where is the columns: KódZboží,Ean,Velikost are the same in rows.
This is my code:
var res = RadkyData.AsEnumerable()
.Select(r=>new
{
c1 = r.Field<string>("ČísloDokladuDodavatele"),
c2 = r.Field<string>("Množství"),
c3 = r.Field<string>("NákupníCena"),
c4 = r.Field<string>("PřepočtováJednotka"),
c5 = r.Field<string>("Přepočtovýkoeficient"),
c6 = r.Field<string>("DoporučenáCena"),
c7 = r.Field<string>("KódZboží"),
c8 = r.Field<string>("Ean"),
c9 = r.Field<string>("Název"),
c10 = r.Field<string>("DPH"),
c11 = r.Field<string>("OS1"),
c12 = r.Field<string>("OS2"),
c13 = r.Field<string>("OS3"),
c14 = r.Field<string>("OS4"),
c15 = r.Field<string>("OS5"),
c16 = r.Field<string>("US1"),
c17 = r.Field<string>("US2"),
c18 = r.Field<string>("US3"),
c19 = r.Field<string>("Oddělení"),
c20 = r.Field<string>("MěrnáJednotka"),
c21 = r.Field<string>("Barva"),
c22 = r.Field<string>("Velikost"),
c23 = r.Field<string>("Délka"),
c24 = r.Field<string>("Šířka"),
c25 = r.Field<string>("TypDokladu(PNS)"),
c26 = r.Field<string>("Datum OD"),
c27 = r.Field<string>("Datum DO"),
c28 = r.Field<string>("Značka"),
c29 = r.Field<string>("Sezona"),
c30 = r.Field<string>("Výrobní č."),
c31 = r.Field<string>("Popis_Rozpis"),
c32 = r.Field<string>("Sufix EAN")
})
.GroupBy(g => new { g.c1, g.c4, g.c5, g.c6, g.c7, g.c8, g.c9, g.c10, g.c11, g.c12, g.c13, g.c14, g.c15, g.c16, g.c17, g.c18, g.c19, g.c20, g.c21, g.c22, g.c23, g.c24, g.c25, g.c26, g.c27, g.c28, g.c29, g.c30, g.c31, g.c32 })
.Select(x => new {
col1 = x.Key.c1,
col2 = x.Sum(s => double.Parse(s.c2, System.Globalization.CultureInfo.InvariantCulture)),
col3 = x.Sum(s => double.Parse(s.c3, System.Globalization.CultureInfo.InvariantCulture)),
col4 = x.Key.c4,
col5 = x.Key.c5,
col6 = x.Key.c6,
col7 = x.Key.c7,
col8 = x.Key.c8,
col9 = x.Key.c9,
col10 = x.Key.c10,
col11 = x.Key.c11,
col12 = x.Key.c12,
col13 = x.Key.c13,
col14 = x.Key.c14,
col15 = x.Key.c15,
col16 = x.Key.c16,
col17 = x.Key.c17,
col18 = x.Key.c18,
col19 = x.Key.c19,
col20 = x.Key.c20,
col21 = x.Key.c21,
col22 = x.Key.c22,
col23 = x.Key.c23,
col24 = x.Key.c24,
col25 = x.Key.c25,
col26 = x.Key.c26,
col27 = x.Key.c27,
col28 = x.Key.c28,
col29 = x.Key.c29,
col30 = x.Key.c30,
col31 = x.Key.c31,
col32 = x.Key.c32
});
But this code is not grouping and summing the rows... Do you have any ideas, please?
Example: I have 2 otherwise-identical rows that differ only in the columns "Množství" and "NákupníCena". From these 2 rows I want to produce one row containing the SUM of "Množství" and "NákupníCena".
Short variant of code:
var res = RadkyData.AsEnumerable()
.Select(r=>new
{
c1 = r.Field<string>("ČísloDokladuDodavatele"),
c2 = r.Field<string>("Množství"),
c3 = r.Field<string>("NákupníCena"),
})
.GroupBy(g => new { g.c1 })
.Select(x => new {
col1 = x.Key.c1,
col2 = x.Sum(s => double.Parse(s.c2, System.Globalization.CultureInfo.InvariantCulture)),
col3 = x.Sum(s => double.Parse(s.c3, System.Globalization.CultureInfo.InvariantCulture)),
});
A: Here I created a table:
Here is the SQL query you are trying to implement with LINQ, and the results:
Here is the LINQ code I used to get the SUMs:
// Group only by the columns whose values are expected to be equal across the
// rows being merged, then SUM the two numeric columns within each group.
var result = ctx.Set<RadkyData>()
    .GroupBy(x => new { x.c1, x.c2, x.c3, x.c4, x.c5, x.c6 })
    .Select(x => new
    {
        col1 = x.Key.c1,
        col_Mnozstvi = x.Sum(y => y.Množství),        // total quantity per group
        col_NakupniCena = x.Sum(y => y.NákupníCena),  // total purchase price per group
        col2 = x.Key.c2,
        col3 = x.Key.c3,
        col4 = x.Key.c4,
        col5 = x.Key.c5,
        col6 = x.Key.c6
    });
Look, all the fields but "Množství" and Nákupní cena" have equal values.
Here is the result of the LINQ - it is the same as the result of the SQL query:
You wrote:
I need SUM column "Množství" and column "NákupníCena", where is the columns: KódZboží,Ean,Velikost are the same in rows.
But you are grouping by ALL fields, where the values can differ! You have to group only by those rows where the values are being expected to be the same. These are in your case only "KódZboží", "Ean" and "Velikost". In this case you'll get the right SUM.
| |
doc_23530291
|
I have tried to connect my Access database directly to my SQL server, but my server does not allow that due to security reasons.
I have found a module which will print the queries in one file but it only prints the titles, which is useful but not exactly what I am looking for.
Here is that module code:
Public Sub IterateQueryDefsCollection()
    ' Prints the name and the SQL text of every saved query (QueryDef)
    ' in the current Access database to the Immediate window.
    Dim dbMain As DAO.Database
    Dim qdf As DAO.QueryDef
    Set dbMain = CurrentDb
    For Each qdf In dbMain.QueryDefs
        Debug.Print qdf.Name    ' Prints name of query
        ' The loop variable already IS the QueryDef, so re-fetching it from
        ' the collection by name (as the original did) was redundant.
        Debug.Print qdf.SQL     ' Prints SQL syntax of query
    Next
End Sub
I didn't write this code and am still fairly new to SQL, so my question is: is there a way to export all of my queries, along with their data, to a .sql file?
Thank you in advance.
A: Your SQL server's authentication process does not 'know' who or what application is attempting to connect to it. If you provide valid credentials, then it should accept the connection attempt. What type of credentials do you provide when you connect to the SQL Server normally (when it works)? Use the same ones when you specify the credentials that Access attempts to connect with, and the Access connection will also work. If you create linked table(s) inside Access, Access should ask you to specify these connection credentials. Once you have a valid linked table connected to the SQL Server, you will be able to run the Access queries against the SQL Server table (using the Access linked table as a pointer).
A: I'm puzzled as to why you'd want to update ten queries each week. Seems to me these should be parameter driven, etc. Note that the following is not moving the data across. Seems to me some append queries in code should handle that quite nicely.
The following is a module I wrote to attempt to upsize all the queries in an Access MDB to the server. Note that as some of these queries were "stacked", that is they called other queries, you had to run this subroutine several times until it couldn't upsize any more.
' Attempts to recreate every saved Access query as a SQL Server view by
' executing a pass-through "CREATE VIEW" statement. Queries that call other
' ("stacked") queries can only succeed after their dependencies exist, so
' this sub is intended to be run repeatedly until nothing more upsizes.
' Outcomes are logged per query to the local zCreateQueryErrors table.
' NOTE(review): relies on module-level state declared elsewhere -
' dbsPermanent, strTestDatabaseName, strSQLServerName - confirm they are set.
Sub CopyAllQueriesAsViewsDAO()
    Dim strError As String, strQueryName As String, lngQueryID As Long
    Dim Q As QueryDef, blnSuccessfulQ As Boolean
    Dim strSQL As String, strNewSQL As String, strConnect As String
    Dim intCountFailure As Integer, intCountSuccessful As Integer
    Dim intAlreadyAnError As Integer, strAction As String
    Dim mydatabase As DAO.Database, myquerydef As DAO.QueryDef
    On Error GoTo tagError
    ' ODBC connect string for the pass-through querydef (trusted connection).
    strConnect = "ODBC;DRIVER={sql server};DATABASE=" & _
        strTestDatabaseName & ";SERVER=" & strSQLServerName & ";" & _
        "Trusted_Connection=Yes"
    DoCmd.Hourglass True
    For Each Q In dbsPermanent.QueryDefs
        intAlreadyAnError = 0
        strQueryName = Q.Name
        ' Skip the hidden "~sq_" querydefs Access creates for embedded SQL.
        If Left(strQueryName, 4) = "~sq_" Then
        Else
            strError = ""
            strAction = ""
            lngQueryID = FetchQueryID(strQueryName, blnSuccessfulQ) ' Add the record or locate the ID
            ' Only retry queries not already logged as "Successful".
            If blnSuccessfulQ = False Then
                ' Flatten line breaks, strip the trailing ";", and rewrite
                ' "=true"/"=false" as "=1"/"=0" for SQL Server.
                strNewSQL = adhReplace(Q.SQL, vbCrLf, " ")
                strNewSQL = Left(strNewSQL, InStr(strNewSQL, ";") - 1)
                strNewSQL = ConvertTrueFalseTo10(strNewSQL)
tagRetryAfterCleanup:
                ' Unnamed pass-through querydef executed directly on the server.
                Set myquerydef = dbsPermanent.CreateQueryDef("") 'Q.Name & " DAO Test")
                myquerydef.ReturnsRecords = False
                myquerydef.Connect = strConnect
                myquerydef.SQL = "CREATE VIEW [" & strQueryName & "] AS " & strNewSQL
                myquerydef.Execute
                myquerydef.Close
                strSQL = "UPDATE zCreateQueryErrors SET zcqeErrorMsg = 'Successful' " & _
                    "WHERE ID=" & lngQueryID & ";"
                CurrentDb.Execute strSQL, dbFailOnError
                intCountSuccessful = intCountSuccessful + 1
            End If
        End If
tagResumeAfterError:
    Next
    DoCmd.Hourglass False
    MsgBox "There were " & intCountSuccessful & " successful." & vbCrLf & _
        intCountFailure & " failures."
    Exit Sub
tagError:
    ' Error handler: inspects the DAO Errors collection, tries a limited set of
    ' automatic SQL clean-ups, and Resumes back into the loop to retry.
    ' MsgBox Err.Description
    Dim errX As DAO.Error, strFunctionName As String, intPosnFunction As Integer
    Dim strThisError As String
    If Errors.Count > 1 Then
        For Each errX In DAO.Errors
            ' NOTE(review): Mid(...,48) presumably strips the fixed-length
            ' "[Microsoft][ODBC ...]" driver prefix - confirm the offset.
            strThisError = mID(errX.Description, 48)
            If intAlreadyAnError > 5 Then ' Hit 10 errors so don't attempt to clean up the query
                If errX.Number <> 3146 Then
                    strError = strError & "After fix: " & errX.Number & ": " & strThisError & " "
                End If
            Else
                Select Case errX.Number
                    Case 3146 ' Ignore as this is the generic OLE db error
                    Case 195 ' 'xxx' is not a recognized function name. > Insert dbo. in front of function name
                        intAlreadyAnError = intAlreadyAnError + 1
                        strFunctionName = mID(strThisError, 2, InStr(2, strThisError, "'") - 2)
                        intPosnFunction = InStr(strNewSQL, strFunctionName)
                        strNewSQL = Left(strNewSQL, intPosnFunction - 1) & "dbo." & mID(strNewSQL, intPosnFunction)
                        strAction = strAction & "Inserted dbo for " & strFunctionName & " "
                        Resume tagRetryAfterCleanup
                    ' The ORDER BY clause is invalid in views, .... , unless TOP is also specified.
                    Case 1033 'TOP 100 PERCENT
                        strNewSQL = Left(strNewSQL, 7) & " TOP 100 PERCENT " & mID(strNewSQL, 8)
                        strAction = strAction & "Inserted TOP 100 PERCENT "
                        Resume tagRetryAfterCleanup
                    Case Else
                        strError = strError & errX.Number & ": " & mID(errX.Description, 48) & " "
                End Select
            End If
        Next errX
    Else
        strError = Err.Number & ", " & Err.Description
    End If
    ' Log the failure (message, attempted fixes, final SQL) for this query.
    strSQL = "UPDATE zCreateQueryErrors SET zcqeErrorMsg = '" & adhHandleQuotes(strError) & "', " & _
        "zcqeAction = '" & strAction & "', zcqeFinalSQL = '" & adhHandleQuotes(strNewSQL) & "' " & _
        "WHERE ID=" & lngQueryID & ";"
    CurrentDb.Execute strSQL, dbFailOnError
    intCountFailure = intCountFailure + 1
    Resume tagResumeAfterError
End Sub
Public Function ConvertTrueFalseTo10(strIncoming As String)
    ' Rewrites Boolean comparisons for SQL Server: "=false" -> "=0",
    ' then "=true" -> "=1" (same order as the original scan loops).
    ' The built-in Replace with its default compare mode follows the module's
    ' Option Compare setting, exactly like the InStr loop it replaces.
    ' NOTE(review): with the default binary compare the match is
    ' case-sensitive, so "=True"/"=False" would NOT be converted - confirm
    ' the casing the source SQL actually uses.
    ConvertTrueFalseTo10 = Replace(Replace(strIncoming, "=false", "=0"), "=true", "=1")
End Function
Function FetchQueryID(strQueryName As String, blnSuccessfulQ As Boolean) As Long
    ' Looks up (or creates) the zCreateQueryErrors row for the named query and
    ' returns its ID. Sets blnSuccessfulQ = True when the row's stored error
    ' message is "Successful" (i.e. the query was already upsized).
    Dim myRS As Recordset
    Dim strSQL As String
    blnSuccessfulQ = False
    strSQL = "SELECT ID, zcqeErrorMsg FROM zCreateQueryErrors " & _
        "WHERE zcqeName='" & strQueryName & "';"
    ' Read-only snapshot is fine for the existence check.
    Set myRS = dbsPermanent.OpenRecordset(strSQL, dbOpenSnapshot)
    If myRS.EOF Then
        ' BUG FIX: the original opened this recordset with dbOpenSnapshot,
        ' but snapshot recordsets are read-only, so AddNew would fail.
        ' A dynaset is updatable and supports LastModified.
        Set myRS = dbsPermanent.OpenRecordset("zCreateQueryErrors", dbOpenDynaset)
        myRS.AddNew
        myRS!zcqeName = strQueryName
        myRS.Update
        ' Reposition on the row just added to read its autonumber ID.
        myRS.Move 0, myRS.LastModified
        FetchQueryID = myRS!ID
    Else
        myRS.MoveFirst
        FetchQueryID = myRS!ID
        If myRS!zcqeErrorMsg = "Successful" Then
            blnSuccessfulQ = True
        End If
    End If
    myRS.Close
    Set myRS = Nothing
End Function
Public Function adhHandleQuotes(strValue As String) As String
' Escapes a string for embedding in a single-quoted SQL literal by doubling
' every single quote (' becomes ''), via adhReplace.
'
' Adapted from Access 97 Developer's Handbook
' by Litwin, Getz, and Gilbert (Sybex)
' Copyright 1997. All rights reserved.
'
' Solution suggested by Jurgen Welz, a diligent reader.
' In:
'   strValue: Value to fix up.
' Out:
'   Return value: the text, with single quotes doubled.
' Requires:
'   adhReplace (or some other function that will replace
'   one string with another)
'
' NOTE(review): the original header text and the example talked about double
' quotes / Chr$(34), but the code only doubles SINGLE quotes; double quotes
' in strValue pass through unchanged. The QUOTE constant below is unused -
' left in place to keep this a comment-only change.
Const QUOTE As String = """"
Const SingleQUOTE As String = "'"
adhHandleQuotes = adhReplace(strValue, SingleQUOTE, _
SingleQUOTE & SingleQUOTE)
End Function
Function adhReplace(ByVal varValue As Variant, _
ByVal strFind As String, ByVal strReplace As String) As Variant
' Replace all instances of strFind with strReplace in varValue.
' From Access 97 Developer's Handbook
' by Litwin, Getz, and Gilbert (Sybex)
' Copyright 1997. All rights reserved.
' In:
'   varValue: value you want to modify (ByVal, so the caller's copy is untouched)
'   strFind: string to find
'   strReplace: string to replace strFind with
'
' Out:
'   Return value: varValue, with all occurrences of strFind
'   replaced with strReplace; Null in -> Null out.
'
' Scanning resumes AFTER each inserted replacement (intPos is advanced by
' the replacement length), so a strReplace that contains strFind cannot
' cause an infinite loop.
' NOTE(review): an empty strFind is a degenerate case - InStr then returns
' the current position, so strReplace gets inserted repeatedly; callers are
' expected to pass a non-empty strFind.
Dim intLenFind As Integer
Dim intLenReplace As Integer
Dim intPos As Integer
If IsNull(varValue) Then
adhReplace = Null
Else
intLenFind = Len(strFind)
intLenReplace = Len(strReplace)
intPos = 1
Do
intPos = InStr(intPos, varValue, strFind)
If intPos > 0 Then
varValue = Left(varValue, intPos - 1) & _
strReplace & mID(varValue, intPos + intLenFind)
intPos = intPos + intLenReplace
End If
Loop Until intPos = 0
End If
' Final assignment covers both branches (varValue is Null in the Null case,
' so the earlier adhReplace = Null is redundant but harmless).
adhReplace = varValue
End Function
A: You ask if there's a way to export a SQL file for this. There is, but you have to write the code to do it, i.e., walk through each row of your query results and write an insert statement (i.e., INSERT INTO ( Field1, Field2 ) VALUES ( value1, value2 ) ( value3, value4), etc.) that is in the correct SQL dialect for the target database engine.
However, it might just be easily to export to a CSV file (or tab-delimited or whatever) and have your database import that file.
There's no real way to know how to answer your question, though, as there's not enough detail on what the queries actually do. If they are INSERT statements, the above will be exactly what you want.
If they are UPDATES, it's more complicated.
But perhaps the above can get you started.
| |
doc_23530292
| ||
doc_23530293
|
I would like that :
*
*the posts which have a key 1 are classified according to the key 1,
*the posts which have a key 1 and a key 2 are classified according to
key 1,
*the posts which do not have a key 1 but a key 2 are classified
according to key 2,
*and after posts that do not have a key.
Here is one of the tests, but which did not give the expected result:
// NOTE(review): pairing EXISTS and NOT EXISTS for the SAME key under an 'OR'
// relation matches every post, so this meta_query filters nothing.
// Also, 'orderby' => 'meta_value_num' needs a 'meta_key' (or named meta_query
// clauses to order by) before WordPress can sort on a meta value - confirm
// which key the ordering is meant to use.
$query = new WP_Query([
    'meta_query' => [
        'relation' => 'OR',
        ['key' => 'key1', 'compare' => 'EXISTS'],
        ['key' => 'key1', 'compare' => 'NOT EXISTS'],
        ['key' => 'key2', 'compare' => 'EXISTS'],
        ['key' => 'key2', 'compare' => 'NOT EXISTS'],
    ],
    'order' => 'DESC',
    'orderby' => 'meta_value_num',
]);
Do you know how to do it ?
| |
doc_23530294
|
Error message:
[ERROR] : ** BUILD FAILED **
[ERROR] : The following build commands failed:
[ERROR] : CompileC build/Intermediates/HelloWorld.build/Debug- iphonesimulator/HelloWorld.build/Objects-normal/x86_64/MediaModule.o Classes/MediaModule.m normal x86_64 objective-c com.apple.compilers.llvm.clang.1_0.compiler
[ERROR] : (1 failure)
A: Since you specified you use TiSDK 6.0.1 and xcode 7.2.1 you have an incompatible combination.
Always look at the Titanium Compatibility Matrix for exactly this reason. In the xCode section it says you need at least xcode 8.0 for it to work. If you want to stick with xcode 7 you can go up to Titanium 5.5
| |
doc_23530295
|
Duplicate entry '0' for key 'PRIMARY' SQL=INSERT INTO irnfl_updates (extension_id, name, element, type, version) VALUES (10000, 'JSN Template Framework', 'jsntplframework', 'plugin', '2.3.6')
What is the fault? How can I resolve this?
A: This probably means that the primary key field in your irnfl_updates table should be defined as auto_increment and it's not. To fix it, just alter the table and enable the constraint.
A: When loading your administrator page, Joomla checks for updates to the installed extensions. The updates table got somehow corrupted, or there's a bug in the update stream for that specific extension.
You have several possible solutions.
*
*Open System -> Clear Cache, clear everything, then go to Extensions -> Extension Manager -> Updates and click on "Find updates". This should reset your problem.
*Manually truncate the "irnfl_updates" table from phpMyAdmin or similar tool. It will be repopulated anyway.
*Remove the "jsntplframework" plugin and reinstall. Please make a backup first, and be sure you are able to restore it if something goes wrong.
A: I recommend to create a full backup of the current situation.
Use phpMyAdmin to solve it directly in your database:
go to the #__users table.
Check the highest ID (e.g. 987).
Under [Operations] you can under "Table Options" set the AUTO_INCREMENT.
Set it to the highest ID + 1 (e.g. 988).
| |
doc_23530296
|
// Shared cell factory producing editable combo-box cells. Note the no-arg
// ComboBoxTableCell constructor is used, so the drop-down item list is empty -
// this is exactly what the question is about.
Callback<TableColumn<Map, String>, TableCell<Map, String>>
        cellFactoryForComboBox = (TableColumn<Map, String> p) -> {
            ComboBoxTableCell<Map, String> cell = new ComboBoxTableCell<>();
            cell.setComboBoxEditable(true); // allow free-text entry as well
            return cell;
        };
// Both columns reuse the same factory, so both get the same (empty) item list.
Col1.setCellFactory(cellFactoryForComboBox);
Col2.setCellFactory(cellFactoryForComboBox);
How can I populate dropDown lists for each column with values in
ObservableList<String> List1
ObservableList<String> List2
How to do this properly?
A: Create seperate cellFactorys for each column and pass the ObservableList to the constructor of ComboBoxTableCell:
// One factory per column: passing the ObservableList to the ComboBoxTableCell
// constructor populates that column's drop-down with those items.
Col1.setCellFactory((TableColumn<Map, String> p) -> {
    ComboBoxTableCell<Map, String> cell = new ComboBoxTableCell<>(List1);
    cell.setComboBoxEditable(true); // allow free-text entry as well
    return cell;
});
Col2.setCellFactory((TableColumn<Map, String> p) -> {
    ComboBoxTableCell<Map, String> cell = new ComboBoxTableCell<>(List2);
    cell.setComboBoxEditable(true);
    return cell;
});
| |
doc_23530297
|
here are my model classes
/// <summary>
/// Sums the UnitStock of all received amounts for the given product and
/// deletes the product when nothing is in stock (sum is 0, or the sum is
/// null because there are no rows / only null stocks).
/// Always returns 1, matching the original contract.
/// </summary>
public int Get(int? id)
{
    // 'using' guarantees the context is disposed even if a query or
    // SaveChanges throws (the original only disposed on the happy path).
    using (JalkahoitolaEntities entities = new JalkahoitolaEntities())
    {
        List<int?> items = (from o in entities.Recieved_ammounts
                            where o.ProductId == id
                            select o.UnitStock).ToList();

        // Compute the sum once instead of twice.
        int? totalStock = items.Sum();
        if (totalStock == 0 || totalStock == null)
        {
            // FirstOrDefault avoids InvalidOperationException when the
            // product row has already been removed.
            Product productToBeRemoved = (from o in entities.Products
                                          where o.ProductId == id
                                          select o).FirstOrDefault();
            if (productToBeRemoved != null)
            {
                entities.Products.Remove(productToBeRemoved);
                entities.SaveChanges();
            }
        }
    }
    return 1;
}
A: If I understood your question correctly, you want to delete ProductGroup then you are missing with deletion of ProductGroup .Update your code to add Deletion logic for ProductGroup as well.
Retrieve Prodcut to be removed
Product ProductToBeRemoved = (from o in entities.Products
where o.ProductId == id
select o).First();
entities.Products.Remove(ProductToBeRemoved);
Delete ProductGroup based on ProductToBeRemoved GroupId
ProductGroup ProductGroupToBeRemoved = (from o in entities.ProductGroup
where o.GroupId== ProductToBeRemoved .GroupId
select o).First();
entities.ProductGroup.Remove(ProductGroupToBeRemoved);
Then call the SaveChanges()
UPDATE : After reading OP's comment.
You don't need to write multiple queries for this. Try using some thing like
// Join received amounts -> products -> product groups, keep only rows that
// still have stock, and project the (GroupId, ProductId) pairs involved.
// NOTE(review): the join uses prodGrp.GroupID but the projection reads
// prodGrp.GroupId - confirm the entity's actual property casing.
var prodGroupsToRemove = (from ra in entities.Recieved_ammounts
                          join prod in entities.Products on ra.ProductId equals prod.ProductId
                          join prodGrp in entities.ProductGroup on prod.GroupId equals prodGrp.GroupID
                          where (ra.UnitStock > 0)
                          select new { GroupId = prodGrp.GroupId , ProductId = prod.ProductId}).ToList();
Above query will return a list of GroupsID,ProductId you want to delete.
Note You need to filter out Products/Group based on result to delete.This might not work simple copy/paste, You may need to update it a bit.
| |
doc_23530298
|
For example, given [10, 15, 3, 7] and k of 17, return true since 10 + 7 is 17.
Code:
numbers = [6, 7, 8, 3]
k = 14

def add_to_k(numbers_list,k_value):
    # Searches all ordered pairs for two entries (at different positions)
    # that sum to k_value, printing the pairs found.
    truth = False
    pairs = []
    for i in numbers_list:
        for l in numbers_list:
            added = i + l
            if added == k_value:
                # index() compares positions so an element is not paired with
                # itself. NOTE(review): with duplicate values index() returns
                # the first occurrence, so duplicates would be skipped.
                if numbers_list.index(i) == numbers_list.index(l):
                    pass
                else:
                    paired = str(i) + ", " + str(l)
                    # BUG (the one this question is about): += on a list with
                    # a string extends the list CHARACTER BY CHARACTER,
                    # producing ['6', ',', ' ', '8', ...] instead of tuples.
                    pairs += paired
                    truth = True
    if truth == True:
        print("Two numbers in the list added together is {}: ".format(k_value) + str(pairs))
    else:
        print("Sorry, none give " + str(k_value))

add_to_k(numbers,k)
This code returns an output like this:
Two numbers in the list added together is 17: ['6', ',', ' ', '8', '8', ',', ' ', '6']
but I want it to give me the two numbers that add to 17 in tuples. For example, [(1,2),(3,4)]
A: The way the values are appended to pairs has been changed in the code below.
numbers = [6, 7, 8, 3]
k = 14

def add_to_k(numbers_list, k_value):
    """Print every ordered pair of entries (at different positions) in
    numbers_list whose sum equals k_value, collected as tuples,
    e.g. [(6, 8), (8, 6)].
    """
    truth = False
    pairs = []
    for i in numbers_list:
        for l in numbers_list:
            added = i + l
            if added == k_value:
                # index() compares positions so an element is not paired with
                # itself. NOTE(review): with duplicate values index() returns
                # the first occurrence, so duplicates would be skipped.
                if numbers_list.index(i) == numbers_list.index(l):
                    pass
                else:
                    # Append a tuple (not a string) so the result prints as
                    # [(6, 8), (8, 6)] rather than individual characters.
                    paired = (i, l)
                    pairs.append(paired)
                    truth = True
    if truth == True:
        # BUG FIX: the message hard-coded "17"; report the actual target value
        # (the question's version already did this correctly with k_value).
        print("Two numbers in the list added together is {}: ".format(k_value) + str(pairs))
    else:
        print("Sorry, none give " + str(k_value))

add_to_k(numbers, k)
output:
Two numbers in the list added together is 17: [(6, 8), (8, 6)]
Different Approach Using itertools
import itertools
Option 1: If you want to produce the unique set of numbers, then you use itertools.combinations
combinations = list(itertools.combinations(numbers, 2))
Option 2: If you want to produce the all combination set of numbers, then you use itertools.permutations
combinations = list(itertools.permutations(numbers,2))
Then check the sum of values to k
output = [comb for comb in combinations if comb[0]+comb[1] == k]
output
Option 1: output usingitertools.combinations
[(6, 8)]
Option 2: output using itertools.permutations
[(6, 8),(8, 6)]
A: List comprehension should work.
li = [10, 15, 3, 7]
k=14
# NOTE(review): this comprehension pairs every element with EVERY element,
# including itself, so k=14 matches 7+7 even though only one 7 is in the
# list. That self-pair is the only reason the printed result below is True;
# for the "two distinct entries" problem the indices would need to differ.
res = k in [a+b for a in li for b in li]
print (res)
Output:
True
| |
doc_23530299
|
I have uploaded the .apk file (build/app/outputs/flutter-apk/app-debug.apk) to Google Drive and downloaded the .apk file on a real Android phone, but I can't install the application on the phone.
I downloaded the .apk file:
When I click on the "app-debug.apk" item, the following screen pops up:
When I click the install button, it loads and displays the following screen:
Also, the Android version of my Android phone is 8.1.0.
I didn't share the code because I don't think the code has anything to do with this.
Why can't I install Flutter app in real Android phone? I would appreciate any help. Thank you in advance!
A: There are 2 options:
*
*You can build your app from Android Studio (or another IDE) - Instead of selecting an emulator, select your own Android phone. You have to switch your phone to Developer mode.
*Publish your app on Google play console, internal testing or closed testing, add yourself as a tester then install the app. You'll need a Developer account.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.